#include "cpu/o3/cpu.hh"
#include "sim/byteswap.hh"
+class EndQuiesceEvent;
+
template <class Impl>
class AlphaFullCPU : public FullO3CPU<Impl>
{
Tick lastActivate;
Tick lastSuspend;
- Event *quiesceEvent;
+ EndQuiesceEvent *quiesceEvent;
virtual BaseCPU *getCpuPtr() { return cpu; }
virtual void unserialize(Checkpoint *cp, const std::string &section);
#if FULL_SYSTEM
- virtual Event *getQuiesceEvent();
+ virtual EndQuiesceEvent *getQuiesceEvent();
- // Not necessarily the best location for these...
- // Having an extra function just to read these is obnoxious
virtual Tick readLastActivate();
virtual Tick readLastSuspend();
virtual int getThreadNum() { return thread->tid; }
- // Also somewhat obnoxious. Really only used for the TLB fault.
- // However, may be quite useful in SPARC.
virtual TheISA::MachInst getInst();
virtual void copyArchRegs(ExecContext *xc);
virtual void clearArchRegs();
- //
- // New accessors for new decoder.
- //
virtual uint64_t readIntReg(int reg_idx);
virtual float readFloatRegSingle(int reg_idx);
virtual Fault setMiscRegWithEffect(int misc_reg, const MiscReg &val);
- // Also not necessarily the best location for these two.
- // Hopefully will go away once we decide upon where st cond
- // failures goes.
+ // @todo: Figure out where these store cond failures should go.
virtual unsigned readStCondFailures() { return thread->storeCondFailures; }
virtual void setStCondFailures(unsigned sc_failures) { thread->storeCondFailures = sc_failures; }
virtual bool inPalMode() { return TheISA::PcPAL(cpu->readPC(thread->tid)); }
#endif
- // Only really makes sense for old CPU model. Still could be useful though.
+ // Only really makes sense for old CPU model. Lots of code
+ // outside the CPU still checks this function, so it will
+ // always return false to keep everything working.
virtual bool misspeculating() { return false; }
#if !FULL_SYSTEM
virtual IntReg getSyscallArg(int i);
- // used to shift args for indirect syscall
virtual void setSyscallArg(int i, IntReg val);
virtual void setSyscallReturn(SyscallReturn return_value);
virtual void syscall() { return cpu->syscall(thread->tid); }
- // Same with st cond failures.
virtual Counter readFuncExeInst() { return thread->funcExeInst; }
#endif
};
- friend class AlphaXC;
+// friend class AlphaXC;
- std::vector<AlphaXC *> xcProxies;
+// std::vector<ExecContext *> xcProxies;
#if FULL_SYSTEM
/** ITB pointer. */
void regStats();
#if FULL_SYSTEM
- //Note that the interrupt stuff from the base CPU might be somewhat
- //ISA specific (ie NumInterruptLevels). These functions might not
- //be needed in FullCPU though.
-// void post_interrupt(int int_num, int index);
-// void clear_interrupt(int int_num, int index);
-// void clear_interrupts();
-
/** Translates instruction requestion. */
Fault translateInstReq(MemReqPtr &req)
{
}
#endif
-
- // Later on may want to remove this misc stuff from the regfile and
- // have it handled at this level. This would be similar to moving certain
- // IPRs into the devices themselves. Might prove to be an issue when
- // trying to rename source/destination registers...
MiscReg readMiscReg(int misc_reg, unsigned tid);
MiscReg readMiscRegWithEffect(int misc_reg, Fault &fault, unsigned tid);
/** Traps to handle given fault. */
void trap(Fault fault, unsigned tid);
- bool simPalCheck(int palFunc);
+ bool simPalCheck(int palFunc, unsigned tid);
/** Processes any interrupts. */
void processInterrupts();
+
+ /** Halts the CPU. */
+ void halt() { panic("Halt not implemented!\n"); }
#endif
#if !FULL_SYSTEM
- // Need to change these into regfile calls that directly set a certain
- // register. Actually, these functions should handle most of this
- // functionality by themselves; should look up the rename and then
- // set the register.
+ /** Executes a syscall.
+ * @todo: Determine if this needs to be virtual.
+ */
+ void syscall(int thread_num);
/** Gets a syscall argument. */
IntReg getSyscallArg(int i, int tid);
/** Sets the return value of a syscall. */
void setSyscallReturn(SyscallReturn return_value, int tid);
-
- /** Executes a syscall.
- * @todo: Determine if this needs to be virtual.
- */
- virtual void syscall(int thread_num);
-
-#endif
-
- public:
-#if FULL_SYSTEM
- /** Halts the CPU. */
- void halt() { panic("Halt not implemented!\n"); }
#endif
- /** Old CPU read from memory function. No longer used. */
+ /** Read from memory function. */
template <class T>
Fault read(MemReqPtr &req, T &data)
{
-// panic("CPU READ NOT IMPLEMENTED W/NEW MEMORY\n");
#if 0
#if FULL_SYSTEM && defined(TARGET_ALPHA)
if (req->flags & LOCKED) {
#endif
#endif
Fault error;
+
+#if FULL_SYSTEM
+ // @todo: Fix this LL/SC hack.
if (req->flags & LOCKED) {
lockAddr = req->paddr;
lockFlag = true;
}
+#endif
error = this->mem->read(req, data);
data = gtoh(data);
return this->iew.ldstQueue.read(req, data, load_idx);
}
- /** Old CPU write to memory function. No longer used. */
+ /** Write to memory function. */
template <class T>
Fault write(MemReqPtr &req, T &data)
{
#endif
#endif
+#if FULL_SYSTEM
+ // @todo: Fix this LL/SC hack.
if (req->flags & LOCKED) {
if (req->flags & UNCACHEABLE) {
req->result = 2;
} else {
- if (this->lockFlag/* && this->lockAddr == req->paddr*/) {
+ if (this->lockFlag) {
req->result = 1;
} else {
req->result = 0;
}
}
}
+#endif
return this->mem->write(req, (T)htog(data));
}
}
Addr lockAddr;
+
bool lockFlag;
};
/*
- * Copyright (c) 2004-2005 The Regents of The University of Michigan
+ * Copyright (c) 2004-2006 The Regents of The University of Michigan
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
SimObjectParam<FunctionalMemory *> mem;
+SimObjectParam<BaseCPU *> checker;
+
Param<Counter> max_insts_any_thread;
Param<Counter> max_insts_all_threads;
Param<Counter> max_loads_any_thread;
Param<unsigned> renameToROBDelay;
Param<unsigned> commitWidth;
Param<unsigned> squashWidth;
+Param<Tick> trapLatency;
+Param<Tick> fetchTrapLatency;
Param<unsigned> localPredictorSize;
Param<unsigned> localCtrBits;
INIT_PARAM_DFLT(mem, "Memory", NULL),
+ INIT_PARAM_DFLT(checker, "Checker CPU", NULL),
+
INIT_PARAM_DFLT(max_insts_any_thread,
"Terminate when any thread reaches this inst count",
0),
INIT_PARAM(renameToROBDelay, "Rename to reorder buffer delay"),
INIT_PARAM(commitWidth, "Commit width"),
INIT_PARAM(squashWidth, "Squash width"),
+ INIT_PARAM_DFLT(trapLatency, "Number of cycles before the trap is handled", 6),
+ INIT_PARAM_DFLT(fetchTrapLatency, "Number of cycles before the fetch trap is handled", 12),
INIT_PARAM(localPredictorSize, "Size of local predictor"),
INIT_PARAM(localCtrBits, "Bits per counter"),
params->dtb = dtb;
#else
params->workload = workload;
- //@todo: change to pageTable
// params->pTable = page_table;
#endif // FULL_SYSTEM
params->mem = mem;
+ params->checker = checker;
+
params->max_insts_any_thread = max_insts_any_thread;
params->max_insts_all_threads = max_insts_all_threads;
params->max_loads_any_thread = max_loads_any_thread;
params->renameToROBDelay = renameToROBDelay;
params->commitWidth = commitWidth;
params->squashWidth = squashWidth;
-
+ params->trapLatency = trapLatency;
+ params->fetchTrapLatency = fetchTrapLatency;
params->localPredictorSize = localPredictorSize;
params->localCtrBits = localCtrBits;
/*
- * Copyright (c) 2004-2005 The Regents of The University of Michigan
+ * Copyright (c) 2004-2006 The Regents of The University of Michigan
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
#include "base/cprintf.hh"
#include "base/statistics.hh"
#include "base/timebuf.hh"
+#include "cpu/checker/exec_context.hh"
#include "cpu/quiesce_event.hh"
-#include "mem/cache/cache.hh" // for dynamic cast
#include "mem/mem_interface.hh"
-#include "sim/builder.hh"
#include "sim/sim_events.hh"
#include "sim/stats.hh"
for (int i = 0; i < this->numThreads; ++i) {
#if FULL_SYSTEM
- assert(i == 0);
+ assert(this->numThreads == 1);
this->thread[i] = new Thread(this, 0, params->mem);
-// this->system->execContexts[i] = this->thread[i]->getXCProxy();
this->thread[i]->setStatus(ExecContext::Suspended);
-
#else
if (i < params->workload.size()) {
DPRINTF(FullCPU, "FullCPU: Workload[%i]'s starting PC is %#x, "
this->thread[i]->numInst = 0;
- xcProxies.push_back(new AlphaXC);
+ ExecContext *xc_proxy;
- xcProxies[i]->cpu = this;
- xcProxies[i]->thread = this->thread[i];
+ AlphaXC *alpha_xc_proxy = new AlphaXC;
- xcProxies[i]->quiesceEvent = new EndQuiesceEvent(xcProxies[i]);
- xcProxies[i]->lastActivate = 0;
- xcProxies[i]->lastSuspend = 0;
+ if (params->checker) {
+ xc_proxy = new CheckerExecContext<AlphaXC>(alpha_xc_proxy, this->checker);
+ } else {
+ xc_proxy = alpha_xc_proxy;
+ }
+ alpha_xc_proxy->cpu = this;
+ alpha_xc_proxy->thread = this->thread[i];
- this->thread[i]->xcProxy = xcProxies[i];
+ alpha_xc_proxy->quiesceEvent =
+ new EndQuiesceEvent(xc_proxy);
+ alpha_xc_proxy->lastActivate = 0;
+ alpha_xc_proxy->lastSuspend = 0;
- this->execContexts.push_back(this->thread[i]->getXCProxy());
+ this->thread[i]->xcProxy = xc_proxy;
+
+ this->execContexts.push_back(xc_proxy);
}
void
AlphaFullCPU<Impl>::AlphaXC::dumpFuncProfile()
{
+ // Currently not supported
}
#endif
thread->funcExeInst = old_context->readFuncExeInst();
#endif
+ EndQuiesceEvent *other_quiesce = old_context->getQuiesceEvent();
+ if (other_quiesce) {
+ // Point the quiesce event's XC at this XC so that it wakes up
+ // the proper CPU.
+ other_quiesce->xc = this;
+ }
+ if (thread->quiesceEvent) {
+ thread->quiesceEvent->xc = this;
+ }
+// storeCondFailures = 0;
+ cpu->lockFlag = false;
+
old_context->setStatus(ExecContext::Unallocated);
thread->inSyscall = false;
AlphaFullCPU<Impl>::AlphaXC::activate(int delay)
{
DPRINTF(FullCPU, "Calling activate on AlphaXC\n");
-// warn("Calling activate on AlphaXC");
+
if (thread->status() == ExecContext::Active)
return;
AlphaFullCPU<Impl>::AlphaXC::suspend()
{
DPRINTF(FullCPU, "Calling suspend on AlphaXC\n");
-// warn("Calling suspend on AlphaXC");
+
if (thread->status() == ExecContext::Suspended)
return;
AlphaFullCPU<Impl>::AlphaXC::deallocate()
{
DPRINTF(FullCPU, "Calling deallocate on AlphaXC\n");
-// warn("Calling deallocate on AlphaXC");
+
if (thread->status() == ExecContext::Unallocated)
return;
AlphaFullCPU<Impl>::AlphaXC::halt()
{
DPRINTF(FullCPU, "Calling halt on AlphaXC\n");
-// warn("Calling halt on AlphaXC");
+
if (thread->status() == ExecContext::Halted)
return;
void
AlphaFullCPU<Impl>::AlphaXC::serialize(std::ostream &os)
{}
+
template <class Impl>
void
AlphaFullCPU<Impl>::AlphaXC::unserialize(Checkpoint *cp, const std::string &section)
#if FULL_SYSTEM
template <class Impl>
-Event *
+EndQuiesceEvent *
AlphaFullCPU<Impl>::AlphaXC::getQuiesceEvent()
{
return quiesceEvent;
AlphaFullCPU<Impl>::AlphaXC::clearArchRegs()
{}
-//
-// New accessors for new decoder.
-//
template <class Impl>
uint64_t
AlphaFullCPU<Impl>::AlphaXC::readIntReg(int reg_idx)
cpu->setSyscallReturn(return_value, thread->tid);
}
-template <class Impl>
-void
-AlphaFullCPU<Impl>::syscall(int tid)
-{
- DPRINTF(FullCPU, "AlphaFullCPU: [tid:%i] Executing syscall().\n\n", tid);
-
- DPRINTF(Activity,"Activity: syscall() called.\n");
-
- // Temporarily increase this by one to account for the syscall
- // instruction.
- ++(this->thread[tid]->funcExeInst);
-
- // Execute the actual syscall.
- this->thread[tid]->syscall();
-
- // Decrease funcExeInst by one as the normal commit will handle
- // incrementing it.
- --(this->thread[tid]->funcExeInst);
-}
-
#endif // FULL_SYSTEM
// Sets a miscellaneous (ISA-special) register for the given thread by
// forwarding the write to the register file.
// NOTE(review): this span is diff residue — '-' lines are removed text,
// '+' lines are added text. Post-patch, the write is unconditional; the
// commented-out trapPending guard is deleted outright.
template <class Impl>
Fault
AlphaFullCPU<Impl>::setMiscReg(int misc_reg, const MiscReg &val, unsigned tid)
{
- // I think that these registers should always be set, regardless of what
- // mode the thread is in. The main difference is if the thread needs to
- // squash as a result of the write, which is controlled by the AlphaXC.
-// if (!this->thread[tid]->trapPending) {
- return this->regFile.setMiscReg(misc_reg, val, tid);
-// } else {
-// return NoFault;
-// }
+ return this->regFile.setMiscReg(misc_reg, val, tid);
}
// Same as setMiscReg() above, but for writes with side effects: forwards
// to regFile.setMiscRegWithEffect() unconditionally; the commented-out
// trapPending guard is deleted.
// NOTE(review): the return-type line (presumably Fault, matching the
// sibling above) is not visible in this residue — confirm against the
// full file. '-'/'+' prefixes are diff markers.
template <class Impl>
AlphaFullCPU<Impl>::setMiscRegWithEffect(int misc_reg, const MiscReg &val,
unsigned tid)
{
-// if (!this->thread[tid]->trapPending) {
- return this->regFile.setMiscRegWithEffect(misc_reg, val, tid);
-// } else {
-// return NoFault;
-// }
+ return this->regFile.setMiscRegWithEffect(misc_reg, val, tid);
}
// Initiates a squash of thread tid on behalf of its execution context:
// marks the thread as being in a syscall (so commit treats in-flight
// state accordingly) and asks commit to generate an XC-triggered squash
// event. The old trapPending assignment (commented out) is deleted.
template <class Impl>
void
AlphaFullCPU<Impl>::squashFromXC(unsigned tid)
{
-// this->thread[tid]->trapPending = true;
this->thread[tid]->inSyscall = true;
this->commit.generateXCEvent(tid);
}
if (this->thread[0]->status() == ExecContext::Suspended) {
DPRINTF(IPI,"Suspended Processor awoke\n");
- xcProxies[0]->activate();
+// xcProxies[0]->activate();
+ this->execContexts[0]->activate();
}
}
Fault
AlphaFullCPU<Impl>::hwrei(unsigned tid)
{
-#if 0
- if (!inPalMode(this->readPC(tid)))
- return new AlphaISA::UnimplementedOpcodeFault;
-
- setNextPC(cpu->readMiscReg(AlphaISA::IPR_EXC_ADDR, tid), tid);
+ // Need to clear the lock flag upon returning from an interrupt.
+ this->lockFlag = false;
- cpu->kernelStats->hwrei();
+ this->kernelStats->hwrei();
-// if ((this->regFile.miscRegs[tid].readReg(AlphaISA::IPR_EXC_ADDR) & 1) == 0)
-// AlphaISA::swap_palshadow(&regs, false);
+ this->checkInterrupts = true;
- cpu->checkInterrupts = true;
-#endif
-// panic("Do not call this function!");
- // Need to clear the lock flag upon returning from an interrupt.
- this->lockFlag = false;
// FIXME: XXX check for interrupts? XXX
return NoFault;
}
template <class Impl>
bool
-AlphaFullCPU<Impl>::simPalCheck(int palFunc)
+AlphaFullCPU<Impl>::simPalCheck(int palFunc, unsigned tid)
{
-// kernelStats.callpal(palFunc);
+ if (this->kernelStats)
+ this->kernelStats->callpal(palFunc,
+ this->execContexts[tid]);
switch (palFunc) {
case PAL::halt:
return true;
}
-// Probably shouldn't be able to switch to the trap handler as quickly as
-// this. Also needs to get the exception restart address from the commit
-// stage.
// Handles the given fault for thread tid by invoking the fault on the
// thread's execution context — the fault object itself performs the
// IPR/PC updates that the large deleted block below used to do by hand.
// Post-patch the target changes from the removed xcProxies vector to
// this->execContexts (which may wrap the proxy in a checker context).
// '-' lines (including the whole /* ... */ block) are deleted dead code.
template <class Impl>
void
AlphaFullCPU<Impl>::trap(Fault fault, unsigned tid)
{
-
- fault->invoke(this->xcProxies[tid]);
-/* // Keep in mind that a trap may be initiated by fetch if there's a TLB
- // miss
- uint64_t PC = this->commit.readCommitPC();
-
- DPRINTF(Fault, "Fault %s\n", fault->name());
- this->recordEvent(csprintf("Fault %s", fault->name()));
-
- //kernelStats.fault(fault);
-
- if (fault->isA<ArithmeticFault>())
- panic("Arithmetic traps are unimplemented!");
-
- // exception restart address - Get the commit PC
- if (!fault->isA<InterruptFault>() || !inPalMode(PC))
- this->regFile.miscRegs.setReg(AlphaISA::IPR_EXC_ADDR, PC);
-
- if (fault->isA<PalFault>() || fault->isA<ArithmeticFault>())
- // || fault == InterruptFault && !PC_PAL(regs.pc)
- {
- // traps... skip faulting instruction
- AlphaISA::MiscReg ipr_exc_addr =
- this->regFile.miscRegs.readReg(AlphaISA::IPR_EXC_ADDR);
- this->regFile.miscRegs.setReg(AlphaISA::IPR_EXC_ADDR,
- ipr_exc_addr + 4);
- }
-
- if (!inPalMode(PC))
- swapPALShadow(true);
-
- this->regFile.setPC(this->regFile.miscRegs.readReg(AlphaISA::IPR_PAL_BASE) +
- (dynamic_cast<AlphaFault *>(fault.get()))->vect(), 0);
- this->regFile.setNextPC(PC + sizeof(MachInst), 0);*/
+ fault->invoke(this->execContexts[tid]);
}
template <class Impl>
// Check for interrupts here. For now can copy the code that
// exists within isa_fullsys_traits.hh. Also assume that thread 0
// is the one that handles the interrupts.
+ // @todo: Possibly consolidate the interrupt checking code.
+ // @todo: Allow other threads to handle interrupts.
// Check if there are any outstanding interrupts
//Handle the interrupts
if (ipl && ipl > this->readMiscReg(IPR_IPLR, 0)) {
this->setMiscReg(IPR_ISR, summary, 0);
this->setMiscReg(IPR_INTID, ipl, 0);
+ if (this->checker) {
+ this->checker->cpuXCBase()->setMiscReg(IPR_ISR, summary);
+ this->checker->cpuXCBase()->setMiscReg(IPR_INTID, ipl);
+ }
this->trap(Fault(new InterruptFault), 0);
DPRINTF(Flow, "Interrupt! IPLR=%d ipl=%d summary=%x\n",
this->readMiscReg(IPR_IPLR, 0), ipl, summary);
#endif // FULL_SYSTEM
#if !FULL_SYSTEM
+
// Executes a syscall for thread tid (syscall-emulation mode only — this
// definition sits under #if !FULL_SYSTEM). All lines are '+' additions:
// the function is moved here from its old location and is no longer
// declared virtual. The funcExeInst counter is bumped around the call
// because commit will increment it again for the syscall instruction.
+template <class Impl>
+void
+AlphaFullCPU<Impl>::syscall(int tid)
+{
+ DPRINTF(FullCPU, "AlphaFullCPU: [tid:%i] Executing syscall().\n\n", tid);
+
+ DPRINTF(Activity,"Activity: syscall() called.\n");
+
+ // Temporarily increase this by one to account for the syscall
+ // instruction.
+ ++(this->thread[tid]->funcExeInst);
+
+ // Execute the actual syscall.
+ this->thread[tid]->syscall();
+
+ // Decrease funcExeInst by one as the normal commit will handle
+ // incrementing it.
+ --(this->thread[tid]->funcExeInst);
+}
+
template <class Impl>
TheISA::IntReg
AlphaFullCPU<Impl>::getSyscallArg(int i, int tid)
/*
- * Copyright (c) 2004-2005 The Regents of The University of Michigan
+ * Copyright (c) 2004-2006 The Regents of The University of Michigan
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
#include "cpu/o3/alpha_impl.hh"
/**
- * Mostly implementation & ISA specific AlphaDynInst. As with most other classes
- * in the new CPU model, it is templated on the Impl to allow for passing in of
- * all types, such as the CPU type and the ISA type. The AlphaDynInst serves
- * as the primary interface to the CPU; it plays the role that the ExecContext
- * does for the old CPU and the SimpleCPU. The goal is to abstract ExecContext
- * purely into an interface, and have it forward calls to the appropriate
- * CPU interface, which in the new CPU model's case would be this AlphaDynInst,
- * or any other high level implementation specific DynInst.
+ * Mostly implementation & ISA specific AlphaDynInst. As with most
+ * other classes in the new CPU model, it is templated on the Impl to
+ * allow for passing in of all types, such as the CPU type and the ISA
+ * type. The AlphaDynInst serves as the primary interface to the CPU
+ * for instructions that are executing.
*/
template <class Impl>
class AlphaDynInst : public BaseDynInst<Impl>
/** Executes the instruction.*/
Fault execute();
+ /** Initiates the access. Only valid for memory operations. */
Fault initiateAcc();
+ /** Completes the access. Only valid for memory operations. */
Fault completeAcc();
private:
Fault setMiscReg(int misc_reg, const MiscReg &val)
{
+ this->instResult.integer = val;
return this->cpu->setMiscReg(misc_reg, val, this->threadNumber);
}
void syscall();
#endif
-
-
private:
/** Physical register index of the destination registers of this
* instruction.
}
public:
- /** Calculates EA part of a memory instruction. Currently unused, though
- * it may be useful in the future when memory instructions aren't
- * executed with the EA calculation and the memory access being atomic.
+ /** Calculates EA part of a memory instruction. Currently unused,
+ * though it may be useful in the future if we want to split
+ * memory operations into EA calculation and memory access parts.
*/
Fault calcEA()
{
}
/** Does the memory access part of a memory instruction. Currently unused,
- * though it may be useful in the future when memory instructions aren't
- * executed with the EA calculation and the memory access being atomic.
+ * though it may be useful in the future if we want to split
+ * memory operations into EA calculation and memory access parts.
*/
Fault memAccess()
{
/*
- * Copyright (c) 2004-2005 The Regents of The University of Michigan
+ * Copyright (c) 2004-2006 The Regents of The University of Michigan
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
this->setNextPC(this->cpu->readMiscReg(AlphaISA::IPR_EXC_ADDR,
this->threadNumber));
- this->cpu->kernelStats->hwrei();
-
// Tell CPU to clear any state it needs to if a hwrei is taken.
this->cpu->hwrei(this->threadNumber);
- this->cpu->checkInterrupts = true;
-
// FIXME: XXX check for interrupts? XXX
return NoFault;
}
bool
AlphaDynInst<Impl>::simPalCheck(int palFunc)
{
- return this->cpu->simPalCheck(palFunc);
+ return this->cpu->simPalCheck(palFunc, this->threadNumber);
}
#else
template <class Impl>
/*
- * Copyright (c) 2004-2005 The Regents of The University of Michigan
+ * Copyright (c) 2004-2006 The Regents of The University of Michigan
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
FunctionalMemory *mem;
+ BaseCPU *checker;
+
//
// Caches
//
unsigned renameToROBDelay;
unsigned commitWidth;
unsigned squashWidth;
+ Tick trapLatency;
+ Tick fetchTrapLatency;
//
// Branch predictor (BP & BTB)
/*
- * Copyright (c) 2004-2005 The Regents of The University of Michigan
+ * Copyright (c) 2004-2006 The Regents of The University of Michigan
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
class O3ThreadState;
/**
- * DefaultCommit handles single threaded and SMT commit. Its width is specified
- * by the parameters; each cycle it tries to commit that many instructions. The
- * SMT policy decides which thread it tries to commit instructions from. Non-
- * speculative instructions must reach the head of the ROB before they are
- * ready to execute; once they reach the head, commit will broadcast the
- * instruction's sequence number to the previous stages so that they can issue/
- * execute the instruction. Only one non-speculative instruction is handled per
- * cycle. Commit is responsible for handling all back-end initiated redirects.
- * It receives the redirect, and then broadcasts it to all stages, indicating
- * the sequence number they should squash until, and any necessary branch mis-
- * prediction information as well. It priortizes redirects by instruction's age,
- * only broadcasting a redirect if it corresponds to an instruction that should
- * currently be in the ROB. This is done by tracking the sequence number of the
- * youngest instruction in the ROB, which gets updated to any squashing
- * instruction's sequence number, and only broadcasting a redirect if it
- * corresponds to an older instruction. Commit also supports multiple cycle
- * squashing, to model a ROB that can only remove a certain number of
- * instructions per cycle. Eventually traps and interrupts will most likely
- * be handled here as well.
+ * DefaultCommit handles single threaded and SMT commit. Its width is
+ * specified by the parameters; each cycle it tries to commit that
+ * many instructions. The SMT policy decides which thread it tries to
+ * commit instructions from. Non- speculative instructions must reach
+ * the head of the ROB before they are ready to execute; once they
+ * reach the head, commit will broadcast the instruction's sequence
+ * number to the previous stages so that they can issue/ execute the
+ * instruction. Only one non-speculative instruction is handled per
+ * cycle. Commit is responsible for handling all back-end initiated
+ * redirects. It receives the redirect, and then broadcasts it to all
+ * stages, indicating the sequence number they should squash until,
+ * and any necessary branch misprediction information as well. It
+ * priortizes redirects by instruction's age, only broadcasting a
+ * redirect if it corresponds to an instruction that should currently
+ * be in the ROB. This is done by tracking the sequence number of the
+ * youngest instruction in the ROB, which gets updated to any
+ * squashing instruction's sequence number, and only broadcasting a
+ * redirect if it corresponds to an older instruction. Commit also
+ * supports multiple cycle squashing, to model a ROB that can only
+ * remove a certain number of instructions per cycle. Eventually traps
+ * and interrupts will most likely be handled here as well.
*/
template<class Impl>
class DefaultCommit
typedef typename CPUPol::IEWStruct IEWStruct;
typedef typename CPUPol::RenameStruct RenameStruct;
+ typedef typename CPUPol::Fetch Fetch;
typedef typename CPUPol::IEW IEW;
typedef O3ThreadState<Impl> Thread;
/** Sets the pointer to the queue coming from IEW. */
void setIEWQueue(TimeBuffer<IEWStruct> *iq_ptr);
+ void setFetchStage(Fetch *fetch_stage);
+
+ Fetch *fetchStage;
+
/** Sets the poitner to the IEW stage. */
void setIEWStage(IEW *iew_stage);
- /** The pointer to the IEW stage. Used solely to ensure that syscalls do
- * not execute until all stores have written back.
+ /** The pointer to the IEW stage. Used solely to ensure that
+ * various events (traps, interrupts, syscalls) do not occur until
+ * all stores have written back.
*/
IEW *iewStage;
void switchOut();
+ void doSwitchOut();
+
void takeOverFrom();
/** Ticks the commit stage, which tries to commit instructions. */
*/
bool changedROBEntries();
+ void squashAll(unsigned tid);
+
void squashFromTrap(unsigned tid);
void squashFromXC(unsigned tid);
- void squashInFlightInsts(unsigned tid);
-
- private:
/** Commits as many instructions as possible. */
void commitInsts();
int oldestReady();
public:
- /** Returns the PC of the head instruction of the ROB. */
- uint64_t readPC();
+ /** Returns the PC of the head instruction of the ROB.
+ * @todo: Probably remove this function as it returns only thread 0.
+ */
+ uint64_t readPC() { return PC[0]; }
uint64_t readPC(unsigned tid) { return PC[tid]; }
void setNextPC(uint64_t val, unsigned tid) { nextPC[tid] = val; }
- /** Sets that the ROB is currently squashing. */
- void setSquashing(unsigned tid);
-
private:
/** Time buffer interface. */
TimeBuffer<TimeStruct> *timeBuffer;
std::vector<Thread *> thread;
- private:
Fault fetchFault;
- InstSeqNum fetchFaultSN;
+
int fetchTrapWait;
+
/** Records that commit has written to the time buffer this cycle. Used for
* the CPU to determine if it can deschedule itself if there is no activity.
*/
/** Number of Active Threads */
unsigned numThreads;
+ bool switchPending;
bool switchedOut;
Tick trapLatency;
Tick fetchTrapLatency;
+
Tick fetchFaultTick;
Addr PC[Impl::MaxThreads];
* speculative instruction reaching the head of the ROB.
*/
Stats::Scalar<> commitNonSpecStalls;
- /** Stat for the total number of committed branches. */
-// Stats::Scalar<> commitCommittedBranches;
- /** Stat for the total number of committed loads. */
-// Stats::Scalar<> commitCommittedLoads;
- /** Stat for the total number of committed memory references. */
-// Stats::Scalar<> commitCommittedMemRefs;
/** Stat for the total number of branch mispredicts that caused a squash. */
Stats::Scalar<> branchMispredicts;
/** Distribution of the number of committed instructions each cycle. */
Stats::Distribution<> numCommittedDist;
- // total number of instructions committed
- Stats::Vector<> stat_com_inst;
- Stats::Vector<> stat_com_swp;
- Stats::Vector<> stat_com_refs;
- Stats::Vector<> stat_com_loads;
- Stats::Vector<> stat_com_membars;
- Stats::Vector<> stat_com_branches;
-
- Stats::Scalar<> commit_eligible_samples;
- Stats::Vector<> commit_eligible;
+ /** Total number of instructions committed. */
+ Stats::Vector<> statComInst;
+ /** Total number of software prefetches committed. */
+ Stats::Vector<> statComSwp;
+ /** Stat for the total number of committed memory references. */
+ Stats::Vector<> statComRefs;
+ /** Stat for the total number of committed loads. */
+ Stats::Vector<> statComLoads;
+ /** Total number of committed memory barriers. */
+ Stats::Vector<> statComMembars;
+ /** Total number of committed branches. */
+ Stats::Vector<> statComBranches;
+
+ Stats::Scalar<> commitEligibleSamples;
+ Stats::Vector<> commitEligible;
};
#endif // __CPU_O3_COMMIT_HH__
/*
- * Copyright (c) 2004-2005 The Regents of The University of Michigan
+ * Copyright (c) 2004-2006 The Regents of The University of Michigan
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
#include "base/loader/symtab.hh"
#include "base/timebuf.hh"
+#include "cpu/checker/cpu.hh"
#include "cpu/exetrace.hh"
#include "cpu/o3/commit.hh"
#include "cpu/o3/thread_state.hh"
void
DefaultCommit<Impl>::TrapEvent::process()
{
- // This will get reset if it was switched out.
+ // This will get reset by commit if it was switched out at the
+ // time of this event processing.
commit->trapSquash[tid] = true;
}
iewWidth(params->executeWidth),
commitWidth(params->commitWidth),
numThreads(params->numberOfThreads),
- switchedOut(false)
+ switchedOut(false),
+ trapLatency(params->trapLatency),
+ fetchTrapLatency(params->fetchTrapLatency)
{
_status = Active;
_nextStatus = Inactive;
xcSquash[i] = false;
}
- // Hardcoded trap latency.
- trapLatency = 6;
- fetchTrapLatency = 12;
fetchFaultTick = 0;
fetchTrapWait = 0;
}
.desc("The number of times commit has been forced to stall to "
"communicate backwards")
.prereq(commitNonSpecStalls);
-/*
- commitCommittedBranches
- .name(name() + ".commitCommittedBranches")
- .desc("The number of committed branches")
- .prereq(commitCommittedBranches);
- commitCommittedLoads
- .name(name() + ".commitCommittedLoads")
- .desc("The number of committed loads")
- .prereq(commitCommittedLoads);
- commitCommittedMemRefs
- .name(name() + ".commitCommittedMemRefs")
- .desc("The number of committed memory references")
- .prereq(commitCommittedMemRefs);
-*/
branchMispredicts
.name(name() + ".branchMispredicts")
.desc("The number of times a branch was mispredicted")
.flags(Stats::pdf)
;
- stat_com_inst
+ statComInst
.init(cpu->number_of_threads)
.name(name() + ".COM:count")
.desc("Number of instructions committed")
.flags(total)
;
- stat_com_swp
+ statComSwp
.init(cpu->number_of_threads)
.name(name() + ".COM:swp_count")
.desc("Number of s/w prefetches committed")
.flags(total)
;
- stat_com_refs
+ statComRefs
.init(cpu->number_of_threads)
.name(name() + ".COM:refs")
.desc("Number of memory references committed")
.flags(total)
;
- stat_com_loads
+ statComLoads
.init(cpu->number_of_threads)
.name(name() + ".COM:loads")
.desc("Number of loads committed")
.flags(total)
;
- stat_com_membars
+ statComMembars
.init(cpu->number_of_threads)
.name(name() + ".COM:membars")
.desc("Number of memory barriers committed")
.flags(total)
;
- stat_com_branches
+ statComBranches
.init(cpu->number_of_threads)
.name(name() + ".COM:branches")
.desc("Number of branches committed")
// -> The standard deviation is computed only over cycles where
// we reached the BW limit
//
- commit_eligible
+ commitEligible
.init(cpu->number_of_threads)
.name(name() + ".COM:bw_limited")
.desc("number of insts not committed due to BW limits")
.flags(total)
;
- commit_eligible_samples
+ commitEligibleSamples
.name(name() + ".COM:bw_lim_events")
.desc("number cycles where commit BW limit reached")
;
// the simulation, so it starts as active.
cpu->activateStage(FullCPU::CommitIdx);
- trapLatency = cpu->cycles(6);
- fetchTrapLatency = cpu->cycles(12);
+ trapLatency = cpu->cycles(trapLatency);
+ fetchTrapLatency = cpu->cycles(fetchTrapLatency);
}
template <class Impl>
fromIEW = iewQueue->getWire(-iewToCommitDelay);
}
// Newly added setter: stores a non-owning pointer to the fetch stage in
// the new fetchStage member (declared alongside setFetchStage in the
// commit.hh hunk above). Presumably used for the fetch-fault/fetch-trap
// handling commit now performs — TODO confirm against the full file.
+template <class Impl>
+void
+DefaultCommit<Impl>::setFetchStage(Fetch *fetch_stage)
+{
+ fetchStage = fetch_stage;
+}
+
template <class Impl>
void
DefaultCommit<Impl>::setIEWStage(IEW *iew_stage)
void
DefaultCommit<Impl>::switchOut()
{
+ switchPending = true;
+}
+
// Newly added: performs the actual switch-out work. switchOut() above
// now only sets switchPending; this routine marks the stage switched
// out, clears the pending flag, and switches out the ROB. Presumably
// called once commit reaches a safe point — TODO confirm the call site.
+template <class Impl>
+void
+DefaultCommit<Impl>::doSwitchOut()
+{
+ switchedOut = true;
+ switchPending = false;
rob->switchOut();
}
void
DefaultCommit<Impl>::takeOverFrom()
{
+ switchedOut = false;
_status = Active;
_nextStatus = Inactive;
for (int i=0; i < numThreads; i++) {
void
DefaultCommit<Impl>::updateStatus()
{
- if (commitStatus[0] == TrapPending ||
- commitStatus[0] == FetchTrapPending) {
- _nextStatus = Active;
+ // reset ROB changed variable
+ list<unsigned>::iterator threads = (*activeThreads).begin();
+ while (threads != (*activeThreads).end()) {
+ unsigned tid = *threads++;
+ changedROBNumEntries[tid] = false;
+
+ // Also check if any of the threads has a trap pending
+ if (commitStatus[tid] == TrapPending ||
+ commitStatus[tid] == FetchTrapPending) {
+ _nextStatus = Active;
+ }
}
if (_nextStatus == Inactive && _status == Active) {
}
_status = _nextStatus;
-
- // reset ROB changed variable
- list<unsigned>::iterator threads = (*activeThreads).begin();
- while (threads != (*activeThreads).end()) {
- unsigned tid = *threads++;
- changedROBNumEntries[tid] = false;
- }
}
template <class Impl>
template <class Impl>
void
-DefaultCommit<Impl>::squashFromTrap(unsigned tid)
+DefaultCommit<Impl>::squashAll(unsigned tid)
{
// If we want to include the squashing instruction in the squash,
// then use one older sequence number.
// Hopefully this doesn't mess things up. Basically I want to squash
// all instructions of this thread.
InstSeqNum squashed_inst = rob->isEmpty() ?
- 0 : rob->readHeadInst(tid)->seqNum - 1;
+ 0 : rob->readHeadInst(tid)->seqNum - 1;;
// All younger instructions will be squashed. Set the sequence
// number as the youngest instruction in the ROB (0 in this case.
toIEW->commitInfo[tid].branchMispredict = false;
-// toIEW->commitInfo[tid].branchTaken = fromIEW->branchTaken[tid];
-
toIEW->commitInfo[tid].nextPC = PC[tid];
+}
+
+template <class Impl>
+void
+DefaultCommit<Impl>::squashFromTrap(unsigned tid)
+{
+ squashAll(tid);
DPRINTF(Commit, "Squashing from trap, restarting at PC %#x\n", PC[tid]);
- // Hopefully nobody tries to use the mispredPC becuase I said there
- // wasn't a branch mispredict.
-// toIEW->commitInfo[tid].mispredPC = fromIEW->mispredPC[tid];
thread[tid]->trapPending = false;
thread[tid]->inSyscall = false;
trapSquash[tid] = false;
- // Not sure what to set this to...
commitStatus[tid] = ROBSquashing;
cpu->activityThisCycle();
void
DefaultCommit<Impl>::squashFromXC(unsigned tid)
{
- // For now these are identical. In the future, the squash from trap
- // might execute the trap prior to the squash.
-
- // If we want to include the squashing instruction in the squash,
- // then use one older sequence number.
- // Hopefully this doesn't mess things up. Basically I want to squash
- // all instructions of this thread.
- InstSeqNum squashed_inst = rob->isEmpty() ?
- 0 : rob->readHeadInst(tid)->seqNum - 1;;
-
- // All younger instructions will be squashed. Set the sequence
- // number as the youngest instruction in the ROB (0 in this case.
- // Hopefully nothing breaks.)
- youngestSeqNum[tid] = 0;
-
- rob->squash(squashed_inst, tid);
- changedROBNumEntries[tid] = true;
-
- // Send back the sequence number of the squashed instruction.
- toIEW->commitInfo[tid].doneSeqNum = squashed_inst;
-
- // Send back the squash signal to tell stages that they should
- // squash.
- toIEW->commitInfo[tid].squash = true;
-
- // Send back the rob squashing signal so other stages know that
- // the ROB is in the process of squashing.
- toIEW->commitInfo[tid].robSquashing = true;
-
- toIEW->commitInfo[tid].branchMispredict = false;
-
-// toIEW->commitInfo[tid].branchTaken = fromIEW->branchTaken[tid];
-
- toIEW->commitInfo[tid].nextPC = PC[tid];
+ squashAll(tid);
DPRINTF(Commit, "Squashing from XC, restarting at PC %#x\n", PC[tid]);
- // Hopefully nobody tries to use the mispredPC becuase I said there
- // wasn't a branch mispredict.
-// toIEW->commitInfo[tid].mispredPC = fromIEW->mispredPC[tid];
thread[tid]->inSyscall = false;
assert(!thread[tid]->trapPending);
- // Not sure what to set this to...
+
commitStatus[tid] = ROBSquashing;
cpu->activityThisCycle();
++squashCounter;
}
-template <class Impl>
-void
-DefaultCommit<Impl>::squashInFlightInsts(unsigned tid)
-{
- // @todo: Fix this hardcoded number.
- for (int i = 0; i < -5; ++i) {
- for (int j = 0; j < (*iewQueue)[i].size; ++j) {
- DynInstPtr inst = (*iewQueue)[i].insts[j];
- if (inst->threadNumber == tid &&
- !inst->isSquashed()) {
- inst->setSquashed();
- }
- }
- }
-}
-
template <class Impl>
void
DefaultCommit<Impl>::tick()
wroteToTimeBuffer = false;
_nextStatus = Inactive;
- // If the ROB is currently in its squash sequence, then continue
- // to squash. In this case, commit does not do anything. Otherwise
- // run commit.
+ if (switchPending && rob->isEmpty() && !iewStage->hasStoresToWB()) {
+ cpu->signalSwitched();
+ return;
+ }
+
list<unsigned>::iterator threads = (*activeThreads).begin();
- // Maybe this should be dependent upon any of the commits actually
- // squashing.
+ // Check if any of the threads are done squashing. Change the
+ // status if they are done.
while (threads != (*activeThreads).end()) {
unsigned tid = *threads++;
if (wroteToTimeBuffer) {
- DPRINTF(Activity,"Activity This Cycle.\n");
+ DPRINTF(Activity, "Activity This Cycle.\n");
cpu->activityThisCycle();
}
// Check for interrupts
//////////////////////////////////////
- // Process interrupts if interrupts are enabled and not in PAL mode.
- // Take the PC from commit and write it to the IPR, then squash. The
- // interrupt completing will take care of restoring the PC from that value
- // in the IPR. Look at IPR[EXC_ADDR];
- // hwrei() is what resets the PC to the place where instruction execution
- // beings again.
#if FULL_SYSTEM
-//#if 0
+ // Process interrupts if interrupts are enabled, not in PAL mode,
+ // and no other traps or external squashes are currently pending.
+ // @todo: Allow other threads to handle interrupts.
if (cpu->checkInterrupts &&
cpu->check_interrupts() &&
!cpu->inPalMode(readPC()) &&
!trapSquash[0] &&
!xcSquash[0]) {
-// commitStatus[0] = TrapPending;
+ // Tell fetch that there is an interrupt pending. This will
+ // make fetch wait until it sees a non PAL-mode PC, at which
+ // point it stops fetching instructions.
toIEW->commitInfo[0].interruptPending = true;
- if (rob->isEmpty() && !iewStage->hasStoresToWB()) {
- // Will need to squash all instructions currently in flight and have
- // the interrupt handler restart at the last non-committed inst.
- // Most of that can be handled through the trap() function. The
- // processInterrupts() function really just checks for interrupts
- // and then calls trap() if there is an interrupt present.
+ // Wait until the ROB is empty and all stores have drained in
+ // order to enter the interrupt.
+ if (rob->isEmpty() && !iewStage->hasStoresToWB()) {
// Not sure which thread should be the one to interrupt. For now
// always do thread 0.
assert(!thread[0]->inSyscall);
#endif // FULL_SYSTEM
////////////////////////////////////
- // Check for squash signal, handle that first
+ // Check for any possible squashes, handle them first
////////////////////////////////////
- // Check if the IEW stage is telling the ROB to squash.
list<unsigned>::iterator threads = (*activeThreads).begin();
while (threads != (*activeThreads).end()) {
unsigned tid = *threads++;
if (fromFetch->fetchFault && commitStatus[0] != TrapPending) {
- // Record the fault. Wait until it's empty in the ROB. Then handle the trap.
- // Ignore it if there's already a trap pending as fetch will be redirected.
+ // Record the fault. Wait until it's empty in the ROB.
+ // Then handle the trap. Ignore it if there's already a
+ // trap pending as fetch will be redirected.
fetchFault = fromFetch->fetchFault;
- fetchFaultSN = fromFetch->fetchFaultSN;
fetchFaultTick = curTick + fetchTrapLatency;
commitStatus[0] = FetchTrapPending;
DPRINTF(Commit, "Fault from fetch recorded. Will trap if the "
"ROB empties without squashing the fault.\n");
fetchTrapWait = 0;
}
+
+ // Fetch may tell commit to clear the trap if it's been squashed.
if (fromFetch->clearFetchFault) {
DPRINTF(Commit, "Received clear fetch fault signal\n");
fetchTrapWait = 0;
commitStatus[tid] != TrapPending &&
fromIEW->squashedSeqNum[tid] <= youngestSeqNum[tid]) {
- DPRINTF(Commit, "[tid:%u]: Squashing instructions in the "
- "ROB.\n",
- tid);
-
DPRINTF(Commit, "[tid:%i]: Squashing due to PC %#x [sn:%i]\n",
tid,
fromIEW->mispredPC[tid],
rob->squash(squashed_inst, tid);
changedROBNumEntries[tid] = true;
- // Send back the sequence number of the squashed instruction.
toIEW->commitInfo[tid].doneSeqNum = squashed_inst;
- // Send back the squash signal to tell stages that they should
- // squash.
toIEW->commitInfo[tid].squash = true;
// Send back the rob squashing signal so other stages know that
toIEW->commitInfo[tid].nextPC = fromIEW->nextPC[tid];
- DPRINTF(Commit, "Squashing from IEW, restarting at PC %#x\n",
- fromIEW->nextPC[tid]);
-
- toIEW->commitInfo[tid].mispredPC =
- fromIEW->mispredPC[tid];
+ toIEW->commitInfo[tid].mispredPC = fromIEW->mispredPC[tid];
if (toIEW->commitInfo[tid].branchMispredict) {
++branchMispredicts;
{
////////////////////////////////////
// Handle commit
- // Note that commit will be handled prior to the ROB so that the ROB
- // only tries to commit instructions it has in this current cycle, and
- // not instructions it is writing in during this cycle.
- // Can't commit and squash things at the same time...
+ // Note that commit will be handled prior to putting new
+ // instructions in the ROB so that the ROB only tries to commit
+ // instructions it has in this current cycle, and not instructions
+ // it is writing in during this cycle. Can't commit and squash
+ // things at the same time...
////////////////////////////////////
DPRINTF(Commit, "Trying to commit instructions in the ROB.\n");
DynInstPtr head_inst;
#if FULL_SYSTEM
- if (commitStatus[0] == FetchTrapPending) {
+ // Not the best way to check if the front end is empty, but it should
+ // work.
+ // @todo: Try to avoid directly accessing fetch.
+ if (commitStatus[0] == FetchTrapPending && rob->isEmpty()) {
DPRINTF(Commit, "Fault from fetch is pending.\n");
- if (rob->isEmpty()) {
- fetchTrapWait++;
- if (fetchTrapWait > 10000000) {
- panic("Fetch trap has been pending for a long time!");
- }
- if (fetchFaultTick > curTick) {
- DPRINTF(Commit, "Not enough cycles since fault, fault will "
- "happen on %lli\n",
- fetchFaultTick);
- cpu->activityThisCycle();
- return;
- } else if (iewStage->hasStoresToWB()) {
- DPRINTF(Commit, "IEW still has stores to WB. Waiting until "
- "they are completed. fetchTrapWait:%i\n",
- fetchTrapWait);
- cpu->activityThisCycle();
- return;
- } else if (cpu->inPalMode(readPC())) {
- DPRINTF(Commit, "In pal mode right now. fetchTrapWait:%i\n",
- fetchTrapWait);
- return;
- }
- fetchTrapWait = 0;
- DPRINTF(Commit, "ROB is empty, handling fetch trap.\n");
- assert(!thread[0]->inSyscall);
+ fetchTrapWait++;
+ if (fetchTrapWait > 10000000) {
+ panic("Fetch trap has been pending for a long time!");
+ }
+ if (fetchFaultTick > curTick) {
+ DPRINTF(Commit, "Not enough cycles since fault, fault will "
+ "happen on %lli\n",
+ fetchFaultTick);
+ cpu->activityThisCycle();
+ return;
+ } else if (iewStage->hasStoresToWB()) {
+ DPRINTF(Commit, "IEW still has stores to WB. Waiting until "
+ "they are completed. fetchTrapWait:%i\n",
+ fetchTrapWait);
+ cpu->activityThisCycle();
+ return;
+ } else if (cpu->inPalMode(readPC())) {
+ DPRINTF(Commit, "In pal mode right now. fetchTrapWait:%i\n",
+ fetchTrapWait);
+ return;
+ } else if (fetchStage->getYoungestSN() > youngestSeqNum[0]) {
+ DPRINTF(Commit, "Waiting for front end to drain. fetchTrapWait:%i\n",
+ fetchTrapWait);
+ return;
+ }
+ fetchTrapWait = 0;
+ DPRINTF(Commit, "ROB is empty, handling fetch trap.\n");
- thread[0]->inSyscall = true;
+ assert(!thread[0]->inSyscall);
- // Consider holding onto the trap and waiting until the trap event
- // happens for this to be executed.
- cpu->trap(fetchFault, 0);
+ thread[0]->inSyscall = true;
- // Exit state update mode to avoid accidental updating.
- thread[0]->inSyscall = false;
+ // Consider holding onto the trap and waiting until the trap event
+ // happens for this to be executed.
+ cpu->trap(fetchFault, 0);
- commitStatus[0] = TrapPending;
- // Set it up so that we squash next cycle
- trapSquash[0] = true;
- return;
- }
+ // Exit state update mode to avoid accidental updating.
+ thread[0]->inSyscall = false;
+
+ commitStatus[0] = TrapPending;
+ // Set it up so that we squash next cycle
+ trapSquash[0] = true;
+ return;
}
#endif
+
// Commit as many instructions as possible until the commit bandwidth
// limit is reached, or it becomes impossible to commit any more.
while (num_committed < commitWidth) {
DPRINTF(Commit, "Trying to commit head instruction, [sn:%i] [tid:%i]\n",
head_inst->seqNum, tid);
- // If the head instruction is squashed, it is ready to retire at any
- // time. However, we need to avoid updating any other state
- // incorrectly if it's already been squashed.
+ // If the head instruction is squashed, it is ready to retire
+ // (be removed from the ROB) at any time.
if (head_inst->isSquashed()) {
DPRINTF(Commit, "Retiring squashed instruction from "
"ROB.\n");
- // Tell ROB to retire head instruction. This retires the head
- // inst in the ROB without affecting any other stages.
rob->retireHead(commit_thread);
++commitSquashedInsts;
if (commit_success) {
++num_committed;
- // Record that the number of ROB entries has changed.
changedROBNumEntries[tid] = true;
// Set the doneSeqNum to the youngest committed instruction.
int count = 0;
Addr oldpc;
do {
+ // Debug statement. Checks to make sure we're not
+ // currently updating state while handling PC events.
if (count == 0)
- assert(!thread[tid]->inSyscall && !thread[tid]->trapPending);
+ assert(!thread[tid]->inSyscall &&
+ !thread[tid]->trapPending);
oldpc = PC[tid];
cpu->system->pcEventQueue.service(
thread[tid]->getXCProxy());
numCommittedDist.sample(num_committed);
if (num_committed == commitWidth) {
- commit_eligible[0]++;
+ commitEligible[0]++;
}
}
bool
DefaultCommit<Impl>::commitHead(DynInstPtr &head_inst, unsigned inst_num)
{
- // Make sure instruction is valid
assert(head_inst);
int tid = head_inst->threadNumber;
- // If the instruction is not executed yet, then it is a non-speculative
- // or store inst. Signal backwards that it should be executed.
+ // If the instruction is not executed yet, then it will need extra
+ // handling. Signal backwards that it should be executed.
if (!head_inst->isExecuted()) {
// Keep this number correct. We have not yet actually executed
// and committed this instruction.
if (head_inst->isNonSpeculative() ||
head_inst->isMemBarrier() ||
head_inst->isWriteBarrier()) {
+
+ DPRINTF(Commit, "Encountered a barrier or non-speculative "
+ "instruction [sn:%lli] at the head of the ROB, PC %#x.\n",
+ head_inst->seqNum, head_inst->readPC());
+
#if !FULL_SYSTEM
- // Hack to make sure syscalls aren't executed until all stores
- // write back their data. This direct communication shouldn't
- // be used for anything other than this.
+ // Hack to make sure syscalls/memory barriers/quiesces
+ // aren't executed until all stores write back their data.
+ // This direct communication shouldn't be used for
+ // anything other than this.
if (inst_num > 0 || iewStage->hasStoresToWB())
#else
if ((head_inst->isMemBarrier() || head_inst->isWriteBarrier() ||
return false;
}
- DPRINTF(Commit, "Encountered a barrier or non-speculative "
- "instruction [sn:%lli] at the head of the ROB, PC %#x.\n",
- head_inst->seqNum, head_inst->readPC());
-
- // Send back the non-speculative instruction's sequence number.
toIEW->commitInfo[tid].nonSpecSeqNum = head_inst->seqNum;
// Change the instruction so it won't try to commit again until
head_inst->seqNum, head_inst->readPC());
// Send back the non-speculative instruction's sequence
- // number. Maybe just tell the lsq to re-execute the load.
+ // number. Tell the lsq to re-execute the load.
toIEW->commitInfo[tid].nonSpecSeqNum = head_inst->seqNum;
toIEW->commitInfo[tid].uncached = true;
toIEW->commitInfo[tid].uncachedLoad = head_inst;
}
}
- // Now check if it's one of the special trap or barrier or
- // serializing instructions.
- if (head_inst->isThreadSync())/* ||
-// head_inst->isMemBarrier() ||
-head_inst->isWriteBarrier())*/
- {
+ if (head_inst->isThreadSync()) {
// Not handled for now.
- panic("Barrier instructions are not handled yet.\n");
+ panic("Thread sync instructions are not handled yet.\n");
}
+ // Stores mark themselves as completed.
if (!head_inst->isStore()) {
head_inst->setCompleted();
}
+ // Use checker prior to updating anything due to traps or PC
+ // based events.
+ if (cpu->checker) {
+ cpu->checker->tick(head_inst);
+ }
+
// Check if the instruction caused a fault. If so, trap.
Fault inst_fault = head_inst->getFault();
if (inst_fault != NoFault) {
- if (!head_inst->isNop()) {
+ head_inst->setCompleted();
#if FULL_SYSTEM
- DPRINTF(Commit, "Inst [sn:%lli] PC %#x has a fault\n",
- head_inst->seqNum, head_inst->readPC());
+ DPRINTF(Commit, "Inst [sn:%lli] PC %#x has a fault\n",
+ head_inst->seqNum, head_inst->readPC());
- if (iewStage->hasStoresToWB()) {
- DPRINTF(Commit, "Stores outstanding, fault must wait.\n");
- return false;
- }
+ if (iewStage->hasStoresToWB() || inst_num > 0) {
+ DPRINTF(Commit, "Stores outstanding, fault must wait.\n");
+ return false;
+ }
- assert(!thread[tid]->inSyscall);
+ if (cpu->checker && head_inst->isStore()) {
+ cpu->checker->tick(head_inst);
+ }
- thread[tid]->inSyscall = true;
+ assert(!thread[tid]->inSyscall);
- // Hack for now; DTB will sometimes need the machine instruction
- // for when faults happen. So we will set it here, prior to the
- // DTB possibly needing it for this translation.
- thread[tid]->setInst(
- static_cast<TheISA::MachInst>(head_inst->staticInst->machInst));
+ // Mark that we're in state update mode so that the trap's
+ // execution doesn't generate extra squashes.
+ thread[tid]->inSyscall = true;
- // Consider holding onto the trap and waiting until the trap event
- // happens for this to be executed.
- cpu->trap(inst_fault, tid);
+ // DTB will sometimes need the machine instruction for when
+ // faults happen. So we will set it here, prior to the DTB
+ // possibly needing it for its fault.
+ thread[tid]->setInst(
+ static_cast<TheISA::MachInst>(head_inst->staticInst->machInst));
- // Exit state update mode to avoid accidental updating.
- thread[tid]->inSyscall = false;
+ // Execute the trap. Although it's slightly unrealistic in
+ // terms of timing (as it doesn't wait for the full timing of
+ // the trap event to complete before updating state), it's
+ // needed to update the state as soon as possible. This
+ // prevents external agents from changing any specific state
+ // that the trap need.
+ cpu->trap(inst_fault, tid);
- commitStatus[tid] = TrapPending;
+ // Exit state update mode to avoid accidental updating.
+ thread[tid]->inSyscall = false;
- // Generate trap squash event.
- generateTrapEvent(tid);
+ commitStatus[tid] = TrapPending;
- return false;
-#else // !FULL_SYSTEM
- panic("fault (%d) detected @ PC %08p", inst_fault,
- head_inst->PC);
-#endif // FULL_SYSTEM
- }
- }
+ // Generate trap squash event.
+ generateTrapEvent(tid);
- // Check if we're really ready to commit. If not then return false.
- // I'm pretty sure all instructions should be able to commit if they've
- // reached this far. For now leave this in as a check.
- if (!rob->isHeadReady(tid)) {
- panic("Unable to commit head instruction!\n");
return false;
+#else // !FULL_SYSTEM
+ panic("fault (%d) detected @ PC %08p", inst_fault,
+ head_inst->PC);
+#endif // FULL_SYSTEM
}
updateComInstStats(head_inst);
- // Now that the instruction is going to be committed, finalize its
- // trace data.
if (head_inst->traceData) {
head_inst->traceData->setFetchSeq(head_inst->seqNum);
head_inst->traceData->setCPSeq(thread[tid]->numInst);
void
DefaultCommit<Impl>::getInsts()
{
- //////////////////////////////////////
- // Handle ROB functions
- //////////////////////////////////////
-
- // Read any renamed instructions and place them into the ROB. Do this
- // prior to squashing to avoid having instructions in the ROB that
- // don't get squashed properly.
+ // Read any renamed instructions and place them into the ROB.
int insts_to_process = min((int)renameWidth, fromRename->size);
for (int inst_num = 0; inst_num < insts_to_process; ++inst_num)
++inst_num)
{
if (!fromIEW->insts[inst_num]->isSquashed()) {
- DPRINTF(Commit, "[tid:%i]: Marking PC %#x, SN %i ready within ROB.\n",
+ DPRINTF(Commit, "[tid:%i]: Marking PC %#x, [sn:%lli] ready "
+ "within ROB.\n",
fromIEW->insts[inst_num]->threadNumber,
fromIEW->insts[inst_num]->readPC(),
fromIEW->insts[inst_num]->seqNum);
}
}
-template <class Impl>
-uint64_t
-DefaultCommit<Impl>::readPC()
-{
- // @todo: Fix this single thread hack.
- return PC[0];
-}
-
-template <class Impl>
-void
-DefaultCommit<Impl>::setSquashing(unsigned tid)
-{
- if (_status == Inactive) {
- DPRINTF(Activity, "Activating stage.\n");
- _status = Active;
- cpu->activateStage(FullCPU::CommitIdx);
- }
-
- if (commitStatus[tid] != ROBSquashing) {
- commitStatus[tid] = ROBSquashing;
- ++squashCounter;
- }
-}
-
template <class Impl>
bool
DefaultCommit<Impl>::robDoneSquashing()
//
#ifdef TARGET_ALPHA
if (inst->isDataPrefetch()) {
- stat_com_swp[thread]++;
+ statComSwp[thread]++;
} else {
- stat_com_inst[thread]++;
+ statComInst[thread]++;
}
#else
- stat_com_inst[thread]++;
+ statComInst[thread]++;
#endif
//
// Control Instructions
//
if (inst->isControl())
- stat_com_branches[thread]++;
+ statComBranches[thread]++;
//
// Memory references
//
if (inst->isMemRef()) {
- stat_com_refs[thread]++;
+ statComRefs[thread]++;
if (inst->isLoad()) {
- stat_com_loads[thread]++;
+ statComLoads[thread]++;
}
}
if (inst->isMemBarrier()) {
- stat_com_membars[thread]++;
+ statComMembars[thread]++;
}
}
////////////////////////////////////////
// //
-// SMT COMMIT POLICY MAITAINED HERE //
+// SMT COMMIT POLICY MAINTAINED HERE //
// //
////////////////////////////////////////
template <class Impl>
#endif
#include "sim/root.hh"
+#include "cpu/checker/cpu.hh"
#include "cpu/cpu_exec_context.hh"
#include "cpu/exec_context.hh"
#include "cpu/o3/alpha_dyn_inst.hh"
return "FullO3CPU tick event";
}
-//Call constructor to all the pipeline stages here
template <class Impl>
FullO3CPU<Impl>::FullO3CPU(Params *params)
: BaseFullCPU(params),
// pTable(params->pTable),
mem(params->workload[0]->getMemory()),
#endif // FULL_SYSTEM
-
+ switchCount(0),
icacheInterface(params->icacheInterface),
dcacheInterface(params->dcacheInterface),
- deferRegistration(params->deferRegistration)
+ deferRegistration(params->deferRegistration),
+ numThreads(number_of_threads)
{
_status = Idle;
+ if (params->checker) {
+ BaseCPU *temp_checker = params->checker;
+ checker = dynamic_cast<Checker<DynInstPtr> *>(temp_checker);
+ checker->setMemory(mem);
+#if FULL_SYSTEM
+ checker->setSystem(params->system);
+#endif
+ } else {
+ checker = NULL;
+ }
+
#if !FULL_SYSTEM
thread.resize(number_of_threads);
tids.resize(number_of_threads);
commit.setIEWQueue(&iewQueue);
commit.setRenameQueue(&renameQueue);
+ commit.setFetchStage(&fetch);
commit.setIEWStage(&iew);
rename.setIEWStage(&iew);
rename.setCommitStage(&commit);
- //Make Sure That this a Valid Architeture
- //@todo: move this up in constructor
- numThreads = number_of_threads;
-
#if !FULL_SYSTEM
int active_threads = params->workload.size();
#else
int active_threads = 1;
#endif
+    // Make sure that this is a valid architecture.
    assert(params->numPhysIntRegs >= numThreads * TheISA::NumIntRegs);
    assert(params->numPhysFloatRegs >= numThreads * TheISA::NumFloatRegs);
cleanUpRemovedInsts();
}
- if (activityCount && !tickEvent.scheduled()) {
+ if (_status != SwitchedOut && activityCount && !tickEvent.scheduled()) {
tickEvent.schedule(curTick + cycles(1));
}
for (int i = 0; i < number_of_threads; ++i)
thread[i]->inSyscall = true;
-
- // Need to do a copy of the xc->regs into the CPU's regfile so
- // that it can start properly.
-
for (int tid=0; tid < number_of_threads; tid++) {
- // Need to do a copy of the xc->regs into the CPU's regfile so
- // that it can start properly.
#if FULL_SYSTEM
ExecContext *src_xc = execContexts[tid];
#else
for (int i = 0; i < number_of_threads; ++i)
thread[i]->inSyscall = false;
- // Probably should just make a call to all the stages to init stage,
- // regardless of whether or not they need it. Keeps it more independent.
+ // Initialize stages.
fetch.initStage();
iew.initStage();
rename.initStage();
void
FullO3CPU<Impl>::activateContext(int tid, int delay)
{
-
// Needs to set each stage to running as well.
list<unsigned>::iterator isActive = find(
activeThreads.begin(), activeThreads.end(), tid);
template <class Impl>
void
-FullO3CPU<Impl>::switchOut(Sampler *sampler)
+FullO3CPU<Impl>::switchOut(Sampler *_sampler)
{
-// panic("FullO3CPU does not have a switch out function.\n");
+ sampler = _sampler;
+ switchCount = 0;
fetch.switchOut();
decode.switchOut();
rename.switchOut();
iew.switchOut();
commit.switchOut();
+}
- instList.clear();
- while (!removeList.empty()) {
- removeList.pop();
- }
+/** Called by each stage as it finishes draining for a switch out.
+ * Once all stages have signalled, the delayed doSwitchOut() phase
+ * runs and the CPU hands control back to the sampler. */
+template <class Impl>
+void
+FullO3CPU<Impl>::signalSwitched()
+{
+    // @todo: the stage count of 5 is hardcoded here and in the
+    // assert below -- keep both in sync if stages are added/removed.
+    if (++switchCount == 5) {
+        fetch.doSwitchOut();
+        rename.doSwitchOut();
+        commit.doSwitchOut();
+        // NOTE(review): iew.doSwitchOut() (and decode's) are defined
+        // in this change but not invoked here -- confirm they are
+        // called elsewhere, otherwise IEW never fully switches out.
+        instList.clear();
+        while (!removeList.empty()) {
+            removeList.pop();
+        }
+    if (checker)
+        checker->switchOut(sampler);
+
+    if (tickEvent.scheduled())
+        tickEvent.squash();
+    sampler->signalSwitched();
+    _status = SwitchedOut;
+    }
+    assert(switchCount <= 5);
}
template <class Impl>
void
FullO3CPU<Impl>::takeOverFrom(BaseCPU *oldCPU)
{
+ // Flush out any old data from the activity buffers.
for (int i = 0; i < 6; ++i) {
timeBuffer.advance();
fetchQueue.advance();
tickEvent.schedule(curTick);
}
-template <class Impl>
-InstSeqNum
-FullO3CPU<Impl>::getAndIncrementInstSeq()
-{
- return globalSeqNum++;
-}
-
template <class Impl>
uint64_t
FullO3CPU<Impl>::readIntReg(int reg_idx)
while (inst_it != end_it) {
assert(!instList.empty());
- bool break_loop = (inst_it == instList.begin());
-
squashInstIt(inst_it, tid);
inst_it--;
-
- if (break_loop)
- break;
}
// If the ROB was empty, then we actually need to remove the first
inst_list_it++;
++num;
}
-
-
}
template <class Impl>
#include "cpu/o3/thread_state.hh"
#include "sim/process.hh"
+template <class>
+class Checker;
class ExecContext;
class MemInterface;
class Process;
*/
void switchOut(Sampler *sampler);
+ void signalSwitched();
+
/** Takes over from another CPU.
* @todo: Implement this.
*/
void takeOverFrom(BaseCPU *oldCPU);
/** Get the current instruction sequence number, and increment it. */
- InstSeqNum getAndIncrementInstSeq();
+ InstSeqNum getAndIncrementInstSeq()
+ { return globalSeqNum++; }
#if FULL_SYSTEM
/** Check if this address is a valid instruction address. */
*/
std::queue<ListIt> removeList;
-#ifdef DEBUG
+//#ifdef DEBUG
std::set<InstSeqNum> snList;
-#endif
+//#endif
/** Records if instructions need to be removed this cycle due to being
* retired or squashed.
/** The global sequence number counter. */
InstSeqNum globalSeqNum;
+ Checker<DynInstPtr> *checker;
+
#if FULL_SYSTEM
/** Pointer to the system. */
System *system;
PhysicalMemory *physmem;
#endif
- // List of all ExecContexts.
- std::vector<Thread *> thread;
-
/** Pointer to memory. */
FunctionalMemory *mem;
+ Sampler *sampler;
+
+ int switchCount;
+
+ // List of all ExecContexts.
+ std::vector<Thread *> thread;
+
#if 0
/** Page table pointer. */
PageTable *pTable;
void
DefaultDecode<Impl>::switchOut()
{
+ cpu->signalSwitched();
}
template <class Impl>
void switchOut();
+ void doSwitchOut();
+
void takeOverFrom();
bool isSwitchedOut() { return switchedOut; }
bool switchedOut;
+ public:
+ InstSeqNum &getYoungestSN() { return youngestSN; }
+ private:
+ InstSeqNum youngestSN;
+
#if !FULL_SYSTEM
/** Page table pointer. */
// PageTable *pTable;
DefaultFetch<Impl>::switchOut()
{
switchedOut = true;
+ cpu->signalSwitched();
+}
+
+template <class Impl>
+void
+DefaultFetch<Impl>::doSwitchOut()
+{
+    // Fetch marks itself switched out immediately in switchOut();
+    // only the branch predictor is deferred to this second phase.
    branchPred.switchOut();
}
unsigned flags = 0;
#endif // FULL_SYSTEM
-    if (interruptPending && flags == 0) {
+    // Parenthesized to make the intended precedence explicit (and
+    // avoid -Wparentheses): stall fetch if an interrupt is pending
+    // with no flags set, or if the stage has been switched out.
+    if ((interruptPending && flags == 0) || switchedOut) {
// Hold off fetch from getting new instructions while an interrupt
// is pending.
return false;
// instruction.
if (fault == NoFault) {
#if FULL_SYSTEM
- if (cpu->system->memctrl->badaddr(memReq[tid]->paddr)) {
+ if (cpu->system->memctrl->badaddr(memReq[tid]->paddr) ||
+ memReq[tid]->flags & UNCACHEABLE) {
DPRINTF(Fetch, "Fetch: Bad address %#x (hopefully on a "
"misspeculating path!",
memReq[tid]->paddr);
template<class Impl>
void
DefaultFetch<Impl>::squashFromDecode(const Addr &new_PC,
- const InstSeqNum &seq_num,
- unsigned tid)
+ const InstSeqNum &seq_num,
+ unsigned tid)
{
DPRINTF(Fetch, "[tid:%i]: Squashing from decode.\n",tid);
// Tell the CPU to remove any instructions that are in flight between
// fetch and decode.
cpu->removeInstsUntil(seq_num, tid);
+ youngestSN = seq_num;
}
template<class Impl>
// In any case, squash.
squash(fromCommit->commitInfo[tid].nextPC,tid);
+ youngestSN = fromCommit->commitInfo[tid].doneSeqNum;
// Also check if there's a mispredict that happened.
if (fromCommit->commitInfo[tid].branchMispredict) {
// Get a sequence number.
inst_seq = cpu->getAndIncrementInstSeq();
+ youngestSN = inst_seq;
+
// Make sure this is a valid index.
assert(offset <= cacheBlkSize - instSize);
void switchOut();
+ void doSwitchOut();
+
void takeOverFrom();
bool isSwitchedOut() { return switchedOut; }
//iewStage->ldstQueue.removeMSHR(inst->threadNumber,inst->seqNum);
- if (inst->isSquashed() || iewStage->isSwitchedOut()) {
+ if (iewStage->isSwitchedOut()) {
+ inst = NULL;
+ return;
+ } else if (inst->isSquashed()) {
+ iewStage->wakeCPU();
inst = NULL;
return;
}
template <class Impl>
void
DefaultIEW<Impl>::switchOut()
+{
+ cpu->signalSwitched();
+}
+
+template <class Impl>
+void
+DefaultIEW<Impl>::doSwitchOut()
{
switchedOut = true;
+
instQueue.switchOut();
ldstQueue.switchOut();
fuPool->switchOut();
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+#include "cpu/checker/cpu.hh"
#include "cpu/o3/lsq_unit.hh"
#include "base/str.hh"
}
if (!(req->flags & LOCKED)) {
storeQueue[storeWBIdx].inst->setCompleted();
+ if (cpu->checker) {
+ cpu->checker->tick(storeQueue[storeWBIdx].inst);
+ }
}
if (dcacheInterface) {
stallingStoreIsn = 0;
iewStage->replayMemInst(loadQueue[stallingLoadIdx]);
}
+
+ storeQueue[store_idx].inst->setCompleted();
+ if (cpu->checker) {
+ cpu->checker->tick(storeQueue[store_idx].inst);
+ }
}
template <class Impl>
unsigned thread_id)
{
return miscRegs[thread_id].readRegWithEffect(misc_reg, fault,
- cpu->xcProxies[thread_id]);
+ cpu->xcBase(thread_id));
}
Fault setMiscReg(int misc_reg, const MiscReg &val, unsigned thread_id)
unsigned thread_id)
{
return miscRegs[thread_id].setRegWithEffect(misc_reg, val,
- cpu->xcProxies[thread_id]);
+ cpu->xcBase(thread_id));
}
#if FULL_SYSTEM
void switchOut();
+ void doSwitchOut();
+
void takeOverFrom();
/** Squashes all instructions in a thread. */
template <class Impl>
void
DefaultRename<Impl>::switchOut()
+{
+ cpu->signalSwitched();
+}
+
+template <class Impl>
+void
+DefaultRename<Impl>::doSwitchOut()
{
for (int i = 0; i < numThreads; i++) {
typename list<RenameHistory>::iterator hb_it = historyBuffer[i].begin();