namespace AlphaISA {
-inline Tick
+inline Cycles
handleIprRead(ThreadContext *xc, Packet *pkt)
{
panic("No handleIprRead implementation in Alpha\n");
}
-inline Tick
+inline Cycles
handleIprWrite(ThreadContext *xc, Packet *pkt)
{
panic("No handleIprWrite implementation in Alpha\n");
// Alpha IPR register accessors
inline bool PcPAL(Addr addr) { return addr & 0x3; }
-inline void startupCPU(ThreadContext *tc, int cpuId) { tc->activate(0); }
+inline void startupCPU(ThreadContext *tc, int cpuId)
+{ tc->activate(Cycles(0)); }
////////////////////////////////////////////////////////////////////////
//
namespace ArmISA
{
-inline Tick
+inline Cycles
handleIprRead(ThreadContext *xc, Packet *pkt)
{
panic("No implementation for handleIprRead in ARM\n");
}
-inline Tick
+inline Cycles
handleIprWrite(ThreadContext *xc, Packet *pkt)
{
panic("No implementation for handleIprWrite in ARM\n");
if (currState->timing) {
port.dmaAction(MemCmd::ReadReq, l1desc_addr, sizeof(uint32_t),
&doL1DescEvent, (uint8_t*)&currState->l1Desc.data,
- currState->tc->getCpuPtr()->ticks(1), flag);
- DPRINTF(TLBVerbose, "Adding to walker fifo: queue size before adding: %d\n",
+ currState->tc->getCpuPtr()->clockPeriod(), flag);
+ DPRINTF(TLBVerbose, "Adding to walker fifo: queue size before "
+ "adding: %d\n",
stateQueueL1.size());
stateQueueL1.push_back(currState);
currState = NULL;
} else if (!currState->functional) {
port.dmaAction(MemCmd::ReadReq, l1desc_addr, sizeof(uint32_t),
NULL, (uint8_t*)&currState->l1Desc.data,
- currState->tc->getCpuPtr()->ticks(1), flag);
+ currState->tc->getCpuPtr()->clockPeriod(), flag);
doL1Descriptor();
f = currState->fault;
} else {
if (currState->timing) {
currState->delayed = true;
port.dmaAction(MemCmd::ReadReq, l2desc_addr, sizeof(uint32_t),
- &doL2DescEvent, (uint8_t*)&currState->l2Desc.data,
- currState->tc->getCpuPtr()->ticks(1));
+ &doL2DescEvent, (uint8_t*)&currState->l2Desc.data,
+ currState->tc->getCpuPtr()->clockPeriod());
} else if (!currState->functional) {
port.dmaAction(MemCmd::ReadReq, l2desc_addr, sizeof(uint32_t),
- NULL, (uint8_t*)&currState->l2Desc.data,
- currState->tc->getCpuPtr()->ticks(1));
+ NULL, (uint8_t*)&currState->l2Desc.data,
+ currState->tc->getCpuPtr()->clockPeriod());
doL2Descriptor();
} else {
RequestPtr req = new Request(l2desc_addr, sizeof(uint32_t), 0,
TableWalker::nextWalk(ThreadContext *tc)
{
if (pendingQueue.size())
- schedule(doProcessEvent, tc->getCpuPtr()->clockEdge(1));
+ schedule(doProcessEvent, tc->getCpuPtr()->clockEdge(Cycles(1)));
}
inline void startupCPU(ThreadContext *tc, int cpuId)
{
- tc->activate(0);
+ tc->activate(Cycles(0));
}
void copyRegs(ThreadContext *src, ThreadContext *dest);
miscRegFile[misc_reg][reg_sel] = cp0_val;
- scheduleCP0Update(tc->getCpuPtr(), 1);
+ scheduleCP0Update(tc->getCpuPtr(), Cycles(1));
}
/**
}
void
-ISA::scheduleCP0Update(BaseCPU *cpu, int delay)
+ISA::scheduleCP0Update(BaseCPU *cpu, Cycles delay)
{
if (!cp0Updated) {
cp0Updated = true;
//schedule UPDATE
CP0Event *cp0_event = new CP0Event(this, cpu, UpdateCP0);
- cpu->schedule(cp0_event, curTick() + cpu->ticks(delay));
+ cpu->schedule(cp0_event, cpu->clockEdge(delay));
}
}
}
void
-ISA::CP0Event::scheduleEvent(int delay)
+ISA::CP0Event::scheduleEvent(Cycles delay)
{
- cpu->reschedule(this, curTick() + cpu->ticks(delay), true);
+ cpu->reschedule(this, cpu->clockEdge(delay), true);
}
void
const char *description() const;
/** Schedule This Event */
- void scheduleEvent(int delay);
+ void scheduleEvent(Cycles delay);
/** Unschedule This Event */
void unscheduleEvent();
};
// Schedule a CP0 Update Event
- void scheduleCP0Update(BaseCPU *cpu, int delay = 0);
+ void scheduleCP0Update(BaseCPU *cpu, Cycles delay = Cycles(0));
// If any changes have been made, then check the state for changes
// and if necessary alert the CPU
namespace MipsISA
{
-inline Tick
+inline Cycles
handleIprRead(ThreadContext *xc, Packet *pkt)
{
panic("No implementation for handleIprRead in MIPS\n");
}
-inline Tick
+inline Cycles
handleIprWrite(ThreadContext *xc, Packet *pkt)
{
panic("No implementation for handleIprWrite in MIPS\n");
// TODO: SET PC WITH AN EVENT INSTEAD OF INSTANTANEOUSLY
tc->pcState(restartPC);
- tc->activate(0);
+ tc->activate(Cycles(0));
warn("%i: Restoring thread %i in %s @ PC %x",
curTick(), tc->threadId(), tc->getCpuPtr()->name(), restartPC);
void
startupCPU(ThreadContext *tc, int cpuId)
{
- tc->activate(0/*tc->threadId()*/);
+ tc->activate(Cycles(0));
}
void
namespace PowerISA
{
-inline Tick
+inline Cycles
handleIprRead(ThreadContext *xc, Packet *pkt)
{
panic("No implementation for handleIprRead in POWER\n");
}
-inline Tick
+inline Cycles
handleIprWrite(ThreadContext *xc, Packet *pkt)
{
panic("No implementation for handleIprWrite in POWER\n");
inline void
startupCPU(ThreadContext *tc, int cpuId)
{
- tc->activate(0);
+ tc->activate(Cycles(0));
}
void
namespace SparcISA
{
-inline Tick
+inline Cycles
handleIprRead(ThreadContext *xc, Packet *pkt)
{
return xc->getDTBPtr()->doMmuRegRead(xc, pkt);
}
-inline Tick
+inline Cycles
handleIprWrite(ThreadContext *xc, Packet *pkt)
{
return xc->getDTBPtr()->doMmuRegWrite(xc, pkt);
return NoFault;
}
-Tick
+Cycles
TLB::doMmuRegRead(ThreadContext *tc, Packet *pkt)
{
Addr va = pkt->getAddr();
(uint32_t)asi, va);
}
pkt->makeAtomicResponse();
- return tc->getCpuPtr()->ticks(1);
+ return Cycles(1);
}
-Tick
+Cycles
TLB::doMmuRegWrite(ThreadContext *tc, Packet *pkt)
{
uint64_t data = pkt->get<uint64_t>();
(uint32_t)pkt->req->getAsi(), pkt->getAddr(), data);
}
pkt->makeAtomicResponse();
- return tc->getCpuPtr()->ticks(1);
+ return Cycles(1);
}
void
* does not support the Checker model at the moment
*/
Fault translateFunctional(RequestPtr req, ThreadContext *tc, Mode mode);
- Tick doMmuRegRead(ThreadContext *tc, Packet *pkt);
- Tick doMmuRegWrite(ThreadContext *tc, Packet *pkt);
+ Cycles doMmuRegRead(ThreadContext *tc, Packet *pkt);
+ Cycles doMmuRegWrite(ThreadContext *tc, Packet *pkt);
void GetTsbPtr(ThreadContext *tc, Addr addr, int ctx, Addr *ptrs);
// Checkpointing
if (!(tick_cmpr & ~mask(63)) && time > 0) {
if (tickCompare->scheduled())
cpu->deschedule(tickCompare);
- cpu->schedule(tickCompare, curTick() + time * cpu->ticks(1));
+ cpu->schedule(tickCompare, cpu->clockEdge(Cycles(time)));
}
panic("writing to TICK compare register %#X\n", val);
break;
if (!(stick_cmpr & ~mask(63)) && time > 0) {
if (sTickCompare->scheduled())
cpu->deschedule(sTickCompare);
- cpu->schedule(sTickCompare, curTick() + time * cpu->ticks(1));
+ cpu->schedule(sTickCompare, cpu->clockEdge(Cycles(time)));
}
DPRINTF(Timer, "writing to sTICK compare register value %#X\n", val);
break;
if (!(hstick_cmpr & ~mask(63)) && time > 0) {
if (hSTickCompare->scheduled())
cpu->deschedule(hSTickCompare);
- cpu->schedule(hSTickCompare, curTick() + time * cpu->ticks(1));
+ cpu->schedule(hSTickCompare, cpu->clockEdge(Cycles(time)));
}
DPRINTF(Timer, "writing to hsTICK compare register value %#X\n", val);
break;
// since our microcode instructions take two cycles we need to check if
// we're actually at the correct cycle or we need to wait a little while
// more
- int ticks;
- ticks = ((int64_t)(stick_cmpr & mask(63)) - (int64_t)stick) -
+ int delay;
+ delay = ((int64_t)(stick_cmpr & mask(63)) - (int64_t)stick) -
cpu->instCount();
- assert(ticks >= 0 && "stick compare missed interrupt cycle");
+ assert(delay >= 0 && "stick compare missed interrupt cycle");
- if (ticks == 0 || tc->status() == ThreadContext::Suspended) {
+ if (delay == 0 || tc->status() == ThreadContext::Suspended) {
DPRINTF(Timer, "STick compare cycle reached at %#x\n",
(stick_cmpr & mask(63)));
if (!(tc->readMiscRegNoEffect(MISCREG_STICK_CMPR) & (ULL(1) << 63))) {
setMiscReg(MISCREG_SOFTINT, softint | (ULL(1) << 16), tc);
}
} else {
- cpu->schedule(sTickCompare, curTick() + ticks * cpu->ticks(1));
+ cpu->schedule(sTickCompare, cpu->clockEdge(Cycles(delay)));
}
}
// since our microcode instructions take two cycles we need to check if
// we're actually at the correct cycle or we need to wait a little while
// more
- int ticks;
+ int delay;
if ( tc->status() == ThreadContext::Halted)
return;
- ticks = ((int64_t)(hstick_cmpr & mask(63)) - (int64_t)stick) -
+ delay = ((int64_t)(hstick_cmpr & mask(63)) - (int64_t)stick) -
cpu->instCount();
- assert(ticks >= 0 && "hstick compare missed interrupt cycle");
+ assert(delay >= 0 && "hstick compare missed interrupt cycle");
- if (ticks == 0 || tc->status() == ThreadContext::Suspended) {
+ if (delay == 0 || tc->status() == ThreadContext::Suspended) {
DPRINTF(Timer, "HSTick compare cycle reached at %#x\n",
(stick_cmpr & mask(63)));
if (!(tc->readMiscRegNoEffect(MISCREG_HSTICK_CMPR) & (ULL(1) << 63))) {
}
// Need to do something to cause interrupt to happen here !!! @todo
} else {
- cpu->schedule(hSTickCompare, curTick() + ticks * cpu->ticks(1));
+ cpu->schedule(hSTickCompare, cpu->clockEdge(Cycles(delay)));
}
}
{
// Other CPUs will get activated by IPIs
if (cpuId == 0 || !FullSystem)
- tc->activate(0);
+ tc->activate(Cycles(0));
}
void copyRegs(ThreadContext *src, ThreadContext *dest);
namespace X86ISA
{
- inline Tick
+ inline Cycles
handleIprRead(ThreadContext *xc, Packet *pkt)
{
Addr offset = pkt->getAddr() & mask(3);
// Make sure we don't trot off the end of data.
assert(offset + pkt->getSize() <= sizeof(MiscReg));
pkt->setData(((uint8_t *)&data) + offset);
- return 1;
+ return Cycles(1);
}
- inline Tick
+ inline Cycles
handleIprWrite(ThreadContext *xc, Packet *pkt)
{
Addr offset = pkt->getAddr() & mask(3);
assert(offset + pkt->getSize() <= sizeof(MiscReg));
pkt->writeData(((uint8_t *)&data) + offset);
xc->setMiscReg(index, gtoh(data));
- return 1;
+ return Cycles(1);
}
}
// @todo: Control the relative frequency, in this case 16:1, of
// the clocks in the Python code
- interrupts->setClock(tc->getCpuPtr()->ticks(16));
+ interrupts->setClock(tc->getCpuPtr()->clockPeriod() * 16);
// TODO Set the SMRAM base address (SMBASE) to 0x00030000
void startupCPU(ThreadContext *tc, int cpuId)
{
if (cpuId == 0 || !FullSystem) {
- tc->activate(0);
+ tc->activate(Cycles(0));
} else {
// This is an application processor (AP). It should be initialized to
        // look like only the BIOS POST has run on it and then put it into
// a halted state.
- tc->suspend(0);
+ tc->suspend(Cycles(0));
}
}
#include <inttypes.h>
+#include <cassert>
+
/** uint64_t constant */
#define ULL(N) ((uint64_t)N##ULL)
/** int64_t constant */
const Tick MaxTick = ULL(0xffffffffffffffff);
+/**
+ * Cycles is a wrapper class for representing cycle counts, i.e. a
+ * relative difference between two points in time, expressed in a
+ * number of clock cycles.
+ *
+ * The Cycles wrapper class is a type-safe alternative to a
+ * typedef, aiming to avoid unintentional mixing of cycles and ticks
+ * in the code base.
+ *
+ * Operators are defined inside an ifndef block to avoid swig touching
+ * them. Note that there is no overloading of the bool operator as the
+ * compiler is allowed to turn booleans into integers and this causes
+ * a whole range of issues in a handful of locations. The solution to
+ * this problem would be to use the safe bool idiom, but for now we
+ * make do without the test and use the more elaborate comparison >
+ * Cycles(0).
+ */
+class Cycles
+{
+
+ private:
+
+ /** Member holding the actual value. */
+ uint64_t c;
+
+ public:
+
+ /** Explicit constructor assigning a value. */
+ explicit Cycles(uint64_t _c) : c(_c) { }
+
+#ifndef SWIG // keep the operators away from SWIG
+
+ /** Converting back to the value type. */
+ operator uint64_t() const { return c; }
+
+ /** Prefix increment operator. */
+ Cycles& operator++()
+ { ++c; return *this; }
+
+ /** Prefix decrement operator. Is only temporarily used in the O3 CPU. */
+ Cycles& operator--()
+ { assert(c != 0); --c; return *this; }
+
+ /** In-place addition of cycles. */
+ const Cycles& operator+=(const Cycles& cc)
+ { c += cc.c; return *this; }
+
+ /** Greater than comparison used for > Cycles(0). */
+ bool operator>(const Cycles& cc) const
+ { return c > cc.c; }
+
+#endif // SWIG not touching operators
+
+};
+
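// Editor's sketch (not part of the changeset): minimal usage of the new
// wrapper, assuming a ClockedObject-style cpu pointer as used elsewhere
// in this patch. The explicit constructor rejects silent conversions from
// raw integers or Ticks, while the uint64_t cast operator keeps ordinary
// arithmetic working:
//
//     Cycles delay(4);                   // explicit construction required
//     delay += Cycles(1);                // type-safe in-place addition
//     // Cycles bad = 10;                // error: constructor is explicit
//     Tick when = cpu->clockEdge(delay); // cycles -> absolute tick
//     if (delay > Cycles(0)) { ... }     // idiom replacing a bool test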
/**
* Address type
* This will probably be moved somewhere else in the near future.
"terminate when all threads have reached this load count")
max_loads_any_thread = Param.Counter(0,
"terminate when any thread reaches this load count")
- progress_interval = Param.Tick(0,
- "interval to print out the progress message")
+ progress_interval = Param.Frequency('0Hz',
+ "frequency to print out the progress message")
defer_registration = Param.Bool(False,
"defer registration with system (for sampling)")
/// Notify the CPU that the indicated context is now active. The
-/// delay parameter indicates the number of ticks to wait before
+/// delay parameter indicates the number of cycles to wait before
/// executing (typically 0 or 1).
- virtual void activateContext(ThreadID thread_num, int delay) {}
+ virtual void activateContext(ThreadID thread_num, Cycles delay) {}
/// Notify the CPU that the indicated context is now suspended.
virtual void suspendContext(ThreadID thread_num) {}
/// Set the status to Active. Optional delay indicates number of
/// cycles to wait before beginning execution.
- void activate(int delay = 1) { actualTC->activate(delay); }
+ void activate(Cycles delay = Cycles(1))
+ { actualTC->activate(delay); }
/// Set the status to Suspended.
- void suspend(int delay) { actualTC->suspend(delay); }
+ void suspend(Cycles delay) { actualTC->suspend(delay); }
/// Set the status to Halted.
- void halt(int delay) { actualTC->halt(delay); }
+ void halt(Cycles delay) { actualTC->halt(delay); }
void dumpFuncProfile() { actualTC->dumpFuncProfile(); }
}
void
-InOrderCPU::CPUEvent::scheduleEvent(int delay)
+InOrderCPU::CPUEvent::scheduleEvent(Cycles delay)
{
assert(!scheduled() || squashed());
cpu->reschedule(this, cpu->clockEdge(delay), true);
lockFlag = false;
// Schedule First Tick Event, CPU will reschedule itself from here on out.
- scheduleTickEvent(0);
+ scheduleTickEvent(Cycles(0));
}
InOrderCPU::~InOrderCPU()
} else {
//Tick next_tick = curTick() + cycles(1);
//tickEvent.schedule(next_tick);
- schedule(&tickEvent, clockEdge(1));
+ schedule(&tickEvent, clockEdge(Cycles(1)));
DPRINTF(InOrderCPU, "Scheduled CPU for next tick @ %i.\n",
- clockEdge(1));
+ clockEdge(Cycles(1)));
}
}
// Schedule Squash Through-out Resource Pool
resPool->scheduleEvent(
(InOrderCPU::CPUEventType)ResourcePool::SquashAll,
- dummyTrapInst[tid], 0);
+ dummyTrapInst[tid], Cycles(0));
// Finally, Setup Trap to happen at end of cycle
trapContext(interrupt, tid, dummyTrapInst[tid]);
}
void
-InOrderCPU::trapContext(Fault fault, ThreadID tid, DynInstPtr inst, int delay)
+InOrderCPU::trapContext(Fault fault, ThreadID tid, DynInstPtr inst,
+ Cycles delay)
{
scheduleCpuEvent(Trap, fault, tid, inst, delay);
trapPending[tid] = true;
}
void
-InOrderCPU::squashFromMemStall(DynInstPtr inst, ThreadID tid, int delay)
+InOrderCPU::squashFromMemStall(DynInstPtr inst, ThreadID tid,
+ Cycles delay)
{
scheduleCpuEvent(SquashFromMemStall, NoFault, tid, inst, delay);
}
void
InOrderCPU::scheduleCpuEvent(CPUEventType c_event, Fault fault,
ThreadID tid, DynInstPtr inst,
- unsigned delay, CPUEventPri event_pri)
+ Cycles delay, CPUEventPri event_pri)
{
CPUEvent *cpu_event = new CPUEvent(this, c_event, fault, tid, inst,
event_pri);
// Broadcast event to the Resource Pool
// Need to reset tid just in case this is a dummy instruction
inst->setTid(tid);
- resPool->scheduleEvent(c_event, inst, 0, 0, tid);
+ // @todo: Is this really right? Should the delay not be passed on?
+ resPool->scheduleEvent(c_event, inst, Cycles(0), 0, tid);
}
bool
}
void
-InOrderCPU::deactivateContext(ThreadID tid, int delay)
+InOrderCPU::deactivateContext(ThreadID tid, Cycles delay)
{
DPRINTF(InOrderCPU,"[tid:%i]: Deactivating ...\n", tid);
}
void
-InOrderCPU::activateContext(ThreadID tid, int delay)
+InOrderCPU::activateContext(ThreadID tid, Cycles delay)
{
DPRINTF(InOrderCPU,"[tid:%i]: Activating ...\n", tid);
}
void
-InOrderCPU::activateNextReadyContext(int delay)
+InOrderCPU::activateNextReadyContext(Cycles delay)
{
DPRINTF(InOrderCPU,"Activating next ready thread\n");
}
void
-InOrderCPU::syscallContext(Fault fault, ThreadID tid, DynInstPtr inst, int delay)
+InOrderCPU::syscallContext(Fault fault, ThreadID tid, DynInstPtr inst,
+ Cycles delay)
{
// Syscall must be non-speculative, so squash from last stage
unsigned squash_stage = NumStages - 1;
// Schedule Squash Through-out Resource Pool
resPool->scheduleEvent(
- (InOrderCPU::CPUEventType)ResourcePool::SquashAll, inst, 0);
+ (InOrderCPU::CPUEventType)ResourcePool::SquashAll, inst,
+ Cycles(0));
scheduleCpuEvent(Syscall, fault, tid, inst, delay, Syscall_Pri);
}
TickEvent tickEvent;
/** Schedule tick event, regardless of its current state. */
- void scheduleTickEvent(int delay)
+ void scheduleTickEvent(Cycles delay)
{
assert(!tickEvent.scheduled() || tickEvent.squashed());
reschedule(&tickEvent, clockEdge(delay), true);
const char *description() const;
/** Schedule Event */
- void scheduleEvent(int delay);
+ void scheduleEvent(Cycles delay);
/** Unschedule This Event */
void unscheduleEvent();
/** Schedule a CPU Event */
void scheduleCpuEvent(CPUEventType cpu_event, Fault fault, ThreadID tid,
- DynInstPtr inst, unsigned delay = 0,
+ DynInstPtr inst, Cycles delay = Cycles(0),
CPUEventPri event_pri = InOrderCPU_Pri);
public:
/** Schedule a syscall on the CPU */
void syscallContext(Fault fault, ThreadID tid, DynInstPtr inst,
- int delay = 0);
+ Cycles delay = Cycles(0));
/** Executes a syscall.*/
void syscall(int64_t callnum, ThreadID tid);
/** Schedule a trap on the CPU */
- void trapContext(Fault fault, ThreadID tid, DynInstPtr inst, int delay = 0);
+ void trapContext(Fault fault, ThreadID tid, DynInstPtr inst,
+ Cycles delay = Cycles(0));
/** Perform trap to Handle Given Fault */
void trap(Fault fault, ThreadID tid, DynInstPtr inst);
/** Schedule thread activation on the CPU */
- void activateContext(ThreadID tid, int delay = 0);
+ void activateContext(ThreadID tid, Cycles delay = Cycles(0));
/** Add Thread to Active Threads List. */
void activateThread(ThreadID tid);
void activateThreadInPipeline(ThreadID tid);
/** Schedule Thread Activation from Ready List */
- void activateNextReadyContext(int delay = 0);
+ void activateNextReadyContext(Cycles delay = Cycles(0));
/** Add Thread From Ready List to Active Threads List. */
void activateNextReadyThread();
/** Schedule a thread deactivation on the CPU */
- void deactivateContext(ThreadID tid, int delay = 0);
+ void deactivateContext(ThreadID tid, Cycles delay = Cycles(0));
/** Remove from Active Thread List */
void deactivateThread(ThreadID tid);
* squashDueToMemStall() - squashes pipeline
* @note: maybe squashContext/squashThread would be better?
*/
- void squashFromMemStall(DynInstPtr inst, ThreadID tid, int delay = 0);
+ void squashFromMemStall(DynInstPtr inst, ThreadID tid,
+ Cycles delay = Cycles(0));
void squashDueToMemStall(int stage_num, InstSeqNum seq_num, ThreadID tid);
void removePipelineStalls(ThreadID tid);
// prevent "double"-execution of instructions
cpu->resPool->scheduleEvent((InOrderCPU::CPUEventType)
ResourcePool::UpdateAfterContextSwitch,
- inst, 0, 0, tid);
+ inst, Cycles(0), 0, tid);
// Clear switchout buffer
switchedOutBuffer[tid] = NULL;
using namespace std;
Resource::Resource(string res_name, int res_id, int res_width,
- int res_latency, InOrderCPU *_cpu)
+ Cycles res_latency, InOrderCPU *_cpu)
: resName(res_name), id(res_id),
width(res_width), latency(res_latency), cpu(_cpu),
resourceEvent(NULL)
// If the resource has a zero-cycle (no latency)
// function, then no reason to have events
// that will process them for the right tick
- if (latency > 0)
+ if (latency > Cycles(0))
resourceEvent = new ResourceEvent[width];
// Schedule Squash Through-out Resource Pool
cpu->resPool->scheduleEvent(
- (InOrderCPU::CPUEventType)ResourcePool::SquashAll, inst, 0);
+ (InOrderCPU::CPUEventType)ResourcePool::SquashAll, inst,
+ Cycles(0));
}
void
int req_slot_num = req_ptr->getSlot();
- if (latency > 0) {
+ if (latency > Cycles(0)) {
if (resourceEvent[req_slot_num].scheduled())
unscheduleEvent(req_slot_num);
}
cpu->trapContext(inst->fault, tid, inst);
}
-Tick
-Resource::ticks(int num_cycles)
-{
- return cpu->ticks(num_cycles);
-}
-
-
void
Resource::scheduleExecution(int slot_num)
{
- if (latency > 0) {
+ if (latency > Cycles(0)) {
scheduleEvent(slot_num, latency);
} else {
execute(slot_num);
}
void
-Resource::scheduleEvent(int slot_idx, int delay)
+Resource::scheduleEvent(int slot_idx, Cycles delay)
{
DPRINTF(Resource, "[tid:%i]: Scheduling event for [sn:%i] on tick %i.\n",
reqs[slot_idx]->inst->readTid(),
reqs[slot_idx]->inst->seqNum,
- cpu->ticks(delay) + curTick());
+ cpu->clockEdge(delay));
resourceEvent[slot_idx].scheduleEvent(delay);
}
bool
-Resource::scheduleEvent(DynInstPtr inst, int delay)
+Resource::scheduleEvent(DynInstPtr inst, Cycles delay)
{
int slot_idx = findSlot(inst);
}
void
-ResourceEvent::scheduleEvent(int delay)
+ResourceEvent::scheduleEvent(Cycles delay)
{
assert(!scheduled() || squashed());
resource->cpu->reschedule(this,
- curTick() + resource->ticks(delay), true);
+ resource->cpu->clockEdge(delay), true);
}
public:
Resource(std::string res_name, int res_id, int res_width,
- int res_latency, InOrderCPU *_cpu);
+ Cycles res_latency, InOrderCPU *_cpu);
virtual ~Resource();
int slotsInUse();
/** Schedule resource event, regardless of its current state. */
- void scheduleEvent(int slot_idx, int delay);
+ void scheduleEvent(int slot_idx, Cycles delay);
/** Find instruction in list, Schedule resource event, regardless of its
* current state. */
- bool scheduleEvent(DynInstPtr inst, int delay);
+ bool scheduleEvent(DynInstPtr inst, Cycles delay);
/** Unschedule resource event, regardless of its current state. */
void unscheduleEvent(int slot_idx);
/** Unschedule resource event, regardless of its current state. */
bool unscheduleEvent(DynInstPtr inst);
- /** Return the number of cycles in 'Tick' format */
- Tick ticks(int numCycles);
-
/** Find the request that corresponds to this instruction */
virtual ResReqPtr findRequest(DynInstPtr inst);
/** Return Latency of Resource */
/* Can be overridden for complex cases */
- virtual int getLatency(int slot_num) { return latency; }
+ virtual Cycles getLatency(int slot_num) { return latency; }
protected:
/** The name of this resource */
* Note: Dynamic latency resources set this to 0 and
* manage the latency themselves
*/
- const int latency;
+ const Cycles latency;
public:
/** List of all Requests the Resource is Servicing. Each request
void setSlot(int slot) { slotIdx = slot; }
/** Schedule resource event, regardless of its current state. */
- void scheduleEvent(int delay);
+ void scheduleEvent(Cycles delay);
/** Unschedule resource event, regardless of its current state. */
void unscheduleEvent()
// name - id - bandwidth - latency - CPU - Parameters
// --------------------------------------------------
resources.push_back(new FetchSeqUnit("fetch_seq_unit", FetchSeq,
- stage_width * 2, 0, _cpu, params));
+ stage_width * 2, Cycles(0),
+ _cpu, params));
// Keep track of the instruction fetch unit so we can easily
// provide a pointer to it in the CPU.
instUnit = new FetchUnit("icache_port", ICache,
- stage_width * 2 + MaxThreads, 0, _cpu,
+ stage_width * 2 + MaxThreads, Cycles(0), _cpu,
params);
resources.push_back(instUnit);
resources.push_back(new DecodeUnit("decode_unit", Decode,
- stage_width, 0, _cpu, params));
+ stage_width, Cycles(0), _cpu,
+ params));
resources.push_back(new BranchPredictor("branch_predictor", BPred,
- stage_width, 0, _cpu, params));
+ stage_width, Cycles(0),
+ _cpu, params));
resources.push_back(new InstBuffer("fetch_buffer_t0", FetchBuff, 4,
- 0, _cpu, params));
+ Cycles(0), _cpu, params));
resources.push_back(new UseDefUnit("regfile_manager", RegManager,
- stage_width * 3, 0, _cpu,
+ stage_width * 3, Cycles(0), _cpu,
params));
resources.push_back(new AGENUnit("agen_unit", AGEN,
- stage_width, 0, _cpu, params));
+ stage_width, Cycles(0), _cpu,
+ params));
resources.push_back(new ExecutionUnit("execution_unit", ExecUnit,
- stage_width, 0, _cpu, params));
+ stage_width, Cycles(0), _cpu,
+ params));
resources.push_back(new MultDivUnit("mult_div_unit", MDU,
- stage_width * 2,
- 0,
- _cpu,
- params));
+ stage_width * 2, Cycles(0),
+ _cpu, params));
// Keep track of the data load/store unit so we can easily provide
// a pointer to it in the CPU.
dataUnit = new CacheUnit("dcache_port", DCache,
- stage_width * 2 + MaxThreads, 0, _cpu,
+ stage_width * 2 + MaxThreads, Cycles(0), _cpu,
params);
resources.push_back(dataUnit);
gradObjects.push_back(BPred);
resources.push_back(new GraduationUnit("graduation_unit", Grad,
- stage_width, 0, _cpu,
+ stage_width, Cycles(0), _cpu,
params));
resources.push_back(new InstBuffer("fetch_buffer_t1", FetchBuff2, 4,
- 0, _cpu, params));
+ Cycles(0), _cpu, params));
}
// to the event construction
void
ResourcePool::scheduleEvent(InOrderCPU::CPUEventType e_type, DynInstPtr inst,
- int delay, int res_idx, ThreadID tid)
+ Cycles delay, int res_idx, ThreadID tid)
{
assert(delay >= 0);
/** Schedule resource event, regardless of its current state. */
void
-ResourcePool::ResPoolEvent::scheduleEvent(int delay)
+ResourcePool::ResPoolEvent::scheduleEvent(Cycles delay)
{
InOrderCPU *cpu = resPool->cpu;
assert(!scheduled() || squashed());
const char *description() const;
/** Schedule Event */
- void scheduleEvent(int delay);
+ void scheduleEvent(Cycles delay);
/** Unschedule This Event */
void unscheduleEvent();
/** Schedule resource event, regardless of its current state. */
void scheduleEvent(InOrderCPU::CPUEventType e_type, DynInstPtr inst = NULL,
- int delay = 0, int res_idx = 0, ThreadID tid = 0);
+ Cycles delay = Cycles(0), int res_idx = 0,
+ ThreadID tid = 0);
/** UnSchedule resource event, regardless of its current state. */
void unscheduleEvent(int res_idx, DynInstPtr inst);
#include "debug/InOrderAGEN.hh"
AGENUnit::AGENUnit(std::string res_name, int res_id, int res_width,
- int res_latency, InOrderCPU *_cpu,
+ Cycles res_latency, InOrderCPU *_cpu,
ThePipeline::Params *params)
: Resource(res_name, res_id, res_width, res_latency, _cpu)
{ }
public:
AGENUnit(std::string res_name, int res_id, int res_width,
- int res_latency, InOrderCPU *_cpu, ThePipeline::Params *params);
+ Cycles res_latency, InOrderCPU *_cpu,
+ ThePipeline::Params *params);
enum Command {
GenerateAddr
using namespace TheISA;
using namespace ThePipeline;
-BranchPredictor::BranchPredictor(std::string res_name, int res_id, int res_width,
- int res_latency, InOrderCPU *_cpu,
+BranchPredictor::BranchPredictor(std::string res_name, int res_id,
+ int res_width, Cycles res_latency,
+ InOrderCPU *_cpu,
ThePipeline::Params *params)
: Resource(res_name, res_id, res_width, res_latency, _cpu),
branchPred(this, params)
public:
BranchPredictor(std::string res_name, int res_id, int res_width,
- int res_latency, InOrderCPU *_cpu, ThePipeline::Params *params);
+ Cycles res_latency, InOrderCPU *_cpu,
+ ThePipeline::Params *params);
void regStats();
#endif
CacheUnit::CacheUnit(string res_name, int res_id, int res_width,
- int res_latency, InOrderCPU *_cpu, ThePipeline::Params *params)
+ Cycles res_latency, InOrderCPU *_cpu,
+ ThePipeline::Params *params)
: Resource(res_name, res_id, res_width, res_latency, _cpu),
cachePort(NULL), cachePortBlocked(false)
{
public:
CacheUnit(std::string res_name, int res_id, int res_width,
- int res_latency, InOrderCPU *_cpu, ThePipeline::Params *params);
+ Cycles res_latency, InOrderCPU *_cpu,
+ ThePipeline::Params *params);
enum Command {
InitiateReadData,
using namespace std;
DecodeUnit::DecodeUnit(std::string res_name, int res_id, int res_width,
- int res_latency, InOrderCPU *_cpu,
+ Cycles res_latency, InOrderCPU *_cpu,
ThePipeline::Params *params)
: Resource(res_name, res_id, res_width, res_latency, _cpu)
{
public:
DecodeUnit(std::string res_name, int res_id, int res_width,
- int res_latency, InOrderCPU *_cpu, ThePipeline::Params *params);
+ Cycles res_latency, InOrderCPU *_cpu,
+ ThePipeline::Params *params);
enum Command {
DecodeInst
using namespace ThePipeline;
ExecutionUnit::ExecutionUnit(string res_name, int res_id, int res_width,
- int res_latency, InOrderCPU *_cpu,
+ Cycles res_latency, InOrderCPU *_cpu,
ThePipeline::Params *params)
: Resource(res_name, res_id, res_width, res_latency, _cpu),
lastExecuteTick(0), lastControlTick(0)
public:
ExecutionUnit(std::string res_name, int res_id, int res_width,
- int res_latency, InOrderCPU *_cpu, ThePipeline::Params *params);
+ Cycles res_latency, InOrderCPU *_cpu,
+ ThePipeline::Params *params);
public:
void regStats();
using namespace ThePipeline;
FetchSeqUnit::FetchSeqUnit(std::string res_name, int res_id, int res_width,
- int res_latency, InOrderCPU *_cpu,
+ Cycles res_latency, InOrderCPU *_cpu,
ThePipeline::Params *params)
: Resource(res_name, res_id, res_width, res_latency, _cpu),
instSize(sizeof(MachInst))
public:
FetchSeqUnit(std::string res_name, int res_id, int res_width,
- int res_latency, InOrderCPU *_cpu, ThePipeline::Params *params);
+ Cycles res_latency, InOrderCPU *_cpu,
+ ThePipeline::Params *params);
~FetchSeqUnit();
void init();
using namespace ThePipeline;
FetchUnit::FetchUnit(string res_name, int res_id, int res_width,
- int res_latency, InOrderCPU *_cpu,
+ Cycles res_latency, InOrderCPU *_cpu,
ThePipeline::Params *params)
: CacheUnit(res_name, res_id, res_width, res_latency, _cpu, params),
instSize(sizeof(TheISA::MachInst)), fetchBuffSize(params->fetchBuffSize)
{
public:
FetchUnit(std::string res_name, int res_id, int res_width,
- int res_latency, InOrderCPU *_cpu, ThePipeline::Params *params);
+ Cycles res_latency, InOrderCPU *_cpu,
+ ThePipeline::Params *params);
virtual ~FetchUnit();
using namespace ThePipeline;
GraduationUnit::GraduationUnit(std::string res_name, int res_id, int res_width,
- int res_latency, InOrderCPU *_cpu,
+ Cycles res_latency, InOrderCPU *_cpu,
ThePipeline::Params *params)
: Resource(res_name, res_id, res_width, res_latency, _cpu)
{
public:
GraduationUnit(std::string res_name, int res_id, int res_width,
- int res_latency, InOrderCPU *_cpu,
+ Cycles res_latency, InOrderCPU *_cpu,
ThePipeline::Params *params);
void execute(int slot_num);
using namespace ThePipeline;
InstBuffer::InstBuffer(string res_name, int res_id, int res_width,
- int res_latency, InOrderCPU *_cpu,
+ Cycles res_latency, InOrderCPU *_cpu,
ThePipeline::Params *params)
: Resource(res_name, res_id, res_width, res_latency, _cpu)
{ }
public:
InstBuffer(std::string res_name, int res_id, int res_width,
- int res_latency, InOrderCPU *_cpu, ThePipeline::Params *params);
+ Cycles res_latency, InOrderCPU *_cpu,
+ ThePipeline::Params *params);
void regStats();
public:
MemDepUnit(std::string res_name, int res_id, int res_width,
- int res_latency, InOrderCPU *_cpu);
+ Cycles res_latency, InOrderCPU *_cpu);
virtual ~MemDepUnit() {}
virtual void execute(int slot_num);
using namespace ThePipeline;
MultDivUnit::MultDivUnit(string res_name, int res_id, int res_width,
- int res_latency, InOrderCPU *_cpu,
+ Cycles res_latency, InOrderCPU *_cpu,
ThePipeline::Params *params)
: Resource(res_name, res_id, res_width, res_latency, _cpu),
multRepeatRate(params->multRepeatRate),
public:
MultDivUnit(std::string res_name, int res_id, int res_width,
- int res_latency, InOrderCPU *_cpu,
+ Cycles res_latency, InOrderCPU *_cpu,
ThePipeline::Params *params);
public:
protected:
/** Latency & Repeat Rate for Multiply Insts */
unsigned multRepeatRate;
- unsigned multLatency;
+ Cycles multLatency;
/** Latency & Repeat Rate for 8-bit Divide Insts */
unsigned div8RepeatRate;
- unsigned div8Latency;
+ Cycles div8Latency;
/** Latency & Repeat Rate for 16-bit Divide Insts */
unsigned div16RepeatRate;
- unsigned div16Latency;
+ Cycles div16Latency;
/** Latency & Repeat Rate for 24-bit Divide Insts */
unsigned div24RepeatRate;
- unsigned div24Latency;
+ Cycles div24Latency;
/** Latency & Repeat Rate for 32-bit Divide Insts */
unsigned div32RepeatRate;
- unsigned div32Latency;
+ Cycles div32Latency;
/** Last cycle that MDU was used */
Tick lastMDUCycle;
using namespace ThePipeline;
TLBUnit::TLBUnit(string res_name, int res_id, int res_width,
- int res_latency, InOrderCPU *_cpu, ThePipeline::Params *params)
+ Cycles res_latency, InOrderCPU *_cpu,
+ ThePipeline::Params *params)
: Resource(res_name, res_id, res_width, res_latency, _cpu)
{
// Hard-Code Selection For Now
public:
TLBUnit(std::string res_name, int res_id, int res_width,
- int res_latency, InOrderCPU *_cpu, ThePipeline::Params *params);
+ Cycles res_latency, InOrderCPU *_cpu,
+ ThePipeline::Params *params);
virtual ~TLBUnit() {}
void init();
using namespace ThePipeline;
UseDefUnit::UseDefUnit(string res_name, int res_id, int res_width,
- int res_latency, InOrderCPU *_cpu,
+ Cycles res_latency, InOrderCPU *_cpu,
ThePipeline::Params *params)
: Resource(res_name, res_id, res_width, res_latency, _cpu)
{
UseDefUnit::init()
{
// Set Up Resource Events to Appropriate Resource BandWidth
- if (latency > 0) {
+ if (latency > Cycles(0)) {
resourceEvent = new ResourceEvent[width];
} else {
resourceEvent = NULL;
public:
UseDefUnit(std::string res_name, int res_id, int res_width,
- int res_latency, InOrderCPU *_cpu, ThePipeline::Params *params);
+ Cycles res_latency, InOrderCPU *_cpu,
+ ThePipeline::Params *params);
void init();
}
void
-InOrderThreadContext::activate(int delay)
+InOrderThreadContext::activate(Cycles delay)
{
DPRINTF(InOrderCPU, "Calling activate on Thread Context %d\n",
getThreadNum());
void
-InOrderThreadContext::suspend(int delay)
+InOrderThreadContext::suspend(Cycles delay)
{
DPRINTF(InOrderCPU, "Calling suspend on Thread Context %d\n",
getThreadNum());
}
void
-InOrderThreadContext::halt(int delay)
+InOrderThreadContext::halt(Cycles delay)
{
DPRINTF(InOrderCPU, "Calling halt on Thread Context %d\n",
getThreadNum());
/** Set the status to Active. Optional delay indicates number of
* cycles to wait before beginning execution. */
- void activate(int delay = 1);
+ void activate(Cycles delay = Cycles(1));
/** Set the status to Suspended. */
- void suspend(int delay = 0);
+ void suspend(Cycles delay = Cycles(0));
/** Set the status to Halted. */
- void halt(int delay = 0);
+ void halt(Cycles delay = Cycles(0));
/** Takes over execution of a thread from another CPU. */
void takeOverFrom(ThreadContext *old_context);
int flattenFloatIndex(int reg)
{ return cpu->isa[thread->threadId()].flattenFloatIndex(reg); }
- void activateContext(int delay)
+ void activateContext(Cycles delay)
{ cpu->activateContext(thread->threadId(), delay); }
void deallocateContext()
/** The latency to handle a trap. Used when scheduling trap
* squash event.
*/
- uint trapLatency;
+ Cycles trapLatency;
/** The interrupt fault. */
Fault interrupt;
globalSeqNum(1),
system(params->system),
drainCount(0),
- deferRegistration(params->defer_registration)
+ deferRegistration(params->defer_registration),
+ lastRunningCycle(curCycle())
{
if (!deferRegistration) {
_status = Running;
// Setup the ROB for whichever stages need it.
commit.setROB(&rob);
- lastRunningCycle = curCycle();
-
lastActivatedCycle = 0;
#if 0
// Give renameMap & rename stage access to the freeList;
lastRunningCycle = curCycle();
timesIdled++;
} else {
- schedule(tickEvent, clockEdge(1));
+ schedule(tickEvent, clockEdge(Cycles(1)));
DPRINTF(O3CPU, "Scheduling next tick!\n");
}
}
template <class Impl>
void
-FullO3CPU<Impl>::activateContext(ThreadID tid, int delay)
+FullO3CPU<Impl>::activateContext(ThreadID tid, Cycles delay)
{
// Needs to set each stage to running as well.
if (delay){
DPRINTF(O3CPU, "[tid:%i]: Scheduling thread context to activate "
- "on cycle %d\n", tid, curTick() + ticks(delay));
+ "on cycle %d\n", tid, clockEdge(delay));
scheduleActivateThreadEvent(tid, delay);
} else {
activateThread(tid);
activityRec.activity();
fetch.wakeFromQuiesce();
- Tick cycles = curCycle() - lastRunningCycle;
+ Cycles cycles(curCycle() - lastRunningCycle);
+ // @todo: This is an oddity that is only here to match the stats
if (cycles != 0)
--cycles;
quiesceCycles += cycles;
template <class Impl>
bool
FullO3CPU<Impl>::scheduleDeallocateContext(ThreadID tid, bool remove,
- int delay)
+ Cycles delay)
{
// Schedule removal of thread data from CPU
if (delay){
DPRINTF(O3CPU, "[tid:%i]: Scheduling thread context to deallocate "
- "on cycle %d\n", tid, curTick() + ticks(delay));
+ "on tick %d\n", tid, clockEdge(delay));
scheduleDeallocateContextEvent(tid, remove, delay);
return false;
} else {
FullO3CPU<Impl>::suspendContext(ThreadID tid)
{
DPRINTF(O3CPU,"[tid: %i]: Suspending Thread Context.\n", tid);
- bool deallocated = scheduleDeallocateContext(tid, false, 1);
+ bool deallocated = scheduleDeallocateContext(tid, false, Cycles(1));
// If this was the last thread then unschedule the tick event.
if ((activeThreads.size() == 1 && !deallocated) ||
activeThreads.size() == 0)
{
//For now, this is the same as deallocate
DPRINTF(O3CPU,"[tid:%i]: Halt Context called. Deallocating", tid);
- scheduleDeallocateContext(tid, true, 1);
+ scheduleDeallocateContext(tid, true, Cycles(1));
}
template <class Impl>
src_tc->setStatus(ThreadContext::Active);
- activateContext(tid,1);
+ activateContext(tid, Cycles(1));
//Reset ROB/IQ/LSQ Entries
commit.rob->resetEntries();
DPRINTF(Activity, "Waking up CPU\n");
- Tick cycles = curCycle() - lastRunningCycle;
+ Cycles cycles(curCycle() - lastRunningCycle);
+ // @todo: This is an oddity that is only here to match the stats
if (cycles != 0)
--cycles;
idleCycles += cycles;
TickEvent tickEvent;
/** Schedule tick event, regardless of its current state. */
- void scheduleTickEvent(int delay)
+ void scheduleTickEvent(Cycles delay)
{
if (tickEvent.squashed())
reschedule(tickEvent, clockEdge(delay));
/** Schedule thread to activate , regardless of its current state. */
void
- scheduleActivateThreadEvent(ThreadID tid, int delay)
+ scheduleActivateThreadEvent(ThreadID tid, Cycles delay)
{
// Schedule thread to activate, regardless of its current state.
if (activateThreadEvent[tid].squashed())
/** Schedule cpu to deallocate thread context.*/
void
- scheduleDeallocateContextEvent(ThreadID tid, bool remove, int delay)
+ scheduleDeallocateContextEvent(ThreadID tid, bool remove, Cycles delay)
{
// Schedule thread to activate, regardless of its current state.
if (deallocateContextEvent[tid].squashed())
virtual Counter totalOps() const;
/** Add Thread to Active Threads List. */
- void activateContext(ThreadID tid, int delay);
+ void activateContext(ThreadID tid, Cycles delay);
/** Remove Thread from Active Threads List */
void suspendContext(ThreadID tid);
/** Remove Thread from Active Threads List &&
* Possibly Remove Thread Context from CPU.
*/
- bool scheduleDeallocateContext(ThreadID tid, bool remove, int delay = 1);
+ bool scheduleDeallocateContext(ThreadID tid, bool remove,
+ Cycles delay = Cycles(1));
/** Remove Thread from Active Threads List &&
* Remove Thread Context from CPU.
std::list<int> cpuWaitList;
/** The cycle that the CPU was last running, used for statistics. */
- Tick lastRunningCycle;
+ Cycles lastRunningCycle;
/** The cycle that the CPU was last activated by a new thread*/
Tick lastActivatedCycle;
assert(!finishTranslationEvent.scheduled());
finishTranslationEvent.setFault(fault);
finishTranslationEvent.setReq(mem_req);
- cpu->schedule(finishTranslationEvent, cpu->clockEdge(1));
+ cpu->schedule(finishTranslationEvent,
+ cpu->clockEdge(Cycles(1)));
return;
}
DPRINTF(Fetch, "[tid:%i] Got back req with addr %#x but expected %#x\n",
FUCompletion *execution = new FUCompletion(issuing_inst,
idx, this);
- cpu->schedule(execution, cpu->clockEdge(op_latency - 1));
+ cpu->schedule(execution,
+ cpu->clockEdge(Cycles(op_latency - 1)));
// @todo: Enforce that issue_latency == 1 or op_latency
if (issue_latency > 1) {
load_inst->memData = new uint8_t[64];
ThreadContext *thread = cpu->tcBase(lsqID);
- Tick delay;
+ Cycles delay(0);
PacketPtr data_pkt = new Packet(req, MemCmd::ReadReq);
if (!TheISA::HasUnalignedMemAcc || !sreqLow) {
snd_data_pkt->dataStatic(load_inst->memData + sreqLow->getSize());
delay = TheISA::handleIprRead(thread, fst_data_pkt);
- unsigned delay2 = TheISA::handleIprRead(thread, snd_data_pkt);
+ Cycles delay2 = TheISA::handleIprRead(thread, snd_data_pkt);
if (delay2 > delay)
delay = delay2;
/** Set the status to Active. Optional delay indicates number of
* cycles to wait before beginning execution. */
- virtual void activate(int delay = 1);
+ virtual void activate(Cycles delay = Cycles(1));
/** Set the status to Suspended. */
- virtual void suspend(int delay = 0);
+ virtual void suspend(Cycles delay = Cycles(0));
/** Set the status to Halted. */
- virtual void halt(int delay = 0);
+ virtual void halt(Cycles delay = Cycles(0));
/** Dumps the function profiling information.
* @todo: Implement.
template <class Impl>
void
-O3ThreadContext<Impl>::activate(int delay)
+O3ThreadContext<Impl>::activate(Cycles delay)
{
DPRINTF(O3CPU, "Calling activate on Thread Context %d\n",
threadId());
template <class Impl>
void
-O3ThreadContext<Impl>::suspend(int delay)
+O3ThreadContext<Impl>::suspend(Cycles delay)
{
DPRINTF(O3CPU, "Calling suspend on Thread Context %d\n",
threadId());
template <class Impl>
void
-O3ThreadContext<Impl>::halt(int delay)
+O3ThreadContext<Impl>::halt(Cycles delay)
{
DPRINTF(O3CPU, "Calling halt on Thread Context %d\n",
threadId());
void
-AtomicSimpleCPU::activateContext(ThreadID thread_num, int delay)
+AtomicSimpleCPU::activateContext(ThreadID thread_num, Cycles delay)
{
DPRINTF(SimpleCPU, "ActivateContext %d (%d cycles)\n", thread_num, delay);
assert(!tickEvent.scheduled());
notIdleFraction++;
- numCycles += tickToCycle(thread->lastActivate - thread->lastSuspend);
+ numCycles += ticksToCycles(thread->lastActivate - thread->lastSuspend);
//Make sure ticks are still on multiples of cycles
schedule(tickEvent, clockEdge(delay));
stall_ticks += dcache_latency;
if (stall_ticks) {
- Tick stall_cycles = stall_ticks / clockPeriod();
- Tick aligned_stall_ticks = ticks(stall_cycles);
-
- if (aligned_stall_ticks < stall_ticks)
- aligned_stall_ticks += 1;
-
- latency += aligned_stall_ticks;
+ // the atomic cpu does its accounting in ticks, so
+ // keep counting in ticks but round to the clock
+ // period
+ latency += divCeil(stall_ticks, clockPeriod()) *
+ clockPeriod();
}
}
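// Editor's note, a worked example of the rounding above with illustrative
// numbers: if clockPeriod() is 500 ticks and stall_ticks is 1200, then
// divCeil(1200, 500) == 3, so latency grows by 3 * 500 = 1500 ticks; the
// stall is rounded up to a whole number of clock periods (the replaced
// code instead added only a single tick when the stall was misaligned).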
void switchOut();
void takeOverFrom(BaseCPU *oldCPU);
- virtual void activateContext(ThreadID thread_num, int delay);
+ virtual void activateContext(ThreadID thread_num, Cycles delay);
virtual void suspendContext(ThreadID thread_num);
Fault readMem(Addr addr, uint8_t *data, unsigned size, unsigned flags);
void
-TimingSimpleCPU::activateContext(ThreadID thread_num, int delay)
+TimingSimpleCPU::activateContext(ThreadID thread_num, Cycles delay)
{
DPRINTF(SimpleCPU, "ActivateContext %d (%d cycles)\n", thread_num, delay);
{
RequestPtr req = pkt->req;
if (req->isMmappedIpr()) {
- Tick delay = TheISA::handleIprRead(thread->getTC(), pkt);
+ Cycles delay = TheISA::handleIprRead(thread->getTC(), pkt);
new IprEvent(pkt, this, clockEdge(delay));
_status = DcacheWaitResponse;
dcache_pkt = NULL;
{
RequestPtr req = dcache_pkt->req;
if (req->isMmappedIpr()) {
- Tick delay = TheISA::handleIprWrite(thread->getTC(), dcache_pkt);
+ Cycles delay = TheISA::handleIprWrite(thread->getTC(), dcache_pkt);
new IprEvent(dcache_pkt, this, clockEdge(delay));
_status = DcacheWaitResponse;
dcache_pkt = NULL;
void switchOut();
void takeOverFrom(BaseCPU *oldCPU);
- virtual void activateContext(ThreadID thread_num, int delay);
+ virtual void activateContext(ThreadID thread_num, Cycles delay);
virtual void suspendContext(ThreadID thread_num);
Fault readMem(Addr addr, uint8_t *data, unsigned size, unsigned flags);
}
void
-SimpleThread::activate(int delay)
+SimpleThread::activate(Cycles delay)
{
if (status() == ThreadContext::Active)
return;
/// Set the status to Active. Optional delay indicates number of
/// cycles to wait before beginning execution.
- void activate(int delay = 1);
+ void activate(Cycles delay = Cycles(1));
/// Set the status to Suspended.
void suspend();
MemTest::tick()
{
if (!tickEvent.scheduled())
- schedule(tickEvent, clockEdge(1));
+ schedule(tickEvent, clockEdge(Cycles(1)));
if (++noResponseCycles >= 500000) {
if (issueDmas) {
exitSimLoop("Network Tester completed simCycles");
else {
if (!tickEvent.scheduled())
- schedule(tickEvent, clockEdge(1));
+ schedule(tickEvent, clockEdge(Cycles(1)));
}
}
/// Set the status to Active. Optional delay indicates number of
/// cycles to wait before beginning execution.
- virtual void activate(int delay = 1) = 0;
+ virtual void activate(Cycles delay = Cycles(1)) = 0;
/// Set the status to Suspended.
- virtual void suspend(int delay = 0) = 0;
+ virtual void suspend(Cycles delay = Cycles(0)) = 0;
/// Set the status to Halted.
- virtual void halt(int delay = 0) = 0;
+ virtual void halt(Cycles delay = Cycles(0)) = 0;
virtual void dumpFuncProfile() = 0;
/// Set the status to Active. Optional delay indicates number of
/// cycles to wait before beginning execution.
- void activate(int delay = 1) { actualTC->activate(delay); }
+ void activate(Cycles delay = Cycles(1))
+ { actualTC->activate(delay); }
/// Set the status to Suspended.
- void suspend(int delay = 0) { actualTC->suspend(); }
+ void suspend(Cycles delay = Cycles(0)) { actualTC->suspend(); }
/// Set the status to Halted.
- void halt(int delay = 0) { actualTC->halt(); }
+ void halt(Cycles delay = Cycles(0)) { actualTC->halt(); }
void dumpFuncProfile() { actualTC->dumpFuncProfile(); }
void
Pl111::dmaDone()
{
- Tick maxFrameTime = lcdTiming2.cpl * height;
+ Cycles maxFrameTime(lcdTiming2.cpl * height);
--dmaPendingNum;
// argument into a relative number of cycles in the future by
// subtracting curCycle()
if (lcdControl.lcden)
- schedule(readEvent, clockEdge(startTime + maxFrameTime -
- curCycle()));
+ // @todo: This is a terrible way of doing the time
+ // keeping, make it all relative
+ schedule(readEvent,
+ clockEdge(Cycles(startTime - curCycle() +
+ maxFrameTime)));
}
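// Editor's note on the computation above: per the earlier comment,
// startTime holds an absolute cycle count, so subtracting curCycle()
// first makes the argument a relative number of cycles, which clockEdge()
// then converts into an absolute tick for the event queue; hence the
// @todo about keeping the bookkeeping relative throughout.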
if (dmaPendingNum > (maxOutstandingDma - waterMark))
{
if (!tickEvent.scheduled() && (rxTick || txTick || txFifoTick) &&
getState() == SimObject::Running)
- schedule(tickEvent, clockEdge(1));
+ schedule(tickEvent, clockEdge(Cycles(1)));
}
unsigned int
DPRINTF(Ethernet, "transfer complete: data in txFifo...schedule xmit\n");
- reschedule(txEvent, curTick() + ticks(1), true);
+ reschedule(txEvent, curTick() + clockPeriod(), true);
}
bool
Bridge::BridgeSlavePort::BridgeSlavePort(const std::string& _name,
Bridge& _bridge,
BridgeMasterPort& _masterPort,
- int _delay, int _resp_limit,
+ Cycles _delay, int _resp_limit,
std::vector<Range<Addr> > _ranges)
: SlavePort(_name, &_bridge), bridge(_bridge), masterPort(_masterPort),
delay(_delay), ranges(_ranges.begin(), _ranges.end()),
Bridge::BridgeMasterPort::BridgeMasterPort(const std::string& _name,
Bridge& _bridge,
BridgeSlavePort& _slavePort,
- int _delay, int _req_limit)
+ Cycles _delay, int _req_limit)
: MasterPort(_name, &_bridge), bridge(_bridge), slavePort(_slavePort),
delay(_delay), reqQueueLimit(_req_limit), sendEvent(*this)
{
Bridge::Bridge(Params *p)
: MemObject(p),
- slavePort(p->name + ".slave", *this, masterPort, p->delay, p->resp_size,
- p->ranges),
- masterPort(p->name + ".master", *this, slavePort, p->delay, p->req_size)
+ slavePort(p->name + ".slave", *this, masterPort,
+ ticksToCycles(p->delay), p->resp_size, p->ranges),
+ masterPort(p->name + ".master", *this, slavePort,
+ ticksToCycles(p->delay), p->req_size)
{
}
DPRINTF(Bridge, "Request queue size: %d\n", transmitList.size());
- slavePort.schedTimingResp(pkt, curTick() + delay);
+ slavePort.schedTimingResp(pkt, bridge.clockEdge(delay));
return true;
}
assert(outstandingResponses != respQueueLimit);
++outstandingResponses;
retryReq = false;
- masterPort.schedTimingReq(pkt, curTick() + delay);
+ masterPort.schedTimingReq(pkt, bridge.clockEdge(delay));
}
}
Tick
Bridge::BridgeSlavePort::recvAtomic(PacketPtr pkt)
{
- return delay + masterPort.sendAtomic(pkt);
+ return delay * bridge.clockPeriod() + masterPort.sendAtomic(pkt);
}
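// Editor's sketch: with the bridge delay now held in Cycles, the two
// paths convert it differently, assuming the bridge's own clock:
//
//     Tick timing_when = bridge.clockEdge(delay);      // timing path
//     Tick atomic_lat  = delay * bridge.clockPeriod(); // atomic path
//
// The timing path aligns to the next clock edge, while the atomic path
// simply scales the cycle count by the clock period.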
void
BridgeMasterPort& masterPort;
/** Minimum request delay though this bridge. */
- Tick delay;
+ Cycles delay;
/** Address ranges to pass through the bridge */
AddrRangeList ranges;
* @param _name the port name including the owner
* @param _bridge the structural owner
* @param _masterPort the master port on the other side of the bridge
- * @param _delay the delay from seeing a response to sending it
+ * @param _delay the delay in cycles from receiving to sending
* @param _resp_limit the size of the response queue
* @param _ranges a number of address ranges to forward
*/
BridgeSlavePort(const std::string& _name, Bridge& _bridge,
- BridgeMasterPort& _masterPort, int _delay,
+ BridgeMasterPort& _masterPort, Cycles _delay,
int _resp_limit, std::vector<Range<Addr> > _ranges);
/**
BridgeSlavePort& slavePort;
/** Minimum delay though this bridge. */
- Tick delay;
+ Cycles delay;
/**
* Request packet queue. Request packets are held in this
* @param _name the port name including the owner
* @param _bridge the structural owner
* @param _slavePort the slave port on the other side of the bridge
- * @param _delay the delay from seeing a request to sending it
+ * @param _delay the delay in cycles from receiving to sending
* @param _req_limit the size of the request queue
*/
BridgeMasterPort(const std::string& _name, Bridge& _bridge,
- BridgeSlavePort& _slavePort, int _delay,
+ BridgeSlavePort& _slavePort, Cycles _delay,
int _req_limit);
/**
# most derived types require this, so we just do it here once
code('%import "stdint.i"')
code('%import "base/types.hh"')
+    # ignore the cast operator for Cycles
+ code('%ignore *::operator uint64_t() const;')
def getValue(self):
return long(self.value)
class UInt64(CheckedInt): cxx_type = 'uint64_t'; size = 64; unsigned = True
class Counter(CheckedInt): cxx_type = 'Counter'; size = 64; unsigned = True
+class Cycles(CheckedInt): cxx_type = 'Cycles'; size = 64; unsigned = True
class Tick(CheckedInt): cxx_type = 'Tick'; size = 64; unsigned = True
class TcpPort(CheckedInt): cxx_type = 'uint16_t'; size = 16; unsigned = True
class UdpPort(CheckedInt): cxx_type = 'uint16_t'; size = 16; unsigned = True
// The cycle counter value corresponding to the current value of
// 'tick'
- mutable Tick cycle;
+ mutable Cycles cycle;
/**
* Prevent inadvertent use of the copy constructor and assignment
// if not, we have to recalculate the cycle and tick, we
// perform the calculations in terms of relative cycles to
// allow changes to the clock period in the future
- Tick elapsedCycles = divCeil(curTick() - tick, clock);
+ Cycles elapsedCycles(divCeil(curTick() - tick, clock));
cycle += elapsedCycles;
tick += elapsedCycles * clock;
}
*
* @return The tick when the clock edge occurs
*/
- inline Tick clockEdge(int cycles = 0) const
+ inline Tick clockEdge(Cycles cycles = Cycles(0)) const
{
// align tick to the next clock edge
update();
// figure out when this future cycle is
- return tick + ticks(cycles);
+ return tick + clock * cycles;
}
/**
* Determine the current cycle, corresponding to a tick aligned to
* a clock edge.
*
- * @return The current cycle
+ * @return The current cycle count
*/
- inline Tick curCycle() const
+ inline Cycles curCycle() const
{
// align cycle to the next clock edge.
update();
Tick nextCycle() const
{ return clockEdge(); }
- inline Tick frequency() const { return SimClock::Frequency / clock; }
-
- inline Tick ticks(int cycles) const { return clock * cycles; }
+ inline uint64_t frequency() const { return SimClock::Frequency / clock; }
inline Tick clockPeriod() const { return clock; }
- inline Tick tickToCycle(Tick tick) const { return tick / clock; }
+ inline Cycles ticksToCycles(Tick tick) const
+ { return Cycles(tick / clock); }
};
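// Editor's sketch (illustrative values, not part of the changeset): with
// a clock of 1000 ticks per cycle, the helpers above relate as follows:
//
//     Tick t0 = clockEdge();               // tick of the next clock edge
//     Tick t3 = clockEdge(Cycles(3));      // t0 + 3 * clockPeriod()
//     Cycles c = ticksToCycles(t3 - t0);   // == Cycles(3)
//     Cycles now = curCycle();             // cycle count at that edge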
ThreadContext *tc = system->getThreadContext(contextIds[0]);
// mark this context as active so it will start ticking.
- tc->activate(0);
+ tc->activate(Cycles(0));
}
// map simulator fd sim_fd to target fd tgt_fd
EndQuiesceEvent *quiesceEvent = tc->getQuiesceEvent();
- Tick resume = curTick() + cpu->ticks(cycles);
+ Tick resume = cpu->clockEdge(Cycles(cycles));
cpu->reschedule(quiesceEvent, resume, true);