TableWalker::nextWalk(ThreadContext *tc)
{
if (pendingQueue.size())
- schedule(doProcessEvent, tc->getCpuPtr()->nextCycle(curTick()+1));
+ schedule(doProcessEvent, tc->getCpuPtr()->clockEdge(1));
}
// Make sure we don't trot off the end of data.
assert(offset + pkt->getSize() <= sizeof(MiscReg));
pkt->setData(((uint8_t *)&data) + offset);
- return xc->getCpuPtr()->ticks(1);
+ return 1;
}
inline Tick
assert(offset + pkt->getSize() <= sizeof(MiscReg));
pkt->writeData(((uint8_t *)&data) + offset);
xc->setMiscReg(index, gtoh(data));
- return xc->getCpuPtr()->ticks(1);
+ return 1;
}
}
{
Counter temp = cpu->totalOps();
#ifndef NDEBUG
- double ipc = double(temp - lastNumInst) / (_interval / cpu->ticks(1));
+ double ipc = double(temp - lastNumInst) / (_interval / cpu->clockPeriod());
DPRINTFN("%s progress event, total committed:%i, progress insts committed: "
"%lli, IPC: %0.8d\n", cpu->name(), temp, temp - lastNumInst,
}
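As a sanity check on the new divisor (illustrative numbers, not values from the patch): with an _interval of 10^9 ticks and a clockPeriod() of 500 ticks, _interval / cpu->clockPeriod() is 2,000,000 cycles, so the IPC is the instructions committed in the interval divided by 2,000,000; the old ticks(1) call returned the same one-clock-period value, so the quotient is unchanged, only the name is more descriptive.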
if (params()->progress_interval) {
- Tick num_ticks = ticks(params()->progress_interval);
-
- new CPUProgressEvent(this, num_ticks);
+ new CPUProgressEvent(this, params()->progress_interval);
}
}
InOrderCPU::CPUEvent::scheduleEvent(int delay)
{
assert(!scheduled() || squashed());
- cpu->reschedule(this, cpu->nextCycle(curTick() + cpu->ticks(delay)), true);
+ cpu->reschedule(this, cpu->clockEdge(delay), true);
}
void
frontEndSked = createFrontEndSked();
faultSked = createFaultSked();
- lastRunningCycle = curTick();
+ lastRunningCycle = curCycle();
lockAddr = 0;
lockFlag = false;
if (!tickEvent.scheduled()) {
if (_status == SwitchedOut) {
// increment stat
- lastRunningCycle = curTick();
+ lastRunningCycle = curCycle();
} else if (!activityRec.active()) {
DPRINTF(InOrderCPU, "sleeping CPU.\n");
- lastRunningCycle = curTick();
+ lastRunningCycle = curCycle();
timesIdled++;
} else {
//Tick next_tick = curTick() + cycles(1);
//tickEvent.schedule(next_tick);
- schedule(&tickEvent, nextCycle(curTick() + 1));
+ schedule(&tickEvent, clockEdge(1));
DPRINTF(InOrderCPU, "Scheduled CPU for next tick @ %i.\n",
- nextCycle(curTick() + 1));
+ clockEdge(1));
}
}
CPUEvent *cpu_event = new CPUEvent(this, c_event, fault, tid, inst,
event_pri);
- Tick sked_tick = nextCycle(curTick() + ticks(delay));
- if (delay >= 0) {
- DPRINTF(InOrderCPU, "Scheduling CPU Event (%s) for cycle %i, [tid:%i].\n",
- eventNames[c_event], curTick() + delay, tid);
- schedule(cpu_event, sked_tick);
- } else {
- cpu_event->process();
- cpuEventRemoveList.push(cpu_event);
- }
+ Tick sked_tick = clockEdge(delay);
+ DPRINTF(InOrderCPU, "Scheduling CPU Event (%s) for tick %i, [tid:%i].\n",
+ eventNames[c_event], sked_tick, tid);
+ schedule(cpu_event, sked_tick);
// Broadcast event to the Resource Pool
// Need to reset tid just in case this is a dummy instruction
DPRINTF(Activity, "Waking up CPU\n");
- Tick extra_cycles = tickToCycles((curTick() - 1) - lastRunningCycle);
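+ // cycles since the CPU last ran; the cycle in which it wakes up is
+ // not counted as idle, mirroring the old (curTick() - 1) computation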
+ Tick extra_cycles = curCycle() - lastRunningCycle;
+ if (extra_cycles != 0)
+ --extra_cycles;
idleCycles += extra_cycles;
for (int stage_num = 0; stage_num < NumStages; stage_num++) {
numCycles += extra_cycles;
- schedule(&tickEvent, nextCycle(curTick()));
+ schedule(&tickEvent, nextCycle());
}
// Lots of copied full system code...place into BaseCPU class?
void scheduleTickEvent(int delay)
{
assert(!tickEvent.scheduled() || tickEvent.squashed());
- reschedule(&tickEvent, nextCycle(curTick() + ticks(delay)), true);
+ reschedule(&tickEvent, clockEdge(delay), true);
}
/** Unschedule tick event, regardless of its current state. */
void
Resource::scheduleExecution(int slot_num)
{
- if (latency >= 1) {
+ if (latency > 0) {
scheduleEvent(slot_num, latency);
} else {
execute(slot_num);
{
assert(delay >= 0);
- Tick when = cpu->nextCycle(curTick() + cpu->ticks(delay));
+ Tick when = cpu->clockEdge(delay);
switch ((int)e_type)
{
{
InOrderCPU *cpu = resPool->cpu;
assert(!scheduled() || squashed());
- cpu->reschedule(this, cpu->nextCycle(curTick() + cpu->ticks(delay)), true);
+ cpu->reschedule(this, cpu->clockEdge(delay), true);
}
/** Unschedule resource event, regardless of its current state. */
renameToROBDelay = Param.Unsigned(1, "Rename to reorder buffer delay")
commitWidth = Param.Unsigned(8, "Commit width")
squashWidth = Param.Unsigned(8, "Squash width")
- trapLatency = Param.Tick(13, "Trap latency")
- fetchTrapLatency = Param.Tick(1, "Fetch trap latency")
+ trapLatency = Param.Unsigned(13, "Trap latency")
+ fetchTrapLatency = Param.Unsigned(1, "Fetch trap latency")
backComSize = Param.Unsigned(5, "Time buffer size for backwards communication")
forwardComSize = Param.Unsigned(5, "Time buffer size for forward communication")
/** The latency to handle a trap. Used when scheduling trap
* squash event.
*/
- Tick trapLatency;
+ uint trapLatency;
/** The interrupt fault. */
Fault interrupt;
cpu->activateStage(O3CPU::CommitIdx);
cpu->activityThisCycle();
- trapLatency = cpu->ticks(trapLatency);
}
template <class Impl>
TrapEvent *trap = new TrapEvent(this, tid);
- cpu->schedule(trap, curTick() + trapLatency);
+ cpu->schedule(trap, cpu->clockEdge(trapLatency));
trapInFlight[tid] = true;
thread[tid]->trapPending = true;
}
// Setup the ROB for whichever stages need it.
commit.setROB(&rob);
- lastRunningCycle = curTick();
+ lastRunningCycle = curCycle();
lastActivatedCycle = 0;
#if 0
getState() == SimObject::Drained) {
DPRINTF(O3CPU, "Switched out!\n");
// increment stat
- lastRunningCycle = curTick();
+ lastRunningCycle = curCycle();
} else if (!activityRec.active() || _status == Idle) {
DPRINTF(O3CPU, "Idle!\n");
- lastRunningCycle = curTick();
+ lastRunningCycle = curCycle();
timesIdled++;
} else {
- schedule(tickEvent, nextCycle(curTick() + ticks(1)));
+ schedule(tickEvent, clockEdge(1));
DPRINTF(O3CPU, "Scheduling next tick!\n");
}
}
activityRec.activity();
fetch.wakeFromQuiesce();
- quiesceCycles += tickToCycles((curTick() - 1) - lastRunningCycle);
+ Tick cycles = curCycle() - lastRunningCycle;
+ if (cycles != 0)
+ --cycles;
+ quiesceCycles += cycles;
lastActivatedCycle = curTick();
unscheduleTickEvent();
DPRINTF(Quiesce, "Suspending Context\n");
- lastRunningCycle = curTick();
+ lastRunningCycle = curCycle();
_status = Idle;
}
if (!tickEvent.scheduled())
schedule(tickEvent, nextCycle());
- lastRunningCycle = curTick();
+ lastRunningCycle = curCycle();
}
template <class Impl>
DPRINTF(Activity, "Waking up CPU\n");
- idleCycles += tickToCycles((curTick() - 1) - lastRunningCycle);
- numCycles += tickToCycles((curTick() - 1) - lastRunningCycle);
+ Tick cycles = curCycle() - lastRunningCycle;
+ if (cycles != 0)
+ --cycles;
+ idleCycles += cycles;
+ numCycles += cycles;
schedule(tickEvent, nextCycle());
}
void scheduleTickEvent(int delay)
{
if (tickEvent.squashed())
- reschedule(tickEvent, nextCycle(curTick() + ticks(delay)));
+ reschedule(tickEvent, clockEdge(delay));
else if (!tickEvent.scheduled())
- schedule(tickEvent, nextCycle(curTick() + ticks(delay)));
+ schedule(tickEvent, clockEdge(delay));
}
/** Unschedule tick event, regardless of its current state. */
// Schedule thread to activate, regardless of its current state.
if (activateThreadEvent[tid].squashed())
reschedule(activateThreadEvent[tid],
- nextCycle(curTick() + ticks(delay)));
+ clockEdge(delay));
else if (!activateThreadEvent[tid].scheduled()) {
- Tick when = nextCycle(curTick() + ticks(delay));
+ Tick when = clockEdge(delay);
// Check if the deallocateEvent is also scheduled, and make
// sure they do not happen at same time causing a sleep that
// Schedule thread to activate, regardless of its current state.
if (deallocateContextEvent[tid].squashed())
reschedule(deallocateContextEvent[tid],
- nextCycle(curTick() + ticks(delay)));
+ clockEdge(delay));
else if (!deallocateContextEvent[tid].scheduled())
schedule(deallocateContextEvent[tid],
- nextCycle(curTick() + ticks(delay)));
+ clockEdge(delay));
}
/** Unschedule thread deallocation in CPU */
assert(!finishTranslationEvent.scheduled());
finishTranslationEvent.setFault(fault);
finishTranslationEvent.setReq(mem_req);
- cpu->schedule(finishTranslationEvent, cpu->nextCycle(curTick() + cpu->ticks(1)));
+ cpu->schedule(finishTranslationEvent, cpu->clockEdge(1));
return;
}
DPRINTF(Fetch, "[tid:%i] Got back req with addr %#x but expected %#x\n",
FUCompletion *execution = new FUCompletion(issuing_inst,
idx, this);
- cpu->schedule(execution, curTick() + cpu->ticks(op_latency - 1));
+ cpu->schedule(execution, cpu->clockEdge(op_latency - 1));
// @todo: Enforce that issue_latency == 1 or op_latency
if (issue_latency > 1) {
delete snd_data_pkt;
}
WritebackEvent *wb = new WritebackEvent(load_inst, data_pkt, this);
- cpu->schedule(wb, curTick() + delay);
+ cpu->schedule(wb, cpu->clockEdge(delay));
return NoFault;
}
assert(!tickEvent.scheduled());
notIdleFraction++;
- numCycles += tickToCycles(thread->lastActivate - thread->lastSuspend);
+ numCycles += tickToCycle(thread->lastActivate - thread->lastSuspend);
//Make sure ticks are still on multiples of cycles
- schedule(tickEvent, nextCycle(curTick() + ticks(delay)));
+ schedule(tickEvent, clockEdge(delay));
_status = Running;
}
stall_ticks += dcache_latency;
if (stall_ticks) {
- Tick stall_cycles = stall_ticks / ticks(1);
+ Tick stall_cycles = stall_ticks / clockPeriod();
Tick aligned_stall_ticks = ticks(stall_cycles);
if (aligned_stall_ticks < stall_ticks)
}
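For example (illustrative numbers only): with a clockPeriod() of 500 ticks and stall_ticks of 1200, stall_cycles comes out to 2 and aligned_stall_ticks to 1000, so the comparison on the last line detects that integer division rounded part of the stall away, presumably so the elided body can round the stall back up to a whole number of cycles.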
// instruction takes at least one cycle
- if (latency < ticks(1))
- latency = ticks(1);
+ if (latency < clockPeriod())
+ latency = clockPeriod();
if (_status != Idle)
schedule(tickEvent, curTick() + latency);
TimingSimpleCPU::TimingSimpleCPU(TimingSimpleCPUParams *p)
: BaseSimpleCPU(p), fetchTranslation(this), icachePort(this),
- dcachePort(this), fetchEvent(this)
+ dcachePort(this), ifetch_pkt(NULL), dcache_pkt(NULL), previousCycle(0),
+ fetchEvent(this)
{
_status = Idle;
- ifetch_pkt = dcache_pkt = NULL;
drainEvent = NULL;
- previousTick = 0;
changeState(SimObject::Running);
system->totalNumInsts = 0;
}
{
assert(_status == Running || _status == Idle);
_status = SwitchedOut;
- numCycles += tickToCycles(curTick() - previousTick);
+ numCycles += curCycle() - previousCycle;
// If we've been scheduled to resume but are then told to switch out,
// we'll need to cancel it.
_status = Idle;
}
assert(threadContexts.size() == 1);
- previousTick = curTick();
+ previousCycle = curCycle();
}
_status = Running;
// kick things off by initiating the fetch of the next instruction
- schedule(fetchEvent, nextCycle(curTick() + ticks(delay)));
+ schedule(fetchEvent, clockEdge(delay));
}
{
RequestPtr req = pkt->req;
if (req->isMmappedIpr()) {
- Tick delay;
- delay = TheISA::handleIprRead(thread->getTC(), pkt);
- new IprEvent(pkt, this, nextCycle(curTick() + delay));
+ Tick delay = TheISA::handleIprRead(thread->getTC(), pkt);
+ new IprEvent(pkt, this, clockEdge(delay));
_status = DcacheWaitResponse;
dcache_pkt = NULL;
} else if (!dcachePort.sendTimingReq(pkt)) {
{
// fault may be NoFault in cases where a fault is suppressed,
// for instance prefetches.
- numCycles += tickToCycles(curTick() - previousTick);
- previousTick = curTick();
+ numCycles += curCycle() - previousCycle;
+ previousCycle = curCycle();
if (traceData) {
// Since there was a fault, we shouldn't trace this instruction.
{
RequestPtr req = dcache_pkt->req;
if (req->isMmappedIpr()) {
- Tick delay;
- delay = TheISA::handleIprWrite(thread->getTC(), dcache_pkt);
- new IprEvent(dcache_pkt, this, nextCycle(curTick() + delay));
+ Tick delay = TheISA::handleIprWrite(thread->getTC(), dcache_pkt);
+ new IprEvent(dcache_pkt, this, clockEdge(delay));
_status = DcacheWaitResponse;
dcache_pkt = NULL;
} else if (!dcachePort.sendTimingReq(dcache_pkt)) {
_status = IcacheWaitResponse;
completeIfetch(NULL);
- numCycles += tickToCycles(curTick() - previousTick);
- previousTick = curTick();
+ numCycles += curCycle() - previousCycle;
+ previousCycle = curCycle();
}
}
advanceInst(fault);
}
- numCycles += tickToCycles(curTick() - previousTick);
- previousTick = curTick();
+ numCycles += curCycle() - previousCycle;
+ previousCycle = curCycle();
}
_status = Running;
- numCycles += tickToCycles(curTick() - previousTick);
- previousTick = curTick();
+ numCycles += curCycle() - previousCycle;
+ previousCycle = curCycle();
if (getState() == SimObject::Draining) {
if (pkt) {
assert(_status == DcacheWaitResponse || _status == DTBWaitResponse ||
pkt->req->getFlags().isSet(Request::NO_ACCESS));
- numCycles += tickToCycles(curTick() - previousTick);
- previousTick = curTick();
+ numCycles += curCycle() - previousCycle;
+ previousCycle = curCycle();
if (pkt->senderState) {
SplitFragmentSenderState * send_state =
PacketPtr ifetch_pkt;
PacketPtr dcache_pkt;
- Tick previousTick;
+ Tick previousCycle;
protected:
MemTest::tick()
{
if (!tickEvent.scheduled())
- schedule(tickEvent, curTick() + ticks(1));
+ schedule(tickEvent, clockEdge(1));
if (++noResponseCycles >= 500000) {
if (issueDmas) {
exitSimLoop("Network Tester completed simCycles");
else {
if (!tickEvent.scheduled())
- schedule(tickEvent, curTick() + ticks(1));
+ schedule(tickEvent, clockEdge(1));
}
}
schedule(intEvent, nextCycle());
curAddr = 0;
- startTime = curTick();
+ startTime = curCycle();
maxAddr = static_cast<Addr>(length * bytesPerPixel);
void
Pl111::dmaDone()
{
- Tick maxFrameTime = lcdTiming2.cpl * height * clock;
+ Tick maxFrameTime = lcdTiming2.cpl * height;
--dmaPendingNum;
if (maxAddr == curAddr && !dmaPendingNum) {
- if ((curTick() - startTime) > maxFrameTime) {
+ if ((curCycle() - startTime) > maxFrameTime) {
warn("CLCD controller buffer underrun, took %d cycles when should"
" have taken %d\n", curTick() - startTime, maxFrameTime);
lcdRis.underflow = 1;
pic->seekp(0);
bmp->write(pic);
- DPRINTF(PL111, "-- schedule next dma read event at %d tick \n",
- maxFrameTime + curTick());
-
+ // schedule the next read based on when the last frame started
+ // and the desired fps (i.e. maxFrameTime), we turn the
+ // argument into a relative number of cycles in the future by
+ // subtracting curCycle()
if (lcdControl.lcden)
- schedule(readEvent, nextCycle(startTime + maxFrameTime));
+ schedule(readEvent, clockEdge(startTime + maxFrameTime -
+ curCycle()));
}
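To make the relative-cycle argument above concrete, here is a minimal standalone sketch of the same arithmetic; the clock period, register values and cycle counts are illustrative assumptions, not values taken from the PL111 model:

#include <cassert>
#include <cstdint>
#include <iostream>

typedef uint64_t Tick;

int main()
{
    // standalone sketch; all values below are illustrative assumptions
    const Tick clock = 1000;           // clock period in ticks
    const Tick cpl = 64, height = 480; // stand-ins for lcdTiming2.cpl / height
    const Tick startTime = 12000;      // cycle at which the frame started
    const Tick nowCycle = 12500;       // current cycle, i.e. curCycle()

    // the frame budget is now expressed in cycles rather than ticks
    const Tick maxFrameTime = cpl * height; // 30720 cycles

    // relative number of cycles handed to clockEdge()
    const Tick delay = startTime + maxFrameTime - nowCycle; // 30220 cycles

    // clockEdge(delay) resolves the relative delay to an absolute tick,
    // assuming the current cycle boundary sits at nowCycle * clock
    const Tick when = nowCycle * clock + delay * clock;

    // the event lands exactly at the cycle where the frame should end
    assert(when == (startTime + maxFrameTime) * clock);
    std::cout << "readEvent scheduled for tick " << when << std::endl;
    return 0;
}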
if (dmaPendingNum > (maxOutstandingDma - waterMark))
{
if (!tickEvent.scheduled() && (rxTick || txTick || txFifoTick) &&
getState() == SimObject::Running)
- schedule(tickEvent, (curTick() / ticks(1)) * ticks(1) + ticks(1));
+ schedule(tickEvent, clockEdge(1));
}
unsigned int
if (rxTick || txTick || txFifoTick)
- schedule(tickEvent, curTick() + ticks(1));
+ schedule(tickEvent, curTick() + clockPeriod());
}
void
}
// Go to the next state machine clock tick.
- rxKickTick = curTick() + ticks(1);
+ rxKickTick = curTick() + clockPeriod();
}
switch(rxDmaState) {
}
// Go to the next state machine clock tick.
- txKickTick = curTick() + ticks(1);
+ txKickTick = curTick() + clockPeriod();
}
switch(txDmaState) {
DPRINTF(Ethernet, "transfer complete: data in txFifo...schedule xmit\n");
- reschedule(txEvent, curTick() + ticks(1), true);
+ reschedule(txEvent, curTick() + clockPeriod(), true);
}
bool
private:
+ // the tick value of the next clock edge (>= curTick()) at the
+ // time of the last call to update()
+ mutable Tick tick;
+
+ // The cycle counter value corresponding to the current value of
+ // 'tick'
+ mutable Tick cycle;
+
/**
* Prevent inadvertent use of the copy constructor and assignment
* operator by making them private.
ClockedObject(ClockedObject&);
ClockedObject& operator=(ClockedObject&);
+ /**
+ * Align cycle and tick to the next clock edge if not already done.
+ */
+ void update() const
+ {
+ // both tick and cycle are up-to-date and we are done, note
+ // that the >= is important as it captures cases where tick
+ // has already passed curTick()
+ if (tick >= curTick())
+ return;
+
+ // optimise for the common case and see if the tick should be
+ // advanced by a single clock period
+ tick += clock;
+ ++cycle;
+
+ // see if we are done at this point
+ if (tick >= curTick())
+ return;
+
+ // if not, we have to recalculate the cycle and tick, we
+ // perform the calculations in terms of relative cycles to
+ // allow changes to the clock period in the future
+ Tick elapsedCycles = divCeil(curTick() - tick, clock);
+ cycle += elapsedCycles;
+ tick += elapsedCycles * clock;
+ }
+
protected:
// Clock period in ticks
* Create a clocked object and set the clock based on the
* parameters.
*/
- ClockedObject(const ClockedObjectParams* p) : SimObject(p), clock(p->clock)
+ ClockedObject(const ClockedObjectParams* p) :
+ SimObject(p), tick(0), cycle(0), clock(p->clock)
{ }
/**
public:
/**
- * Based on the clock of the object, determine the tick when the
- * next cycle begins, in other words, round the curTick() to the
- * next tick that is a multiple of the clock.
+ * Determine the tick when a cycle begins, by default the current
+ * one, but the argument also enables the caller to determine a
+ * future cycle.
*
- * @return The tick when the next cycle starts
+ * @param cycles The number of cycles into the future
+ *
+ * @return The tick when the clock edge occurs
*/
- Tick nextCycle() const
- { return divCeil(curTick(), clock) * clock; }
+ inline Tick clockEdge(int cycles = 0) const
+ {
+ // align tick to the next clock edge
+ update();
+
+ // figure out when this future cycle is
+ return tick + ticks(cycles);
+ }
/**
- * Determine the next cycle starting from a given tick instead of
- * curTick().
+ * Determine the current cycle, corresponding to a tick aligned to
+ * a clock edge.
*
- * @param begin_tick The tick to round to a clock edge
+ * @return The current cycle
+ */
+ inline Tick curCycle() const
+ {
+ // align cycle to the next clock edge.
+ update();
+
+ return cycle;
+ }
+
+ /**
+ * Based on the clock of the object, determine the tick when the
+ * next cycle begins, in other words, return the next clock edge.
*
- * @return The tick when the cycle after or on begin_tick starts
+ * @return The tick when the next cycle starts
*/
- Tick nextCycle(Tick begin_tick) const
- { return divCeil(begin_tick, clock) * clock; }
+ Tick nextCycle() const
+ { return clockEdge(); }
inline Tick frequency() const { return SimClock::Frequency / clock; }
- inline Tick ticks(int numCycles) const { return clock * numCycles; }
+ inline Tick ticks(int cycles) const { return clock * cycles; }
- inline Tick curCycle() const { return curTick() / clock; }
+ inline Tick clockPeriod() const { return clock; }
- inline Tick tickToCycles(Tick val) const { return val / clock; }
+ inline Tick tickToCycle(Tick t) const { return t / clock; }
};
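To illustrate the lazy bookkeeping that update(), clockEdge() and curCycle() perform, here is a minimal standalone sketch using the same catch-up arithmetic; the 500-tick clock period and the simulated curTick() values are illustrative assumptions, not gem5 defaults:

#include <cassert>
#include <cstdint>
#include <iostream>

typedef uint64_t Tick;

// stand-in for the global curTick(), advanced by hand in main()
static Tick globalTick = 0;
static Tick curTick() { return globalTick; }

// same rounding as base/intmath.hh divCeil()
static Tick divCeil(Tick a, Tick b) { return (a + b - 1) / b; }

class ClockedSketch
{
    mutable Tick tick;  // next clock edge at the time of the last update()
    mutable Tick cycle; // cycle count corresponding to 'tick'
    Tick clock;         // clock period in ticks

    void update() const
    {
        if (tick >= curTick())
            return;
        // common case: advance a single clock period
        tick += clock;
        ++cycle;
        if (tick >= curTick())
            return;
        // catch up over an arbitrary idle stretch
        Tick elapsedCycles = divCeil(curTick() - tick, clock);
        cycle += elapsedCycles;
        tick += elapsedCycles * clock;
    }

  public:
    ClockedSketch(Tick period) : tick(0), cycle(0), clock(period) {}
    Tick ticks(int cycles) const { return clock * cycles; }
    Tick clockEdge(int cycles = 0) const { update(); return tick + ticks(cycles); }
    Tick curCycle() const { update(); return cycle; }
    Tick nextCycle() const { return clockEdge(); }
};

int main()
{
    ClockedSketch c(500); // illustrative 500-tick clock period

    globalTick = 1250;               // mid-cycle
    assert(c.clockEdge() == 1500);   // next edge rounds up
    assert(c.curCycle() == 3);       // edge 1500 is the start of cycle 3
    assert(c.clockEdge(2) == 2500);  // two cycles further out

    globalTick = 100000;             // long idle stretch
    assert(c.curCycle() == 200);     // 100000 / 500
    assert(c.nextCycle() == 100000); // already on an edge

    std::cout << "clock edge sketch OK" << std::endl;
    return 0;
}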