/*
+ * Copyright (c) 2010 ARM Limited
+ * All rights reserved.
+ *
+ * The license below extends only to copyright in the software and shall
+ * not be construed as granting a license to any other intellectual
+ * property including but not limited to intellectual property relating
+ * to a hardware implementation of the functionality of the software
+ * licensed hereunder. You may use the software subject to the license
+ * terms below provided that you ensure that this notice is replicated
+ * unmodified and in its entirety in all distributions of the software,
+ * modified or unmodified, in source code or in binary form.
+ *
* Copyright (c) 2004-2006 The Regents of The University of Michigan
* All rights reserved.
*
#include <algorithm>
#include <cstring>
-#include "config/use_checker.hh"
-
#include "arch/isa_traits.hh"
#include "arch/utility.hh"
+#include "base/types.hh"
+#include "config/the_isa.hh"
+#include "config/use_checker.hh"
#include "cpu/checker/cpu.hh"
#include "cpu/exetrace.hh"
#include "cpu/o3/fetch.hh"
#include "mem/packet.hh"
#include "mem/request.hh"
+#include "params/DerivO3CPU.hh"
#include "sim/byteswap.hh"
-#include "sim/host.hh"
#include "sim/core.hh"
#if FULL_SYSTEM
#include "sim/system.hh"
#endif // FULL_SYSTEM
-#include "params/DerivO3CPU.hh"
+using namespace std;
// Fetch drives its icache port in timing mode only; an atomic-mode
// callback indicates a configuration error, so fail loudly.
template<class Impl>
void
DefaultFetch<Impl>::IcachePort::recvAtomic(PacketPtr pkt)
{
panic("DefaultFetch doesn't expect recvAtomic callback!");
// NOTE(review): the return below is unreachable after panic(), and it
// conflicts with the 'void' return type shown above — presumably the
// real signature returns Tick; confirm against the full file.
- return curTick;
+ return curTick();
}
template<class Impl>
fetchWidth(params->fetchWidth),
cacheBlocked(false),
retryPkt(NULL),
- retryTid(-1),
+ retryTid(InvalidThreadID),
numThreads(params->numThreads),
numFetchingThreads(params->smtNumFetchingThreads),
interruptPending(false),
switchedOut(false)
{
if (numThreads > Impl::MaxThreads)
- fatal("numThreads is not a valid value\n");
+ fatal("numThreads (%d) is larger than compiled limit (%d),\n"
+ "\tincrease MaxThreads in src/cpu/o3/impl.hh\n",
+ numThreads, static_cast<int>(Impl::MaxThreads));
// Set fetch stage's status to inactive.
_status = Inactive;
.desc("Number of cycles fetch has spent squashing")
.prereq(fetchSquashCycles);
+ fetchTlbCycles
+ .name(name() + ".TlbCycles")
+ .desc("Number of cycles fetch has spent waiting for tlb")
+ .prereq(fetchTlbCycles);
+
fetchIdleCycles
.name(name() + ".IdleCycles")
.desc("Number of cycles fetch was idle")
// Record the CPU's active-thread list.  Fetch does not own the list;
// it only stores the pointer and later iterates it when deciding which
// thread to fetch from (see the fetch-policy helpers below).
template<class Impl>
void
-DefaultFetch<Impl>::setActiveThreads(std::list<unsigned> *at_ptr)
+DefaultFetch<Impl>::setActiveThreads(std::list<ThreadID> *at_ptr)
{
activeThreads = at_ptr;
}
DefaultFetch<Impl>::initStage()
{
// Setup PC and nextPC with initial state.
- for (int tid = 0; tid < numThreads; tid++) {
- PC[tid] = cpu->readPC(tid);
- nextPC[tid] = cpu->readNextPC(tid);
- microPC[tid] = cpu->readMicroPC(tid);
+ for (ThreadID tid = 0; tid < numThreads; tid++) {
+ pc[tid] = cpu->pcState(tid);
+ fetchOffset[tid] = 0;
+ macroop[tid] = NULL;
}
- for (int tid=0; tid < numThreads; tid++) {
+ for (ThreadID tid = 0; tid < numThreads; tid++) {
fetchStatus[tid] = Running;
// Create mask to get rid of offset bits.
cacheBlkMask = (cacheBlkSize - 1);
- for (int tid=0; tid < numThreads; tid++) {
+ for (ThreadID tid = 0; tid < numThreads; tid++) {
// Create space to store a cache line.
cacheData[tid] = new uint8_t[cacheBlkSize];
cacheDataPC[tid] = 0;
void
DefaultFetch<Impl>::processCacheCompletion(PacketPtr pkt)
{
- unsigned tid = pkt->req->getThreadNum();
+ ThreadID tid = pkt->req->threadId();
- DPRINTF(Fetch, "[tid:%u] Waking up from cache miss.\n",tid);
+ DPRINTF(Fetch, "[tid:%u] Waking up from cache miss.\n", tid);
assert(!pkt->wasNacked());
DefaultFetch<Impl>::takeOverFrom()
{
// Reset all state
- for (int i = 0; i < Impl::MaxThreads; ++i) {
+ for (ThreadID i = 0; i < Impl::MaxThreads; ++i) {
stalls[i].decode = 0;
stalls[i].rename = 0;
stalls[i].iew = 0;
stalls[i].commit = 0;
- PC[i] = cpu->readPC(i);
- nextPC[i] = cpu->readNextPC(i);
- microPC[i] = cpu->readMicroPC(i);
+ pc[i] = cpu->pcState(i);
fetchStatus[i] = Running;
}
numInst = 0;
template <class Impl>
bool
-DefaultFetch<Impl>::lookupAndUpdateNextPC(DynInstPtr &inst, Addr &next_PC,
- Addr &next_NPC, Addr &next_MicroPC)
+DefaultFetch<Impl>::lookupAndUpdateNextPC(
+ DynInstPtr &inst, TheISA::PCState &nextPC)
{
// Do branch prediction check here.
// A bit of a misnomer...next_PC is actually the current PC until
bool predict_taken;
if (!inst->isControl()) {
- if (inst->isMicroop() && !inst->isLastMicroop()) {
- next_MicroPC++;
- } else {
- next_PC = next_NPC;
- next_NPC = next_NPC + instSize;
- next_MicroPC = 0;
- }
- inst->setPredTarg(next_PC, next_NPC, next_MicroPC);
+ TheISA::advancePC(nextPC, inst->staticInst);
+ inst->setPredTarg(nextPC);
inst->setPredTaken(false);
return false;
}
- //Assume for now that all control flow is to a different macroop which
- //would reset the micro pc to 0.
- next_MicroPC = 0;
-
- int tid = inst->threadNumber;
- Addr pred_PC = next_PC;
- predict_taken = branchPred.predict(inst, pred_PC, tid);
+ ThreadID tid = inst->threadNumber;
+ predict_taken = branchPred.predict(inst, nextPC, tid);
-/* if (predict_taken) {
- DPRINTF(Fetch, "[tid:%i]: Branch predicted to be taken to %#x.\n",
- tid, pred_PC);
+ if (predict_taken) {
+ DPRINTF(Fetch, "[tid:%i]: [sn:%i]: Branch predicted to be taken to %s.\n",
+ tid, inst->seqNum, nextPC);
} else {
- DPRINTF(Fetch, "[tid:%i]: Branch predicted to be not taken.\n", tid);
- }*/
-
-#if ISA_HAS_DELAY_SLOT
- next_PC = next_NPC;
- if (predict_taken)
- next_NPC = pred_PC;
- else
- next_NPC += instSize;
-#else
- if (predict_taken)
- next_PC = pred_PC;
- else
- next_PC += instSize;
- next_NPC = next_PC + instSize;
-#endif
-/* DPRINTF(Fetch, "[tid:%i]: Branch predicted to go to %#x and then %#x.\n",
- tid, next_PC, next_NPC);*/
- inst->setPredTarg(next_PC, next_NPC, next_MicroPC);
+ DPRINTF(Fetch, "[tid:%i]: [sn:%i]:Branch predicted to be not taken.\n",
+ tid, inst->seqNum);
+ }
+
+ DPRINTF(Fetch, "[tid:%i]: [sn:%i] Branch predicted to go to %s.\n",
+ tid, inst->seqNum, nextPC);
+ inst->setPredTarg(nextPC);
inst->setPredTaken(predict_taken);
++fetchedBranches;
template <class Impl>
bool
-DefaultFetch<Impl>::fetchCacheLine(Addr fetch_PC, Fault &ret_fault, unsigned tid)
+DefaultFetch<Impl>::fetchCacheLine(Addr vaddr, ThreadID tid, Addr pc)
{
Fault fault = NoFault;
+ // @todo: not sure if these should block translation.
//AlphaDep
if (cacheBlocked) {
DPRINTF(Fetch, "[tid:%i] Can't fetch cache line, cache blocked\n",
DPRINTF(Fetch, "[tid:%i] Can't fetch cache line, switched out\n",
tid);
return false;
- } else if (interruptPending && !(fetch_PC & 0x3)) {
+ } else if (checkInterrupt(pc)) {
// Hold off fetch from getting new instructions when:
// Cache is blocked, or
// while an interrupt is pending and we're not in PAL mode, or
return false;
}
- // Align the fetch PC so it's at the start of a cache block.
- Addr block_PC = icacheBlockAlignPC(fetch_PC);
-
- // If we've already got the block, no need to try to fetch it again.
- if (cacheDataValid[tid] && block_PC == cacheDataPC[tid]) {
- return true;
- }
+ // Align the fetch address so it's at the start of a cache block.
+ Addr block_PC = icacheBlockAlignPC(vaddr);
// Setup the memReq to do a read of the first instruction's address.
// Set the appropriate read size and flags as well.
// Build request here.
- RequestPtr mem_req = new Request(tid, block_PC, cacheBlkSize, 0,
- fetch_PC, cpu->cpuId(), tid);
+ RequestPtr mem_req =
+ new Request(tid, block_PC, cacheBlkSize, Request::INST_FETCH,
+ pc, cpu->thread[tid]->contextId(), tid);
memReq[tid] = mem_req;
- // Translate the instruction request.
- fault = cpu->translateInstReq(mem_req, cpu->thread[tid]);
+ // Initiate translation of the icache block
+ fetchStatus[tid] = ItlbWait;
+ FetchTranslation *trans = new FetchTranslation(this);
+ cpu->itb->translateTiming(mem_req, cpu->thread[tid]->getTC(),
+ trans, BaseTLB::Execute);
+ return true;
+}
- // In the case of faults, the fetch stage may need to stall and wait
- // for the ITB miss to be handled.
+template <class Impl>
+void
+DefaultFetch<Impl>::finishTranslation(Fault fault, RequestPtr mem_req)
+{
+ ThreadID tid = mem_req->threadId();
+ Addr block_PC = mem_req->getVaddr();
- // If translation was successful, attempt to read the first
- // instruction.
+ // If translation was successful, attempt to read the icache block.
if (fault == NoFault) {
-#if 0
- if (cpu->system->memctrl->badaddr(memReq[tid]->paddr) ||
- memReq[tid]->isUncacheable()) {
- DPRINTF(Fetch, "Fetch: Bad address %#x (hopefully on a "
- "misspeculating path)!",
- memReq[tid]->paddr);
- ret_fault = TheISA::genMachineCheckFault();
- return false;
- }
-#endif
-
// Build packet here.
PacketPtr data_pkt = new Packet(mem_req,
MemCmd::ReadReq, Packet::Broadcast);
cacheDataPC[tid] = block_PC;
cacheDataValid[tid] = false;
-
DPRINTF(Fetch, "Fetch: Doing instruction read.\n");
fetchedCacheLines++;
- // Now do the timing access to see whether or not the instruction
- // exists within the cache.
+ // Access the cache.
if (!icachePort->sendTiming(data_pkt)) {
assert(retryPkt == NULL);
- assert(retryTid == -1);
+ assert(retryTid == InvalidThreadID);
DPRINTF(Fetch, "[tid:%i] Out of MSHRs!\n", tid);
+
fetchStatus[tid] = IcacheWaitRetry;
retryPkt = data_pkt;
retryTid = tid;
cacheBlocked = true;
- return false;
+ } else {
+ DPRINTF(Fetch, "[tid:%i]: Doing Icache access.\n", tid);
+ DPRINTF(Activity, "[tid:%i]: Activity: Waiting on I-cache "
+ "response.\n", tid);
+
+ lastIcacheStall[tid] = curTick();
+ fetchStatus[tid] = IcacheWaitResponse;
}
+ } else {
+ // Translation faulted, icache request won't be sent.
+ delete mem_req;
+ memReq[tid] = NULL;
- DPRINTF(Fetch, "[tid:%i]: Doing cache access.\n", tid);
+ // Send the fault to commit. This thread will not do anything
+ // until commit handles the fault. The only other way it can
+ // wake up is if a squash comes along and changes the PC.
+ TheISA::PCState fetchPC = pc[tid];
- lastIcacheStall[tid] = curTick;
+ // We will use a nop in order to carry the fault.
+ DynInstPtr instruction = buildInst(tid,
+ StaticInstPtr(TheISA::NoopMachInst, fetchPC.instAddr()),
+ NULL, fetchPC, fetchPC, false);
- DPRINTF(Activity, "[tid:%i]: Activity: Waiting on I-cache "
- "response.\n", tid);
+ instruction->setPredTarg(fetchPC);
+ instruction->fault = fault;
+ wroteToTimeBuffer = true;
- fetchStatus[tid] = IcacheWaitResponse;
- } else {
- delete mem_req;
- memReq[tid] = NULL;
- }
+ fetchStatus[tid] = TrapPending;
- ret_fault = fault;
- return true;
+ DPRINTF(Fetch, "[tid:%i]: Blocked, need to handle the trap.\n", tid);
+ DPRINTF(Fetch, "[tid:%i]: fault (%s) detected @ PC %s.\n",
+ tid, fault->name(), pc[tid]);
+ }
+ _status = updateFetchStatus();
}
template <class Impl>
inline void
-DefaultFetch<Impl>::doSquash(const Addr &new_PC,
- const Addr &new_NPC, const Addr &new_microPC, unsigned tid)
+DefaultFetch<Impl>::doSquash(const TheISA::PCState &newPC, ThreadID tid)
{
- DPRINTF(Fetch, "[tid:%i]: Squashing, setting PC to: %#x, NPC to: %#x.\n",
- tid, new_PC, new_NPC);
+ DPRINTF(Fetch, "[tid:%i]: Squashing, setting PC to: %s.\n",
+ tid, newPC);
- PC[tid] = new_PC;
- nextPC[tid] = new_NPC;
- microPC[tid] = new_microPC;
+ pc[tid] = newPC;
+ fetchOffset[tid] = 0;
+ macroop[tid] = NULL;
+ predecoder.reset();
// Clear the icache miss if it's outstanding.
if (fetchStatus[tid] == IcacheWaitResponse) {
delete retryPkt;
}
retryPkt = NULL;
- retryTid = -1;
+ retryTid = InvalidThreadID;
}
fetchStatus[tid] = Squashing;
template<class Impl>
void
-DefaultFetch<Impl>::squashFromDecode(const Addr &new_PC, const Addr &new_NPC,
- const Addr &new_MicroPC,
- const InstSeqNum &seq_num, unsigned tid)
+DefaultFetch<Impl>::squashFromDecode(const TheISA::PCState &newPC,
+ const InstSeqNum &seq_num, ThreadID tid)
{
- DPRINTF(Fetch, "[tid:%i]: Squashing from decode.\n",tid);
+ DPRINTF(Fetch, "[tid:%i]: Squashing from decode.\n", tid);
- doSquash(new_PC, new_NPC, new_MicroPC, tid);
+ doSquash(newPC, tid);
// Tell the CPU to remove any instructions that are in flight between
// fetch and decode.
template<class Impl>
bool
-DefaultFetch<Impl>::checkStall(unsigned tid) const
+DefaultFetch<Impl>::checkStall(ThreadID tid) const
{
bool ret_val = false;
DefaultFetch<Impl>::updateFetchStatus()
{
//Check Running
- std::list<unsigned>::iterator threads = activeThreads->begin();
- std::list<unsigned>::iterator end = activeThreads->end();
+ list<ThreadID>::iterator threads = activeThreads->begin();
+ list<ThreadID>::iterator end = activeThreads->end();
while (threads != end) {
- unsigned tid = *threads++;
+ ThreadID tid = *threads++;
if (fetchStatus[tid] == Running ||
fetchStatus[tid] == Squashing ||
template <class Impl>
void
-DefaultFetch<Impl>::squash(const Addr &new_PC, const Addr &new_NPC,
- const Addr &new_MicroPC,
- const InstSeqNum &seq_num, unsigned tid)
+DefaultFetch<Impl>::squash(const TheISA::PCState &newPC,
+ const InstSeqNum &seq_num, ThreadID tid)
{
- DPRINTF(Fetch, "[tid:%u]: Squash from commit.\n",tid);
+ DPRINTF(Fetch, "[tid:%u]: Squash from commit.\n", tid);
- doSquash(new_PC, new_NPC, new_MicroPC, tid);
+ doSquash(newPC, tid);
// Tell the CPU to remove any instructions that are not in the ROB.
cpu->removeInstsNotInROB(tid);
void
DefaultFetch<Impl>::tick()
{
- std::list<unsigned>::iterator threads = activeThreads->begin();
- std::list<unsigned>::iterator end = activeThreads->end();
+ list<ThreadID>::iterator threads = activeThreads->begin();
+ list<ThreadID>::iterator end = activeThreads->end();
bool status_change = false;
wroteToTimeBuffer = false;
while (threads != end) {
- unsigned tid = *threads++;
+ ThreadID tid = *threads++;
// Check the signals for each thread to determine the proper status
// for each thread.
template <class Impl>
bool
-DefaultFetch<Impl>::checkSignalsAndUpdate(unsigned tid)
+DefaultFetch<Impl>::checkSignalsAndUpdate(ThreadID tid)
{
// Update the per thread stall statuses.
if (fromDecode->decodeBlock[tid]) {
DPRINTF(Fetch, "[tid:%u]: Squashing instructions due to squash "
"from commit.\n",tid);
// In any case, squash.
- squash(fromCommit->commitInfo[tid].nextPC,
- fromCommit->commitInfo[tid].nextNPC,
- fromCommit->commitInfo[tid].nextMicroPC,
+ squash(fromCommit->commitInfo[tid].pc,
fromCommit->commitInfo[tid].doneSeqNum,
tid);
- // Also check if there's a mispredict that happened.
- if (fromCommit->commitInfo[tid].branchMispredict) {
+ // If it was a branch mispredict on a control instruction, update the
+ // branch predictor with that instruction, otherwise just kill the
+ // invalid state we generated after the squashed sequence number.
+ assert(!fromCommit->commitInfo[tid].branchMispredict ||
+ fromCommit->commitInfo[tid].mispredictInst);
+
+ if (fromCommit->commitInfo[tid].branchMispredict &&
+ fromCommit->commitInfo[tid].mispredictInst->isControl()) {
branchPred.squash(fromCommit->commitInfo[tid].doneSeqNum,
- fromCommit->commitInfo[tid].nextPC,
+ fromCommit->commitInfo[tid].pc,
fromCommit->commitInfo[tid].branchTaken,
tid);
} else {
if (fetchStatus[tid] != Squashing) {
- DPRINTF(Fetch, "Squashing from decode with PC = %#x, NPC = %#x\n",
- fromDecode->decodeInfo[tid].nextPC,
- fromDecode->decodeInfo[tid].nextNPC);
+ TheISA::PCState nextPC = fromDecode->decodeInfo[tid].nextPC;
+ DPRINTF(Fetch, "Squashing from decode with PC = %s\n", nextPC);
// Squash unless we're already squashing
squashFromDecode(fromDecode->decodeInfo[tid].nextPC,
- fromDecode->decodeInfo[tid].nextNPC,
- fromDecode->decodeInfo[tid].nextMicroPC,
fromDecode->decodeInfo[tid].doneSeqNum,
tid);
return false;
}
+// Construct a DynInst for one fetched (micro)op: assign it a global
+// sequence number, bind it to its thread, optionally create a trace
+// record, register it on the CPU's in-flight instruction list, and
+// queue it in the time buffer heading to decode.  Returns the newly
+// built instruction so the caller can run branch prediction on it.
+template<class Impl>
+typename Impl::DynInstPtr
+DefaultFetch<Impl>::buildInst(ThreadID tid, StaticInstPtr staticInst,
+        StaticInstPtr curMacroop, TheISA::PCState thisPC,
+        TheISA::PCState nextPC, bool trace)
+{
+    // Get a sequence number.
+    InstSeqNum seq = cpu->getAndIncrementInstSeq();
+
+    // Create a new DynInst from the instruction fetched.
+    DynInstPtr instruction =
+        new DynInst(staticInst, thisPC, nextPC, seq, cpu);
+    instruction->setTid(tid);
+
+    // NOTE(review): the address-space id is set to the thread id here —
+    // confirm that ASID==tid is the intended mapping in this CPU model.
+    instruction->setASID(tid);
+
+    instruction->setThreadState(cpu->thread[tid]);
+
+    DPRINTF(Fetch, "[tid:%i]: Instruction PC %#x (%d) created "
+            "[sn:%lli].\n", tid, thisPC.instAddr(),
+            thisPC.microPC(), seq);
+
+    DPRINTF(Fetch, "[tid:%i]: Instruction is: %s\n", tid,
+            instruction->staticInst->
+            disassemble(thisPC.instAddr()));
+
+#if TRACING_ON
+    // Only allocate a trace record when tracing is compiled in and the
+    // caller asked for it (e.g. not for fault-carrying nops).
+    if (trace) {
+        instruction->traceData =
+            cpu->getTracer()->getInstRecord(curTick(), cpu->tcBase(tid),
+                    instruction->staticInst, thisPC, curMacroop);
+    }
+#else
+    instruction->traceData = NULL;
+#endif
+
+    // Add instruction to the CPU's list of instructions.
+    instruction->setInstListIt(cpu->addInst(instruction));
+
+    // Write the instruction to the first slot in the queue
+    // that heads to decode.
+    assert(numInst < fetchWidth);
+    toDecode->insts[toDecode->size++] = instruction;
+
+    return instruction;
+}
+
template<class Impl>
void
DefaultFetch<Impl>::fetch(bool &status_change)
//////////////////////////////////////////
// Start actual fetch
//////////////////////////////////////////
- int tid = getFetchingThread(fetchPolicy);
+ ThreadID tid = getFetchingThread(fetchPolicy);
- if (tid == -1 || drainPending) {
+ if (tid == InvalidThreadID || drainPending) {
DPRINTF(Fetch,"There are no more threads available to fetch from.\n");
// Breaks looping condition in tick()
DPRINTF(Fetch, "Attempting to fetch from [tid:%i]\n", tid);
// The current PC.
- Addr fetch_PC = PC[tid];
- Addr fetch_NPC = nextPC[tid];
- Addr fetch_MicroPC = microPC[tid];
+ TheISA::PCState thisPC = pc[tid];
- // Fault code for memory access.
- Fault fault = NoFault;
+ Addr pcOffset = fetchOffset[tid];
+ Addr fetchAddr = (thisPC.instAddr() + pcOffset) & BaseCPU::PCMask;
// If returning from the delay of a cache miss, then update the status
// to running, otherwise do the cache access. Possibly move this up
// to tick() function.
if (fetchStatus[tid] == IcacheAccessComplete) {
- DPRINTF(Fetch, "[tid:%i]: Icache miss is complete.\n",
- tid);
+ DPRINTF(Fetch, "[tid:%i]: Icache miss is complete.\n", tid);
fetchStatus[tid] = Running;
status_change = true;
} else if (fetchStatus[tid] == Running) {
- DPRINTF(Fetch, "[tid:%i]: Attempting to translate and read "
- "instruction, starting at PC %08p.\n",
- tid, fetch_PC);
+ // Align the fetch PC so it's at the start of a cache block.
+ Addr block_PC = icacheBlockAlignPC(fetchAddr);
+
+ // Unless buffer already got the block, fetch it from icache.
+ if (!cacheDataValid[tid] || block_PC != cacheDataPC[tid]) {
+ DPRINTF(Fetch, "[tid:%i]: Attempting to translate and read "
+ "instruction, starting at PC %s.\n", tid, thisPC);
- bool fetch_success = fetchCacheLine(fetch_PC, fault, tid);
- if (!fetch_success) {
- if (cacheBlocked) {
+ fetchCacheLine(fetchAddr, tid, thisPC.instAddr());
+
+ if (fetchStatus[tid] == IcacheWaitResponse)
++icacheStallCycles;
- } else {
+ else if (fetchStatus[tid] == ItlbWait)
+ ++fetchTlbCycles;
+ else
++fetchMiscStallCycles;
- }
+ return;
+ } else if (checkInterrupt(thisPC.instAddr()) || isSwitchedOut()) {
+ ++fetchMiscStallCycles;
return;
}
} else {
DPRINTF(Fetch, "[tid:%i]: Fetch is squashing!\n", tid);
} else if (fetchStatus[tid] == IcacheWaitResponse) {
++icacheStallCycles;
- DPRINTF(Fetch, "[tid:%i]: Fetch is waiting cache response!\n", tid);
+ DPRINTF(Fetch, "[tid:%i]: Fetch is waiting cache response!\n",
+ tid);
+ } else if (fetchStatus[tid] == ItlbWait) {
+ DPRINTF(Fetch, "[tid:%i]: Fetch is waiting ITLB walk to "
+ "finish! \n", tid);
+ ++fetchTlbCycles;
}
- // Status is Idle, Squashing, Blocked, or IcacheWaitResponse, so
- // fetch should do nothing.
+ // Status is Idle, Squashing, Blocked, ItlbWait or IcacheWaitResponse
+ // so fetch should do nothing.
return;
}
++fetchCycles;
- // If we had a stall due to an icache miss, then return.
- if (fetchStatus[tid] == IcacheWaitResponse) {
- ++icacheStallCycles;
- status_change = true;
- return;
- }
-
- Addr next_PC = fetch_PC;
- Addr next_NPC = fetch_NPC;
- Addr next_MicroPC = fetch_MicroPC;
-
- InstSeqNum inst_seq;
- MachInst inst;
- ExtMachInst ext_inst;
- // @todo: Fix this hack.
- unsigned offset = (fetch_PC & cacheBlkMask) & ~3;
+ TheISA::PCState nextPC = thisPC;
StaticInstPtr staticInst = NULL;
- StaticInstPtr macroop = NULL;
-
- if (fault == NoFault) {
- // If the read of the first instruction was successful, then grab the
- // instructions from the rest of the cache line and put them into the
- // queue heading to decode.
-
- DPRINTF(Fetch, "[tid:%i]: Adding instructions to queue to "
- "decode.\n",tid);
-
- // Need to keep track of whether or not a predicted branch
- // ended this fetch block.
- bool predicted_branch = false;
-
- while (offset < cacheBlkSize &&
- numInst < fetchWidth &&
- !predicted_branch) {
-
- // If we're branching after this instruction, quite fetching
- // from the same block then.
- predicted_branch =
- (fetch_PC + sizeof(TheISA::MachInst) != fetch_NPC);
- if (predicted_branch) {
- DPRINTF(Fetch, "Branch detected with PC = %#x, NPC = %#x\n",
- fetch_PC, fetch_NPC);
+ StaticInstPtr curMacroop = macroop[tid];
+
+ // If the read of the first instruction was successful, then grab the
+ // instructions from the rest of the cache line and put them into the
+ // queue heading to decode.
+
+ DPRINTF(Fetch, "[tid:%i]: Adding instructions to queue to "
+ "decode.\n", tid);
+
+ // Need to keep track of whether or not a predicted branch
+ // ended this fetch block.
+ bool predictedBranch = false;
+
+ TheISA::MachInst *cacheInsts =
+ reinterpret_cast<TheISA::MachInst *>(cacheData[tid]);
+
+ const unsigned numInsts = cacheBlkSize / instSize;
+ unsigned blkOffset = (fetchAddr - cacheDataPC[tid]) / instSize;
+
+ // Loop through instruction memory from the cache.
+ while (blkOffset < numInsts &&
+ numInst < fetchWidth &&
+ !predictedBranch) {
+
+ // If we need to process more memory, do it now.
+ if (!curMacroop && !predecoder.extMachInstReady()) {
+ if (ISA_HAS_DELAY_SLOT && pcOffset == 0) {
+ // Walk past any annulled delay slot instructions.
+ Addr pcAddr = thisPC.instAddr() & BaseCPU::PCMask;
+ while (fetchAddr != pcAddr && blkOffset < numInsts) {
+ blkOffset++;
+ fetchAddr += instSize;
+ }
+ if (blkOffset >= numInsts)
+ break;
}
+ MachInst inst = TheISA::gtoh(cacheInsts[blkOffset]);
- // Make sure this is a valid index.
- assert(offset <= cacheBlkSize - instSize);
+ predecoder.setTC(cpu->thread[tid]->getTC());
+ predecoder.moreBytes(thisPC, fetchAddr, inst);
- if (!macroop) {
- // Get the instruction from the array of the cache line.
- inst = TheISA::gtoh(*reinterpret_cast<TheISA::MachInst *>
- (&cacheData[tid][offset]));
-
- predecoder.setTC(cpu->thread[tid]->getTC());
- predecoder.moreBytes(fetch_PC, fetch_PC, inst);
+ if (predecoder.needMoreBytes()) {
+ blkOffset++;
+ fetchAddr += instSize;
+ pcOffset += instSize;
+ }
+ }
- ext_inst = predecoder.getExtMachInst();
- staticInst = StaticInstPtr(ext_inst, fetch_PC);
- if (staticInst->isMacroop())
- macroop = staticInst;
+ // Extract as many instructions and/or microops as we can from
+ // the memory we've processed so far.
+ do {
+ if (!curMacroop) {
+ if (predecoder.extMachInstReady()) {
+ ExtMachInst extMachInst;
+
+ extMachInst = predecoder.getExtMachInst(thisPC);
+ staticInst = StaticInstPtr(extMachInst,
+ thisPC.instAddr());
+
+ // Increment stat of fetched instructions.
+ ++fetchedInsts;
+
+ if (staticInst->isMacroop()) {
+ curMacroop = staticInst;
+ } else {
+ pcOffset = 0;
+ }
+ } else {
+ // We need more bytes for this instruction.
+ break;
+ }
}
- do {
- if (macroop) {
- staticInst = macroop->fetchMicroop(fetch_MicroPC);
- if (staticInst->isLastMicroop())
- macroop = NULL;
+ if (curMacroop) {
+ staticInst = curMacroop->fetchMicroop(thisPC.microPC());
+ if (staticInst->isLastMicroop()) {
+ curMacroop = NULL;
+ pcOffset = 0;
}
+ }
- // Get a sequence number.
- inst_seq = cpu->getAndIncrementInstSeq();
-
- // Create a new DynInst from the instruction fetched.
- DynInstPtr instruction = new DynInst(staticInst,
- fetch_PC, fetch_NPC, fetch_MicroPC,
- next_PC, next_NPC, next_MicroPC,
- inst_seq, cpu);
- instruction->setTid(tid);
-
- instruction->setASID(tid);
-
- instruction->setThreadState(cpu->thread[tid]);
-
- DPRINTF(Fetch, "[tid:%i]: Instruction PC %#x created "
- "[sn:%lli]\n",
- tid, instruction->readPC(), inst_seq);
-
- //DPRINTF(Fetch, "[tid:%i]: MachInst is %#x\n", tid, ext_inst);
-
- DPRINTF(Fetch, "[tid:%i]: Instruction is: %s\n",
- tid, instruction->staticInst->disassemble(fetch_PC));
-
-#if TRACING_ON
- instruction->traceData =
- cpu->getTracer()->getInstRecord(curTick, cpu->tcBase(tid),
- instruction->staticInst, instruction->readPC());
-#else
- instruction->traceData = NULL;
-#endif
-
- ///FIXME This needs to be more robust in dealing with delay slots
- predicted_branch |=
- lookupAndUpdateNextPC(instruction, next_PC, next_NPC, next_MicroPC);
-
- // Add instruction to the CPU's list of instructions.
- instruction->setInstListIt(cpu->addInst(instruction));
+ DynInstPtr instruction =
+ buildInst(tid, staticInst, curMacroop,
+ thisPC, nextPC, true);
- // Write the instruction to the first slot in the queue
- // that heads to decode.
- toDecode->insts[numInst] = instruction;
+ numInst++;
- toDecode->size++;
+ nextPC = thisPC;
- // Increment stat of fetched instructions.
- ++fetchedInsts;
+ // If we're branching after this instruction, quit fetching
+ // from the same block.
+ predictedBranch |= thisPC.branching();
+ predictedBranch |=
+ lookupAndUpdateNextPC(instruction, nextPC);
+ if (predictedBranch) {
+ DPRINTF(Fetch, "Branch detected with PC = %s\n", thisPC);
+ }
- // Move to the next instruction, unless we have a branch.
- fetch_PC = next_PC;
- fetch_NPC = next_NPC;
- fetch_MicroPC = next_MicroPC;
+ // Move to the next instruction, unless we have a branch.
+ thisPC = nextPC;
- if (instruction->isQuiesce()) {
- DPRINTF(Fetch, "Quiesce instruction encountered, halting fetch!",
- curTick);
- fetchStatus[tid] = QuiescePending;
- ++numInst;
- status_change = true;
- break;
- }
-
- ++numInst;
- } while (staticInst->isMicroop() &&
- !staticInst->isLastMicroop() &&
- numInst < fetchWidth);
- offset += instSize;
- }
+ if (instruction->isQuiesce()) {
+ DPRINTF(Fetch,
+ "Quiesce instruction encountered, halting fetch!");
+ fetchStatus[tid] = QuiescePending;
+ status_change = true;
+ break;
+ }
+ } while ((curMacroop || predecoder.extMachInstReady()) &&
+ numInst < fetchWidth);
+ }
- if (predicted_branch) {
- DPRINTF(Fetch, "[tid:%i]: Done fetching, predicted branch "
- "instruction encountered.\n", tid);
- } else if (numInst >= fetchWidth) {
- DPRINTF(Fetch, "[tid:%i]: Done fetching, reached fetch bandwidth "
- "for this cycle.\n", tid);
- } else if (offset >= cacheBlkSize) {
- DPRINTF(Fetch, "[tid:%i]: Done fetching, reached the end of cache "
- "block.\n", tid);
- }
+ if (predictedBranch) {
+ DPRINTF(Fetch, "[tid:%i]: Done fetching, predicted branch "
+ "instruction encountered.\n", tid);
+ } else if (numInst >= fetchWidth) {
+ DPRINTF(Fetch, "[tid:%i]: Done fetching, reached fetch bandwidth "
+ "for this cycle.\n", tid);
+ } else if (blkOffset >= cacheBlkSize) {
+ DPRINTF(Fetch, "[tid:%i]: Done fetching, reached the end of cache "
+ "block.\n", tid);
}
+ macroop[tid] = curMacroop;
+ fetchOffset[tid] = pcOffset;
+
if (numInst > 0) {
wroteToTimeBuffer = true;
}
- // Now that fetching is completed, update the PC to signify what the next
- // cycle will be.
- if (fault == NoFault) {
- PC[tid] = next_PC;
- nextPC[tid] = next_NPC;
- microPC[tid] = next_MicroPC;
- DPRINTF(Fetch, "[tid:%i]: Setting PC to %08p.\n", tid, next_PC);
- } else {
- // We shouldn't be in an icache miss and also have a fault (an ITB
- // miss)
- if (fetchStatus[tid] == IcacheWaitResponse) {
- panic("Fetch should have exited prior to this!");
- }
-
- // Send the fault to commit. This thread will not do anything
- // until commit handles the fault. The only other way it can
- // wake up is if a squash comes along and changes the PC.
- assert(numInst < fetchWidth);
- // Get a sequence number.
- inst_seq = cpu->getAndIncrementInstSeq();
- // We will use a nop in order to carry the fault.
- ext_inst = TheISA::NoopMachInst;
-
- // Create a new DynInst from the dummy nop.
- DynInstPtr instruction = new DynInst(ext_inst,
- fetch_PC, fetch_NPC, fetch_MicroPC,
- next_PC, next_NPC, next_MicroPC,
- inst_seq, cpu);
- instruction->setPredTarg(next_NPC, next_NPC + instSize, 0);
- instruction->setTid(tid);
-
- instruction->setASID(tid);
-
- instruction->setThreadState(cpu->thread[tid]);
-
- instruction->traceData = NULL;
-
- instruction->setInstListIt(cpu->addInst(instruction));
-
- instruction->fault = fault;
-
- toDecode->insts[numInst] = instruction;
- toDecode->size++;
-
- DPRINTF(Fetch, "[tid:%i]: Blocked, need to handle the trap.\n",tid);
-
- fetchStatus[tid] = TrapPending;
- status_change = true;
-
- DPRINTF(Fetch, "[tid:%i]: fault (%s) detected @ PC %08p",
- tid, fault->name(), PC[tid]);
- }
+ pc[tid] = thisPC;
}
template<class Impl>
{
if (retryPkt != NULL) {
assert(cacheBlocked);
- assert(retryTid != -1);
+ assert(retryTid != InvalidThreadID);
assert(fetchStatus[retryTid] == IcacheWaitRetry);
if (icachePort->sendTiming(retryPkt)) {
fetchStatus[retryTid] = IcacheWaitResponse;
retryPkt = NULL;
- retryTid = -1;
+ retryTid = InvalidThreadID;
cacheBlocked = false;
}
} else {
- assert(retryTid == -1);
+ assert(retryTid == InvalidThreadID);
// Access has been squashed since it was sent out. Just clear
// the cache being blocked.
cacheBlocked = false;
// //
///////////////////////////////////////
template<class Impl>
-int
+ThreadID
DefaultFetch<Impl>::getFetchingThread(FetchPriority &fetch_priority)
{
if (numThreads > 1) {
return branchCount();
default:
- return -1;
+ return InvalidThreadID;
}
} else {
- std::list<unsigned>::iterator thread = activeThreads->begin();
- assert(thread != activeThreads->end());
- int tid = *thread;
+ list<ThreadID>::iterator thread = activeThreads->begin();
+ if (thread == activeThreads->end()) {
+ return InvalidThreadID;
+ }
+
+ ThreadID tid = *thread;
if (fetchStatus[tid] == Running ||
fetchStatus[tid] == IcacheAccessComplete ||
fetchStatus[tid] == Idle) {
return tid;
} else {
- return -1;
+ return InvalidThreadID;
}
}
-
}
template<class Impl>
-int
+ThreadID
DefaultFetch<Impl>::roundRobin()
{
- std::list<unsigned>::iterator pri_iter = priorityList.begin();
- std::list<unsigned>::iterator end = priorityList.end();
+ list<ThreadID>::iterator pri_iter = priorityList.begin();
+ list<ThreadID>::iterator end = priorityList.end();
- int high_pri;
+ ThreadID high_pri;
while (pri_iter != end) {
high_pri = *pri_iter;
pri_iter++;
}
- return -1;
+ return InvalidThreadID;
}
template<class Impl>
-int
+ThreadID
DefaultFetch<Impl>::iqCount()
{
- std::priority_queue<unsigned> PQ;
+ std::priority_queue<ThreadID> PQ;
- std::list<unsigned>::iterator threads = activeThreads->begin();
- std::list<unsigned>::iterator end = activeThreads->end();
+ list<ThreadID>::iterator threads = activeThreads->begin();
+ list<ThreadID>::iterator end = activeThreads->end();
while (threads != end) {
- unsigned tid = *threads++;
+ ThreadID tid = *threads++;
PQ.push(fromIEW->iewInfo[tid].iqCount);
}
while (!PQ.empty()) {
-
- unsigned high_pri = PQ.top();
+ ThreadID high_pri = PQ.top();
if (fetchStatus[high_pri] == Running ||
fetchStatus[high_pri] == IcacheAccessComplete ||
}
- return -1;
+ return InvalidThreadID;
}
template<class Impl>
-int
+ThreadID
DefaultFetch<Impl>::lsqCount()
{
- std::priority_queue<unsigned> PQ;
+ std::priority_queue<ThreadID> PQ;
- std::list<unsigned>::iterator threads = activeThreads->begin();
- std::list<unsigned>::iterator end = activeThreads->end();
+ list<ThreadID>::iterator threads = activeThreads->begin();
+ list<ThreadID>::iterator end = activeThreads->end();
while (threads != end) {
- unsigned tid = *threads++;
+ ThreadID tid = *threads++;
PQ.push(fromIEW->iewInfo[tid].ldstqCount);
}
while (!PQ.empty()) {
-
- unsigned high_pri = PQ.top();
+ ThreadID high_pri = PQ.top();
if (fetchStatus[high_pri] == Running ||
fetchStatus[high_pri] == IcacheAccessComplete ||
return high_pri;
else
PQ.pop();
-
}
- return -1;
+ return InvalidThreadID;
}
// Branch-count fetch policy: intentionally unimplemented — always
// panics.  The disabled (#if 0) block sketches the intended lookup of
// the first active thread for when the policy is implemented.
template<class Impl>
-int
+ThreadID
DefaultFetch<Impl>::branchCount()
{
-    std::list<unsigned>::iterator thread = activeThreads->begin();
+#if 0
+    list<ThreadID>::iterator thread = activeThreads->begin();
assert(thread != activeThreads->end());
-    unsigned tid = *thread;
+    ThreadID tid = *thread;
+#endif
panic("Branch Count Fetch policy unimplemented\n");
-    return 0 * tid;
+    return InvalidThreadID;
}