Formerly, to free up bandwidth in a resource we could just change the
pointer in that resource, but at the same time the pipeline stages still
had visibility into what happened to a resource request. Now that we are
recycling these requests (to avoid too much dynamic allocation), we can't
throw the request away too early or the pipeline stage gets stale
information. Instead, mark when a request is done with the resource
altogether, and then let the pipeline stage call back to the resource when
it's time to free up the bandwidth for more instructions.
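For illustration only, a minimal sketch (not gem5 code) of the stage-side
flow this change enables: the stage reads the request's outcome first and
only afterwards calls freeSlot() to hand the bandwidth back. The stand-in
classes below are simplified; only the names isCompleted(), doneInResource,
and freeSlot() mirror this patch.

#include <cassert>
#include <vector>

// Simplified stand-ins; the real gem5 classes carry much more state.
struct Resource {
    std::vector<int> availSlots;
    void freeSlot(int slot_idx) { availSlots.push_back(slot_idx); }
};

struct ResourceRequest {
    Resource *res = nullptr;
    int slotNum = 0;
    bool completed = false;
    bool doneInResource = false;

    bool isCompleted() const { return completed; }

    // Called by the pipeline stage once it has read the outcome.
    void freeSlot() { assert(res); res->freeSlot(slotNum); }
};

// Stage-side pattern: record the outcome before recycling the slot.
bool stageProcess(ResourceRequest *req)
{
    bool req_completed = req->isCompleted();  // safe: slot not recycled yet

    if (req->doneInResource)
        req->freeSlot();  // only now may the resource reuse the slot

    return req_completed; // caller decides whether to keep processing
}

Deferring freeSlot() to the stage is what keeps a recycled request from
being handed to another instruction while the stage still needs its status.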
*** interface notes ***
- When an instruction completes and is done with a resource for that cycle, call done()
- When an instruction fails and is done with a resource for that cycle, call done(false)
- When an instruction completes but isn't finished with a resource, call completed()
- When an instruction fails but isn't finished with a resource, call completed(false)
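Another minimal sketch, this time from the resource side, of how a unit
would pick among the four calls above. ToyCacheUnit, portBlocked, and
hitThisCycle are invented names for illustration; done() and completed()
simply follow the notes.

struct Request {
    bool outcomeOK = false;       // did the last operation succeed?
    bool doneInResource = false;  // finished with the resource altogether?

    // Finished with the resource this cycle: record the outcome and flag
    // the slot so the pipeline stage can free it.
    void done(bool success = true)
    {
        outcomeOK = success;
        doneInResource = true;
    }

    // Not finished with the resource yet: record the outcome, keep the slot.
    void completed(bool success = true)
    {
        outcomeOK = success;
    }
};

struct ToyCacheUnit {
    bool portBlocked = false;

    void execute(Request *req, bool hitThisCycle)
    {
        if (portBlocked) {
            req->done(false);       // fails, and is done with the resource this cycle
            return;
        }
        if (!hitThisCycle) {
            req->completed(false);  // fails, but still holds the resource (miss pending)
            return;
        }
        req->done();                // completes, and is done with the resource
    }
};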
* * *
inorder: tlbmiss wakeup bug fix
ResReqPtr req = cpu->resPool->request(res_num, inst);
assert(req->valid);
- if (req->isCompleted()) {
+ bool req_completed = req->isCompleted();
+ bool done_in_pipeline = false;
+ if (req_completed) {
DPRINTF(InOrderStage, "[tid:%i]: [sn:%i] request to %s "
"completed.\n", tid, inst->seqNum,
cpu->resPool->name(res_num));
req->stagePasses++;
- bool done_in_pipeline = inst->finishSkedEntry();
+ done_in_pipeline = inst->finishSkedEntry();
if (done_in_pipeline) {
DPRINTF(InOrderDynInst, "[tid:%i]: [sn:%i] finished "
"in pipeline.\n", tid, inst->seqNum);
- break;
}
} else {
DPRINTF(InOrderStage, "[tid:%i]: [sn:%i] request to %s failed."
"thread due to cache miss.\n");
cpu->activateNextReadyContext();
}
-
+ }
+
+ // If this request no longer needs to take up bandwidth in the
+ // resource, go ahead and free that bandwidth up
+ if (req->doneInResource) {
+ req->freeSlot();
+ }
+
+ // No longer need to process this instruction if the last
+ // request it had wasn't completed or if there is nothing
+ // else for it to do in the pipeline
+ if (done_in_pipeline || !req_completed) {
break;
}
// Used to deny an instruction a resource.
deniedReq = new ResourceRequest(this);
+ deniedReq->valid = true;
}
Resource::~Resource()
void
Resource::freeSlot(int slot_idx)
{
+ DPRINTF(Resource, "Deallocating [slot:%i].\n",
+ slot_idx);
+
// Put slot number on this resource's free list
availSlots.push_back(slot_idx);
slot_num = getSlot(inst);
if (slot_num != -1) {
+ DPRINTF(Resource, "Allocating [slot:%i] for [tid:%i]: [sn:%i]\n",
+ slot_num, inst->readTid(), inst->seqNum);
+
// Get Stage # from Schedule Entry
stage_num = inst->curSkedEntry->stageNum;
unsigned cmd = inst->curSkedEntry->cmd;
inst->readTid());
}
- reqs[slot_num] = inst_req;
-
try_request = true;
+ } else {
+ DPRINTF(Resource, "No slot available for [tid:%i]: [sn:%i]\n",
+ inst->readTid(), inst->seqNum);
}
+
}
if (try_request) {
int ResourceRequest::maxReqCount = 0;
ResourceRequest::ResourceRequest(Resource *_res)
- : res(_res), inst(NULL), stagePasses(0), valid(false), complSlotNum(-1),
- completed(false), squashed(false), processing(false), memStall(false)
+ : res(_res), inst(NULL), stagePasses(0), valid(false), doneInResource(false),
+ complSlotNum(-1), completed(false), squashed(false), processing(false),
+ memStall(false)
{
}
valid = false;
inst = NULL;
stagePasses = 0;
+ completed = false;
+ doneInResource = false;
+ squashed = false;
+ memStall = false;
+}
+
+void
+ResourceRequest::freeSlot()
+{
+ assert(res);
+
+ // Free Slot So Another Instruction Can Use This Resource
+ res->freeSlot(slotNum);
}
void
if (completed) {
complSlotNum = slotNum;
}
-
- // Free Slot So Another Instruction Can Use This Resource
- res->freeSlot(slotNum);
- // change slot # to -1, since we check slotNum to see if request is
- // still valid
- slotNum = -1;
+ // Flag that the request is finished with this resource; the pipeline
+ // stage will call freeSlot() to release the bandwidth.
+ doneInResource = true;
}
ResourceEvent::ResourceEvent()
*/
void done(bool completed = true);
+ /** Free this request's slot so the resource's bandwidth can be used
+  *  by another instruction. */
+ void freeSlot();
+
/////////////////////////////////////////////
//
// GET RESOURCE REQUEST IDENTIFICATION / INFO
bool valid;
+ /** Set once the instruction is finished with this resource; the
+  *  pipeline stage then calls freeSlot() to release the bandwidth. */
+ bool doneInResource;
+
////////////////////////////////////////
//
// GET RESOURCE REQUEST STATUS FROM VARIABLES
CacheReqPtr cache_req = dynamic_cast<CacheReqPtr>(reqs[slot_num]);
assert(cache_req);
- if (cachePortBlocked) {
+ if (cachePortBlocked &&
+ (cache_req->cmd == InitiateReadData ||
+ cache_req->cmd == InitiateWriteData ||
+ cache_req->cmd == InitSecondSplitRead ||
+ cache_req->cmd == InitSecondSplitWrite)) {
DPRINTF(InOrderCachePort, "Cache Port Blocked. Cannot Access\n");
- cache_req->setCompleted(false);
+ cache_req->done(false);
return;
}
-
DynInstPtr inst = cache_req->inst;
#if TRACING_ON
ThreadID tid = inst->readTid();
acc_type = "read";
#endif
case InitiateWriteData:
-
+ if (cachePortBlocked) {
+ DPRINTF(InOrderCachePort, "Cache Port Blocked. Cannot Access\n");
+ cache_req->done(false);
+ return;
+ }
+
DPRINTF(InOrderCachePort,
"[tid:%u]: [sn:%i] Initiating data %s access to %s for "
"addr. %08p\n", tid, inst->seqNum, acc_type, name(),
"[tid:%i] [sn:%i] cannot access cache, because port "
"is blocked. now waiting to retry request\n", tid,
inst->seqNum);
- cache_req->setCompleted(false);
+ cache_req->done(false);
cachePortBlocked = true;
} else {
DPRINTF(InOrderCachePort,
// Make cache request again since access failed due to
// inability to access the cache port
DPRINTF(InOrderStall, "STALL: \n");
- cache_req->setCompleted(false);
+ cache_req->done(false);
}
}
cache_pkt->cacheReq->getTid(),
cache_pkt->cacheReq->seqNum);
- cache_pkt->cacheReq->done();
+ cache_pkt->cacheReq->freeSlot();
delete cache_pkt;
cpu->wakeCPU();
req_ptr->tlbStall = false;
if (req_ptr->isSquashed()) {
- req_ptr->done();
+ req_ptr->freeSlot();
}
+
+ tlb_res->cpu->wakeCPU();
}
void
fs_req->done();
} else {
DPRINTF(InOrderStall, "STALL: [tid:%i]: NPC not valid\n", tid);
- fs_req->setCompleted(false);
+ fs_req->done(false);
}
}
break;
CacheReqPtr cache_req = dynamic_cast<CacheReqPtr>(reqs[slot_num]);
assert(cache_req);
- if (cachePortBlocked) {
+ if (cachePortBlocked && cache_req->cmd == InitiateFetch) {
DPRINTF(InOrderCachePort, "Cache Port Blocked. Cannot Access\n");
- cache_req->setCompleted(false);
+ cache_req->done(false);
return;
}
// If not, block this request.
if (pendingFetch.size() >= fetchBuffSize) {
DPRINTF(InOrderCachePort, "No room available in fetch buffer.\n");
- cache_req->setCompleted(false);
+ cache_req->done(false);
return;
}
cache_pkt->cacheReq->seqNum);
cache_pkt->cacheReq->done();
+ cache_pkt->cacheReq->freeSlot();
delete cache_pkt;
cpu->wakeCPU();