if (mshr->threadNum != 0/*pkt->req->getThreadNum()*/) {
mshr->threadNum = -1;
}
- mshr->allocateTarget(pkt, true);
+ mshr->allocateTarget(pkt);
if (mshr->getNumTargets() == numTarget) {
noTargetMSHR = mshr;
setBlocked(Blocked_NoTargets);
if (isCacheFill) {
PacketList writebacks;
blk = handleFill(busPkt, blk, writebacks);
- bool status = satisfyCpuSideRequest(pkt, blk);
- assert(status);
+ satisfyCpuSideRequest(pkt, blk);
delete busPkt;
// Handle writebacks if needed
// There can be many matching outstanding writes.
std::vector<MSHR*> writes;
- writeBuffer.findMatches(blk_addr, writes);
+ // Keep the findMatches() call outside assert(): with NDEBUG the whole
+ // assert expression -- including the call and its side effect of
+ // populating 'writes' -- would be compiled away.
+ bool found_writes = writeBuffer.findMatches(blk_addr, writes);
+ assert(!found_writes);
+/* Need to change this to iterate through targets in mshr??
for (int i = 0; i < writes.size(); ++i) {
MSHR *mshr = writes[i];
if (pkt->checkFunctional(mshr->addr, mshr->size, mshr->writeData))
return;
}
+*/
otherSidePort->checkAndSendFunctional(pkt);
}
}
template<class TagStore, class Coherence>
-bool
+void
Cache<TagStore,Coherence>::satisfyCpuSideRequest(PacketPtr pkt, BlkType *blk)
{
- if (blk && (pkt->needsExclusive() ? blk->isWritable() : blk->isValid())) {
- assert(pkt->isWrite() || pkt->isReadWrite() || pkt->isRead());
- assert(pkt->getOffset(blkSize) + pkt->getSize() <= blkSize);
-
- if (pkt->isWrite()) {
- if (blk->checkWrite(pkt)) {
- blk->status |= BlkDirty;
- pkt->writeDataToBlock(blk->data, blkSize);
- }
- } else if (pkt->isReadWrite()) {
- cmpAndSwap(blk, pkt);
- } else {
- if (pkt->isLocked()) {
- blk->trackLoadLocked(pkt);
- }
- pkt->setDataFromBlock(blk->data, blkSize);
+ assert(blk);
+ assert(pkt->needsExclusive() ? blk->isWritable() : blk->isValid());
+ assert(pkt->isWrite() || pkt->isReadWrite() || pkt->isRead());
+ assert(pkt->getOffset(blkSize) + pkt->getSize() <= blkSize);
+
+ if (pkt->isWrite()) {
+ if (blk->checkWrite(pkt)) {
+ blk->status |= BlkDirty;
+ pkt->writeDataToBlock(blk->data, blkSize);
}
-
- return true;
+ } else if (pkt->isReadWrite()) {
+ cmpAndSwap(blk, pkt);
} else {
- return false;
+ if (pkt->isLocked()) {
+ blk->trackLoadLocked(pkt);
+ }
+ pkt->setDataFromBlock(blk->data, blkSize);
}
}
-template<class TagStore, class Coherence>
-bool
-Cache<TagStore,Coherence>::satisfyTarget(MSHR::Target *target, BlkType *blk)
-{
- assert(target != NULL);
- assert(target->isCpuSide());
- return satisfyCpuSideRequest(target->pkt, blk);
-}
-
template<class TagStore, class Coherence>
bool
Cache<TagStore,Coherence>::satisfyMSHR(MSHR *mshr, PacketPtr pkt,
while (mshr->hasTargets()) {
MSHR::Target *target = mshr->getTarget();
- if (!satisfyTarget(target, blk)) {
- // Invalid access, need to do another request
- // can occur if block is invalidated, or not correct
- // permissions
- MSHRQueue *mq = mshr->queue;
- mq->markPending(mshr);
- mshr->order = order++;
- requestMemSideBus((RequestCause)mq->index, pkt->finishTime);
- return false;
- }
+ if (target->isCpuSide()) {
+ satisfyCpuSideRequest(target->pkt, blk);
+ // How many bytes past the first request is this one
+ int transfer_offset =
+ target->pkt->getOffset(blkSize) - initial_offset;
+ if (transfer_offset < 0) {
+ transfer_offset += blkSize;
+ }
+ // If critical word (no offset) return first word time.
+ // Note: the conditional must be parenthesized -- ?: binds more
+ // loosely than +, so without parens the hit latency would be
+ // folded into the condition instead of added to the result.
+ Tick completion_time = tags->getHitLatency() +
+ (transfer_offset ? pkt->finishTime : pkt->firstWordTime);
- // How many bytes pass the first request is this one
- int transfer_offset = target->pkt->getOffset(blkSize) - initial_offset;
- if (transfer_offset < 0) {
- transfer_offset += blkSize;
+ if (!target->pkt->req->isUncacheable()) {
+ missLatency[target->pkt->cmdToIndex()][0/*pkt->req->getThreadNum()*/] +=
+ completion_time - target->time;
+ }
+ target->pkt->makeTimingResponse();
+ cpuSidePort->respond(target->pkt, completion_time);
+ } else {
+ // response to snoop request
+ DPRINTF(Cache, "processing deferred snoop...\n");
+ handleSnoop(target->pkt, blk, true);
}
- // If critical word (no offset) return first word time
- Tick completion_time = tags->getHitLatency() +
- transfer_offset ? pkt->finishTime : pkt->firstWordTime;
-
- if (!target->pkt->req->isUncacheable()) {
- missLatency[target->pkt->cmdToIndex()][0/*pkt->req->getThreadNum()*/] +=
- completion_time - target->time;
- }
- target->pkt->makeTimingResponse();
- cpuSidePort->respond(target->pkt, completion_time);
mshr->popTarget();
}
+ if (mshr->promoteDeferredTargets()) {
+ MSHRQueue *mq = mshr->queue;
+ mq->markPending(mshr);
+ mshr->order = order++;
+ requestMemSideBus((RequestCause)mq->index, pkt->finishTime);
+ return false;
+ }
+
return true;
}
Tick time = curTick + hitLatency;
MSHR *mshr = dynamic_cast<MSHR*>(pkt->senderState);
assert(mshr);
+
if (pkt->result == Packet::Nacked) {
//pkt->reinitFromRequest();
warn("NACKs from devices not connected to the same bus "
}
assert(pkt->result != Packet::BadAddress);
assert(pkt->result == Packet::Success);
- DPRINTF(Cache, "Handling reponse to %x\n", pkt->getAddr());
+ DPRINTF(Cache, "Handling response to %x\n", pkt->getAddr());
MSHRQueue *mq = mshr->queue;
bool wasFull = mq->isFull();
MSHR *mshr = mshrQueue.findMatch(blk_addr);
// better not be snooping a request that conflicts with something
// we have outstanding...
- assert(!mshr || !mshr->inService);
+ if (mshr && mshr->inService) {
+ assert(mshr->getNumTargets() < numTarget); //handle later
+ mshr->allocateSnoopTarget(pkt);
+ assert(mshr->getNumTargets() < numTarget); //handle later
+ return;
+ }
//We also need to check the writeback buffers and handle those
std::vector<MSHR *> writebacks;
for (int i=0; i<writebacks.size(); i++) {
mshr = writebacks[i];
assert(!mshr->isUncacheable());
+ assert(mshr->getNumTargets() == 1);
+ PacketPtr wb_pkt = mshr->getTarget()->pkt;
+ assert(wb_pkt->cmd == MemCmd::Writeback);
if (pkt->isRead()) {
pkt->assertMemInhibit();
// the packet's invalidate flag is set...
assert(pkt->isInvalidate());
}
- doTimingSupplyResponse(pkt, mshr->writeData);
+ doTimingSupplyResponse(pkt, wb_pkt->getPtr<uint8_t>());
}
if (pkt->isInvalidate()) {
waitingOnRetry = !success;
if (waitingOnRetry) {
- DPRINTF(CachePort, "%s now waiting on a retry\n", name());
+ DPRINTF(CachePort, "now waiting on a retry\n");
} else {
myCache()->markInService(mshr);
}
if (!waitingOnRetry) {
if (isBusRequested()) {
// more requests/writebacks: rerequest ASAP
- DPRINTF(CachePort, "%s still more MSHR requests to send\n",
- name());
+ DPRINTF(CachePort, "still more MSHR requests to send\n");
sendEvent->schedule(curTick+1);
} else if (!transmitList.empty()) {
// deferred packets: rerequest bus, but possibly not until later
// Don't know of a case where we would allocate a new MSHR for a
// snoop (mem0-side request), so set cpuSide to true here.
targets.push_back(Target(target, true));
+ assert(deferredTargets.empty());
+ deferredNeedsExclusive = false;
+ pendingInvalidate = false;
}
void
MSHR::deallocate()
{
assert(targets.empty());
+ assert(deferredTargets.empty());
assert(ntargets == 0);
inService = false;
//allocIter = NULL;
* Adds a target to an MSHR
*/
void
-MSHR::allocateTarget(PacketPtr target, bool cpuSide)
+MSHR::allocateTarget(PacketPtr target)
{
- //If we append an invalidate and we issued a read to the bus,
- //but now have some pending writes, we need to move
- //the invalidate to before the first non-read
- if (inService && !inServiceForExclusive && needsExclusive
- && !cpuSide && target->isInvalidate()) {
- std::list<Target> temp;
-
- while (!targets.empty()) {
- if (targets.front().pkt->needsExclusive()) break;
- //Place on top of temp stack
- temp.push_front(targets.front());
- //Remove from targets
- targets.pop_front();
+ if (inService) {
+ if (!deferredTargets.empty() || pendingInvalidate ||
+ (!needsExclusive && target->needsExclusive())) {
+ // need to put on deferred list
+ deferredTargets.push_back(Target(target, true));
+ if (target->needsExclusive()) {
+ deferredNeedsExclusive = true;
+ }
+ } else {
+ // still OK to append to outstanding request
+ targets.push_back(Target(target, true));
+ }
+ } else {
+ if (target->needsExclusive()) {
+ needsExclusive = true;
}
- //Now that we have all the reads off until first non-read, we can
- //place the invalidate on
- targets.push_front(Target(target, cpuSide));
+ targets.push_back(Target(target, true));
+ }
- //Now we pop off the temp_stack and put them back
- while (!temp.empty()) {
- targets.push_front(temp.front());
- temp.pop_front();
- }
+ ++ntargets;
+}
+
+void
+MSHR::allocateSnoopTarget(PacketPtr target)
+{
+ assert(inService); // don't bother to call otherwise
+
+ if (pendingInvalidate) {
+ // a prior snoop has already appended an invalidation, so
+ // logically we don't have the block anymore...
+ return;
}
- else {
- targets.push_back(Target(target, cpuSide));
+
+ if (needsExclusive) {
+ // We're awaiting an exclusive copy, so ownership is pending.
+ // It's up to us to respond once the data arrives.
+ target->assertMemInhibit();
+ } else if (target->needsExclusive()) {
+ // This transaction will take away our pending copy
+ pendingInvalidate = true;
+ } else {
+ // If we're not going to supply data or perform an
+ // invalidation, we don't need to save this.
+ return;
}
+ targets.push_back(Target(target, false));
++ntargets;
+}
+
+
+bool
+MSHR::promoteDeferredTargets()
+{
+ if (deferredTargets.empty()) {
+ return false;
+ }
+
+ assert(targets.empty());
+ targets = deferredTargets;
+ deferredTargets.clear();
assert(targets.size() == ntargets);
- needsExclusive = needsExclusive || target->needsExclusive();
+ needsExclusive = deferredNeedsExclusive;
+ pendingInvalidate = false;
+ deferredNeedsExclusive = false;
+
+ return true;
}