From: Steve Reinhardt Date: Mon, 25 Jun 2007 00:32:31 +0000 (-0700) Subject: Better handling of deferred targets. X-Git-Tag: m5_2.0_beta4~195^2~50^2~35 X-Git-Url: https://git.libre-soc.org/?a=commitdiff_plain;h=47bce8ef7875420b2e26ebd834ed0d4146b65d5b;p=gem5.git Better handling of deferred targets. --HG-- extra : convert_revision : 0fbc28c32c1eeb3dd672df14c1d53bd516f81d0f --- diff --git a/src/mem/cache/base_cache.cc b/src/mem/cache/base_cache.cc index 1f5182574..ac577f5a2 100644 --- a/src/mem/cache/base_cache.cc +++ b/src/mem/cache/base_cache.cc @@ -42,8 +42,7 @@ using namespace std; BaseCache::CachePort::CachePort(const std::string &_name, BaseCache *_cache) : SimpleTimingPort(_name, _cache), cache(_cache), otherPort(NULL), - blocked(false), waitingOnRetry(false), mustSendRetry(false), - requestCauses(0) + blocked(false), mustSendRetry(false), requestCauses(0) { } diff --git a/src/mem/cache/base_cache.hh b/src/mem/cache/base_cache.hh index 27134b2ad..b35fc0811 100644 --- a/src/mem/cache/base_cache.hh +++ b/src/mem/cache/base_cache.hh @@ -118,8 +118,6 @@ class BaseCache : public MemObject bool blocked; - bool waitingOnRetry; - bool mustSendRetry; /** diff --git a/src/mem/cache/cache.hh b/src/mem/cache/cache.hh index a93b761ec..2a95dc53c 100644 --- a/src/mem/cache/cache.hh +++ b/src/mem/cache/cache.hh @@ -182,8 +182,7 @@ class Cache : public BaseCache BlkType *handleFill(PacketPtr pkt, BlkType *blk, PacketList &writebacks); - bool satisfyCpuSideRequest(PacketPtr pkt, BlkType *blk); - bool satisfyTarget(MSHR::Target *target, BlkType *blk); + void satisfyCpuSideRequest(PacketPtr pkt, BlkType *blk); bool satisfyMSHR(MSHR *mshr, PacketPtr pkt, BlkType *blk); void doTimingSupplyResponse(PacketPtr req_pkt, uint8_t *blk_data); diff --git a/src/mem/cache/cache_impl.hh b/src/mem/cache/cache_impl.hh index 0649b5061..b4d334249 100644 --- a/src/mem/cache/cache_impl.hh +++ b/src/mem/cache/cache_impl.hh @@ -368,7 +368,7 @@ Cache::timingAccess(PacketPtr pkt) if (mshr->threadNum != 0/*pkt->req->getThreadNum()*/) { mshr->threadNum = -1; } - mshr->allocateTarget(pkt, true); + mshr->allocateTarget(pkt); if (mshr->getNumTargets() == numTarget) { noTargetMSHR = mshr; setBlocked(Blocked_NoTargets); @@ -483,8 +483,7 @@ Cache::atomicAccess(PacketPtr pkt) if (isCacheFill) { PacketList writebacks; blk = handleFill(busPkt, blk, writebacks); - bool status = satisfyCpuSideRequest(pkt, blk); - assert(status); + satisfyCpuSideRequest(pkt, blk); delete busPkt; // Handle writebacks if needed @@ -538,12 +537,14 @@ Cache::functionalAccess(PacketPtr pkt, // There can be many matching outstanding writes. std::vector writes; - writeBuffer.findMatches(blk_addr, writes); + assert(!writeBuffer.findMatches(blk_addr, writes)); +/* Need to change this to iterate through targets in mshr?? for (int i = 0; i < writes.size(); ++i) { MSHR *mshr = writes[i]; if (pkt->checkFunctional(mshr->addr, mshr->size, mshr->writeData)) return; } +*/ otherSidePort->checkAndSendFunctional(pkt); } @@ -557,43 +558,30 @@ Cache::functionalAccess(PacketPtr pkt, template -bool +void Cache::satisfyCpuSideRequest(PacketPtr pkt, BlkType *blk) { - if (blk && (pkt->needsExclusive() ? 
blk->isWritable() : blk->isValid())) { - assert(pkt->isWrite() || pkt->isReadWrite() || pkt->isRead()); - assert(pkt->getOffset(blkSize) + pkt->getSize() <= blkSize); - - if (pkt->isWrite()) { - if (blk->checkWrite(pkt)) { - blk->status |= BlkDirty; - pkt->writeDataToBlock(blk->data, blkSize); - } - } else if (pkt->isReadWrite()) { - cmpAndSwap(blk, pkt); - } else { - if (pkt->isLocked()) { - blk->trackLoadLocked(pkt); - } - pkt->setDataFromBlock(blk->data, blkSize); + assert(blk); + assert(pkt->needsExclusive() ? blk->isWritable() : blk->isValid()); + assert(pkt->isWrite() || pkt->isReadWrite() || pkt->isRead()); + assert(pkt->getOffset(blkSize) + pkt->getSize() <= blkSize); + + if (pkt->isWrite()) { + if (blk->checkWrite(pkt)) { + blk->status |= BlkDirty; + pkt->writeDataToBlock(blk->data, blkSize); } - - return true; + } else if (pkt->isReadWrite()) { + cmpAndSwap(blk, pkt); } else { - return false; + if (pkt->isLocked()) { + blk->trackLoadLocked(pkt); + } + pkt->setDataFromBlock(blk->data, blkSize); } } -template -bool -Cache::satisfyTarget(MSHR::Target *target, BlkType *blk) -{ - assert(target != NULL); - assert(target->isCpuSide()); - return satisfyCpuSideRequest(target->pkt, blk); -} - template bool Cache::satisfyMSHR(MSHR *mshr, PacketPtr pkt, @@ -611,37 +599,42 @@ Cache::satisfyMSHR(MSHR *mshr, PacketPtr pkt, while (mshr->hasTargets()) { MSHR::Target *target = mshr->getTarget(); - if (!satisfyTarget(target, blk)) { - // Invalid access, need to do another request - // can occur if block is invalidated, or not correct - // permissions - MSHRQueue *mq = mshr->queue; - mq->markPending(mshr); - mshr->order = order++; - requestMemSideBus((RequestCause)mq->index, pkt->finishTime); - return false; - } + if (target->isCpuSide()) { + satisfyCpuSideRequest(target->pkt, blk); + // How many bytes pass the first request is this one + int transfer_offset = + target->pkt->getOffset(blkSize) - initial_offset; + if (transfer_offset < 0) { + transfer_offset += blkSize; + } + // If critical word (no offset) return first word time + Tick completion_time = tags->getHitLatency() + + transfer_offset ? pkt->finishTime : pkt->firstWordTime; - // How many bytes pass the first request is this one - int transfer_offset = target->pkt->getOffset(blkSize) - initial_offset; - if (transfer_offset < 0) { - transfer_offset += blkSize; + if (!target->pkt->req->isUncacheable()) { + missLatency[target->pkt->cmdToIndex()][0/*pkt->req->getThreadNum()*/] += + completion_time - target->time; + } + target->pkt->makeTimingResponse(); + cpuSidePort->respond(target->pkt, completion_time); + } else { + // response to snoop request + DPRINTF(Cache, "processing deferred snoop...\n"); + handleSnoop(target->pkt, blk, true); } - // If critical word (no offset) return first word time - Tick completion_time = tags->getHitLatency() + - transfer_offset ? 
pkt->finishTime : pkt->firstWordTime; - - if (!target->pkt->req->isUncacheable()) { - missLatency[target->pkt->cmdToIndex()][0/*pkt->req->getThreadNum()*/] += - completion_time - target->time; - } - target->pkt->makeTimingResponse(); - cpuSidePort->respond(target->pkt, completion_time); mshr->popTarget(); } + if (mshr->promoteDeferredTargets()) { + MSHRQueue *mq = mshr->queue; + mq->markPending(mshr); + mshr->order = order++; + requestMemSideBus((RequestCause)mq->index, pkt->finishTime); + return false; + } + return true; } @@ -653,6 +646,7 @@ Cache::handleResponse(PacketPtr pkt) Tick time = curTick + hitLatency; MSHR *mshr = dynamic_cast(pkt->senderState); assert(mshr); + if (pkt->result == Packet::Nacked) { //pkt->reinitFromRequest(); warn("NACKs from devices not connected to the same bus " @@ -661,7 +655,7 @@ Cache::handleResponse(PacketPtr pkt) } assert(pkt->result != Packet::BadAddress); assert(pkt->result == Packet::Success); - DPRINTF(Cache, "Handling reponse to %x\n", pkt->getAddr()); + DPRINTF(Cache, "Handling response to %x\n", pkt->getAddr()); MSHRQueue *mq = mshr->queue; bool wasFull = mq->isFull(); @@ -883,7 +877,12 @@ Cache::snoopTiming(PacketPtr pkt) MSHR *mshr = mshrQueue.findMatch(blk_addr); // better not be snooping a request that conflicts with something // we have outstanding... - assert(!mshr || !mshr->inService); + if (mshr && mshr->inService) { + assert(mshr->getNumTargets() < numTarget); //handle later + mshr->allocateSnoopTarget(pkt); + assert(mshr->getNumTargets() < numTarget); //handle later + return; + } //We also need to check the writeback buffers and handle those std::vector writebacks; @@ -895,6 +894,9 @@ Cache::snoopTiming(PacketPtr pkt) for (int i=0; iisUncacheable()); + assert(mshr->getNumTargets() == 1); + PacketPtr wb_pkt = mshr->getTarget()->pkt; + assert(wb_pkt->cmd == MemCmd::Writeback); if (pkt->isRead()) { pkt->assertMemInhibit(); @@ -906,7 +908,7 @@ Cache::snoopTiming(PacketPtr pkt) // the packet's invalidate flag is set... assert(pkt->isInvalidate()); } - doTimingSupplyResponse(pkt, mshr->writeData); + doTimingSupplyResponse(pkt, wb_pkt->getPtr()); } if (pkt->isInvalidate()) { @@ -1208,7 +1210,7 @@ Cache::MemSidePort::sendPacket() waitingOnRetry = !success; if (waitingOnRetry) { - DPRINTF(CachePort, "%s now waiting on a retry\n", name()); + DPRINTF(CachePort, "now waiting on a retry\n"); } else { myCache()->markInService(mshr); } @@ -1220,8 +1222,7 @@ Cache::MemSidePort::sendPacket() if (!waitingOnRetry) { if (isBusRequested()) { // more requests/writebacks: rerequest ASAP - DPRINTF(CachePort, "%s still more MSHR requests to send\n", - name()); + DPRINTF(CachePort, "still more MSHR requests to send\n"); sendEvent->schedule(curTick+1); } else if (!transmitList.empty()) { // deferred packets: rerequest bus, but possibly not until later diff --git a/src/mem/cache/miss/mshr.cc b/src/mem/cache/miss/mshr.cc index 1f2c05a6e..24ff3b33c 100644 --- a/src/mem/cache/miss/mshr.cc +++ b/src/mem/cache/miss/mshr.cc @@ -68,12 +68,16 @@ MSHR::allocate(Addr _addr, int _size, PacketPtr target) // Don't know of a case where we would allocate a new MSHR for a // snoop (mem0-side request), so set cpuSide to true here. 
targets.push_back(Target(target, true)); + assert(deferredTargets.empty()); + deferredNeedsExclusive = false; + pendingInvalidate = false; } void MSHR::deallocate() { assert(targets.empty()); + assert(deferredTargets.empty()); assert(ntargets == 0); inService = false; //allocIter = NULL; @@ -84,41 +88,77 @@ MSHR::deallocate() * Adds a target to an MSHR */ void -MSHR::allocateTarget(PacketPtr target, bool cpuSide) +MSHR::allocateTarget(PacketPtr target) { - //If we append an invalidate and we issued a read to the bus, - //but now have some pending writes, we need to move - //the invalidate to before the first non-read - if (inService && !inServiceForExclusive && needsExclusive - && !cpuSide && target->isInvalidate()) { - std::list temp; - - while (!targets.empty()) { - if (targets.front().pkt->needsExclusive()) break; - //Place on top of temp stack - temp.push_front(targets.front()); - //Remove from targets - targets.pop_front(); + if (inService) { + if (!deferredTargets.empty() || pendingInvalidate || + (!needsExclusive && target->needsExclusive())) { + // need to put on deferred list + deferredTargets.push_back(Target(target, true)); + if (target->needsExclusive()) { + deferredNeedsExclusive = true; + } + } else { + // still OK to append to outstanding request + targets.push_back(Target(target, true)); + } + } else { + if (target->needsExclusive()) { + needsExclusive = true; } - //Now that we have all the reads off until first non-read, we can - //place the invalidate on - targets.push_front(Target(target, cpuSide)); + targets.push_back(Target(target, true)); + } - //Now we pop off the temp_stack and put them back - while (!temp.empty()) { - targets.push_front(temp.front()); - temp.pop_front(); - } + ++ntargets; +} + +void +MSHR::allocateSnoopTarget(PacketPtr target) +{ + assert(inService); // don't bother to call otherwise + + if (pendingInvalidate) { + // a prior snoop has already appended an invalidation, so + // logically we don't have the block anymore... + return; } - else { - targets.push_back(Target(target, cpuSide)); + + if (needsExclusive) { + // We're awaiting an exclusive copy, so ownership is pending. + // It's up to us to respond once the data arrives. + target->assertMemInhibit(); + } else if (target->needsExclusive()) { + // This transaction will take away our pending copy + pendingInvalidate = true; + } else { + // If we're not going to supply data or perform an + // invalidation, we don't need to save this. + return; } + targets.push_back(Target(target, false)); ++ntargets; +} + + +bool +MSHR::promoteDeferredTargets() +{ + if (deferredTargets.empty()) { + return false; + } + + assert(targets.empty()); + targets = deferredTargets; + deferredTargets.clear(); assert(targets.size() == ntargets); - needsExclusive = needsExclusive || target->needsExclusive(); + needsExclusive = deferredNeedsExclusive; + pendingInvalidate = false; + deferredNeedsExclusive = false; + + return true; } diff --git a/src/mem/cache/miss/mshr.hh b/src/mem/cache/miss/mshr.hh index 195438e46..f4e090a12 100644 --- a/src/mem/cache/miss/mshr.hh +++ b/src/mem/cache/miss/mshr.hh @@ -85,9 +85,6 @@ class MSHR : public Packet::SenderState /** Size of the request. */ int size; - /** Data associated with the request (if a write). */ - uint8_t *writeData; - /** True if the request has been sent to the bus. */ bool inService; @@ -95,12 +92,13 @@ class MSHR : public Packet::SenderState bool isCacheFill; /** True if we need to get an exclusive copy of the block. 
*/ bool needsExclusive; + /** True if the request is uncacheable */ bool _isUncacheable; - /** True if the request that has been sent to the bus is for en - * exclusive copy. */ - bool inServiceForExclusive; + bool deferredNeedsExclusive; + bool pendingInvalidate; + /** Thread number of the miss. */ short threadNum; /** The number of currently allocated targets. */ @@ -124,6 +122,8 @@ private: /** List of all requests that match the address */ TargetList targets; + TargetList deferredTargets; + public: bool isUncacheable() { return _isUncacheable; } @@ -153,7 +153,8 @@ public: * Add a request to the list of targets. * @param target The target. */ - void allocateTarget(PacketPtr target, bool cpuSide); + void allocateTarget(PacketPtr target); + void allocateSnoopTarget(PacketPtr target); /** A simple constructor. */ MSHR(); @@ -201,6 +202,8 @@ public: return tgt->isCpuSide() && !tgt->pkt->needsResponse(); } + bool promoteDeferredTargets(); + /** * Prints the contents of this MSHR to stderr. */
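
The patch is easier to follow with the new MSHR target-deferral policy pulled out on its own. Below is a standalone C++ sketch that mirrors what the new allocateTarget(), allocateSnoopTarget() and promoteDeferredTargets() code in mshr.cc does: requests that cannot be folded into the outstanding bus request are parked on a deferred list, snoops that hit an in-service MSHR either get remembered (so we can supply data later) or mark the pending copy as invalidated, and the deferred targets are promoted into a fresh request once the current response has been consumed. This is an illustration only, not the gem5 classes: SimpleMSHR and Req are made-up names, and the real code carries packets, latencies, ntargets bookkeeping and the assertMemInhibit() handshake that are omitted here.

// Standalone sketch (not the gem5 classes themselves) of the deferred-target
// policy this patch introduces.  SimpleMSHR and Req are illustrative names;
// the real MSHR carries packets, latencies and the assertMemInhibit()
// handshake that are omitted here.

#include <cassert>
#include <iostream>
#include <vector>

struct Req {
    int id;
    bool needsExclusive;   // write-like request: needs a writable copy
    bool fromCpuSide;      // false => snoop arriving from the memory side
};

class SimpleMSHR {
  public:
    bool inService = false;             // request has been sent downstream
    bool needsExclusive = false;        // outstanding request wants a writable copy
    bool pendingInvalidate = false;     // a snoop will take the pending copy away
    bool deferredNeedsExclusive = false;
    std::vector<Req> targets;           // satisfied by the outstanding request
    std::vector<Req> deferredTargets;   // satisfied by a follow-up request

    // CPU-side requests: fold into the outstanding request when legal,
    // otherwise park on the deferred list (mirrors MSHR::allocateTarget()).
    void allocateTarget(const Req &r) {
        if (inService &&
            (!deferredTargets.empty() || pendingInvalidate ||
             (!needsExclusive && r.needsExclusive))) {
            deferredTargets.push_back(r);
            if (r.needsExclusive)
                deferredNeedsExclusive = true;
        } else {
            if (!inService && r.needsExclusive)
                needsExclusive = true;
            targets.push_back(r);
        }
    }

    // Snoops that hit an in-service MSHR (mirrors MSHR::allocateSnoopTarget()).
    void allocateSnoopTarget(const Req &r) {
        assert(inService);
        if (pendingInvalidate)
            return;                     // block already logically given up
        if (needsExclusive) {
            // We will own the data when the response arrives, so remember
            // to supply it to this snooper (assertMemInhibit() in gem5).
        } else if (r.needsExclusive) {
            pendingInvalidate = true;   // snoop steals our pending copy
        } else {
            return;                     // nothing to supply or invalidate
        }
        targets.push_back(r);           // recorded as a memory-side target
    }

    // Called once the response has satisfied and popped all 'targets';
    // returns true if a new request must be issued for the deferred ones
    // (mirrors MSHR::promoteDeferredTargets()).
    bool promoteDeferredTargets() {
        if (deferredTargets.empty())
            return false;
        assert(targets.empty());
        targets.swap(deferredTargets);
        needsExclusive = deferredNeedsExclusive;
        pendingInvalidate = false;
        deferredNeedsExclusive = false;
        return true;
    }
};

int main() {
    SimpleMSHR m;
    m.allocateTarget({0, false, true});   // read miss allocates the MSHR
    m.inService = true;                   // shared read is sent downstream
    m.allocateTarget({1, true, true});    // a later write cannot be folded in
    assert(m.deferredTargets.size() == 1 && m.deferredNeedsExclusive);

    m.targets.clear();                    // response satisfied the read target

    bool reissue = m.promoteDeferredTargets();
    std::cout << "re-request needed: " << std::boolalpha << reissue
              << ", now exclusive: " << m.needsExclusive << std::endl;
    return 0;
}

The key invariant is the same one the patch relies on in satisfyMSHR(): once anything sits on the deferred list, every later CPU-side request is deferred as well, so the ordering between the two lists is preserved and the deferred batch can safely be re-issued as a single new request after the current response is drained.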