* Steve Reinhardt
* Ron Dreslinski
* Andreas Sandberg
+ * Nikos Nikoleris
*/
/**
DPRINTF(Cache, "%s %s\n", pkt->print(),
blk ? "hit " + blk->print() : "miss");
+ if (pkt->req->isCacheMaintenance()) {
+ // A cache maintenance operation is always forwarded to the
+ // memory below even if the block is found in dirty state.
+
+ // We defer any changes to the state of the block until we
+ // create and mark as in service the mshr for the downstream
+ // packet.
+ return false;
+ }
if (pkt->isEviction()) {
// We check for presence of block in above caches before issuing
promoteWholeLineWrites(pkt);
+ // Cache maintenance operations have to visit all the caches down
+ // to the specified xbar (PoC, PoU, etc.). Even if a cache above
+ // is responding we forward the packet to the memory below rather
+ // than creating an express snoop.
if (pkt->cacheResponding()) {
// a cache above us (but not where the packet came from) is
// responding to the request, in other words it has the line
blk->status &= ~BlkHWPrefetched;
// Don't notify on SWPrefetch
- if (!pkt->cmd.isSWPrefetch())
+ if (!pkt->cmd.isSWPrefetch()) {
+ assert(!pkt->req->isCacheMaintenance());
next_pf_time = prefetcher->notify(pkt);
+ }
}
if (needsResponse) {
// outstanding requests in MSHRs are simply sunk here
if (pkt->cmd == MemCmd::CleanEvict) {
pendingDelete.reset(pkt);
+ } else if (pkt->cmd == MemCmd::WriteClean) {
+ // A WriteClean should never coalesce with any
+ // outstanding cache maintenance requests.
+
+ // We use forward_time here because there is an
+ // uncached memory write, forwarded to WriteBuffer.
+ allocateWriteBuffer(pkt, forward_time);
} else {
DPRINTF(Cache, "%s coalescing MSHR for %s\n", __func__,
pkt->print());
// know about the request
if (prefetcher) {
// Don't notify on SWPrefetch
- if (!pkt->cmd.isSWPrefetch())
+ if (!pkt->cmd.isSWPrefetch() &&
+ !pkt->req->isCacheMaintenance())
next_pf_time = prefetcher->notify(pkt);
}
}
// internally, and have a sufficiently weak memory
// model, this is probably unnecessary, but at some
// point it must have seemed like we needed it...
- assert(pkt->needsWritable());
- assert(!blk->isWritable());
+ assert((pkt->needsWritable() && !blk->isWritable()) ||
+ pkt->req->isCacheMaintenance());
blk->status &= ~BlkReadable;
}
// Here we are using forward_time, modelling the latency of
if (prefetcher) {
// Don't notify on SWPrefetch
- if (!pkt->cmd.isSWPrefetch())
+ if (!pkt->cmd.isSWPrefetch() &&
+ !pkt->req->isCacheMaintenance())
next_pf_time = prefetcher->notify(pkt);
}
}
if (cpu_pkt->req->isUncacheable() ||
(!blkValid && cpu_pkt->isUpgrade()) ||
- cpu_pkt->cmd == MemCmd::InvalidateReq) {
+ cpu_pkt->cmd == MemCmd::InvalidateReq || cpu_pkt->isClean()) {
// uncacheable requests and upgrades from upper-level caches
// that missed completely just go through as is
return nullptr;
// follow the same flow as in recvTimingReq, and check if a cache
// above us is responding
- if (pkt->cacheResponding()) {
+ if (pkt->cacheResponding() && !pkt->isClean()) {
+ assert(!pkt->req->isCacheInvalidate());
DPRINTF(Cache, "Cache above responding to %s: not responding\n",
pkt->print());
PacketList writebacks;
bool satisfied = access(pkt, blk, lat, writebacks);
+ if (pkt->isClean() && blk && blk->isDirty()) {
+ // A cache clean operation is looking for a dirty
+ // block. If a dirty block is encountered a WriteClean
+ // will update any copies to the path to the memory
+ // until the point of reference.
+ DPRINTF(CacheVerbose, "%s: packet %s found block: %s\n",
+ __func__, pkt->print(), blk->print());
+ PacketPtr wb_pkt = writecleanBlk(blk, pkt->req->getDest());
+ writebacks.push_back(wb_pkt);
+ pkt->setSatisfied();
+ }
+
// handle writebacks resulting from the access here to ensure they
// logically precede anything happening below
doWritebacksAtomic(writebacks);
// If the response indicates that there are no sharers and we
// either had the block already or the response is filling we can
// promote our copy to writable
- if (!pkt->hasSharers() && (is_fill || valid_blk)) {
+ if (!pkt->hasSharers() &&
+ (is_fill || (valid_blk && !pkt->req->isCacheInvalidate()))) {
mshr->promoteWritable();
}
// requests to be discarded
bool is_invalidate = pkt->isInvalidate();
+ // The block was marked as not readable while there was a pending
+ // cache maintenance operation, restore its flag.
+ if (pkt->isClean() && !is_invalidate && valid_blk) {
+ blk->status |= BlkReadable;
+ }
+
// First offset for critical word first calculations
int initial_offset = initial_tgt->pkt->getOffset(blkSize);
DPRINTF(Cache, "processing deferred snoop...\n");
// If the response is invalidating, a snooping target can
// be satisfied if it is also invalidating. If the response is not
- // only invalidating, but more specifically an InvalidateResp, the
- // MSHR was created due to an InvalidateReq and a cache above is
- // waiting to satisfy a WriteLineReq. In this case even an
+ // only invalidating, but more specifically an InvalidateResp and
+ // the MSHR was created due to an InvalidateReq then a cache above
+ // is waiting to satisfy a WriteLineReq. In this case even an
// non-invalidating snoop is added as a target here since this is
// the ordering point. When the InvalidateResp reaches this cache,
// the snooping target will snoop further the cache above with the
// WriteLineReq.
- assert(!(is_invalidate &&
- pkt->cmd != MemCmd::InvalidateResp &&
- !mshr->hasPostInvalidate()));
+ assert(!is_invalidate || pkt->cmd == MemCmd::InvalidateResp ||
+ pkt->req->isCacheMaintenance() ||
+ mshr->hasPostInvalidate());
handleSnoop(tgt_pkt, blk, true, true, mshr->hasPostInvalidate());
break;
if (snoopPkt.isBlockCached()) {
pkt->setBlockCached();
}
+ // If the request was satisfied by snooping the cache
+ // above, mark the original packet as satisfied too.
+ if (snoopPkt.satisfied()) {
+ pkt->setSatisfied();
+ }
} else {
cpuSidePort->sendAtomicSnoop(pkt);
if (!alreadyResponded && pkt->cacheResponding()) {
}
}
- if (!blk || !blk->isValid()) {
+ bool respond = false;
+ bool blk_valid = blk && blk->isValid();
+ if (pkt->isClean()) {
+ if (blk_valid && blk->isDirty()) {
+ DPRINTF(CacheVerbose, "%s: packet (snoop) %s found block: %s\n",
+ __func__, pkt->print(), blk->print());
+ PacketPtr wb_pkt = writecleanBlk(blk, pkt->req->getDest());
+ PacketList writebacks;
+ writebacks.push_back(wb_pkt);
+
+ if (is_timing) {
+ // anything that is merely forwarded pays for the forward
+ // latency and the delay provided by the crossbar
+ Tick forward_time = clockEdge(forwardLatency) +
+ pkt->headerDelay;
+ doWritebacks(writebacks, forward_time);
+ } else {
+ doWritebacksAtomic(writebacks);
+ }
+ pkt->setSatisfied();
+ }
+ } else if (!blk_valid) {
DPRINTF(CacheVerbose, "%s: snoop miss for %s\n", __func__,
pkt->print());
if (is_deferred) {
} else {
DPRINTF(Cache, "%s: snoop hit for %s, old state is %s\n", __func__,
pkt->print(), blk->print());
- }
- chatty_assert(!(isReadOnly && blk->isDirty()),
- "Should never have a dirty block in a read-only cache %s\n",
- name());
+ // We may end up modifying both the block state and the packet (if
+ // we respond in atomic mode), so just figure out what to do now
+ // and then do it later. We respond to all snoops that need
+ // responses provided we have the block in dirty state. The
+ // invalidation itself is taken care of below. We don't respond to
+ // cache maintenance operations as this is done by the destination
+ // xbar.
+ respond = blk->isDirty() && pkt->needsResponse();
- // We may end up modifying both the block state and the packet (if
- // we respond in atomic mode), so just figure out what to do now
- // and then do it later. We respond to all snoops that need
- // responses provided we have the block in dirty state. The
- // invalidation itself is taken care of below.
- bool respond = blk->isDirty() && pkt->needsResponse();
- bool have_writable = blk->isWritable();
+ chatty_assert(!(isReadOnly && blk->isDirty()), "Should never have "
+ "a dirty block in a read-only cache %s\n", name());
+ }
// Invalidate any prefetch's from below that would strip write permissions
// MemCmd::HardPFReq is only observed by upstream caches. After missing
// Exclusive to Shared, or remain in Shared
if (!pkt->req->isUncacheable())
blk->status &= ~BlkWritable;
+ DPRINTF(Cache, "new state is %s\n", blk->print());
}
if (respond) {
// memory, and also prevent any memory from even seeing the
// request
pkt->setCacheResponding();
- if (have_writable) {
+ if (!pkt->isClean() && blk->isWritable()) {
// inform the cache hierarchy that this cache had the line
// in the Modified state so that we avoid unnecessary
// invalidations (see Packet::setResponderHadWritable)
// Do this last in case it deallocates block data or something
// like that
- if (invalidate) {
+ if (blk_valid && invalidate) {
invalidateBlock(blk);
+ DPRINTF(Cache, "new state is %s\n", blk->print());
}
- DPRINTF(Cache, "new state is %s\n", blk->print());
-
return snoop_delay;
}
return;
}
+ // Bypass any existing cache maintenance requests if the request
+ // has been satisfied already (i.e., the dirty block has been
+ // found).
+ if (mshr && pkt->req->isCacheMaintenance() && pkt->satisfied()) {
+ return;
+ }
+
// Let the MSHR itself track the snoop and decide whether we want
// to go ahead and do the regular cache snoop
if (mshr && mshr->handleSnoop(pkt, order++)) {
false, false);
}
- if (invalidate) {
+ if (invalidate && wb_pkt->cmd != MemCmd::WriteClean) {
// Invalidation trumps our writeback... discard here
// Note: markInService will remove entry from writeback buffer.
markInService(wb_entry);
// as forwarded packets may already have existing state
pkt->pushSenderState(mshr);
+ if (pkt->isClean() && blk && blk->isDirty()) {
+ // A cache clean operation is looking for a dirty block. Mark
+ // the packet so that the destination xbar can determine that
+ // there will be a follow-up write packet as well.
+ pkt->setSatisfied();
+ }
+
if (!memSidePort->sendTimingReq(pkt)) {
// we are awaiting a retry, but we
// delete the packet and will be creating a new packet
bool pending_modified_resp = !pkt->hasSharers() &&
pkt->cacheResponding();
markInService(mshr, pending_modified_resp);
+ if (pkt->isClean() && blk && blk->isDirty()) {
+ // A cache clean operation is looking for a dirty
+ // block. If a dirty block is encountered a WriteClean
+ // will update any copies to the path to the memory
+ // until the point of reference.
+ DPRINTF(CacheVerbose, "%s: packet %s found block: %s\n",
+ __func__, pkt->print(), blk->print());
+ PacketPtr wb_pkt = writecleanBlk(blk, pkt->req->getDest());
+ PacketList writebacks;
+ writebacks.push_back(wb_pkt);
+ doWritebacks(writebacks, 0);
+ }
+
return false;
}
}