/*
+ * Copyright (c) 2010 ARM Limited
+ * All rights reserved.
+ *
+ * The license below extends only to copyright in the software and shall
+ * not be construed as granting a license to any other intellectual
+ * property including but not limited to intellectual property relating
+ * to a hardware implementation of the functionality of the software
+ * licensed hereunder. You may use the software subject to the license
+ * terms below provided that you ensure that this notice is replicated
+ * unmodified and in its entirety in all distributions of the software,
+ * modified or unmodified, in source code or in binary form.
+ *
* Copyright (c) 2002-2005 The Regents of The University of Michigan
+ * Copyright (c) 2010 Advanced Micro Devices, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* Cache definitions.
*/
-#include "sim/host.hh"
#include "base/fast_alloc.hh"
#include "base/misc.hh"
-#include "base/range_ops.hh"
-
-#include "mem/cache/cache.hh"
+#include "base/range.hh"
+#include "base/types.hh"
#include "mem/cache/blk.hh"
+#include "mem/cache/cache.hh"
#include "mem/cache/mshr.hh"
#include "mem/cache/prefetch/base.hh"
-
-#include "sim/sim_exit.hh" // for SimExitEvent
-
+#include "sim/sim_exit.hh"
template<class TagStore>
Cache<TagStore>::Cache(const Params *p, TagStore *tags, BasePrefetcher *pf)
: BaseCache(p),
- prefetchAccess(p->prefetch_access),
tags(tags),
prefetcher(pf),
doFastWrites(true),
- prefetchMiss(p->prefetch_miss)
+ prefetchOnAccess(p->prefetch_on_access)
{
tempBlock = new BlkType();
tempBlock->data = new uint8_t[blkSize];
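+ // tempBlock is scratch storage used by handleFill() when a response
+ // arrives and no replaceable block can be allocated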
cpuSidePort = new CpuSidePort(p->name + "-cpu_side_port", this,
- "CpuSidePort",
- p->cpu_side_filter_ranges);
+ "CpuSidePort");
memSidePort = new MemSidePort(p->name + "-mem_side_port", this,
- "MemSidePort",
- p->mem_side_filter_ranges);
+ "MemSidePort");
cpuSidePort->setOtherPort(memSidePort);
memSidePort->setOtherPort(cpuSidePort);
tags->setCache(this);
- prefetcher->setCache(this);
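+ // the prefetcher is optional, so every use is guarded by a NULL check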
+ if (prefetcher)
+ prefetcher->setCache(this);
}
template<class TagStore>
void
Cache<TagStore>::regStats()
{
BaseCache::regStats();
tags->regStats(name());
- prefetcher->regStats(name());
+ if (prefetcher)
+ prefetcher->regStats(name());
}
template<class TagStore>
} else if (if_name == "functional") {
CpuSidePort *funcPort =
new CpuSidePort(name() + "-cpu_side_funcport", this,
- "CpuSideFuncPort",
- std::vector<Range<Addr> >());
+ "CpuSideFuncPort");
funcPort->setOtherPort(memSidePort);
return funcPort;
} else {
panic("Invalid size for conditional read/write\n");
}
- if (overwrite_mem)
+ if (overwrite_mem) {
std::memcpy(blk_data, &overwrite_val, pkt->getSize());
+ blk->status |= BlkDirty;
+ }
}
template<class TagStore>
void
-Cache<TagStore>::satisfyCpuSideRequest(PacketPtr pkt, BlkType *blk)
+Cache<TagStore>::satisfyCpuSideRequest(PacketPtr pkt, BlkType *blk,
+ bool deferred_response,
+ bool pending_downgrade)
{
- assert(blk);
+ assert(blk && blk->isValid());
// Occasionally this is not true... if we are a lower-level cache
// satisfying a string of Read and ReadEx requests from
// upper-level caches, a Read will mark the block as shared but we
// can satisfy a following ReadEx anyway since we can rely on the
// Read requester(s) to have buffered the ReadEx snoop and to
// invalidate their blocks after receiving them.
- // assert(pkt->needsExclusive() ? blk->isWritable() : blk->isValid());
+ // assert(!pkt->needsExclusive() || blk->isWritable());
assert(pkt->getOffset(blkSize) + pkt->getSize() <= blkSize);
// Check RMW operations first since both isRead() and
if (pkt->cmd == MemCmd::SwapReq) {
cmpAndSwap(blk, pkt);
} else if (pkt->isWrite()) {
- blk->status |= BlkDirty;
if (blk->checkWrite(pkt)) {
pkt->writeDataToBlock(blk->data, blkSize);
+ blk->status |= BlkDirty;
}
} else if (pkt->isRead()) {
- if (pkt->isLocked()) {
+ if (pkt->isLLSC()) {
blk->trackLoadLocked(pkt);
}
pkt->setDataFromBlock(blk->data, blkSize);
// special handling for coherent block requests from
// upper-level caches
if (pkt->needsExclusive()) {
- // on ReadExReq we give up our copy
+ // if we have a dirty copy, make sure the recipient
+ // keeps it marked dirty
+ if (blk->isDirty()) {
+ pkt->assertMemInhibit();
+ }
+ // on ReadExReq we give up our copy unconditionally
tags->invalidateBlk(blk);
+ } else if (blk->isWritable() && !pending_downgrade
+ && !pkt->sharedAsserted()) {
+ // we can give the requester an exclusive copy (by not
+ // asserting shared line) on a read request if:
+ // - we have an exclusive copy at this level (& below)
+ // - we don't have a pending snoop from below
+ // signaling another read request
+ // - no other cache above has a copy (otherwise it
+ // would have asserted shared line on request)
+
+ if (blk->isDirty()) {
+ // special considerations if we're owner:
+ if (!deferred_response) {
+ // if we are responding immediately and can
+ // signal that we're transferring ownership
+ // along with exclusivity, do so
+ pkt->assertMemInhibit();
+ blk->status &= ~BlkDirty;
+ } else {
+ // if we're responding after our own miss,
+ // there's a window where the recipient didn't
+ // know it was getting ownership and may not
+ // have responded to snoops correctly, so we
+ // can't pass off ownership *or* exclusivity
+ pkt->assertShared();
+ }
+ }
} else {
- // on ReadReq we create shareable copies here and in
- // the requester
+ // otherwise only respond with a shared copy
pkt->assertShared();
- blk->status &= ~BlkWritable;
}
}
} else {
// Not a read or write... must be an upgrade. It's OK
// to just ack those as long as we have an exclusive
// copy at this level.
- assert(pkt->cmd == MemCmd::UpgradeReq);
+ assert(pkt->isUpgrade());
tags->invalidateBlk(blk);
}
}
template<class TagStore>
void
-Cache<TagStore>::markInService(MSHR *mshr)
+Cache<TagStore>::markInService(MSHR *mshr, PacketPtr pkt)
{
- markInServiceInternal(mshr);
+ markInServiceInternal(mshr, pkt);
#if 0
if (mshr->originalCmd == MemCmd::HardPFReq) {
DPRINTF(HWPrefetch, "%s:Marking a HW_PF in service\n",
Cache<TagStore>::access(PacketPtr pkt, BlkType *&blk,
int &lat, PacketList &writebacks)
{
- if (pkt->req->isUncacheable()) {
+ if (pkt->req->isUncacheable()) {
+ if (pkt->req->isClrex()) {
+ tags->clearLocks();
+ } else {
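+ // for an uncacheable access to a cached address, evict our copy
+ // so that no stale block remains behind the uncacheable request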
+ blk = tags->findBlock(pkt->getAddr());
+ if (blk != NULL) {
+ tags->invalidateBlk(blk);
+ }
+ }
+
blk = NULL;
lat = hitLatency;
return false;
}
- blk = tags->findBlock(pkt->getAddr(), lat);
+ int id = pkt->req->hasContextId() ? pkt->req->contextId() : -1;
+ blk = tags->accessBlock(pkt->getAddr(), lat, id);
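+ // unlike findBlock(), accessBlock() also updates the block's
+ // replacement state (e.g., LRU ordering) on a hit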
- if (prefetchAccess) {
- //We are determining prefetches on access stream, call prefetcher
- prefetcher->handleMiss(pkt, curTick);
- }
-
- DPRINTF(Cache, "%s %x %s\n", pkt->cmdString(), pkt->getAddr(),
- (blk) ? "hit" : "miss");
+ DPRINTF(Cache, "%s%s %x %s\n", pkt->cmdString(),
+ pkt->req->isInstFetch() ? " (ifetch)" : "",
+ pkt->getAddr(), (blk) ? "hit" : "miss");
if (blk != NULL) {
- if (blk->isPrefetch()) {
- //Signal that this was a hit under prefetch (no need for
- //use prefetch (only can get here if true)
- DPRINTF(HWPrefetch, "Hit a block that was prefetched\n");
- blk->status &= ~BlkHWPrefetched;
- if (prefetchMiss) {
- //If we are using the miss stream, signal the
- //prefetcher otherwise the access stream would have
- //already signaled this hit
- prefetcher->handleMiss(pkt, curTick);
- }
- }
-
if (pkt->needsExclusive() ? blk->isWritable() : blk->isReadable()) {
// OK to satisfy access
- hits[pkt->cmdToIndex()][0/*pkt->req->getThreadNum()*/]++;
+ incHitCount(pkt, id);
satisfyCpuSideRequest(pkt, blk);
return true;
}
if (blk == NULL) {
// no replaceable block available, give up.
// writeback will be forwarded to next level.
- incMissCount(pkt);
+ incMissCount(pkt, id);
return false;
}
+ int id = pkt->req->hasContextId() ? pkt->req->contextId() : -1;
+ tags->insertBlock(pkt->getAddr(), blk, id);
blk->status = BlkValid | BlkReadable;
}
std::memcpy(blk->data, pkt->getPtr<uint8_t>(), blkSize);
blk->status |= BlkDirty;
// nothing else to do; writeback doesn't expect response
assert(!pkt->needsResponse());
- hits[pkt->cmdToIndex()][0/*pkt->req->getThreadNum()*/]++;
+ incHitCount(pkt, id);
return true;
}
- incMissCount(pkt);
+ incMissCount(pkt, id);
- if (blk == NULL && pkt->isLocked() && pkt->isWrite()) {
+ if (blk == NULL && pkt->isLLSC() && pkt->isWrite()) {
// complete miss on store conditional... just give up now
pkt->req->setExtraData(0);
return true;
}
if (pkt->req->isUncacheable()) {
+ if (pkt->req->isClrex()) {
+ tags->clearLocks();
+ } else {
+ BlkType *blk = tags->findBlock(pkt->getAddr());
+ if (blk != NULL) {
+ tags->invalidateBlk(blk);
+ }
+ }
+
// writes go in write buffer, reads use MSHR
if (pkt->isWrite() && !pkt->isRead()) {
allocateWriteBuffer(pkt, time, true);
}
#endif
+ // track time of availability of next prefetch, if any
+ Tick next_pf_time = 0;
+
bool needsResponse = pkt->needsResponse();
if (satisfied) {
} else {
delete pkt;
}
+
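+ // notify the prefetcher on every access if prefetchOnAccess is set,
+ // otherwise only on demand hits to prefetched blocks; notify()
+ // returns the ready time of the next prefetch, or 0 if none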
+ if (prefetcher && (prefetchOnAccess || (blk && blk->wasPrefetched()))) {
+ if (blk)
+ blk->status &= ~BlkHWPrefetched;
+ next_pf_time = prefetcher->notify(pkt, time);
+ }
} else {
// miss
- if (prefetchMiss)
- prefetcher->handleMiss(pkt, time);
- Addr blk_addr = pkt->getAddr() & ~(Addr(blkSize-1));
+ Addr blk_addr = blockAlign(pkt->getAddr());
MSHR *mshr = mshrQueue.findMatch(blk_addr);
if (mshr) {
// MSHR hit
//@todo remove hw_pf here
- mshr_hits[pkt->cmdToIndex()][0/*pkt->req->getThreadNum()*/]++;
- if (mshr->threadNum != 0/*pkt->req->getThreadNum()*/) {
+ mshr_hits[pkt->cmdToIndex()][0/*pkt->req->threadId()*/]++;
+ if (mshr->threadNum != 0/*pkt->req->threadId()*/) {
mshr->threadNum = -1;
}
mshr->allocateTarget(pkt, time, order++);
}
} else {
// no MSHR
- mshr_misses[pkt->cmdToIndex()][0/*pkt->req->getThreadNum()*/]++;
+ mshr_misses[pkt->cmdToIndex()][0/*pkt->req->threadId()*/]++;
// always mark as cache fill for now... if we implement
// no-write-allocate or bypass accesses this will have to
// be changed.
allocateMissBuffer(pkt, time, true);
}
+
+ if (prefetcher) {
+ next_pf_time = prefetcher->notify(pkt, time);
+ }
}
}
+ if (next_pf_time != 0)
+ requestMemSideBus(Request_PF, std::max(time, next_pf_time));
+
// copy writebacks to write buffer
while (!writebacks.empty()) {
PacketPtr wbPkt = writebacks.front();
bool blkValid = blk && blk->isValid();
if (cpu_pkt->req->isUncacheable()) {
- assert(blk == NULL);
+ //assert(blk == NULL);
return NULL;
}
- if (!blkValid && (cpu_pkt->cmd == MemCmd::Writeback ||
- cpu_pkt->cmd == MemCmd::UpgradeReq)) {
+ if (!blkValid &&
+ (cpu_pkt->cmd == MemCmd::Writeback || cpu_pkt->isUpgrade())) {
// Writebacks that weren't allocated in access() and upgrades
// from upper-level caches that missed completely just go
// through.
// only reason to be here is that blk is shared
// (read-only) and we need exclusive
assert(needsExclusive && !blk->isWritable());
- cmd = MemCmd::UpgradeReq;
+ cmd = cpu_pkt->isLLSC() ? MemCmd::SCUpgradeReq : MemCmd::UpgradeReq;
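+ // use SCUpgradeReq so a snooped invalidation can fail the store
+ // conditional while it is still queued (see the SCUpgradeFailReq
+ // handling in getTimingPacket())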
} else {
// block is invalid
cmd = needsExclusive ? MemCmd::ReadExReq : MemCmd::ReadReq;
if (!access(pkt, blk, lat, writebacks)) {
// MISS
- PacketPtr busPkt = getBusPacket(pkt, blk, pkt->needsExclusive());
+ PacketPtr bus_pkt = getBusPacket(pkt, blk, pkt->needsExclusive());
- bool isCacheFill = (busPkt != NULL);
+ bool is_forward = (bus_pkt == NULL);
- if (busPkt == NULL) {
+ if (is_forward) {
// just forwarding the same request to the next level
// no local cache operation involved
- busPkt = pkt;
+ bus_pkt = pkt;
}
DPRINTF(Cache, "Sending an atomic %s for %x\n",
- busPkt->cmdString(), busPkt->getAddr());
+ bus_pkt->cmdString(), bus_pkt->getAddr());
#if TRACING_ON
CacheBlk::State old_state = blk ? blk->status : 0;
#endif
- lat += memSidePort->sendAtomic(busPkt);
+ lat += memSidePort->sendAtomic(bus_pkt);
DPRINTF(Cache, "Receive response: %s for addr %x in state %i\n",
- busPkt->cmdString(), busPkt->getAddr(), old_state);
-
- bool is_error = busPkt->isError();
- assert(!busPkt->wasNacked());
-
- if (is_error && pkt->needsResponse()) {
- pkt->makeAtomicResponse();
- pkt->copyError(busPkt);
- } else if (isCacheFill && !is_error) {
- blk = handleFill(busPkt, blk, writebacks);
- satisfyCpuSideRequest(pkt, blk);
- delete busPkt;
+ bus_pkt->cmdString(), bus_pkt->getAddr(), old_state);
+
+ assert(!bus_pkt->wasNacked());
+
+ // If packet was a forward, the response (if any) is already
+ // in place in the bus_pkt == pkt structure, so we don't need
+ // to do anything. Otherwise, use the separate bus_pkt to
+ // generate response to pkt and then delete it.
+ if (!is_forward) {
+ if (pkt->needsResponse()) {
+ assert(bus_pkt->isResponse());
+ if (bus_pkt->isError()) {
+ pkt->makeAtomicResponse();
+ pkt->copyError(bus_pkt);
+ } else if (bus_pkt->isRead() ||
+ bus_pkt->cmd == MemCmd::UpgradeResp) {
+ // we're updating cache state to allow us to
+ // satisfy the upstream request from the cache
+ blk = handleFill(bus_pkt, blk, writebacks);
+ satisfyCpuSideRequest(pkt, blk);
+ } else {
+ // we're satisfying the upstream request without
+ // modifying cache state, e.g., a write-through
+ pkt->makeAtomicResponse();
+ }
+ }
+ delete bus_pkt;
}
}
+ // Note that we don't invoke the prefetcher at all in atomic mode.
+ // It's not clear how to do it properly, particularly for
+ // prefetchers that aggressively generate prefetch candidates and
+ // rely on bandwidth contention to throttle them; these will tend
+ // to pollute the cache in atomic mode since there is no bandwidth
+ // contention. If we ever do want to enable prefetching in atomic
+ // mode, though, this is the place to do it... see timingAccess()
+ // for an example (though we'd want to issue the prefetch(es)
+ // immediately rather than calling requestMemSideBus() as we do
+ // there).
+
// Handle writebacks if needed
while (!writebacks.empty()){
PacketPtr wbPkt = writebacks.front();
CachePort *incomingPort,
CachePort *otherSidePort)
{
- Addr blk_addr = pkt->getAddr() & ~(blkSize - 1);
+ Addr blk_addr = blockAlign(pkt->getAddr());
BlkType *blk = tags->findBlock(pkt->getAddr());
pkt->pushLabel(name());
PacketList writebacks;
if (pkt->req->isUncacheable()) {
- mshr_uncacheable_lat[stats_cmd_idx][0/*pkt->req->getThreadNum()*/] +=
+ mshr_uncacheable_lat[stats_cmd_idx][0/*pkt->req->threadId()*/] +=
miss_latency;
} else {
- mshr_miss_latency[stats_cmd_idx][0/*pkt->req->getThreadNum()*/] +=
+ mshr_miss_latency[stats_cmd_idx][0/*pkt->req->threadId()*/] +=
miss_latency;
}
- if (mshr->isCacheFill && !is_error) {
+ bool is_fill = !mshr->isForward &&
+ (pkt->isRead() || pkt->cmd == MemCmd::UpgradeResp);
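+ // UpgradeResp carries no data but still updates block state, so it
+ // counts as a fill unless the MSHR was only forwarding the request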
+
+ if (is_fill && !is_error) {
DPRINTF(Cache, "Block for addr %x being updated in Cache\n",
pkt->getAddr());
while (mshr->hasTargets()) {
MSHR::Target *target = mshr->getTarget();
- if (target->isCpuSide()) {
+ switch (target->source) {
+ case MSHR::Target::FromCPU:
Tick completion_time;
- if (blk != NULL) {
- satisfyCpuSideRequest(target->pkt, blk);
+ if (is_fill) {
+ satisfyCpuSideRequest(target->pkt, blk,
+ true, mshr->hasPostDowngrade());
// How many bytes past the first request is this one
int transfer_offset =
target->pkt->getOffset(blkSize) - initial_offset;
(transfer_offset ? pkt->finishTime : pkt->firstWordTime);
assert(!target->pkt->req->isUncacheable());
- missLatency[target->pkt->cmdToIndex()][0/*pkt->req->getThreadNum()*/] +=
+ missLatency[target->pkt->cmdToIndex()][0/*pkt->req->threadId()*/] +=
completion_time - target->recvTime;
+ } else if (target->pkt->cmd == MemCmd::StoreCondReq &&
+ pkt->cmd == MemCmd::UpgradeFailResp) {
+ // failed StoreCond upgrade
+ completion_time = tags->getHitLatency() + pkt->finishTime;
+ target->pkt->req->setExtraData(0);
} else {
// not a cache fill, just forwarding response
completion_time = tags->getHitLatency() + pkt->finishTime;
// if this packet is an error copy that to the new packet
if (is_error)
target->pkt->copyError(pkt);
- if (pkt->isInvalidate()) {
+ if (target->pkt->cmd == MemCmd::ReadResp &&
+ (pkt->isInvalidate() || mshr->hasPostInvalidate())) {
// If intermediate cache got ReadRespWithInvalidate,
// propagate that. Response should not have
// isInvalidate() set otherwise.
- assert(target->pkt->cmd == MemCmd::ReadResp);
- assert(pkt->cmd == MemCmd::ReadRespWithInvalidate);
target->pkt->cmd = MemCmd::ReadRespWithInvalidate;
}
cpuSidePort->respond(target->pkt, completion_time);
- } else {
+ break;
+
+ case MSHR::Target::FromPrefetcher:
+ assert(target->pkt->cmd == MemCmd::HardPFReq);
+ if (blk)
+ blk->status |= BlkHWPrefetched;
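+ // no response is sent to the prefetcher; just drop the request
+ // and packet here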
+ delete target->pkt->req;
+ delete target->pkt;
+ break;
+
+ case MSHR::Target::FromSnoop:
// I don't believe that a snoop can be in an error state
assert(!is_error);
// response to snoop request
DPRINTF(Cache, "processing deferred snoop...\n");
+ assert(!(pkt->isInvalidate() && !mshr->hasPostInvalidate()));
handleSnoop(target->pkt, blk, true, true,
- mshr->pendingInvalidate || pkt->isInvalidate());
+ mshr->hasPostInvalidate());
+ break;
+
+ default:
+ panic("Illegal target->source enum %d\n", target->source);
}
mshr->popTarget();
}
- if (pkt->isInvalidate()) {
- tags->invalidateBlk(blk);
+ if (blk) {
+ if (pkt->isInvalidate() || mshr->hasPostInvalidate()) {
+ tags->invalidateBlk(blk);
+ } else if (mshr->hasPostDowngrade()) {
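+ // a read snoop arrived while the miss was outstanding, so we
+ // may keep the block but only in a non-writable (shared) state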
+ blk->status &= ~BlkWritable;
+ }
}
if (mshr->promoteDeferredTargets()) {
// avoid later read getting stale data while write miss is
// outstanding.. see comment in timingAccess()
- blk->status &= ~BlkReadable;
+ if (blk) {
+ blk->status &= ~BlkReadable;
+ }
MSHRQueue *mq = mshr->queue;
mq->markPending(mshr);
requestMemSideBus((RequestCause)mq->index, pkt->finishTime);
template<class TagStore>
PacketPtr
Cache<TagStore>::writebackBlk(BlkType *blk)
{
assert(blk && blk->isValid() && blk->isDirty());
- writebacks[0/*pkt->req->getThreadNum()*/]++;
+ writebacks[0/*pkt->req->threadId()*/]++;
Request *writebackReq =
new Request(tags->regenerateBlkAddr(blk->tag, blk->set), blkSize, 0);
template<class TagStore>
typename Cache<TagStore>::BlkType*
Cache<TagStore>::allocateBlock(Addr addr, PacketList &writebacks)
{
- BlkType *blk = tags->findReplacement(addr, writebacks);
+ BlkType *blk = tags->findVictim(addr, writebacks);
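+ // findVictim() only selects a victim block; the new tag is
+ // installed later via tags->insertBlock() in handleFill()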
if (blk->isValid()) {
Addr repl_addr = tags->regenerateBlkAddr(blk->tag, blk->set);
assert(!blk->isWritable());
assert(repl_mshr->needsExclusive());
// too hard to replace block with transient state
+ // allocation failed, block not inserted
return NULL;
} else {
DPRINTF(Cache, "replacement: replacing %x with %x: %s\n",
}
}
- // Set tag for new block. Caller is responsible for setting status.
- blk->tag = tags->extractTag(addr);
return blk;
}
tempBlock->set = tags->extractSet(addr);
tempBlock->tag = tags->extractTag(addr);
DPRINTF(Cache, "using temp block for %x\n", addr);
+ } else {
+ int id = pkt->req->hasContextId() ? pkt->req->contextId() : -1;
+ tags->insertBlock(pkt->getAddr(), blk, id);
}
+
+ // starting from scratch with a new block
+ blk->status = 0;
} else {
// existing block... probably an upgrade
assert(blk->tag == tags->extractTag(addr));
// either we're getting new data or the block should already be valid
assert(pkt->hasData() || blk->isValid());
+ // don't clear block status... if block is already dirty we
+ // don't want to lose that
}
+ blk->status |= BlkValid | BlkReadable;
+
if (!pkt->sharedAsserted()) {
- blk->status = BlkValid | BlkReadable | BlkWritable;
- } else {
- assert(!pkt->needsExclusive());
- blk->status = BlkValid | BlkReadable;
+ blk->status |= BlkWritable;
+ // If we got this via cache-to-cache transfer (i.e., from a
+ // cache that was an owner) and took away that owner's copy,
+ // then we need to write it back. Normally this happens
+ // anyway as a side effect of getting a copy to write it, but
+ // there are cases (such as failed store conditionals or
+ // compare-and-swaps) where we'll demand an exclusive copy but
+ // end up not writing it.
+ if (pkt->memInhibitAsserted())
+ blk->status |= BlkDirty;
}
DPRINTF(Cache, "Block addr %x moving from state %i to %i\n",
{
// timing-mode snoop responses require a new packet, unless we
// already made a copy...
- PacketPtr pkt = already_copied ? req_pkt : new Packet(req_pkt, true);
- if (!req_pkt->isInvalidate()) {
- // note that we're ignoring the shared flag on req_pkt... it's
- // basically irrelevant, as we'll always assert shared unless
- // it's an exclusive request, in which case the shared line
- // should never be asserted1
- pkt->assertShared();
- }
+ PacketPtr pkt = already_copied ? req_pkt : new Packet(req_pkt);
+ assert(req_pkt->isInvalidate() || pkt->sharedAsserted());
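+ // for non-invalidating snoops the shared line was already asserted
+ // in handleSnoop(), which is what the assert above checks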
pkt->allocate();
pkt->makeTimingResponse();
if (pkt->isRead()) {
assert(!(pending_inval && !is_deferred));
assert(pkt->isRequest());
- // first propagate snoop upward to see if anyone above us wants to
- // handle it. save & restore packet src since it will get
- // rewritten to be relative to cpu-side bus (if any)
- bool alreadyResponded = pkt->memInhibitAsserted();
- if (is_timing) {
- Packet *snoopPkt = new Packet(pkt, true); // clear flags
- snoopPkt->setExpressSnoop();
- snoopPkt->senderState = new ForwardResponseRecord(pkt, this);
- cpuSidePort->sendTiming(snoopPkt);
- if (snoopPkt->memInhibitAsserted()) {
- // cache-to-cache response from some upper cache
- assert(!alreadyResponded);
- pkt->assertMemInhibit();
+ // the packet may get modified if we or a forwarded snooper
+ // responds in atomic mode, so remember a few things about the
+ // original packet up front
+ bool invalidate = pkt->isInvalidate();
+ bool M5_VAR_USED needs_exclusive = pkt->needsExclusive();
+
+ if (forwardSnoops) {
+ // first propagate snoop upward to see if anyone above us wants to
+ // handle it. save & restore packet src since it will get
+ // rewritten to be relative to cpu-side bus (if any)
+ bool alreadyResponded = pkt->memInhibitAsserted();
+ if (is_timing) {
+ Packet *snoopPkt = new Packet(pkt, true); // clear flags
+ snoopPkt->setExpressSnoop();
+ snoopPkt->senderState = new ForwardResponseRecord(pkt, this);
+ cpuSidePort->sendTiming(snoopPkt);
+ if (snoopPkt->memInhibitAsserted()) {
+ // cache-to-cache response from some upper cache
+ assert(!alreadyResponded);
+ pkt->assertMemInhibit();
+ } else {
+ delete snoopPkt->senderState;
+ }
+ if (snoopPkt->sharedAsserted()) {
+ pkt->assertShared();
+ }
+ delete snoopPkt;
} else {
- delete snoopPkt->senderState;
- }
- if (snoopPkt->sharedAsserted()) {
- pkt->assertShared();
- }
- delete snoopPkt;
- } else {
- int origSrc = pkt->getSrc();
- cpuSidePort->sendAtomic(pkt);
- if (!alreadyResponded && pkt->memInhibitAsserted()) {
- // cache-to-cache response from some upper cache:
- // forward response to original requester
- assert(pkt->isResponse());
+ int origSrc = pkt->getSrc();
+ cpuSidePort->sendAtomic(pkt);
+ if (!alreadyResponded && pkt->memInhibitAsserted()) {
+ // cache-to-cache response from some upper cache:
+ // forward response to original requester
+ assert(pkt->isResponse());
+ }
+ pkt->setSrc(origSrc);
}
- pkt->setSrc(origSrc);
}
if (!blk || !blk->isValid()) {
// and then do it later
bool respond = blk->isDirty() && pkt->needsResponse();
bool have_exclusive = blk->isWritable();
- bool invalidate = pkt->isInvalidate();
- if (pkt->isRead() && !pkt->isInvalidate()) {
- assert(!pkt->needsExclusive());
+ if (pkt->isRead() && !invalidate) {
+ assert(!needs_exclusive);
pkt->assertShared();
int bits_to_clear = BlkWritable;
const bool haveOwnershipState = true; // for now
BlkType *blk = tags->findBlock(pkt->getAddr());
- Addr blk_addr = pkt->getAddr() & ~(Addr(blkSize-1));
+ Addr blk_addr = blockAlign(pkt->getAddr());
MSHR *mshr = mshrQueue.findMatch(blk_addr);
// Let the MSHR itself track the snoop and decide whether we want
// fall through... no pending requests. Try a prefetch.
assert(!miss_mshr && !write_mshr);
- if (!mshrQueue.isFull()) {
+ if (prefetcher && !mshrQueue.isFull()) {
// If we have a miss queue slot, we can try a prefetch
PacketPtr pkt = prefetcher->getPacket();
if (pkt) {
- // Update statistic on number of prefetches issued
- // (hwpf_mshr_misses)
- mshr_misses[pkt->cmdToIndex()][0/*pkt->req->getThreadNum()*/]++;
- // Don't request bus, since we already have it
- return allocateMissBuffer(pkt, curTick, false);
+ Addr pf_addr = blockAlign(pkt->getAddr());
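+ // squash the prefetch if the line is already cached or a fetch
+ // for it is already in flight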
+ if (!tags->findBlock(pf_addr) && !mshrQueue.findMatch(pf_addr)) {
+ // Update statistic on number of prefetches issued
+ // (hwpf_mshr_misses)
+ mshr_misses[pkt->cmdToIndex()][0/*pkt->req->threadId()*/]++;
+ // Don't request bus, since we already have it
+ return allocateMissBuffer(pkt, curTick, false);
+ }
}
}
PacketPtr tgt_pkt = mshr->getTarget()->pkt;
PacketPtr pkt = NULL;
- if (mshr->isSimpleForward()) {
+ if (tgt_pkt->cmd == MemCmd::SCUpgradeFailReq) {
+ // SCUpgradeReq saw invalidation while queued in MSHR, so now
+ // that we are getting around to processing it, just treat it
+ // as if we got a failure response
+ pkt = new Packet(tgt_pkt);
+ pkt->cmd = MemCmd::UpgradeFailResp;
+ pkt->senderState = mshr;
+ pkt->firstWordTime = pkt->finishTime = curTick;
+ handleResponse(pkt);
+ return NULL;
+ } else if (mshr->isForwardNoResponse()) {
// no response expected, just forward packet as it is
assert(tags->findBlock(mshr->addr) == NULL);
pkt = tgt_pkt;
BlkType *blk = tags->findBlock(mshr->addr);
pkt = getBusPacket(tgt_pkt, blk, mshr->needsExclusive());
- mshr->isCacheFill = (pkt != NULL);
+ mshr->isForward = (pkt == NULL);
- if (pkt == NULL) {
+ if (mshr->isForward) {
// not a cache block request, but a response is expected
- assert(!mshr->isSimpleForward());
// make copy of current packet to forward, keep current
// copy for response handling
pkt = new Packet(tgt_pkt);
}
+template<class TagStore>
+Tick
+Cache<TagStore>::nextMSHRReadyTime()
+{
+ Tick nextReady = std::min(mshrQueue.nextMSHRReadyTime(),
+ writeBuffer.nextMSHRReadyTime());
+
+ if (prefetcher) {
+ nextReady = std::min(nextReady,
+ prefetcher->nextPrefetchReadyTime());
+ }
+
+ return nextReady;
+}
+
+
///////////////
//
// CpuSidePort
Cache<TagStore>::CpuSidePort::
getDeviceAddressRanges(AddrRangeList &resp, bool &snoop)
{
- // CPU side port doesn't snoop; it's a target only.
- bool dummy;
- otherPort->getPeerAddressRanges(resp, dummy);
- FilterRangeList(filterRanges, resp);
+ // CPU side port doesn't snoop; it's a target only. It can
+ // potentially respond to any address.
snoop = false;
+ resp.push_back(myCache()->getAddrRange());
}
template<class TagStore>
Cache<TagStore>::
CpuSidePort::CpuSidePort(const std::string &_name, Cache<TagStore> *_cache,
- const std::string &_label,
- std::vector<Range<Addr> > filterRanges)
- : BaseCache::CachePort(_name, _cache, _label, filterRanges)
+ const std::string &_label)
+ : BaseCache::CachePort(_name, _cache, _label)
{
}
Cache<TagStore>::MemSidePort::
getDeviceAddressRanges(AddrRangeList &resp, bool &snoop)
{
- otherPort->getPeerAddressRanges(resp, snoop);
- FilterRangeList(filterRanges, resp);
-
- // Memory-side port always snoops, so unconditionally set flag for
- // caller.
+ // Memory-side port always snoops, but never passes requests
+ // through to targets on the cpu side (so we don't add anything to
+ // the address range list).
snoop = true;
}
waitingOnRetry = !success;
if (waitingOnRetry) {
DPRINTF(CachePort, "now waiting on a retry\n");
- if (!mshr->isSimpleForward()) {
+ if (!mshr->isForwardNoResponse()) {
delete pkt;
}
} else {
- myCache()->markInService(mshr);
+ myCache()->markInService(mshr, pkt);
}
}
}
// @TODO: need to factor in prefetch requests here somehow
if (nextReady != MaxTick) {
DPRINTF(CachePort, "more packets to send @ %d\n", nextReady);
- sendEvent->schedule(std::max(nextReady, curTick + 1));
+ schedule(sendEvent, std::max(nextReady, curTick + 1));
} else {
// no more to send right now: if we're draining, we may be done
- if (drainEvent) {
+ if (drainEvent && !sendEvent->scheduled()) {
drainEvent->process();
drainEvent = NULL;
}
template<class TagStore>
Cache<TagStore>::
MemSidePort::MemSidePort(const std::string &_name, Cache<TagStore> *_cache,
- const std::string &_label,
- std::vector<Range<Addr> > filterRanges)
- : BaseCache::CachePort(_name, _cache, _label, filterRanges)
+ const std::string &_label)
+ : BaseCache::CachePort(_name, _cache, _label)
{
// override default send event from SimpleTimingPort
delete sendEvent;