FullO3CPU<Impl>::IcachePort::recvTimingResp(PacketPtr pkt)
{
DPRINTF(O3CPU, "Fetch unit received timing\n");
- // We shouldn't ever get a block in ownership state
- assert(!(pkt->memInhibitAsserted() && !pkt->sharedAsserted()));
+ // We shouldn't ever get a cacheable block in ownership state
+ assert(pkt->req->isUncacheable() ||
+ !(pkt->memInhibitAsserted() && !pkt->sharedAsserted()));
fetch->processCacheCompletion(pkt);
return true;
bool
DmaPort::recvTimingResp(PacketPtr pkt)
{
- // We shouldn't ever get a block in ownership state
- assert(!(pkt->memInhibitAsserted() && !pkt->sharedAsserted()));
+ // We shouldn't ever get a cacheable block in ownership state
+ assert(pkt->req->isUncacheable() ||
+ !(pkt->memInhibitAsserted() && !pkt->sharedAsserted()));
handleResp(pkt);
}
}
+bool
+BaseCache::inRange(Addr addr) const
+{
+    // Walk the (typically short) list of address ranges served by
+    // this cache and report a hit as soon as one covers the address.
+    bool covered = false;
+    for (const auto& range : addrRanges) {
+        if (range.contains(addr)) {
+            covered = true;
+            break;
+        }
+    }
+    return covered;
+}
+
void
BaseCache::regStats()
{
*/
virtual bool isDirty() const = 0;
+ /**
+ * Determine if an address is in the ranges covered by this
+ * cache. This is useful to filter snoops.
+ *
+ * @param addr Address to check against
+ *
+     * @return True if the address is within any of this cache's ranges
+ */
+ bool inRange(Addr addr) const;
+
/** Block size of this cache */
const unsigned blkSize;
// responding to the request
DPRINTF(Cache, "mem inhibited on addr %#llx (%s): not responding\n",
pkt->getAddr(), pkt->isSecure() ? "s" : "ns");
- assert(!pkt->req->isUncacheable());
// if the packet needs exclusive, and the cache that has
// promised to respond (setting the inhibit flag) is not
promoteWholeLineWrites(pkt);
if (pkt->memInhibitAsserted()) {
- assert(!pkt->req->isUncacheable());
// have to invalidate ourselves and any lower caches even if
// upper cache will be responding
if (pkt->isInvalidate()) {
// responses)
pkt = new Packet(req_pkt, false, req_pkt->isRead());
- assert(req_pkt->isInvalidate() || pkt->sharedAsserted());
+ assert(req_pkt->req->isUncacheable() || req_pkt->isInvalidate() ||
+ pkt->sharedAsserted());
pkt->makeTimingResponse();
if (pkt->isRead()) {
pkt->setDataFromBlock(blk_data, blkSize);
return;
}
- if (pkt->isRead() && !invalidate) {
+ if (!pkt->req->isUncacheable() && pkt->isRead() && !invalidate) {
assert(!needs_exclusive);
pkt->assertShared();
int bits_to_clear = BlkWritable;
// will write it back at a later point
pkt->assertMemInhibit();
if (have_exclusive) {
+ // in the case of an uncacheable request there is no need
+ // to set the exclusive flag, but since the recipient does
+ // not care there is no harm in doing so
pkt->setSupplyExclusive();
}
if (is_timing) {
pkt->makeAtomicResponse();
pkt->setDataFromBlock(blk->data, blkSize);
}
- } else if (is_timing && is_deferred) {
+ }
+
+ if (!respond && is_timing && is_deferred) {
// if it's a deferred timing snoop then we've made a copy of
// the packet, and so if we're not using that copy to respond
// then we need to delete it here.
// Snoops shouldn't happen when bypassing caches
assert(!system->bypassCaches());
- // check if the packet is for an address range covered by this
- // cache, partly to not waste time looking for it, but also to
- // ensure that we only forward the snoop upwards if it is within
- // our address ranges
- bool in_range = false;
- for (AddrRangeList::const_iterator r = addrRanges.begin();
- r != addrRanges.end(); ++r) {
- if (r->contains(pkt->getAddr())) {
- in_range = true;
- break;
- }
- }
-
- // Note that some deferred snoops don't have requests, since the
- // original access may have already completed
- if ((pkt->req && pkt->req->isUncacheable()) ||
- pkt->cmd == MemCmd::Writeback || !in_range) {
- //Can't get a hit on an uncacheable address
- //Revisit this for multi level coherence
+ // no need to snoop writebacks or requests that are not in range
+ if (pkt->cmd == MemCmd::Writeback || !inRange(pkt->getAddr())) {
return;
}
// Snoops shouldn't happen when bypassing caches
assert(!system->bypassCaches());
- if (pkt->req->isUncacheable() || pkt->cmd == MemCmd::Writeback) {
- // Can't get a hit on an uncacheable address
- // Revisit this for multi level coherence
+ // no need to snoop writebacks or requests that are not in range
+ if (pkt->cmd == MemCmd::Writeback || !inRange(pkt->getAddr())) {
return 0;
}
if (isPendingDirty()) {
pkt->assertMemInhibit();
+ // in the case of an uncacheable request there is no need
+ // to set the exclusive flag, but since the recipient does
+ // not care there is no harm in doing so
pkt->setSupplyExclusive();
}
}
}
- if (!pkt->needsExclusive()) {
+ if (!pkt->needsExclusive() && !pkt->req->isUncacheable()) {
// This transaction will get a read-shared copy, downgrading
// our copy if we had an exclusive one
postDowngrade = true;
// determine how long to be crossbar layer is busy
Tick packetFinishTime = clockEdge(Cycles(1)) + pkt->payloadDelay;
- // uncacheable requests need never be snooped
- if (!pkt->req->isUncacheable() && !system->bypassCaches()) {
+ if (!system->bypassCaches()) {
// the packet is a memory-mapped request and should be
// broadcasted to our snoopers but the source
if (snoopFilter) {
// since it is a normal request, attempt to send the packet
bool success = masterPorts[master_port_id]->sendTimingReq(pkt);
- if (snoopFilter && !pkt->req->isUncacheable()
- && !system->bypassCaches()) {
+ if (snoopFilter && !system->bypassCaches()) {
// The packet may already be overwritten by the sendTimingReq function.
// The snoop filter needs to see the original request *and* the return
// status of the send operation, so we need to recreate the original
// determine how long to be crossbar layer is busy
Tick packetFinishTime = clockEdge(Cycles(1)) + pkt->payloadDelay;
- if (snoopFilter && !pkt->req->isUncacheable() && !system->bypassCaches()) {
+ if (snoopFilter && !system->bypassCaches()) {
// let the snoop filter inspect the response and update its state
snoopFilter->updateResponse(pkt, *slavePorts[slave_port_id]);
}
MemCmd snoop_response_cmd = MemCmd::InvalidCmd;
Tick snoop_response_latency = 0;
- // uncacheable requests need never be snooped
- if (!pkt->req->isUncacheable() && !system->bypassCaches()) {
+ if (!system->bypassCaches()) {
// forward to all snoopers but the source
std::pair<MemCmd, Tick> snoop_result;
if (snoopFilter) {
Tick response_latency = masterPorts[master_port_id]->sendAtomic(pkt);
// Lower levels have replied, tell the snoop filter
- if (snoopFilter && !pkt->req->isUncacheable() && !system->bypassCaches() &&
- pkt->isResponse()) {
+ if (snoopFilter && !system->bypassCaches() && pkt->isResponse()) {
snoopFilter->updateResponse(pkt, *slavePorts[slave_port_id]);
}
pkt->cmdString());
}
- // uncacheable requests need never be snooped
- if (!pkt->req->isUncacheable() && !system->bypassCaches()) {
+ if (!system->bypassCaches()) {
// forward to all snoopers but the source
forwardFunctional(pkt, slave_port_id);
}
DPRINTF(SnoopFilter, "%s: SF value %x.%x\n",
__func__, sf_item.requested, sf_item.holder);
- if (cpkt->needsResponse()) {
+ if (!cpkt->req->isUncacheable() && cpkt->needsResponse()) {
if (!cpkt->memInhibitAsserted()) {
// Max one request per address per port
panic_if(sf_item.requested & req_port, "double request :( "\
DPRINTF(SnoopFilter, "%s: packet src %s addr 0x%x cmd %s\n",
__func__, slave_port.name(), cpkt->getAddr(), cpkt->cmdString());
+ if (cpkt->req->isUncacheable())
+ return;
+
Addr line_addr = cpkt->getAddr() & ~(linesize - 1);
SnoopMask req_port = portToMask(slave_port);
SnoopItem& sf_item = cachedLocations[line_addr];
__func__, rsp_port.name(), req_port.name(), cpkt->getAddr(),
cpkt->cmdString());
+ assert(cpkt->isResponse());
+ assert(cpkt->memInhibitAsserted());
+
+ if (cpkt->req->isUncacheable())
+ return;
+
Addr line_addr = cpkt->getAddr() & ~(linesize - 1);
SnoopMask rsp_mask = portToMask(rsp_port);
SnoopMask req_mask = portToMask(req_port);
SnoopItem& sf_item = cachedLocations[line_addr];
- assert(cpkt->isResponse());
- assert(cpkt->memInhibitAsserted());
-
DPRINTF(SnoopFilter, "%s: old SF value %x.%x\n",
__func__, sf_item.requested, sf_item.holder);
DPRINTF(SnoopFilter, "%s: packet src %s addr 0x%x cmd %s\n",
__func__, slave_port.name(), cpkt->getAddr(), cpkt->cmdString());
+ assert(cpkt->isResponse());
+
+ if (cpkt->req->isUncacheable())
+ return;
+
Addr line_addr = cpkt->getAddr() & ~(linesize - 1);
SnoopMask slave_mask = portToMask(slave_port);
SnoopItem& sf_item = cachedLocations[line_addr];
- assert(cpkt->isResponse());
-
DPRINTF(SnoopFilter, "%s: old SF value %x.%x\n",
__func__, sf_item.requested, sf_item.holder);