X-Git-Url: https://git.libre-soc.org/?a=blobdiff_plain;f=src%2Fmem%2Fcache%2Fcache.cc;h=4bec7de38e5c9a6bb93faee6ba57229fc02c7ea2;hb=14732c4f970cc631e5d57bd945ce7c47f4f0185b;hp=6fb6f11236c5e88aa554f015b8a683270578e706;hpb=921a72f4f340c99305de5a4b70ca28ed8acd7861;p=gem5.git

diff --git a/src/mem/cache/cache.cc b/src/mem/cache/cache.cc
index 6fb6f1123..4bec7de38 100644
--- a/src/mem/cache/cache.cc
+++ b/src/mem/cache/cache.cc
@@ -63,10 +63,12 @@
 #include "mem/request.hh"
 #include "params/Cache.hh"
 
-Cache::Cache(const CacheParams *p)
-    : BaseCache(p, p->system->cacheLineSize()),
+Cache::Cache(const CacheParams &p)
+    : BaseCache(p, p.system->cacheLineSize()),
       doFastWrites(true)
 {
+    assert(p.tags);
+    assert(p.replacement_policy);
 }
 
 void
@@ -89,14 +91,14 @@ Cache::satisfyRequest(PacketPtr pkt, CacheBlk *blk,
 
             // if we have a dirty copy, make sure the recipient
             // keeps it marked dirty (in the modified state)
-            if (blk->isDirty()) {
+            if (blk->isSet(CacheBlk::DirtyBit)) {
                 pkt->setCacheResponding();
-                blk->status &= ~BlkDirty;
+                blk->clearCoherenceBits(CacheBlk::DirtyBit);
             }
-        } else if (blk->isWritable() && !pending_downgrade &&
-                   !pkt->hasSharers() &&
-                   pkt->cmd != MemCmd::ReadCleanReq) {
-            // we can give the requester a writable copy on a read
+        } else if (blk->isSet(CacheBlk::WritableBit) &&
+                   !pending_downgrade && !pkt->hasSharers() &&
+                   pkt->cmd != MemCmd::ReadCleanReq) {
+            // we can give the requestor a writable copy on a read
             // request if:
             // - we have a writable copy at this level (& below)
             // - we don't have a pending snoop from below
@@ -106,7 +108,7 @@ Cache::satisfyRequest(PacketPtr pkt, CacheBlk *blk,
             //   snooping the packet)
             // - the read has explicitly asked for a clean
             //   copy of the line
-            if (blk->isDirty()) {
+            if (blk->isSet(CacheBlk::DirtyBit)) {
                 // special considerations if we're owner:
                 if (!deferred_response) {
                     // respond with the line in Modified state
@@ -128,7 +130,7 @@ Cache::satisfyRequest(PacketPtr pkt, CacheBlk *blk,
                     // the cache hierarchy through a cache,
                     // and first snoop upwards in all other
                     // branches
-                    blk->status &= ~BlkDirty;
+                    blk->clearCoherenceBits(CacheBlk::DirtyBit);
                 } else {
                     // if we're responding after our own miss,
                     // there's a window where the recipient didn't
@@ -326,7 +328,7 @@ Cache::handleTimingReqMiss(PacketPtr pkt, CacheBlk *blk, Tick forward_time,
         // should have flushed and have no valid block
         assert(!blk || !blk->isValid());
 
-        stats.cmdStats(pkt).mshr_uncacheable[pkt->req->masterId()]++;
+        stats.cmdStats(pkt).mshrUncacheable[pkt->req->requestorId()]++;
 
         if (pkt->isWrite()) {
             allocateWriteBuffer(pkt, forward_time);
@@ -371,9 +373,9 @@ Cache::handleTimingReqMiss(PacketPtr pkt, CacheBlk *blk, Tick forward_time,
             if (!mshr) {
                 // copy the request and create a new SoftPFReq packet
                 RequestPtr req = std::make_shared<Request>(pkt->req->getPaddr(),
-                                                           pkt->req->getSize(),
-                                                           pkt->req->getFlags(),
-                                                           pkt->req->masterId());
+                                                            pkt->req->getSize(),
+                                                            pkt->req->getFlags(),
+                                                            pkt->req->requestorId());
                 pf = new Packet(req, pkt->cmd);
                 pf->allocate();
                 assert(pf->matchAddr(pkt));
@@ -447,7 +449,7 @@ Cache::recvTimingReq(PacketPtr pkt)
         // this express snoop travels towards the memory, and at
         // every crossbar it is snooped upwards thus reaching
        // every cache in the system
-        bool M5_VAR_USED success = memSidePort.sendTimingReq(snoop_pkt);
+        M5_VAR_USED bool success = memSidePort.sendTimingReq(snoop_pkt);
         // express snoops always succeed
         assert(success);
 
@@ -496,7 +498,7 @@ Cache::createMissPacket(PacketPtr cpu_pkt, CacheBlk *blk,
     const bool useUpgrades = true;
assert(cpu_pkt->cmd != MemCmd::WriteLineReq || is_whole_line_write); if (is_whole_line_write) { - assert(!blkValid || !blk->isWritable()); + assert(!blkValid || !blk->isSet(CacheBlk::WritableBit)); // forward as invalidate to all other caches, this gives us // the line in Exclusive state, and invalidates all other // copies @@ -505,7 +507,7 @@ Cache::createMissPacket(PacketPtr cpu_pkt, CacheBlk *blk, // only reason to be here is that blk is read only and we need // it to be writable assert(needsWritable); - assert(!blk->isWritable()); + assert(!blk->isSet(CacheBlk::WritableBit)); cmd = cpu_pkt->isLLSC() ? MemCmd::SCUpgradeReq : MemCmd::UpgradeReq; } else if (cpu_pkt->cmd == MemCmd::SCUpgradeFailReq || cpu_pkt->cmd == MemCmd::StoreCondFailReq) { @@ -590,7 +592,7 @@ Cache::handleAtomicReqMiss(PacketPtr pkt, CacheBlk *&blk, bus_pkt->print()); #if TRACING_ON - CacheBlk::State old_state = blk ? blk->status : 0; + const std::string old_state = blk ? blk->print() : ""; #endif Cycles latency = ticksToCycles(memSidePort.sendAtomic(bus_pkt)); @@ -598,7 +600,7 @@ Cache::handleAtomicReqMiss(PacketPtr pkt, CacheBlk *&blk, bool is_invalidate = bus_pkt->isInvalidate(); // We are now dealing with the response handling - DPRINTF(Cache, "%s: Receive response: %s in state %i\n", __func__, + DPRINTF(Cache, "%s: Receive response: %s for %s\n", __func__, bus_pkt->print(), old_state); // If packet was a forward, the response (if any) is already @@ -722,7 +724,7 @@ Cache::serviceMSHRTargets(MSHR *mshr, const PacketPtr pkt, CacheBlk *blk) // between the PrefetchExReq and the expected WriteReq, we // proactively mark the block as Dirty. assert(blk); - blk->status |= BlkDirty; + blk->setCoherenceBits(CacheBlk::DirtyBit); panic_if(isReadOnly, "Prefetch exclusive requests from " "read-only cache %s\n", name()); @@ -745,7 +747,7 @@ Cache::serviceMSHRTargets(MSHR *mshr, const PacketPtr pkt, CacheBlk *blk) if (tgt_pkt->cmd == MemCmd::WriteLineReq) { assert(!is_error); assert(blk); - assert(blk->isWritable()); + assert(blk->isSet(CacheBlk::WritableBit)); } // Here we decide whether we will satisfy the target using @@ -774,9 +776,9 @@ Cache::serviceMSHRTargets(MSHR *mshr, const PacketPtr pkt, CacheBlk *blk) assert(!tgt_pkt->req->isUncacheable()); - assert(tgt_pkt->req->masterId() < system->maxMasters()); + assert(tgt_pkt->req->requestorId() < system->maxRequestors()); stats.cmdStats(tgt_pkt) - .missLatency[tgt_pkt->req->masterId()] += + .missLatency[tgt_pkt->req->requestorId()] += completion_time - target.recvTime; } else if (pkt->cmd == MemCmd::UpgradeFailResp) { // failed StoreCond upgrade @@ -850,7 +852,7 @@ Cache::serviceMSHRTargets(MSHR *mshr, const PacketPtr pkt, CacheBlk *blk) case MSHR::Target::FromPrefetcher: assert(tgt_pkt->cmd == MemCmd::HardPFReq); if (blk) - blk->status |= BlkHWPrefetched; + blk->setPrefetched(); delete tgt_pkt; break; @@ -888,7 +890,7 @@ Cache::serviceMSHRTargets(MSHR *mshr, const PacketPtr pkt, CacheBlk *blk) if (is_invalidate || mshr->hasPostInvalidate()) { invalidateBlock(blk); } else if (mshr->hasPostDowngrade()) { - blk->status &= ~BlkWritable; + blk->clearCoherenceBits(CacheBlk::WritableBit); } } } @@ -896,7 +898,7 @@ Cache::serviceMSHRTargets(MSHR *mshr, const PacketPtr pkt, CacheBlk *blk) PacketPtr Cache::evictBlock(CacheBlk *blk) { - PacketPtr pkt = (blk->isDirty() || writebackClean) ? + PacketPtr pkt = (blk->isSet(CacheBlk::DirtyBit) || writebackClean) ? 
         writebackBlk(blk) : cleanEvictBlk(blk);
 
     invalidateBlock(blk);
@@ -908,16 +910,16 @@ PacketPtr
 Cache::cleanEvictBlk(CacheBlk *blk)
 {
     assert(!writebackClean);
-    assert(blk && blk->isValid() && !blk->isDirty());
+    assert(blk && blk->isValid() && !blk->isSet(CacheBlk::DirtyBit));
 
     // Creating a zero sized write, a message to the snoop filter
     RequestPtr req = std::make_shared<Request>(
-        regenerateBlkAddr(blk), blkSize, 0, Request::wbMasterId);
+        regenerateBlkAddr(blk), blkSize, 0, Request::wbRequestorId);
 
     if (blk->isSecure())
         req->setFlags(Request::SECURE);
 
-    req->taskId(blk->task_id);
+    req->taskId(blk->getTaskId());
 
     PacketPtr pkt = new Packet(req, MemCmd::CleanEvict);
     pkt->allocate();
@@ -992,7 +994,7 @@ Cache::handleSnoop(PacketPtr pkt, CacheBlk *blk, bool is_timing,
     // responds in atomic mode, so remember a few things about the
    // original packet up front
     bool invalidate = pkt->isInvalidate();
-    bool M5_VAR_USED needs_writable = pkt->needsWritable();
+    M5_VAR_USED bool needs_writable = pkt->needsWritable();
 
     // at the moment we could get an uncacheable write which does not
     // have the invalidate flag, and we need a suitable way of dealing
@@ -1006,7 +1008,7 @@ Cache::handleSnoop(PacketPtr pkt, CacheBlk *blk, bool is_timing,
     if (forwardSnoops) {
         // first propagate snoop upward to see if anyone above us wants to
         // handle it. save & restore packet src since it will get
-        // rewritten to be relative to cpu-side bus (if any)
+        // rewritten to be relative to CPU-side bus (if any)
         if (is_timing) {
             // copy the packet so that we can clear any flags before
             // forwarding it upwards, we also allocate data (passing
@@ -1026,7 +1028,7 @@ Cache::handleSnoop(PacketPtr pkt, CacheBlk *blk, bool is_timing,
 
             // If this request is a prefetch or clean evict and an upper level
             // signals block present, make sure to propagate the block
-            // presence to the requester.
+            // presence to the requestor.
             if (snoopPkt.isBlockCached()) {
                 pkt->setBlockCached();
             }
@@ -1044,7 +1046,7 @@ Cache::handleSnoop(PacketPtr pkt, CacheBlk *blk, bool is_timing,
             cpuSidePort.sendAtomicSnoop(pkt);
             if (!already_responded && pkt->cacheResponding()) {
                 // cache-to-cache response from some upper cache:
-                // forward response to original requester
+                // forward response to original requestor
                 assert(pkt->isResponse());
             }
         }
@@ -1053,10 +1055,11 @@ Cache::handleSnoop(PacketPtr pkt, CacheBlk *blk, bool is_timing,
     bool respond = false;
     bool blk_valid = blk && blk->isValid();
     if (pkt->isClean()) {
-        if (blk_valid && blk->isDirty()) {
+        if (blk_valid && blk->isSet(CacheBlk::DirtyBit)) {
             DPRINTF(CacheVerbose, "%s: packet (snoop) %s found block: %s\n",
                     __func__, pkt->print(), blk->print());
-            PacketPtr wb_pkt = writecleanBlk(blk, pkt->req->getDest(), pkt->id);
+            PacketPtr wb_pkt =
+                writecleanBlk(blk, pkt->req->getDest(), pkt->id);
             PacketList writebacks;
             writebacks.push_back(wb_pkt);
 
@@ -1098,10 +1101,11 @@ Cache::handleSnoop(PacketPtr pkt, CacheBlk *blk, bool is_timing,
     // invalidation itself is taken care of below. We don't respond to
     // cache maintenance operations as this is done by the destination
     // xbar.
- respond = blk->isDirty() && pkt->needsResponse(); + respond = blk->isSet(CacheBlk::DirtyBit) && pkt->needsResponse(); - chatty_assert(!(isReadOnly && blk->isDirty()), "Should never have " - "a dirty block in a read-only cache %s\n", name()); + chatty_assert(!(isReadOnly && blk->isSet(CacheBlk::DirtyBit)), + "Should never have a dirty block in a read-only cache %s\n", + name()); } // Invalidate any prefetch's from below that would strip write permissions @@ -1125,8 +1129,9 @@ Cache::handleSnoop(PacketPtr pkt, CacheBlk *blk, bool is_timing, // which means we go from Modified to Owned (and will respond // below), remain in Owned (and will respond below), from // Exclusive to Shared, or remain in Shared - if (!pkt->req->isUncacheable()) - blk->status &= ~BlkWritable; + if (!pkt->req->isUncacheable()) { + blk->clearCoherenceBits(CacheBlk::WritableBit); + } DPRINTF(Cache, "new state is %s\n", blk->print()); } @@ -1135,7 +1140,7 @@ Cache::handleSnoop(PacketPtr pkt, CacheBlk *blk, bool is_timing, // memory, and also prevent any memory from even seeing the // request pkt->setCacheResponding(); - if (!pkt->isClean() && blk->isWritable()) { + if (!pkt->isClean() && blk->isSet(CacheBlk::WritableBit)) { // inform the cache hierarchy that this cache had the line // in the Modified state so that we avoid unnecessary // invalidations (see Packet::setResponderHadWritable) @@ -1391,7 +1396,7 @@ Cache::sendMSHRQueuePacket(MSHR* mshr) // prefetchSquash first may result in the MSHR being // prematurely deallocated. if (snoop_pkt.cacheResponding()) { - auto M5_VAR_USED r = outstandingSnoop.insert(snoop_pkt.req); + M5_VAR_USED auto r = outstandingSnoop.insert(snoop_pkt.req); assert(r.second); // if we are getting a snoop response with no sharers it @@ -1426,12 +1431,3 @@ Cache::sendMSHRQueuePacket(MSHR* mshr) return BaseCache::sendMSHRQueuePacket(mshr); } - -Cache* -CacheParams::create() -{ - assert(tags); - assert(replacement_policy); - - return new Cache(this); -}
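
Note on the coherence-bit conversion: most hunks above mechanically replace
direct manipulation of blk->status (BlkDirty, BlkWritable, BlkHWPrefetched)
with the named accessors isSet(), setCoherenceBits(), clearCoherenceBits()
and setPrefetched(). A minimal standalone sketch of the pattern, using only
names that appear in this diff; the class body here is illustrative, not the
real gem5 CacheBlk:

    #include <cassert>

    // Illustrative stand-in for gem5's CacheBlk; only the accessor
    // pattern exercised by this diff is modelled here.
    class CacheBlk
    {
      public:
        enum CoherenceBits : unsigned
        {
            WritableBit = 0x01, // block may be written without an upgrade
            DirtyBit    = 0x02, // block differs from backing memory
        };

        // true iff every bit in 'bits' is set
        bool isSet(unsigned bits) const { return (coherence & bits) == bits; }
        void setCoherenceBits(unsigned bits) { coherence |= bits; }
        void clearCoherenceBits(unsigned bits) { coherence &= ~bits; }

      private:
        unsigned coherence = 0; // replaces the old open-coded 'status' flags
    };

    int main()
    {
        CacheBlk blk;
        // fill in Modified state: writable and dirty
        blk.setCoherenceBits(CacheBlk::WritableBit | CacheBlk::DirtyBit);
        // snoop hit on a read: Modified -> Owned, i.e. drop writability
        // but keep the dirty data (mirrors the handleSnoop hunk above)
        blk.clearCoherenceBits(CacheBlk::WritableBit);
        assert(blk.isSet(CacheBlk::DirtyBit));
        assert(!blk.isSet(CacheBlk::WritableBit));
        return 0;
    }

Compared with the open-coded `blk->status &= ~BlkDirty`, the accessors keep
the flag arithmetic in one place, so call sites such as satisfyRequest() read
as intent rather than bit twiddling.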
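
Note on the requestor renaming: masterId() -> requestorId(), maxMasters() ->
maxRequestors(), Request::wbMasterId -> Request::wbRequestorId, and the stat
mshr_uncacheable -> mshrUncacheable are all one mechanical rename. The stats
touched here are per-requestor vectors indexed by ID; a sketch of that shape,
where MissStats and recordMiss are hypothetical names and the guarded index
mirrors the assert in the serviceMSHRTargets hunk above:

    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Hypothetical per-requestor statistics container: one counter slot
    // per requestor ID that the system hands out.
    struct MissStats
    {
        explicit MissStats(std::size_t max_requestors)
            : missLatency(max_requestors, 0) {}

        void recordMiss(std::size_t requestor_id, std::uint64_t latency_ticks)
        {
            // mirrors: assert(tgt_pkt->req->requestorId() <
            //                 system->maxRequestors());
            assert(requestor_id < missLatency.size());
            missLatency[requestor_id] += latency_ticks;
        }

        std::vector<std::uint64_t> missLatency; // indexed by requestor ID
    };

    int main()
    {
        MissStats stats(4);       // a system with four requestors
        stats.recordMiss(2, 200); // e.g. completion_time - recvTime
        assert(stats.missLatency[2] == 200);
        return 0;
    }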
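
Note on the M5_VAR_USED hunks: they only move the macro to the front of the
declaration. Assuming it expands to an unused-variable attribute (the define
below is an assumption for illustration, not gem5's actual header), the
leading position works uniformly for plain types and for `auto` declarations
like the one in sendMSHRQueuePacket, and matches where a standard
[[maybe_unused]] would go:

    #include <cassert>

    // Assumed expansion; gem5 defines the real macro in its own headers.
    #define M5_VAR_USED __attribute__((unused))

    bool sendTimingReq() { return true; }

    int main()
    {
        // the variable only feeds an assert, which vanishes under NDEBUG;
        // the attribute keeps release builds free of unused-variable warnings
        M5_VAR_USED bool success = sendTimingReq();
        assert(success);
        return 0;
    }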
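
Note on the CacheParams change: the constructor now takes
`const CacheParams &p` instead of a pointer, and the two sanity asserts
migrate from the deleted CacheParams::create() into the constructor body.
The diff does not show where object construction moved, so the following
only sketches the by-reference idiom; `Params` and its members are
hypothetical stand-ins for the generated CacheParams:

    #include <cassert>

    struct BaseTags {};          // hypothetical stand-ins for the real
    struct ReplacementPolicy {}; // tag store and replacement-policy types

    struct Params // hypothetical; the real CacheParams is generated
    {
        BaseTags *tags = nullptr;
        ReplacementPolicy *replacement_policy = nullptr;
        unsigned cache_line_size = 64;
    };

    class Cache
    {
      public:
        // Taking the params by reference removes the null-pointer state
        // entirely; validity checks now concern the members, not 'p'.
        explicit Cache(const Params &p)
            : blkSize(p.cache_line_size)
        {
            // moved here from the deleted CacheParams::create()
            assert(p.tags);
            assert(p.replacement_policy);
        }

        unsigned blockSize() const { return blkSize; }

      private:
        unsigned blkSize;
    };

    int main()
    {
        BaseTags tags;
        ReplacementPolicy rp;
        Params p;
        p.tags = &tags;
        p.replacement_policy = &rp;
        Cache cache(p);
        assert(cache.blockSize() == 64);
        return 0;
    }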