From: Mitch Hayenga
Date: Tue, 23 Dec 2014 14:31:18 +0000 (-0500)
Subject: mem: Add parameter to reserve MSHR entries for demand access
X-Git-Tag: stable_2015_04_15~45
X-Git-Url: https://git.libre-soc.org/?a=commitdiff_plain;h=6cb58b2bd2ffd19a667e3b9473ff4a0ccfd14c81;p=gem5.git

mem: Add parameter to reserve MSHR entries for demand access

Adds a new parameter that reserves some number of MSHR entries for demand
accesses. This helps prevent prefetchers from taking all MSHRs and forcing
demand requests from the CPU to stall.
---
diff --git a/src/mem/cache/BaseCache.py b/src/mem/cache/BaseCache.py
index 9ffe39981..035decf9a 100644
--- a/src/mem/cache/BaseCache.py
+++ b/src/mem/cache/BaseCache.py
@@ -54,6 +54,7 @@ class BaseCache(MemObject):
     max_miss_count = Param.Counter(0,
         "number of misses to handle before calling exit")
     mshrs = Param.Int("number of MSHRs (max outstanding requests)")
+    demand_mshr_reserve = Param.Int(1, "mshrs to reserve for demand access")
     size = Param.MemorySize("capacity in bytes")
     forward_snoops = Param.Bool(True,
         "forward snoops from mem side to cpu side")
diff --git a/src/mem/cache/base.cc b/src/mem/cache/base.cc
index faa000c09..d89517b9c 100644
--- a/src/mem/cache/base.cc
+++ b/src/mem/cache/base.cc
@@ -68,8 +68,8 @@ BaseCache::CacheSlavePort::CacheSlavePort(const std::string &_name,
 BaseCache::BaseCache(const Params *p)
     : MemObject(p),
       cpuSidePort(nullptr), memSidePort(nullptr),
-      mshrQueue("MSHRs", p->mshrs, 4, MSHRQueue_MSHRs),
-      writeBuffer("write buffer", p->write_buffers, p->mshrs+1000,
+      mshrQueue("MSHRs", p->mshrs, 4, p->demand_mshr_reserve, MSHRQueue_MSHRs),
+      writeBuffer("write buffer", p->write_buffers, p->mshrs+1000, 0,
                   MSHRQueue_WriteBuffer),
       blkSize(p->system->cacheLineSize()),
       hitLatency(p->hit_latency),
diff --git a/src/mem/cache/cache_impl.hh b/src/mem/cache/cache_impl.hh
index f9eacb897..da04cf6f9 100644
--- a/src/mem/cache/cache_impl.hh
+++ b/src/mem/cache/cache_impl.hh
@@ -1841,7 +1841,7 @@ Cache<TagStore>::getNextMSHR()
 
     // fall through... no pending requests.  Try a prefetch.
     assert(!miss_mshr && !write_mshr);
-    if (prefetcher && !mshrQueue.isFull()) {
+    if (prefetcher && mshrQueue.canPrefetch()) {
         // If we have a miss queue slot, we can try a prefetch
         PacketPtr pkt = prefetcher->getPacket();
         if (pkt) {
diff --git a/src/mem/cache/mshr_queue.cc b/src/mem/cache/mshr_queue.cc
index 9146cddf7..cdd6da52c 100644
--- a/src/mem/cache/mshr_queue.cc
+++ b/src/mem/cache/mshr_queue.cc
@@ -52,10 +52,12 @@
 using namespace std;
 
 MSHRQueue::MSHRQueue(const std::string &_label,
-                     int num_entries, int reserve, int _index)
+                     int num_entries, int reserve, int demand_reserve,
+                     int _index)
     : label(_label), numEntries(num_entries + reserve - 1),
-      numReserve(reserve), registers(numEntries),
-      drainManager(NULL), allocated(0), inServiceEntries(0), index(_index)
+      numReserve(reserve), demandReserve(demand_reserve),
+      registers(numEntries), drainManager(NULL), allocated(0),
+      inServiceEntries(0), index(_index)
 {
     for (int i = 0; i < numEntries; ++i) {
         registers[i].queue = this;
diff --git a/src/mem/cache/mshr_queue.hh b/src/mem/cache/mshr_queue.hh
index 7ab3c7e74..7050421fe 100644
--- a/src/mem/cache/mshr_queue.hh
+++ b/src/mem/cache/mshr_queue.hh
@@ -77,6 +77,12 @@ class MSHRQueue : public Drainable
      */
     const int numReserve;
 
+    /**
+     * The number of entries to reserve for future demand accesses.
+     * Prevent prefetcher from taking all mshr entries
+     */
+    const int demandReserve;
+
     /** MSHR storage. */
     std::vector<MSHR> registers;
     /** Holds pointers to all allocated entries. */
@@ -106,9 +112,11 @@ class MSHRQueue : public Drainable
      * @param num_entrys The number of entries in this queue.
      * @param reserve The minimum number of entries needed to satisfy
      * any access.
+     * @param demand_reserve The minimum number of entries needed to satisfy
+     * demand accesses.
      */
     MSHRQueue(const std::string &_label, int num_entries, int reserve,
-              int index);
+              int demand_reserve, int index);
 
     /**
      * Find the first MSHR that matches the provided address.
@@ -217,6 +225,15 @@ class MSHRQueue : public Drainable
         return (allocated > numEntries - numReserve);
     }
 
+    /**
+     * Returns true if sufficient mshrs for prefetch.
+     * @return True if sufficient mshrs for prefetch.
+     */
+    bool canPrefetch() const
+    {
+        return (allocated < numEntries - (numReserve + demandReserve));
+    }
+
     /**
      * Returns the MSHR at the head of the readyList.
      * @return The next request to service.
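
Usage note (not part of the patch itself): a minimal sketch of how the new
parameter might be set from a gem5 Python config script. Only
demand_mshr_reserve is introduced by this change; the cache subclass name
and the other parameter values below are illustrative assumptions.

    # Hypothetical L1 data cache configuration using the new parameter.
    from m5.objects import BaseCache

    class L1DCache(BaseCache):
        size = '32kB'
        assoc = 2
        hit_latency = 2
        response_latency = 2
        mshrs = 16                 # total MSHRs (max outstanding misses)
        tgts_per_mshr = 8
        # MSHRs reserved for demand accesses: prefetches stop allocating
        # MSHRs before the queue fills (canPrefetch() returns false),
        # leaving at least this many entries for demand misses.
        demand_mshr_reserve = 4

Roughly speaking, with this setting at least four MSHRs are always left
usable only by demand misses: prefetch requests are refused once the queue
gets that close to full, while demand accesses can still fill it completely.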