size = '1MB'
assoc = 16
write_buffers = 8
- # Simple stride prefetcher
- prefetch_policy = 'stride'
prefetch_on_access = 'true'
- prefetch_latency = '1.0ns'
- prefetch_degree = 8
-
+ # Simple stride prefetcher
+ prefetcher = StridePrefetcher(degree=8, latency='1.0ns')
from m5.params import *
from m5.proxy import Self
from MemObject import MemObject
+from Prefetcher import BasePrefetcher
-class Prefetch(Enum): vals = ['none', 'tagged', 'stride', 'ghb']
class BaseCache(MemObject):
type = 'BaseCache'
write_buffers = Param.Int(8, "number of write buffers")
prefetch_on_access = Param.Bool(False,
"notify the hardware prefetcher on every access (not just misses)")
- prefetcher_size = Param.Int(100,
- "Number of entries in the hardware prefetch queue")
- prefetch_past_page = Param.Bool(False,
- "Allow prefetches to cross virtual page boundaries")
- prefetch_serial_squash = Param.Bool(False,
- "Squash prefetches with a later time on a subsequent miss")
- prefetch_degree = Param.Int(1,
- "Degree of the prefetch depth")
- prefetch_latency = Param.Latency(10 * Self.latency,
- "Latency of the prefetcher")
- prefetch_policy = Param.Prefetch('none',
- "Type of prefetcher to use")
- prefetch_use_cpu_id = Param.Bool(True,
- "Use the CPU ID to separate calculations of prefetches")
- prefetch_data_accesses_only = Param.Bool(False,
- "Only prefetch on data not on instruction accesses")
+ prefetcher = Param.BasePrefetcher(NULL,"Prefetcher attached to cache")
cpu_side = Port("Port on side closer to CPU")
mem_side = Port("Port on side closer to MEM")
addr_range = Param.AddrRange(AllMemory, "The address range for the CPU-side port")
#include <vector>
#include "config/the_isa.hh"
-#include "enums/Prefetch.hh"
#include "mem/cache/base.hh"
#include "mem/cache/cache.hh"
#include "mem/config/cache.hh"
#include "mem/cache/tags/iic.hh"
#endif
-//Prefetcher Headers
-#include "mem/cache/prefetch/ghb.hh"
-#include "mem/cache/prefetch/stride.hh"
-#include "mem/cache/prefetch/tagged.hh"
using namespace std;
#define BUILD_CACHE(TAGS, tags) \
do { \
- BasePrefetcher *pf; \
- if (prefetch_policy == Enums::tagged) { \
- pf = new TaggedPrefetcher(this); \
- } \
- else if (prefetch_policy == Enums::stride) { \
- pf = new StridePrefetcher(this); \
- } \
- else if (prefetch_policy == Enums::ghb) { \
- pf = new GHBPrefetcher(this); \
- } \
- else { \
- pf = NULL; \
- } \
Cache<TAGS> *retval = \
- new Cache<TAGS>(this, tags, pf); \
+ new Cache<TAGS>(this, tags); \
return retval; \
} while (0)
public:
/** Instantiates a basic cache object. */
- Cache(const Params *p, TagStore *tags, BasePrefetcher *prefetcher);
+ Cache(const Params *p, TagStore *tags);
virtual Port *getPort(const std::string &if_name, int idx = -1);
#include "sim/sim_exit.hh"
template<class TagStore>
-Cache<TagStore>::Cache(const Params *p, TagStore *tags, BasePrefetcher *pf)
+Cache<TagStore>::Cache(const Params *p, TagStore *tags)
: BaseCache(p),
tags(tags),
- prefetcher(pf),
+ prefetcher(p->prefetcher),
doFastWrites(true),
prefetchOnAccess(p->prefetch_on_access)
{
{
BaseCache::regStats();
tags->regStats(name());
- if (prefetcher)
- prefetcher->regStats(name());
}
template<class TagStore>
--- /dev/null
+from m5.SimObject import SimObject
+from m5.params import *
+# Abstract base SimObject holding the parameters common to every hardware
+# prefetcher.  These replace the old prefetch_* parameters on BaseCache;
+# concrete subclasses below select the C++ implementation class.
+class BasePrefetcher(SimObject):
+    type = 'BasePrefetcher'
+    # Abstract: only instantiable via a concrete subclass.
+    abstract = True
+    size = Param.Int(100,
+        "Number of entries in the hardware prefetch queue")
+    cross_pages = Param.Bool(False,
+        "Allow prefetches to cross virtual page boundaries")
+    serial_squash = Param.Bool(False,
+        "Squash prefetches with a later time on a subsequent miss")
+    degree = Param.Int(1,
+        "Degree of the prefetch depth")
+    # NOTE(review): '10t' is a 10-tick literal, whereas the removed BaseCache
+    # param defaulted to 10 * Self.latency -- confirm the change is intended.
+    latency = Param.Latency('10t',
+        "Latency of the prefetcher")
+    use_cpu_id = Param.Bool(True,
+        "Use the CPU ID to separate calculations of prefetches")
+    data_accesses_only = Param.Bool(False,
+        "Only prefetch on data not on instruction accesses")
+
+# Global-history-buffer prefetcher; parameters inherited from BasePrefetcher.
+class GHBPrefetcher(BasePrefetcher):
+    type = 'GHBPrefetcher'
+    cxx_class = 'GHBPrefetcher'
+
+# Stride-detecting prefetcher; parameters inherited from BasePrefetcher.
+class StridePrefetcher(BasePrefetcher):
+    type = 'StridePrefetcher'
+    cxx_class = 'StridePrefetcher'
+
+# Tagged (next-line style) prefetcher; parameters inherited from BasePrefetcher.
+class TaggedPrefetcher(BasePrefetcher):
+    type = 'TaggedPrefetcher'
+    cxx_class = 'TaggedPrefetcher'
+
+
+
+
if env['TARGET_ISA'] == 'no':
Return()
+SimObject('Prefetcher.py')
Source('base.cc')
Source('ghb.cc')
#include "mem/cache/base.hh"
#include "mem/request.hh"
-BasePrefetcher::BasePrefetcher(const BaseCacheParams *p)
-    : size(p->prefetcher_size), pageStop(!p->prefetch_past_page),
-      serialSquash(p->prefetch_serial_squash),
-      onlyData(p->prefetch_data_accesses_only)
+// Now a full SimObject: configuration comes from the prefetcher's own
+// BasePrefetcherParams rather than from the owning cache's params, and
+// latency/degree/useContextId move here from the per-policy subclasses.
+BasePrefetcher::BasePrefetcher(const Params *p)
+    : SimObject(p), size(p->size), latency(p->latency), degree(p->degree),
+      useContextId(p->use_cpu_id), pageStop(!p->cross_pages),
+      serialSquash(p->serial_squash), onlyData(p->data_accesses_only)
{
}
{
cache = _cache;
blkSize = cache->getBlockSize();
- _name = cache->name() + "-pf";
}
void
-BasePrefetcher::regStats(const std::string &name)
+BasePrefetcher::regStats()
{
pfIdentified
- .name(name + ".prefetcher.num_hwpf_identified")
+ .name(name() + ".prefetcher.num_hwpf_identified")
.desc("number of hwpf identified")
;
pfMSHRHit
- .name(name + ".prefetcher.num_hwpf_already_in_mshr")
+ .name(name() + ".prefetcher.num_hwpf_already_in_mshr")
.desc("number of hwpf that were already in mshr")
;
pfCacheHit
- .name(name + ".prefetcher.num_hwpf_already_in_cache")
+ .name(name() + ".prefetcher.num_hwpf_already_in_cache")
.desc("number of hwpf that were already in the cache")
;
pfBufferHit
- .name(name + ".prefetcher.num_hwpf_already_in_prefetcher")
+ .name(name() + ".prefetcher.num_hwpf_already_in_prefetcher")
.desc("number of hwpf that were already in the prefetch queue")
;
pfRemovedFull
- .name(name + ".prefetcher.num_hwpf_evicted")
+ .name(name() + ".prefetcher.num_hwpf_evicted")
.desc("number of hwpf removed due to no buffer left")
;
pfRemovedMSHR
- .name(name + ".prefetcher.num_hwpf_removed_MSHR_hit")
+ .name(name() + ".prefetcher.num_hwpf_removed_MSHR_hit")
.desc("number of hwpf removed because MSHR allocated")
;
pfIssued
- .name(name + ".prefetcher.num_hwpf_issued")
+ .name(name() + ".prefetcher.num_hwpf_issued")
.desc("number of hwpf issued")
;
pfSpanPage
- .name(name + ".prefetcher.num_hwpf_span_page")
+ .name(name() + ".prefetcher.num_hwpf_span_page")
.desc("number of hwpf spanning a virtual page")
;
pfSquashed
- .name(name + ".prefetcher.num_hwpf_squashed_from_miss")
+ .name(name() + ".prefetcher.num_hwpf_squashed_from_miss")
.desc("number of hwpf that got squashed due to a miss "
"aborting calculation time")
;
{
return roundDown(a, TheISA::VMPageSize) == roundDown(b, TheISA::VMPageSize);
}
+
+
#include "base/statistics.hh"
#include "mem/packet.hh"
#include "params/BaseCache.hh"
+#include "sim/sim_object.hh"
class BaseCache;
-class BasePrefetcher
+// BasePrefetcher is now a SimObject, gaining name(), regStats() and a
+// generated Params struct; the hand-rolled _name member goes away.
+class BasePrefetcher : public SimObject
{
  protected:
    /** The block size of the parent cache. */
    int blkSize;
+    /** The latency before a prefetch is issued */
+    Tick latency;
+
+    /** The number of prefetches to issue */
+    unsigned degree;
+
+    /** If patterns should be found per context id */
+    bool useContextId;
    /** Do we prefetch across page boundaries. */
    bool pageStop;
    /** Do we prefetch on only data reads, or on inst reads as well. */
    bool onlyData;
-    std::string _name;
-
  public:
    Stats::Scalar pfIdentified;
    Stats::Scalar pfSpanPage;
    Stats::Scalar pfSquashed;
-    void regStats(const std::string &name);
+    /** Register stats; the base name now comes from SimObject::name(). */
+    void regStats();
  public:
-
-    BasePrefetcher(const BaseCacheParams *p);
+    typedef BasePrefetcherParams Params;
+    BasePrefetcher(const Params *p);
    virtual ~BasePrefetcher() {}
-    const std::string name() const { return _name; }
-
    void setCache(BaseCache *_cache);
    /**
     * Utility function: are addresses a and b on the same VM page?
     */
    bool samePage(Addr a, Addr b);
-};
-
+  public:
+    /** Typed accessor for this prefetcher's parameter struct. */
+    const Params*
+    params() const
+    {
+        return dynamic_cast<const Params *>(_params);
+    }
+};
#endif //__MEM_CACHE_PREFETCH_BASE_PREFETCHER_HH__
}
}
}
+
+
+// Factory hook invoked by the generated Params class to build the
+// SimObject from its Python configuration.
+GHBPrefetcher*
+GHBPrefetcherParams::create()
+{
+    return new GHBPrefetcher(this);
+}
#define __MEM_CACHE_PREFETCH_GHB_PREFETCHER_HH__
#include "mem/cache/prefetch/base.hh"
+#include "params/GHBPrefetcher.hh"
class GHBPrefetcher : public BasePrefetcher
{
Addr secondLastMissAddr[Max_Contexts];
Addr lastMissAddr[Max_Contexts];
-    Tick latency;
-    int degree;
-    bool useContextId;
-
  public:
-
-    GHBPrefetcher(const BaseCacheParams *p)
-      : BasePrefetcher(p), latency(p->prefetch_latency),
-        degree(p->prefetch_degree), useContextId(p->prefetch_use_cpu_id)
+    // latency/degree/useContextId are now members of, and initialized by,
+    // BasePrefetcher from this prefetcher's own Params.
+    GHBPrefetcher(const Params *p)
+      : BasePrefetcher(p)
    {
    }
tab.push_back(new_entry);
}
}
+
+
+// Factory hook invoked by the generated Params class to build the
+// SimObject from its Python configuration.
+StridePrefetcher*
+StridePrefetcherParams::create()
+{
+    return new StridePrefetcher(this);
+}
#include <climits>
#include "mem/cache/prefetch/base.hh"
+#include "params/StridePrefetcher.hh"
class StridePrefetcher : public BasePrefetcher
{
Addr *lastMissAddr[Max_Contexts];
std::list<StrideEntry*> table[Max_Contexts];
-    Tick latency;
-    int degree;
-    bool useContextId;
  public:
-    StridePrefetcher(const BaseCacheParams *p)
-      : BasePrefetcher(p), latency(p->prefetch_latency),
-        degree(p->prefetch_degree), useContextId(p->prefetch_use_cpu_id)
+    // latency/degree/useContextId are now members of, and initialized by,
+    // BasePrefetcher from this prefetcher's own Params.
+    StridePrefetcher(const Params *p)
+      : BasePrefetcher(p)
    {
    }
#include "mem/cache/prefetch/tagged.hh"
-TaggedPrefetcher::TaggedPrefetcher(const BaseCacheParams *p)
-    : BasePrefetcher(p),
-      latency(p->prefetch_latency), degree(p->prefetch_degree)
+// latency and degree are now initialized by the BasePrefetcher base class
+// from this prefetcher's own Params.
+TaggedPrefetcher::TaggedPrefetcher(const Params *p)
+    : BasePrefetcher(p)
{
}
}
+// Factory hook invoked by the generated Params class to build the
+// SimObject from its Python configuration.
+TaggedPrefetcher*
+TaggedPrefetcherParams::create()
+{
+    return new TaggedPrefetcher(this);
+}
#define __MEM_CACHE_PREFETCH_TAGGED_PREFETCHER_HH__
#include "mem/cache/prefetch/base.hh"
+#include "params/TaggedPrefetcher.hh"
+
class TaggedPrefetcher : public BasePrefetcher
{
- protected:
-
- Tick latency;
- int degree;
-
public:
- TaggedPrefetcher(const BaseCacheParams *p);
+ TaggedPrefetcher(const Params *p);
~TaggedPrefetcher() {}