onWrite(p->on_write), onData(p->on_data), onInst(p->on_inst),
masterId(p->sys->getMasterId(this)), pageBytes(p->sys->getPageBytes()),
prefetchOnAccess(p->prefetch_on_access),
- useVirtualAddresses(p->use_virtual_addresses), issuedPrefetches(0),
+ useVirtualAddresses(p->use_virtual_addresses),
+ prefetchStats(this), issuedPrefetches(0),
usefulPrefetches(0), tlb(nullptr)
{
}
blkSize = cache->getBlockSize();
lBlkSize = floorLog2(blkSize);
}
-
-void
-Base::regStats()
+Base::StatGroup::StatGroup(Stats::Group *parent)
+ : Stats::Group(parent),
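+ // ADD_STAT registers each statistic with this group, using the member name as the stat name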
+ ADD_STAT(pfIssued, "number of hwpf issued")
{
- ClockedObject::regStats();
-
- pfIssued
- .name(name() + ".num_hwpf_issued")
- .desc("number of hwpf issued")
- ;
-
}
+
bool
Base::observeAccess(const PacketPtr &pkt, bool miss) const
{
#include <cstdint>
#include "arch/generic/tlb.hh"
+#include "arch/isa_traits.hh"
#include "base/statistics.hh"
#include "base/types.hh"
#include "mem/packet.hh"
Addr pageOffset(Addr a) const;
/** Build the address of the i-th block inside the page */
Addr pageIthBlockAddress(Addr page, uint32_t i) const;
-
- Stats::Scalar pfIssued;
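+ /** Prefetcher statistics, registered as a Stats::Group child of this object */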
+ struct StatGroup : public Stats::Group
+ {
+ StatGroup(Stats::Group *parent);
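+ /** Number of hardware prefetches issued */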
+ Stats::Scalar pfIssued;
+ } prefetchStats;
/** Total prefetches issued */
uint64_t issuedPrefetches;
virtual Tick nextPrefetchReadyTime() const = 0;
- /**
- * Register local statistics.
- */
- void regStats() override;
/**
* Register probe points for this object.
latency(p->latency), queueSquash(p->queue_squash),
queueFilter(p->queue_filter), cacheSnoop(p->cache_snoop),
tagPrefetch(p->tag_prefetch),
- throttleControlPct(p->throttle_control_percentage)
+ throttleControlPct(p->throttle_control_percentage), statsQueued(this)
{
}
addr_prio.first = blockAddress(addr_prio.first);
if (!samePage(addr_prio.first, pfi.getAddr())) {
- pfSpanPage += 1;
+ statsQueued.pfSpanPage += 1;
}
bool can_cross_page = (tlb != nullptr);
if (can_cross_page || samePage(addr_prio.first, pfi.getAddr())) {
PrefetchInfo new_pfi(pfi, addr_prio.first);
- pfIdentified++;
+ statsQueued.pfIdentified++;
DPRINTF(HWPrefetch, "Found a pf candidate addr: %#x, "
"inserting into prefetch queue.\n", new_pfi.getAddr());
// Create and insert the request
PacketPtr pkt = pfq.front().pkt;
pfq.pop_front();
- pfIssued++;
+ prefetchStats.pfIssued++;
issuedPrefetches += 1;
assert(pkt != nullptr);
DPRINTF(HWPrefetch, "Generating prefetch for %#x.\n", pkt->getAddr());
processMissingTranslations(queueSize - pfq.size());
return pkt;
}
-
-void
-Queued::regStats()
+Queued::QueuedStats::QueuedStats(Stats::Group *parent)
+ : Stats::Group(parent),
+ ADD_STAT(pfIdentified, "number of prefetch candidates identified"),
+ ADD_STAT(pfBufferHit,
+ "number of redundant prefetches already in prefetch queue"),
+ ADD_STAT(pfInCache,
+ "number of redundant prefetches already in cache/mshr dropped"),
+ ADD_STAT(pfRemovedFull,
+ "number of prefetches dropped due to prefetch queue size"),
+ ADD_STAT(pfSpanPage, "number of prefetches that crossed the page")
{
- Base::regStats();
-
- pfIdentified
- .name(name() + ".pfIdentified")
- .desc("number of prefetch candidates identified");
-
- pfBufferHit
- .name(name() + ".pfBufferHit")
- .desc("number of redundant prefetches already in prefetch queue");
-
- pfInCache
- .name(name() + ".pfInCache")
- .desc("number of redundant prefetches already in cache/mshr dropped");
-
- pfRemovedFull
- .name(name() + ".pfRemovedFull")
- .desc("number of prefetches dropped due to prefetch queue size");
-
- pfSpanPage
- .name(name() + ".pfSpanPage")
- .desc("number of prefetches that crossed the page");
}
// check if this prefetch is already redundant
if (cacheSnoop && (inCache(target_paddr, it->pfInfo.isSecure()) ||
inMissQueue(target_paddr, it->pfInfo.isSecure()))) {
- pfInCache++;
+ statsQueued.pfInCache++;
DPRINTF(HWPrefetch, "Dropping redundant in "
"cache/MSHR prefetch addr:%#x\n", target_paddr);
} else {
/* If the address is already in the queue, update priority and leave */
if (it != queue.end()) {
- pfBufferHit++;
+ statsQueued.pfBufferHit++;
if (it->priority < priority) {
/* Update priority value and position in the queue */
it->priority = priority;
if (has_target_pa && cacheSnoop &&
(inCache(target_paddr, new_pfi.isSecure()) ||
inMissQueue(target_paddr, new_pfi.isSecure()))) {
- pfInCache++;
+ statsQueued.pfInCache++;
DPRINTF(HWPrefetch, "Dropping redundant in "
"cache/MSHR prefetch addr:%#x\n", target_paddr);
return;
{
/* Verify prefetch buffer space for request */
if (queue.size() == queueSize) {
- pfRemovedFull++;
+ statsQueued.pfRemovedFull++;
/* Lowest priority packet */
iterator it = queue.end();
panic_if (it == queue.begin(),
/** Percentage of requests that can be throttled */
const unsigned int throttleControlPct;
- // STATS
- Stats::Scalar pfIdentified;
- Stats::Scalar pfBufferHit;
- Stats::Scalar pfInCache;
- Stats::Scalar pfRemovedFull;
- Stats::Scalar pfSpanPage;
-
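+ /** Statistics collected by queued prefetchers */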
+ struct QueuedStats : public Stats::Group
+ {
+ QueuedStats(Stats::Group *parent);
+ /** Prefetch candidates identified by the prefetching algorithm */
+ Stats::Scalar pfIdentified;
+ /** Redundant prefetches that already hit in the prefetch queue */
+ Stats::Scalar pfBufferHit;
+ /** Redundant prefetches dropped because the line was already in the cache or MSHRs */
+ Stats::Scalar pfInCache;
+ /** Prefetches dropped because the prefetch queue was full */
+ Stats::Scalar pfRemovedFull;
+ /** Prefetches whose address crossed a page boundary */
+ Stats::Scalar pfSpanPage;
+ } statsQueued;
public:
using AddrPriority = std::pair<Addr, int32_t>;
return pfq.empty() ? MaxTick : pfq.front().tick;
}
- void regStats() override;
-
private:
/**