tag_prefetch = Param.Bool(True, "Tag prefetch with PC of generating access")
+ # The throttle_control_percentage controls how many of the candidate
+ # addresses generated by the prefetcher will be finally turned into
+ # prefetch requests
+ # - If set to 100, all candidates can be discarded (one request
+ # will always be allowed to be generated)
+ # - Setting it to 0 will disable the throttle control, so requests are
+ # created for all candidates
+ # - If set to 60, 40% of candidates will generate a request, and the
+ # remaining 60% will be generated depending on the current accuracy
+ throttle_control_percentage = Param.Percent(0, "Percentage of requests \
+ that can be throttled depending on the accuracy of the prefetcher.")
+
class StridePrefetcher(QueuedPrefetcher):
type = 'StridePrefetcher'
cxx_class = 'StridePrefetcher'
p->max_prefetch_requests_with_pending_translation),
latency(p->latency), queueSquash(p->queue_squash),
queueFilter(p->queue_filter), cacheSnoop(p->cache_snoop),
- tagPrefetch(p->tag_prefetch)
+ tagPrefetch(p->tag_prefetch),
+ throttleControlPct(p->throttle_control_percentage)
{
}
}
}
+size_t
+QueuedPrefetcher::getMaxPermittedPrefetches(size_t total) const
+{
+    /**
+     * Throttle generated prefetches based on the accuracy of the prefetcher.
+     * Accuracy is computed based on the ratio of useful prefetches with
+     * respect to the number of issued prefetches.
+     *
+     * The throttleControlPct controls how many of the candidate addresses
+     * generated by the prefetcher will be finally turned into prefetch
+     * requests
+     * - If set to 100, all candidates can be discarded (one request
+     *   will always be allowed to be generated)
+     * - Setting it to 0 will disable the throttle control, so requests are
+     *   created for all candidates
+     * - If set to 60, 40% of candidates will generate a request, and the
+     *   remaining 60% will be generated depending on the current accuracy
+     */
+
+    size_t max_pfs = total;
+    // Until at least one prefetch has been issued there is no accuracy
+    // information, so all candidates are permitted.
+    if (total > 0 && issuedPrefetches > 0) {
+        // Portion of the candidates that is subject to throttling.
+        size_t throttle_pfs = (total * throttleControlPct) / 100;
+        // Unthrottled portion; floor of 1 so at least one request is
+        // always allowed to be generated.
+        size_t min_pfs = (total - throttle_pfs) == 0 ?
+            1 : (total - throttle_pfs);
+        // Scale the throttled portion by the observed accuracy
+        // (usefulPrefetches / issuedPrefetches).
+        max_pfs = min_pfs + (total - min_pfs) *
+            usefulPrefetches / issuedPrefetches;
+    }
+    return max_pfs;
+}
+
void
QueuedPrefetcher::notify(const PacketPtr &pkt, const PrefetchInfo &pfi)
{
std::vector<AddrPriority> addresses;
calculatePrefetch(pfi, addresses);
+    // Get the maximum number of prefetches that we are allowed to generate
+ size_t max_pfs = getMaxPermittedPrefetches(addresses.size());
+
// Queue up generated prefetches
+ size_t num_pfs = 0;
for (AddrPriority& addr_prio : addresses) {
// Block align prefetch address
"inserting into prefetch queue.\n", new_pfi.getAddr());
// Create and insert the request
insert(pkt, new_pfi, addr_prio.second);
+ num_pfs += 1;
+ if (num_pfs == max_pfs) {
+ break;
+ }
} else {
DPRINTF(HWPrefetch, "Ignoring page crossing prefetch.\n");
}
/** Tag prefetch with PC of generating access? */
const bool tagPrefetch;
+ /** Percentage of requests that can be throttled */
+ const unsigned int throttleControlPct;
+
// STATS
Stats::Scalar pfIdentified;
Stats::Scalar pfBufferHit;
bool alreadyInQueue(std::list<DeferredPacket> &queue,
const PrefetchInfo &pfi, int32_t priority);
+    /**
+     * Returns the maximum number of prefetch requests that are allowed
+     * to be created from the number of prefetch candidates provided.
+     * The behavior of this service is controlled with the throttleControlPct
+     * parameter.
+     * @param total number of prefetch candidates generated by the prefetcher
+     * @return the number of candidates that are allowed to turn into
+     *         prefetch requests
+     */
+ size_t getMaxPermittedPrefetches(size_t total) const;
+
RequestPtr createPrefetchRequest(Addr addr, PrefetchInfo const &pfi,
PacketPtr pkt);
};