mem-cache: virtual address support for prefetchers
[gem5.git] / src / mem / cache / prefetch / queued.cc
/*
 * Copyright (c) 2014-2015 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Mitch Hayenga
 */

#include "mem/cache/prefetch/queued.hh"

#include <cassert>

#include "base/logging.hh"
#include "base/trace.hh"
#include "debug/HWPrefetch.hh"
#include "mem/request.hh"
#include "params/QueuedPrefetcher.hh"

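// QueuedPrefetcher is the shared base for prefetchers that do not issue
// their candidates immediately but defer them in a priority-ordered queue
// (pfq). Concrete prefetchers (e.g., the stride or tagged variants)
// implement calculatePrefetch(); the cache later drains ready entries
// through getPacket().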
QueuedPrefetcher::QueuedPrefetcher(const QueuedPrefetcherParams *p)
    : BasePrefetcher(p), queueSize(p->queue_size), latency(p->latency),
      queueSquash(p->queue_squash), queueFilter(p->queue_filter),
      cacheSnoop(p->cache_snoop), tagPrefetch(p->tag_prefetch)
{
}

QueuedPrefetcher::~QueuedPrefetcher()
{
    // Delete the queued prefetch packets
    for (DeferredPacket &p : pfq) {
        delete p.pkt;
    }
}

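// Notification entry point for an observed access: optionally squash
// queued prefetches to the same block, ask the concrete prefetcher for
// new candidates via calculatePrefetch(), and hand each block-aligned
// candidate to insert() together with its priority.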
void
QueuedPrefetcher::notify(const PacketPtr &pkt, const PrefetchInfo &pfi)
{
    Addr blk_addr = blockAddress(pfi.getAddr());
    bool is_secure = pfi.isSecure();

    // Squash queued prefetches if demand miss to same line
    if (queueSquash) {
        auto itr = pfq.begin();
        while (itr != pfq.end()) {
            if (itr->pfInfo.getAddr() == blk_addr &&
                itr->pfInfo.isSecure() == is_secure) {
                delete itr->pkt;
                itr = pfq.erase(itr);
            } else {
                ++itr;
            }
        }
    }

    // Calculate prefetches given this access
    std::vector<AddrPriority> addresses;
    calculatePrefetch(pfi, addresses);

    // Queue up generated prefetches
    for (AddrPriority& addr_prio : addresses) {

        // Block align prefetch address
        addr_prio.first = blockAddress(addr_prio.first);

        PrefetchInfo new_pfi(pfi, addr_prio.first);

        pfIdentified++;
        DPRINTF(HWPrefetch, "Found a pf candidate addr: %#x, "
                "inserting into prefetch queue.\n", new_pfi.getAddr());

        // Create and insert the request
        insert(pkt, new_pfi, addr_prio.second);
    }
}

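// Hand the cache the next prefetch to issue: the packet at the head of
// the queue, which is the highest-priority deferred prefetch.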
PacketPtr
QueuedPrefetcher::getPacket()
{
    DPRINTF(HWPrefetch, "Requesting a prefetch to issue.\n");

    if (pfq.empty()) {
        DPRINTF(HWPrefetch, "No hardware prefetches available.\n");
        return nullptr;
    }

    PacketPtr pkt = pfq.front().pkt;
    pfq.pop_front();

    pfIssued++;
    assert(pkt != nullptr);
    DPRINTF(HWPrefetch, "Generating prefetch for %#x.\n", pkt->getAddr());
    return pkt;
}

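// Scan the queue for an entry targeting the same block (and security
// state) as pfi; used by the queue filter to detect redundant candidates.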
QueuedPrefetcher::const_iterator
QueuedPrefetcher::inPrefetch(const PrefetchInfo &pfi) const
{
    for (const_iterator dp = pfq.begin(); dp != pfq.end(); dp++) {
        if (dp->pfInfo.sameAddr(pfi)) return dp;
    }

    return pfq.end();
}

QueuedPrefetcher::iterator
QueuedPrefetcher::inPrefetch(const PrefetchInfo &pfi)
{
    for (iterator dp = pfq.begin(); dp != pfq.end(); dp++) {
        if (dp->pfInfo.sameAddr(pfi)) return dp;
    }

    return pfq.end();
}

void
QueuedPrefetcher::regStats()
{
    BasePrefetcher::regStats();

    pfIdentified
        .name(name() + ".pfIdentified")
        .desc("number of prefetch candidates identified");

    pfBufferHit
        .name(name() + ".pfBufferHit")
        .desc("number of redundant prefetches already in prefetch queue");

    pfInCache
        .name(name() + ".pfInCache")
        .desc("number of redundant prefetches already in cache/mshr dropped");

    pfRemovedFull
        .name(name() + ".pfRemovedFull")
        .desc("number of prefetches dropped due to prefetch queue size");

    pfSpanPage
        .name(name() + ".pfSpanPage")
        .desc("number of prefetches not generated due to page crossing");
}

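// Queue a single candidate: filter duplicates (possibly promoting their
// priority), translate a virtually-trained prediction to a physical
// address, drop blocks already present in the cache or MSHRs, build the
// HardPFReq packet, evict an entry if the queue is full, and finally
// place the new entry according to its priority.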
void
QueuedPrefetcher::insert(const PacketPtr &pkt, PrefetchInfo &new_pfi,
                         int32_t priority)
{
    if (queueFilter) {
        iterator it = inPrefetch(new_pfi);
        /* If the address is already in the queue, update priority and leave */
        if (it != pfq.end()) {
            pfBufferHit++;
            if (it->priority < priority) {
                /* Update priority value and position in the queue */
                it->priority = priority;
                iterator prev = it;
                bool cont = true;
                while (cont && prev != pfq.begin()) {
                    prev--;
                    /* If the packet has higher priority, swap */
                    if (*it > *prev) {
                        std::swap(*it, *prev);
                        it = prev;
                    }
                }
                DPRINTF(HWPrefetch, "Prefetch addr already in "
                        "prefetch queue, priority updated\n");
            } else {
                DPRINTF(HWPrefetch, "Prefetch addr already in "
                        "prefetch queue\n");
            }
            return;
        }
    }

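    // If the prefetcher was trained on virtual addresses, the predicted
    // address is virtual and must be converted before a memory request can
    // be built. The conversion below reuses the translation of the
    // triggering access: the virtual-address delta between the prediction
    // and the training request is applied to that request's physical
    // address. This is only valid while the prediction stays within the
    // same virtual-to-physical mapping (i.e., the same page); page-crossing
    // candidates are expected to be filtered earlier (cf. pfSpanPage).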
    Addr target_addr = new_pfi.getAddr();
    if (useVirtualAddresses) {
        assert(pkt->req->hasPaddr());
        // If we trained with virtual addresses, compute the physical address
        if (new_pfi.getAddr() >= pkt->req->getVaddr()) {
            // positive stride
            target_addr = pkt->req->getPaddr() +
                    (new_pfi.getAddr() - pkt->req->getVaddr());
        } else {
            // negative stride
            target_addr = pkt->req->getPaddr() -
                    (pkt->req->getVaddr() - new_pfi.getAddr());
        }
    }

    if (cacheSnoop && (inCache(target_addr, new_pfi.isSecure()) ||
                inMissQueue(target_addr, new_pfi.isSecure()))) {
        pfInCache++;
        DPRINTF(HWPrefetch, "Dropping redundant in "
                "cache/MSHR prefetch addr:%#x\n", target_addr);
        return;
    }

    /* Create a prefetch memory request */
    RequestPtr pf_req =
        std::make_shared<Request>(target_addr, blkSize, 0, masterId);

    if (new_pfi.isSecure()) {
        pf_req->setFlags(Request::SECURE);
    }
    pf_req->taskId(ContextSwitchTaskId::Prefetcher);
    PacketPtr pf_pkt = new Packet(pf_req, MemCmd::HardPFReq);
    pf_pkt->allocate();
    if (tagPrefetch && new_pfi.hasPC()) {
        // Tag prefetch packet with accessing pc
        pf_pkt->req->setPC(new_pfi.getPC());
    }

    /* Verify prefetch buffer space for request */
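    // pfq is kept sorted with the highest priority at the head, so when the
    // queue is full the victim search starts at the tail (lowest priority)
    // and then walks toward the head across entries of that same priority
    // to pick the one the queue considers oldest.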
    if (pfq.size() == queueSize) {
        pfRemovedFull++;
        /* Lowest priority packet */
        iterator it = pfq.end();
        panic_if(it == pfq.begin(), "Prefetch queue is both full and empty!");
        --it;
        /* Look for oldest in that level of priority */
        panic_if(it == pfq.begin(), "Prefetch queue is full with 1 element!");
        iterator prev = it;
        bool cont = true;
        /* While not at the head of the queue */
        while (cont && prev != pfq.begin()) {
            prev--;
            /* While at the same level of priority */
            cont = prev->priority == it->priority;
            if (cont)
                /* update pointer */
                it = prev;
        }
        DPRINTF(HWPrefetch, "Prefetch queue full, removing lowest priority "
                "oldest packet, addr: %#x\n", it->pfInfo.getAddr());
        delete it->pkt;
        pfq.erase(it);
    }

    Tick pf_time = curTick() + clockPeriod() * latency;
    DPRINTF(HWPrefetch, "Prefetch queued. "
            "addr:%#x priority: %3d tick:%lld.\n",
            target_addr, priority, pf_time);

    /* Create the packet and find the spot to insert it */
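    // Walk from the tail toward the head while the new packet outranks the
    // existing entries, keeping the queue sorted with the highest-priority
    // prefetch at the front.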
    DeferredPacket dpp(new_pfi, pf_time, pf_pkt, priority);
    if (pfq.size() == 0) {
        pfq.emplace_back(dpp);
    } else {
        iterator it = pfq.end();
        do {
            --it;
        } while (it != pfq.begin() && dpp > *it);
        /* If we reach the head, we have to see if the new element is new head
         * or not */
        if (it == pfq.begin() && dpp <= *it)
            it++;
        pfq.insert(it, dpp);
    }
}