/*
 * Copyright (c) 2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Ron Dreslinski
 */

/**
 * @file
 * Hardware Prefetcher Definition.
 */

#include <list>

#include "base/trace.hh"
#include "mem/cache/base_cache.hh"
#include "mem/cache/prefetch/base_prefetcher.hh"
#include "mem/request.hh"

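/**
 * Construct the base prefetcher from the parameters common to all hardware
 * prefetchers.  Note that pageStop is not referenced in this file; it is
 * presumably consumed by derived prefetchers (cf. the pfSpanPage stat).
 *
 * @param size Maximum number of entries in the prefetch queue.
 * @param pageStop Stop prefetching at virtual page boundaries.
 * @param serialSquash Squash queued prefetches when a new miss arrives.
 * @param cacheCheckPush Filter prefetches against the cache when they are
 *                       queued rather than when they are issued.
 * @param onlyData Only prefetch for data (non-instruction) misses.
 */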
BasePrefetcher::BasePrefetcher(int size, bool pageStop, bool serialSquash,
                               bool cacheCheckPush, bool onlyData)
    : size(size), pageStop(pageStop), serialSquash(serialSquash),
      cacheCheckPush(cacheCheckPush), only_data(onlyData)
{
}

void
BasePrefetcher::setCache(BaseCache *_cache)
{
    cache = _cache;
    blkSize = cache->getBlockSize();
}

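// Register the prefetcher's statistics under "<cache name>.prefetcher.*".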
void
BasePrefetcher::regStats(const std::string &name)
{
    pfIdentified
        .name(name + ".prefetcher.num_hwpf_identified")
        .desc("number of hwpf identified")
        ;

    pfMSHRHit
        .name(name + ".prefetcher.num_hwpf_already_in_mshr")
        .desc("number of hwpf that were already in the mshr")
        ;

    pfCacheHit
        .name(name + ".prefetcher.num_hwpf_already_in_cache")
        .desc("number of hwpf that were already in the cache")
        ;

    pfBufferHit
        .name(name + ".prefetcher.num_hwpf_already_in_prefetcher")
        .desc("number of hwpf that were already in the prefetch queue")
        ;

    pfRemovedFull
        .name(name + ".prefetcher.num_hwpf_evicted")
        .desc("number of hwpf removed because the prefetch queue was full")
        ;

    pfRemovedMSHR
        .name(name + ".prefetcher.num_hwpf_removed_MSHR_hit")
        .desc("number of hwpf removed because an MSHR was allocated for the same block")
        ;

    pfIssued
        .name(name + ".prefetcher.num_hwpf_issued")
        .desc("number of hwpf issued")
        ;

    pfSpanPage
        .name(name + ".prefetcher.num_hwpf_span_page")
        .desc("number of hwpf spanning a virtual page")
        ;

    pfSquashed
        .name(name + ".prefetcher.num_hwpf_squashed_from_miss")
        .desc("number of hwpf squashed because a new miss aborted the calculation")
        ;
}

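// Thin wrappers around the cache's lookup functions that also count how
// often a prefetch candidate already resides in the cache or the MSHRs.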
inline bool
BasePrefetcher::inCache(Addr addr)
{
    if (cache->inCache(addr)) {
        pfCacheHit++;
        return true;
    }
    return false;
}

inline bool
BasePrefetcher::inMissQueue(Addr addr)
{
    if (cache->inMissQueue(addr)) {
        pfMSHRHit++;
        return true;
    }
    return false;
}

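// Hand the next queued prefetch to the cache for issue.  When cacheCheckPush
// is false, the cache lookup was deferred to this point, so entries that have
// since become resident in the cache are silently dropped.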
PacketPtr
BasePrefetcher::getPacket()
{
    DPRINTF(HWPrefetch, "%s:Requesting a hw_pf to issue\n", cache->name());

    if (pf.empty()) {
        DPRINTF(HWPrefetch, "%s:No HW_PF found\n", cache->name());
        return NULL;
    }

    PacketPtr pkt;
    bool keepTrying = false;
    do {
        pkt = pf.front();
        pf.pop_front();
        if (!cacheCheckPush) {
            keepTrying = cache->inCache(pkt->getAddr());
        }
        if (pf.empty()) {
            cache->clearMasterRequest(Request_PF);
            if (keepTrying)
                return NULL; // none left, all were already in the cache
        }
    } while (keepTrying);

    pfIssued++;
    return pkt;
}

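// Called on a demand miss.  Removes a queued prefetch that matches the miss,
// optionally squashes in-flight calculations (serialSquash), then asks the
// derived prefetcher (calculatePrefetch) for new candidate addresses and
// queues those that are not already in the cache, the MSHRs, or the queue.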
void
BasePrefetcher::handleMiss(PacketPtr &pkt, Tick time)
{
    if (!pkt->req->isUncacheable() && !(pkt->req->isInstRead() && only_data))
    {
        // Calculate the block address
        Addr blkAddr = pkt->getAddr() & ~(Addr)(blkSize-1);

        // Check if the miss is in the prefetch queue; if so, remove it
        std::list<PacketPtr>::iterator iter = inPrefetch(blkAddr);
        if (iter != pf.end()) {
            DPRINTF(HWPrefetch, "%s:Saw a miss to a queued prefetch, removing it\n", cache->name());
            pfRemovedMSHR++;
            pf.erase(iter);
            if (pf.empty())
                cache->clearMasterRequest(Request_PF);
        }

        // Squash any queued prefetches scheduled at or after this miss.
        // Entries are inserted in time order, so work back from the tail
        // until the queue is empty or an earlier entry is found.  This
        // emulates aborting the in-progress calculation on a new miss,
        // which is needed for serial calculators like the GHB.
        if (serialSquash) {
            while (!pf.empty() && pf.back()->time >= time) {
                pfSquashed++;
                pf.pop_back();
            }
            if (pf.empty())
                cache->clearMasterRequest(Request_PF);
        }

        std::list<Addr> addresses;
        std::list<Tick> delays;
        calculatePrefetch(pkt, addresses, delays);

        std::list<Addr>::iterator addr = addresses.begin();
        std::list<Tick>::iterator delay = delays.begin();
        while (addr != addresses.end())
        {
            DPRINTF(HWPrefetch, "%s:Found a pf candidate, inserting into prefetch queue\n", cache->name());
            // temp calc this here...
            pfIdentified++;
            // Create a prefetch memreq
            Request * prefetchReq = new Request(*addr, blkSize, 0);
            PacketPtr prefetch;
            prefetch = new Packet(prefetchReq, MemCmd::HardPFReq, -1);
            prefetch->allocate();
            prefetch->req->setThreadContext(pkt->req->getCpuNum(),
                                            pkt->req->getThreadNum());

            prefetch->time = time + (*delay); //@todo ADD LATENCY HERE
            //... initialize

            // Check if it is already in the cache
            if (cacheCheckPush) {
                if (cache->inCache(prefetch->getAddr())) {
                    addr++;
                    delay++;
                    continue;
                }
            }

            // Check if it is already in the miss queue
            if (cache->inMissQueue(prefetch->getAddr())) {
                addr++;
                delay++;
                continue;
            }

            // Check if it is already in the prefetch buffer
            if (inPrefetch(prefetch->getAddr()) != pf.end()) {
                pfBufferHit++;
                addr++;
                delay++;
                continue;
            }

            // If the queue is full, just remove the oldest entry
            if (pf.size() >= size)
            {
                DPRINTF(HWPrefetch, "%s:Prefetch queue full, removing oldest entry\n", cache->name());
                pfRemovedFull++;
                pf.pop_front();
            }

            pf.push_back(prefetch);
            prefetch->flags |= CACHE_LINE_FILL;

            // Make sure to request the bus, with the proper delay
            cache->setMasterRequest(Request_PF, prefetch->time);

            // Increment through the list
            addr++;
            delay++;
        }
    }
}

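// Linear search of the prefetch queue for an entry whose block address
// matches the given (block-aligned) address.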
std::list<PacketPtr>::iterator
BasePrefetcher::inPrefetch(Addr address)
{
    // Guaranteed to be at most one match; we always check before inserting
    std::list<PacketPtr>::iterator iter;
    for (iter = pf.begin(); iter != pf.end(); iter++) {
        if (((*iter)->getAddr() & ~(Addr)(blkSize-1)) == address) {
            return iter;
        }
    }
    return pf.end();
}