Backing in more changesets, getting closer to compile
[gem5.git] / src / mem / cache / prefetch / base_prefetcher.cc
1 /*
2 * Copyright (c) 2005 The Regents of The University of Michigan
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 *
28 * Authors: Ron Dreslinski
29 */
30
31 /**
32 * @file
33 * Hardware Prefetcher Definition.
34 */
35
36 #include "base/trace.hh"
37 #include "mem/cache/base_cache.hh"
38 #include "mem/cache/prefetch/base_prefetcher.hh"
39 #include <list>
40
/**
 * Construct a base hardware prefetcher.
 * @param size Maximum number of packets held in the prefetch queue (pf);
 *             when full, the oldest entry is evicted (see handleMiss()).
 * @param pageStop Stop prefetching at virtual page boundaries.
 * @param serialSquash Squash queued prefetches newer than an incoming miss
 *                     (emulates serial calculators such as GHB).
 * @param cacheCheckPush Check the cache at insert time (push) rather than
 *                       at issue time (see getPacket()).
 * @param onlyData Only prefetch for data accesses, not instruction fetches.
 */
BasePrefetcher::BasePrefetcher(int size, bool pageStop, bool serialSquash,
                               bool cacheCheckPush, bool onlyData)
    :size(size), pageStop(pageStop), serialSquash(serialSquash),
     cacheCheckPush(cacheCheckPush), only_data(onlyData)
{
}
47
48 void
49 BasePrefetcher::setCache(BaseCache *_cache)
50 {
51 cache = _cache;
52 blkSize = cache->getBlockSize();
53 }
54
55 void
56 BasePrefetcher::regStats(const std::string &name)
57 {
58 pfIdentified
59 .name(name + ".prefetcher.num_hwpf_identified")
60 .desc("number of hwpf identified")
61 ;
62
63 pfMSHRHit
64 .name(name + ".prefetcher.num_hwpf_already_in_mshr")
65 .desc("number of hwpf that were already in mshr")
66 ;
67
68 pfCacheHit
69 .name(name + ".prefetcher.num_hwpf_already_in_cache")
70 .desc("number of hwpf that were already in the cache")
71 ;
72
73 pfBufferHit
74 .name(name + ".prefetcher.num_hwpf_already_in_prefetcher")
75 .desc("number of hwpf that were already in the prefetch queue")
76 ;
77
78 pfRemovedFull
79 .name(name + ".prefetcher.num_hwpf_evicted")
80 .desc("number of hwpf removed due to no buffer left")
81 ;
82
83 pfRemovedMSHR
84 .name(name + ".prefetcher.num_hwpf_removed_MSHR_hit")
85 .desc("number of hwpf removed because MSHR allocated")
86 ;
87
88 pfIssued
89 .name(name + ".prefetcher.num_hwpf_issued")
90 .desc("number of hwpf issued")
91 ;
92
93 pfSpanPage
94 .name(name + ".prefetcher.num_hwpf_span_page")
95 .desc("number of hwpf spanning a virtual page")
96 ;
97
98 pfSquashed
99 .name(name + ".prefetcher.num_hwpf_squashed_from_miss")
100 .desc("number of hwpf that got squashed due to a miss aborting calculation time")
101 ;
102 }
103
/**
 * Pop the next issuable prefetch from the queue.
 * When cacheCheckPush is false, the cache-hit filter runs here (pull
 * time) and entries already present in the cache are discarded.
 * @return The next prefetch packet to issue, or NULL if none remains.
 */
Packet *
BasePrefetcher::getPacket()
{
    DPRINTF(HWPrefetch, "%s:Requesting a hw_pf to issue\n", cache->name());

    if (pf.empty()) {
        DPRINTF(HWPrefetch, "%s:No HW_PF found\n", cache->name());
        return NULL;
    }

    Packet * pkt;
    bool keepTrying = false;
    do {
        // Take the oldest queued prefetch.
        pkt = *pf.begin();
        pf.pop_front();
        if (!cacheCheckPush) {
            // Pull-time filter: retry with the next entry if this block
            // is already cached.
            // NOTE(review): a skipped pkt (and the data buffer allocated
            // for it in handleMiss) is never freed here -- possible leak;
            // confirm Packet ownership rules before changing.
            keepTrying = inCache(pkt);
        }
        if (pf.empty()) {
            // Queue drained: stop requesting the bus for prefetches.
            cache->clearMasterRequest(Request_PF);
            if (keepTrying) return NULL; //None left, all were in cache
        }
    } while (keepTrying);

    pfIssued++;
    return pkt;
}
131
132 void
133 BasePrefetcher::handleMiss(Packet * &pkt, Tick time)
134 {
135 if (!pkt->req->isUncacheable() && !(pkt->isInstRead() && only_data))
136 {
137 //Calculate the blk address
138 Addr blkAddr = pkt->paddr & ~(Addr)(blkSize-1);
139
140 //Check if miss is in pfq, if so remove it
141 std::list<Packet *>::iterator iter = inPrefetch(blkAddr);
142 if (iter != pf.end()) {
143 DPRINTF(HWPrefetch, "%s:Saw a miss to a queued prefetch, removing it\n", cache->name());
144 pfRemovedMSHR++;
145 pf.erase(iter);
146 if (pf.empty())
147 cache->clearMasterRequest(Request_PF);
148 }
149
150 //Remove anything in queue with delay older than time
151 //since everything is inserted in time order, start from end
152 //and work until pf.empty() or time is earlier
153 //This is done to emulate Aborting the previous work on a new miss
154 //Needed for serial calculators like GHB
155 if (serialSquash) {
156 iter = pf.end();
157 iter--;
158 while (!pf.empty() && ((*iter)->time >= time)) {
159 pfSquashed++;
160 pf.pop_back();
161 iter--;
162 }
163 if (pf.empty())
164 cache->clearMasterRequest(Request_PF);
165 }
166
167
168 std::list<Addr> addresses;
169 std::list<Tick> delays;
170 calculatePrefetch(pkt, addresses, delays);
171
172 std::list<Addr>::iterator addr = addresses.begin();
173 std::list<Tick>::iterator delay = delays.begin();
174 while (addr != addresses.end())
175 {
176 DPRINTF(HWPrefetch, "%s:Found a pf canidate, inserting into prefetch queue\n", cache->name());
177 //temp calc this here...
178 pfIdentified++;
179 //create a prefetch memreq
180 Packet * prefetch;
181 prefetch = new Packet();
182 prefetch->paddr = (*addr);
183 prefetch->size = blkSize;
184 prefetch->cmd = Hard_Prefetch;
185 prefetch->xc = pkt->xc;
186 prefetch->data = new uint8_t[blkSize];
187 prefetch->req->asid = pkt->req->asid;
188 prefetch->req->setThreadNum() = pkt->req->getThreadNum();
189 prefetch->time = time + (*delay); //@todo ADD LATENCY HERE
190 //... initialize
191
192 //Check if it is already in the cache
193 if (cacheCheckPush) {
194 if (inCache(prefetch)) {
195 addr++;
196 delay++;
197 continue;
198 }
199 }
200
201 //Check if it is already in the miss_queue
202 if (inMissQueue(prefetch->paddr, prefetch->req->asid)) {
203 addr++;
204 delay++;
205 continue;
206 }
207
208 //Check if it is already in the pf buffer
209 if (inPrefetch(prefetch->paddr) != pf.end()) {
210 pfBufferHit++;
211 addr++;
212 delay++;
213 continue;
214 }
215
216 //We just remove the head if we are full
217 if (pf.size() == size)
218 {
219 DPRINTF(HWPrefetch, "%s:Inserting into prefetch queue, it was full removing oldest\n", cache->name());
220 pfRemovedFull++;
221 pf.pop_front();
222 }
223
224 pf.push_back(prefetch);
225 prefetch->flags |= CACHE_LINE_FILL;
226
227 //Make sure to request the bus, with proper delay
228 cache->setMasterRequest(Request_PF, prefetch->time);
229
230 //Increment through the list
231 addr++;
232 delay++;
233 }
234 }
235 }
236
237 std::list<Packet *>::iterator
238 BasePrefetcher::inPrefetch(Addr address)
239 {
240 //Guaranteed to only be one match, we always check before inserting
241 std::list<Packet *>::iterator iter;
242 for (iter=pf.begin(); iter != pf.end(); iter++) {
243 if (((*iter)->paddr & ~(Addr)(blkSize-1)) == address) {
244 return iter;
245 }
246 }
247 return pf.end();
248 }
249
250