refactor code for the packet, get rid of packet_impl.hh
[gem5.git] src/mem/cache/prefetch/base_prefetcher.cc
/*
 * Copyright (c) 2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Ron Dreslinski
 */

/**
 * @file
 * Hardware Prefetcher Definition.
 */

#include <list>

#include "base/trace.hh"
#include "mem/cache/base_cache.hh"
#include "mem/cache/prefetch/base_prefetcher.hh"
#include "mem/request.hh"

BasePrefetcher::BasePrefetcher(int size, bool pageStop, bool serialSquash,
                               bool cacheCheckPush, bool onlyData)
    : size(size), pageStop(pageStop), serialSquash(serialSquash),
      cacheCheckPush(cacheCheckPush), only_data(onlyData)
{
}

void
BasePrefetcher::setCache(BaseCache *_cache)
{
    cache = _cache;
    blkSize = cache->getBlockSize();
}

void
BasePrefetcher::regStats(const std::string &name)
{
    pfIdentified
        .name(name + ".prefetcher.num_hwpf_identified")
        .desc("number of hwpf identified")
        ;

    pfMSHRHit
        .name(name + ".prefetcher.num_hwpf_already_in_mshr")
        .desc("number of hwpf that were already in mshr")
        ;

    pfCacheHit
        .name(name + ".prefetcher.num_hwpf_already_in_cache")
        .desc("number of hwpf that were already in the cache")
        ;

    pfBufferHit
        .name(name + ".prefetcher.num_hwpf_already_in_prefetcher")
        .desc("number of hwpf that were already in the prefetch queue")
        ;

    pfRemovedFull
        .name(name + ".prefetcher.num_hwpf_evicted")
        .desc("number of hwpf removed due to no buffer left")
        ;

    pfRemovedMSHR
        .name(name + ".prefetcher.num_hwpf_removed_MSHR_hit")
        .desc("number of hwpf removed because MSHR allocated")
        ;

    pfIssued
        .name(name + ".prefetcher.num_hwpf_issued")
        .desc("number of hwpf issued")
        ;

    pfSpanPage
        .name(name + ".prefetcher.num_hwpf_span_page")
        .desc("number of hwpf spanning a virtual page")
        ;

    pfSquashed
        .name(name + ".prefetcher.num_hwpf_squashed_from_miss")
        .desc("number of hwpf that got squashed due to a miss aborting calculation time")
        ;
}

Packet *
BasePrefetcher::getPacket()
{
    DPRINTF(HWPrefetch, "%s:Requesting a hw_pf to issue\n", cache->name());

    if (pf.empty()) {
        DPRINTF(HWPrefetch, "%s:No HW_PF found\n", cache->name());
        return NULL;
    }

    Packet * pkt;
    bool keepTrying = false;
    do {
        pkt = *pf.begin();
        pf.pop_front();
        if (!cacheCheckPush) {
            // The cache was not checked when this prefetch was queued, so
            // filter out blocks that have since been brought into the cache.
            keepTrying = inCache(pkt);
        }
        if (pf.empty()) {
            cache->clearMasterRequest(Request_PF);
            if (keepTrying) return NULL; // None left, all were in cache
        }
    } while (keepTrying);

    pfIssued++;
    return pkt;
}

void
BasePrefetcher::handleMiss(Packet * &pkt, Tick time)
{
    if (!pkt->req->isUncacheable() && !(pkt->req->isInstRead() && only_data))
    {
        // Calculate the block address of the miss
        Addr blkAddr = pkt->getAddr() & ~(Addr)(blkSize-1);

        // If the miss is to a block already in the prefetch queue, remove it
        std::list<Packet *>::iterator iter = inPrefetch(blkAddr);
        if (iter != pf.end()) {
            DPRINTF(HWPrefetch, "%s:Saw a miss to a queued prefetch, removing it\n", cache->name());
            pfRemovedMSHR++;
            pf.erase(iter);
            if (pf.empty())
                cache->clearMasterRequest(Request_PF);
        }

        // Remove anything in the queue with a time stamp at or after this
        // miss.  Entries are inserted in time order, so work back from the
        // tail until the queue is empty or an earlier entry is found.  This
        // emulates aborting the previous work on a new miss, which is needed
        // for serial calculators like the GHB.
        if (serialSquash) {
            while (!pf.empty() && pf.back()->time >= time) {
                pfSquashed++;
                pf.pop_back();
            }
            if (pf.empty())
                cache->clearMasterRequest(Request_PF);
        }


        std::list<Addr> addresses;
        std::list<Tick> delays;
        calculatePrefetch(pkt, addresses, delays);

        std::list<Addr>::iterator addr = addresses.begin();
        std::list<Tick>::iterator delay = delays.begin();
        while (addr != addresses.end())
        {
            DPRINTF(HWPrefetch, "%s:Found a pf candidate, inserting into prefetch queue\n", cache->name());
            pfIdentified++;
            // Create the prefetch request and packet
            Request * prefetchReq = new Request(*addr, blkSize, 0);
            Packet * prefetch;
            prefetch = new Packet(prefetchReq, Packet::HardPFReq, -1);
            prefetch->allocate();
            prefetch->req->setThreadContext(pkt->req->getCpuNum(),
                                            pkt->req->getThreadNum());

            prefetch->time = time + (*delay); //@todo ADD LATENCY HERE

            // Check if the block is already in the cache
            if (cacheCheckPush) {
                if (inCache(prefetch)) {
                    addr++;
                    delay++;
                    continue;
                }
            }

            // Check if the block is already in the miss queue
            if (inMissQueue(prefetch->getAddr())) {
                addr++;
                delay++;
                continue;
            }

            // Check if the block is already in the prefetch buffer
            if (inPrefetch(prefetch->getAddr()) != pf.end()) {
                pfBufferHit++;
                addr++;
                delay++;
                continue;
            }

            // If the queue is full, remove the oldest entry at the head
            if (pf.size() == size)
            {
                DPRINTF(HWPrefetch, "%s:Inserting into prefetch queue, it was full removing oldest\n", cache->name());
                pfRemovedFull++;
                pf.pop_front();
            }

            pf.push_back(prefetch);
            prefetch->flags |= CACHE_LINE_FILL;

            // Make sure to request the bus, with the proper delay
            cache->setMasterRequest(Request_PF, prefetch->time);

            // Increment through the lists
            addr++;
            delay++;
        }
    }
}

std::list<Packet *>::iterator
BasePrefetcher::inPrefetch(Addr address)
{
    // Guaranteed to only be one match, we always check before inserting
    std::list<Packet *>::iterator iter;
    for (iter = pf.begin(); iter != pf.end(); iter++) {
        if (((*iter)->getAddr() & ~(Addr)(blkSize-1)) == address) {
            return iter;
        }
    }
    return pf.end();
}
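
For orientation, here is a minimal sketch (not part of the file above) of how a derived prefetcher is expected to plug into this base class: calculatePrefetch() fills the address and delay lists that handleMiss() then turns into queued HardPFReq packets. The class name NextLinePrefetcher and the zero-cycle delay are hypothetical; the calculatePrefetch() signature is taken from the call site in handleMiss(), and blkSize is assumed to be a protected member declared in base_prefetcher.hh.

// Hypothetical illustration only: a simple next-line prefetcher built on
// BasePrefetcher.  Names and parameters here are assumptions, not part of
// the gem5 tree.
class NextLinePrefetcher : public BasePrefetcher
{
  public:
    NextLinePrefetcher(int size, bool pageStop, bool serialSquash,
                       bool cacheCheckPush, bool onlyData)
        : BasePrefetcher(size, pageStop, serialSquash, cacheCheckPush,
                         onlyData)
    {
    }

    void
    calculatePrefetch(Packet * &pkt, std::list<Addr> &addresses,
                      std::list<Tick> &delays)
    {
        // Prefetch the block immediately following the one that missed.
        Addr blkAddr = pkt->getAddr() & ~(Addr)(blkSize - 1);
        addresses.push_back(blkAddr + blkSize);
        delays.push_back(0); // no extra delay; issue when the bus is granted
    }
};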