mem: Consistently use ISO prefixes
[gem5.git] / src / mem / snoop_filter.cc
1 /*
2 * Copyright (c) 2013-2017,2019 ARM Limited
3 * All rights reserved
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
13 *
14 * Redistribution and use in source and binary forms, with or without
15 * modification, are permitted provided that the following conditions are
16 * met: redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer;
18 * redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution;
21 * neither the name of the copyright holders nor the names of its
22 * contributors may be used to endorse or promote products derived from
23 * this software without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
26 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
27 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
28 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
29 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
30 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
31 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
32 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
33 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
34 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
35 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 */
37
38 /**
39 * @file
40 * Implementation of a snoop filter.
41 */
42
43 #include "mem/snoop_filter.hh"
44
45 #include "base/logging.hh"
46 #include "base/trace.hh"
47 #include "debug/SnoopFilter.hh"
48 #include "sim/system.hh"
49
// Out-of-line definition of the static member so it can be odr-used
// (e.g. bound to a reference); required pre-C++17, harmless afterwards.
const int SnoopFilter::SNOOP_MASK_SIZE;
51
52 void
53 SnoopFilter::eraseIfNullEntry(SnoopFilterCache::iterator& sf_it)
54 {
55 SnoopItem& sf_item = sf_it->second;
56 if ((sf_item.requested | sf_item.holder).none()) {
57 cachedLocations.erase(sf_it);
58 DPRINTF(SnoopFilter, "%s: Removed SF entry.\n",
59 __func__);
60 }
61 }
62
// Lookup a request coming from a CPU-side port: returns the set of
// ports that need to be snooped plus the lookup latency, and (when the
// packet came from a cache) records the request/holder state so that a
// later response or retry can be accounted for.  NOTE(review): assumes
// the caller pairs every lookupRequest with a finishRequest — confirm
// against the crossbar code.
std::pair<SnoopFilter::SnoopList, Cycles>
SnoopFilter::lookupRequest(const Packet* cpkt, const ResponsePort&
                           cpu_side_port)
{
    DPRINTF(SnoopFilter, "%s: src %s packet %s\n", __func__,
            cpu_side_port.name(), cpkt->print());

    // check if the packet came from a cache; only then do we track it
    bool allocate = !cpkt->req->isUncacheable() && cpu_side_port.isSnooping()
        && cpkt->fromCache();
    Addr line_addr = cpkt->getBlockAddr(linesize);
    if (cpkt->isSecure()) {
        // keep secure and non-secure aliases of a line as distinct keys
        line_addr |= LineSecure;
    }
    SnoopMask req_port = portToMask(cpu_side_port);
    reqLookupResult.it = cachedLocations.find(line_addr);
    bool is_hit = (reqLookupResult.it != cachedLocations.end());

    // If the snoop filter has no entry, and we should not allocate,
    // do not create a new snoop filter entry, simply return a NULL
    // portlist.
    if (!is_hit && !allocate)
        return snoopDown(lookupLatency);

    // If no hit in snoop filter create a new element and update iterator
    if (!is_hit) {
        reqLookupResult.it =
            cachedLocations.emplace(line_addr, SnoopItem()).first;
    }
    SnoopItem& sf_item = reqLookupResult.it->second;
    SnoopMask interested = sf_item.holder | sf_item.requested;

    // Store unmodified value of snoop filter item in temp storage in
    // case we need to revert because of a send retry in
    // updateRequest.
    reqLookupResult.retryItem = sf_item;

    stats.totRequests++;
    if (is_hit) {
        if (interested.count() == 1)
            stats.hitSingleRequests++;
        else
            stats.hitMultiRequests++;
    }

    DPRINTF(SnoopFilter, "%s: SF value %x.%x\n",
            __func__, sf_item.requested, sf_item.holder);

    // If we are not allocating, we are done: report everyone
    // interested except the requesting port itself
    if (!allocate)
        return snoopSelected(maskToPortList(interested & ~req_port),
                             lookupLatency);

    if (cpkt->needsResponse()) {
        if (!cpkt->cacheResponding()) {
            // Max one request per address per port
            panic_if((sf_item.requested & req_port).any(),
                     "double request :( SF value %x.%x\n",
                     sf_item.requested, sf_item.holder);

            // Mark in-flight requests to distinguish later on
            sf_item.requested |= req_port;
            DPRINTF(SnoopFilter, "%s: new SF value %x.%x\n",
                    __func__, sf_item.requested, sf_item.holder);
        } else {
            // NOTE: The memInhibit might have been asserted by a cache closer
            // to the CPU, already -> the response will not be seen by this
            // filter -> we do not need to keep the in-flight request, but make
            // sure that we know that that cluster has a copy
            panic_if((sf_item.holder & req_port).none(),
                     "Need to hold the value!");
            DPRINTF(SnoopFilter,
                    "%s: not marking request. SF value %x.%x\n",
                    __func__, sf_item.requested, sf_item.holder);
        }
    } else { // if (!cpkt->needsResponse())
        assert(cpkt->isEviction());
        // make sure that the sender actually had the line
        panic_if((sf_item.holder & req_port).none(), "requestor %x is not a " \
                 "holder :( SF value %x.%x\n", req_port,
                 sf_item.requested, sf_item.holder);
        // CleanEvicts and Writebacks -> the sender and all caches above
        // it may not have the line anymore.
        if (!cpkt->isBlockCached()) {
            sf_item.holder &= ~req_port;
            DPRINTF(SnoopFilter, "%s: new SF value %x.%x\n",
                    __func__, sf_item.requested, sf_item.holder);
        }
    }

    return snoopSelected(maskToPortList(interested & ~req_port), lookupLatency);
}
155
// Complete the request started by the preceding lookupRequest.  If the
// send will be retried, roll the snoop filter entry back to the
// snapshot taken in lookupRequest; either way, drop the entry if it has
// become empty.
void
SnoopFilter::finishRequest(bool will_retry, Addr addr, bool is_secure)
{
    if (reqLookupResult.it != cachedLocations.end()) {
        // since we rely on the caller, do a basic check to ensure
        // that finishRequest is being called following lookupRequest
        Addr line_addr = (addr & ~(Addr(linesize - 1)));
        if (is_secure) {
            line_addr |= LineSecure;
        }
        assert(reqLookupResult.it->first == line_addr);
        if (will_retry) {
            SnoopItem retry_item = reqLookupResult.retryItem;
            // Undo any changes made in lookupRequest to the snoop filter
            // entry if the request will come again. retryItem holds
            // the previous value of the snoopfilter entry.
            reqLookupResult.it->second = retry_item;

            DPRINTF(SnoopFilter, "%s: restored SF value %x.%x\n",
                    __func__, retry_item.requested, retry_item.holder);
        }

        // An entry restored to (or left in) the all-zero state is useless
        eraseIfNullEntry(reqLookupResult.it);
    }
}
181
// Lookup a snoop coming from the memory side: returns all ports that
// hold or have requested the line (no filtering of the snoop origin
// here), plus the lookup latency.  Also the point where the capacity
// limit is enforced, since snoops never allocate entries.
std::pair<SnoopFilter::SnoopList, Cycles>
SnoopFilter::lookupSnoop(const Packet* cpkt)
{
    DPRINTF(SnoopFilter, "%s: packet %s\n", __func__, cpkt->print());

    assert(cpkt->isRequest());

    Addr line_addr = cpkt->getBlockAddr(linesize);
    if (cpkt->isSecure()) {
        line_addr |= LineSecure;
    }
    auto sf_it = cachedLocations.find(line_addr);
    bool is_hit = (sf_it != cachedLocations.end());

    // A miss while the filter is full means the configured capacity is
    // too small for the simulated system
    panic_if(!is_hit && (cachedLocations.size() >= maxEntryCount),
             "snoop filter exceeded capacity of %d cache blocks\n",
             maxEntryCount);

    // If the snoop filter has no entry, simply return a NULL
    // portlist, there is no point creating an entry only to remove it
    // later
    if (!is_hit)
        return snoopDown(lookupLatency);

    SnoopItem& sf_item = sf_it->second;

    SnoopMask interested = (sf_item.holder | sf_item.requested);

    stats.totSnoops++;

    if (interested.count() == 1)
        stats.hitSingleSnoops++;
    else
        stats.hitMultiSnoops++;

    // ReadEx and Writes require both invalidation and exclusivity, while reads
    // require neither. Writebacks on the other hand require exclusivity but
    // not the invalidation. Previously Writebacks did not generate upward
    // snoops so this was never an issue. Now that Writebacks generate snoops
    // we need a special case for Writebacks. Additionally cache maintenance
    // operations can generate snoops as they clean and/or invalidate all
    // caches down to the specified point of reference.
    assert(cpkt->isWriteback() || cpkt->req->isUncacheable() ||
           (cpkt->isInvalidate() == cpkt->needsWritable()) ||
           cpkt->req->isCacheMaintenance());
    if (cpkt->isInvalidate() && sf_item.requested.none()) {
        // Early clear of the holder, if no other request is currently going on
        // @todo: This should possibly be updated even though we do not filter
        // upward snoops
        DPRINTF(SnoopFilter, "%s: old SF value %x.%x\n",
                __func__, sf_item.requested, sf_item.holder);
        sf_item.holder = 0;
        DPRINTF(SnoopFilter, "%s: new SF value %x.%x\n",
                __func__, sf_item.requested, sf_item.holder);
        eraseIfNullEntry(sf_it);
    }

    return snoopSelected(maskToPortList(interested), lookupLatency);
}
241
// Account for a snoop response travelling from one CPU-side port to
// another: the responder must hold the line, the destination must have
// a request in flight; ownership (holder bit) moves to the requestor
// and its in-flight request completes.
void
SnoopFilter::updateSnoopResponse(const Packet* cpkt,
                                 const ResponsePort& rsp_port,
                                 const ResponsePort& req_port)
{
    DPRINTF(SnoopFilter, "%s: rsp %s req %s packet %s\n",
            __func__, rsp_port.name(), req_port.name(), cpkt->print());

    assert(cpkt->isResponse());
    assert(cpkt->cacheResponding());

    // if this snoop response is due to an uncacheable request, or is
    // being turned into a normal response, there is nothing more to
    // do
    if (cpkt->req->isUncacheable() || !req_port.isSnooping()) {
        return;
    }

    Addr line_addr = cpkt->getBlockAddr(linesize);
    if (cpkt->isSecure()) {
        line_addr |= LineSecure;
    }
    SnoopMask rsp_mask = portToMask(rsp_port);
    SnoopMask req_mask = portToMask(req_port);
    SnoopItem& sf_item = cachedLocations[line_addr];

    DPRINTF(SnoopFilter, "%s: old SF value %x.%x\n",
            __func__, sf_item.requested, sf_item.holder);

    // The source should have the line
    panic_if((sf_item.holder & rsp_mask).none(),
             "SF value %x.%x does not have the line\n",
             sf_item.requested, sf_item.holder);

    // The destination should have had a request in
    panic_if((sf_item.requested & req_mask).none(), "SF value %x.%x missing "\
             "the original request\n", sf_item.requested, sf_item.holder);

    // If the snoop response has no sharers the line is passed in
    // Modified state, and we know that there are no other copies, or
    // they will all be invalidated imminently
    if (!cpkt->hasSharers()) {
        DPRINTF(SnoopFilter,
                "%s: dropping %x because non-shared snoop "
                "response SF val: %x.%x\n", __func__, rsp_mask,
                sf_item.requested, sf_item.holder);
        sf_item.holder = 0;
    }
    assert(!cpkt->isWriteback());
    // @todo Deal with invalidating responses
    // The requestor now holds the line and its request is complete
    sf_item.holder |= req_mask;
    sf_item.requested &= ~req_mask;
    assert((sf_item.requested | sf_item.holder).any());
    DPRINTF(SnoopFilter, "%s: new SF value %x.%x\n",
            __func__, sf_item.requested, sf_item.holder);
}
298
299 void
300 SnoopFilter::updateSnoopForward(const Packet* cpkt,
301 const ResponsePort& rsp_port, const RequestPort& req_port)
302 {
303 DPRINTF(SnoopFilter, "%s: rsp %s req %s packet %s\n",
304 __func__, rsp_port.name(), req_port.name(), cpkt->print());
305
306 assert(cpkt->isResponse());
307 assert(cpkt->cacheResponding());
308
309 Addr line_addr = cpkt->getBlockAddr(linesize);
310 if (cpkt->isSecure()) {
311 line_addr |= LineSecure;
312 }
313 auto sf_it = cachedLocations.find(line_addr);
314 bool is_hit = sf_it != cachedLocations.end();
315
316 // Nothing to do if it is not a hit
317 if (!is_hit)
318 return;
319
320 // If the snoop response has no sharers the line is passed in
321 // Modified state, and we know that there are no other copies, or
322 // they will all be invalidated imminently
323 if (!cpkt->hasSharers()) {
324 SnoopItem& sf_item = sf_it->second;
325
326 DPRINTF(SnoopFilter, "%s: old SF value %x.%x\n",
327 __func__, sf_item.requested, sf_item.holder);
328 sf_item.holder = 0;
329 DPRINTF(SnoopFilter, "%s: new SF value %x.%x\n",
330 __func__, sf_item.requested, sf_item.holder);
331
332 eraseIfNullEntry(sf_it);
333 }
334 }
335
// Account for a normal (non-snoop) response returning to a CPU-side
// port: clears the port's in-flight request bit and updates the holder
// set, with special handling for cache maintenance responses which
// carry no data.
void
SnoopFilter::updateResponse(const Packet* cpkt, const ResponsePort&
                            cpu_side_port)
{
    DPRINTF(SnoopFilter, "%s: src %s packet %s\n",
            __func__, cpu_side_port.name(), cpkt->print());

    assert(cpkt->isResponse());

    // we only allocate if the packet actually came from a cache, but
    // start by checking if the port is snooping
    if (cpkt->req->isUncacheable() || !cpu_side_port.isSnooping())
        return;

    // next check if we actually allocated an entry
    Addr line_addr = cpkt->getBlockAddr(linesize);
    if (cpkt->isSecure()) {
        line_addr |= LineSecure;
    }
    auto sf_it = cachedLocations.find(line_addr);
    if (sf_it == cachedLocations.end())
        return;

    SnoopMask response_mask = portToMask(cpu_side_port);
    SnoopItem& sf_item = sf_it->second;

    DPRINTF(SnoopFilter, "%s: old SF value %x.%x\n",
            __func__, sf_item.requested, sf_item.holder);

    // Make sure we have seen the actual request, too
    panic_if((sf_item.requested & response_mask).none(),
             "SF value %x.%x missing request bit\n",
             sf_item.requested, sf_item.holder);

    // The request is now complete for this port
    sf_item.requested &= ~response_mask;
    // Update the residency of the cache line.

    if (cpkt->req->isCacheMaintenance()) {
        // A cache clean response does not carry any data so it
        // shouldn't change the holders, unless it is invalidating.
        if (cpkt->isInvalidate()) {
            sf_item.holder &= ~response_mask;
        }
        eraseIfNullEntry(sf_it);
    } else {
        // Any other response implies that a cache above will have the
        // block.
        sf_item.holder |= response_mask;
        assert((sf_item.holder | sf_item.requested).any());
    }
    DPRINTF(SnoopFilter, "%s: new SF value %x.%x\n",
            __func__, sf_item.requested, sf_item.holder);
}
389
// Register all snoop-filter statistics with the stats framework; each
// ADD_STAT names a member counter incremented in the lookup paths.
SnoopFilter::SnoopFilterStats::SnoopFilterStats(Stats::Group *parent):
    Stats::Group(parent),
    ADD_STAT(totRequests,"Total number of requests made to the snoop filter."),
    ADD_STAT(hitSingleRequests,
             "Number of requests hitting in the snoop filter with a single "\
             "holder of the requested data."),
    ADD_STAT(hitMultiRequests,
             "Number of requests hitting in the snoop filter with multiple "\
             "(>1) holders of the requested data."),
    ADD_STAT(totSnoops,"Total number of snoops made to the snoop filter."),
    ADD_STAT(hitSingleSnoops,
             "Number of snoops hitting in the snoop filter with a single "\
             "holder of the requested data."),
    ADD_STAT(hitMultiSnoops,
             "Number of snoops hitting in the snoop filter with multiple "\
             "(>1) holders of the requested data.")
{}
407
void
SnoopFilter::regStats()
{
    // All per-filter statistics are registered through the Stats::Group
    // constructor above, so only the base class needs to run here.
    SimObject::regStats();
}