// (extraction residue, kept as comments)
// commit subject: mem-cache: Implement a multi compressor
// file: gem5 src/mem/cache/mshr.hh
/*
 * Copyright (c) 2012-2013, 2015-2016, 2018 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 *          Nikos Nikoleris
 */

/**
 * @file
 * Miss Status and Handling Register (MSHR) declaration.
 */

#ifndef __MEM_CACHE_MSHR_HH__
#define __MEM_CACHE_MSHR_HH__

#include <algorithm>
#include <cassert>
#include <functional>
#include <iosfwd>
#include <list>
#include <string>
#include <vector>

#include "base/printable.hh"
#include "base/types.hh"
#include "mem/cache/queue_entry.hh"
#include "mem/packet.hh"
#include "mem/request.hh"
#include "sim/core.hh"

class BaseCache;

/**
 * Miss Status and handling Register. This class keeps all the information
 * needed to handle a cache miss including a list of target requests.
 * @sa \ref gem5MemorySystem "gem5 Memory System"
 */
class MSHR : public QueueEntry, public Printable
{

    /**
     * Consider the queues friends to avoid making everything public.
     */
    template<typename Entry>
    friend class Queue;
    friend class MSHRQueue;

  private:

    /** Flag set by downstream caches */
    bool downstreamPending;

    /**
     * Here we use one flag to track both if:
     *
     * 1. We are going to become owner or not, i.e., we will get the
     * block in an ownership state (Owned or Modified) with BlkDirty
     * set. This determines whether or not we are going to become the
     * responder and ordering point for future requests that we snoop.
     *
     * 2. We know that we are going to get a writable block, i.e. we
     * will get the block in writable state (Exclusive or Modified
     * state) with BlkWritable set. That determines whether additional
     * targets with needsWritable set will be able to be satisfied, or
     * if not should be put on the deferred list to possibly wait for
     * another request that does give us writable access.
     *
     * Condition 2 is actually just a shortcut that saves us from
     * possibly building a deferred target list and calling
     * promoteWritable() every time we get a writable block. Condition
     * 1, tracking ownership, is what is important. However, we never
     * receive ownership without marking the block dirty, and
     * consequently use pendingModified to track both ownership and
     * writability rather than having separate pendingDirty and
     * pendingWritable flags.
     */
    bool pendingModified;

    /** Did we snoop an invalidate while waiting for data? */
    bool postInvalidate;

    /** Did we snoop a read while waiting for data? */
    bool postDowngrade;

  public:

    /** Track if we sent this as a whole line write or not */
    bool wasWholeLineWrite;

    /** True if the entry is just a simple forward from an upper level */
    bool isForward;

    /** A single request waiting on this MSHR, tagged with its origin. */
    class Target : public QueueEntry::Target {
      public:

        enum Source {
            FromCPU,
            FromSnoop,
            FromPrefetcher
        };

        const Source source;  //!< Request from cpu, memory, or prefetcher?

        /**
         * We use this flag to track whether we have cleared the
         * downstreamPending flag for the MSHR of the cache above
         * where this packet originates from and guard noninitial
         * attempts to clear it.
         *
         * The flag markedPending needs to be updated when the
         * TargetList is in service which can be:
         * 1) during the Target instantiation if the MSHR is in
         * service and the target is not deferred,
         * 2) when the MSHR becomes in service if the target is not
         * deferred,
         * 3) or when the TargetList is promoted (deferredTargets ->
         * targets).
         */
        bool markedPending;

        const bool allocOnFill;   //!< Should the response servicing this
                                  //!< target list allocate in the cache?

        Target(PacketPtr _pkt, Tick _readyTime, Counter _order,
               Source _source, bool _markedPending, bool alloc_on_fill)
            : QueueEntry::Target(_pkt, _readyTime, _order), source(_source),
              markedPending(_markedPending), allocOnFill(alloc_on_fill)
        {}
    };

    /**
     * An ordered list of Targets plus summary flags describing the
     * combined properties of the packets it holds.
     */
    class TargetList : public std::list<Target> {

      public:
        bool needsWritable;
        bool hasUpgrade;
        /** Set when the response should allocate on fill */
        bool allocOnFill;
        /**
         * Determine whether there was at least one non-snooping
         * target coming from another cache.
         */
        bool hasFromCache;

        TargetList();

        /**
         * Use the provided packet and the source to update the
         * flags of this TargetList.
         *
         * @param pkt Packet considered for the flag update
         * @param source Indicates the source of the packet
         * @param alloc_on_fill Whether the pkt would allocate on a fill
         */
        void updateFlags(PacketPtr pkt, Target::Source source,
                         bool alloc_on_fill);

        /**
         * Reset state
         *
         * @param blk_addr Address of the cache block
         * @param blk_size Size of the cache block
         */
        void init(Addr blk_addr, Addr blk_size) {
            blkAddr = blk_addr;
            blkSize = blk_size;
            // one entry per byte in the block; all cleared by resetFlags()
            writesBitmap.resize(blk_size);

            resetFlags();
        }

        void resetFlags() {
            canMergeWrites = true;
            std::fill(writesBitmap.begin(), writesBitmap.end(), false);

            needsWritable = false;
            hasUpgrade = false;
            allocOnFill = false;
            hasFromCache = false;
        }

        /**
         * Goes through the list of targets and uses them to populate
         * the flags of this TargetList. When the function returns the
         * flags are consistent with the properties of packets in the
         * list.
         */
        void populateFlags();

        /**
         * Update the write-tracking state of this TargetList for an
         * incoming packet: decide whether the packet's write may be
         * merged with previous ones (canMergeWrites) and, if so,
         * record the bytes it writes in writesBitmap.
         *
         * @param pkt Packet considered for the update
         */
        void
        updateWriteFlags(PacketPtr pkt)
        {
            // if we have already seen writes for the full block stop
            // here, this might be a full line write followed by
            // other compatible requests (e.g., reads)
            if (!isWholeLineWrite()) {
                // Avoid merging requests with special flags (e.g.,
                // strictly ordered)
                const Request::FlagsType no_merge_flags =
                    Request::UNCACHEABLE | Request::STRICT_ORDER |
                    Request::MMAPPED_IPR | Request::PRIVILEGED |
                    Request::LLSC | Request::MEM_SWAP |
                    Request::MEM_SWAP_COND | Request::SECURE;
                const auto &req_flags = pkt->req->getFlags();
                bool compat_write = pkt->isWrite() &&
                    !req_flags.isSet(no_merge_flags);
                // once a non-mergeable write is seen, merging stays
                // disabled for the lifetime of this list
                canMergeWrites &= compat_write;

                // if this request is the first target in this list
                // and additionally a whole-line write, we need to
                // service it as a whole-line even if we won't allow
                // any further merging (e.g., SECURE whole line
                // write).
                bool first_write = pkt->isWrite() && (size() == 0);
                if (first_write || compat_write) {
                    auto offset = pkt->getOffset(blkSize);
                    auto begin = writesBitmap.begin() + offset;
                    std::fill(begin, begin + pkt->getSize(), true);
                }
            }
        }

        /**
         * Tests if the flags of this TargetList have their default
         * values.
         *
         * @return True if the TargetList is reset, false otherwise.
         */
        bool isReset() const {
            return !needsWritable && !hasUpgrade && !allocOnFill &&
                !hasFromCache && canMergeWrites;
        }

        /**
         * Add the specified packet in the TargetList. This function
         * stores information related to the added packet and updates
         * accordingly the flags.
         *
         * @param pkt Packet considered for adding
         * @param readyTime Tick at which the packet is processed by this cache
         * @param order A counter giving a unique id to each target
         * @param source Indicates the source agent of the packet
         * @param markPending Set for deferred targets or pending MSHRs
         * @param alloc_on_fill Whether it should allocate on a fill
         */
        void add(PacketPtr pkt, Tick readyTime, Counter order,
                 Target::Source source, bool markPending, bool alloc_on_fill);

        /**
         * Convert upgrades to the equivalent request if the cache line they
         * refer to would have been invalid (Upgrade -> ReadEx, SC* -> Fail).
         * Used to rejig ordering between targets waiting on an MSHR. */
        void replaceUpgrades();

        void clearDownstreamPending();
        void clearDownstreamPending(iterator begin, iterator end);
        bool trySatisfyFunctional(PacketPtr pkt);
        void print(std::ostream &os, int verbosity,
                   const std::string &prefix) const;

        /**
         * Check if this list contains writes that cover an entire
         * cache line. This is used as part of the miss-packet
         * creation. Note that new requests may arrive after a
         * miss-packet has been created, and for the corresponding
         * fill we use the wasWholeLineWrite field.
         */
        bool isWholeLineWrite() const
        {
            // true only if every byte of the block has been written
            return std::all_of(writesBitmap.begin(), writesBitmap.end(),
                               [](bool i) { return i; });
        }

      private:
        /** Address of the cache block for this list of targets. */
        Addr blkAddr;

        /** Size of the cache block. */
        Addr blkSize;

        /** Indicates whether we can merge incoming write requests */
        bool canMergeWrites;

        // NOTE: std::vector<bool> might not satisfy the
        // ForwardIterator requirement and therefore cannot be used
        // for writesBitmap.
        /**
         * Track which bytes are written by requests in this target
         * list.
         */
        std::vector<char> writesBitmap;
    };

    /** A list of MSHRs. */
    typedef std::list<MSHR *> List;
    /** MSHR list iterator. */
    typedef List::iterator Iterator;

    /** The pending* and post* flags are only valid if inService is
     *  true.  Using the accessor functions lets us detect if these
     *  flags are accessed improperly.
     */

    /** True if we need to get a writable copy of the block. */
    bool needsWritable() const { return targets.needsWritable; }

    /** True if the first target's packet is a cache clean request. */
    bool isCleaning() const {
        PacketPtr pkt = targets.front().pkt;
        return pkt->isClean();
    }

    bool isPendingModified() const {
        assert(inService); return pendingModified;
    }

    bool hasPostInvalidate() const {
        assert(inService); return postInvalidate;
    }

    bool hasPostDowngrade() const {
        assert(inService); return postDowngrade;
    }

    bool sendPacket(BaseCache &cache) override;

    bool allocOnFill() const {
        return targets.allocOnFill;
    }

    /**
     * Determine if there are non-deferred requests from other caches
     *
     * @return true if any of the targets is from another cache
     */
    bool hasFromCache() const {
        return targets.hasFromCache;
    }

  private:
    /**
     * Promotes deferred targets that satisfy a predicate
     *
     * Deferred targets are promoted to the target list if they
     * satisfy a given condition. The operation stops at the first
     * deferred target that doesn't satisfy the condition.
     *
     * @param pred A condition on a Target
     */
    void promoteIf(const std::function<bool (Target &)>& pred);

    /**
     * Pointer to this MSHR on the ready list.
     * @sa MissQueue, MSHRQueue::readyList
     */
    Iterator readyIter;

    /**
     * Pointer to this MSHR on the allocated list.
     * @sa MissQueue, MSHRQueue::allocatedList
     */
    Iterator allocIter;

    /** List of all requests that match the address */
    TargetList targets;

    /** Targets that could not be serviced yet (e.g., need writable). */
    TargetList deferredTargets;

  public:
    /**
     * Check if this MSHR contains only compatible writes, and if they
     * span the entire cache line. This is used as part of the
     * miss-packet creation. Note that new requests may arrive after a
     * miss-packet has been created, and for the fill we therefore use
     * the wasWholeLineWrite field.
     */
    bool isWholeLineWrite() const {
        return targets.isWholeLineWrite();
    }

    /**
     * Allocate a miss to this MSHR.
     * @param blk_addr The address of the block.
     * @param blk_size The number of bytes to request.
     * @param pkt The original miss.
     * @param when_ready When should the MSHR be ready to act upon.
     * @param _order The logical order of this MSHR
     * @param alloc_on_fill Should the cache allocate a block on fill
     */
    void allocate(Addr blk_addr, unsigned blk_size, PacketPtr pkt,
                  Tick when_ready, Counter _order, bool alloc_on_fill);

    void markInService(bool pending_modified_resp);

    void clearDownstreamPending();

    /**
     * Mark this MSHR as free.
     */
    void deallocate();

    /**
     * Add a request to the list of targets.
     * @param target The target.
     */
    void allocateTarget(PacketPtr target, Tick when, Counter order,
                        bool alloc_on_fill);
    bool handleSnoop(PacketPtr target, Counter order);

    /** A simple constructor. */
    MSHR();

    /**
     * Returns the current number of allocated targets.
     * @return The current number of allocated targets.
     */
    int getNumTargets() const
    { return targets.size() + deferredTargets.size(); }

    /**
     * Extracts the subset of the targets that can be serviced given a
     * received response. This function returns the targets list
     * unless the response is a ReadRespWithInvalidate. The
     * ReadRespWithInvalidate is the only invalidating response whose
     * invalidation was not expected when the request (a
     * ReadSharedReq) was sent out. For ReadRespWithInvalidate we can
     * safely service only the first FromCPU target and all FromSnoop
     * targets (inform all snoopers that we no longer have the block).
     *
     * @param pkt The response from the downstream memory
     */
    TargetList extractServiceableTargets(PacketPtr pkt);

    /**
     * Returns true if there are targets left.
     * @return true if there are targets
     */
    bool hasTargets() const { return !targets.empty(); }

    /**
     * Returns a pointer to the first target.
     * @return A pointer to the first target.
     */
    QueueEntry::Target *getTarget() override
    {
        assert(hasTargets());
        return &targets.front();
    }

    /**
     * Pop first target.
     */
    void popTarget()
    {
        targets.pop_front();
    }

    bool promoteDeferredTargets();

    /**
     * Promotes deferred targets that do not require writable
     *
     * Move targets from the deferred targets list to the target list
     * starting from the first deferred target until the first target
     * that is a cache maintenance operation or needs a writable copy
     * of the block
     */
    void promoteReadable();

    /**
     * Promotes deferred targets that do not require writable
     *
     * Requests in the deferred target list are moved to the target
     * list up until the first target that is a cache maintenance
     * operation or needs a writable copy of the block
     *
     * NOTE(review): this brief duplicates promoteReadable()'s even
     * though the name suggests a different promotion condition —
     * verify against the definition in mshr.cc.
     */
    void promoteWritable();

    bool trySatisfyFunctional(PacketPtr pkt);

    /**
     * Adds a delay relative to the current tick to the current MSHR
     * @param delay_ticks the desired delay in ticks
     */
    void delay(Tick delay_ticks)
    {
        assert(readyTime <= curTick());
        readyTime = curTick() + delay_ticks;
    }

    /**
     * Prints the contents of this MSHR for debugging.
     */
    void print(std::ostream &os,
               int verbosity = 0,
               const std::string &prefix = "") const override;
    /**
     * A no-args wrapper of print(std::ostream...) meant to be
     * invoked from DPRINTFs avoiding string overheads in fast mode
     *
     * @return string with mshr fields + [deferred]targets
     */
    std::string print() const;

    bool matchBlockAddr(const Addr addr, const bool is_secure) const override;
    bool matchBlockAddr(const PacketPtr pkt) const override;
    bool conflictAddr(const QueueEntry* entry) const override;
};

#endif // __MEM_CACHE_MSHR_HH__