1 /*
2 * Copyright (c) 2012-2019 ARM Limited
3 * All rights reserved
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
13 *
14 * Copyright (c) 2006 The Regents of The University of Michigan
15 * Copyright (c) 2010,2015 Advanced Micro Devices, Inc.
16 * All rights reserved.
17 *
18 * Redistribution and use in source and binary forms, with or without
19 * modification, are permitted provided that the following conditions are
20 * met: redistributions of source code must retain the above copyright
21 * notice, this list of conditions and the following disclaimer;
22 * redistributions in binary form must reproduce the above copyright
23 * notice, this list of conditions and the following disclaimer in the
24 * documentation and/or other materials provided with the distribution;
25 * neither the name of the copyright holders nor the names of its
26 * contributors may be used to endorse or promote products derived from
27 * this software without specific prior written permission.
28 *
29 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
30 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
31 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
32 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
33 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
34 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
35 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
36 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
37 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
38 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
39 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
40 *
41 * Authors: Ron Dreslinski
42 * Steve Reinhardt
43 * Ali Saidi
44 * Andreas Hansson
45 * Nikos Nikoleris
46 */
47
48 /**
49 * @file
50 * Declaration of the Packet class.
51 */
52
53 #ifndef __MEM_PACKET_HH__
54 #define __MEM_PACKET_HH__
55
56 #include <bitset>
57 #include <cassert>
58 #include <list>
59
60 #include "base/addr_range.hh"
61 #include "base/cast.hh"
62 #include "base/compiler.hh"
63 #include "base/flags.hh"
64 #include "base/logging.hh"
65 #include "base/printable.hh"
66 #include "base/types.hh"
67 #include "config/the_isa.hh"
68 #include "mem/request.hh"
69 #include "sim/core.hh"
70
71 class Packet;
72 typedef Packet *PacketPtr;
73 typedef uint8_t* PacketDataPtr;
74 typedef std::list<PacketPtr> PacketList;
75 typedef uint64_t PacketId;
76
77 class MemCmd
78 {
79 friend class Packet;
80
81 public:
82 /**
83 * List of all commands associated with a packet.
84 */
85 enum Command
86 {
87 InvalidCmd,
88 ReadReq,
89 ReadResp,
90 ReadRespWithInvalidate,
91 WriteReq,
92 WriteResp,
93 WritebackDirty,
94 WritebackClean,
95 WriteClean, // writes dirty data below without evicting
96 CleanEvict,
97 SoftPFReq,
98 SoftPFExReq,
99 HardPFReq,
100 SoftPFResp,
101 HardPFResp,
102 WriteLineReq,
103 UpgradeReq,
104 SCUpgradeReq, // Special "weak" upgrade for StoreCond
105 UpgradeResp,
106 SCUpgradeFailReq, // Failed SCUpgradeReq in MSHR (never sent)
107 UpgradeFailResp, // Valid for SCUpgradeReq only
108 ReadExReq,
109 ReadExResp,
110 ReadCleanReq,
111 ReadSharedReq,
112 LoadLockedReq,
113 StoreCondReq,
114 StoreCondFailReq, // Failed StoreCondReq in MSHR (never sent)
115 StoreCondResp,
116 SwapReq,
117 SwapResp,
118 // MessageReq and MessageResp are deprecated.
119 MemFenceReq = SwapResp + 3,
120 MemFenceResp,
121 CleanSharedReq,
122 CleanSharedResp,
123 CleanInvalidReq,
124 CleanInvalidResp,
125 // Error responses
126 // @TODO these should be classified as responses rather than
127 // requests; coding them as requests initially for backwards
128 // compatibility
129 InvalidDestError, // packet dest field invalid
130 BadAddressError, // memory address invalid
131 FunctionalReadError, // unable to fulfill functional read
132 FunctionalWriteError, // unable to fulfill functional write
133 // Fake simulator-only commands
134 PrintReq, // Print state matching address
135 FlushReq, // request for a cache flush
136 InvalidateReq, // request for address to be invalidated
137 InvalidateResp,
138 NUM_MEM_CMDS
139 };
140
141 private:
142 /**
143 * List of command attributes.
144 */
145 enum Attribute
146 {
147 IsRead, //!< Data flows from responder to requester
148 IsWrite, //!< Data flows from requester to responder
149 IsUpgrade,
150 IsInvalidate,
151 IsClean, //!< Cleans any existing dirty blocks
152 NeedsWritable, //!< Requires writable copy to complete in-cache
153 IsRequest, //!< Issued by requester
154 IsResponse, //!< Issued by responder
155 NeedsResponse, //!< Requester needs response from target
156 IsEviction,
157 IsSWPrefetch,
158 IsHWPrefetch,
159 IsLlsc, //!< Alpha/MIPS LL or SC access
160 HasData, //!< There is an associated payload
161 IsError, //!< Error response
162 IsPrint, //!< Print state matching address (for debugging)
163 IsFlush, //!< Flush the address from caches
164 FromCache, //!< Request originated from a caching agent
165 NUM_COMMAND_ATTRIBUTES
166 };
167
168 /**
169 * Structure that defines attributes and other data associated
170 * with a Command.
171 */
172 struct CommandInfo
173 {
174 /// Set of attribute flags.
175 const std::bitset<NUM_COMMAND_ATTRIBUTES> attributes;
176 /// Corresponding response for requests; InvalidCmd if no
177 /// response is applicable.
178 const Command response;
179 /// String representation (for printing)
180 const std::string str;
181 };
182
183 /// Array to map Command enum to associated info.
184 static const CommandInfo commandInfo[];
185
186 private:
187
188 Command cmd;
189
190 bool
191 testCmdAttrib(MemCmd::Attribute attrib) const
192 {
193 return commandInfo[cmd].attributes[attrib] != 0;
194 }
195
196 public:
197
198 bool isRead() const { return testCmdAttrib(IsRead); }
199 bool isWrite() const { return testCmdAttrib(IsWrite); }
200 bool isUpgrade() const { return testCmdAttrib(IsUpgrade); }
201 bool isRequest() const { return testCmdAttrib(IsRequest); }
202 bool isResponse() const { return testCmdAttrib(IsResponse); }
203 bool needsWritable() const { return testCmdAttrib(NeedsWritable); }
204 bool needsResponse() const { return testCmdAttrib(NeedsResponse); }
205 bool isInvalidate() const { return testCmdAttrib(IsInvalidate); }
206 bool isEviction() const { return testCmdAttrib(IsEviction); }
207 bool isClean() const { return testCmdAttrib(IsClean); }
208 bool fromCache() const { return testCmdAttrib(FromCache); }
209
210 /**
211 * A writeback is an eviction that carries data.
212 */
213 bool isWriteback() const { return testCmdAttrib(IsEviction) &&
214 testCmdAttrib(HasData); }
215
216 /**
217 * Check if this particular packet type carries payload data. Note
218 * that this does not reflect if the data pointer of the packet is
219 * valid or not.
220 */
221 bool hasData() const { return testCmdAttrib(HasData); }
222 bool isLLSC() const { return testCmdAttrib(IsLlsc); }
223 bool isSWPrefetch() const { return testCmdAttrib(IsSWPrefetch); }
224 bool isHWPrefetch() const { return testCmdAttrib(IsHWPrefetch); }
225 bool isPrefetch() const { return testCmdAttrib(IsSWPrefetch) ||
226 testCmdAttrib(IsHWPrefetch); }
227 bool isError() const { return testCmdAttrib(IsError); }
228 bool isPrint() const { return testCmdAttrib(IsPrint); }
229 bool isFlush() const { return testCmdAttrib(IsFlush); }
230
231 Command
232 responseCommand() const
233 {
234 return commandInfo[cmd].response;
235 }
236
237 /// Return the string representation of this command.
238 const std::string &toString() const { return commandInfo[cmd].str; }
239 int toInt() const { return (int)cmd; }
240
241 MemCmd(Command _cmd) : cmd(_cmd) { }
242 MemCmd(int _cmd) : cmd((Command)_cmd) { }
243 MemCmd() : cmd(InvalidCmd) { }
244
245 bool operator==(MemCmd c2) const { return (cmd == c2.cmd); }
246 bool operator!=(MemCmd c2) const { return (cmd != c2.cmd); }
247 };
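// Illustrative sketch (not part of the header itself): a component can
// classify a command through the attribute accessors instead of comparing
// against individual enum values. `cmd' below is a hypothetical local.
//
//     MemCmd cmd(MemCmd::ReadExReq);
//     if (cmd.isRead() && cmd.needsWritable()) {
//         // a read-for-ownership; a data-carrying response is expected
//         assert(cmd.needsResponse());
//         MemCmd resp = cmd.responseCommand(); // MemCmd::ReadExResp
//         inform("%s -> %s", cmd.toString(), resp.toString());
//     }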
248
249 /**
250 * A Packet is used to encapsulate a transfer between two objects in
251 * the memory system (e.g., the L1 and L2 cache). (In contrast, a
252 * single Request travels all the way from the requester to the
253 * ultimate destination and back, possibly being conveyed by several
254 * different Packets along the way.)
255 */
256 class Packet : public Printable
257 {
258 public:
259 typedef uint32_t FlagsType;
260 typedef ::Flags<FlagsType> Flags;
261
262 private:
263
264 enum : FlagsType {
265 // Flags to transfer across when copying a packet
266 COPY_FLAGS = 0x0000003F,
267
268 // Flags that are used to create response packets
269 RESPONDER_FLAGS = 0x00000009,
270
271 // Does this packet have sharers (which means it should not be
272 // considered writable) or not. See setHasSharers below.
273 HAS_SHARERS = 0x00000001,
274
275 // Special control flags
276 /// Special timing-mode atomic snoop for multi-level coherence.
277 EXPRESS_SNOOP = 0x00000002,
278
279 /// Allow a responding cache to inform the cache hierarchy
280 /// that it had a writable copy before responding. See
281 /// setResponderHadWritable below.
282 RESPONDER_HAD_WRITABLE = 0x00000004,
283
284 // Snoop co-ordination flag to indicate that a cache is
285 // responding to a snoop. See setCacheResponding below.
286 CACHE_RESPONDING = 0x00000008,
287
288 // The writeback/writeclean should be propagated further
289 // downstream by the receiver
290 WRITE_THROUGH = 0x00000010,
291
292 // Response co-ordination flag for cache maintenance
293 // operations
294 SATISFIED = 0x00000020,
295
296 /// Are the 'addr' and 'size' fields valid?
297 VALID_ADDR = 0x00000100,
298 VALID_SIZE = 0x00000200,
299
300 /// Is the data pointer set to a value that shouldn't be freed
301 /// when the packet is destroyed?
302 STATIC_DATA = 0x00001000,
303 /// The data pointer points to a value that should be freed when
304 /// the packet is destroyed. The pointer is assumed to be pointing
305 /// to an array, and delete [] is consequently called
306 DYNAMIC_DATA = 0x00002000,
307
308 /// suppress the error if this packet encounters a functional
309 /// access failure.
310 SUPPRESS_FUNC_ERROR = 0x00008000,
311
312 // Signal block present to squash prefetch and cache evict packets
313 // through express snoop flag
314 BLOCK_CACHED = 0x00010000
315 };
316
317 Flags flags;
318
319 public:
320 typedef MemCmd::Command Command;
321
322 /// The command field of the packet.
323 MemCmd cmd;
324
325 const PacketId id;
326
327 /// A pointer to the original request.
328 RequestPtr req;
329
330 private:
331 /**
332 * A pointer to the data being transferred. It can be different
333 * sizes at each level of the hierarchy so it belongs to the
334 * packet, not request. This may or may not be populated when a
335 * responder receives the packet. If not populated, memory should
336 * be allocated.
337 */
338 PacketDataPtr data;
339
340 /// The address of the request. This address could be virtual or
341 /// physical, depending on the system configuration.
342 Addr addr;
343
344 /// True if the request targets the secure memory space.
345 bool _isSecure;
346
347 /// The size of the request or transfer.
348 unsigned size;
349
350 /**
351 * Track the bytes found that satisfy a functional read.
352 */
353 std::vector<bool> bytesValid;
354
355 // Quality of Service priority value
356 uint8_t _qosValue;
357
358 public:
359
360 /**
361 * The extra delay from seeing the packet until the header is
362 * transmitted. This delay is used to communicate the crossbar
363 * forwarding latency to the neighbouring object (e.g. a cache)
364 * that actually makes the packet wait. As the delay is relative,
365 * a 32-bit unsigned should be sufficient.
366 */
367 uint32_t headerDelay;
368
369 /**
370 * Keep track of the extra delay incurred by snooping upwards
371 * before sending a request down the memory system. This is used
372 * by the coherent crossbar to account for the additional request
373 * delay.
374 */
375 uint32_t snoopDelay;
376
377 /**
378 * The extra pipelining delay from seeing the packet until the end of
379 * payload is transmitted by the component that provided it (if
380 * any). This includes the header delay. Similar to the header
381 * delay, this is used to make up for the fact that the
382 * crossbar does not make the packet wait. As the delay is
383 * relative, a 32-bit unsigned should be sufficient.
384 */
385 uint32_t payloadDelay;
386
387 /**
388 * A virtual base opaque structure used to hold state associated
389 * with the packet (e.g., an MSHR), specific to a SimObject that
390 * sees the packet. A pointer to this state is returned in the
391 * packet's response so that the SimObject in question can quickly
392 * look up the state needed to process it. A specific subclass
393 * would be derived from this to carry state specific to a
394 * particular sending device.
395 *
396 * As multiple SimObjects may add their SenderState throughout the
397 * memory system, the SenderStates create a stack, where a
398 * SimObject can add a new SenderState, as long as the
399 * preceding SenderState is restored when the response comes
400 * back. For this reason, the predecessor should always be
401 * populated with the current SenderState of a packet before
402 * modifying the senderState field in the request packet.
403 */
404 struct SenderState
405 {
406 SenderState* predecessor;
407 SenderState() : predecessor(NULL) {}
408 virtual ~SenderState() {}
409 };
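// Illustrative sketch: a device that needs to recover its own bookkeeping
// when the response returns would derive from SenderState. The name
// MyDeviceState and its fields are hypothetical, not part of this header.
//
//     struct MyDeviceState : public Packet::SenderState
//     {
//         Tick issueTick;   // when the request was sent
//         int bufferSlot;   // which internal buffer it came from
//         MyDeviceState(Tick t, int slot) : issueTick(t), bufferSlot(slot) {}
//     };
//
// The state is attached with pushSenderState() before sending and recovered
// with popSenderState() when the response arrives (see below).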
410
411 /**
412 * Object used to maintain state of a PrintReq. The senderState
413 * field of a PrintReq should always be of this type.
414 */
415 class PrintReqState : public SenderState
416 {
417 private:
418 /**
419 * An entry in the label stack.
420 */
421 struct LabelStackEntry
422 {
423 const std::string label;
424 std::string *prefix;
425 bool labelPrinted;
426 LabelStackEntry(const std::string &_label, std::string *_prefix);
427 };
428
429 typedef std::list<LabelStackEntry> LabelStack;
430 LabelStack labelStack;
431
432 std::string *curPrefixPtr;
433
434 public:
435 std::ostream &os;
436 const int verbosity;
437
438 PrintReqState(std::ostream &os, int verbosity = 0);
439 ~PrintReqState();
440
441 /**
442 * Returns the current line prefix.
443 */
444 const std::string &curPrefix() { return *curPrefixPtr; }
445
446 /**
447 * Push a label onto the label stack, and prepend the given
448 * prefix string onto the current prefix. Labels will only be
449 * printed if an object within the label's scope is printed.
450 */
451 void pushLabel(const std::string &lbl,
452 const std::string &prefix = " ");
453
454 /**
455 * Pop a label off the label stack.
456 */
457 void popLabel();
458
459 /**
460 * Print all of the pending unprinted labels on the
461 * stack. Called by printObj(), so normally not called by
462 * users unless bypassing printObj().
463 */
464 void printLabels();
465
466 /**
467 * Print a Printable object to os, because it matched the
468 * address on a PrintReq.
469 */
470 void printObj(Printable *obj);
471 };
472
473 /**
474 * This packet's sender state. Devices should use dynamic_cast<>
475 * to cast to the state appropriate to the sender. The intent of
476 * this variable is to allow a device to attach extra information
477 * to a request. A response packet must return the sender state
478 * that was attached to the original request (even if a new packet
479 * is created).
480 */
481 SenderState *senderState;
482
483 /**
484 * Push a new sender state to the packet and make the current
485 * sender state the predecessor of the new one. This should be
486 * preferred over direct manipulation of the senderState member
487 * variable.
488 *
489 * @param sender_state SenderState to push at the top of the stack
490 */
491 void pushSenderState(SenderState *sender_state);
492
493 /**
494 * Pop the top of the state stack and return a pointer to it. This
495 * assumes the current sender state is not NULL. This should be
496 * preferred over direct manipulation of the senderState member
497 * variable.
498 *
499 * @return The current top of the stack
500 */
501 SenderState *popSenderState();
502
503 /**
504 * Go through the sender state stack and return the first instance
505 * that is of type T (as determined by a dynamic_cast). If there
506 * is no sender state of type T, NULL is returned.
507 *
508 * @return The topmost state of type T
509 */
510 template <typename T>
511 T * findNextSenderState() const
512 {
513 T *t = NULL;
514 SenderState* sender_state = senderState;
515 while (t == NULL && sender_state != NULL) {
516 t = dynamic_cast<T*>(sender_state);
517 sender_state = sender_state->predecessor;
518 }
519 return t;
520 }
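// Illustrative sketch of the push/pop pattern (MyDeviceState, port and slot
// are hypothetical; see the SenderState example above):
//
//     // request path
//     pkt->pushSenderState(new MyDeviceState(curTick(), slot));
//     port.sendTimingReq(pkt);
//
//     // response path, e.g. in recvTimingResp(pkt)
//     auto *state = dynamic_cast<MyDeviceState *>(pkt->popSenderState());
//     assert(state);
//     Tick latency = curTick() - state->issueTick;
//     delete state;
//
// Any object that pushed its own state must pop it before forwarding the
// response further, so the stack unwinds in reverse order of the pushes.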
521
522 /// Return the string name of the cmd field (for debugging and
523 /// tracing).
524 const std::string &cmdString() const { return cmd.toString(); }
525
526 /// Return the index of this command.
527 inline int cmdToIndex() const { return cmd.toInt(); }
528
529 bool isRead() const { return cmd.isRead(); }
530 bool isWrite() const { return cmd.isWrite(); }
531 bool isUpgrade() const { return cmd.isUpgrade(); }
532 bool isRequest() const { return cmd.isRequest(); }
533 bool isResponse() const { return cmd.isResponse(); }
534 bool needsWritable() const
535 {
536 // we should never check if a response needsWritable, the
537 // request has this flag, and for a response we should rather
538 // look at the hasSharers flag (if not set, the response is to
539 // be considered writable)
540 assert(isRequest());
541 return cmd.needsWritable();
542 }
543 bool needsResponse() const { return cmd.needsResponse(); }
544 bool isInvalidate() const { return cmd.isInvalidate(); }
545 bool isEviction() const { return cmd.isEviction(); }
546 bool isClean() const { return cmd.isClean(); }
547 bool fromCache() const { return cmd.fromCache(); }
548 bool isWriteback() const { return cmd.isWriteback(); }
549 bool hasData() const { return cmd.hasData(); }
550 bool hasRespData() const
551 {
552 MemCmd resp_cmd = cmd.responseCommand();
553 return resp_cmd.hasData();
554 }
555 bool isLLSC() const { return cmd.isLLSC(); }
556 bool isError() const { return cmd.isError(); }
557 bool isPrint() const { return cmd.isPrint(); }
558 bool isFlush() const { return cmd.isFlush(); }
559
560 bool isWholeLineWrite(unsigned blk_size)
561 {
562 return (cmd == MemCmd::WriteReq || cmd == MemCmd::WriteLineReq) &&
563 getOffset(blk_size) == 0 && getSize() == blk_size;
564 }
565
566 //@{
567 /// Snoop flags
568 /**
569 * Set the cacheResponding flag. This is used by the caches to
570 * signal another cache that they are responding to a request. A
571 * cache will only respond to snoops if it has the line in either
572 * Modified or Owned state. Note that on snoop hits we always pass
573 * the line as Modified and never Owned. In the case of an Owned
574 * line we proceed to invalidate all other copies.
575 *
576 * On a cache fill (see Cache::handleFill), we check hasSharers
577 * first, ignoring the cacheResponding flag if hasSharers is set.
578 * A line is consequently allocated as:
579 *
580 * hasSharers cacheResponding state
581 * true false Shared
582 * true true Shared
583 * false false Exclusive
584 * false true Modified
585 */
586 void setCacheResponding()
587 {
588 assert(isRequest());
589 assert(!flags.isSet(CACHE_RESPONDING));
590 flags.set(CACHE_RESPONDING);
591 }
592 bool cacheResponding() const { return flags.isSet(CACHE_RESPONDING); }
593 /**
594 * On fills, the hasSharers flag is used by the caches in
595 * combination with the cacheResponding flag, as clarified
596 * above. If the hasSharers flag is not set, the packet is passing
597 * writable. Thus, a response from a memory passes the line as
598 * writable by default.
599 *
600 * The hasSharers flag is also used by upstream caches to inform a
601 * downstream cache that they have the block (by calling
602 * setHasSharers on snoop request packets that hit in upstream
603 * cache tags or MSHRs). If the snoop packet has sharers, a
604 * downstream cache is prevented from passing a dirty line upwards
605 * if it was not explicitly asked for a writable copy. See
606 * Cache::satisfyCpuSideRequest.
607 *
608 * The hasSharers flag is also used on writebacks, in
609 * combination with the WritebackClean or WritebackDirty commands,
610 * to allocate the block downstream either as:
611 *
612 * command hasSharers state
613 * WritebackDirty false Modified
614 * WritebackDirty true Owned
615 * WritebackClean false Exclusive
616 * WritebackClean true Shared
617 */
618 void setHasSharers() { flags.set(HAS_SHARERS); }
619 bool hasSharers() const { return flags.isSet(HAS_SHARERS); }
620 //@}
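// Illustrative sketch of how a responding cache typically marks a snoop
// before answering it. blk_is_valid, blk_is_dirty and blk_is_writable are
// hypothetical stand-ins for the cache's block state bits; real cache models
// derive them from their coherence state.
//
//     if (blk_is_dirty) {
//         pkt->setCacheResponding();   // this cache will supply the data
//         if (!blk_is_writable)
//             pkt->setHasSharers();    // other copies may remain: pass Owned
//     } else if (blk_is_valid) {
//         pkt->setHasSharers();        // keep a clean shared copy upstream
//     }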
621
622 /**
623 * The express snoop flag is used for two purposes. Firstly, it is
624 * used to bypass flow control for normal (non-snoop) requests
625 * going downstream in the memory system. In cases where a cache
626 * is responding to a snoop from another cache (it had a dirty
627 * line), but the line is not writable (and there are possibly
628 * other copies), the express snoop flag is set by the downstream
629 * cache to invalidate all other copies in zero time. Secondly,
630 * the express snoop flag is also set to be able to distinguish
631 * snoop packets that came from a downstream cache, rather than
632 * snoop packets from neighbouring caches.
633 */
634 void setExpressSnoop() { flags.set(EXPRESS_SNOOP); }
635 bool isExpressSnoop() const { return flags.isSet(EXPRESS_SNOOP); }
636
637 /**
638 * On responding to a snoop request (which only happens for
639 * Modified or Owned lines), make sure that we can transform an
640 * Owned response to a Modified one. If this flag is not set, the
641 * responding cache had the line in the Owned state, and there are
642 * possibly other Shared copies in the memory system. A downstream
643 * cache helps in orchestrating the invalidation of these copies
644 * by sending out the appropriate express snoops.
645 */
646 void setResponderHadWritable()
647 {
648 assert(cacheResponding());
649 assert(!responderHadWritable());
650 flags.set(RESPONDER_HAD_WRITABLE);
651 }
652 bool responderHadWritable() const
653 { return flags.isSet(RESPONDER_HAD_WRITABLE); }
654
655 /**
656 * Copy the response flags from an input packet to this packet. The
657 * response flags determine whether a responder has been found and
658 * the state at which the block will be at the destination.
659 *
660 * @param pkt The packet that we will copy flags from
661 */
662 void copyResponderFlags(const PacketPtr pkt);
663
664 /**
665 * A writeback/writeclean cmd gets propagated further downstream
666 * by the receiver when the flag is set.
667 */
668 void setWriteThrough()
669 {
670 assert(cmd.isWrite() &&
671 (cmd.isEviction() || cmd == MemCmd::WriteClean));
672 flags.set(WRITE_THROUGH);
673 }
674 void clearWriteThrough() { flags.clear(WRITE_THROUGH); }
675 bool writeThrough() const { return flags.isSet(WRITE_THROUGH); }
676
677 /**
678 * Set when a request hits in a cache and the cache is not going
679 * to respond. This is used by the crossbar to coordinate
680 * responses for cache maintenance operations.
681 */
682 void setSatisfied()
683 {
684 assert(cmd.isClean());
685 assert(!flags.isSet(SATISFIED));
686 flags.set(SATISFIED);
687 }
688 bool satisfied() const { return flags.isSet(SATISFIED); }
689
690 void setSuppressFuncError() { flags.set(SUPPRESS_FUNC_ERROR); }
691 bool suppressFuncError() const { return flags.isSet(SUPPRESS_FUNC_ERROR); }
692 void setBlockCached() { flags.set(BLOCK_CACHED); }
693 bool isBlockCached() const { return flags.isSet(BLOCK_CACHED); }
694 void clearBlockCached() { flags.clear(BLOCK_CACHED); }
695
696 /**
697 * QoS Value getter
698 * Returns 0 if QoS value was never set (constructor default).
699 *
700 * @return QoS priority value of the packet
701 */
702 inline uint8_t qosValue() const { return _qosValue; }
703
704 /**
705 * QoS Value setter
706 * Interface for setting QoS priority value of the packet.
707 *
708 * @param qos_value QoS priority value
709 */
710 inline void qosValue(const uint8_t qos_value)
711 { _qosValue = qos_value; }
712
713 inline MasterID masterId() const { return req->masterId(); }
714
715 // Network error conditions... encapsulate them as methods since
716 // their encoding keeps changing (from result field to command
717 // field, etc.)
718 void
719 setBadAddress()
720 {
721 assert(isResponse());
722 cmd = MemCmd::BadAddressError;
723 }
724
725 void copyError(Packet *pkt) { assert(pkt->isError()); cmd = pkt->cmd; }
726
727 Addr getAddr() const { assert(flags.isSet(VALID_ADDR)); return addr; }
728 /**
729 * Update the address of this packet mid-transaction. This is used
730 * by the address mapper to change an already set address to a new
731 * one based on the system configuration. It is intended to remap
732 * an existing address, so it asserts that the current address is
733 * valid.
734 */
735 void setAddr(Addr _addr) { assert(flags.isSet(VALID_ADDR)); addr = _addr; }
736
737 unsigned getSize() const { assert(flags.isSet(VALID_SIZE)); return size; }
738
739 /**
740 * Get address range to which this packet belongs.
741 *
742 * @return Address range of this packet.
743 */
744 AddrRange getAddrRange() const;
745
746 Addr getOffset(unsigned int blk_size) const
747 {
748 return getAddr() & Addr(blk_size - 1);
749 }
750
751 Addr getBlockAddr(unsigned int blk_size) const
752 {
753 return getAddr() & ~(Addr(blk_size - 1));
754 }
755
756 bool isSecure() const
757 {
758 assert(flags.isSet(VALID_ADDR));
759 return _isSecure;
760 }
761
762 /**
763 * Accessor function to atomic op.
764 */
765 AtomicOpFunctor *getAtomicOp() const { return req->getAtomicOpFunctor(); }
766 bool isAtomicOp() const { return req->isAtomic(); }
767
768 /**
769 * It has been determined that the SC packet should successfully update
770 * memory. Therefore, convert this SC packet to a normal write.
771 */
772 void
773 convertScToWrite()
774 {
775 assert(isLLSC());
776 assert(isWrite());
777 cmd = MemCmd::WriteReq;
778 }
779
780 /**
781 * When ruby is in use, Ruby will monitor the cache line and the
782 * phys memory should treat LL ops as normal reads.
783 */
784 void
785 convertLlToRead()
786 {
787 assert(isLLSC());
788 assert(isRead());
789 cmd = MemCmd::ReadReq;
790 }
791
792 /**
793 * Constructor. Note that a Request object must be constructed
794 * first, but the Request's physical address and size fields need
795 * not be valid. The command must be supplied.
796 */
797 Packet(const RequestPtr &_req, MemCmd _cmd)
798 : cmd(_cmd), id((PacketId)_req.get()), req(_req),
799 data(nullptr), addr(0), _isSecure(false), size(0),
800 _qosValue(0), headerDelay(0), snoopDelay(0),
801 payloadDelay(0), senderState(NULL)
802 {
803 if (req->hasPaddr()) {
804 addr = req->getPaddr();
805 flags.set(VALID_ADDR);
806 _isSecure = req->isSecure();
807 }
808 if (req->hasSize()) {
809 size = req->getSize();
810 flags.set(VALID_SIZE);
811 }
812 }
813
814 /**
815 * Alternate constructor if you are trying to create a packet with
816 * a request that is for a whole block, not the address from the
817 * req. This allows for overriding the size/addr of the req.
818 */
819 Packet(const RequestPtr &_req, MemCmd _cmd, int _blkSize, PacketId _id = 0)
820 : cmd(_cmd), id(_id ? _id : (PacketId)_req.get()), req(_req),
821 data(nullptr), addr(0), _isSecure(false),
822 _qosValue(0), headerDelay(0),
823 snoopDelay(0), payloadDelay(0), senderState(NULL)
824 {
825 if (req->hasPaddr()) {
826 addr = req->getPaddr() & ~(_blkSize - 1);
827 flags.set(VALID_ADDR);
828 _isSecure = req->isSecure();
829 }
830 size = _blkSize;
831 flags.set(VALID_SIZE);
832 }
833
834 /**
835 * Alternate constructor for copying a packet. Copy all fields
836 * *except* if the original packet's data was dynamic, don't copy
837 * that, as we can't guarantee that the new packet's lifetime is
838 * less than that of the original packet. In this case the new
839 * packet should allocate its own data.
840 */
841 Packet(const PacketPtr pkt, bool clear_flags, bool alloc_data)
842 : cmd(pkt->cmd), id(pkt->id), req(pkt->req),
843 data(nullptr),
844 addr(pkt->addr), _isSecure(pkt->_isSecure), size(pkt->size),
845 bytesValid(pkt->bytesValid),
846 _qosValue(pkt->qosValue()),
847 headerDelay(pkt->headerDelay),
848 snoopDelay(0),
849 payloadDelay(pkt->payloadDelay),
850 senderState(pkt->senderState)
851 {
852 if (!clear_flags)
853 flags.set(pkt->flags & COPY_FLAGS);
854
855 flags.set(pkt->flags & (VALID_ADDR|VALID_SIZE));
856
857 // should we allocate space for data, or not, the express
858 // snoops do not need to carry any data as they only serve to
859 // co-ordinate state changes
860 if (alloc_data) {
861 // even if asked to allocate data, if the original packet
862 // holds static data, then the sender will not be doing
863 // any memcpy on receiving the response, thus we simply
864 // carry the pointer forward
865 if (pkt->flags.isSet(STATIC_DATA)) {
866 data = pkt->data;
867 flags.set(STATIC_DATA);
868 } else {
869 allocate();
870 }
871 }
872 }
873
874 /**
875 * Generate the appropriate read MemCmd based on the Request flags.
876 */
877 static MemCmd
878 makeReadCmd(const RequestPtr &req)
879 {
880 if (req->isLLSC())
881 return MemCmd::LoadLockedReq;
882 else if (req->isPrefetchEx())
883 return MemCmd::SoftPFExReq;
884 else if (req->isPrefetch())
885 return MemCmd::SoftPFReq;
886 else
887 return MemCmd::ReadReq;
888 }
889
890 /**
891 * Generate the appropriate write MemCmd based on the Request flags.
892 */
893 static MemCmd
894 makeWriteCmd(const RequestPtr &req)
895 {
896 if (req->isLLSC())
897 return MemCmd::StoreCondReq;
898 else if (req->isSwap() || req->isAtomic())
899 return MemCmd::SwapReq;
900 else if (req->isCacheInvalidate()) {
901 return req->isCacheClean() ? MemCmd::CleanInvalidReq :
902 MemCmd::InvalidateReq;
903 } else if (req->isCacheClean()) {
904 return MemCmd::CleanSharedReq;
905 } else
906 return MemCmd::WriteReq;
907 }
908
909 /**
910 * Constructor-like methods that return Packets based on Request objects.
911 * Fine-tune the MemCmd type if it's not a vanilla read or write.
912 */
913 static PacketPtr
914 createRead(const RequestPtr &req)
915 {
916 return new Packet(req, makeReadCmd(req));
917 }
918
919 static PacketPtr
920 createWrite(const RequestPtr &req)
921 {
922 return new Packet(req, makeWriteCmd(req));
923 }
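// Illustrative sketch of the typical requester-side flow. The Request
// constructor arguments, port and masterId are assumptions made for the
// example, not requirements imposed by this header.
//
//     RequestPtr req = std::make_shared<Request>(addr, size, 0, masterId);
//     PacketPtr pkt = Packet::createRead(req);
//     pkt->allocate();                   // backing store for the read data
//     if (!port.sendTimingReq(pkt)) {
//         // receiver is busy; keep pkt and retry on recvReqRetry()
//     }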
924
925 /**
926 * clean up packet variables
927 */
928 ~Packet()
929 {
930 deleteData();
931 }
932
933 /**
934 * Take a request packet and modify it in place to be suitable for
935 * returning as a response to that request.
936 */
937 void
938 makeResponse()
939 {
940 assert(needsResponse());
941 assert(isRequest());
942 cmd = cmd.responseCommand();
943
944 // responses are never express, even if the snoop that
945 // triggered them was
946 flags.clear(EXPRESS_SNOOP);
947 }
948
949 void
950 makeAtomicResponse()
951 {
952 makeResponse();
953 }
954
955 void
956 makeTimingResponse()
957 {
958 makeResponse();
959 }
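// Illustrative sketch of the responder side for a timing read. regValue,
// port and responseLatency are hypothetical, and the schedTimingResp() call
// assumes a QueuedSlavePort-style response path.
//
//     assert(pkt->isRead() && pkt->needsResponse());
//     pkt->setLE<uint32_t>(regValue);    // 4-byte payload, little endian
//     pkt->makeTimingResponse();         // flip the request into a response
//     port.schedTimingResp(pkt, curTick() + responseLatency);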
960
961 void
962 setFunctionalResponseStatus(bool success)
963 {
964 if (!success) {
965 if (isWrite()) {
966 cmd = MemCmd::FunctionalWriteError;
967 } else {
968 cmd = MemCmd::FunctionalReadError;
969 }
970 }
971 }
972
973 void
974 setSize(unsigned size)
975 {
976 assert(!flags.isSet(VALID_SIZE));
977
978 this->size = size;
979 flags.set(VALID_SIZE);
980 }
981
982 /**
983 * Check if packet corresponds to a given block-aligned address and
984 * address space.
985 *
986 * @param addr The address to compare against.
987 * @param is_secure Whether addr belongs to the secure address space.
988 * @param blk_size Block size in bytes.
989 * @return Whether packet matches description.
990 */
991 bool matchBlockAddr(const Addr addr, const bool is_secure,
992 const int blk_size) const;
993
994 /**
995 * Check if this packet refers to the same block-aligned address and
996 * address space as another packet.
997 *
998 * @param pkt The packet to compare against.
999 * @param blk_size Block size in bytes.
1000 * @return Whether packet matches description.
1001 */
1002 bool matchBlockAddr(const PacketPtr pkt, const int blk_size) const;
1003
1004 /**
1005 * Check if packet corresponds to a given address and address space.
1006 *
1007 * @param addr The address to compare against.
1008 * @param is_secure Whether addr belongs to the secure address space.
1009 * @return Whether packet matches description.
1010 */
1011 bool matchAddr(const Addr addr, const bool is_secure) const;
1012
1013 /**
1014 * Check if this packet refers to the same address and address space as
1015 * another packet.
1016 *
1017 * @param pkt The packet to compare against.
1018 * @return Whether packet matches description.
1019 */
1020 bool matchAddr(const PacketPtr pkt) const;
1021
1022 public:
1023 /**
1024 * @{
1025 * @name Data accessor methods
1026 */
1027
1028 /**
1029 * Set the data pointer to the following value that should not be
1030 * freed. Static data allows us to do a single memcpy even if
1031 * multiple packets are required to get from source to destination
1032 * and back. In essence the pointer is set calling dataStatic on
1033 * the original packet, and whenever this packet is copied and
1034 * forwarded the same pointer is passed on. When a packet
1035 * eventually reaches the destination holding the data, it is
1036 * copied once into the location originally set. On the way back
1037 * to the source, no copies are necessary.
1038 */
1039 template <typename T>
1040 void
1041 dataStatic(T *p)
1042 {
1043 assert(flags.noneSet(STATIC_DATA|DYNAMIC_DATA));
1044 data = (PacketDataPtr)p;
1045 flags.set(STATIC_DATA);
1046 }
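// Illustrative sketch: a requester that already owns the destination buffer
// points the packet at it, so the responder writes the data straight into
// place and no further copies are needed on the return path. `buffer' is a
// hypothetical caller-owned array that must outlive the packet.
//
//     uint8_t *buffer = new uint8_t[req->getSize()];
//     PacketPtr pkt = Packet::createRead(req);
//     pkt->dataStatic(buffer);           // the packet never frees this buffer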
1047
1048 /**
1049 * Set the data pointer to the following value that should not be
1050 * freed. This version of the function allows the pointer passed
1051 * to us to be const. To avoid issues down the line we cast the
1052 * constness away; the alternative would be to keep both a const
1053 * and non-const data pointer and cleverly choose between
1054 * them. Note that this is only allowed for static data.
1055 */
1056 template <typename T>
1057 void
1058 dataStaticConst(const T *p)
1059 {
1060 assert(flags.noneSet(STATIC_DATA|DYNAMIC_DATA));
1061 data = const_cast<PacketDataPtr>(p);
1062 flags.set(STATIC_DATA);
1063 }
1064
1065 /**
1066 * Set the data pointer to a value that should have delete []
1067 * called on it. Dynamic data is local to this packet, and as the
1068 * packet travels from source to destination, forwarded packets
1069 * will allocate their own data. When a packet reaches the final
1070 * destination it will populate the dynamic data of that specific
1071 * packet, and on the way back towards the source, memcpy will be
1072 * invoked in every step where a new packet was created e.g. in
1073 * the caches. Ultimately when the response reaches the source a
1074 * final memcpy is needed to extract the data from the packet
1075 * before it is deallocated.
1076 */
1077 template <typename T>
1078 void
1079 dataDynamic(T *p)
1080 {
1081 assert(flags.noneSet(STATIC_DATA|DYNAMIC_DATA));
1082 data = (PacketDataPtr)p;
1083 flags.set(DYNAMIC_DATA);
1084 }
1085
1086 /**
1087 * get a pointer to the data ptr.
1088 */
1089 template <typename T>
1090 T*
1091 getPtr()
1092 {
1093 assert(flags.isSet(STATIC_DATA|DYNAMIC_DATA));
1094 assert(!isMaskedWrite());
1095 return (T*)data;
1096 }
1097
1098 template <typename T>
1099 const T*
1100 getConstPtr() const
1101 {
1102 assert(flags.isSet(STATIC_DATA|DYNAMIC_DATA));
1103 return (const T*)data;
1104 }
1105
1106 /**
1107 * Get the data in the packet byte swapped from big endian to
1108 * host endian.
1109 */
1110 template <typename T>
1111 T getBE() const;
1112
1113 /**
1114 * Get the data in the packet byte swapped from little endian to
1115 * host endian.
1116 */
1117 template <typename T>
1118 T getLE() const;
1119
1120 /**
1121 * Get the data in the packet byte swapped from the specified
1122 * endianness.
1123 */
1124 template <typename T>
1125 T get(ByteOrder endian) const;
1126
1127 #if THE_ISA != NULL_ISA
1128 /**
1129 * Get the data in the packet byte swapped from guest to host
1130 * endian.
1131 */
1132 template <typename T>
1133 T get() const
1134 M5_DEPRECATED_MSG("The memory system should be ISA independent.");
1135 #endif
1136
1137 /** Set the value in the data pointer to v as big endian. */
1138 template <typename T>
1139 void setBE(T v);
1140
1141 /** Set the value in the data pointer to v as little endian. */
1142 template <typename T>
1143 void setLE(T v);
1144
1145 /**
1146 * Set the value in the data pointer to v using the specified
1147 * endianness.
1148 */
1149 template <typename T>
1150 void set(T v, ByteOrder endian);
1151
1152 #if THE_ISA != NULL_ISA
1153 /** Set the value in the data pointer to v as guest endian. */
1154 template <typename T>
1155 void set(T v)
1156 M5_DEPRECATED_MSG("The memory system should be ISA independent.");
1157 #endif
1158
1159 /**
1160 * Get the data in the packet byte swapped from the specified
1161 * endianness and zero-extended to 64 bits.
1162 */
1163 uint64_t getUintX(ByteOrder endian) const;
1164
1165 /**
1166 * Set the value in the word w after truncating it to the length
1167 * of the packet and then byteswapping it to the desired
1168 * endianness.
1169 */
1170 void setUintX(uint64_t w, ByteOrder endian);
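// Illustrative sketch: a device register with a little-endian layout can be
// accessed independently of host endianness (`reg' is a hypothetical 8-byte
// register and the packet is assumed to be an 8-byte access):
//
//     if (pkt->isRead())
//         pkt->setLE<uint64_t>(reg);     // host value -> LE payload
//     else if (pkt->isWrite())
//         reg = pkt->getLE<uint64_t>();  // LE payload -> host value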
1171
1172 /**
1173 * Copy data into the packet from the provided pointer.
1174 */
1175 void
1176 setData(const uint8_t *p)
1177 {
1178 // we should never be copying data onto itself, which means we
1179 * must identify packets with static data, as they carry the
1180 // same pointer from source to destination and back
1181 assert(p != getPtr<uint8_t>() || flags.isSet(STATIC_DATA));
1182
1183 if (p != getPtr<uint8_t>()) {
1184 // for packet with allocated dynamic data, we copy data from
1185 // one to the other, e.g. a forwarded response to a response
1186 std::memcpy(getPtr<uint8_t>(), p, getSize());
1187 }
1188 }
1189
1190 /**
1191 * Copy data into the packet from the provided block pointer,
1192 * which is aligned to the given block size.
1193 */
1194 void
1195 setDataFromBlock(const uint8_t *blk_data, int blkSize)
1196 {
1197 setData(blk_data + getOffset(blkSize));
1198 }
1199
1200 /**
1201 * Copy data from the packet to the memory at the provided pointer.
1202 * @param p Pointer to which data will be copied.
1203 */
1204 void
1205 writeData(uint8_t *p) const
1206 {
1207 if (!isMaskedWrite()) {
1208 std::memcpy(p, getConstPtr<uint8_t>(), getSize());
1209 } else {
1210 assert(req->getByteEnable().size() == getSize());
1211 // Write only the enabled bytes
1212 const uint8_t *base = getConstPtr<uint8_t>();
1213 for (int i = 0; i < getSize(); i++) {
1214 if (req->getByteEnable()[i]) {
1215 p[i] = *(base + i);
1216 }
1217 // Disabled bytes stay untouched
1218 }
1219 }
1220 }
1221
1222 /**
1223 * Copy data from the packet to the provided block pointer, which
1224 * is aligned to the given block size.
1225 * @param blk_data Pointer to block to which data will be copied.
1226 * @param blkSize Block size in bytes.
1227 */
1228 void
1229 writeDataToBlock(uint8_t *blk_data, int blkSize) const
1230 {
1231 writeData(blk_data + getOffset(blkSize));
1232 }
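// Illustrative sketch: a cache satisfying an access against one of its
// blocks typically uses the block-aligned helpers in both directions.
// blk->data and blkSize are hypothetical (the cache's data array and its
// block size in bytes).
//
//     if (pkt->isRead())
//         pkt->setDataFromBlock(blk->data, blkSize);   // block -> packet
//     else if (pkt->isWrite())
//         pkt->writeDataToBlock(blk->data, blkSize);   // packet -> block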
1233
1234 /**
1235 * Delete the data pointed to in the data pointer. Ok to call no
1236 * matter how the data was allocated.
1237 */
1238 void
1239 deleteData()
1240 {
1241 if (flags.isSet(DYNAMIC_DATA))
1242 delete [] data;
1243
1244 flags.clear(STATIC_DATA|DYNAMIC_DATA);
1245 data = NULL;
1246 }
1247
1248 /** Allocate memory for the packet. */
1249 void
1250 allocate()
1251 {
1252 // if either this command or the response command has a data
1253 // payload, actually allocate space
1254 if (hasData() || hasRespData()) {
1255 assert(flags.noneSet(STATIC_DATA|DYNAMIC_DATA));
1256 flags.set(DYNAMIC_DATA);
1257 data = new uint8_t[getSize()];
1258 }
1259 }
1260
1261 /** @} */
1262
1263 /** Get the data in the packet without byte swapping. */
1264 template <typename T>
1265 T getRaw() const;
1266
1267 /** Set the value in the data pointer to v without byte swapping. */
1268 template <typename T>
1269 void setRaw(T v);
1270
1271 public:
1272 /**
1273 * Check a functional request against a memory value stored in
1274 * another packet (i.e. an in-transit request or
1275 * response). Returns true if the current packet is a read, and
1276 * the other packet provides the data, which is then copied to the
1277 * current packet. If the current packet is a write, and the other
1278 * packet intersects this one, then we update the data
1279 * accordingly.
1280 */
1281 bool
1282 trySatisfyFunctional(PacketPtr other)
1283 {
1284 if (other->isMaskedWrite()) {
1285 // Do not forward data if overlapping with a masked write
1286 if (_isSecure == other->isSecure() &&
1287 getAddr() <= (other->getAddr() + other->getSize() - 1) &&
1288 other->getAddr() <= (getAddr() + getSize() - 1)) {
1289 warn("Trying to check against a masked write, skipping."
1290 " (addr: 0x%x, other addr: 0x%x)", getAddr(),
1291 other->getAddr());
1292 }
1293 return false;
1294 }
1295 // all packets that are carrying a payload should have a valid
1296 // data pointer
1297 return trySatisfyFunctional(other, other->getAddr(), other->isSecure(),
1298 other->getSize(),
1299 other->hasData() ?
1300 other->getPtr<uint8_t>() : NULL);
1301 }
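// Illustrative sketch: when servicing a functional read, a component that
// holds in-flight packets (writeQueue below is hypothetical) checks each of
// them and stops as soon as one fully satisfies the access:
//
//     for (PacketPtr queued : writeQueue) {
//         if (func_pkt->trySatisfyFunctional(queued))
//             return;   // the read was satisfied by in-flight data
//     }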
1302
1303 /**
1304 * Does the request need to check for cached copies of the same block
1305 * in the memory hierarchy above.
1306 **/
1307 bool
1308 mustCheckAbove() const
1309 {
1310 return cmd == MemCmd::HardPFReq || isEviction();
1311 }
1312
1313 /**
1314 * Is this packet a clean eviction, including both actual clean
1315 * evict packets, but also clean writebacks.
1316 */
1317 bool
1318 isCleanEviction() const
1319 {
1320 return cmd == MemCmd::CleanEvict || cmd == MemCmd::WritebackClean;
1321 }
1322
1323 bool
1324 isMaskedWrite() const
1325 {
1326 return (cmd == MemCmd::WriteReq && !req->getByteEnable().empty());
1327 }
1328
1329 /**
1330 * Check a functional request against a memory value represented
1331 * by a base/size pair and an associated data array. If the
1332 * current packet is a read, it may be satisfied by the memory
1333 * value. If the current packet is a write, it may update the
1334 * memory value.
1335 */
1336 bool
1337 trySatisfyFunctional(Printable *obj, Addr base, bool is_secure, int size,
1338 uint8_t *_data);
1339
1340 /**
1341 * Push label for PrintReq (safe to call unconditionally).
1342 */
1343 void
1344 pushLabel(const std::string &lbl)
1345 {
1346 if (isPrint())
1347 safe_cast<PrintReqState*>(senderState)->pushLabel(lbl);
1348 }
1349
1350 /**
1351 * Pop label for PrintReq (safe to call unconditionally).
1352 */
1353 void
1354 popLabel()
1355 {
1356 if (isPrint())
1357 safe_cast<PrintReqState*>(senderState)->popLabel();
1358 }
1359
1360 void print(std::ostream &o, int verbosity = 0,
1361 const std::string &prefix = "") const;
1362
1363 /**
1364 * A no-args wrapper of print(std::ostream...)
1365 * meant to be invoked from DPRINTFs
1366 * avoiding string overheads in fast mode
1367 * @return string with the request's type and start<->end addresses
1368 */
1369 std::string print() const;
1370 };
1371
1372 #endif //__MEM_PACKET_HH__