2 * Copyright (c) 2012-2017 ARM Limited
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
14 * Copyright (c) 2006 The Regents of The University of Michigan
15 * Copyright (c) 2010,2015 Advanced Micro Devices, Inc.
16 * All rights reserved.
18 * Redistribution and use in source and binary forms, with or without
19 * modification, are permitted provided that the following conditions are
20 * met: redistributions of source code must retain the above copyright
21 * notice, this list of conditions and the following disclaimer;
22 * redistributions in binary form must reproduce the above copyright
23 * notice, this list of conditions and the following disclaimer in the
24 * documentation and/or other materials provided with the distribution;
25 * neither the name of the copyright holders nor the names of its
26 * contributors may be used to endorse or promote products derived from
27 * this software without specific prior written permission.
29 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
30 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
31 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
32 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
33 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
34 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
35 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
36 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
37 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
38 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
39 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
41 * Authors: Ron Dreslinski
49 * Declaration of the Packet class.
52 #ifndef __MEM_PACKET_HH__
53 #define __MEM_PACKET_HH__
59 #include "base/cast.hh"
60 #include "base/compiler.hh"
61 #include "base/flags.hh"
62 #include "base/logging.hh"
63 #include "base/printable.hh"
64 #include "base/types.hh"
65 #include "mem/request.hh"
66 #include "sim/core.hh"
69 typedef Packet *PacketPtr;
70 typedef uint8_t* PacketDataPtr;
71 typedef std::list<PacketPtr> PacketList;
79 * List of all commands associated with a packet.
86 ReadRespWithInvalidate,
91 WriteClean, // writes dirty data below without evicting
99 SCUpgradeReq, // Special "weak" upgrade for StoreCond
101 SCUpgradeFailReq, // Failed SCUpgradeReq in MSHR (never sent)
102 UpgradeFailResp, // Valid for SCUpgradeReq only
109 StoreCondFailReq, // Failed StoreCondReq in MSHR (never sent)
118 // @TODO these should be classified as responses rather than
119 // requests; coding them as requests initially for backwards
121 InvalidDestError, // packet dest field invalid
122 BadAddressError, // memory address invalid
123 FunctionalReadError, // unable to fulfill functional read
124 FunctionalWriteError, // unable to fulfill functional write
125 // Fake simulator-only commands
126 PrintReq, // Print state matching address
127 FlushReq, //request for a cache flush
128 InvalidateReq, // request for address to be invalidated
135 * List of command attributes.
// Bit positions used to index the per-command attribute bitset in
// CommandInfo::attributes (several enumerators are elided in this excerpt).
 139 IsRead, //!< Data flows from responder to requester
 140 IsWrite, //!< Data flows from requester to responder
 143 NeedsWritable, //!< Requires writable copy to complete in-cache
 144 IsRequest, //!< Issued by requester
 145 IsResponse, //!< Issued by responder
 146 NeedsResponse, //!< Requester needs response from target
 150 IsLlsc, //!< Alpha/MIPS LL or SC access
 151 HasData, //!< There is an associated payload
 152 IsError, //!< Error response
 153 IsPrint, //!< Print state matching address (for debugging)
 154 IsFlush, //!< Flush the address from caches
 155 FromCache, //!< Request originated from a caching agent
// Sentinel: number of attribute bits, sizes the bitset in CommandInfo.
 156 NUM_COMMAND_ATTRIBUTES
160 * Structure that defines attributes and other data associated
165 /// Set of attribute flags.
166 const std::bitset<NUM_COMMAND_ATTRIBUTES> attributes;
167 /// Corresponding response for requests; InvalidCmd if no
168 /// response is applicable.
169 const Command response;
170 /// String representation (for printing)
171 const std::string str;
174 /// Array to map Command enum to associated info.
175 static const CommandInfo commandInfo[];
182 testCmdAttrib(MemCmd::Attribute attrib) const
184 return commandInfo[cmd].attributes[attrib] != 0;
189 bool isRead() const { return testCmdAttrib(IsRead); }
190 bool isWrite() const { return testCmdAttrib(IsWrite); }
191 bool isUpgrade() const { return testCmdAttrib(IsUpgrade); }
192 bool isRequest() const { return testCmdAttrib(IsRequest); }
193 bool isResponse() const { return testCmdAttrib(IsResponse); }
194 bool needsWritable() const { return testCmdAttrib(NeedsWritable); }
195 bool needsResponse() const { return testCmdAttrib(NeedsResponse); }
196 bool isInvalidate() const { return testCmdAttrib(IsInvalidate); }
197 bool isEviction() const { return testCmdAttrib(IsEviction); }
198 bool fromCache() const { return testCmdAttrib(FromCache); }
201 * A writeback is an eviction that carries data.
203 bool isWriteback() const { return testCmdAttrib(IsEviction) &&
204 testCmdAttrib(HasData); }
207 * Check if this particular packet type carries payload data. Note
208 * that this does not reflect if the data pointer of the packet is
211 bool hasData() const { return testCmdAttrib(HasData); }
212 bool isLLSC() const { return testCmdAttrib(IsLlsc); }
213 bool isSWPrefetch() const { return testCmdAttrib(IsSWPrefetch); }
214 bool isHWPrefetch() const { return testCmdAttrib(IsHWPrefetch); }
215 bool isPrefetch() const { return testCmdAttrib(IsSWPrefetch) ||
216 testCmdAttrib(IsHWPrefetch); }
217 bool isError() const { return testCmdAttrib(IsError); }
218 bool isPrint() const { return testCmdAttrib(IsPrint); }
219 bool isFlush() const { return testCmdAttrib(IsFlush); }
222 responseCommand() const
224 return commandInfo[cmd].response;
227 /// Return the string to a cmd given by idx.
228 const std::string &toString() const { return commandInfo[cmd].str; }
229 int toInt() const { return (int)cmd; }
231 MemCmd(Command _cmd) : cmd(_cmd) { }
232 MemCmd(int _cmd) : cmd((Command)_cmd) { }
233 MemCmd() : cmd(InvalidCmd) { }
235 bool operator==(MemCmd c2) const { return (cmd == c2.cmd); }
236 bool operator!=(MemCmd c2) const { return (cmd != c2.cmd); }
240 * A Packet is used to encapsulate a transfer between two objects in
241 * the memory system (e.g., the L1 and L2 cache). (In contrast, a
242 * single Request travels all the way from the requester to the
243 * ultimate destination and back, possibly being conveyed by several
244 * different Packets along the way.)
246 class Packet : public Printable
249 typedef uint32_t FlagsType;
250 typedef ::Flags<FlagsType> Flags;
255 // Flags to transfer across when copying a packet
256 COPY_FLAGS = 0x0000000F,
258 // Does this packet have sharers (which means it should not be
259 // considered writable) or not. See setHasSharers below.
260 HAS_SHARERS = 0x00000001,
262 // Special control flags
263 /// Special timing-mode atomic snoop for multi-level coherence.
264 EXPRESS_SNOOP = 0x00000002,
266 /// Allow a responding cache to inform the cache hierarchy
267 /// that it had a writable copy before responding. See
268 /// setResponderHadWritable below.
269 RESPONDER_HAD_WRITABLE = 0x00000004,
271 // Snoop co-ordination flag to indicate that a cache is
272 // responding to a snoop. See setCacheResponding below.
273 CACHE_RESPONDING = 0x00000008,
275 /// Are the 'addr' and 'size' fields valid?
276 VALID_ADDR = 0x00000100,
277 VALID_SIZE = 0x00000200,
279 /// Is the data pointer set to a value that shouldn't be freed
280 /// when the packet is destroyed?
281 STATIC_DATA = 0x00001000,
282 /// The data pointer points to a value that should be freed when
283 /// the packet is destroyed. The pointer is assumed to be pointing
284 /// to an array, and delete [] is consequently called
285 DYNAMIC_DATA = 0x00002000,
287 /// suppress the error if this packet encounters a functional
289 SUPPRESS_FUNC_ERROR = 0x00008000,
291 // Signal block present to squash prefetch and cache evict packets
292 // through express snoop flag
293 BLOCK_CACHED = 0x00010000
299 typedef MemCmd::Command Command;
301 /// The command field of the packet.
304 /// A pointer to the original request.
305 const RequestPtr req;
309 * A pointer to the data being transferred. It can be different
310 * sizes at each level of the hierarchy so it belongs in the
311 * packet, not request. This may or may not be populated when a
312 * responder receives the packet. If not populated, memory should
317 /// The address of the request. This address could be virtual or
318 /// physical, depending on the system configuration.
321 /// True if the request targets the secure memory space.
324 /// The size of the request or transfer.
328 * Track the bytes found that satisfy a functional read.
330 std::vector<bool> bytesValid;
335 * The extra delay from seeing the packet until the header is
336 * transmitted. This delay is used to communicate the crossbar
337 * forwarding latency to the neighbouring object (e.g. a cache)
338 * that actually makes the packet wait. As the delay is relative,
339 * a 32-bit unsigned should be sufficient.
341 uint32_t headerDelay;
344 * Keep track of the extra delay incurred by snooping upwards
345 * before sending a request down the memory system. This is used
346 * by the coherent crossbar to account for the additional request
352 * The extra pipelining delay from seeing the packet until the end of
353 * payload is transmitted by the component that provided it (if
354 * any). This includes the header delay. Similar to the header
355 * delay, this is used to make up for the fact that the
356 * crossbar does not make the packet wait. As the delay is
357 * relative, a 32-bit unsigned should be sufficient.
359 uint32_t payloadDelay;
362 * A virtual base opaque structure used to hold state associated
363 * with the packet (e.g., an MSHR), specific to a MemObject that
364 * sees the packet. A pointer to this state is returned in the
365 * packet's response so that the MemObject in question can quickly
366 * look up the state needed to process it. A specific subclass
367 * would be derived from this to carry state specific to a
368 * particular sending device.
370 * As multiple MemObjects may add their SenderState throughout the
371 * memory system, the SenderStates create a stack, where a
372 * MemObject can add a new SenderState, as long as the
373 * preceding SenderState is restored when the response comes
374 * back. For this reason, the predecessor should always be
375 * populated with the current SenderState of a packet before
376 * modifying the senderState field in the request packet.
380 SenderState* predecessor;
381 SenderState() : predecessor(NULL) {}
382 virtual ~SenderState() {}
386 * Object used to maintain state of a PrintReq. The senderState
387 * field of a PrintReq should always be of this type.
389 class PrintReqState : public SenderState
393 * An entry in the label stack.
395 struct LabelStackEntry
397 const std::string label;
400 LabelStackEntry(const std::string &_label, std::string *_prefix);
403 typedef std::list<LabelStackEntry> LabelStack;
404 LabelStack labelStack;
406 std::string *curPrefixPtr;
412 PrintReqState(std::ostream &os, int verbosity = 0);
416 * Returns the current line prefix.
418 const std::string &curPrefix() { return *curPrefixPtr; }
421 * Push a label onto the label stack, and prepend the given
422 * prefix string onto the current prefix. Labels will only be
423 * printed if an object within the label's scope is printed.
425 void pushLabel(const std::string &lbl,
426 const std::string &prefix = " ");
429 * Pop a label off the label stack.
434 * Print all of the pending unprinted labels on the
435 * stack. Called by printObj(), so normally not called by
436 * users unless bypassing printObj().
441 * Print a Printable object to os, because it matched the
442 * address on a PrintReq.
444 void printObj(Printable *obj);
448 * This packet's sender state. Devices should use dynamic_cast<>
449 * to cast to the state appropriate to the sender. The intent of
450 * this variable is to allow a device to attach extra information
451 * to a request. A response packet must return the sender state
452 * that was attached to the original request (even if a new packet
455 SenderState *senderState;
458 * Push a new sender state to the packet and make the current
459 * sender state the predecessor of the new one. This should be
460 * preferred over direct manipulation of the senderState member
463 * @param sender_state SenderState to push at the top of the stack
465 void pushSenderState(SenderState *sender_state);
468 * Pop the top of the state stack and return a pointer to it. This
469 * assumes the current sender state is not NULL. This should be
470 * preferred over direct manipulation of the senderState member
473 * @return The current top of the stack
475 SenderState *popSenderState();
478 * Go through the sender state stack and return the first instance
479 * that is of type T (as determined by a dynamic_cast). If there
480 * is no sender state of type T, NULL is returned.
482 * @return The topmost state of type T
484 template <typename T>
485 T * findNextSenderState() const
488 SenderState* sender_state = senderState;
489 while (t == NULL && sender_state != NULL) {
490 t = dynamic_cast<T*>(sender_state);
491 sender_state = sender_state->predecessor;
496 /// Return the string name of the cmd field (for debugging and
498 const std::string &cmdString() const { return cmd.toString(); }
500 /// Return the index of this command.
501 inline int cmdToIndex() const { return cmd.toInt(); }
503 bool isRead() const { return cmd.isRead(); }
504 bool isWrite() const { return cmd.isWrite(); }
505 bool isUpgrade() const { return cmd.isUpgrade(); }
506 bool isRequest() const { return cmd.isRequest(); }
507 bool isResponse() const { return cmd.isResponse(); }
508 bool needsWritable() const
510 // we should never check if a response needsWritable, the
511 // request has this flag, and for a response we should rather
512 // look at the hasSharers flag (if not set, the response is to
513 // be considered writable)
515 return cmd.needsWritable();
517 bool needsResponse() const { return cmd.needsResponse(); }
518 bool isInvalidate() const { return cmd.isInvalidate(); }
519 bool isEviction() const { return cmd.isEviction(); }
520 bool fromCache() const { return cmd.fromCache(); }
521 bool isWriteback() const { return cmd.isWriteback(); }
522 bool hasData() const { return cmd.hasData(); }
523 bool hasRespData() const
525 MemCmd resp_cmd = cmd.responseCommand();
526 return resp_cmd.hasData();
528 bool isLLSC() const { return cmd.isLLSC(); }
529 bool isError() const { return cmd.isError(); }
530 bool isPrint() const { return cmd.isPrint(); }
531 bool isFlush() const { return cmd.isFlush(); }
536 * Set the cacheResponding flag. This is used by the caches to
537 * signal another cache that they are responding to a request. A
538 * cache will only respond to snoops if it has the line in either
539 * Modified or Owned state. Note that on snoop hits we always pass
540 * the line as Modified and never Owned. In the case of an Owned
541 * line we proceed to invalidate all other copies.
543 * On a cache fill (see Cache::handleFill), we check hasSharers
544 * first, ignoring the cacheResponding flag if hasSharers is set.
545 * A line is consequently allocated as:
547 * hasSharers cacheResponding state
550 * false false Exclusive
551 * false true Modified
 553 void setCacheResponding()
// NOTE(review): the opening brace (and possibly an initial
// assert, e.g. assert(isRequest())) is elided from this excerpt —
// confirm against the full source before relying on the exact body.
 556 assert(!flags.isSet(CACHE_RESPONDING));
 557 flags.set(CACHE_RESPONDING);
// Query whether some cache has claimed responsibility for responding.
 559 bool cacheResponding() const { return flags.isSet(CACHE_RESPONDING); }
561 * On fills, the hasSharers flag is used by the caches in
562 * combination with the cacheResponding flag, as clarified
563 * above. If the hasSharers flag is not set, the packet is passing
564 * writable. Thus, a response from a memory passes the line as
565 * writable by default.
567 * The hasSharers flag is also used by upstream caches to inform a
568 * downstream cache that they have the block (by calling
569 * setHasSharers on snoop request packets that hit in upstream
570 * caches' tags or MSHRs). If the snoop packet has sharers, a
571 * downstream cache is prevented from passing a dirty line upwards
572 * if it was not explicitly asked for a writable copy. See
573 * Cache::satisfyCpuSideRequest.
575 * The hasSharers flag is also used on writebacks, in
576 * combination with the WritebackClean or WritebackDirty commands,
577 * to allocate the block downstream either as:
579 * command hasSharers state
580 * WritebackDirty false Modified
581 * WritebackDirty true Owned
582 * WritebackClean false Exclusive
583 * WritebackClean true Shared
585 void setHasSharers() { flags.set(HAS_SHARERS); }
586 bool hasSharers() const { return flags.isSet(HAS_SHARERS); }
590 * The express snoop flag is used for two purposes. Firstly, it is
591 * used to bypass flow control for normal (non-snoop) requests
592 * going downstream in the memory system. In cases where a cache
593 * is responding to a snoop from another cache (it had a dirty
594 * line), but the line is not writable (and there are possibly
595 * other copies), the express snoop flag is set by the downstream
596 * cache to invalidate all other copies in zero time. Secondly,
597 * the express snoop flag is also set to be able to distinguish
598 * snoop packets that came from a downstream cache, rather than
599 * snoop packets from neighbouring caches.
601 void setExpressSnoop() { flags.set(EXPRESS_SNOOP); }
602 bool isExpressSnoop() const { return flags.isSet(EXPRESS_SNOOP); }
605 * On responding to a snoop request (which only happens for
606 * Modified or Owned lines), make sure that we can transform an
607 * Owned response to a Modified one. If this flag is not set, the
608 * responding cache had the line in the Owned state, and there are
609 * possibly other Shared copies in the memory system. A downstream
610 * cache helps in orchestrating the invalidation of these copies
611 * by sending out the appropriate express snoops.
613 void setResponderHadWritable()
615 assert(cacheResponding());
616 assert(!responderHadWritable());
617 flags.set(RESPONDER_HAD_WRITABLE);
619 bool responderHadWritable() const
620 { return flags.isSet(RESPONDER_HAD_WRITABLE); }
622 void setSuppressFuncError() { flags.set(SUPPRESS_FUNC_ERROR); }
623 bool suppressFuncError() const { return flags.isSet(SUPPRESS_FUNC_ERROR); }
624 void setBlockCached() { flags.set(BLOCK_CACHED); }
625 bool isBlockCached() const { return flags.isSet(BLOCK_CACHED); }
626 void clearBlockCached() { flags.clear(BLOCK_CACHED); }
628 // Network error conditions... encapsulate them as methods since
629 // their encoding keeps changing (from result field to command
634 assert(isResponse());
635 cmd = MemCmd::BadAddressError;
638 void copyError(Packet *pkt) { assert(pkt->isError()); cmd = pkt->cmd; }
640 Addr getAddr() const { assert(flags.isSet(VALID_ADDR)); return addr; }
642 * Update the address of this packet mid-transaction. This is used
643 * by the address mapper to change an already set address to a new
644 * one based on the system configuration. It is intended to remap
645 * an existing address, so it asserts that the current address is
648 void setAddr(Addr _addr) { assert(flags.isSet(VALID_ADDR)); addr = _addr; }
650 unsigned getSize() const { assert(flags.isSet(VALID_SIZE)); return size; }
652 Addr getOffset(unsigned int blk_size) const
654 return getAddr() & Addr(blk_size - 1);
657 Addr getBlockAddr(unsigned int blk_size) const
659 return getAddr() & ~(Addr(blk_size - 1));
662 bool isSecure() const
664 assert(flags.isSet(VALID_ADDR));
 669 * Accessor function to atomic op.
// Both accessors simply forward to the associated Request.
 671 AtomicOpFunctor *getAtomicOp() const { return req->getAtomicOpFunctor(); }
 672 bool isAtomicOp() const { return req->isAtomic(); }
// NOTE(review): the signatures of the two conversion helpers
// (convertScToWrite()/convertLlToRead() in the full source — TODO
// confirm) are elided here; only their command reassignments survive.
 675 * It has been determined that the SC packet should successfully update
 676 * memory. Therefore, convert this SC packet to a normal write.
// Demote the store-conditional to a plain write once success is known.
 683 cmd = MemCmd::WriteReq;
 687 * When ruby is in use, Ruby will monitor the cache line and the
 688 * phys memory should treat LL ops as normal reads.
// Demote the load-locked to a plain read; Ruby tracks the reservation.
 695 cmd = MemCmd::ReadReq;
699 * Constructor. Note that a Request object must be constructed
700 * first, but the Requests's physical address and size fields need
701 * not be valid. The command must be supplied.
703 Packet(const RequestPtr _req, MemCmd _cmd)
704 : cmd(_cmd), req(_req), data(nullptr), addr(0), _isSecure(false),
705 size(0), headerDelay(0), snoopDelay(0), payloadDelay(0),
708 if (req->hasPaddr()) {
709 addr = req->getPaddr();
710 flags.set(VALID_ADDR);
711 _isSecure = req->isSecure();
713 if (req->hasSize()) {
714 size = req->getSize();
715 flags.set(VALID_SIZE);
720 * Alternate constructor if you are trying to create a packet with
721 * a request that is for a whole block, not the address from the
722 * req. this allows for overriding the size/addr of the req.
724 Packet(const RequestPtr _req, MemCmd _cmd, int _blkSize)
725 : cmd(_cmd), req(_req), data(nullptr), addr(0), _isSecure(false),
726 headerDelay(0), snoopDelay(0), payloadDelay(0),
729 if (req->hasPaddr()) {
730 addr = req->getPaddr() & ~(_blkSize - 1);
731 flags.set(VALID_ADDR);
732 _isSecure = req->isSecure();
735 flags.set(VALID_SIZE);
 739 * Alternate constructor for copying a packet. Copy all fields
 740 * *except* if the original packet's data was dynamic, don't copy
 741 * that, as we can't guarantee that the new packet's lifetime is
 742 * less than that of the original packet. In this case the new
 743 * packet should allocate its own data.
 745 Packet(const PacketPtr pkt, bool clear_flags, bool alloc_data)
 746 : cmd(pkt->cmd), req(pkt->req),
// NOTE(review): an initializer between these two lines (likely the
// data member and/or snoopDelay) is elided from this excerpt.
 748 addr(pkt->addr), _isSecure(pkt->_isSecure), size(pkt->size),
 749 bytesValid(pkt->bytesValid),
 750 headerDelay(pkt->headerDelay),
 752 payloadDelay(pkt->payloadDelay),
 753 senderState(pkt->senderState)
// Carry over the coherence-related flags (COPY_FLAGS mask), then the
// address/size validity bits; the clear_flags handling around these
// lines is elided — TODO confirm against the full source.
 756 flags.set(pkt->flags & COPY_FLAGS);
 758 flags.set(pkt->flags & (VALID_ADDR|VALID_SIZE));
 760 // should we allocate space for data, or not, the express
 761 // snoops do not need to carry any data as they only serve to
 762 // co-ordinate state changes
 764 // even if asked to allocate data, if the original packet
 765 // holds static data, then the sender will not be doing
 766 // any memcpy on receiving the response, thus we simply
 767 // carry the pointer forward
 768 if (pkt->flags.isSet(STATIC_DATA)) {
// NOTE(review): the assignment carrying pkt->data forward, and the
// alloc_data branch allocating fresh storage, are elided here.
 770 flags.set(STATIC_DATA);
778 * Generate the appropriate read MemCmd based on the Request flags.
781 makeReadCmd(const RequestPtr req)
784 return MemCmd::LoadLockedReq;
785 else if (req->isPrefetch())
786 return MemCmd::SoftPFReq;
788 return MemCmd::ReadReq;
792 * Generate the appropriate write MemCmd based on the Request flags.
795 makeWriteCmd(const RequestPtr req)
798 return MemCmd::StoreCondReq;
799 else if (req->isSwap())
800 return MemCmd::SwapReq;
802 return MemCmd::WriteReq;
806 * Constructor-like methods that return Packets based on Request objects.
807 * Fine-tune the MemCmd type if it's not a vanilla read or write.
810 createRead(const RequestPtr req)
812 return new Packet(req, makeReadCmd(req));
816 createWrite(const RequestPtr req)
818 return new Packet(req, makeWriteCmd(req));
 822 * clean up packet variables
// NOTE(review): the destructor's signature (~Packet()) is elided here.
 826 // Delete the request object if this is a request packet which
 827 // does not need a response, because the requester will not get
 828 // a chance. If the request packet needs a response then the
 829 // request will be deleted on receipt of the response
 830 // packet. We also make sure to never delete the request for
 831 // express snoops, even for cases when responses are not
 832 // needed (CleanEvict and Writeback), since the snoop packet
 833 // re-uses the same request.
 834 if (req && isRequest() && !needsResponse() &&
// NOTE(review): the tail of this condition (presumably
// !isExpressSnoop()), the delete of req, and the deleteData() call
// are elided — TODO confirm against the full source.
 842 * Take a request packet and modify it in place to be suitable for
 843 * returning as a response to that request.
// NOTE(review): the makeResponse() signature is elided here.
 848 assert(needsResponse());
 850 cmd = cmd.responseCommand();
 852 // responses are never express, even if the snoop that
 853 // triggered them was
 854 flags.clear(EXPRESS_SNOOP);
// Convert this packet into the matching functional error response
// (read vs write); the branching on `success`/isWrite() around these
// two assignments is elided — TODO confirm.
 870 setFunctionalResponseStatus(bool success)
 874 cmd = MemCmd::FunctionalWriteError;
 876 cmd = MemCmd::FunctionalReadError;
// Set the transfer size exactly once; the actual assignment of the
// size member (between the assert and the flag set) is elided.
 882 setSize(unsigned size)
 884 assert(!flags.isSet(VALID_SIZE));
 887 flags.set(VALID_SIZE);
894 * @name Data accessor methods
898 * Set the data pointer to the following value that should not be
899 * freed. Static data allows us to do a single memcpy even if
900 * multiple packets are required to get from source to destination
901 * and back. In essence the pointer is set calling dataStatic on
902 * the original packet, and whenever this packet is copied and
903 * forwarded the same pointer is passed on. When a packet
904 * eventually reaches the destination holding the data, it is
905 * copied once into the location originally set. On the way back
906 * to the source, no copies are necessary.
908 template <typename T>
912 assert(flags.noneSet(STATIC_DATA|DYNAMIC_DATA));
913 data = (PacketDataPtr)p;
914 flags.set(STATIC_DATA);
918 * Set the data pointer to the following value that should not be
919 * freed. This version of the function allows the pointer passed
920 * to us to be const. To avoid issues down the line we cast the
921 * constness away, the alternative would be to keep both a const
922 * and non-const data pointer and cleverly choose between
923 * them. Note that this is only allowed for static data.
925 template <typename T>
927 dataStaticConst(const T *p)
929 assert(flags.noneSet(STATIC_DATA|DYNAMIC_DATA));
930 data = const_cast<PacketDataPtr>(p);
931 flags.set(STATIC_DATA);
935 * Set the data pointer to a value that should have delete []
936 * called on it. Dynamic data is local to this packet, and as the
937 * packet travels from source to destination, forwarded packets
938 * will allocate their own data. When a packet reaches the final
939 * destination it will populate the dynamic data of that specific
940 * packet, and on the way back towards the source, memcpy will be
941 * invoked in every step where a new packet was created e.g. in
942 * the caches. Ultimately when the response reaches the source a
943 * final memcpy is needed to extract the data from the packet
944 * before it is deallocated.
946 template <typename T>
950 assert(flags.noneSet(STATIC_DATA|DYNAMIC_DATA));
951 data = (PacketDataPtr)p;
952 flags.set(DYNAMIC_DATA);
956 * get a pointer to the data ptr.
958 template <typename T>
962 assert(flags.isSet(STATIC_DATA|DYNAMIC_DATA));
966 template <typename T>
970 assert(flags.isSet(STATIC_DATA|DYNAMIC_DATA));
971 return (const T*)data;
975 * Get the data in the packet byte swapped from big endian to
978 template <typename T>
982 * Get the data in the packet byte swapped from little endian to
985 template <typename T>
989 * Get the data in the packet byte swapped from the specified
992 template <typename T>
993 T get(ByteOrder endian) const;
996 * Get the data in the packet byte swapped from guest to host
999 template <typename T>
1002 /** Set the value in the data pointer to v as big endian. */
1003 template <typename T>
1006 /** Set the value in the data pointer to v as little endian. */
1007 template <typename T>
1011 * Set the value in the data pointer to v using the specified
1014 template <typename T>
1015 void set(T v, ByteOrder endian);
1017 /** Set the value in the data pointer to v as guest endian. */
1018 template <typename T>
1022 * Copy data into the packet from the provided pointer.
1025 setData(const uint8_t *p)
1027 // we should never be copying data onto itself, which means we
1028 // must idenfity packets with static data, as they carry the
1029 // same pointer from source to destination and back
1030 assert(p != getPtr<uint8_t>() || flags.isSet(STATIC_DATA));
1032 if (p != getPtr<uint8_t>())
1033 // for packet with allocated dynamic data, we copy data from
1034 // one to the other, e.g. a forwarded response to a response
1035 std::memcpy(getPtr<uint8_t>(), p, getSize());
1039 * Copy data into the packet from the provided block pointer,
1040 * which is aligned to the given block size.
1043 setDataFromBlock(const uint8_t *blk_data, int blkSize)
1045 setData(blk_data + getOffset(blkSize));
1049 * Copy data from the packet to the provided block pointer, which
1050 * is aligned to the given block size.
1053 writeData(uint8_t *p) const
1055 std::memcpy(p, getConstPtr<uint8_t>(), getSize());
1059 * Copy data from the packet to the memory at the provided pointer.
1062 writeDataToBlock(uint8_t *blk_data, int blkSize) const
1064 writeData(blk_data + getOffset(blkSize));
1068 * delete the data pointed to in the data pointer. Ok to call to
1069 * matter how data was allocted.
1074 if (flags.isSet(DYNAMIC_DATA))
1077 flags.clear(STATIC_DATA|DYNAMIC_DATA);
1081 /** Allocate memory for the packet. */
1085 // if either this command or the response command has a data
1086 // payload, actually allocate space
1087 if (hasData() || hasRespData()) {
1088 assert(flags.noneSet(STATIC_DATA|DYNAMIC_DATA));
1089 flags.set(DYNAMIC_DATA);
1090 data = new uint8_t[getSize()];
1096 private: // Private data accessor methods
1097 /** Get the data in the packet without byte swapping. */
1098 template <typename T>
1101 /** Set the value in the data pointer to v without byte swapping. */
1102 template <typename T>
1107 * Check a functional request against a memory value stored in
1108 * another packet (i.e. an in-transit request or
1109 * response). Returns true if the current packet is a read, and
1110 * the other packet provides the data, which is then copied to the
1111 * current packet. If the current packet is a write, and the other
1112 * packet intersects this one, then we update the data
1116 checkFunctional(PacketPtr other)
1118 // all packets that are carrying a payload should have a valid
1120 return checkFunctional(other, other->getAddr(), other->isSecure(),
1123 other->getPtr<uint8_t>() : NULL);
1127 * Does the request need to check for cached copies of the same block
1128 * in the memory hierarchy above.
1131 mustCheckAbove() const
1133 return cmd == MemCmd::HardPFReq || isEviction();
1137 * Is this packet a clean eviction, including both actual clean
1138 * evict packets, but also clean writebacks.
1141 isCleanEviction() const
1143 return cmd == MemCmd::CleanEvict || cmd == MemCmd::WritebackClean;
1147 * Check a functional request against a memory value represented
1148 * by a base/size pair and an associated data array. If the
1149 * current packet is a read, it may be satisfied by the memory
1150 * value. If the current packet is a write, it may update the
1154 checkFunctional(Printable *obj, Addr base, bool is_secure, int size,
1158 * Push label for PrintReq (safe to call unconditionally).
1161 pushLabel(const std::string &lbl)
1164 safe_cast<PrintReqState*>(senderState)->pushLabel(lbl);
1168 * Pop label for PrintReq (safe to call unconditionally).
1174 safe_cast<PrintReqState*>(senderState)->popLabel();
1177 void print(std::ostream &o, int verbosity = 0,
1178 const std::string &prefix = "") const;
1181 * A no-args wrapper of print(std::ostream...)
1182 * meant to be invoked from DPRINTFs
1183 * avoiding string overheads in fast mode
1184 * @return string with the request's type and start<->end addresses
1186 std::string print() const;
1189 #endif //__MEM_PACKET_HH