mem: Add support for cache maintenance operation requests
[gem5.git] / src / mem / packet.hh
1 /*
2 * Copyright (c) 2012-2017 ARM Limited
3 * All rights reserved
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
13 *
14 * Copyright (c) 2006 The Regents of The University of Michigan
15 * Copyright (c) 2010,2015 Advanced Micro Devices, Inc.
16 * All rights reserved.
17 *
18 * Redistribution and use in source and binary forms, with or without
19 * modification, are permitted provided that the following conditions are
20 * met: redistributions of source code must retain the above copyright
21 * notice, this list of conditions and the following disclaimer;
22 * redistributions in binary form must reproduce the above copyright
23 * notice, this list of conditions and the following disclaimer in the
24 * documentation and/or other materials provided with the distribution;
25 * neither the name of the copyright holders nor the names of its
26 * contributors may be used to endorse or promote products derived from
27 * this software without specific prior written permission.
28 *
29 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
30 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
31 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
32 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
33 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
34 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
35 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
36 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
37 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
38 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
39 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
40 *
41 * Authors: Ron Dreslinski
42 * Steve Reinhardt
43 * Ali Saidi
44 * Andreas Hansson
45 */
46
47 /**
48 * @file
49 * Declaration of the Packet class.
50 */
51
52 #ifndef __MEM_PACKET_HH__
53 #define __MEM_PACKET_HH__
54
55 #include <bitset>
56 #include <cassert>
57 #include <list>
58
59 #include "base/cast.hh"
60 #include "base/compiler.hh"
61 #include "base/flags.hh"
62 #include "base/logging.hh"
63 #include "base/printable.hh"
64 #include "base/types.hh"
65 #include "mem/request.hh"
66 #include "sim/core.hh"
67
68 class Packet;
69 typedef Packet *PacketPtr;
70 typedef uint8_t* PacketDataPtr;
71 typedef std::list<PacketPtr> PacketList;
72
73 class MemCmd
74 {
75 friend class Packet;
76
77 public:
78 /**
79 * List of all commands associated with a packet.
80 */
81 enum Command
82 {
83 InvalidCmd,
84 ReadReq,
85 ReadResp,
86 ReadRespWithInvalidate,
87 WriteReq,
88 WriteResp,
89 WritebackDirty,
90 WritebackClean,
91 WriteClean, // writes dirty data below without evicting
92 CleanEvict,
93 SoftPFReq,
94 HardPFReq,
95 SoftPFResp,
96 HardPFResp,
97 WriteLineReq,
98 UpgradeReq,
99 SCUpgradeReq, // Special "weak" upgrade for StoreCond
100 UpgradeResp,
101 SCUpgradeFailReq, // Failed SCUpgradeReq in MSHR (never sent)
102 UpgradeFailResp, // Valid for SCUpgradeReq only
103 ReadExReq,
104 ReadExResp,
105 ReadCleanReq,
106 ReadSharedReq,
107 LoadLockedReq,
108 StoreCondReq,
109 StoreCondFailReq, // Failed StoreCondReq in MSHR (never sent)
110 StoreCondResp,
111 SwapReq,
112 SwapResp,
113 MessageReq,
114 MessageResp,
115 MemFenceReq,
116 MemFenceResp,
117 CleanSharedReq,
118 CleanSharedResp,
119 CleanInvalidReq,
120 CleanInvalidResp,
121 // Error responses
122 // @TODO these should be classified as responses rather than
123 // requests; coding them as requests initially for backwards
124 // compatibility
125 InvalidDestError, // packet dest field invalid
126 BadAddressError, // memory address invalid
127 FunctionalReadError, // unable to fulfill functional read
128 FunctionalWriteError, // unable to fulfill functional write
129 // Fake simulator-only commands
130 PrintReq, // Print state matching address
131 FlushReq, // request for a cache flush
132 InvalidateReq, // request for address to be invalidated
133 InvalidateResp,
134 NUM_MEM_CMDS
135 };
136
137 private:
138 /**
139 * List of command attributes.
140 */
141 enum Attribute
142 {
143 IsRead, //!< Data flows from responder to requester
144 IsWrite, //!< Data flows from requester to responder
145 IsUpgrade,
146 IsInvalidate,
147 IsClean, //!< Cleans any existing dirty blocks
148 NeedsWritable, //!< Requires writable copy to complete in-cache
149 IsRequest, //!< Issued by requester
150 IsResponse, //!< Issued by responder
151 NeedsResponse, //!< Requester needs response from target
152 IsEviction,
153 IsSWPrefetch,
154 IsHWPrefetch,
155 IsLlsc, //!< Alpha/MIPS LL or SC access
156 HasData, //!< There is an associated payload
157 IsError, //!< Error response
158 IsPrint, //!< Print state matching address (for debugging)
159 IsFlush, //!< Flush the address from caches
160 FromCache, //!< Request originated from a caching agent
161 NUM_COMMAND_ATTRIBUTES
162 };
163
164 /**
165 * Structure that defines attributes and other data associated
166 * with a Command.
167 */
168 struct CommandInfo
169 {
170 /// Set of attribute flags.
171 const std::bitset<NUM_COMMAND_ATTRIBUTES> attributes;
172 /// Corresponding response for requests; InvalidCmd if no
173 /// response is applicable.
174 const Command response;
175 /// String representation (for printing)
176 const std::string str;
177 };
178
179 /// Array to map Command enum to associated info.
180 static const CommandInfo commandInfo[];
181
182 private:
183
184 Command cmd;
185
186 bool
187 testCmdAttrib(MemCmd::Attribute attrib) const
188 {
189 return commandInfo[cmd].attributes[attrib] != 0;
190 }
191
192 public:
193
194 bool isRead() const { return testCmdAttrib(IsRead); }
195 bool isWrite() const { return testCmdAttrib(IsWrite); }
196 bool isUpgrade() const { return testCmdAttrib(IsUpgrade); }
197 bool isRequest() const { return testCmdAttrib(IsRequest); }
198 bool isResponse() const { return testCmdAttrib(IsResponse); }
199 bool needsWritable() const { return testCmdAttrib(NeedsWritable); }
200 bool needsResponse() const { return testCmdAttrib(NeedsResponse); }
201 bool isInvalidate() const { return testCmdAttrib(IsInvalidate); }
202 bool isEviction() const { return testCmdAttrib(IsEviction); }
203 bool isClean() const { return testCmdAttrib(IsClean); }
204 bool fromCache() const { return testCmdAttrib(FromCache); }
205
206 /**
207 * A writeback is an eviction that carries data.
208 */
209 bool isWriteback() const { return testCmdAttrib(IsEviction) &&
210 testCmdAttrib(HasData); }
211
212 /**
213 * Check if this particular packet type carries payload data. Note
214 * that this does not reflect if the data pointer of the packet is
215 * valid or not.
216 */
217 bool hasData() const { return testCmdAttrib(HasData); }
218 bool isLLSC() const { return testCmdAttrib(IsLlsc); }
219 bool isSWPrefetch() const { return testCmdAttrib(IsSWPrefetch); }
220 bool isHWPrefetch() const { return testCmdAttrib(IsHWPrefetch); }
221 bool isPrefetch() const { return testCmdAttrib(IsSWPrefetch) ||
222 testCmdAttrib(IsHWPrefetch); }
223 bool isError() const { return testCmdAttrib(IsError); }
224 bool isPrint() const { return testCmdAttrib(IsPrint); }
225 bool isFlush() const { return testCmdAttrib(IsFlush); }
226
227 Command
228 responseCommand() const
229 {
230 return commandInfo[cmd].response;
231 }
232
233 /// Return the string representation of this command.
234 const std::string &toString() const { return commandInfo[cmd].str; }
235 int toInt() const { return (int)cmd; }
236
237 MemCmd(Command _cmd) : cmd(_cmd) { }
238 MemCmd(int _cmd) : cmd((Command)_cmd) { }
239 MemCmd() : cmd(InvalidCmd) { }
240
241 bool operator==(MemCmd c2) const { return (cmd == c2.cmd); }
242 bool operator!=(MemCmd c2) const { return (cmd != c2.cmd); }
243 };
244
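/**
 * Illustrative sketch (not taken from the gem5 sources): a small helper
 * showing how the attribute accessors and responseCommand() above can be
 * combined. Only the MemCmd interface declared in this file is assumed.
 *
 * @code
 * bool
 * expectsDataResponse(MemCmd cmd)
 * {
 *     // e.g. ReadReq is a request that needs a response, and its
 *     // response command (ReadResp) carries a data payload
 *     return cmd.isRequest() && cmd.needsResponse() &&
 *         MemCmd(cmd.responseCommand()).hasData();
 * }
 *
 * // expectsDataResponse(MemCmd::ReadReq)        -> true
 * // expectsDataResponse(MemCmd::WritebackDirty) -> false (no response)
 * @endcode
 */
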
245 /**
246 * A Packet is used to encapsulate a transfer between two objects in
247 * the memory system (e.g., the L1 and L2 cache). (In contrast, a
248 * single Request travels all the way from the requester to the
249 * ultimate destination and back, possibly being conveyed by several
250 * different Packets along the way.)
251 */
252 class Packet : public Printable
253 {
254 public:
255 typedef uint32_t FlagsType;
256 typedef ::Flags<FlagsType> Flags;
257
258 private:
259
260 enum : FlagsType {
261 // Flags to transfer across when copying a packet
262 COPY_FLAGS = 0x0000001F,
263
264 // Does this packet have sharers (which means it should not be
265 // considered writable) or not. See setHasSharers below.
266 HAS_SHARERS = 0x00000001,
267
268 // Special control flags
269 /// Special timing-mode atomic snoop for multi-level coherence.
270 EXPRESS_SNOOP = 0x00000002,
271
272 /// Allow a responding cache to inform the cache hierarchy
273 /// that it had a writable copy before responding. See
274 /// setResponderHadWritable below.
275 RESPONDER_HAD_WRITABLE = 0x00000004,
276
277 // Snoop co-ordination flag to indicate that a cache is
278 // responding to a snoop. See setCacheResponding below.
279 CACHE_RESPONDING = 0x00000008,
280
281 // The writeback/writeclean should be propagated further
282 // downstream by the receiver
283 WRITE_THROUGH = 0x00000010,
284
285 /// Are the 'addr' and 'size' fields valid?
286 VALID_ADDR = 0x00000100,
287 VALID_SIZE = 0x00000200,
288
289 /// Is the data pointer set to a value that shouldn't be freed
290 /// when the packet is destroyed?
291 STATIC_DATA = 0x00001000,
292 /// The data pointer points to a value that should be freed when
293 /// the packet is destroyed. The pointer is assumed to be pointing
294 /// to an array, and delete [] is consequently called
295 DYNAMIC_DATA = 0x00002000,
296
297 /// suppress the error if this packet encounters a functional
298 /// access failure.
299 SUPPRESS_FUNC_ERROR = 0x00008000,
300
301 // Signal block present to squash prefetch and cache evict packets
302 // through express snoop flag
303 BLOCK_CACHED = 0x00010000
304 };
305
306 Flags flags;
307
308 public:
309 typedef MemCmd::Command Command;
310
311 /// The command field of the packet.
312 MemCmd cmd;
313
314 /// A pointer to the original request.
315 const RequestPtr req;
316
317 private:
318 /**
319 * A pointer to the data being transferred. It can be different
320 * sizes at each level of the hierarchy so it belongs in the
321 * packet, not the request. This may or may not be populated when a
322 * responder receives the packet. If not populated, memory should
323 * be allocated.
324 */
325 PacketDataPtr data;
326
327 /// The address of the request. This address could be virtual or
328 /// physical, depending on the system configuration.
329 Addr addr;
330
331 /// True if the request targets the secure memory space.
332 bool _isSecure;
333
334 /// The size of the request or transfer.
335 unsigned size;
336
337 /**
338 * Track the bytes found that satisfy a functional read.
339 */
340 std::vector<bool> bytesValid;
341
342 public:
343
344 /**
345 * The extra delay from seeing the packet until the header is
346 * transmitted. This delay is used to communicate the crossbar
347 * forwarding latency to the neighbouring object (e.g. a cache)
348 * that actually makes the packet wait. As the delay is relative,
349 * a 32-bit unsigned should be sufficient.
350 */
351 uint32_t headerDelay;
352
353 /**
354 * Keep track of the extra delay incurred by snooping upwards
355 * before sending a request down the memory system. This is used
356 * by the coherent crossbar to account for the additional request
357 * delay.
358 */
359 uint32_t snoopDelay;
360
361 /**
362 * The extra pipelining delay from seeing the packet until the end of
363 * payload is transmitted by the component that provided it (if
364 * any). This includes the header delay. Similar to the header
365 * delay, this is used to make up for the fact that the
366 * crossbar does not make the packet wait. As the delay is
367 * relative, a 32-bit unsigned should be sufficient.
368 */
369 uint32_t payloadDelay;
370
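/**
 * Illustrative sketch (an assumption, not lifted from the gem5 sources)
 * of how a responder might fold the relative header/payload delays above
 * into its own latency and then reset them so they are not charged twice:
 *
 * @code
 * Tick
 * receiveDelay(PacketPtr pkt, Tick local_latency)
 * {
 *     Tick delay = local_latency + pkt->headerDelay + pkt->payloadDelay;
 *     pkt->headerDelay = 0;   // consumed here
 *     pkt->payloadDelay = 0;
 *     return delay;
 * }
 * @endcode
 */
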
371 /**
372 * A virtual base opaque structure used to hold state associated
373 * with the packet (e.g., an MSHR), specific to a MemObject that
374 * sees the packet. A pointer to this state is returned in the
375 * packet's response so that the MemObject in question can quickly
376 * look up the state needed to process it. A specific subclass
377 * would be derived from this to carry state specific to a
378 * particular sending device.
379 *
380 * As multiple MemObjects may add their SenderState throughout the
381 * memory system, the SenderStates create a stack, where a
382 * MemObject can add a new SenderState, as long as the
383 * preceding SenderState is restored when the response comes
384 * back. For this reason, the predecessor should always be
385 * populated with the current SenderState of a packet before
386 * modifying the senderState field in the request packet.
387 */
388 struct SenderState
389 {
390 SenderState* predecessor;
391 SenderState() : predecessor(NULL) {}
392 virtual ~SenderState() {}
393 };
394
395 /**
396 * Object used to maintain state of a PrintReq. The senderState
397 * field of a PrintReq should always be of this type.
398 */
399 class PrintReqState : public SenderState
400 {
401 private:
402 /**
403 * An entry in the label stack.
404 */
405 struct LabelStackEntry
406 {
407 const std::string label;
408 std::string *prefix;
409 bool labelPrinted;
410 LabelStackEntry(const std::string &_label, std::string *_prefix);
411 };
412
413 typedef std::list<LabelStackEntry> LabelStack;
414 LabelStack labelStack;
415
416 std::string *curPrefixPtr;
417
418 public:
419 std::ostream &os;
420 const int verbosity;
421
422 PrintReqState(std::ostream &os, int verbosity = 0);
423 ~PrintReqState();
424
425 /**
426 * Returns the current line prefix.
427 */
428 const std::string &curPrefix() { return *curPrefixPtr; }
429
430 /**
431 * Push a label onto the label stack, and prepend the given
432 * prefix string onto the current prefix. Labels will only be
433 * printed if an object within the label's scope is printed.
434 */
435 void pushLabel(const std::string &lbl,
436 const std::string &prefix = " ");
437
438 /**
439 * Pop a label off the label stack.
440 */
441 void popLabel();
442
443 /**
444 * Print all of the pending unprinted labels on the
445 * stack. Called by printObj(), so normally not called by
446 * users unless bypassing printObj().
447 */
448 void printLabels();
449
450 /**
451 * Print a Printable object to os, because it matched the
452 * address on a PrintReq.
453 */
454 void printObj(Printable *obj);
455 };
456
457 /**
458 * This packet's sender state. Devices should use dynamic_cast<>
459 * to cast to the state appropriate to the sender. The intent of
460 * this variable is to allow a device to attach extra information
461 * to a request. A response packet must return the sender state
462 * that was attached to the original request (even if a new packet
463 * is created).
464 */
465 SenderState *senderState;
466
467 /**
468 * Push a new sender state to the packet and make the current
469 * sender state the predecessor of the new one. This should be
470 * preferred over direct manipulation of the senderState member
471 * variable.
472 *
473 * @param sender_state SenderState to push at the top of the stack
474 */
475 void pushSenderState(SenderState *sender_state);
476
477 /**
478 * Pop the top of the state stack and return a pointer to it. This
479 * assumes the current sender state is not NULL. This should be
480 * preferred over direct manipulation of the senderState member
481 * variable.
482 *
483 * @return The current top of the stack
484 */
485 SenderState *popSenderState();
486
487 /**
488 * Go through the sender state stack and return the first instance
489 * that is of type T (as determined by a dynamic_cast). If there
490 * is no sender state of type T, NULL is returned.
491 *
492 * @return The topmost state of type T
493 */
494 template <typename T>
495 T * findNextSenderState() const
496 {
497 T *t = NULL;
498 SenderState* sender_state = senderState;
499 while (t == NULL && sender_state != NULL) {
500 t = dynamic_cast<T*>(sender_state);
501 sender_state = sender_state->predecessor;
502 }
503 return t;
504 }
505
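/**
 * Illustrative sketch (hypothetical names, not from the gem5 sources) of
 * the sender-state stack discipline described above: an object pushes its
 * own state before forwarding a request and pops it when the matching
 * response returns.
 *
 * @code
 * struct MyState : public Packet::SenderState
 * {
 *     Tick issueTick;
 *     MyState(Tick t) : issueTick(t) {}
 * };
 *
 * void onSend(PacketPtr pkt, Tick now)
 * {
 *     // predecessor bookkeeping is handled by pushSenderState()
 *     pkt->pushSenderState(new MyState(now));
 * }
 *
 * void onResponse(PacketPtr pkt)
 * {
 *     MyState *s = dynamic_cast<MyState *>(pkt->popSenderState());
 *     assert(s);
 *     // ... use s->issueTick ...
 *     delete s;
 * }
 * @endcode
 */
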
506 /// Return the string name of the cmd field (for debugging and
507 /// tracing).
508 const std::string &cmdString() const { return cmd.toString(); }
509
510 /// Return the index of this command.
511 inline int cmdToIndex() const { return cmd.toInt(); }
512
513 bool isRead() const { return cmd.isRead(); }
514 bool isWrite() const { return cmd.isWrite(); }
515 bool isUpgrade() const { return cmd.isUpgrade(); }
516 bool isRequest() const { return cmd.isRequest(); }
517 bool isResponse() const { return cmd.isResponse(); }
518 bool needsWritable() const
519 {
520 // we should never check if a response needsWritable, the
521 // request has this flag, and for a response we should rather
522 // look at the hasSharers flag (if not set, the response is to
523 // be considered writable)
524 assert(isRequest());
525 return cmd.needsWritable();
526 }
527 bool needsResponse() const { return cmd.needsResponse(); }
528 bool isInvalidate() const { return cmd.isInvalidate(); }
529 bool isEviction() const { return cmd.isEviction(); }
530 bool isClean() const { return cmd.isClean(); }
531 bool fromCache() const { return cmd.fromCache(); }
532 bool isWriteback() const { return cmd.isWriteback(); }
533 bool hasData() const { return cmd.hasData(); }
534 bool hasRespData() const
535 {
536 MemCmd resp_cmd = cmd.responseCommand();
537 return resp_cmd.hasData();
538 }
539 bool isLLSC() const { return cmd.isLLSC(); }
540 bool isError() const { return cmd.isError(); }
541 bool isPrint() const { return cmd.isPrint(); }
542 bool isFlush() const { return cmd.isFlush(); }
543
544 //@{
545 /// Snoop flags
546 /**
547 * Set the cacheResponding flag. This is used by the caches to
548 * signal another cache that they are responding to a request. A
549 * cache will only respond to snoops if it has the line in either
550 * Modified or Owned state. Note that on snoop hits we always pass
551 * the line as Modified and never Owned. In the case of an Owned
552 * line we proceed to invalidate all other copies.
553 *
554 * On a cache fill (see Cache::handleFill), we check hasSharers
555 * first, ignoring the cacheResponding flag if hasSharers is set.
556 * A line is consequently allocated as:
557 *
558 * hasSharers cacheResponding state
559 * true false Shared
560 * true true Shared
561 * false false Exclusive
562 * false true Modified
563 */
564 void setCacheResponding()
565 {
566 assert(isRequest());
567 assert(!flags.isSet(CACHE_RESPONDING));
568 flags.set(CACHE_RESPONDING);
569 }
570 bool cacheResponding() const { return flags.isSet(CACHE_RESPONDING); }
571 /**
572 * On fills, the hasSharers flag is used by the caches in
573 * combination with the cacheResponding flag, as clarified
574 * above. If the hasSharers flag is not set, the packet is passing
575 * writable. Thus, a response from a memory passes the line as
576 * writable by default.
577 *
578 * The hasSharers flag is also used by upstream caches to inform a
579 * downstream cache that they have the block (by calling
580 * setHasSharers on snoop request packets that hit in upstream
581 * cache's tags or MSHRs). If the snoop packet has sharers, a
582 * downstream cache is prevented from passing a dirty line upwards
583 * if it was not explicitly asked for a writable copy. See
584 * Cache::satisfyCpuSideRequest.
585 *
586 * The hasSharers flag is also used on writebacks, in
587 * combination with the WritebackClean or WritebackDirty commands,
588 * to allocate the block downstream either as:
589 *
590 * command hasSharers state
591 * WritebackDirty false Modified
592 * WritebackDirty true Owned
593 * WritebackClean false Exclusive
594 * WritebackClean true Shared
595 */
596 void setHasSharers() { flags.set(HAS_SHARERS); }
597 bool hasSharers() const { return flags.isSet(HAS_SHARERS); }
598 //@}
599
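/**
 * Illustrative sketch (an assumption; the real decision is made in the
 * cache model, e.g. Cache::handleFill, which is not part of this file) of
 * how the table above maps the two flags onto a fill state:
 *
 * @code
 * enum class FillState { Shared, Exclusive, Modified };
 *
 * FillState
 * fillStateFor(const Packet &pkt)
 * {
 *     if (pkt.hasSharers())
 *         return FillState::Shared;    // other copies exist, not writable
 *     return pkt.cacheResponding() ? FillState::Modified   // dirty data passed
 *                                  : FillState::Exclusive; // clean, writable
 * }
 * @endcode
 */
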
600 /**
601 * The express snoop flag is used for two purposes. Firstly, it is
602 * used to bypass flow control for normal (non-snoop) requests
603 * going downstream in the memory system. In cases where a cache
604 * is responding to a snoop from another cache (it had a dirty
605 * line), but the line is not writable (and there are possibly
606 * other copies), the express snoop flag is set by the downstream
607 * cache to invalidate all other copies in zero time. Secondly,
608 * the express snoop flag is also set to be able to distinguish
609 * snoop packets that came from a downstream cache, rather than
610 * snoop packets from neighbouring caches.
611 */
612 void setExpressSnoop() { flags.set(EXPRESS_SNOOP); }
613 bool isExpressSnoop() const { return flags.isSet(EXPRESS_SNOOP); }
614
615 /**
616 * On responding to a snoop request (which only happens for
617 * Modified or Owned lines), make sure that we can transform an
618 * Owned response to a Modified one. If this flag is not set, the
619 * responding cache had the line in the Owned state, and there are
620 * possibly other Shared copies in the memory system. A downstream
621 * cache helps in orchestrating the invalidation of these copies
622 * by sending out the appropriate express snoops.
623 */
624 void setResponderHadWritable()
625 {
626 assert(cacheResponding());
627 assert(!responderHadWritable());
628 flags.set(RESPONDER_HAD_WRITABLE);
629 }
630 bool responderHadWritable() const
631 { return flags.isSet(RESPONDER_HAD_WRITABLE); }
632
633 /**
634 * A writeback/writeclean cmd gets propagated further downstream
635 * by the receiver when the flag is set.
636 */
637 void setWriteThrough()
638 {
639 assert(cmd.isWrite() &&
640 (cmd.isEviction() || cmd == MemCmd::WriteClean));
641 flags.set(WRITE_THROUGH);
642 }
643 void clearWriteThrough() { flags.clear(WRITE_THROUGH); }
644 bool writeThrough() const { return flags.isSet(WRITE_THROUGH); }
645
646 void setSuppressFuncError() { flags.set(SUPPRESS_FUNC_ERROR); }
647 bool suppressFuncError() const { return flags.isSet(SUPPRESS_FUNC_ERROR); }
648 void setBlockCached() { flags.set(BLOCK_CACHED); }
649 bool isBlockCached() const { return flags.isSet(BLOCK_CACHED); }
650 void clearBlockCached() { flags.clear(BLOCK_CACHED); }
651
652 // Network error conditions... encapsulate them as methods since
653 // their encoding keeps changing (from result field to command
654 // field, etc.)
655 void
656 setBadAddress()
657 {
658 assert(isResponse());
659 cmd = MemCmd::BadAddressError;
660 }
661
662 void copyError(Packet *pkt) { assert(pkt->isError()); cmd = pkt->cmd; }
663
664 Addr getAddr() const { assert(flags.isSet(VALID_ADDR)); return addr; }
665 /**
666 * Update the address of this packet mid-transaction. This is used
667 * by the address mapper to change an already set address to a new
668 * one based on the system configuration. It is intended to remap
669 * an existing address, so it asserts that the current address is
670 * valid.
671 */
672 void setAddr(Addr _addr) { assert(flags.isSet(VALID_ADDR)); addr = _addr; }
673
674 unsigned getSize() const { assert(flags.isSet(VALID_SIZE)); return size; }
675
676 Addr getOffset(unsigned int blk_size) const
677 {
678 return getAddr() & Addr(blk_size - 1);
679 }
680
681 Addr getBlockAddr(unsigned int blk_size) const
682 {
683 return getAddr() & ~(Addr(blk_size - 1));
684 }
685
686 bool isSecure() const
687 {
688 assert(flags.isSet(VALID_ADDR));
689 return _isSecure;
690 }
691
692 /**
693 * Accessor function to atomic op.
694 */
695 AtomicOpFunctor *getAtomicOp() const { return req->getAtomicOpFunctor(); }
696 bool isAtomicOp() const { return req->isAtomic(); }
697
698 /**
699 * It has been determined that the SC packet should successfully update
700 * memory. Therefore, convert this SC packet to a normal write.
701 */
702 void
703 convertScToWrite()
704 {
705 assert(isLLSC());
706 assert(isWrite());
707 cmd = MemCmd::WriteReq;
708 }
709
710 /**
711 * When ruby is in use, Ruby will monitor the cache line and the
712 * phys memory should treat LL ops as normal reads.
713 */
714 void
715 convertLlToRead()
716 {
717 assert(isLLSC());
718 assert(isRead());
719 cmd = MemCmd::ReadReq;
720 }
721
722 /**
723 * Constructor. Note that a Request object must be constructed
724 * first, but the Requests's physical address and size fields need
725 * not be valid. The command must be supplied.
726 */
727 Packet(const RequestPtr _req, MemCmd _cmd)
728 : cmd(_cmd), req(_req), data(nullptr), addr(0), _isSecure(false),
729 size(0), headerDelay(0), snoopDelay(0), payloadDelay(0),
730 senderState(NULL)
731 {
732 if (req->hasPaddr()) {
733 addr = req->getPaddr();
734 flags.set(VALID_ADDR);
735 _isSecure = req->isSecure();
736 }
737 if (req->hasSize()) {
738 size = req->getSize();
739 flags.set(VALID_SIZE);
740 }
741 }
742
743 /**
744 * Alternate constructor if you are trying to create a packet with
745 * a request that is for a whole block, not the address from the
746 * req. This allows for overriding the size/addr of the req.
747 */
748 Packet(const RequestPtr _req, MemCmd _cmd, int _blkSize)
749 : cmd(_cmd), req(_req), data(nullptr), addr(0), _isSecure(false),
750 headerDelay(0), snoopDelay(0), payloadDelay(0),
751 senderState(NULL)
752 {
753 if (req->hasPaddr()) {
754 addr = req->getPaddr() & ~(_blkSize - 1);
755 flags.set(VALID_ADDR);
756 _isSecure = req->isSecure();
757 }
758 size = _blkSize;
759 flags.set(VALID_SIZE);
760 }
761
762 /**
763 * Alternate constructor for copying a packet. Copy all fields
764 * *except* if the original packet's data was dynamic, don't copy
765 * that, as we can't guarantee that the new packet's lifetime is
766 * less than that of the original packet. In this case the new
767 * packet should allocate its own data.
768 */
769 Packet(const PacketPtr pkt, bool clear_flags, bool alloc_data)
770 : cmd(pkt->cmd), req(pkt->req),
771 data(nullptr),
772 addr(pkt->addr), _isSecure(pkt->_isSecure), size(pkt->size),
773 bytesValid(pkt->bytesValid),
774 headerDelay(pkt->headerDelay),
775 snoopDelay(0),
776 payloadDelay(pkt->payloadDelay),
777 senderState(pkt->senderState)
778 {
779 if (!clear_flags)
780 flags.set(pkt->flags & COPY_FLAGS);
781
782 flags.set(pkt->flags & (VALID_ADDR|VALID_SIZE));
783
784 // should we allocate space for data, or not, the express
785 // snoops do not need to carry any data as they only serve to
786 // co-ordinate state changes
787 if (alloc_data) {
788 // even if asked to allocate data, if the original packet
789 // holds static data, then the sender will not be doing
790 // any memcpy on receiving the response, thus we simply
791 // carry the pointer forward
792 if (pkt->flags.isSet(STATIC_DATA)) {
793 data = pkt->data;
794 flags.set(STATIC_DATA);
795 } else {
796 allocate();
797 }
798 }
799 }
800
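/**
 * Illustrative sketch (hypothetical, not from the gem5 sources): a
 * forwarding object copying an incoming request packet, keeping the
 * coherence flags and allocating fresh data for the copy as described
 * above.
 *
 * @code
 * PacketPtr
 * forwardCopy(PacketPtr pkt)
 * {
 *     // clear_flags = false keeps the COPY_FLAGS, alloc_data = true
 *     // gives the copy its own (or the original static) data pointer
 *     return new Packet(pkt, false, true);
 * }
 * @endcode
 */
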
801 /**
802 * Generate the appropriate read MemCmd based on the Request flags.
803 */
804 static MemCmd
805 makeReadCmd(const RequestPtr req)
806 {
807 if (req->isLLSC())
808 return MemCmd::LoadLockedReq;
809 else if (req->isPrefetch())
810 return MemCmd::SoftPFReq;
811 else
812 return MemCmd::ReadReq;
813 }
814
815 /**
816 * Generate the appropriate write MemCmd based on the Request flags.
817 */
818 static MemCmd
819 makeWriteCmd(const RequestPtr req)
820 {
821 if (req->isLLSC())
822 return MemCmd::StoreCondReq;
823 else if (req->isSwap())
824 return MemCmd::SwapReq;
825 else if (req->isCacheInvalidate()) {
826 return req->isCacheClean() ? MemCmd::CleanInvalidReq :
827 MemCmd::InvalidateReq;
828 } else if (req->isCacheClean()) {
829 return MemCmd::CleanSharedReq;
830 } else
831 return MemCmd::WriteReq;
832 }
833
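/**
 * Illustrative sketch (an assumption, written out only for clarity): the
 * cache maintenance mapping used by makeWriteCmd() above, expressed as a
 * stand-alone helper over the two request properties it inspects.
 *
 * @code
 * MemCmd
 * cacheMaintenanceCmd(bool clean, bool invalidate)
 * {
 *     if (invalidate)
 *         return clean ? MemCmd::CleanInvalidReq : MemCmd::InvalidateReq;
 *     assert(clean);
 *     return MemCmd::CleanSharedReq;
 * }
 * @endcode
 */
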
834 /**
835 * Constructor-like methods that return Packets based on Request objects.
836 * Fine-tune the MemCmd type if it's not a vanilla read or write.
837 */
838 static PacketPtr
839 createRead(const RequestPtr req)
840 {
841 return new Packet(req, makeReadCmd(req));
842 }
843
844 static PacketPtr
845 createWrite(const RequestPtr req)
846 {
847 return new Packet(req, makeWriteCmd(req));
848 }
849
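/**
 * Illustrative sketch (an assumption; req is an already constructed
 * RequestPtr for an 8-byte read, and Request construction itself is
 * outside this file) of the request/response life cycle using the
 * factory methods above:
 *
 * @code
 * // requester side
 * PacketPtr pkt = Packet::createRead(req);
 * pkt->allocate();                  // space for the response payload
 *
 * // responder side: fill in the data and turn the packet around
 * pkt->setLE<uint64_t>(0x1234);
 * pkt->makeResponse();              // ReadReq becomes ReadResp
 *
 * // requester side: consume the response
 * uint64_t val = pkt->getLE<uint64_t>();
 * delete pkt;   // ~Packet only deletes req for request packets
 *               // that need no response
 * @endcode
 */
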
850 /**
851 * clean up packet variables
852 */
853 ~Packet()
854 {
855 // Delete the request object if this is a request packet which
856 // does not need a response, because the requester will not get
857 // a chance. If the request packet needs a response then the
858 // request will be deleted on receipt of the response
859 // packet. We also make sure to never delete the request for
860 // express snoops, even for cases when responses are not
861 // needed (CleanEvict and Writeback), since the snoop packet
862 // re-uses the same request.
863 if (req && isRequest() && !needsResponse() &&
864 !isExpressSnoop()) {
865 delete req;
866 }
867 deleteData();
868 }
869
870 /**
871 * Take a request packet and modify it in place to be suitable for
872 * returning as a response to that request.
873 */
874 void
875 makeResponse()
876 {
877 assert(needsResponse());
878 assert(isRequest());
879 cmd = cmd.responseCommand();
880
881 // responses are never express, even if the snoop that
882 // triggered them was
883 flags.clear(EXPRESS_SNOOP);
884 }
885
886 void
887 makeAtomicResponse()
888 {
889 makeResponse();
890 }
891
892 void
893 makeTimingResponse()
894 {
895 makeResponse();
896 }
897
898 void
899 setFunctionalResponseStatus(bool success)
900 {
901 if (!success) {
902 if (isWrite()) {
903 cmd = MemCmd::FunctionalWriteError;
904 } else {
905 cmd = MemCmd::FunctionalReadError;
906 }
907 }
908 }
909
910 void
911 setSize(unsigned size)
912 {
913 assert(!flags.isSet(VALID_SIZE));
914
915 this->size = size;
916 flags.set(VALID_SIZE);
917 }
918
919
920 public:
921 /**
922 * @{
923 * @name Data accessor methods
924 */
925
926 /**
927 * Set the data pointer to the following value that should not be
928 * freed. Static data allows us to do a single memcpy even if
929 * multiple packets are required to get from source to destination
930 * and back. In essence the pointer is set calling dataStatic on
931 * the original packet, and whenever this packet is copied and
932 * forwarded the same pointer is passed on. When a packet
933 * eventually reaches the destination holding the data, it is
934 * copied once into the location originally set. On the way back
935 * to the source, no copies are necessary.
936 */
937 template <typename T>
938 void
939 dataStatic(T *p)
940 {
941 assert(flags.noneSet(STATIC_DATA|DYNAMIC_DATA));
942 data = (PacketDataPtr)p;
943 flags.set(STATIC_DATA);
944 }
945
946 /**
947 * Set the data pointer to the following value that should not be
948 * freed. This version of the function allows the pointer passed
949 * to us to be const. To avoid issues down the line we cast the
950 * constness away, the alternative would be to keep both a const
951 * and non-const data pointer and cleverly choose between
952 * them. Note that this is only allowed for static data.
953 */
954 template <typename T>
955 void
956 dataStaticConst(const T *p)
957 {
958 assert(flags.noneSet(STATIC_DATA|DYNAMIC_DATA));
959 data = const_cast<PacketDataPtr>(p);
960 flags.set(STATIC_DATA);
961 }
962
963 /**
964 * Set the data pointer to a value that should have delete []
965 * called on it. Dynamic data is local to this packet, and as the
966 * packet travels from source to destination, forwarded packets
967 * will allocate their own data. When a packet reaches the final
968 * destination it will populate the dynamic data of that specific
969 * packet, and on the way back towards the source, memcpy will be
970 * invoked in every step where a new packet was created e.g. in
971 * the caches. Ultimately when the response reaches the source a
972 * final memcpy is needed to extract the data from the packet
973 * before it is deallocated.
974 */
975 template <typename T>
976 void
977 dataDynamic(T *p)
978 {
979 assert(flags.noneSet(STATIC_DATA|DYNAMIC_DATA));
980 data = (PacketDataPtr)p;
981 flags.set(DYNAMIC_DATA);
982 }
983
984 /**
985 * get a pointer to the data ptr.
986 */
987 template <typename T>
988 T*
989 getPtr()
990 {
991 assert(flags.isSet(STATIC_DATA|DYNAMIC_DATA));
992 return (T*)data;
993 }
994
995 template <typename T>
996 const T*
997 getConstPtr() const
998 {
999 assert(flags.isSet(STATIC_DATA|DYNAMIC_DATA));
1000 return (const T*)data;
1001 }
1002
1003 /**
1004 * Get the data in the packet byte swapped from big endian to
1005 * host endian.
1006 */
1007 template <typename T>
1008 T getBE() const;
1009
1010 /**
1011 * Get the data in the packet byte swapped from little endian to
1012 * host endian.
1013 */
1014 template <typename T>
1015 T getLE() const;
1016
1017 /**
1018 * Get the data in the packet byte swapped from the specified
1019 * endianness.
1020 */
1021 template <typename T>
1022 T get(ByteOrder endian) const;
1023
1024 /**
1025 * Get the data in the packet byte swapped from guest to host
1026 * endian.
1027 */
1028 template <typename T>
1029 T get() const;
1030
1031 /** Set the value in the data pointer to v as big endian. */
1032 template <typename T>
1033 void setBE(T v);
1034
1035 /** Set the value in the data pointer to v as little endian. */
1036 template <typename T>
1037 void setLE(T v);
1038
1039 /**
1040 * Set the value in the data pointer to v using the specified
1041 * endianness.
1042 */
1043 template <typename T>
1044 void set(T v, ByteOrder endian);
1045
1046 /** Set the value in the data pointer to v as guest endian. */
1047 template <typename T>
1048 void set(T v);
1049
1050 /**
1051 * Copy data into the packet from the provided pointer.
1052 */
1053 void
1054 setData(const uint8_t *p)
1055 {
1056 // we should never be copying data onto itself, which means we
1057 * must identify packets with static data, as they carry the
1058 // same pointer from source to destination and back
1059 assert(p != getPtr<uint8_t>() || flags.isSet(STATIC_DATA));
1060
1061 if (p != getPtr<uint8_t>())
1062 // for packet with allocated dynamic data, we copy data from
1063 // one to the other, e.g. a forwarded response to a response
1064 std::memcpy(getPtr<uint8_t>(), p, getSize());
1065 }
1066
1067 /**
1068 * Copy data into the packet from the provided block pointer,
1069 * which is aligned to the given block size.
1070 */
1071 void
1072 setDataFromBlock(const uint8_t *blk_data, int blkSize)
1073 {
1074 setData(blk_data + getOffset(blkSize));
1075 }
1076
1077 /**
1078 * Copy data from the packet to the provided block pointer, which
1079 * is aligned to the given block size.
1080 */
1081 void
1082 writeData(uint8_t *p) const
1083 {
1084 std::memcpy(p, getConstPtr<uint8_t>(), getSize());
1085 }
1086
1087 /**
1088 * Copy data from the packet to the memory at the provided pointer.
1089 */
1090 void
1091 writeDataToBlock(uint8_t *blk_data, int blkSize) const
1092 {
1093 writeData(blk_data + getOffset(blkSize));
1094 }
1095
1096 /**
1097 * Delete the data pointed to by the data pointer. OK to call no
1098 * matter how the data was allocated.
1099 */
1100 void
1101 deleteData()
1102 {
1103 if (flags.isSet(DYNAMIC_DATA))
1104 delete [] data;
1105
1106 flags.clear(STATIC_DATA|DYNAMIC_DATA);
1107 data = NULL;
1108 }
1109
1110 /** Allocate memory for the packet. */
1111 void
1112 allocate()
1113 {
1114 // if either this command or the response command has a data
1115 // payload, actually allocate space
1116 if (hasData() || hasRespData()) {
1117 assert(flags.noneSet(STATIC_DATA|DYNAMIC_DATA));
1118 flags.set(DYNAMIC_DATA);
1119 data = new uint8_t[getSize()];
1120 }
1121 }
1122
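/**
 * Illustrative sketch (pkt, other_pkt and src_buf are hypothetical; not
 * from the gem5 sources) contrasting the two ownership modes described
 * above:
 *
 * @code
 * uint8_t buf[64];
 *
 * // static data: the packet borrows the caller's buffer and never
 * // frees it; the far end fills it in with a single copy
 * pkt->dataStatic(buf);
 *
 * // dynamic data: the packet owns a heap array sized by getSize()
 * // and delete[]s it in deleteData()/~Packet
 * other_pkt->allocate();
 * other_pkt->setData(src_buf);   // copy the payload in
 * @endcode
 */
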
1123 /** @} */
1124
1125 private: // Private data accessor methods
1126 /** Get the data in the packet without byte swapping. */
1127 template <typename T>
1128 T getRaw() const;
1129
1130 /** Set the value in the data pointer to v without byte swapping. */
1131 template <typename T>
1132 void setRaw(T v);
1133
1134 public:
1135 /**
1136 * Check a functional request against a memory value stored in
1137 * another packet (i.e. an in-transit request or
1138 * response). Returns true if the current packet is a read, and
1139 * the other packet provides the data, which is then copied to the
1140 * current packet. If the current packet is a write, and the other
1141 * packet intersects this one, then we update the data
1142 * accordingly.
1143 */
1144 bool
1145 checkFunctional(PacketPtr other)
1146 {
1147 // all packets that are carrying a payload should have a valid
1148 // data pointer
1149 return checkFunctional(other, other->getAddr(), other->isSecure(),
1150 other->getSize(),
1151 other->hasData() ?
1152 other->getPtr<uint8_t>() : NULL);
1153 }
1154
1155 /**
1156 * Does the request need to check for cached copies of the same block
1157 * in the memory hierarchy above.
1158 **/
1159 bool
1160 mustCheckAbove() const
1161 {
1162 return cmd == MemCmd::HardPFReq || isEviction();
1163 }
1164
1165 /**
1166 * Is this packet a clean eviction, including both actual clean
1167 * evict packets, but also clean writebacks.
1168 */
1169 bool
1170 isCleanEviction() const
1171 {
1172 return cmd == MemCmd::CleanEvict || cmd == MemCmd::WritebackClean;
1173 }
1174
1175 /**
1176 * Check a functional request against a memory value represented
1177 * by a base/size pair and an associated data array. If the
1178 * current packet is a read, it may be satisfied by the memory
1179 * value. If the current packet is a write, it may update the
1180 * memory value.
1181 */
1182 bool
1183 checkFunctional(Printable *obj, Addr base, bool is_secure, int size,
1184 uint8_t *_data);
1185
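/**
 * Illustrative sketch (MyBuffer is hypothetical and derives from
 * Printable; not from the gem5 sources): letting a functional access
 * read from, or write into, locally held block data.
 *
 * @code
 * bool
 * MyBuffer::tryFunctional(PacketPtr pkt, Addr blk_addr, bool is_secure,
 *                         int blk_size, uint8_t *blk_data)
 * {
 *     // a functional read returns true when fully satisfied from
 *     // blk_data; a functional write updates blk_data in place
 *     return pkt->checkFunctional(this, blk_addr, is_secure,
 *                                 blk_size, blk_data);
 * }
 * @endcode
 */
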
1186 /**
1187 * Push label for PrintReq (safe to call unconditionally).
1188 */
1189 void
1190 pushLabel(const std::string &lbl)
1191 {
1192 if (isPrint())
1193 safe_cast<PrintReqState*>(senderState)->pushLabel(lbl);
1194 }
1195
1196 /**
1197 * Pop label for PrintReq (safe to call unconditionally).
1198 */
1199 void
1200 popLabel()
1201 {
1202 if (isPrint())
1203 safe_cast<PrintReqState*>(senderState)->popLabel();
1204 }
1205
1206 void print(std::ostream &o, int verbosity = 0,
1207 const std::string &prefix = "") const;
1208
1209 /**
1210 * A no-args wrapper of print(std::ostream...)
1211 * meant to be invoked from DPRINTFs
1212 * avoiding string overheads in fast mode
1213 * @return string with the request's type and start<->end addresses
1214 */
1215 std::string print() const;
1216 };
1217
1218 #endif //__MEM_PACKET_HH__