/*
 * Copyright (c) 2012-2013,2017-2020 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * Copyright (c) 2010,2015 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/**
 * @file
 * Declaration of a request, the overall memory request consisting of
 * the parts of the request that are persistent throughout the
 * transaction.
 */

#ifndef __MEM_REQUEST_HH__
#define __MEM_REQUEST_HH__

#include <algorithm>
#include <cassert>
#include <cstdint>
#include <functional>
#include <limits>
#include <memory>
#include <vector>

#include "base/amo.hh"
#include "base/flags.hh"
#include "base/types.hh"
#include "cpu/inst_seq.hh"
#include "mem/htm.hh"
#include "sim/core.hh"

/**
 * Special TaskIds that are used for per-context-switch stats dumps
 * and Cache Occupancy. Having too many tasks seems to be a problem
 * with vector stats. 1024 seems to be a reasonable number that
 * doesn't cause a problem with stats and is large enough for
 * realistic benchmarks (Linux/Android boot, BBench, etc.)
 */

namespace ContextSwitchTaskId {
    enum TaskId {
        MaxNormalTaskId = 1021, /* Maximum number of normal tasks */
        Prefetcher = 1022, /* For cache lines brought in by prefetcher */
        DMA = 1023, /* Mostly Table Walker */
        Unknown = 1024,
        NumTaskId
    };
}

class Packet;
class Request;
class ThreadContext;

typedef std::shared_ptr<Request> RequestPtr;
typedef uint16_t RequestorID;

class Request
{
  public:
    typedef uint64_t FlagsType;
    typedef uint8_t ArchFlagsType;
    typedef ::Flags<FlagsType> Flags;

    enum : FlagsType {
        /**
         * Architecture specific flags.
         *
         * These bits in the flag field are reserved for
         * architecture-specific code. For example, SPARC uses them to
         * represent ASIs.
         */
        ARCH_BITS = 0x000000FF,
        /** The request was an instruction fetch. */
        INST_FETCH = 0x00000100,
        /** The virtual address is also the physical address. */
        PHYSICAL = 0x00000200,
        /**
         * The request is to an uncacheable address.
         *
         * @note Uncacheable accesses may be reordered by CPU models. The
         * STRICT_ORDER flag should be set if such reordering is
         * undesirable.
         */
        UNCACHEABLE = 0x00000400,
        /**
         * The request is required to be strictly ordered by <i>CPU
         * models</i> and is non-speculative.
         *
         * A strictly ordered request is guaranteed to never be
         * re-ordered or executed speculatively by a CPU model. The
         * memory system may still reorder requests in caches unless
         * the UNCACHEABLE flag is set as well.
         */
        STRICT_ORDER = 0x00000800,
        /** This request is made in privileged mode. */
        PRIVILEGED = 0x00008000,

        /**
         * This is a write that targets and zeroes an entire cache block.
         * There is no need for a read/modify/write.
         */
        CACHE_BLOCK_ZERO = 0x00010000,

        /** The request should not cause a memory access. */
        NO_ACCESS = 0x00080000,
        /**
         * This request will lock or unlock the accessed memory. When
         * used with a load, the access locks the particular chunk of
         * memory. When used with a store, it unlocks. The rule is
         * that locked accesses have to be made up of a locked load,
         * some operation on the data, and then a locked store.
         */
        LOCKED_RMW = 0x00100000,
        /** The request is a Load locked/store conditional. */
        LLSC = 0x00200000,
        /** This request is for a memory swap. */
        MEM_SWAP = 0x00400000,
        MEM_SWAP_COND = 0x00800000,

        /** The request is a prefetch. */
        PREFETCH = 0x01000000,
        /** The request should be prefetched into the exclusive state. */
        PF_EXCLUSIVE = 0x02000000,
        /** The request should be marked as LRU. */
        EVICT_NEXT = 0x04000000,
        /** The request should be marked with ACQUIRE. */
        ACQUIRE = 0x00020000,
        /** The request should be marked with RELEASE. */
        RELEASE = 0x00040000,

        /** The request is an atomic that returns data. */
        ATOMIC_RETURN_OP = 0x40000000,
        /** The request is an atomic that does not return data. */
        ATOMIC_NO_RETURN_OP = 0x80000000,

        /**
         * The request should be marked with KERNEL.
         * Used to indicate the synchronization associated with a GPU
         * kernel launch or completion.
         */
        KERNEL = 0x00001000,

        /** The request targets the secure memory space. */
        SECURE = 0x10000000,
        /** The request is a page table walk. */
        PT_WALK = 0x20000000,

        /** The request invalidates a memory location. */
        INVALIDATE = 0x0000000100000000,
        /** The request cleans a memory location. */
        CLEAN = 0x0000000200000000,

        /** The request targets the point of unification. */
        DST_POU = 0x0000001000000000,

        /** The request targets the point of coherence. */
        DST_POC = 0x0000002000000000,

        /** Bits to define the destination of a request. */
        DST_BITS = 0x0000003000000000,

        /** hardware transactional memory **/

        /** The request starts a HTM transaction */
        HTM_START = 0x0000010000000000,

        /** The request commits a HTM transaction */
        HTM_COMMIT = 0x0000020000000000,

        /** The request cancels a HTM transaction */
        HTM_CANCEL = 0x0000040000000000,

        /** The request aborts a HTM transaction */
        HTM_ABORT = 0x0000080000000000,

        // What is the difference between HTM cancel and abort?
        //
        // HTM_CANCEL will originate from a user instruction, e.g.
        // Arm's TCANCEL or x86's XABORT. This is an explicit request
        // to end a transaction and restore from the last checkpoint.
        //
        // HTM_ABORT is an internally generated request used to synchronize
        // a transaction's failure between the core and memory subsystem.
        // If a transaction fails in the core, e.g. because an instruction
        // within the transaction generates an exception, the core will
        // prepare itself to stop fetching/executing more instructions and
        // send an HTM_ABORT to the memory subsystem before restoring the
        // checkpoint. Similarly, the transaction could fail in the memory
        // subsystem and this will be communicated to the core via the
        // Packet object. Once the core notices, it will do the same as the
        // above and send a HTM_ABORT to the memory subsystem.
        // A HTM_CANCEL sent to the memory subsystem will ultimately return
        // to the core which in turn will send a HTM_ABORT.
        //
        // This separation is necessary to ensure the disjoint components
        // of the system work correctly together.

        /**
         * These flags are *not* cleared when a Request object is
         * reused (assigned a new address).
         */
        STICKY_FLAGS = INST_FETCH
    };
    static const FlagsType STORE_NO_DATA = CACHE_BLOCK_ZERO |
        CLEAN | INVALIDATE;

    static const FlagsType HTM_CMD = HTM_START | HTM_COMMIT |
        HTM_CANCEL | HTM_ABORT;

    /** Requestor Ids that are statically allocated
     * @{*/
    enum : RequestorID {
        /** This requestor id is used for writeback requests by the caches */
        wbRequestorId = 0,
        /**
         * This requestor id is used for functional requests that
         * don't come from a particular device
         */
        funcRequestorId = 1,
        /** This requestor id is used for message signaled interrupts */
        intRequestorId = 2,
        /**
         * Invalid requestor id for assertion checking only. It is
         * invalid behavior to ever send this id as part of a request.
         */
        invldRequestorId = std::numeric_limits<RequestorID>::max()
    };
    /** @} */

    typedef uint64_t CacheCoherenceFlagsType;
    typedef ::Flags<CacheCoherenceFlagsType> CacheCoherenceFlags;

    /**
     * These bits are used to set the coherence policy for the GPU and are
     * encoded in the GCN3 instructions. The GCN3 ISA defines two cache
     * levels. See the AMD GCN3 ISA Architecture Manual for more details.
     *
     * INV_L1: L1 cache invalidation
     * FLUSH_L2: L2 cache flush
     *
     * Invalidation means to simply discard all cache contents. This can be
     * done in the L1 since it is implemented as a write-through cache and
     * there are other copies elsewhere in the hierarchy.
     *
     * For a flush the contents of the cache need to be written back to
     * memory when dirty and can be discarded otherwise. This operation is
     * more involved than invalidation and therefore we do not flush caches
     * with redundant copies of data.
     *
     * SLC: System Level Coherent. Accesses are forced to miss in the L2
     * cache and are coherent with system memory.
     *
     * GLC: Globally Coherent. Controls how reads and writes are handled by
     * the L1 cache. Global here refers to the data being visible
     * globally on the GPU (i.e., visible to all WGs).
     *
     * For atomics, the GLC bit is used to distinguish between atomic
     * return/no-return operations. These flags are used by GPUDynInst.
     */
    enum : CacheCoherenceFlagsType {
        /** mem_sync_op flags */
        INV_L1 = 0x00000001,
        FLUSH_L2 = 0x00000020,
        /** user-policy flags */
        SLC_BIT = 0x00000080,
        GLC_BIT = 0x00000100,
    };

    using LocalAccessor =
        std::function<Cycles(ThreadContext *tc, Packet *pkt)>;

  private:
    typedef uint16_t PrivateFlagsType;
    typedef ::Flags<PrivateFlagsType> PrivateFlags;

    enum : PrivateFlagsType {
        /** Whether or not the size is valid. */
        VALID_SIZE = 0x00000001,
        /** Whether or not paddr is valid (i.e., has been written). */
        VALID_PADDR = 0x00000002,
        /** Whether or not the vaddr is valid. */
        VALID_VADDR = 0x00000004,
        /** Whether or not the instruction sequence number is valid. */
        VALID_INST_SEQ_NUM = 0x00000008,
        /** Whether or not the pc is valid. */
        VALID_PC = 0x00000010,
        /** Whether or not the context ID is valid. */
        VALID_CONTEXT_ID = 0x00000020,
        /** Whether or not the sc result is valid. */
        VALID_EXTRA_DATA = 0x00000080,
        /** Whether or not the stream ID and substream ID are valid. */
        VALID_STREAM_ID = 0x00000100,
        VALID_SUBSTREAM_ID = 0x00000200,
        // hardware transactional memory
        /** Whether or not the abort cause is valid. */
        VALID_HTM_ABORT_CAUSE = 0x00000400,
        /** Whether or not the instruction count is valid. */
        VALID_INST_COUNT = 0x00000800,
        /**
         * These flags are *not* cleared when a Request object is reused
         * (assigned a new address).
         */
        STICKY_PRIVATE_FLAGS = VALID_CONTEXT_ID
    };

  private:

    /**
     * The physical address of the request. Valid only if VALID_PADDR
     * is set.
     */
    Addr _paddr = 0;

    /**
     * The size of the request. This field must be set when vaddr or
     * paddr is written via setVirt() or the physical (paddr-based)
     * constructor, so it is always valid as long as one of the address
     * fields is valid.
     */
    unsigned _size = 0;

    /** Byte-enable mask for writes. */
    std::vector<bool> _byteEnable;

    /**
     * The requestor ID which is unique in the system for all ports
     * that are capable of issuing a transaction.
     */
    RequestorID _requestorId = invldRequestorId;

    /** Flag structure for the request. */
    Flags _flags;

    /**
     * Flags that control how the downstream cache system maintains
     * coherence.
     */
    CacheCoherenceFlags _cacheCoherenceFlags;

    /** Private flags for field validity checking. */
    PrivateFlags privateFlags;

    /**
     * The time this request was started. Used to calculate
     * latencies. This field is set to curTick() any time paddr or vaddr
     * is written.
     */
    Tick _time = MaxTick;

    /**
     * The task id associated with this request.
     */
    uint32_t _taskId = ContextSwitchTaskId::Unknown;

    /**
     * The stream ID uniquely identifies a device behind the
     * SMMU/IOMMU. Each transaction arriving at the SMMU/IOMMU is
     * associated with exactly one stream ID.
     */
    uint32_t _streamId = 0;

    /**
     * The substream ID identifies an "execution context" within a
     * device behind an SMMU/IOMMU. It's intended to map 1-to-1 to
     * PCIe PASID (Process Address Space ID). The presence of a
     * substream ID is optional.
     */
    uint32_t _substreamId = 0;

    /** The virtual address of the request. */
    Addr _vaddr = MaxAddr;

    /**
     * Extra data for the request, such as the return value of
     * store conditional or the compare value for a CAS.
     */
    uint64_t _extraData = 0;

    /** The context ID (for statistics, locks, and wakeups). */
    ContextID _contextId = InvalidContextID;

    /** Program counter of the initiating access; for tracing/debugging. */
    Addr _pc = MaxAddr;

    /** Sequence number of the instruction that creates the request. */
    InstSeqNum _reqInstSeqNum = 0;

    /** A pointer to an atomic operation. */
    AtomicOpFunctorPtr atomicOpFunctor = nullptr;

    LocalAccessor _localAccessor;

    /** The instruction count at the time this request is created. */
    Counter _instCount = 0;

    /** The cause for HTM transaction abort. */
    HtmFailureFaultCause _htmAbortCause = HtmFailureFaultCause::INVALID;

  public:

    /**
     * Minimal constructor. No fields are initialized. (Note that
     * _flags and privateFlags are cleared by the Flags default
     * constructor.)
     */
    Request() {}

    /**
     * Constructor for physical (e.g. device) requests. Initializes
     * just the physical address, size, flags, and timestamp (to
     * curTick()). These fields are adequate to perform a request.
     */
    Request(Addr paddr, unsigned size, Flags flags, RequestorID id) :
        _paddr(paddr), _size(size), _requestorId(id), _time(curTick())
    {
        _flags.set(flags);
        privateFlags.set(VALID_PADDR|VALID_SIZE);
        _byteEnable = std::vector<bool>(size, true);
    }
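
    /**
     * Illustrative sketch (not part of the original header): a device
     * model might build a physical request like this, where
     * `myRequestorId` is a hypothetical id obtained from the System
     * object at init time.
     *
     * @code
     * // 64-byte uncacheable access at physical address 0x1000.
     * RequestPtr req = std::make_shared<Request>(
     *     0x1000, 64, Request::UNCACHEABLE, myRequestorId);
     * @endcode
     */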

    Request(Addr vaddr, unsigned size, Flags flags,
            RequestorID id, Addr pc, ContextID cid,
            AtomicOpFunctorPtr atomic_op=nullptr)
    {
        setVirt(vaddr, size, flags, id, pc, std::move(atomic_op));
        setContext(cid);
        _byteEnable = std::vector<bool>(size, true);
    }

    Request(const Request& other)
        : _paddr(other._paddr), _size(other._size),
          _byteEnable(other._byteEnable),
          _requestorId(other._requestorId),
          _flags(other._flags),
          _cacheCoherenceFlags(other._cacheCoherenceFlags),
          privateFlags(other.privateFlags),
          _time(other._time),
          _taskId(other._taskId), _vaddr(other._vaddr),
          _extraData(other._extraData), _contextId(other._contextId),
          _pc(other._pc), _reqInstSeqNum(other._reqInstSeqNum),
          _localAccessor(other._localAccessor),
          translateDelta(other.translateDelta),
          accessDelta(other.accessDelta), depth(other.depth)
    {
        atomicOpFunctor.reset(other.atomicOpFunctor ?
            other.atomicOpFunctor->clone() : nullptr);
    }

    ~Request() {}

    /**
     * Set up Context numbers.
     */
    void
    setContext(ContextID context_id)
    {
        _contextId = context_id;
        privateFlags.set(VALID_CONTEXT_ID);
    }

    void
    setStreamId(uint32_t sid)
    {
        _streamId = sid;
        privateFlags.set(VALID_STREAM_ID);
    }

    void
    setSubstreamId(uint32_t ssid)
    {
        assert(hasStreamId());
        _substreamId = ssid;
        privateFlags.set(VALID_SUBSTREAM_ID);
    }

    /**
     * Set up a virtual (e.g., CPU) request in a previously
     * allocated Request object.
     */
    void
    setVirt(Addr vaddr, unsigned size, Flags flags, RequestorID id, Addr pc,
            AtomicOpFunctorPtr amo_op=nullptr)
    {
        _vaddr = vaddr;
        _size = size;
        _requestorId = id;
        _pc = pc;
        _time = curTick();

        _flags.clear(~STICKY_FLAGS);
        _flags.set(flags);
        privateFlags.clear(~STICKY_PRIVATE_FLAGS);
        privateFlags.set(VALID_VADDR|VALID_SIZE|VALID_PC);
        depth = 0;
        accessDelta = 0;
        translateDelta = 0;
        atomicOpFunctor = std::move(amo_op);
        _localAccessor = nullptr;
    }
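
    /**
     * Sketch of typical reuse (illustrative only; `req`, `vaddr`, `pc`,
     * `paddr` and `dataRequestorId` are assumed to be owned by the CPU
     * model):
     *
     * @code
     * // Re-arm an existing Request for a new 4-byte access...
     * req->setVirt(vaddr, 4, 0, dataRequestorId, pc);
     * // ...then record the translation result once the TLB answers.
     * req->setPaddr(paddr);
     * @endcode
     */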

    /**
     * Set just the physical address. This is usually used to record
     * the result of a translation.
     */
    void
    setPaddr(Addr paddr)
    {
        _paddr = paddr;
        privateFlags.set(VALID_PADDR);
    }

    /**
     * Generate two requests as if this request had been split into two
     * pieces. The original request can't have been translated already.
     */
    // TODO: this function is still required by TimingSimpleCPU - should be
    // removed once TimingSimpleCPU supports arbitrarily long multi-line
    // mem. accesses
    void splitOnVaddr(Addr split_addr, RequestPtr &req1, RequestPtr &req2)
    {
        assert(hasVaddr());
        assert(!hasPaddr());
        assert(split_addr > _vaddr && split_addr < _vaddr + _size);
        req1 = std::make_shared<Request>(*this);
        req2 = std::make_shared<Request>(*this);
        req1->_size = split_addr - _vaddr;
        req2->_vaddr = split_addr;
        req2->_size = _size - req1->_size;
        req1->_byteEnable = std::vector<bool>(
            _byteEnable.begin(),
            _byteEnable.begin() + req1->_size);
        req2->_byteEnable = std::vector<bool>(
            _byteEnable.begin() + req1->_size,
            _byteEnable.end());
    }
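
    /**
     * Illustrative use (assuming `req` crosses a 64-byte line boundary
     * and has not been translated yet):
     *
     * @code
     * Addr line_end = (req->getVaddr() & ~Addr(63)) + 64;
     * RequestPtr req1, req2;
     * req->splitOnVaddr(line_end, req1, req2);
     * // req1 covers [vaddr, line_end), req2 covers the remainder.
     * @endcode
     */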

    /**
     * Accessor for paddr.
     */
    bool
    hasPaddr() const
    {
        return privateFlags.isSet(VALID_PADDR);
    }

    Addr
    getPaddr() const
    {
        assert(hasPaddr());
        return _paddr;
    }

    /**
     * Accessor for instruction count.
     */
    bool
    hasInstCount() const
    {
        return privateFlags.isSet(VALID_INST_COUNT);
    }

    Counter
    getInstCount() const
    {
        assert(hasInstCount());
        return _instCount;
    }

    void
    setInstCount(Counter val)
    {
        privateFlags.set(VALID_INST_COUNT);
        _instCount = val;
    }

    /**
     * Time for the TLB/table walker to successfully translate
     * this request.
     */
    Tick translateDelta = 0;

    /**
     * Access latency to complete this memory transaction not including
     * translation time.
     */
    Tick accessDelta = 0;

    /**
     * Level of the cache hierarchy where this request was responded to
     * (e.g. 0 = L1; 1 = L2).
     */
    mutable int depth = 0;

    /**
     * Accessor for size.
     */
    bool
    hasSize() const
    {
        return privateFlags.isSet(VALID_SIZE);
    }

    unsigned
    getSize() const
    {
        assert(hasSize());
        return _size;
    }

    const std::vector<bool>&
    getByteEnable() const
    {
        return _byteEnable;
    }

    void
    setByteEnable(const std::vector<bool>& be)
    {
        assert(be.size() == _size);
        _byteEnable = be;
    }

    /**
     * Returns true if the memory request is masked, which means
     * there is at least one byteEnable element which is false
     * (i.e., at least one byte is masked).
     */
    bool
    isMasked() const
    {
        return std::find(
            _byteEnable.begin(),
            _byteEnable.end(),
            false) != _byteEnable.end();
    }
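
    /**
     * Example (illustrative): write only the low half of an 8-byte
     * request by masking the upper four bytes.
     *
     * @code
     * std::vector<bool> be(8, true);
     * std::fill(be.begin() + 4, be.end(), false);
     * req->setByteEnable(be);
     * assert(req->isMasked());
     * @endcode
     */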

    /** Accessor for time. */
    Tick
    time() const
    {
        assert(hasPaddr() || hasVaddr());
        return _time;
    }

    /** Is this request for a local memory mapped resource/register? */
    bool isLocalAccess() { return (bool)_localAccessor; }
    /** Set the function which will enact that access. */
    void setLocalAccessor(LocalAccessor acc) { _localAccessor = acc; }
    /** Perform the installed local access. */
    Cycles
    localAccessor(ThreadContext *tc, Packet *pkt)
    {
        return _localAccessor(tc, pkt);
    }
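
    /**
     * Sketch (illustrative): a memory-mapped local register can be
     * serviced without a memory-system round trip by installing a
     * lambda; `readMyReg` is a hypothetical helper.
     *
     * @code
     * req->setLocalAccessor(
     *     [](ThreadContext *tc, Packet *pkt) -> Cycles {
     *         pkt->setLE(readMyReg(tc)); // hypothetical register read
     *         return Cycles(1);
     *     });
     * @endcode
     */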

    /**
     * Accessor for atomic-op functor.
     */
    bool
    hasAtomicOpFunctor()
    {
        return (bool)atomicOpFunctor;
    }

    AtomicOpFunctor *
    getAtomicOpFunctor()
    {
        assert(atomicOpFunctor);
        return atomicOpFunctor.get();
    }
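
    /**
     * Sketch (illustrative): an atomic memory operation carries its
     * functor with the request so it can be applied at the data's home
     * location. `MyAtomicAdd` is a hypothetical AtomicOpFunctor
     * subclass; the other arguments are assumed CPU-model state.
     *
     * @code
     * AtomicOpFunctorPtr amo(new MyAtomicAdd(1));
     * RequestPtr req = std::make_shared<Request>(
     *     vaddr, 8, Request::ATOMIC_RETURN_OP, reqId, pc, cid,
     *     std::move(amo));
     * assert(req->hasAtomicOpFunctor());
     * @endcode
     */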

    /**
     * Accessor for hardware transactional memory abort cause.
     */
    bool
    hasHtmAbortCause() const
    {
        return privateFlags.isSet(VALID_HTM_ABORT_CAUSE);
    }

    HtmFailureFaultCause
    getHtmAbortCause() const
    {
        assert(hasHtmAbortCause());
        return _htmAbortCause;
    }

    void
    setHtmAbortCause(HtmFailureFaultCause val)
    {
        assert(isHTMAbort());
        privateFlags.set(VALID_HTM_ABORT_CAUSE);
        _htmAbortCause = val;
    }

    /** Accessor for flags. */
    Flags
    getFlags()
    {
        assert(hasPaddr() || hasVaddr());
        return _flags;
    }

    /** Note that unlike other accessors, this function sets *specific
        flags* (ORs them in); it does not assign its argument to the
        _flags field. Thus this method should rightly be called
        setFlags() and not just flags(). */
    void
    setFlags(Flags flags)
    {
        assert(hasPaddr() || hasVaddr());
        _flags.set(flags);
    }

    void
    setCacheCoherenceFlags(CacheCoherenceFlags extraFlags)
    {
        // TODO: do mem_sync_op requests have valid paddr/vaddr?
        assert(hasPaddr() || hasVaddr());
        _cacheCoherenceFlags.set(extraFlags);
    }

    /** Accessor function for vaddr. */
    bool
    hasVaddr() const
    {
        return privateFlags.isSet(VALID_VADDR);
    }

    Addr
    getVaddr() const
    {
        assert(privateFlags.isSet(VALID_VADDR));
        return _vaddr;
    }

    /** Accessor for the requestor id. */
    RequestorID
    requestorId() const
    {
        return _requestorId;
    }

    uint32_t
    taskId() const
    {
        return _taskId;
    }

    void
    taskId(uint32_t id) {
        _taskId = id;
    }

    /** Accessor function for architecture-specific flags. */
    ArchFlagsType
    getArchFlags() const
    {
        assert(hasPaddr() || hasVaddr());
        return _flags & ARCH_BITS;
    }

    /** Accessor function to check if sc result is valid. */
    bool
    extraDataValid() const
    {
        return privateFlags.isSet(VALID_EXTRA_DATA);
    }

    /** Accessor function for store conditional return value. */
    uint64_t
    getExtraData() const
    {
        assert(extraDataValid());
        return _extraData;
    }

    /** Accessor function for store conditional return value. */
    void
    setExtraData(uint64_t extraData)
    {
        _extraData = extraData;
        privateFlags.set(VALID_EXTRA_DATA);
    }

    bool
    hasContextId() const
    {
        return privateFlags.isSet(VALID_CONTEXT_ID);
    }

    /** Accessor function for context ID. */
    ContextID
    contextId() const
    {
        assert(hasContextId());
        return _contextId;
    }

    bool
    hasStreamId() const
    {
        return privateFlags.isSet(VALID_STREAM_ID);
    }

    uint32_t
    streamId() const
    {
        assert(hasStreamId());
        return _streamId;
    }

    bool
    hasSubstreamId() const
    {
        return privateFlags.isSet(VALID_SUBSTREAM_ID);
    }

    uint32_t
    substreamId() const
    {
        assert(hasSubstreamId());
        return _substreamId;
    }

    void
    setPC(Addr pc)
    {
        privateFlags.set(VALID_PC);
        _pc = pc;
    }

    bool
    hasPC() const
    {
        return privateFlags.isSet(VALID_PC);
    }

    /** Accessor function for pc. */
    Addr
    getPC() const
    {
        assert(hasPC());
        return _pc;
    }

    /**
     * Increment/Get the depth at which this request is responded to.
     * This currently happens when the request misses in any cache level.
     */
    void incAccessDepth() const { depth++; }
    int getAccessDepth() const { return depth; }

    /**
     * Set/Get the time taken for this request to be successfully
     * translated.
     */
    void setTranslateLatency() { translateDelta = curTick() - _time; }
    Tick getTranslateLatency() const { return translateDelta; }

    /**
     * Set/Get the time taken to complete this request's access, not
     * including the time to successfully translate the request.
     */
    void
    setAccessLatency()
    {
        accessDelta = curTick() - _time - translateDelta;
    }
    Tick getAccessLatency() const { return accessDelta; }
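
    /**
     * Typical sequencing (illustrative): a CPU model stamps the two
     * latencies at the corresponding pipeline events.
     *
     * @code
     * req->setTranslateLatency(); // when translation completes
     * // ... memory access is performed ...
     * req->setAccessLatency();    // when the response arrives
     * Tick total = req->getTranslateLatency() + req->getAccessLatency();
     * @endcode
     */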

    /**
     * Accessor for the sequence number of the instruction that creates
     * the request.
     */
    bool
    hasInstSeqNum() const
    {
        return privateFlags.isSet(VALID_INST_SEQ_NUM);
    }

    InstSeqNum
    getReqInstSeqNum() const
    {
        assert(hasInstSeqNum());
        return _reqInstSeqNum;
    }

    void
    setReqInstSeqNum(const InstSeqNum seq_num)
    {
        privateFlags.set(VALID_INST_SEQ_NUM);
        _reqInstSeqNum = seq_num;
    }

    /** Accessor functions for flags. Note that these are for testing
        only; setting flags should be done via setFlags(). */
    bool isUncacheable() const { return _flags.isSet(UNCACHEABLE); }
    bool isStrictlyOrdered() const { return _flags.isSet(STRICT_ORDER); }
    bool isInstFetch() const { return _flags.isSet(INST_FETCH); }
    bool
    isPrefetch() const
    {
        return (_flags.isSet(PREFETCH) || _flags.isSet(PF_EXCLUSIVE));
    }
    bool isPrefetchEx() const { return _flags.isSet(PF_EXCLUSIVE); }
    bool isLLSC() const { return _flags.isSet(LLSC); }
    bool isPriv() const { return _flags.isSet(PRIVILEGED); }
    bool isLockedRMW() const { return _flags.isSet(LOCKED_RMW); }
    bool isSwap() const { return _flags.isSet(MEM_SWAP|MEM_SWAP_COND); }
    bool isCondSwap() const { return _flags.isSet(MEM_SWAP_COND); }
    bool isSecure() const { return _flags.isSet(SECURE); }
    bool isPTWalk() const { return _flags.isSet(PT_WALK); }
    bool isRelease() const { return _flags.isSet(RELEASE); }
    bool isKernel() const { return _flags.isSet(KERNEL); }
    bool isAtomicReturn() const { return _flags.isSet(ATOMIC_RETURN_OP); }
    bool isAtomicNoReturn() const { return _flags.isSet(ATOMIC_NO_RETURN_OP); }
    // hardware transactional memory
    bool isHTMStart() const { return _flags.isSet(HTM_START); }
    bool isHTMCommit() const { return _flags.isSet(HTM_COMMIT); }
    bool isHTMCancel() const { return _flags.isSet(HTM_CANCEL); }
    bool isHTMAbort() const { return _flags.isSet(HTM_ABORT); }
    bool
    isHTMCmd() const
    {
        return (isHTMStart() || isHTMCommit() ||
                isHTMCancel() || isHTMAbort());
    }

    bool
    isAtomic() const
    {
        return _flags.isSet(ATOMIC_RETURN_OP) ||
               _flags.isSet(ATOMIC_NO_RETURN_OP);
    }

    /**
     * Accessor functions for the destination of a memory request. The
     * destination flag can specify a point of reference for the
     * operation (e.g. a cache block clean to the point of
     * unification). At the moment the destination is only used by the
     * cache maintenance operations.
     */
    bool isToPOU() const { return _flags.isSet(DST_POU); }
    bool isToPOC() const { return _flags.isSet(DST_POC); }
    Flags getDest() const { return _flags & DST_BITS; }

    bool isAcquire() const { return _cacheCoherenceFlags.isSet(ACQUIRE); }

    /**
     * Accessor functions for the memory space configuration flags used by
     * GPU ISAs such as the Heterogeneous System Architecture (HSA). Note
     * that setting extraFlags should be done via setCacheCoherenceFlags().
     */
    bool isInvL1() const { return _cacheCoherenceFlags.isSet(INV_L1); }

    bool
    isGL2CacheFlush() const
    {
        return _cacheCoherenceFlags.isSet(FLUSH_L2);
    }

    /**
     * Accessor functions to determine whether this request is part of
     * a cache maintenance operation. At the moment three operations
     * are supported:
     *
     * 1) A cache clean operation updates all copies of a memory
     *    location to the point of reference,
     * 2) A cache invalidate operation invalidates all copies of the
     *    specified block in the memory above the point of reference,
     * 3) A clean and invalidate operation is a combination of the two
     *    operations.
     * @{ */
    bool isCacheClean() const { return _flags.isSet(CLEAN); }
    bool isCacheInvalidate() const { return _flags.isSet(INVALIDATE); }
    bool isCacheMaintenance() const { return _flags.isSet(CLEAN|INVALIDATE); }
    /** @} */
};

#endif // __MEM_REQUEST_HH__