mem-cache: Fix setting prefetch bit
[gem5.git] / src / mem / request.hh
1 /*
2 * Copyright (c) 2012-2013,2017-2020 ARM Limited
3 * All rights reserved
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
13 *
14 * Copyright (c) 2002-2005 The Regents of The University of Michigan
15 * Copyright (c) 2010,2015 Advanced Micro Devices, Inc.
16 * All rights reserved.
17 *
18 * Redistribution and use in source and binary forms, with or without
19 * modification, are permitted provided that the following conditions are
20 * met: redistributions of source code must retain the above copyright
21 * notice, this list of conditions and the following disclaimer;
22 * redistributions in binary form must reproduce the above copyright
23 * notice, this list of conditions and the following disclaimer in the
24 * documentation and/or other materials provided with the distribution;
25 * neither the name of the copyright holders nor the names of its
26 * contributors may be used to endorse or promote products derived from
27 * this software without specific prior written permission.
28 *
29 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
30 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
31 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
32 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
33 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
34 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
35 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
36 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
37 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
38 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
39 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
40 */
41
42 /**
43 * @file
 44  * Declaration of a request, the overall memory request consisting of
 45  * the parts of the request that are persistent throughout the transaction.
46 */
47
48 #ifndef __MEM_REQUEST_HH__
49 #define __MEM_REQUEST_HH__
50
51 #include <cassert>
52 #include <climits>
53
54 #include "base/amo.hh"
55 #include "base/flags.hh"
56 #include "base/logging.hh"
57 #include "base/types.hh"
58 #include "cpu/inst_seq.hh"
59 #include "mem/htm.hh"
60 #include "sim/core.hh"
61
62 /**
63 * Special TaskIds that are used for per-context-switch stats dumps
64 * and Cache Occupancy. Having too many tasks seems to be a problem
65 * with vector stats. 1024 seems to be a reasonable number that
 66  * doesn't cause a problem with stats and is large enough for realistic
 67  * benchmarks (Linux/Android boot, BBench, etc.).
68 */
69
70 namespace ContextSwitchTaskId {
71 enum TaskId {
72 MaxNormalTaskId = 1021, /* Maximum number of normal tasks */
73 Prefetcher = 1022, /* For cache lines brought in by prefetcher */
74 DMA = 1023, /* Mostly Table Walker */
75 Unknown = 1024,
76 NumTaskId
77 };
78 }
79
80 class Packet;
81 class Request;
82 class ThreadContext;
83
84 typedef std::shared_ptr<Request> RequestPtr;
85 typedef uint16_t RequestorID;
86
87 class Request
88 {
89 public:
90 typedef uint64_t FlagsType;
91 typedef uint8_t ArchFlagsType;
92 typedef ::Flags<FlagsType> Flags;
93
94 enum : FlagsType {
95 /**
96 * Architecture specific flags.
97 *
 98          * These bits in the flag field are reserved for
99 * architecture-specific code. For example, SPARC uses them to
100 * represent ASIs.
101 */
102 ARCH_BITS = 0x000000FF,
103 /** The request was an instruction fetch. */
104 INST_FETCH = 0x00000100,
105 /** The virtual address is also the physical address. */
106 PHYSICAL = 0x00000200,
107 /**
108 * The request is to an uncacheable address.
109 *
110 * @note Uncacheable accesses may be reordered by CPU models. The
111 * STRICT_ORDER flag should be set if such reordering is
112 * undesirable.
113 */
114 UNCACHEABLE = 0x00000400,
115 /**
116 * The request is required to be strictly ordered by <i>CPU
117 * models</i> and is non-speculative.
118 *
119 * A strictly ordered request is guaranteed to never be
120 * re-ordered or executed speculatively by a CPU model. The
121 * memory system may still reorder requests in caches unless
122 * the UNCACHEABLE flag is set as well.
123 */
124 STRICT_ORDER = 0x00000800,
125 /** This request is made in privileged mode. */
126 PRIVILEGED = 0x00008000,
127
128 /**
129          * This is a write that is targeted at zeroing an entire
130          * cache block. There is no need for a read/modify/write.
131 */
132 CACHE_BLOCK_ZERO = 0x00010000,
133
134 /** The request should not cause a memory access. */
135 NO_ACCESS = 0x00080000,
136 /**
137 * This request will lock or unlock the accessed memory. When
138 * used with a load, the access locks the particular chunk of
139 * memory. When used with a store, it unlocks. The rule is
140 * that locked accesses have to be made up of a locked load,
141 * some operation on the data, and then a locked store.
142 */
143 LOCKED_RMW = 0x00100000,
144 /** The request is a Load locked/store conditional. */
145 LLSC = 0x00200000,
146 /** This request is for a memory swap. */
147 MEM_SWAP = 0x00400000,
148 MEM_SWAP_COND = 0x00800000,
149
150 /** The request is a prefetch. */
151 PREFETCH = 0x01000000,
152 /** The request should be prefetched into the exclusive state. */
153 PF_EXCLUSIVE = 0x02000000,
154 /** The request should be marked as LRU. */
155 EVICT_NEXT = 0x04000000,
156 /** The request should be marked with ACQUIRE. */
157 ACQUIRE = 0x00020000,
158 /** The request should be marked with RELEASE. */
159 RELEASE = 0x00040000,
160
161 /** The request is an atomic that returns data. */
162 ATOMIC_RETURN_OP = 0x40000000,
163 /** The request is an atomic that does not return data. */
164 ATOMIC_NO_RETURN_OP = 0x80000000,
165
166 /** The request should be marked with KERNEL.
167 * Used to indicate the synchronization associated with a GPU kernel
168 * launch or completion.
169 */
170 KERNEL = 0x00001000,
171
172 /** The request targets the secure memory space. */
173 SECURE = 0x10000000,
174 /** The request is a page table walk */
175 PT_WALK = 0x20000000,
176
177 /** The request invalidates a memory location */
178 INVALIDATE = 0x0000000100000000,
179 /** The request cleans a memory location */
180 CLEAN = 0x0000000200000000,
181
182 /** The request targets the point of unification */
183 DST_POU = 0x0000001000000000,
184
185 /** The request targets the point of coherence */
186 DST_POC = 0x0000002000000000,
187
188 /** Bits to define the destination of a request */
189 DST_BITS = 0x0000003000000000,
190
191 /** hardware transactional memory **/
192
193 /** The request starts a HTM transaction */
194 HTM_START = 0x0000010000000000,
195
196 /** The request commits a HTM transaction */
197 HTM_COMMIT = 0x0000020000000000,
198
199 /** The request cancels a HTM transaction */
200 HTM_CANCEL = 0x0000040000000000,
201
202 /** The request aborts a HTM transaction */
203 HTM_ABORT = 0x0000080000000000,
204
205         // What is the difference between HTM cancel and abort?
206 //
207 // HTM_CANCEL will originate from a user instruction, e.g.
208 // Arm's TCANCEL or x86's XABORT. This is an explicit request
209 // to end a transaction and restore from the last checkpoint.
210 //
211 // HTM_ABORT is an internally generated request used to synchronize
212 // a transaction's failure between the core and memory subsystem.
213 // If a transaction fails in the core, e.g. because an instruction
214 // within the transaction generates an exception, the core will prepare
215 // itself to stop fetching/executing more instructions and send an
216 // HTM_ABORT to the memory subsystem before restoring the checkpoint.
217 // Similarly, the transaction could fail in the memory subsystem and
218 // this will be communicated to the core via the Packet object.
219 // Once the core notices, it will do the same as the above and send
220         // an HTM_ABORT to the memory subsystem.
221         // An HTM_CANCEL sent to the memory subsystem will ultimately return
222         // to the core, which in turn will send an HTM_ABORT.
223 //
224 // This separation is necessary to ensure the disjoint components
225 // of the system work correctly together.
226
227 /**
228 * These flags are *not* cleared when a Request object is
229 * reused (assigned a new address).
230 */
231 STICKY_FLAGS = INST_FETCH
232 };
233 static const FlagsType STORE_NO_DATA = CACHE_BLOCK_ZERO |
234 CLEAN | INVALIDATE;
235
236 static const FlagsType HTM_CMD = HTM_START | HTM_COMMIT |
237 HTM_CANCEL | HTM_ABORT;
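    // A minimal sketch (not part of the original header) of how a
    // requestor such as a software or hardware prefetcher might mark
    // its request, assuming req is an already constructed RequestPtr
    // with a valid address:
    //
    //     req->setFlags(Request::PREFETCH);
    //     if (want_exclusive_copy)
    //         req->setFlags(Request::PF_EXCLUSIVE);
    //
    // setFlags() ORs the given bits into the existing flags, so the
    // sticky and architecture bits already present are preserved.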
238
239 /** Requestor Ids that are statically allocated
240 * @{*/
241 enum : RequestorID {
242 /** This requestor id is used for writeback requests by the caches */
243 wbRequestorId = 0,
244 /**
245 * This requestor id is used for functional requests that
246 * don't come from a particular device
247 */
248 funcRequestorId = 1,
249 /** This requestor id is used for message signaled interrupts */
250 intRequestorId = 2,
251 /**
252 * Invalid requestor id for assertion checking only. It is
253 * invalid behavior to ever send this id as part of a request.
254 */
255 invldRequestorId = std::numeric_limits<RequestorID>::max()
256 };
257 /** @} */
258
259 typedef uint64_t CacheCoherenceFlagsType;
260 typedef ::Flags<CacheCoherenceFlagsType> CacheCoherenceFlags;
261
262 /**
263 * These bits are used to set the coherence policy for the GPU and are
264      * encoded in the GCN3 instructions. The GCN3 ISA defines two cache levels.
265 * See the AMD GCN3 ISA Architecture Manual for more details.
266 *
267 * INV_L1: L1 cache invalidation
268 * FLUSH_L2: L2 cache flush
269 *
270 * Invalidation means to simply discard all cache contents. This can be
271 * done in the L1 since it is implemented as a write-through cache and
272 * there are other copies elsewhere in the hierarchy.
273 *
274      * For a flush, the contents of the cache need to be written back to memory
275 * when dirty and can be discarded otherwise. This operation is more
276 * involved than invalidation and therefore we do not flush caches with
277 * redundant copies of data.
278 *
279 * SLC: System Level Coherent. Accesses are forced to miss in the L2 cache
280 * and are coherent with system memory.
281 *
282 * GLC: Globally Coherent. Controls how reads and writes are handled by
283      *      the L1 cache. Global here refers to the data being visible
284 * globally on the GPU (i.e., visible to all WGs).
285 *
286      * For atomics, the GLC bit is used to distinguish between atomic
287 * return/no-return operations. These flags are used by GPUDynInst.
288 */
289 enum : CacheCoherenceFlagsType {
290 /** mem_sync_op flags */
291 INV_L1 = 0x00000001,
292 FLUSH_L2 = 0x00000020,
293 /** user-policy flags */
294 SLC_BIT = 0x00000080,
295 GLC_BIT = 0x00000100,
296 };
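    // A minimal sketch (not part of the original header): a GPU
    // mem_sync_op could request an L1 invalidation on an existing
    // request, assuming req already has a valid address, e.g.
    //
    //     req->setCacheCoherenceFlags(Request::INV_L1);
    //
    // setCacheCoherenceFlags() below asserts that paddr or vaddr is
    // valid before recording the extra flags.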
297
298 using LocalAccessor =
299 std::function<Cycles(ThreadContext *tc, Packet *pkt)>;
300
301 private:
302 typedef uint16_t PrivateFlagsType;
303 typedef ::Flags<PrivateFlagsType> PrivateFlags;
304
305 enum : PrivateFlagsType {
306 /** Whether or not the size is valid. */
307 VALID_SIZE = 0x00000001,
308         /** Whether or not paddr is valid (has been written). */
309 VALID_PADDR = 0x00000002,
310 /** Whether or not the vaddr is valid. */
311 VALID_VADDR = 0x00000004,
312 /** Whether or not the instruction sequence number is valid. */
313 VALID_INST_SEQ_NUM = 0x00000008,
314 /** Whether or not the pc is valid. */
315 VALID_PC = 0x00000010,
316 /** Whether or not the context ID is valid. */
317 VALID_CONTEXT_ID = 0x00000020,
318 /** Whether or not the sc result is valid. */
319 VALID_EXTRA_DATA = 0x00000080,
320         /** Whether or not the stream ID and substream ID are valid. */
321 VALID_STREAM_ID = 0x00000100,
322 VALID_SUBSTREAM_ID = 0x00000200,
323 // hardware transactional memory
324 /** Whether or not the abort cause is valid. */
325 VALID_HTM_ABORT_CAUSE = 0x00000400,
326 /** Whether or not the instruction count is valid. */
327 VALID_INST_COUNT = 0x00000800,
328 /**
329 * These flags are *not* cleared when a Request object is reused
330 * (assigned a new address).
331 */
332 STICKY_PRIVATE_FLAGS = VALID_CONTEXT_ID
333 };
334
335 private:
336
337 /**
338 * The physical address of the request. Valid only if validPaddr
339 * is set.
340 */
341 Addr _paddr = 0;
342
343 /**
344 * The size of the request. This field must be set when vaddr or
345      * paddr is written via setVirt() or a paddr-based constructor, so it is
346 * always valid as long as one of the address fields is valid.
347 */
348 unsigned _size = 0;
349
350 /** Byte-enable mask for writes. */
351 std::vector<bool> _byteEnable;
352
353     /** The requestor ID, which is unique in the system for all ports
354      * that are capable of issuing a transaction.
355 */
356 RequestorID _requestorId = invldRequestorId;
357
358 /** Flag structure for the request. */
359 Flags _flags;
360
361     /** Flags that control how the downstream cache system maintains coherence. */
362 CacheCoherenceFlags _cacheCoherenceFlags;
363
364 /** Private flags for field validity checking. */
365 PrivateFlags privateFlags;
366
367 /**
368 * The time this request was started. Used to calculate
369 * latencies. This field is set to curTick() any time paddr or vaddr
370 * is written.
371 */
372 Tick _time = MaxTick;
373
374 /**
375 * The task id associated with this request
376 */
377 uint32_t _taskId = ContextSwitchTaskId::Unknown;
378
379 /**
380 * The stream ID uniquely identifies a device behind the
381      * SMMU/IOMMU. Each transaction arriving at the SMMU/IOMMU is
382 * associated with exactly one stream ID.
383 */
384 uint32_t _streamId = 0;
385
386 /**
387 * The substream ID identifies an "execution context" within a
388 * device behind an SMMU/IOMMU. It's intended to map 1-to-1 to
389 * PCIe PASID (Process Address Space ID). The presence of a
390 * substream ID is optional.
391 */
392 uint32_t _substreamId = 0;
393
394 /** The virtual address of the request. */
395 Addr _vaddr = MaxAddr;
396
397 /**
398      * Extra data for the request, such as the return value of a
399      * store conditional or the compare value for a CAS. */
400 uint64_t _extraData = 0;
401
402 /** The context ID (for statistics, locks, and wakeups). */
403 ContextID _contextId = InvalidContextID;
404
405 /** program counter of initiating access; for tracing/debugging */
406 Addr _pc = MaxAddr;
407
408 /** Sequence number of the instruction that creates the request */
409 InstSeqNum _reqInstSeqNum = 0;
410
411 /** A pointer to an atomic operation */
412 AtomicOpFunctorPtr atomicOpFunctor = nullptr;
413
414 LocalAccessor _localAccessor;
415
416 /** The instruction count at the time this request is created */
417 Counter _instCount = 0;
418
419 /** The cause for HTM transaction abort */
420 HtmFailureFaultCause _htmAbortCause = HtmFailureFaultCause::INVALID;
421
422 public:
423
424 /**
425 * Minimal constructor. No fields are initialized. (Note that
426 * _flags and privateFlags are cleared by Flags default
427 * constructor.)
428 */
429 Request() {}
430
431 /**
432 * Constructor for physical (e.g. device) requests. Initializes
433 * just physical address, size, flags, and timestamp (to curTick()).
434 * These fields are adequate to perform a request.
435 */
436 Request(Addr paddr, unsigned size, Flags flags, RequestorID id) :
437 _paddr(paddr), _size(size), _requestorId(id), _time(curTick())
438 {
439 _flags.set(flags);
440 privateFlags.set(VALID_PADDR|VALID_SIZE);
441 _byteEnable = std::vector<bool>(size, true);
442 }
443
444 Request(Addr vaddr, unsigned size, Flags flags,
445 RequestorID id, Addr pc, ContextID cid,
446 AtomicOpFunctorPtr atomic_op=nullptr)
447 {
448 setVirt(vaddr, size, flags, id, pc, std::move(atomic_op));
449 setContext(cid);
450 _byteEnable = std::vector<bool>(size, true);
451 }
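    // Illustrative usage of the two constructors above (a sketch, not
    // part of the original header); dev_id, cpu_id, vaddr, pc and cid
    // are assumed to be supplied by the issuing model:
    //
    //     // Physically addressed request, e.g. from a device model.
    //     RequestPtr dev_req = std::make_shared<Request>(
    //         0x1000, 64, Request::UNCACHEABLE, dev_id);
    //
    //     // Virtually addressed request, e.g. from a CPU model; the
    //     // paddr is filled in later by setPaddr() after translation.
    //     RequestPtr cpu_req = std::make_shared<Request>(
    //         vaddr, 8, 0, cpu_id, pc, cid);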
452
453 Request(const Request& other)
454 : _paddr(other._paddr), _size(other._size),
455 _byteEnable(other._byteEnable),
456 _requestorId(other._requestorId),
457 _flags(other._flags),
458 _cacheCoherenceFlags(other._cacheCoherenceFlags),
459 privateFlags(other.privateFlags),
460 _time(other._time),
461 _taskId(other._taskId), _vaddr(other._vaddr),
462 _extraData(other._extraData), _contextId(other._contextId),
463 _pc(other._pc), _reqInstSeqNum(other._reqInstSeqNum),
464 _localAccessor(other._localAccessor),
465 translateDelta(other.translateDelta),
466 accessDelta(other.accessDelta), depth(other.depth)
467 {
468 atomicOpFunctor.reset(other.atomicOpFunctor ?
469 other.atomicOpFunctor->clone() : nullptr);
470 }
471
472 ~Request() {}
473
474 /**
475 * Set up Context numbers.
476 */
477 void
478 setContext(ContextID context_id)
479 {
480 _contextId = context_id;
481 privateFlags.set(VALID_CONTEXT_ID);
482 }
483
484 void
485 setStreamId(uint32_t sid)
486 {
487 _streamId = sid;
488 privateFlags.set(VALID_STREAM_ID);
489 }
490
491 void
492 setSubStreamId(uint32_t ssid)
493 {
494 assert(privateFlags.isSet(VALID_STREAM_ID));
495 _substreamId = ssid;
496 privateFlags.set(VALID_SUBSTREAM_ID);
497 }
498
499 /**
500 * Set up a virtual (e.g., CPU) request in a previously
501 * allocated Request object.
502 */
503 void
504 setVirt(Addr vaddr, unsigned size, Flags flags, RequestorID id, Addr pc,
505 AtomicOpFunctorPtr amo_op=nullptr)
506 {
507 _vaddr = vaddr;
508 _size = size;
509 _requestorId = id;
510 _pc = pc;
511 _time = curTick();
512
513 _flags.clear(~STICKY_FLAGS);
514 _flags.set(flags);
515 privateFlags.clear(~STICKY_PRIVATE_FLAGS);
516 privateFlags.set(VALID_VADDR|VALID_SIZE|VALID_PC);
517 depth = 0;
518 accessDelta = 0;
519 translateDelta = 0;
520 atomicOpFunctor = std::move(amo_op);
521 _localAccessor = nullptr;
522 }
523
524 /**
525      * Set just the physical address. This is usually used to record the
526 * result of a translation.
527 */
528 void
529 setPaddr(Addr paddr)
530 {
531 _paddr = paddr;
532 privateFlags.set(VALID_PADDR);
533 }
534
535 /**
536 * Generate two requests as if this request had been split into two
537 * pieces. The original request can't have been translated already.
538 */
539 // TODO: this function is still required by TimingSimpleCPU - should be
540 // removed once TimingSimpleCPU will support arbitrarily long multi-line
541 // mem. accesses
542 void splitOnVaddr(Addr split_addr, RequestPtr &req1, RequestPtr &req2)
543 {
544 assert(privateFlags.isSet(VALID_VADDR));
545 assert(privateFlags.noneSet(VALID_PADDR));
546 assert(split_addr > _vaddr && split_addr < _vaddr + _size);
547 req1 = std::make_shared<Request>(*this);
548 req2 = std::make_shared<Request>(*this);
549 req1->_size = split_addr - _vaddr;
550 req2->_vaddr = split_addr;
551 req2->_size = _size - req1->_size;
552 req1->_byteEnable = std::vector<bool>(
553 _byteEnable.begin(),
554 _byteEnable.begin() + req1->_size);
555 req2->_byteEnable = std::vector<bool>(
556 _byteEnable.begin() + req1->_size,
557 _byteEnable.end());
558 }
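    // A minimal sketch (assumed usage, not part of the original
    // header): a CPU model splitting an untranslated access that
    // crosses a cache-line boundary might do
    //
    //     RequestPtr req1, req2;
    //     if (req->hasVaddr() && !req->hasPaddr())
    //         req->splitOnVaddr(split_addr, req1, req2);
    //
    // where split_addr is the first byte of the second line; each
    // half is then translated and issued separately.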
559
560 /**
561 * Accessor for paddr.
562 */
563 bool
564 hasPaddr() const
565 {
566 return privateFlags.isSet(VALID_PADDR);
567 }
568
569 Addr
570 getPaddr() const
571 {
572 assert(privateFlags.isSet(VALID_PADDR));
573 return _paddr;
574 }
575
576 /**
577 * Accessor for instruction count.
578 */
579 Counter getInstCount() const
580 {
581 assert(privateFlags.isSet(VALID_INST_COUNT));
582 return _instCount;
583 }
584
585 void setInstCount(Counter val)
586 {
587 privateFlags.set(VALID_INST_COUNT);
588 _instCount = val;
589 }
590
591 /**
592 * Time for the TLB/table walker to successfully translate this request.
593 */
594 Tick translateDelta = 0;
595
596 /**
597 * Access latency to complete this memory transaction not including
598 * translation time.
599 */
600 Tick accessDelta = 0;
601
602 /**
603      * Level of the cache hierarchy where this request was responded to
604 * (e.g. 0 = L1; 1 = L2).
605 */
606 mutable int depth = 0;
607
608 /**
609 * Accessor for size.
610 */
611 bool
612 hasSize() const
613 {
614 return privateFlags.isSet(VALID_SIZE);
615 }
616
617 unsigned
618 getSize() const
619 {
620 assert(privateFlags.isSet(VALID_SIZE));
621 return _size;
622 }
623
624 const std::vector<bool>&
625 getByteEnable() const
626 {
627 return _byteEnable;
628 }
629
630 void
631 setByteEnable(const std::vector<bool>& be)
632 {
633 assert(be.size() == _size);
634 _byteEnable = be;
635 }
636
637 /**
638 * Returns true if the memory request is masked, which means
639 * there is at least one byteEnable element which is false
640 * (byte is masked)
641 */
642 bool
643 isMasked() const
644 {
645 return std::find(
646 _byteEnable.begin(),
647 _byteEnable.end(),
648 false) != _byteEnable.end();
649 }
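    // A minimal sketch (not part of the original header): a store
    // that only touches the first four bytes of a larger request
    // could be masked as follows, assuming req->getSize() >= 4:
    //
    //     std::vector<bool> be(req->getSize(), false);
    //     std::fill(be.begin(), be.begin() + 4, true);
    //     req->setByteEnable(be);
    //     assert(req->isMasked());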
650
651 /** Accessor for time. */
652 Tick
653 time() const
654 {
655 assert(privateFlags.isSet(VALID_PADDR|VALID_VADDR));
656 return _time;
657 }
658
659 /** Is this request for a local memory mapped resource/register? */
660 bool isLocalAccess() { return (bool)_localAccessor; }
661 /** Set the function which will enact that access. */
662 void setLocalAccessor(LocalAccessor acc) { _localAccessor = acc; }
663 /** Perform the installed local access. */
664 Cycles
665 localAccessor(ThreadContext *tc, Packet *pkt)
666 {
667 return _localAccessor(tc, pkt);
668 }
669
670 /**
671 * Accessor for atomic-op functor.
672 */
673 bool
674 hasAtomicOpFunctor()
675 {
676 return (bool)atomicOpFunctor;
677 }
678
679 AtomicOpFunctor *
680 getAtomicOpFunctor()
681 {
682 assert(atomicOpFunctor);
683 return atomicOpFunctor.get();
684 }
685
686 /**
687 * Accessor for hardware transactional memory abort cause.
688 */
689 HtmFailureFaultCause
690 getHtmAbortCause() const
691 {
692 assert(privateFlags.isSet(VALID_HTM_ABORT_CAUSE));
693 return _htmAbortCause;
694 }
695
696 void
697 setHtmAbortCause(HtmFailureFaultCause val)
698 {
699 assert(isHTMAbort());
700 privateFlags.set(VALID_HTM_ABORT_CAUSE);
701 _htmAbortCause = val;
702 }
703
704 /** Accessor for flags. */
705 Flags
706 getFlags()
707 {
708 assert(privateFlags.isSet(VALID_PADDR|VALID_VADDR));
709 return _flags;
710 }
711
712 /** Note that unlike other accessors, this function sets *specific
713 flags* (ORs them in); it does not assign its argument to the
714 _flags field. Thus this method should rightly be called
715 setFlags() and not just flags(). */
716 void
717 setFlags(Flags flags)
718 {
719 assert(privateFlags.isSet(VALID_PADDR|VALID_VADDR));
720 _flags.set(flags);
721 }
722
723 void
724 setCacheCoherenceFlags(CacheCoherenceFlags extraFlags)
725 {
726 // TODO: do mem_sync_op requests have valid paddr/vaddr?
727 assert(privateFlags.isSet(VALID_PADDR | VALID_VADDR));
728 _cacheCoherenceFlags.set(extraFlags);
729 }
730
731 /** Accessor function for vaddr.*/
732 bool
733 hasVaddr() const
734 {
735 return privateFlags.isSet(VALID_VADDR);
736 }
737
738 Addr
739 getVaddr() const
740 {
741 assert(privateFlags.isSet(VALID_VADDR));
742 return _vaddr;
743 }
744
745     /** Accessor for the requestor id. */
746 RequestorID
747 requestorId() const
748 {
749 return _requestorId;
750 }
751
752 uint32_t
753 taskId() const
754 {
755 return _taskId;
756 }
757
758 void
759 taskId(uint32_t id) {
760 _taskId = id;
761 }
762
763 /** Accessor function for architecture-specific flags.*/
764 ArchFlagsType
765 getArchFlags() const
766 {
767 assert(privateFlags.isSet(VALID_PADDR|VALID_VADDR));
768 return _flags & ARCH_BITS;
769 }
770
771 /** Accessor function to check if sc result is valid. */
772 bool
773 extraDataValid() const
774 {
775 return privateFlags.isSet(VALID_EXTRA_DATA);
776 }
777
778 /** Accessor function for store conditional return value.*/
779 uint64_t
780 getExtraData() const
781 {
782 assert(privateFlags.isSet(VALID_EXTRA_DATA));
783 return _extraData;
784 }
785
786 /** Accessor function for store conditional return value.*/
787 void
788 setExtraData(uint64_t extraData)
789 {
790 _extraData = extraData;
791 privateFlags.set(VALID_EXTRA_DATA);
792 }
793
794 bool
795 hasContextId() const
796 {
797 return privateFlags.isSet(VALID_CONTEXT_ID);
798 }
799
800 /** Accessor function for context ID.*/
801 ContextID
802 contextId() const
803 {
804 assert(privateFlags.isSet(VALID_CONTEXT_ID));
805 return _contextId;
806 }
807
808 uint32_t
809 streamId() const
810 {
811 assert(privateFlags.isSet(VALID_STREAM_ID));
812 return _streamId;
813 }
814
815 bool
816 hasSubstreamId() const
817 {
818 return privateFlags.isSet(VALID_SUBSTREAM_ID);
819 }
820
821 uint32_t
822 substreamId() const
823 {
824 assert(privateFlags.isSet(VALID_SUBSTREAM_ID));
825 return _substreamId;
826 }
827
828 void
829 setPC(Addr pc)
830 {
831 privateFlags.set(VALID_PC);
832 _pc = pc;
833 }
834
835 bool
836 hasPC() const
837 {
838 return privateFlags.isSet(VALID_PC);
839 }
840
841 /** Accessor function for pc.*/
842 Addr
843 getPC() const
844 {
845 assert(privateFlags.isSet(VALID_PC));
846 return _pc;
847 }
848
849 /**
850 * Increment/Get the depth at which this request is responded to.
851 * This currently happens when the request misses in any cache level.
852 */
853 void incAccessDepth() const { depth++; }
854 int getAccessDepth() const { return depth; }
855
856 /**
857 * Set/Get the time taken for this request to be successfully translated.
858 */
859 void setTranslateLatency() { translateDelta = curTick() - _time; }
860 Tick getTranslateLatency() const { return translateDelta; }
861
862 /**
863 * Set/Get the time taken to complete this request's access, not including
864 * the time to successfully translate the request.
865 */
866 void setAccessLatency() { accessDelta = curTick() - _time - translateDelta; }
867 Tick getAccessLatency() const { return accessDelta; }
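    // A minimal sketch (assumed flow, not part of the original
    // header) of how a CPU model might record these latencies:
    //
    //     req->setTranslateLatency();  // when translation completes
    //     req->setAccessLatency();     // when the response arrives
    //     Tick total = req->getTranslateLatency() +
    //         req->getAccessLatency();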
868
869 /**
870 * Accessor for the sequence number of instruction that creates the
871 * request.
872 */
873 bool
874 hasInstSeqNum() const
875 {
876 return privateFlags.isSet(VALID_INST_SEQ_NUM);
877 }
878
879 InstSeqNum
880 getReqInstSeqNum() const
881 {
882 assert(privateFlags.isSet(VALID_INST_SEQ_NUM));
883 return _reqInstSeqNum;
884 }
885
886 void
887 setReqInstSeqNum(const InstSeqNum seq_num)
888 {
889 privateFlags.set(VALID_INST_SEQ_NUM);
890 _reqInstSeqNum = seq_num;
891 }
892
893     /** Accessor functions for flags. Note that these only test (read)
894         the flags; setting flags should be done via setFlags(). */
895 bool isUncacheable() const { return _flags.isSet(UNCACHEABLE); }
896 bool isStrictlyOrdered() const { return _flags.isSet(STRICT_ORDER); }
897 bool isInstFetch() const { return _flags.isSet(INST_FETCH); }
898 bool isPrefetch() const { return (_flags.isSet(PREFETCH) ||
899 _flags.isSet(PF_EXCLUSIVE)); }
900 bool isPrefetchEx() const { return _flags.isSet(PF_EXCLUSIVE); }
901 bool isLLSC() const { return _flags.isSet(LLSC); }
902 bool isPriv() const { return _flags.isSet(PRIVILEGED); }
903 bool isLockedRMW() const { return _flags.isSet(LOCKED_RMW); }
904 bool isSwap() const { return _flags.isSet(MEM_SWAP|MEM_SWAP_COND); }
905 bool isCondSwap() const { return _flags.isSet(MEM_SWAP_COND); }
906 bool isSecure() const { return _flags.isSet(SECURE); }
907 bool isPTWalk() const { return _flags.isSet(PT_WALK); }
908 bool isRelease() const { return _flags.isSet(RELEASE); }
909 bool isKernel() const { return _flags.isSet(KERNEL); }
910 bool isAtomicReturn() const { return _flags.isSet(ATOMIC_RETURN_OP); }
911 bool isAtomicNoReturn() const { return _flags.isSet(ATOMIC_NO_RETURN_OP); }
912 // hardware transactional memory
913 bool isHTMStart() const { return _flags.isSet(HTM_START); }
914 bool isHTMCommit() const { return _flags.isSet(HTM_COMMIT); }
915 bool isHTMCancel() const { return _flags.isSet(HTM_CANCEL); }
916 bool isHTMAbort() const { return _flags.isSet(HTM_ABORT); }
917 bool
918 isHTMCmd() const
919 {
920 return (isHTMStart() || isHTMCommit() ||
921 isHTMCancel() || isHTMAbort());
922 }
923
924 bool
925 isAtomic() const
926 {
927 return _flags.isSet(ATOMIC_RETURN_OP) ||
928 _flags.isSet(ATOMIC_NO_RETURN_OP);
929 }
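    // A minimal sketch (not part of the original header): a cache
    // could use these predicates when deciding how to handle an
    // incoming packet, assuming pkt->req points at this Request:
    //
    //     if (pkt->req->isPrefetch() && !pkt->req->isPrefetchEx()) {
    //         // allocate the line without requesting write permission
    //     } else if (pkt->req->isUncacheable()) {
    //         // bypass allocation entirely
    //     }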
930
931 /**
932 * Accessor functions for the destination of a memory request. The
933 * destination flag can specify a point of reference for the
934      * operation (e.g. a cache block clean to the point of
935 * unification). At the moment the destination is only used by the
936 * cache maintenance operations.
937 */
938 bool isToPOU() const { return _flags.isSet(DST_POU); }
939 bool isToPOC() const { return _flags.isSet(DST_POC); }
940 Flags getDest() const { return _flags & DST_BITS; }
941
942 bool isAcquire() const { return _cacheCoherenceFlags.isSet(ACQUIRE); }
943
944 /**
945      * Accessor functions for the memory space configuration flags used by
946 * GPU ISAs such as the Heterogeneous System Architecture (HSA). Note that
947 * setting extraFlags should be done via setCacheCoherenceFlags().
948 */
949 bool isInvL1() const { return _cacheCoherenceFlags.isSet(INV_L1); }
950
951 bool
952 isGL2CacheFlush() const
953 {
954 return _cacheCoherenceFlags.isSet(FLUSH_L2);
955 }
956
957 /**
958 * Accessor functions to determine whether this request is part of
959 * a cache maintenance operation. At the moment three operations
960 * are supported:
961      *
962 * 1) A cache clean operation updates all copies of a memory
963 * location to the point of reference,
964 * 2) A cache invalidate operation invalidates all copies of the
965 * specified block in the memory above the point of reference,
966 * 3) A clean and invalidate operation is a combination of the two
967 * operations.
968 * @{ */
969 bool isCacheClean() const { return _flags.isSet(CLEAN); }
970 bool isCacheInvalidate() const { return _flags.isSet(INVALIDATE); }
971 bool isCacheMaintenance() const { return _flags.isSet(CLEAN|INVALIDATE); }
972 /** @} */
973 };
974
975 #endif // __MEM_REQUEST_HH__