/*
 * Copyright (c) 2012-2013,2017-2019 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * Copyright (c) 2010,2015 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/**
 * @file
 * Declaration of a request, the overall memory request consisting of
 * the parts of the request that are persistent throughout the transaction.
 */

#ifndef __MEM_REQUEST_HH__
#define __MEM_REQUEST_HH__

#include <algorithm>
#include <cassert>
#include <climits>
#include <limits>
#include <memory>
#include <utility>
#include <vector>

#include "base/amo.hh"
#include "base/flags.hh"
#include "base/logging.hh"
#include "base/types.hh"
#include "cpu/inst_seq.hh"
#include "sim/core.hh"

/**
 * Special TaskIds that are used for per-context-switch stats dumps
 * and Cache Occupancy. Having too many tasks seems to be a problem
 * with vector stats. 1024 seems to be a reasonable number that
 * doesn't cause a problem with stats and is large enough for
 * realistic benchmarks (Linux/Android boot, BBench, etc.)
 */

namespace ContextSwitchTaskId {
    enum TaskId {
        MaxNormalTaskId = 1021, /* Maximum number of normal tasks */
        Prefetcher = 1022, /* For cache lines brought in by prefetcher */
        DMA = 1023, /* Mostly Table Walker */
        Unknown = 1024,
        NumTaskId
    };
}
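
// A minimal usage sketch (not from the original source): a DMA engine or
// table-walker model might tag its requests so per-task stats dumps
// attribute the traffic correctly. The taskId() setter is defined on
// Request below; `req` is an assumed RequestPtr built elsewhere.
//
//     req->taskId(ContextSwitchTaskId::DMA);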

class Request;

typedef std::shared_ptr<Request> RequestPtr;
typedef uint16_t MasterID;

class Request
{
  public:
    typedef uint64_t FlagsType;
    typedef uint8_t ArchFlagsType;
    typedef ::Flags<FlagsType> Flags;

    enum : FlagsType {
        /**
         * Architecture specific flags.
         *
         * These bits in the flag field are reserved for
         * architecture-specific code. For example, SPARC uses them to
         * represent ASIs.
         */
        ARCH_BITS                   = 0x000000FF,
        /** The request was an instruction fetch. */
        INST_FETCH                  = 0x00000100,
        /** The virtual address is also the physical address. */
        PHYSICAL                    = 0x00000200,
        /**
         * The request is to an uncacheable address.
         *
         * @note Uncacheable accesses may be reordered by CPU models. The
         * STRICT_ORDER flag should be set if such reordering is
         * undesirable.
         */
        UNCACHEABLE                 = 0x00000400,
        /**
         * The request is required to be strictly ordered by <i>CPU
         * models</i> and is non-speculative.
         *
         * A strictly ordered request is guaranteed to never be
         * re-ordered or executed speculatively by a CPU model. The
         * memory system may still reorder requests in caches unless
         * the UNCACHEABLE flag is set as well.
         */
        STRICT_ORDER                = 0x00000800,
        /** This request is to a memory mapped register. */
        MMAPPED_IPR                 = 0x00002000,
        /** This request is made in privileged mode. */
        PRIVILEGED                  = 0x00008000,

        /**
         * This is a write that targets and zeroes an entire cache
         * block, so there is no need for a read/modify/write.
         */
        CACHE_BLOCK_ZERO            = 0x00010000,

        /** The request should not cause a memory access. */
        NO_ACCESS                   = 0x00080000,
        /**
         * This request will lock or unlock the accessed memory. When
         * used with a load, the access locks the particular chunk of
         * memory. When used with a store, it unlocks. The rule is
         * that locked accesses have to be made up of a locked load,
         * some operation on the data, and then a locked store.
         */
        LOCKED_RMW                  = 0x00100000,
        /** The request is a load locked/store conditional. */
        LLSC                        = 0x00200000,
        /** This request is for a memory swap. */
        MEM_SWAP                    = 0x00400000,
        MEM_SWAP_COND               = 0x00800000,

        /** The request is a prefetch. */
        PREFETCH                    = 0x01000000,
        /** The request should be prefetched into the exclusive state. */
        PF_EXCLUSIVE                = 0x02000000,
        /** The request should be marked as LRU. */
        EVICT_NEXT                  = 0x04000000,
        /** The request should be marked with ACQUIRE. */
        ACQUIRE                     = 0x00020000,
        /** The request should be marked with RELEASE. */
        RELEASE                     = 0x00040000,

        /** The request is an atomic that returns data. */
        ATOMIC_RETURN_OP            = 0x40000000,
        /** The request is an atomic that does not return data. */
        ATOMIC_NO_RETURN_OP         = 0x80000000,

        /**
         * The request should be marked with KERNEL.
         * Used to indicate the synchronization associated with a GPU kernel
         * launch or completion.
         */
        KERNEL                      = 0x00001000,

        /** The request targets the secure memory space. */
        SECURE                      = 0x10000000,
        /** The request is a page table walk. */
        PT_WALK                     = 0x20000000,

        /** The request invalidates a memory location. */
        INVALIDATE                  = 0x0000000100000000,
        /** The request cleans a memory location. */
        CLEAN                       = 0x0000000200000000,

        /** The request targets the point of unification. */
        DST_POU                     = 0x0000001000000000,

        /** The request targets the point of coherence. */
        DST_POC                     = 0x0000002000000000,

        /** Bits to define the destination of a request. */
        DST_BITS                    = 0x0000003000000000,

        /**
         * These flags are *not* cleared when a Request object is
         * reused (assigned a new address).
         */
        STICKY_FLAGS = INST_FETCH
    };
    static const FlagsType STORE_NO_DATA = CACHE_BLOCK_ZERO |
        CLEAN | INVALIDATE;
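
    // A hedged sketch of how these flags compose (the address, size and
    // master id below are illustrative, not from the original source):
    // a device access that must bypass the caches and never be reordered
    // ORs UNCACHEABLE with STRICT_ORDER:
    //
    //     RequestPtr req = std::make_shared<Request>(
    //         0x10000000, 4,
    //         Request::UNCACHEABLE | Request::STRICT_ORDER, mid);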

    /** Master Ids that are statically allocated
     * @{*/
    enum : MasterID {
        /** This master id is used for writeback requests by the caches */
        wbMasterId = 0,
        /**
         * This master id is used for functional requests that
         * don't come from a particular device
         */
        funcMasterId = 1,
        /** This master id is used for message signaled interrupts */
        intMasterId = 2,
        /**
         * Invalid master id for assertion checking only. It is
         * invalid behavior to ever send this id as part of a request.
         */
        invldMasterId = std::numeric_limits<MasterID>::max()
    };
    /** @} */

    typedef uint32_t MemSpaceConfigFlagsType;
    typedef ::Flags<MemSpaceConfigFlagsType> MemSpaceConfigFlags;

    enum : MemSpaceConfigFlagsType {
        /** Has a synchronization scope been set? */
        SCOPE_VALID            = 0x00000001,
        /** Access has Wavefront scope visibility */
        WAVEFRONT_SCOPE        = 0x00000002,
        /** Access has Workgroup scope visibility */
        WORKGROUP_SCOPE        = 0x00000004,
        /** Access has Device (e.g., GPU) scope visibility */
        DEVICE_SCOPE           = 0x00000008,
        /** Access has System (e.g., CPU + GPU) scope visibility */
        SYSTEM_SCOPE           = 0x00000010,

        /** Global Segment */
        GLOBAL_SEGMENT         = 0x00000020,
        /** Group Segment */
        GROUP_SEGMENT          = 0x00000040,
        /** Private Segment */
        PRIVATE_SEGMENT        = 0x00000080,
        /** Kernarg Segment */
        KERNARG_SEGMENT        = 0x00000100,
        /** Readonly Segment */
        READONLY_SEGMENT       = 0x00000200,
        /** Spill Segment */
        SPILL_SEGMENT          = 0x00000400,
        /** Arg Segment */
        ARG_SEGMENT            = 0x00000800,
    };
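
    // Sketch (illustrative values, not from the original source): a GPU
    // model declares a scoped access by pairing SCOPE_VALID with exactly
    // one scope bit, via setMemSpaceConfigFlags() defined below:
    //
    //     req->setMemSpaceConfigFlags(
    //         Request::SCOPE_VALID | Request::DEVICE_SCOPE);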

  private:
    typedef uint16_t PrivateFlagsType;
    typedef ::Flags<PrivateFlagsType> PrivateFlags;

    enum : PrivateFlagsType {
        /** Whether or not the size is valid. */
        VALID_SIZE           = 0x00000001,
        /** Whether or not paddr is valid (has been written). */
        VALID_PADDR          = 0x00000002,
        /** Whether or not the vaddr & asid are valid. */
        VALID_VADDR          = 0x00000004,
        /** Whether or not the instruction sequence number is valid. */
        VALID_INST_SEQ_NUM   = 0x00000008,
        /** Whether or not the pc is valid. */
        VALID_PC             = 0x00000010,
        /** Whether or not the context ID is valid. */
        VALID_CONTEXT_ID     = 0x00000020,
        /** Whether or not the sc result is valid. */
        VALID_EXTRA_DATA     = 0x00000080,
        /** Whether or not the stream ID and substream ID are valid. */
        VALID_STREAM_ID      = 0x00000100,
        VALID_SUBSTREAM_ID   = 0x00000200,
        /**
         * These flags are *not* cleared when a Request object is reused
         * (assigned a new address).
         */
        STICKY_PRIVATE_FLAGS = VALID_CONTEXT_ID
    };

  private:

    /**
     * Set up a physical (e.g. device) request in a previously
     * allocated Request object.
     */
    void
    setPhys(Addr paddr, unsigned size, Flags flags, MasterID mid, Tick time)
    {
        _paddr = paddr;
        _size = size;
        _time = time;
        _masterId = mid;
        _flags.clear(~STICKY_FLAGS);
        _flags.set(flags);
        privateFlags.clear(~STICKY_PRIVATE_FLAGS);
        privateFlags.set(VALID_PADDR|VALID_SIZE);
        depth = 0;
        accessDelta = 0;
        //translateDelta = 0;
    }

    /**
     * The physical address of the request. Valid only if validPaddr
     * is set.
     */
    Addr _paddr;

    /**
     * The size of the request. This field must be set when vaddr or
     * paddr is written via setVirt() or setPhys(), so it is always
     * valid as long as one of the address fields is valid.
     */
    unsigned _size;

    /** Byte-enable mask for writes. */
    std::vector<bool> _byteEnable;

    /** The requestor ID which is unique in the system for all ports
     * that are capable of issuing a transaction
     */
    MasterID _masterId;

    /** Flag structure for the request. */
    Flags _flags;

    /** Memory space configuration flag structure for the request. */
    MemSpaceConfigFlags _memSpaceConfigFlags;

    /** Private flags for field validity checking. */
    PrivateFlags privateFlags;

    /**
     * The time this request was started. Used to calculate
     * latencies. This field is set to curTick() any time paddr or vaddr
     * is written.
     */
    Tick _time;

    /**
     * The task id associated with this request
     */
    uint32_t _taskId;

    union {
        struct {
            /**
             * The stream ID uniquely identifies a device behind the
             * SMMU/IOMMU. Each transaction arriving at the SMMU/IOMMU
             * is associated with exactly one stream ID.
             */
            uint32_t _streamId;

            /**
             * The substream ID identifies an "execution context" within
             * a device behind an SMMU/IOMMU. It's intended to map 1-to-1
             * to PCIe PASID (Process Address Space ID). The presence of
             * a substream ID is optional.
             */
            uint32_t _substreamId;
        };

        /** The address space ID. */
        uint64_t _asid;
    };
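
    // Note: because of the union above, _streamId/_substreamId overlay
    // _asid, so a request is identified either by the SMMU/IOMMU stream
    // identifiers or by an address space ID, never both at once.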

    /** The virtual address of the request. */
    Addr _vaddr;

    /**
     * Extra data for the request, such as the return value of
     * store conditional or the compare value for a CAS.
     */
    uint64_t _extraData;

    /** The context ID (for statistics, locks, and wakeups). */
    ContextID _contextId;

    /** program counter of initiating access; for tracing/debugging */
    Addr _pc;

    /** Sequence number of the instruction that creates the request */
    InstSeqNum _reqInstSeqNum;

    /** A pointer to an atomic operation */
    AtomicOpFunctorPtr atomicOpFunctor;

  public:

    /**
     * Minimal constructor. No fields are initialized. (Note that
     * _flags and privateFlags are cleared by the Flags default
     * constructor.)
     */
    Request()
        : _paddr(0), _size(0), _masterId(invldMasterId), _time(0),
          _taskId(ContextSwitchTaskId::Unknown), _asid(0), _vaddr(0),
          _extraData(0), _contextId(0), _pc(0),
          _reqInstSeqNum(0), atomicOpFunctor(nullptr), translateDelta(0),
          accessDelta(0), depth(0)
    {}

    Request(Addr paddr, unsigned size, Flags flags, MasterID mid,
            InstSeqNum seq_num, ContextID cid)
        : _paddr(0), _size(0), _masterId(invldMasterId), _time(0),
          _taskId(ContextSwitchTaskId::Unknown), _asid(0), _vaddr(0),
          _extraData(0), _contextId(0), _pc(0),
          _reqInstSeqNum(seq_num), atomicOpFunctor(nullptr),
          translateDelta(0), accessDelta(0), depth(0)
    {
        setPhys(paddr, size, flags, mid, curTick());
        setContext(cid);
        privateFlags.set(VALID_INST_SEQ_NUM);
    }

    /**
     * Constructor for physical (e.g. device) requests. Initializes
     * just the physical address, size, flags, and timestamp (to
     * curTick()). These fields are adequate to perform a request.
     */
    Request(Addr paddr, unsigned size, Flags flags, MasterID mid)
        : _paddr(0), _size(0), _masterId(invldMasterId), _time(0),
          _taskId(ContextSwitchTaskId::Unknown), _asid(0), _vaddr(0),
          _extraData(0), _contextId(0), _pc(0),
          _reqInstSeqNum(0), atomicOpFunctor(nullptr), translateDelta(0),
          accessDelta(0), depth(0)
    {
        setPhys(paddr, size, flags, mid, curTick());
    }

    Request(Addr paddr, unsigned size, Flags flags, MasterID mid, Tick time)
        : _paddr(0), _size(0), _masterId(invldMasterId), _time(0),
          _taskId(ContextSwitchTaskId::Unknown), _asid(0), _vaddr(0),
          _extraData(0), _contextId(0), _pc(0),
          _reqInstSeqNum(0), atomicOpFunctor(nullptr), translateDelta(0),
          accessDelta(0), depth(0)
    {
        setPhys(paddr, size, flags, mid, time);
    }

    Request(Addr paddr, unsigned size, Flags flags, MasterID mid, Tick time,
            Addr pc)
        : _paddr(0), _size(0), _masterId(invldMasterId), _time(0),
          _taskId(ContextSwitchTaskId::Unknown), _asid(0), _vaddr(0),
          _extraData(0), _contextId(0), _pc(pc),
          _reqInstSeqNum(0), atomicOpFunctor(nullptr), translateDelta(0),
          accessDelta(0), depth(0)
    {
        setPhys(paddr, size, flags, mid, time);
        privateFlags.set(VALID_PC);
    }

    Request(uint64_t asid, Addr vaddr, unsigned size, Flags flags,
            MasterID mid, Addr pc, ContextID cid)
        : _paddr(0), _size(0), _masterId(invldMasterId), _time(0),
          _taskId(ContextSwitchTaskId::Unknown), _asid(0), _vaddr(0),
          _extraData(0), _contextId(0), _pc(0),
          _reqInstSeqNum(0), atomicOpFunctor(nullptr), translateDelta(0),
          accessDelta(0), depth(0)
    {
        setVirt(asid, vaddr, size, flags, mid, pc);
        setContext(cid);
    }

    Request(uint64_t asid, Addr vaddr, unsigned size, Flags flags,
            MasterID mid, Addr pc, ContextID cid,
            AtomicOpFunctorPtr atomic_op)
        : _paddr(0), _size(0), _masterId(invldMasterId), _time(0),
          _taskId(ContextSwitchTaskId::Unknown), _asid(0), _vaddr(0),
          _extraData(0), _contextId(0), _pc(0),
          _reqInstSeqNum(0), atomicOpFunctor(nullptr), translateDelta(0),
          accessDelta(0), depth(0)
    {
        setVirt(asid, vaddr, size, flags, mid, pc, std::move(atomic_op));
        setContext(cid);
    }
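
    // Sketch of a typical CPU-side (virtual) request built with the
    // constructor taking an asid/vaddr above; every identifier here is
    // an assumed local, and the size/flags values are illustrative:
    //
    //     RequestPtr req = std::make_shared<Request>(
    //         asid, vaddr, /* size */ 8, /* flags */ 0, masterId, pc,
    //         contextId);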

    Request(const Request& other)
        : _paddr(other._paddr), _size(other._size),
          _byteEnable(other._byteEnable),
          _masterId(other._masterId),
          _flags(other._flags),
          _memSpaceConfigFlags(other._memSpaceConfigFlags),
          privateFlags(other.privateFlags),
          _time(other._time),
          _taskId(other._taskId), _asid(other._asid), _vaddr(other._vaddr),
          _extraData(other._extraData), _contextId(other._contextId),
          _pc(other._pc), _reqInstSeqNum(other._reqInstSeqNum),
          translateDelta(other.translateDelta),
          accessDelta(other.accessDelta), depth(other.depth)
    {
        atomicOpFunctor.reset(other.atomicOpFunctor ?
                              other.atomicOpFunctor->clone() : nullptr);
    }

    ~Request() {}

    /**
     * Set up Context numbers.
     */
    void
    setContext(ContextID context_id)
    {
        _contextId = context_id;
        privateFlags.set(VALID_CONTEXT_ID);
    }

    void
    setStreamId(uint32_t sid)
    {
        _streamId = sid;
        privateFlags.set(VALID_STREAM_ID);
    }

    void
    setSubStreamId(uint32_t ssid)
    {
        assert(privateFlags.isSet(VALID_STREAM_ID));
        _substreamId = ssid;
        privateFlags.set(VALID_SUBSTREAM_ID);
    }
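
    // Order matters when using the two setters above: the assert in
    // setSubStreamId() requires a stream ID to be present first. A
    // sketch, with hypothetical identifiers:
    //
    //     req->setStreamId(sid);       // device's SMMU stream ID
    //     req->setSubStreamId(pasid);  // optional PCIe PASID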

    /**
     * Set up a virtual (e.g., CPU) request in a previously
     * allocated Request object.
     */
    void
    setVirt(uint64_t asid, Addr vaddr, unsigned size, Flags flags,
            MasterID mid, Addr pc, AtomicOpFunctorPtr amo_op = nullptr)
    {
        _asid = asid;
        _vaddr = vaddr;
        _size = size;
        _masterId = mid;
        _pc = pc;
        _time = curTick();

        _flags.clear(~STICKY_FLAGS);
        _flags.set(flags);
        privateFlags.clear(~STICKY_PRIVATE_FLAGS);
        privateFlags.set(VALID_VADDR|VALID_SIZE|VALID_PC);
        depth = 0;
        accessDelta = 0;
        translateDelta = 0;
        atomicOpFunctor = std::move(amo_op);
    }

    /**
     * Set just the physical address. This is usually used to record
     * the result of a translation. However, when using virtualized
     * CPUs setPhys() is sometimes called to finalize a physical
     * address without a virtual address, so we can't check if the
     * virtual address is valid.
     */
    void
    setPaddr(Addr paddr)
    {
        _paddr = paddr;
        privateFlags.set(VALID_PADDR);
    }

    /**
     * Generate two requests as if this request had been split into two
     * pieces. The original request can't have been translated already.
     */
    // TODO: this function is still required by TimingSimpleCPU - should be
    // removed once TimingSimpleCPU supports arbitrarily long multi-line
    // mem. accesses
    void splitOnVaddr(Addr split_addr, RequestPtr &req1, RequestPtr &req2)
    {
        assert(privateFlags.isSet(VALID_VADDR));
        assert(privateFlags.noneSet(VALID_PADDR));
        assert(split_addr > _vaddr && split_addr < _vaddr + _size);
        req1 = std::make_shared<Request>(*this);
        req2 = std::make_shared<Request>(*this);
        req1->_size = split_addr - _vaddr;
        req2->_vaddr = split_addr;
        req2->_size = _size - req1->_size;
        if (!_byteEnable.empty()) {
            req1->_byteEnable = std::vector<bool>(
                _byteEnable.begin(),
                _byteEnable.begin() + req1->_size);
            req2->_byteEnable = std::vector<bool>(
                _byteEnable.begin() + req1->_size,
                _byteEnable.end());
        }
    }
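
    // Sketch of splitting an untranslated 8-byte access that straddles a
    // 64-byte line boundary (addresses are illustrative): if req covers
    // [0x3c, 0x44), splitting at 0x40 yields req1 with vaddr 0x3c and
    // size 4, and req2 with vaddr 0x40 and size 4.
    //
    //     RequestPtr req1, req2;
    //     req->splitOnVaddr(0x40, req1, req2);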

    /**
     * Accessor for paddr.
     */
    bool
    hasPaddr() const
    {
        return privateFlags.isSet(VALID_PADDR);
    }

    Addr
    getPaddr() const
    {
        assert(privateFlags.isSet(VALID_PADDR));
        return _paddr;
    }

    /**
     * Time for the TLB/table walker to successfully translate this request.
     */
    Tick translateDelta;

    /**
     * Access latency to complete this memory transaction not including
     * translation time.
     */
    Tick accessDelta;

    /**
     * Level of the cache hierarchy where this request was responded to
     * (e.g. 0 = L1; 1 = L2).
     */
    mutable int depth;

    /**
     * Accessor for size.
     */
    bool
    hasSize() const
    {
        return privateFlags.isSet(VALID_SIZE);
    }

    unsigned
    getSize() const
    {
        assert(privateFlags.isSet(VALID_SIZE));
        return _size;
    }

    const std::vector<bool>&
    getByteEnable() const
    {
        return _byteEnable;
    }

    void
    setByteEnable(const std::vector<bool>& be)
    {
        assert(be.empty() || be.size() == _size);
        _byteEnable = be;
    }

    /**
     * Returns true if the memory request is masked, i.e. there is at
     * least one byteEnable element which is false (that byte is
     * masked out).
     */
    bool
    isMasked() const
    {
        return std::find(
            _byteEnable.begin(),
            _byteEnable.end(),
            false) != _byteEnable.end();
    }
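
    // Sketch: write only the low two bytes of a 4-byte request (the mask
    // length must match the request size, per the assert above). After
    // setByteEnable() the request reports itself as masked:
    //
    //     req->setByteEnable({true, true, false, false});
    //     assert(req->isMasked());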

    /** Accessor for time. */
    Tick
    time() const
    {
        assert(privateFlags.isSet(VALID_PADDR|VALID_VADDR));
        return _time;
    }

    /**
     * Accessor for atomic-op functor.
     */
    bool
    hasAtomicOpFunctor()
    {
        return (bool)atomicOpFunctor;
    }

    AtomicOpFunctor *
    getAtomicOpFunctor()
    {
        assert(atomicOpFunctor);
        return atomicOpFunctor.get();
    }
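
    // Sketch (assuming the AtomicOpFunctor interface from base/amo.hh
    // applies the operation via operator() on a raw byte pointer): a
    // memory model would check for and apply the functor roughly as
    //
    //     if (req->hasAtomicOpFunctor())
    //         (*req->getAtomicOpFunctor())(host_addr);  // host_addr assumed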

    /** Accessor for flags. */
    Flags
    getFlags()
    {
        assert(privateFlags.isSet(VALID_PADDR|VALID_VADDR));
        return _flags;
    }

    /** Note that unlike other accessors, this function sets *specific
        flags* (ORs them in); it does not assign its argument to the
        _flags field. Thus this method should rightly be called
        setFlags() and not just flags(). */
    void
    setFlags(Flags flags)
    {
        assert(privateFlags.isSet(VALID_PADDR|VALID_VADDR));
        _flags.set(flags);
    }

    void
    setMemSpaceConfigFlags(MemSpaceConfigFlags extraFlags)
    {
        assert(privateFlags.isSet(VALID_PADDR | VALID_VADDR));
        _memSpaceConfigFlags.set(extraFlags);
    }

    /** Accessor function for vaddr.*/
    bool
    hasVaddr() const
    {
        return privateFlags.isSet(VALID_VADDR);
    }

    Addr
    getVaddr() const
    {
        assert(privateFlags.isSet(VALID_VADDR));
        return _vaddr;
    }

    /** Accessor for the requestor id. */
    MasterID
    masterId() const
    {
        return _masterId;
    }

    uint32_t
    taskId() const
    {
        return _taskId;
    }

    void
    taskId(uint32_t id)
    {
        _taskId = id;
    }

    /** Accessor function for asid.*/
    uint64_t
    getAsid() const
    {
        assert(privateFlags.isSet(VALID_VADDR));
        return _asid;
    }

    /** Accessor function for asid.*/
    void
    setAsid(uint64_t asid)
    {
        _asid = asid;
    }

    /** Accessor function for architecture-specific flags.*/
    ArchFlagsType
    getArchFlags() const
    {
        assert(privateFlags.isSet(VALID_PADDR|VALID_VADDR));
        return _flags & ARCH_BITS;
    }

    /** Accessor function to check if sc result is valid. */
    bool
    extraDataValid() const
    {
        return privateFlags.isSet(VALID_EXTRA_DATA);
    }

    /** Accessor function for store conditional return value.*/
    uint64_t
    getExtraData() const
    {
        assert(privateFlags.isSet(VALID_EXTRA_DATA));
        return _extraData;
    }

    /** Accessor function for store conditional return value.*/
    void
    setExtraData(uint64_t extraData)
    {
        _extraData = extraData;
        privateFlags.set(VALID_EXTRA_DATA);
    }

    bool
    hasContextId() const
    {
        return privateFlags.isSet(VALID_CONTEXT_ID);
    }

    /** Accessor function for context ID.*/
    ContextID
    contextId() const
    {
        assert(privateFlags.isSet(VALID_CONTEXT_ID));
        return _contextId;
    }

    uint32_t
    streamId() const
    {
        assert(privateFlags.isSet(VALID_STREAM_ID));
        return _streamId;
    }

    bool
    hasSubstreamId() const
    {
        return privateFlags.isSet(VALID_SUBSTREAM_ID);
    }

    uint32_t
    substreamId() const
    {
        assert(privateFlags.isSet(VALID_SUBSTREAM_ID));
        return _substreamId;
    }

    void
    setPC(Addr pc)
    {
        privateFlags.set(VALID_PC);
        _pc = pc;
    }

    bool
    hasPC() const
    {
        return privateFlags.isSet(VALID_PC);
    }

    /** Accessor function for pc.*/
    Addr
    getPC() const
    {
        assert(privateFlags.isSet(VALID_PC));
        return _pc;
    }

    /**
     * Increment/Get the depth at which this request is responded to.
     * This currently happens when the request misses in any cache level.
     */
    void incAccessDepth() const { depth++; }
    int getAccessDepth() const { return depth; }

    /**
     * Set/Get the time taken for this request to be successfully translated.
     */
    void setTranslateLatency() { translateDelta = curTick() - _time; }
    Tick getTranslateLatency() const { return translateDelta; }

    /**
     * Set/Get the time taken to complete this request's access, not
     * including the time to successfully translate the request.
     */
    void
    setAccessLatency()
    {
        accessDelta = curTick() - _time - translateDelta;
    }
    Tick getAccessLatency() const { return accessDelta; }
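
    // The intended bookkeeping sequence, per the definitions above:
    // _time is stamped when the address is set, the translation latency
    // is recorded first, and the access latency then excludes it:
    //
    //     req->setTranslateLatency();  // translateDelta = now - _time
    //     // ... perform the memory access ...
    //     req->setAccessLatency();     // excludes translateDelta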

    /**
     * Accessor for the sequence number of the instruction that creates
     * the request.
     */
    bool
    hasInstSeqNum() const
    {
        return privateFlags.isSet(VALID_INST_SEQ_NUM);
    }

    InstSeqNum
    getReqInstSeqNum() const
    {
        assert(privateFlags.isSet(VALID_INST_SEQ_NUM));
        return _reqInstSeqNum;
    }

    void
    setReqInstSeqNum(const InstSeqNum seq_num)
    {
        privateFlags.set(VALID_INST_SEQ_NUM);
        _reqInstSeqNum = seq_num;
    }

    /** Accessor functions for flags. Note that these are for testing
        only; setting flags should be done via setFlags(). */
    bool isUncacheable() const { return _flags.isSet(UNCACHEABLE); }
    bool isStrictlyOrdered() const { return _flags.isSet(STRICT_ORDER); }
    bool isInstFetch() const { return _flags.isSet(INST_FETCH); }
    bool isPrefetch() const { return (_flags.isSet(PREFETCH) ||
                                      _flags.isSet(PF_EXCLUSIVE)); }
    bool isPrefetchEx() const { return _flags.isSet(PF_EXCLUSIVE); }
    bool isLLSC() const { return _flags.isSet(LLSC); }
    bool isPriv() const { return _flags.isSet(PRIVILEGED); }
    bool isLockedRMW() const { return _flags.isSet(LOCKED_RMW); }
    bool isSwap() const { return _flags.isSet(MEM_SWAP|MEM_SWAP_COND); }
    bool isCondSwap() const { return _flags.isSet(MEM_SWAP_COND); }
    bool isMmappedIpr() const { return _flags.isSet(MMAPPED_IPR); }
    bool isSecure() const { return _flags.isSet(SECURE); }
    bool isPTWalk() const { return _flags.isSet(PT_WALK); }
    bool isAcquire() const { return _flags.isSet(ACQUIRE); }
    bool isRelease() const { return _flags.isSet(RELEASE); }
    bool isKernel() const { return _flags.isSet(KERNEL); }
    bool isAtomicReturn() const { return _flags.isSet(ATOMIC_RETURN_OP); }
    bool isAtomicNoReturn() const { return _flags.isSet(ATOMIC_NO_RETURN_OP); }

    bool
    isAtomic() const
    {
        return _flags.isSet(ATOMIC_RETURN_OP) ||
               _flags.isSet(ATOMIC_NO_RETURN_OP);
    }

    /**
     * Accessor functions for the destination of a memory request. The
     * destination flag can specify a point of reference for the
     * operation (e.g. a cache block clean to the point of
     * unification). At the moment the destination is only used by the
     * cache maintenance operations.
     */
    bool isToPOU() const { return _flags.isSet(DST_POU); }
    bool isToPOC() const { return _flags.isSet(DST_POC); }
    Flags getDest() const { return _flags & DST_BITS; }

    /**
     * Accessor functions for the memory space configuration flags, used
     * by GPU ISAs such as the Heterogeneous System Architecture (HSA).
     * Note that these are for testing only; setting extraFlags should
     * be done via setMemSpaceConfigFlags().
     */
    bool isScoped() const { return _memSpaceConfigFlags.isSet(SCOPE_VALID); }

    bool
    isWavefrontScope() const
    {
        assert(isScoped());
        return _memSpaceConfigFlags.isSet(WAVEFRONT_SCOPE);
    }

    bool
    isWorkgroupScope() const
    {
        assert(isScoped());
        return _memSpaceConfigFlags.isSet(WORKGROUP_SCOPE);
    }

    bool
    isDeviceScope() const
    {
        assert(isScoped());
        return _memSpaceConfigFlags.isSet(DEVICE_SCOPE);
    }

    bool
    isSystemScope() const
    {
        assert(isScoped());
        return _memSpaceConfigFlags.isSet(SYSTEM_SCOPE);
    }

    bool
    isGlobalSegment() const
    {
        return _memSpaceConfigFlags.isSet(GLOBAL_SEGMENT) ||
               (!isGroupSegment() && !isPrivateSegment() &&
                !isKernargSegment() && !isReadonlySegment() &&
                !isSpillSegment() && !isArgSegment());
    }

    bool
    isGroupSegment() const
    {
        return _memSpaceConfigFlags.isSet(GROUP_SEGMENT);
    }

    bool
    isPrivateSegment() const
    {
        return _memSpaceConfigFlags.isSet(PRIVATE_SEGMENT);
    }

    bool
    isKernargSegment() const
    {
        return _memSpaceConfigFlags.isSet(KERNARG_SEGMENT);
    }

    bool
    isReadonlySegment() const
    {
        return _memSpaceConfigFlags.isSet(READONLY_SEGMENT);
    }

    bool
    isSpillSegment() const
    {
        return _memSpaceConfigFlags.isSet(SPILL_SEGMENT);
    }

    bool
    isArgSegment() const
    {
        return _memSpaceConfigFlags.isSet(ARG_SEGMENT);
    }

    /**
     * Accessor functions to determine whether this request is part of
     * a cache maintenance operation. At the moment three operations
     * are supported:
     *
     * 1) A cache clean operation updates all copies of a memory
     * location to the point of reference,
     * 2) A cache invalidate operation invalidates all copies of the
     * specified block in the memory above the point of reference,
     * 3) A clean and invalidate operation is a combination of the two
     * operations.
     * @{ */
    bool isCacheClean() const { return _flags.isSet(CLEAN); }
    bool isCacheInvalidate() const { return _flags.isSet(INVALIDATE); }
    bool isCacheMaintenance() const { return _flags.isSet(CLEAN|INVALIDATE); }
    /** @} */
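
    // Sketch: a clean-and-invalidate to the point of coherence would
    // combine both maintenance bits with a destination, e.g.
    //
    //     Request::CLEAN | Request::INVALIDATE | Request::DST_POC
    //
    // which makes isCacheClean(), isCacheInvalidate(), isToPOC() and
    // isCacheMaintenance() all true.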
};

#endif // __MEM_REQUEST_HH__