src/mem/request.hh
1 /*
2 * Copyright (c) 2012-2013,2017-2019 ARM Limited
3 * All rights reserved
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
13 *
14 * Copyright (c) 2002-2005 The Regents of The University of Michigan
15 * Copyright (c) 2010,2015 Advanced Micro Devices, Inc.
16 * All rights reserved.
17 *
18 * Redistribution and use in source and binary forms, with or without
19 * modification, are permitted provided that the following conditions are
20 * met: redistributions of source code must retain the above copyright
21 * notice, this list of conditions and the following disclaimer;
22 * redistributions in binary form must reproduce the above copyright
23 * notice, this list of conditions and the following disclaimer in the
24 * documentation and/or other materials provided with the distribution;
25 * neither the name of the copyright holders nor the names of its
26 * contributors may be used to endorse or promote products derived from
27 * this software without specific prior written permission.
28 *
29 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
30 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
31 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
32 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
33 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
34 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
35 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
36 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
37 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
38 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
39 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
40 *
41 * Authors: Ron Dreslinski
42 * Steve Reinhardt
43 * Ali Saidi
44 */
45
46 /**
47 * @file
48 * Declaration of a request, the overall memory request consisting of
49  * the parts of the request that are persistent throughout the transaction.
50 */
51
52 #ifndef __MEM_REQUEST_HH__
53 #define __MEM_REQUEST_HH__
54
55 #include <cassert>
56 #include <climits>
57
58 #include "base/amo.hh"
59 #include "base/flags.hh"
60 #include "base/logging.hh"
61 #include "base/types.hh"
62 #include "cpu/inst_seq.hh"
63 #include "sim/core.hh"
64
65 /**
66 * Special TaskIds that are used for per-context-switch stats dumps
67 * and Cache Occupancy. Having too many tasks seems to be a problem
68 * with vector stats. 1024 seems to be a reasonable number that
69  * doesn't cause a problem with stats and is large enough for realistic
70 * benchmarks (Linux/Android boot, BBench, etc.)
71 */
72
73 namespace ContextSwitchTaskId {
74 enum TaskId {
75 MaxNormalTaskId = 1021, /* Maximum number of normal tasks */
76 Prefetcher = 1022, /* For cache lines brought in by prefetcher */
77 DMA = 1023, /* Mostly Table Walker */
78 Unknown = 1024,
79 NumTaskId
80 };
81 }
82
83 class Request;
84
85 typedef std::shared_ptr<Request> RequestPtr;
86 typedef uint16_t MasterID;
87
88 class Request
89 {
90 public:
91 typedef uint64_t FlagsType;
92 typedef uint8_t ArchFlagsType;
93 typedef ::Flags<FlagsType> Flags;
94
95 enum : FlagsType {
96 /**
97 * Architecture specific flags.
98 *
99  * These bits in the flag field are reserved for
100 * architecture-specific code. For example, SPARC uses them to
101 * represent ASIs.
102 */
103 ARCH_BITS = 0x000000FF,
104 /** The request was an instruction fetch. */
105 INST_FETCH = 0x00000100,
106 /** The virtual address is also the physical address. */
107 PHYSICAL = 0x00000200,
108 /**
109 * The request is to an uncacheable address.
110 *
111 * @note Uncacheable accesses may be reordered by CPU models. The
112 * STRICT_ORDER flag should be set if such reordering is
113 * undesirable.
114 */
115 UNCACHEABLE = 0x00000400,
116 /**
117 * The request is required to be strictly ordered by <i>CPU
118 * models</i> and is non-speculative.
119 *
120 * A strictly ordered request is guaranteed to never be
121 * re-ordered or executed speculatively by a CPU model. The
122 * memory system may still reorder requests in caches unless
123 * the UNCACHEABLE flag is set as well.
124 */
125 STRICT_ORDER = 0x00000800,
126 /** This request is to a memory mapped register. */
127 MMAPPED_IPR = 0x00002000,
128 /** This request is made in privileged mode. */
129 PRIVILEGED = 0x00008000,
130
131 /**
132  * This is a write that targets and zeroes an entire
133  * cache block. There is no need for a read/modify/write.
134 */
135 CACHE_BLOCK_ZERO = 0x00010000,
136
137 /** The request should not cause a memory access. */
138 NO_ACCESS = 0x00080000,
139 /**
140 * This request will lock or unlock the accessed memory. When
141 * used with a load, the access locks the particular chunk of
142 * memory. When used with a store, it unlocks. The rule is
143 * that locked accesses have to be made up of a locked load,
144 * some operation on the data, and then a locked store.
145 */
146 LOCKED_RMW = 0x00100000,
147 /** The request is a Load locked/store conditional. */
148 LLSC = 0x00200000,
149 /** This request is for a memory swap. */
150 MEM_SWAP = 0x00400000,
     /** This request is for a conditional memory swap (e.g., a CAS). */
151  MEM_SWAP_COND = 0x00800000,
152
153 /** The request is a prefetch. */
154 PREFETCH = 0x01000000,
155 /** The request should be prefetched into the exclusive state. */
156 PF_EXCLUSIVE = 0x02000000,
157 /** The request should be marked as LRU. */
158 EVICT_NEXT = 0x04000000,
159 /** The request should be marked with ACQUIRE. */
160 ACQUIRE = 0x00020000,
161 /** The request should be marked with RELEASE. */
162 RELEASE = 0x00040000,
163
164 /** The request is an atomic that returns data. */
165 ATOMIC_RETURN_OP = 0x40000000,
166 /** The request is an atomic that does not return data. */
167 ATOMIC_NO_RETURN_OP = 0x80000000,
168
169 /** The request should be marked with KERNEL.
170 * Used to indicate the synchronization associated with a GPU kernel
171 * launch or completion.
172 */
173 KERNEL = 0x00001000,
174
175 /**
176 * The request should be handled by the generic IPR code (only
177 * valid together with MMAPPED_IPR)
178 */
179 GENERIC_IPR = 0x08000000,
180
181 /** The request targets the secure memory space. */
182 SECURE = 0x10000000,
183 /** The request is a page table walk */
184 PT_WALK = 0x20000000,
185
186 /** The request invalidates a memory location */
187 INVALIDATE = 0x0000000100000000,
188 /** The request cleans a memory location */
189 CLEAN = 0x0000000200000000,
190
191 /** The request targets the point of unification */
192 DST_POU = 0x0000001000000000,
193
194 /** The request targets the point of coherence */
195 DST_POC = 0x0000002000000000,
196
197 /** Bits to define the destination of a request */
198 DST_BITS = 0x0000003000000000,
199
200 /**
201 * These flags are *not* cleared when a Request object is
202 * reused (assigned a new address).
203 */
204 STICKY_FLAGS = INST_FETCH
205 };
206 static const FlagsType STORE_NO_DATA = CACHE_BLOCK_ZERO |
207 CLEAN | INVALIDATE;
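    /*
     * Illustrative sketch (not in the original header): request flags
     * are plain bit masks in a 64-bit field, so they can be OR-ed
     * together; the low byte (ARCH_BITS) is reserved for
     * architecture-specific encodings. For example:
     *
     * @code
     * Request::Flags flags = Request::UNCACHEABLE | Request::STRICT_ORDER;
     * assert(flags.isSet(Request::UNCACHEABLE));
     * assert(!flags.isSet(Request::INST_FETCH));
     * @endcode
     */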
208
209 /** Master Ids that are statically allocated
210 * @{*/
211 enum : MasterID {
212 /** This master id is used for writeback requests by the caches */
213 wbMasterId = 0,
214 /**
215 * This master id is used for functional requests that
216 * don't come from a particular device
217 */
218 funcMasterId = 1,
219 /** This master id is used for message signaled interrupts */
220 intMasterId = 2,
221 /**
222 * Invalid master id for assertion checking only. It is
223 * invalid behavior to ever send this id as part of a request.
224 */
225 invldMasterId = std::numeric_limits<MasterID>::max()
226 };
227 /** @} */
228
229 typedef uint32_t MemSpaceConfigFlagsType;
230 typedef ::Flags<MemSpaceConfigFlagsType> MemSpaceConfigFlags;
231
232 enum : MemSpaceConfigFlagsType {
233 /** Has a synchronization scope been set? */
234 SCOPE_VALID = 0x00000001,
235 /** Access has Wavefront scope visibility */
236 WAVEFRONT_SCOPE = 0x00000002,
237 /** Access has Workgroup scope visibility */
238 WORKGROUP_SCOPE = 0x00000004,
239 /** Access has Device (e.g., GPU) scope visibility */
240 DEVICE_SCOPE = 0x00000008,
241 /** Access has System (e.g., CPU + GPU) scope visibility */
242 SYSTEM_SCOPE = 0x00000010,
243
244 /** Global Segment */
245 GLOBAL_SEGMENT = 0x00000020,
246 /** Group Segment */
247 GROUP_SEGMENT = 0x00000040,
248 /** Private Segment */
249 PRIVATE_SEGMENT = 0x00000080,
250  /** Kernarg Segment */
251 KERNARG_SEGMENT = 0x00000100,
252 /** Readonly Segment */
253 READONLY_SEGMENT = 0x00000200,
254 /** Spill Segment */
255 SPILL_SEGMENT = 0x00000400,
256 /** Arg Segment */
257 ARG_SEGMENT = 0x00000800,
258 };
259
260 private:
261 typedef uint16_t PrivateFlagsType;
262 typedef ::Flags<PrivateFlagsType> PrivateFlags;
263
264 enum : PrivateFlagsType {
265 /** Whether or not the size is valid. */
266 VALID_SIZE = 0x00000001,
267 /** Whether or not paddr is valid (has been written yet). */
268 VALID_PADDR = 0x00000002,
269 /** Whether or not the vaddr & asid are valid. */
270 VALID_VADDR = 0x00000004,
271 /** Whether or not the instruction sequence number is valid. */
272 VALID_INST_SEQ_NUM = 0x00000008,
273 /** Whether or not the pc is valid. */
274 VALID_PC = 0x00000010,
275 /** Whether or not the context ID is valid. */
276 VALID_CONTEXT_ID = 0x00000020,
277 /** Whether or not the sc result is valid. */
278 VALID_EXTRA_DATA = 0x00000080,
279  /** Whether or not the stream ID and substream ID are valid. */
280 VALID_STREAM_ID = 0x00000100,
281 VALID_SUBSTREAM_ID = 0x00000200,
282 /**
283 * These flags are *not* cleared when a Request object is reused
284 * (assigned a new address).
285 */
286 STICKY_PRIVATE_FLAGS = VALID_CONTEXT_ID
287 };
288
289 private:
290
291 /**
292 * Set up a physical (e.g. device) request in a previously
293 * allocated Request object.
294 */
295 void
296 setPhys(Addr paddr, unsigned size, Flags flags, MasterID mid, Tick time)
297 {
298 _paddr = paddr;
299 _size = size;
300 _time = time;
301 _masterId = mid;
302 _flags.clear(~STICKY_FLAGS);
303 _flags.set(flags);
304 privateFlags.clear(~STICKY_PRIVATE_FLAGS);
305 privateFlags.set(VALID_PADDR|VALID_SIZE);
306 depth = 0;
307 accessDelta = 0;
308 //translateDelta = 0;
309 }
310
311 /**
312 * The physical address of the request. Valid only if validPaddr
313 * is set.
314 */
315 Addr _paddr;
316
317 /**
318 * The size of the request. This field must be set when vaddr or
319 * paddr is written via setVirt() or setPhys(), so it is always
320 * valid as long as one of the address fields is valid.
321 */
322 unsigned _size;
323
324 /** Byte-enable mask for writes. */
325 std::vector<bool> _byteEnable;
326
327 /** The requestor ID which is unique in the system for all ports
328 * that are capable of issuing a transaction
329 */
330 MasterID _masterId;
331
332 /** Flag structure for the request. */
333 Flags _flags;
334
335  /** Memory space configuration flag structure for the request. */
336 MemSpaceConfigFlags _memSpaceConfigFlags;
337
338 /** Private flags for field validity checking. */
339 PrivateFlags privateFlags;
340
341 /**
342 * The time this request was started. Used to calculate
343 * latencies. This field is set to curTick() any time paddr or vaddr
344 * is written.
345 */
346 Tick _time;
347
348 /**
349 * The task id associated with this request
350 */
351 uint32_t _taskId;
352
353 union {
354 struct {
355 /**
356 * The stream ID uniquely identifies a device behind the
357  * SMMU/IOMMU. Each transaction arriving at the SMMU/IOMMU is
358 * associated with exactly one stream ID.
359 */
360 uint32_t _streamId;
361
362 /**
363 * The substream ID identifies an "execution context" within a
364 * device behind an SMMU/IOMMU. It's intended to map 1-to-1 to
365 * PCIe PASID (Process Address Space ID). The presence of a
366 * substream ID is optional.
367 */
368 uint32_t _substreamId;
369 };
370
371 /** The address space ID. */
372 uint64_t _asid;
373 };
374
375 /** The virtual address of the request. */
376 Addr _vaddr;
377
378 /**
379 * Extra data for the request, such as the return value of
380 * store conditional or the compare value for a CAS. */
381 uint64_t _extraData;
382
383 /** The context ID (for statistics, locks, and wakeups). */
384 ContextID _contextId;
385
386 /** program counter of initiating access; for tracing/debugging */
387 Addr _pc;
388
389 /** Sequence number of the instruction that creates the request */
390 InstSeqNum _reqInstSeqNum;
391
392 /** A pointer to an atomic operation */
393 AtomicOpFunctorPtr atomicOpFunctor;
394
395 public:
396
397 /**
398 * Minimal constructor. No fields are initialized. (Note that
399 * _flags and privateFlags are cleared by Flags default
400 * constructor.)
401 */
402 Request()
403 : _paddr(0), _size(0), _masterId(invldMasterId), _time(0),
404 _taskId(ContextSwitchTaskId::Unknown), _asid(0), _vaddr(0),
405 _extraData(0), _contextId(0), _pc(0),
406 _reqInstSeqNum(0), atomicOpFunctor(nullptr), translateDelta(0),
407 accessDelta(0), depth(0)
408 {}
409
410 Request(Addr paddr, unsigned size, Flags flags, MasterID mid,
411 InstSeqNum seq_num, ContextID cid)
412 : _paddr(0), _size(0), _masterId(invldMasterId), _time(0),
413 _taskId(ContextSwitchTaskId::Unknown), _asid(0), _vaddr(0),
414 _extraData(0), _contextId(0), _pc(0),
415 _reqInstSeqNum(seq_num), atomicOpFunctor(nullptr), translateDelta(0),
416 accessDelta(0), depth(0)
417 {
418 setPhys(paddr, size, flags, mid, curTick());
419 setContext(cid);
420 privateFlags.set(VALID_INST_SEQ_NUM);
421 }
422
423 /**
424 * Constructor for physical (e.g. device) requests. Initializes
425 * just physical address, size, flags, and timestamp (to curTick()).
426 * These fields are adequate to perform a request.
427 */
428 Request(Addr paddr, unsigned size, Flags flags, MasterID mid)
429 : _paddr(0), _size(0), _masterId(invldMasterId), _time(0),
430 _taskId(ContextSwitchTaskId::Unknown), _asid(0), _vaddr(0),
431 _extraData(0), _contextId(0), _pc(0),
432 _reqInstSeqNum(0), atomicOpFunctor(nullptr), translateDelta(0),
433 accessDelta(0), depth(0)
434 {
435 setPhys(paddr, size, flags, mid, curTick());
436 }
437
438 Request(Addr paddr, unsigned size, Flags flags, MasterID mid, Tick time)
439 : _paddr(0), _size(0), _masterId(invldMasterId), _time(0),
440 _taskId(ContextSwitchTaskId::Unknown), _asid(0), _vaddr(0),
441 _extraData(0), _contextId(0), _pc(0),
442 _reqInstSeqNum(0), atomicOpFunctor(nullptr), translateDelta(0),
443 accessDelta(0), depth(0)
444 {
445 setPhys(paddr, size, flags, mid, time);
446 }
447
448 Request(Addr paddr, unsigned size, Flags flags, MasterID mid, Tick time,
449 Addr pc)
450 : _paddr(0), _size(0), _masterId(invldMasterId), _time(0),
451 _taskId(ContextSwitchTaskId::Unknown), _asid(0), _vaddr(0),
452 _extraData(0), _contextId(0), _pc(pc),
453 _reqInstSeqNum(0), atomicOpFunctor(nullptr), translateDelta(0),
454 accessDelta(0), depth(0)
455 {
456 setPhys(paddr, size, flags, mid, time);
457 privateFlags.set(VALID_PC);
458 }
459
460 Request(uint64_t asid, Addr vaddr, unsigned size, Flags flags,
461 MasterID mid, Addr pc, ContextID cid)
462 : _paddr(0), _size(0), _masterId(invldMasterId), _time(0),
463 _taskId(ContextSwitchTaskId::Unknown), _asid(0), _vaddr(0),
464 _extraData(0), _contextId(0), _pc(0),
465 _reqInstSeqNum(0), atomicOpFunctor(nullptr), translateDelta(0),
466 accessDelta(0), depth(0)
467 {
468 setVirt(asid, vaddr, size, flags, mid, pc);
469 setContext(cid);
470 }
471
472 Request(uint64_t asid, Addr vaddr, unsigned size, Flags flags,
473 MasterID mid, Addr pc, ContextID cid,
474 AtomicOpFunctorPtr atomic_op)
475 {
476 setVirt(asid, vaddr, size, flags, mid, pc, std::move(atomic_op));
477 setContext(cid);
478 }
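    /*
     * Illustrative sketch (not in the original header) of typical
     * construction; the addresses, sizes, and IDs below are
     * placeholders:
     *
     * @code
     * // Physical (e.g. device) request: paddr, size, flags, master ID.
     * RequestPtr dev_req = std::make_shared<Request>(
     *     0x80000000, 64, Request::UNCACHEABLE, Request::wbMasterId);
     *
     * // Virtual (e.g. CPU) request: asid, vaddr, size, flags, master ID,
     * // pc, and context ID; the paddr is filled in later by translation.
     * RequestPtr cpu_req = std::make_shared<Request>(
     *     0, 0x7fff0000, 4, 0, Request::funcMasterId, 0x400123, 0);
     * @endcode
     */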
479
480 Request(const Request& other)
481 : _paddr(other._paddr), _size(other._size),
482 _byteEnable(other._byteEnable),
483 _masterId(other._masterId),
484 _flags(other._flags),
485 _memSpaceConfigFlags(other._memSpaceConfigFlags),
486 privateFlags(other.privateFlags),
487 _time(other._time),
488 _taskId(other._taskId), _asid(other._asid), _vaddr(other._vaddr),
489 _extraData(other._extraData), _contextId(other._contextId),
490 _pc(other._pc), _reqInstSeqNum(other._reqInstSeqNum),
491 translateDelta(other.translateDelta),
492 accessDelta(other.accessDelta), depth(other.depth)
493 {
494
495 atomicOpFunctor.reset(other.atomicOpFunctor ?
496 other.atomicOpFunctor->clone() : nullptr);
497 }
498
499 ~Request() {}
500
501 /**
502 * Set up Context numbers.
503 */
504 void
505 setContext(ContextID context_id)
506 {
507 _contextId = context_id;
508 privateFlags.set(VALID_CONTEXT_ID);
509 }
510
511 void
512 setStreamId(uint32_t sid)
513 {
514 _streamId = sid;
515 privateFlags.set(VALID_STREAM_ID);
516 }
517
518 void
519 setSubStreamId(uint32_t ssid)
520 {
521 assert(privateFlags.isSet(VALID_STREAM_ID));
522 _substreamId = ssid;
523 privateFlags.set(VALID_SUBSTREAM_ID);
524 }
525
526 /**
527 * Set up a virtual (e.g., CPU) request in a previously
528 * allocated Request object.
529 */
530 void
531 setVirt(uint64_t asid, Addr vaddr, unsigned size, Flags flags,
532 MasterID mid, Addr pc, AtomicOpFunctorPtr amo_op = nullptr)
533 {
534 _asid = asid;
535 _vaddr = vaddr;
536 _size = size;
537 _masterId = mid;
538 _pc = pc;
539 _time = curTick();
540
541 _flags.clear(~STICKY_FLAGS);
542 _flags.set(flags);
543 privateFlags.clear(~STICKY_PRIVATE_FLAGS);
544 privateFlags.set(VALID_VADDR|VALID_SIZE|VALID_PC);
545 depth = 0;
546 accessDelta = 0;
547 translateDelta = 0;
548 atomicOpFunctor = std::move(amo_op);
549 }
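    /*
     * Illustrative sketch (not in the original header): reusing a
     * Request for a new virtual access. Only the sticky flags survive
     * setVirt(); the values below are placeholders:
     *
     * @code
     * RequestPtr req = std::make_shared<Request>();
     * req->setVirt(0, 0x1000, 8, 0, Request::funcMasterId, 0x400000);
     * // ... after translation, record the physical address:
     * req->setPaddr(0x80001000);
     * @endcode
     */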
550
551 /**
552  * Set just the physical address. This is usually used to record the
553 * result of a translation. However, when using virtualized CPUs
554 * setPhys() is sometimes called to finalize a physical address
555 * without a virtual address, so we can't check if the virtual
556 * address is valid.
557 */
558 void
559 setPaddr(Addr paddr)
560 {
561 _paddr = paddr;
562 privateFlags.set(VALID_PADDR);
563 }
564
565 /**
566 * Generate two requests as if this request had been split into two
567 * pieces. The original request can't have been translated already.
568 */
569 // TODO: this function is still required by TimingSimpleCPU - should be
570  // removed once TimingSimpleCPU supports arbitrarily long multi-line
571  // memory accesses.
572 void splitOnVaddr(Addr split_addr, RequestPtr &req1, RequestPtr &req2)
573 {
574 assert(privateFlags.isSet(VALID_VADDR));
575 assert(privateFlags.noneSet(VALID_PADDR));
576 assert(split_addr > _vaddr && split_addr < _vaddr + _size);
577 req1 = std::make_shared<Request>(*this);
578 req2 = std::make_shared<Request>(*this);
579 req1->_size = split_addr - _vaddr;
580 req2->_vaddr = split_addr;
581 req2->_size = _size - req1->_size;
582 if (!_byteEnable.empty()) {
583 req1->_byteEnable = std::vector<bool>(
584 _byteEnable.begin(),
585 _byteEnable.begin() + req1->_size);
586 req2->_byteEnable = std::vector<bool>(
587 _byteEnable.begin() + req1->_size,
588 _byteEnable.end());
589 }
590 }
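    /*
     * Illustrative sketch (not in the original header): for a request
     * with vaddr 0x1038 and size 16, splitting at a 64-byte line
     * boundary gives req1 = [0x1038, 0x1040) and req2 = [0x1040,
     * 0x1048); any byte-enable mask is split at the same point.
     *
     * @code
     * RequestPtr req1, req2;
     * req->splitOnVaddr(0x1040, req1, req2);
     * assert(req1->getSize() == 8 && req2->getVaddr() == 0x1040);
     * @endcode
     */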
591
592 /**
593 * Accessor for paddr.
594 */
595 bool
596 hasPaddr() const
597 {
598 return privateFlags.isSet(VALID_PADDR);
599 }
600
601 Addr
602 getPaddr() const
603 {
604 assert(privateFlags.isSet(VALID_PADDR));
605 return _paddr;
606 }
607
608 /**
609 * Time for the TLB/table walker to successfully translate this request.
610 */
611 Tick translateDelta;
612
613 /**
614 * Access latency to complete this memory transaction not including
615 * translation time.
616 */
617 Tick accessDelta;
618
619 /**
620  * Level of the cache hierarchy where this request was responded to
621 * (e.g. 0 = L1; 1 = L2).
622 */
623 mutable int depth;
624
625 /**
626 * Accessor for size.
627 */
628 bool
629 hasSize() const
630 {
631 return privateFlags.isSet(VALID_SIZE);
632 }
633
634 unsigned
635 getSize() const
636 {
637 assert(privateFlags.isSet(VALID_SIZE));
638 return _size;
639 }
640
641 const std::vector<bool>&
642 getByteEnable() const
643 {
644 return _byteEnable;
645 }
646
647 void
648 setByteEnable(const std::vector<bool>& be)
649 {
650 assert(be.empty() || be.size() == _size);
651 _byteEnable = be;
652 }
653
654 /**
655 * Returns true if the memory request is masked, which means
656 * there is at least one byteEnable element which is false
657 * (byte is masked)
658 */
659 bool
660 isMasked() const
661 {
662 return std::find(
663 _byteEnable.begin(),
664 _byteEnable.end(),
665 false) != _byteEnable.end();
666 }
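    /*
     * Illustrative sketch (not in the original header): masking the
     * middle two bytes of a request of size 4. An empty vector means
     * all bytes are enabled; isMasked() is true as soon as any
     * element is false.
     *
     * @code
     * req->setByteEnable({true, false, false, true});
     * assert(req->isMasked());
     * @endcode
     */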
667
668 /** Accessor for time. */
669 Tick
670 time() const
671 {
672 assert(privateFlags.isSet(VALID_PADDR|VALID_VADDR));
673 return _time;
674 }
675
676 /**
677 * Accessor for atomic-op functor.
678 */
679 bool
680 hasAtomicOpFunctor()
681 {
682 return (bool)atomicOpFunctor;
683 }
684
685 AtomicOpFunctor *
686 getAtomicOpFunctor()
687 {
688 assert(atomicOpFunctor);
689 return atomicOpFunctor.get();
690 }
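    /*
     * Illustrative sketch (not in the original header): attaching an
     * atomic-add functor to a request. This assumes the
     * AtomicOpFunctor interface from base/amo.hh, i.e. an
     * operator()(uint8_t *) applied to the in-memory value plus a
     * clone() used when requests are copied; the addresses and IDs
     * below are placeholders.
     *
     * @code
     * struct AtomicAdd32 : public AtomicOpFunctor
     * {
     *     uint32_t addend;
     *     AtomicAdd32(uint32_t a) : addend(a) {}
     *     void operator()(uint8_t *mem) override
     *     { *reinterpret_cast<uint32_t *>(mem) += addend; }
     *     AtomicOpFunctor *clone() override { return new AtomicAdd32(*this); }
     * };
     *
     * req->setVirt(0, 0x2000, 4, Request::ATOMIC_RETURN_OP,
     *              Request::funcMasterId, 0x400000,
     *              std::make_unique<AtomicAdd32>(1));
     * @endcode
     */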
691
692 /** Accessor for flags. */
693 Flags
694 getFlags()
695 {
696 assert(privateFlags.isSet(VALID_PADDR|VALID_VADDR));
697 return _flags;
698 }
699
700 /** Note that unlike other accessors, this function sets *specific
701 flags* (ORs them in); it does not assign its argument to the
702 _flags field. Thus this method should rightly be called
703 setFlags() and not just flags(). */
704 void
705 setFlags(Flags flags)
706 {
707 assert(privateFlags.isSet(VALID_PADDR|VALID_VADDR));
708 _flags.set(flags);
709 }
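    /*
     * Illustrative sketch (not in the original header): because
     * setFlags() ORs its argument in, flags accumulate across calls
     * instead of replacing each other:
     *
     * @code
     * req->setFlags(Request::UNCACHEABLE);
     * req->setFlags(Request::STRICT_ORDER);
     * assert(req->isUncacheable() && req->isStrictlyOrdered());
     * @endcode
     */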
710
711 void
712 setMemSpaceConfigFlags(MemSpaceConfigFlags extraFlags)
713 {
714 assert(privateFlags.isSet(VALID_PADDR | VALID_VADDR));
715 _memSpaceConfigFlags.set(extraFlags);
716 }
717
718 /** Accessor function for vaddr.*/
719 bool
720 hasVaddr() const
721 {
722 return privateFlags.isSet(VALID_VADDR);
723 }
724
725 Addr
726 getVaddr() const
727 {
728 assert(privateFlags.isSet(VALID_VADDR));
729 return _vaddr;
730 }
731
732  /** Accessor for the requestor id. */
733 MasterID
734 masterId() const
735 {
736 return _masterId;
737 }
738
739 uint32_t
740 taskId() const
741 {
742 return _taskId;
743 }
744
745 void
746 taskId(uint32_t id) {
747 _taskId = id;
748 }
749
750 /** Accessor function for asid.*/
751 uint64_t
752 getAsid() const
753 {
754 assert(privateFlags.isSet(VALID_VADDR));
755 return _asid;
756 }
757
758 /** Accessor function for asid.*/
759 void
760 setAsid(uint64_t asid)
761 {
762 _asid = asid;
763 }
764
765 /** Accessor function for architecture-specific flags.*/
766 ArchFlagsType
767 getArchFlags() const
768 {
769 assert(privateFlags.isSet(VALID_PADDR|VALID_VADDR));
770 return _flags & ARCH_BITS;
771 }
772
773 /** Accessor function to check if sc result is valid. */
774 bool
775 extraDataValid() const
776 {
777 return privateFlags.isSet(VALID_EXTRA_DATA);
778 }
779
780 /** Accessor function for store conditional return value.*/
781 uint64_t
782 getExtraData() const
783 {
784 assert(privateFlags.isSet(VALID_EXTRA_DATA));
785 return _extraData;
786 }
787
788 /** Accessor function for store conditional return value.*/
789 void
790 setExtraData(uint64_t extraData)
791 {
792 _extraData = extraData;
793 privateFlags.set(VALID_EXTRA_DATA);
794 }
795
796 bool
797 hasContextId() const
798 {
799 return privateFlags.isSet(VALID_CONTEXT_ID);
800 }
801
802 /** Accessor function for context ID.*/
803 ContextID
804 contextId() const
805 {
806 assert(privateFlags.isSet(VALID_CONTEXT_ID));
807 return _contextId;
808 }
809
810 uint32_t
811 streamId() const
812 {
813 assert(privateFlags.isSet(VALID_STREAM_ID));
814 return _streamId;
815 }
816
817 bool
818 hasSubstreamId() const
819 {
820 return privateFlags.isSet(VALID_SUBSTREAM_ID);
821 }
822
823 uint32_t
824 substreamId() const
825 {
826 assert(privateFlags.isSet(VALID_SUBSTREAM_ID));
827 return _substreamId;
828 }
829
830 void
831 setPC(Addr pc)
832 {
833 privateFlags.set(VALID_PC);
834 _pc = pc;
835 }
836
837 bool
838 hasPC() const
839 {
840 return privateFlags.isSet(VALID_PC);
841 }
842
843 /** Accessor function for pc.*/
844 Addr
845 getPC() const
846 {
847 assert(privateFlags.isSet(VALID_PC));
848 return _pc;
849 }
850
851 /**
852 * Increment/Get the depth at which this request is responded to.
853 * This currently happens when the request misses in any cache level.
854 */
855 void incAccessDepth() const { depth++; }
856 int getAccessDepth() const { return depth; }
857
858 /**
859 * Set/Get the time taken for this request to be successfully translated.
860 */
861 void setTranslateLatency() { translateDelta = curTick() - _time; }
862 Tick getTranslateLatency() const { return translateDelta; }
863
864 /**
865 * Set/Get the time taken to complete this request's access, not including
866 * the time to successfully translate the request.
867 */
868 void setAccessLatency() { accessDelta = curTick() - _time - translateDelta; }
869 Tick getAccessLatency() const { return accessDelta; }
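    /*
     * Illustrative sketch (not in the original header) of the intended
     * bookkeeping, where _time is the tick at which the request was
     * created. For a request created at tick 100 whose translation
     * finishes at tick 130 and whose access completes at tick 180:
     *
     * @code
     * // at tick 130: translateDelta = 130 - 100 = 30
     * req->setTranslateLatency();
     * // at tick 180: accessDelta = 180 - 100 - 30 = 50
     * req->setAccessLatency();
     * @endcode
     */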
870
871 /**
872 * Accessor for the sequence number of instruction that creates the
873 * request.
874 */
875 bool
876 hasInstSeqNum() const
877 {
878 return privateFlags.isSet(VALID_INST_SEQ_NUM);
879 }
880
881 InstSeqNum
882 getReqInstSeqNum() const
883 {
884 assert(privateFlags.isSet(VALID_INST_SEQ_NUM));
885 return _reqInstSeqNum;
886 }
887
888 void
889 setReqInstSeqNum(const InstSeqNum seq_num)
890 {
891 privateFlags.set(VALID_INST_SEQ_NUM);
892 _reqInstSeqNum = seq_num;
893 }
894
895 /** Accessor functions for flags. Note that these are for testing
896 only; setting flags should be done via setFlags(). */
897 bool isUncacheable() const { return _flags.isSet(UNCACHEABLE); }
898 bool isStrictlyOrdered() const { return _flags.isSet(STRICT_ORDER); }
899 bool isInstFetch() const { return _flags.isSet(INST_FETCH); }
900 bool isPrefetch() const { return (_flags.isSet(PREFETCH) ||
901 _flags.isSet(PF_EXCLUSIVE)); }
902 bool isPrefetchEx() const { return _flags.isSet(PF_EXCLUSIVE); }
903 bool isLLSC() const { return _flags.isSet(LLSC); }
904 bool isPriv() const { return _flags.isSet(PRIVILEGED); }
905 bool isLockedRMW() const { return _flags.isSet(LOCKED_RMW); }
906 bool isSwap() const { return _flags.isSet(MEM_SWAP|MEM_SWAP_COND); }
907 bool isCondSwap() const { return _flags.isSet(MEM_SWAP_COND); }
908 bool isMmappedIpr() const { return _flags.isSet(MMAPPED_IPR); }
909 bool isSecure() const { return _flags.isSet(SECURE); }
910 bool isPTWalk() const { return _flags.isSet(PT_WALK); }
911 bool isAcquire() const { return _flags.isSet(ACQUIRE); }
912 bool isRelease() const { return _flags.isSet(RELEASE); }
913 bool isKernel() const { return _flags.isSet(KERNEL); }
914 bool isAtomicReturn() const { return _flags.isSet(ATOMIC_RETURN_OP); }
915 bool isAtomicNoReturn() const { return _flags.isSet(ATOMIC_NO_RETURN_OP); }
916
917 bool
918 isAtomic() const
919 {
920 return _flags.isSet(ATOMIC_RETURN_OP) ||
921 _flags.isSet(ATOMIC_NO_RETURN_OP);
922 }
923
924 /**
925 * Accessor functions for the destination of a memory request. The
926 * destination flag can specify a point of reference for the
927  * operation (e.g. a cache block clean to the point of
928 * unification). At the moment the destination is only used by the
929 * cache maintenance operations.
930 */
931 bool isToPOU() const { return _flags.isSet(DST_POU); }
932 bool isToPOC() const { return _flags.isSet(DST_POC); }
933 Flags getDest() const { return _flags & DST_BITS; }
934
935 /**
936  * Accessor functions for the memory space configuration flags, used by
937 * GPU ISAs such as the Heterogeneous System Architecture (HSA). Note that
938 * these are for testing only; setting extraFlags should be done via
939 * setMemSpaceConfigFlags().
940 */
941 bool isScoped() const { return _memSpaceConfigFlags.isSet(SCOPE_VALID); }
942
943 bool
944 isWavefrontScope() const
945 {
946 assert(isScoped());
947 return _memSpaceConfigFlags.isSet(WAVEFRONT_SCOPE);
948 }
949
950 bool
951 isWorkgroupScope() const
952 {
953 assert(isScoped());
954 return _memSpaceConfigFlags.isSet(WORKGROUP_SCOPE);
955 }
956
957 bool
958 isDeviceScope() const
959 {
960 assert(isScoped());
961 return _memSpaceConfigFlags.isSet(DEVICE_SCOPE);
962 }
963
964 bool
965 isSystemScope() const
966 {
967 assert(isScoped());
968 return _memSpaceConfigFlags.isSet(SYSTEM_SCOPE);
969 }
970
971 bool
972 isGlobalSegment() const
973 {
974 return _memSpaceConfigFlags.isSet(GLOBAL_SEGMENT) ||
975 (!isGroupSegment() && !isPrivateSegment() &&
976 !isKernargSegment() && !isReadonlySegment() &&
977 !isSpillSegment() && !isArgSegment());
978 }
979
980 bool
981 isGroupSegment() const
982 {
983 return _memSpaceConfigFlags.isSet(GROUP_SEGMENT);
984 }
985
986 bool
987 isPrivateSegment() const
988 {
989 return _memSpaceConfigFlags.isSet(PRIVATE_SEGMENT);
990 }
991
992 bool
993 isKernargSegment() const
994 {
995 return _memSpaceConfigFlags.isSet(KERNARG_SEGMENT);
996 }
997
998 bool
999 isReadonlySegment() const
1000 {
1001 return _memSpaceConfigFlags.isSet(READONLY_SEGMENT);
1002 }
1003
1004 bool
1005 isSpillSegment() const
1006 {
1007 return _memSpaceConfigFlags.isSet(SPILL_SEGMENT);
1008 }
1009
1010 bool
1011 isArgSegment() const
1012 {
1013 return _memSpaceConfigFlags.isSet(ARG_SEGMENT);
1014 }
1015
1016 /**
1017 * Accessor functions to determine whether this request is part of
1018 * a cache maintenance operation. At the moment three operations
1019 * are supported:
1020  *
1021 * 1) A cache clean operation updates all copies of a memory
1022 * location to the point of reference,
1023 * 2) A cache invalidate operation invalidates all copies of the
1024 * specified block in the memory above the point of reference,
1025 * 3) A clean and invalidate operation is a combination of the two
1026 * operations.
1027 * @{ */
1028 bool isCacheClean() const { return _flags.isSet(CLEAN); }
1029 bool isCacheInvalidate() const { return _flags.isSet(INVALIDATE); }
1030 bool isCacheMaintenance() const { return _flags.isSet(CLEAN|INVALIDATE); }
1031 /** @} */
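    /*
     * Illustrative sketch (not in the original header): a
     * clean-and-invalidate to the point of coherence sets both
     * operation bits and a destination bit:
     *
     * @code
     * req->setFlags(Request::CLEAN | Request::INVALIDATE |
     *               Request::DST_POC);
     * assert(req->isCacheMaintenance() && req->isToPOC());
     * @endcode
     */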
1032 };
1033
1034 #endif // __MEM_REQUEST_HH__