/*
 * Copyright (c) 2012-2013 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * Copyright (c) 2010,2015 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Ron Dreslinski
 *          Steve Reinhardt
 *          Ali Saidi
 */

/**
 * @file
 * Declaration of a request, the overall memory request consisting of
 * the parts of the request that are persistent throughout the transaction.
 */

#ifndef __MEM_REQUEST_HH__
#define __MEM_REQUEST_HH__

#include <cassert>
#include <climits>
#include <limits>

#include "base/flags.hh"
#include "base/misc.hh"
#include "base/types.hh"
#include "cpu/inst_seq.hh"
#include "sim/core.hh"

/**
 * Special TaskIds that are used for per-context-switch stats dumps
 * and Cache Occupancy. Having too many tasks seems to be a problem
 * with vector stats. 1024 seems to be a reasonable number that
 * doesn't cause a problem with stats and is large enough for
 * realistic benchmarks (Linux/Android boot, BBench, etc.)
 */

namespace ContextSwitchTaskId {
    enum TaskId {
        MaxNormalTaskId = 1021, /* Maximum number of normal tasks */
        Prefetcher = 1022, /* For cache lines brought in by prefetcher */
        DMA = 1023, /* Mostly Table Walker */
        Unknown = 1024,
        NumTaskId
    };
}
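
/*
 * Illustrative sketch (not part of this header's interface): a request can
 * be tagged with one of the TaskIds above so that per-task stats attribute
 * the access correctly. The paddr and master id values below are
 * placeholders, not values defined in this file.
 *
 *     RequestPtr req = new Request(paddr, 8, Request::PT_WALK, walkerMasterId);
 *     req->taskId(ContextSwitchTaskId::DMA);  // table-walker traffic
 */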

class Request;

typedef Request* RequestPtr;
typedef uint16_t MasterID;

class Request
{
  public:
    typedef uint32_t FlagsType;
    typedef uint8_t ArchFlagsType;
    typedef ::Flags<FlagsType> Flags;

    enum : FlagsType {
        /**
         * Architecture specific flags.
         *
         * These bits in the flag field are reserved for
         * architecture-specific code. For example, SPARC uses them to
         * represent ASIs.
         */
        ARCH_BITS = 0x000000FF,
        /** The request was an instruction fetch. */
        INST_FETCH = 0x00000100,
        /** The virtual address is also the physical address. */
        PHYSICAL = 0x00000200,
        /**
         * The request is to an uncacheable address.
         *
         * @note Uncacheable accesses may be reordered by CPU models. The
         * STRICT_ORDER flag should be set if such reordering is
         * undesirable.
         */
        UNCACHEABLE = 0x00000400,
        /**
         * The request is required to be strictly ordered by <i>CPU
         * models</i> and is non-speculative.
         *
         * A strictly ordered request is guaranteed to never be
         * re-ordered or executed speculatively by a CPU model. The
         * memory system may still reorder requests in caches unless
         * the UNCACHEABLE flag is set as well.
         */
        STRICT_ORDER = 0x00000800,
        /** This request is to a memory mapped register. */
        MMAPPED_IPR = 0x00002000,
        /** This request is made in privileged mode. */
        PRIVILEGED = 0x00008000,

        /**
         * This is a write that is targeted at zeroing an entire
         * cache block, so there is no need for a read/modify/write.
         */
        CACHE_BLOCK_ZERO = 0x00010000,

        /** The request should not cause a memory access. */
        NO_ACCESS = 0x00080000,
        /**
         * This request will lock or unlock the accessed memory. When
         * used with a load, the access locks the particular chunk of
         * memory. When used with a store, it unlocks. The rule is
         * that locked accesses have to be made up of a locked load,
         * some operation on the data, and then a locked store.
         */
        LOCKED_RMW = 0x00100000,
        /** The request is a load locked/store conditional. */
        LLSC = 0x00200000,
        /** This request is for a memory swap. */
        MEM_SWAP = 0x00400000,
        MEM_SWAP_COND = 0x00800000,

        /** The request is a prefetch. */
        PREFETCH = 0x01000000,
        /** The request should be prefetched into the exclusive state. */
        PF_EXCLUSIVE = 0x02000000,
        /** The request should be marked as LRU. */
        EVICT_NEXT = 0x04000000,
        /** The request should be marked with ACQUIRE. */
        ACQUIRE = 0x00020000,
        /** The request should be marked with RELEASE. */
        RELEASE = 0x00040000,

        /**
         * The request should be handled by the generic IPR code (only
         * valid together with MMAPPED_IPR).
         */
        GENERIC_IPR = 0x08000000,

        /** The request targets the secure memory space. */
        SECURE = 0x10000000,
        /** The request is a page table walk. */
        PT_WALK = 0x20000000,

        /**
         * These flags are *not* cleared when a Request object is
         * reused (assigned a new address).
         */
        STICKY_FLAGS = INST_FETCH
    };
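
    /*
     * Illustrative sketch of combining the public flags above when
     * building a request (the address, size and master id are
     * placeholders, not values defined in this file):
     *
     *     Request::Flags flags = Request::UNCACHEABLE | Request::STRICT_ORDER;
     *     RequestPtr req = new Request(0x10000000, 4, flags, someMasterId);
     *     assert(req->isUncacheable() && req->isStrictlyOrdered());
     */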

    /** Master Ids that are statically allocated
     * @{*/
    enum : MasterID {
        /** This master id is used for writeback requests by the caches */
        wbMasterId = 0,
        /**
         * This master id is used for functional requests that
         * don't come from a particular device
         */
        funcMasterId = 1,
        /** This master id is used for message signaled interrupts */
        intMasterId = 2,
        /**
         * Invalid master id for assertion checking only. It is
         * invalid behavior to ever send this id as part of a request.
         */
        invldMasterId = std::numeric_limits<MasterID>::max()
    };
    /** @} */

  private:
    typedef uint8_t PrivateFlagsType;
    typedef ::Flags<PrivateFlagsType> PrivateFlags;

    enum : PrivateFlagsType {
        /** Whether or not the size is valid. */
        VALID_SIZE = 0x00000001,
        /** Whether or not paddr is valid (has been written yet). */
        VALID_PADDR = 0x00000002,
        /** Whether or not the vaddr & asid are valid. */
        VALID_VADDR = 0x00000004,
        /** Whether or not the instruction sequence number is valid. */
        VALID_INST_SEQ_NUM = 0x00000008,
        /** Whether or not the pc is valid. */
        VALID_PC = 0x00000010,
        /** Whether or not the context ID is valid. */
        VALID_CONTEXT_ID = 0x00000020,
        /** Whether or not the thread ID is valid. */
        VALID_THREAD_ID = 0x00000040,
        /** Whether or not the sc result is valid. */
        VALID_EXTRA_DATA = 0x00000080,
        /**
         * These flags are *not* cleared when a Request object is reused
         * (assigned a new address).
         */
        STICKY_PRIVATE_FLAGS = VALID_CONTEXT_ID | VALID_THREAD_ID
    };

  private:

    /**
     * Set up a physical (e.g. device) request in a previously
     * allocated Request object.
     */
    void
    setPhys(Addr paddr, unsigned size, Flags flags, MasterID mid, Tick time)
    {
        _paddr = paddr;
        _size = size;
        _time = time;
        _masterId = mid;
        _flags.clear(~STICKY_FLAGS);
        _flags.set(flags);
        privateFlags.clear(~STICKY_PRIVATE_FLAGS);
        privateFlags.set(VALID_PADDR|VALID_SIZE);
        depth = 0;
        accessDelta = 0;
        //translateDelta = 0;
    }

    /**
     * The physical address of the request. Valid only if validPaddr
     * is set.
     */
    Addr _paddr;

    /**
     * The size of the request. This field must be set when vaddr or
     * paddr is written via setVirt() or setPhys(), so it is always
     * valid as long as one of the address fields is valid.
     */
    unsigned _size;

    /** The requestor ID which is unique in the system for all ports
     * that are capable of issuing a transaction
     */
    MasterID _masterId;

    /** Flag structure for the request. */
    Flags _flags;

    /** Private flags for field validity checking. */
    PrivateFlags privateFlags;

    /**
     * The time this request was started. Used to calculate
     * latencies. This field is set to curTick() any time paddr or vaddr
     * is written.
     */
    Tick _time;

    /**
     * The task id associated with this request
     */
    uint32_t _taskId;

    /** The address space ID. */
    int _asid;

    /** The virtual address of the request. */
    Addr _vaddr;

    /**
     * Extra data for the request, such as the return value of
     * store conditional or the compare value for a CAS. */
    uint64_t _extraData;

    /** The context ID (for statistics, typically). */
    ContextID _contextId;
    /** The thread ID (id within this CPU) */
    ThreadID _threadId;

    /** program counter of initiating access; for tracing/debugging */
    Addr _pc;

    /** Sequence number of the instruction that creates the request */
    InstSeqNum _reqInstSeqNum;

  public:

    /**
     * Minimal constructor. No fields are initialized. (Note that
     * _flags and privateFlags are cleared by the Flags default
     * constructor.)
     */
    Request()
        : _paddr(0), _size(0), _masterId(invldMasterId), _time(0),
          _taskId(ContextSwitchTaskId::Unknown), _asid(0), _vaddr(0),
          _extraData(0), _contextId(0), _threadId(0), _pc(0),
          _reqInstSeqNum(0), translateDelta(0), accessDelta(0), depth(0)
    {}

    Request(Addr paddr, unsigned size, Flags flags, MasterID mid,
            InstSeqNum seq_num, ContextID cid, ThreadID tid)
        : _paddr(0), _size(0), _masterId(invldMasterId), _time(0),
          _taskId(ContextSwitchTaskId::Unknown), _asid(0), _vaddr(0),
          _extraData(0), _contextId(0), _threadId(0), _pc(0),
          _reqInstSeqNum(seq_num), translateDelta(0), accessDelta(0), depth(0)
    {
        setPhys(paddr, size, flags, mid, curTick());
        setThreadContext(cid, tid);
        privateFlags.set(VALID_INST_SEQ_NUM);
    }

    /**
     * Constructor for physical (e.g. device) requests. Initializes
     * just physical address, size, flags, and timestamp (to curTick()).
     * These fields are adequate to perform a request.
     */
    Request(Addr paddr, unsigned size, Flags flags, MasterID mid)
        : _paddr(0), _size(0), _masterId(invldMasterId), _time(0),
          _taskId(ContextSwitchTaskId::Unknown), _asid(0), _vaddr(0),
          _extraData(0), _contextId(0), _threadId(0), _pc(0),
          _reqInstSeqNum(0), translateDelta(0), accessDelta(0), depth(0)
    {
        setPhys(paddr, size, flags, mid, curTick());
    }
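
    /*
     * Illustrative sketch of the physical-request constructor above,
     * e.g. for a DMA device access (the address, size and master id
     * are placeholders):
     *
     *     RequestPtr req = new Request(0x80001000, 64, 0, dmaMasterId);
     *     assert(req->hasPaddr() && !req->hasVaddr() && req->getSize() == 64);
     */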

    Request(Addr paddr, unsigned size, Flags flags, MasterID mid, Tick time)
        : _paddr(0), _size(0), _masterId(invldMasterId), _time(0),
          _taskId(ContextSwitchTaskId::Unknown), _asid(0), _vaddr(0),
          _extraData(0), _contextId(0), _threadId(0), _pc(0),
          _reqInstSeqNum(0), translateDelta(0), accessDelta(0), depth(0)
    {
        setPhys(paddr, size, flags, mid, time);
    }

    Request(Addr paddr, unsigned size, Flags flags, MasterID mid, Tick time,
            Addr pc)
        : _paddr(0), _size(0), _masterId(invldMasterId), _time(0),
          _taskId(ContextSwitchTaskId::Unknown), _asid(0), _vaddr(0),
          _extraData(0), _contextId(0), _threadId(0), _pc(0),
          _reqInstSeqNum(0), translateDelta(0), accessDelta(0), depth(0)
    {
        setPhys(paddr, size, flags, mid, time);
        privateFlags.set(VALID_PC);
        _pc = pc;
    }

    Request(int asid, Addr vaddr, unsigned size, Flags flags, MasterID mid,
            Addr pc, ContextID cid, ThreadID tid)
        : _paddr(0), _size(0), _masterId(invldMasterId), _time(0),
          _taskId(ContextSwitchTaskId::Unknown), _asid(0), _vaddr(0),
          _extraData(0), _contextId(0), _threadId(0), _pc(0),
          _reqInstSeqNum(0), translateDelta(0), accessDelta(0), depth(0)
    {
        setVirt(asid, vaddr, size, flags, mid, pc);
        setThreadContext(cid, tid);
    }

    ~Request() {}

    /**
     * Set up CPU and thread numbers.
     */
    void
    setThreadContext(ContextID context_id, ThreadID tid)
    {
        _contextId = context_id;
        _threadId = tid;
        privateFlags.set(VALID_CONTEXT_ID|VALID_THREAD_ID);
    }

    /**
     * Set up a virtual (e.g., CPU) request in a previously
     * allocated Request object.
     */
    void
    setVirt(int asid, Addr vaddr, unsigned size, Flags flags, MasterID mid,
            Addr pc)
    {
        _asid = asid;
        _vaddr = vaddr;
        _size = size;
        _masterId = mid;
        _pc = pc;
        _time = curTick();

        _flags.clear(~STICKY_FLAGS);
        _flags.set(flags);
        privateFlags.clear(~STICKY_PRIVATE_FLAGS);
        privateFlags.set(VALID_VADDR|VALID_SIZE|VALID_PC);
        depth = 0;
        accessDelta = 0;
        translateDelta = 0;
    }
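
    /*
     * Illustrative sketch of reusing an allocated Request for a new CPU
     * access via setVirt(); the sticky flags and valid bits are kept while
     * everything else is reset (all argument values here are placeholders):
     *
     *     req->setVirt(asid, vaddr, 8, 0, cpuMasterId, pc);
     *     assert(req->hasVaddr() && !req->hasPaddr());
     */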

    /**
     * Set just the physical address. This is usually used to record the
     * result of a translation. However, when using virtualized CPUs
     * setPhys() is sometimes called to finalize a physical address
     * without a virtual address, so we can't check if the virtual
     * address is valid.
     */
    void
    setPaddr(Addr paddr)
    {
        _paddr = paddr;
        privateFlags.set(VALID_PADDR);
    }
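
    /*
     * Illustrative sketch of the usual translation flow: build a virtual
     * request, translate it, then record the result with setPaddr() (the
     * translate() call is a hypothetical stand-in for the TLB/table
     * walker):
     *
     *     req->setVirt(asid, vaddr, size, flags, masterId, pc);
     *     Addr paddr = tlb->translate(req);  // hypothetical helper
     *     req->setPaddr(paddr);
     */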

    /**
     * Generate two requests as if this request had been split into two
     * pieces. The original request can't have been translated already.
     */
    void splitOnVaddr(Addr split_addr, RequestPtr &req1, RequestPtr &req2)
    {
        assert(privateFlags.isSet(VALID_VADDR));
        assert(privateFlags.noneSet(VALID_PADDR));
        assert(split_addr > _vaddr && split_addr < _vaddr + _size);
        req1 = new Request(*this);
        req2 = new Request(*this);
        req1->_size = split_addr - _vaddr;
        req2->_vaddr = split_addr;
        req2->_size = _size - req1->_size;
    }
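
    /*
     * Illustrative sketch of splitting an access that straddles a
     * cache-line boundary (blk_size and the boundary computation are
     * placeholders for whatever the caller uses):
     *
     *     RequestPtr req1, req2;
     *     Addr split = (req->getVaddr() / blk_size + 1) * blk_size;
     *     req->splitOnVaddr(split, req1, req2);
     */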

    /**
     * Accessor for paddr.
     */
    bool
    hasPaddr() const
    {
        return privateFlags.isSet(VALID_PADDR);
    }

    Addr
    getPaddr() const
    {
        assert(privateFlags.isSet(VALID_PADDR));
        return _paddr;
    }

    /**
     * Time for the TLB/table walker to successfully translate this request.
     */
    Tick translateDelta;

    /**
     * Access latency to complete this memory transaction not including
     * translation time.
     */
    Tick accessDelta;

    /**
     * Level of the cache hierarchy where this request was responded to
     * (e.g. 0 = L1; 1 = L2).
     */
    mutable int depth;

    /**
     * Accessor for size.
     */
    bool
    hasSize() const
    {
        return privateFlags.isSet(VALID_SIZE);
    }

    unsigned
    getSize() const
    {
        assert(privateFlags.isSet(VALID_SIZE));
        return _size;
    }

    /** Accessor for time. */
    Tick
    time() const
    {
        assert(privateFlags.isSet(VALID_PADDR|VALID_VADDR));
        return _time;
    }

    /** Accessor for flags. */
    Flags
    getFlags()
    {
        assert(privateFlags.isSet(VALID_PADDR|VALID_VADDR));
        return _flags;
    }

    /** Note that unlike other accessors, this function sets *specific
        flags* (ORs them in); it does not assign its argument to the
        _flags field. Thus this method should rightly be called
        setFlags() and not just flags(). */
    void
    setFlags(Flags flags)
    {
        assert(privateFlags.isSet(VALID_PADDR|VALID_VADDR));
        _flags.set(flags);
    }
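
    /*
     * As the note above says, setFlags() ORs its argument in rather than
     * overwriting the flags field; a brief sketch:
     *
     *     req->setFlags(Request::UNCACHEABLE);
     *     req->setFlags(Request::SECURE);  // UNCACHEABLE remains set
     *     assert(req->isUncacheable() && req->isSecure());
     */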

    /** Accessor function for vaddr. */
    bool
    hasVaddr() const
    {
        return privateFlags.isSet(VALID_VADDR);
    }

    Addr
    getVaddr() const
    {
        assert(privateFlags.isSet(VALID_VADDR));
        return _vaddr;
    }

    /** Accessor for the requestor id. */
    MasterID
    masterId() const
    {
        return _masterId;
    }

    uint32_t
    taskId() const
    {
        return _taskId;
    }

    void
    taskId(uint32_t id)
    {
        _taskId = id;
    }

    /** Accessor function for asid. */
    int
    getAsid() const
    {
        assert(privateFlags.isSet(VALID_VADDR));
        return _asid;
    }

    /** Accessor function for asid. */
    void
    setAsid(int asid)
    {
        _asid = asid;
    }

    /** Accessor function for architecture-specific flags. */
    ArchFlagsType
    getArchFlags() const
    {
        assert(privateFlags.isSet(VALID_PADDR|VALID_VADDR));
        return _flags & ARCH_BITS;
    }

    /** Accessor function to check if sc result is valid. */
    bool
    extraDataValid() const
    {
        return privateFlags.isSet(VALID_EXTRA_DATA);
    }

    /** Accessor function for store conditional return value. */
    uint64_t
    getExtraData() const
    {
        assert(privateFlags.isSet(VALID_EXTRA_DATA));
        return _extraData;
    }

    /** Accessor function for store conditional return value. */
    void
    setExtraData(uint64_t extraData)
    {
        _extraData = extraData;
        privateFlags.set(VALID_EXTRA_DATA);
    }
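
    /*
     * Illustrative sketch of how the extra data is used for a store
     * conditional: the memory system records whether the SC succeeded and
     * the CPU model reads the result back (the 0/1 convention here is only
     * an example):
     *
     *     if (req->isLLSC())
     *         req->setExtraData(success ? 1 : 0);
     *     ...
     *     uint64_t sc_result = req->extraDataValid() ? req->getExtraData() : 0;
     */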

    bool
    hasContextId() const
    {
        return privateFlags.isSet(VALID_CONTEXT_ID);
    }

    /** Accessor function for context ID. */
    ContextID
    contextId() const
    {
        assert(privateFlags.isSet(VALID_CONTEXT_ID));
        return _contextId;
    }

    /** Accessor function for thread ID. */
    ThreadID
    threadId() const
    {
        assert(privateFlags.isSet(VALID_THREAD_ID));
        return _threadId;
    }

    void
    setPC(Addr pc)
    {
        privateFlags.set(VALID_PC);
        _pc = pc;
    }

    bool
    hasPC() const
    {
        return privateFlags.isSet(VALID_PC);
    }

    /** Accessor function for pc. */
    Addr
    getPC() const
    {
        assert(privateFlags.isSet(VALID_PC));
        return _pc;
    }

    /**
     * Increment/Get the depth at which this request is responded to.
     * This currently happens when the request misses in any cache level.
     */
    void incAccessDepth() const { depth++; }
    int getAccessDepth() const { return depth; }

    /**
     * Set/Get the time taken for this request to be successfully translated.
     */
    void setTranslateLatency() { translateDelta = curTick() - _time; }
    Tick getTranslateLatency() const { return translateDelta; }

    /**
     * Set/Get the time taken to complete this request's access, not including
     * the time to successfully translate the request.
     */
    void setAccessLatency() { accessDelta = curTick() - _time - translateDelta; }
    Tick getAccessLatency() const { return accessDelta; }
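
    /*
     * Illustrative sketch of how the latency hooks above are intended to be
     * called around a timing access (the surrounding steps are
     * hypothetical):
     *
     *     req->setVirt(asid, vaddr, size, flags, masterId, pc); // _time = curTick()
     *     // ... translation completes ...
     *     req->setTranslateLatency();
     *     // ... memory access completes ...
     *     req->setAccessLatency();
     *     Tick total = req->getTranslateLatency() + req->getAccessLatency();
     */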

    /**
     * Accessor for the sequence number of the instruction that creates
     * the request.
     */
    bool
    hasInstSeqNum() const
    {
        return privateFlags.isSet(VALID_INST_SEQ_NUM);
    }

    InstSeqNum
    getReqInstSeqNum() const
    {
        assert(privateFlags.isSet(VALID_INST_SEQ_NUM));
        return _reqInstSeqNum;
    }

    void
    setReqInstSeqNum(const InstSeqNum seq_num)
    {
        privateFlags.set(VALID_INST_SEQ_NUM);
        _reqInstSeqNum = seq_num;
    }

    /** Accessor functions for flags. Note that these are for testing
        only; setting flags should be done via setFlags(). */
    bool isUncacheable() const { return _flags.isSet(UNCACHEABLE); }
    bool isStrictlyOrdered() const { return _flags.isSet(STRICT_ORDER); }
    bool isInstFetch() const { return _flags.isSet(INST_FETCH); }
    bool isPrefetch() const { return _flags.isSet(PREFETCH); }
    bool isLLSC() const { return _flags.isSet(LLSC); }
    bool isPriv() const { return _flags.isSet(PRIVILEGED); }
    bool isLockedRMW() const { return _flags.isSet(LOCKED_RMW); }
    bool isSwap() const { return _flags.isSet(MEM_SWAP|MEM_SWAP_COND); }
    bool isCondSwap() const { return _flags.isSet(MEM_SWAP_COND); }
    bool isMmappedIpr() const { return _flags.isSet(MMAPPED_IPR); }
    bool isSecure() const { return _flags.isSet(SECURE); }
    bool isPTWalk() const { return _flags.isSet(PT_WALK); }
    bool isAcquire() const { return _flags.isSet(ACQUIRE); }
    bool isRelease() const { return _flags.isSet(RELEASE); }
};

#endif // __MEM_REQUEST_HH__