cpu: Support virtual addr in elastic traces
[gem5.git] / src / mem / request.hh
1 /*
2 * Copyright (c) 2012-2013 ARM Limited
3 * All rights reserved
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
13 *
14 * Copyright (c) 2002-2005 The Regents of The University of Michigan
15 * Copyright (c) 2010,2015 Advanced Micro Devices, Inc.
16 * All rights reserved.
17 *
18 * Redistribution and use in source and binary forms, with or without
19 * modification, are permitted provided that the following conditions are
20 * met: redistributions of source code must retain the above copyright
21 * notice, this list of conditions and the following disclaimer;
22 * redistributions in binary form must reproduce the above copyright
23 * notice, this list of conditions and the following disclaimer in the
24 * documentation and/or other materials provided with the distribution;
25 * neither the name of the copyright holders nor the names of its
26 * contributors may be used to endorse or promote products derived from
27 * this software without specific prior written permission.
28 *
29 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
30 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
31 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
32 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
33 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
34 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
35 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
36 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
37 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
38 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
39 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
40 *
41 * Authors: Ron Dreslinski
42 * Steve Reinhardt
43 * Ali Saidi
44 */
45
46 /**
47 * @file
48 * Declaration of a request, the overall memory request consisting of
49 * the parts of the request that are persistent throughout the transaction.
50 */
51
52 #ifndef __MEM_REQUEST_HH__
53 #define __MEM_REQUEST_HH__
54
55 #include <cassert>
56 #include <climits>
#include <limits> // for std::numeric_limits
57
58 #include "base/flags.hh"
59 #include "base/misc.hh"
60 #include "base/types.hh"
61 #include "cpu/inst_seq.hh"
62 #include "sim/core.hh"
63
64 /**
65 * Special TaskIds that are used for per-context-switch stats dumps
66 * and Cache Occupancy. Having too many tasks seems to be a problem
67 * with vector stats. 1024 seems to be a reasonable number that
68 * doesn't cause a problem with stats and is large enough for realistic
69 * benchmarks (Linux/Android boot, BBench, etc.)
70 */
71
72 namespace ContextSwitchTaskId {
73 enum TaskId {
74 MaxNormalTaskId = 1021, /* Maximum number of normal tasks */
75 Prefetcher = 1022, /* For cache lines brought in by prefetcher */
76 DMA = 1023, /* Mostly Table Walker */
77 Unknown = 1024,
78 NumTaskId
79 };
80 }
81
82 class Request;
83
84 typedef Request* RequestPtr;
85 typedef uint16_t MasterID;
86
87 class Request
88 {
89 public:
90 typedef uint32_t FlagsType;
91 typedef uint8_t ArchFlagsType;
92 typedef ::Flags<FlagsType> Flags;
93
94 enum : FlagsType {
95 /**
96 * Architecture specific flags.
97 *
98 * These bits in the flag field are reserved for
99 * architecture-specific code. For example, SPARC uses them to
100 * represent ASIs.
101 */
102 ARCH_BITS = 0x000000FF,
103 /** The request was an instruction fetch. */
104 INST_FETCH = 0x00000100,
105 /** The virtual address is also the physical address. */
106 PHYSICAL = 0x00000200,
107 /**
108 * The request is to an uncacheable address.
109 *
110 * @note Uncacheable accesses may be reordered by CPU models. The
111 * STRICT_ORDER flag should be set if such reordering is
112 * undesirable.
113 */
114 UNCACHEABLE = 0x00000400,
115 /**
116 * The request is required to be strictly ordered by <i>CPU
117 * models</i> and is non-speculative.
118 *
119 * A strictly ordered request is guaranteed to never be
120 * re-ordered or executed speculatively by a CPU model. The
121 * memory system may still reorder requests in caches unless
122 * the UNCACHEABLE flag is set as well.
123 */
124 STRICT_ORDER = 0x00000800,
125 /** This request is to a memory mapped register. */
126 MMAPPED_IPR = 0x00002000,
127 /** This request is made in privileged mode. */
128 PRIVILEGED = 0x00008000,
129
130 /**
131 * This is a write that targets and zeroes an entire cache
132 * block, so there is no need for a read/modify/write.
133 */
134 CACHE_BLOCK_ZERO = 0x00010000,
135
136 /** The request should not cause a memory access. */
137 NO_ACCESS = 0x00080000,
138 /**
139 * This request will lock or unlock the accessed memory. When
140 * used with a load, the access locks the particular chunk of
141 * memory. When used with a store, it unlocks. The rule is
142 * that locked accesses have to be made up of a locked load,
143 * some operation on the data, and then a locked store.
144 */
145 LOCKED_RMW = 0x00100000,
146 /** The request is a Load locked/store conditional. */
147 LLSC = 0x00200000,
148 /** This request is for a memory swap. */
149 MEM_SWAP = 0x00400000,
150 MEM_SWAP_COND = 0x00800000,
151
152 /** The request is a prefetch. */
153 PREFETCH = 0x01000000,
154 /** The request should be prefetched into the exclusive state. */
155 PF_EXCLUSIVE = 0x02000000,
156 /** The request should be marked as LRU. */
157 EVICT_NEXT = 0x04000000,
158
159 /**
160 * The request should be handled by the generic IPR code (only
161 * valid together with MMAPPED_IPR)
162 */
163 GENERIC_IPR = 0x08000000,
164
165 /** The request targets the secure memory space. */
166 SECURE = 0x10000000,
167 /** The request is a page table walk */
168 PT_WALK = 0x20000000,
169
170 /**
171 * These flags are *not* cleared when a Request object is
172 * reused (assigned a new address).
173 */
174 STICKY_FLAGS = INST_FETCH
175 };
176
177 /** Master Ids that are statically allocated
178 * @{*/
179 enum : MasterID {
180 /** This master id is used for writeback requests by the caches */
181 wbMasterId = 0,
182 /**
183 * This master id is used for functional requests that
184 * don't come from a particular device
185 */
186 funcMasterId = 1,
187 /** This master id is used for message signaled interrupts */
188 intMasterId = 2,
189 /**
190 * Invalid master id for assertion checking only. It is
191 * invalid behavior to ever send this id as part of a request.
192 */
193 invldMasterId = std::numeric_limits<MasterID>::max()
194 };
195 /** @} */
196
197 private:
198 typedef uint8_t PrivateFlagsType;
199 typedef ::Flags<PrivateFlagsType> PrivateFlags;
200
201 enum : PrivateFlagsType {
202 /** Whether or not the size is valid. */
203 VALID_SIZE = 0x00000001,
204 /** Whether or not paddr is valid (has been written yet). */
205 VALID_PADDR = 0x00000002,
206 /** Whether or not the vaddr & asid are valid. */
207 VALID_VADDR = 0x00000004,
208 /** Whether or not the instruction sequence number is valid. */
209 VALID_INST_SEQ_NUM = 0x00000008,
210 /** Whether or not the pc is valid. */
211 VALID_PC = 0x00000010,
212 /** Whether or not the context ID is valid. */
213 VALID_CONTEXT_ID = 0x00000020,
214 VALID_THREAD_ID = 0x00000040,
215 /** Whether or not the sc result is valid. */
216 VALID_EXTRA_DATA = 0x00000080,
217 /**
218 * These flags are *not* cleared when a Request object is reused
219 * (assigned a new address).
220 */
221 STICKY_PRIVATE_FLAGS = VALID_CONTEXT_ID | VALID_THREAD_ID
222 };
223
224 private:
225
226 /**
227 * Set up a physical (e.g. device) request in a previously
228 * allocated Request object.
229 */
230 void
231 setPhys(Addr paddr, unsigned size, Flags flags, MasterID mid, Tick time)
232 {
233 assert(size >= 0);
234 _paddr = paddr;
235 _size = size;
236 _time = time;
237 _masterId = mid;
238 _flags.clear(~STICKY_FLAGS);
239 _flags.set(flags);
240 privateFlags.clear(~STICKY_PRIVATE_FLAGS);
241 privateFlags.set(VALID_PADDR|VALID_SIZE);
242 depth = 0;
243 accessDelta = 0;
244 //translateDelta = 0;
245 }
246
247 /**
248 * The physical address of the request. Valid only if VALID_PADDR
249 * is set.
250 */
251 Addr _paddr;
252
253 /**
254 * The size of the request. This field must be set when vaddr or
255 * paddr is written via setVirt() or setPhys(), so it is always
256 * valid as long as one of the address fields is valid.
257 */
258 unsigned _size;
259
260 /** The requestor ID, which is unique in the system for all ports
261 * that are capable of issuing a transaction.
262 */
263 MasterID _masterId;
264
265 /** Flag structure for the request. */
266 Flags _flags;
267
268 /** Private flags for field validity checking. */
269 PrivateFlags privateFlags;
270
271 /**
272 * The time this request was started. Used to calculate
273 * latencies. This field is set to curTick() any time paddr or vaddr
274 * is written.
275 */
276 Tick _time;
277
278 /**
279 * The task id associated with this request
280 */
281 uint32_t _taskId;
282
283 /** The address space ID. */
284 int _asid;
285
286 /** The virtual address of the request. */
287 Addr _vaddr;
288
289 /**
290 * Extra data for the request, such as the return value of
291 * store conditional or the compare value for a CAS. */
292 uint64_t _extraData;
293
294 /** The context ID (for statistics, typically). */
295 ContextID _contextId;
296 /** The thread ID (id within this CPU) */
297 ThreadID _threadId;
298
299 /** program counter of initiating access; for tracing/debugging */
300 Addr _pc;
301
302 /** Sequence number of the instruction that creates the request */
303 InstSeqNum _reqInstSeqNum;
304
305 public:
306
307 /**
308 * Minimal constructor. No fields are marked as valid (note that
309 * _flags and privateFlags are cleared by the Flags default
310 * constructor).
311 */
312 Request()
313 : _paddr(0), _size(0), _masterId(invldMasterId), _time(0),
314 _taskId(ContextSwitchTaskId::Unknown), _asid(0), _vaddr(0),
315 _extraData(0), _contextId(0), _threadId(0), _pc(0),
316 _reqInstSeqNum(0), translateDelta(0), accessDelta(0), depth(0)
317 {}
318
319 Request(Addr paddr, unsigned size, Flags flags, MasterID mid,
320 InstSeqNum seq_num, ContextID cid, ThreadID tid)
321 : _paddr(0), _size(0), _masterId(invldMasterId), _time(0),
322 _taskId(ContextSwitchTaskId::Unknown), _asid(0), _vaddr(0),
323 _extraData(0), _contextId(0), _threadId(0), _pc(0),
324 _reqInstSeqNum(seq_num), translateDelta(0), accessDelta(0), depth(0)
325 {
326 setPhys(paddr, size, flags, mid, curTick());
327 setThreadContext(cid, tid);
328 privateFlags.set(VALID_INST_SEQ_NUM);
329 }
330
331 /**
332 * Constructor for physical (e.g. device) requests. Initializes
333 * just physical address, size, flags, and timestamp (to curTick()).
334 * These fields are adequate to perform a request.
335 */
336 Request(Addr paddr, unsigned size, Flags flags, MasterID mid)
337 : _paddr(0), _size(0), _masterId(invldMasterId), _time(0),
338 _taskId(ContextSwitchTaskId::Unknown), _asid(0), _vaddr(0),
339 _extraData(0), _contextId(0), _threadId(0), _pc(0),
340 _reqInstSeqNum(0), translateDelta(0), accessDelta(0), depth(0)
341 {
342 setPhys(paddr, size, flags, mid, curTick());
343 }
344
345 Request(Addr paddr, unsigned size, Flags flags, MasterID mid, Tick time)
346 : _paddr(0), _size(0), _masterId(invldMasterId), _time(0),
347 _taskId(ContextSwitchTaskId::Unknown), _asid(0), _vaddr(0),
348 _extraData(0), _contextId(0), _threadId(0), _pc(0),
349 _reqInstSeqNum(0), translateDelta(0), accessDelta(0), depth(0)
350 {
351 setPhys(paddr, size, flags, mid, time);
352 }
353
354 Request(Addr paddr, unsigned size, Flags flags, MasterID mid, Tick time,
355 Addr pc)
356 : _paddr(0), _size(0), _masterId(invldMasterId), _time(0),
357 _taskId(ContextSwitchTaskId::Unknown), _asid(0), _vaddr(0),
358 _extraData(0), _contextId(0), _threadId(0), _pc(0),
359 _reqInstSeqNum(0), translateDelta(0), accessDelta(0), depth(0)
360 {
361 setPhys(paddr, size, flags, mid, time);
362 privateFlags.set(VALID_PC);
363 _pc = pc;
364 }
365
366 Request(int asid, Addr vaddr, unsigned size, Flags flags, MasterID mid,
367 Addr pc, ContextID cid, ThreadID tid)
368 : _paddr(0), _size(0), _masterId(invldMasterId), _time(0),
369 _taskId(ContextSwitchTaskId::Unknown), _asid(0), _vaddr(0),
370 _extraData(0), _contextId(0), _threadId(0), _pc(0),
371 _reqInstSeqNum(0), translateDelta(0), accessDelta(0), depth(0)
372 {
373 setVirt(asid, vaddr, size, flags, mid, pc);
374 setThreadContext(cid, tid);
375 }
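
/**
 * Illustrative sketch only (not part of the interface): how a device
 * model and a CPU model might build requests with the constructors
 * above. All addresses, sizes and the use of funcMasterId below are
 * made-up example values.
 *
 * @code
 * // Physical (device-style) request: paddr, size, flags, master id.
 * Request dev_req(0x80000000, 64,
 *                 Request::UNCACHEABLE | Request::STRICT_ORDER,
 *                 Request::funcMasterId);
 *
 * // Virtual (CPU-style) request: asid, vaddr, size, flags, master id,
 * // pc, plus the issuing context and thread ids.
 * Request cpu_req(0, 0x7fff0000, 8, 0, Request::funcMasterId,
 *                 0x400123, 0, 0);
 * @endcode
 */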
376
377 ~Request() {}
378
379 /**
380 * Set up CPU and thread numbers.
381 */
382 void
383 setThreadContext(ContextID context_id, ThreadID tid)
384 {
385 _contextId = context_id;
386 _threadId = tid;
387 privateFlags.set(VALID_CONTEXT_ID|VALID_THREAD_ID);
388 }
389
390 /**
391 * Set up a virtual (e.g., CPU) request in a previously
392 * allocated Request object.
393 */
394 void
395 setVirt(int asid, Addr vaddr, unsigned size, Flags flags, MasterID mid,
396 Addr pc)
397 {
398 _asid = asid;
399 _vaddr = vaddr;
400 _size = size;
401 _masterId = mid;
402 _pc = pc;
403 _time = curTick();
404
405 _flags.clear(~STICKY_FLAGS);
406 _flags.set(flags);
407 privateFlags.clear(~STICKY_PRIVATE_FLAGS);
408 privateFlags.set(VALID_VADDR|VALID_SIZE|VALID_PC);
409 depth = 0;
410 accessDelta = 0;
411 translateDelta = 0;
412 }
413
414 /**
415 * Set just the physical address. This is usually used to record the
416 * result of a translation. However, when using virtualized CPUs
417 * setPhys() is sometimes called to finalize a physical address
418 * without a virtual address, so we can't check if the virtual
419 * address is valid.
420 */
421 void
422 setPaddr(Addr paddr)
423 {
424 _paddr = paddr;
425 privateFlags.set(VALID_PADDR);
426 }
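
/**
 * Illustrative sketch of the usual translate-then-access flow: a
 * request starts out with only a virtual address, and the TLB or
 * table walker records the translation result through setPaddr().
 * The concrete addresses here are made-up example values.
 *
 * @code
 * Request req;
 * req.setVirt(0, 0x7fff2008, 4, 0, Request::funcMasterId, 0x400200);
 * assert(req.hasVaddr() && !req.hasPaddr());
 * // ... address translation happens here ...
 * req.setPaddr(0x2008);      // physical address produced by the TLB
 * assert(req.hasPaddr());    // both addresses are now valid
 * @endcode
 */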
427
428 /**
429 * Generate two requests as if this request had been split into two
430 * pieces. The original request must not have been translated yet.
431 */
432 void splitOnVaddr(Addr split_addr, RequestPtr &req1, RequestPtr &req2)
433 {
434 assert(privateFlags.isSet(VALID_VADDR));
435 assert(privateFlags.noneSet(VALID_PADDR));
436 assert(split_addr > _vaddr && split_addr < _vaddr + _size);
437 req1 = new Request(*this);
438 req2 = new Request(*this);
439 req1->_size = split_addr - _vaddr;
440 req2->_vaddr = split_addr;
441 req2->_size = _size - req1->_size;
442 }
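
/**
 * Illustrative sketch of splitting an access that straddles a 64-byte
 * block boundary before translation; the addresses are example values
 * and the caller is responsible for deleting the two new requests.
 *
 * @code
 * Request req;
 * req.setVirt(0, 0x1038, 16, 0, Request::funcMasterId, 0x400300);
 * RequestPtr low = nullptr, high = nullptr;
 * req.splitOnVaddr(0x1040, low, high);
 * // low  covers [0x1038, 0x1040), size 8
 * // high covers [0x1040, 0x1048), size 8
 * delete low;
 * delete high;
 * @endcode
 */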
443
444 /**
445 * Accessor for paddr.
446 */
447 bool
448 hasPaddr() const
449 {
450 return privateFlags.isSet(VALID_PADDR);
451 }
452
453 Addr
454 getPaddr() const
455 {
456 assert(privateFlags.isSet(VALID_PADDR));
457 return _paddr;
458 }
459
460 /**
461 * Time for the TLB/table walker to successfully translate this request.
462 */
463 Tick translateDelta;
464
465 /**
466 * Access latency to complete this memory transaction not including
467 * translation time.
468 */
469 Tick accessDelta;
470
471 /**
472 * Level of the cache hierarchy where this request was responded to
473 * (e.g. 0 = L1; 1 = L2).
474 */
475 mutable int depth;
476
477 /**
478 * Accessor for size.
479 */
480 bool
481 hasSize() const
482 {
483 return privateFlags.isSet(VALID_SIZE);
484 }
485
486 unsigned
487 getSize() const
488 {
489 assert(privateFlags.isSet(VALID_SIZE));
490 return _size;
491 }
492
493 /** Accessor for time. */
494 Tick
495 time() const
496 {
497 assert(privateFlags.isSet(VALID_PADDR|VALID_VADDR));
498 return _time;
499 }
500
501 /** Accessor for flags. */
502 Flags
503 getFlags()
504 {
505 assert(privateFlags.isSet(VALID_PADDR|VALID_VADDR));
506 return _flags;
507 }
508
509 /** Note that unlike other accessors, this function sets *specific
510 flags* (ORs them in); it does not assign its argument to the
511 _flags field. Thus this method should rightly be called
512 setFlags() and not just flags(). */
513 void
514 setFlags(Flags flags)
515 {
516 assert(privateFlags.isSet(VALID_PADDR|VALID_VADDR));
517 _flags.set(flags);
518 }
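
/**
 * Illustrative example of the OR-in behaviour described above; the
 * address, size and flags are arbitrary.
 *
 * @code
 * Request req(0x1000, 64, Request::UNCACHEABLE, Request::funcMasterId);
 * req.setFlags(Request::STRICT_ORDER);
 * assert(req.isUncacheable() && req.isStrictlyOrdered());
 * @endcode
 */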
519
520 /** Accessor function for vaddr.*/
521 bool
522 hasVaddr() const
523 {
524 return privateFlags.isSet(VALID_VADDR);
525 }
526
527 Addr
528 getVaddr() const
529 {
530 assert(privateFlags.isSet(VALID_VADDR));
531 return _vaddr;
532 }
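
/**
 * Illustrative sketch: consumers that only sometimes see virtual
 * addresses (a trace probe or debug printer, say) should guard
 * getVaddr() with hasVaddr(), since purely physical requests never
 * set it. Falling back to getPaddr() is just one possible choice.
 *
 * @code
 * // Given some "const Request &req" known to have a valid paddr:
 * Addr traced_addr = req.hasVaddr() ? req.getVaddr() : req.getPaddr();
 * @endcode
 */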
533
534 /** Accessor for the requestor id. */
535 MasterID
536 masterId() const
537 {
538 return _masterId;
539 }
540
541 uint32_t
542 taskId() const
543 {
544 return _taskId;
545 }
546
547 void
548 taskId(uint32_t id) {
549 _taskId = id;
550 }
551
552 /** Accessor function for asid.*/
553 int
554 getAsid() const
555 {
556 assert(privateFlags.isSet(VALID_VADDR));
557 return _asid;
558 }
559
560 /** Accessor function for asid.*/
561 void
562 setAsid(int asid)
563 {
564 _asid = asid;
565 }
566
567 /** Accessor function for architecture-specific flags.*/
568 ArchFlagsType
569 getArchFlags() const
570 {
571 assert(privateFlags.isSet(VALID_PADDR|VALID_VADDR));
572 return _flags & ARCH_BITS;
573 }
574
575 /** Accessor function to check if sc result is valid. */
576 bool
577 extraDataValid() const
578 {
579 return privateFlags.isSet(VALID_EXTRA_DATA);
580 }
581
582 /** Accessor function for store conditional return value.*/
583 uint64_t
584 getExtraData() const
585 {
586 assert(privateFlags.isSet(VALID_EXTRA_DATA));
587 return _extraData;
588 }
589
590 /** Accessor function for store conditional return value.*/
591 void
592 setExtraData(uint64_t extraData)
593 {
594 _extraData = extraData;
595 privateFlags.set(VALID_EXTRA_DATA);
596 }
597
598 bool
599 hasContextId() const
600 {
601 return privateFlags.isSet(VALID_CONTEXT_ID);
602 }
603
604 /** Accessor function for context ID.*/
605 ContextID
606 contextId() const
607 {
608 assert(privateFlags.isSet(VALID_CONTEXT_ID));
609 return _contextId;
610 }
611
612 /** Accessor function for thread ID. */
613 ThreadID
614 threadId() const
615 {
616 assert(privateFlags.isSet(VALID_THREAD_ID));
617 return _threadId;
618 }
619
620 void
621 setPC(Addr pc)
622 {
623 privateFlags.set(VALID_PC);
624 _pc = pc;
625 }
626
627 bool
628 hasPC() const
629 {
630 return privateFlags.isSet(VALID_PC);
631 }
632
633 /** Accessor function for pc.*/
634 Addr
635 getPC() const
636 {
637 assert(privateFlags.isSet(VALID_PC));
638 return _pc;
639 }
640
641 /**
642 * Increment/Get the depth at which this request is responded to.
643 * This currently happens when the request misses in any cache level.
644 */
645 void incAccessDepth() const { depth++; }
646 int getAccessDepth() const { return depth; }
647
648 /**
649 * Set/Get the time taken for this request to be successfully translated.
650 */
651 void setTranslateLatency() { translateDelta = curTick() - _time; }
652 Tick getTranslateLatency() const { return translateDelta; }
653
654 /**
655 * Set/Get the time taken to complete this request's access, not including
656 * the time to successfully translate the request.
657 */
658 void setAccessLatency() { accessDelta = curTick() - _time - translateDelta; }
659 Tick getAccessLatency() const { return accessDelta; }
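
/**
 * Illustrative timeline for the latency bookkeeping above, with
 * made-up tick values: if the request is created (so _time is set) at
 * tick 100, setTranslateLatency() is called when translation finishes
 * at tick 140, and setAccessLatency() is called when the access
 * completes at tick 900, then
 *
 *   translateDelta = 140 - 100      = 40
 *   accessDelta    = 900 - 100 - 40 = 760
 *
 * i.e. the two deltas split the total latency of 800 ticks into a
 * translation part and a memory-access part.
 */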
660
661 /**
662 * Accessor for the sequence number of instruction that creates the
663 * request.
664 */
665 bool
666 hasInstSeqNum() const
667 {
668 return privateFlags.isSet(VALID_INST_SEQ_NUM);
669 }
670
671 InstSeqNum
672 getReqInstSeqNum() const
673 {
674 assert(privateFlags.isSet(VALID_INST_SEQ_NUM));
675 return _reqInstSeqNum;
676 }
677
678 void
679 setReqInstSeqNum(const InstSeqNum seq_num)
680 {
681 privateFlags.set(VALID_INST_SEQ_NUM);
682 _reqInstSeqNum = seq_num;
683 }
684
685 /** Accessor functions for flags. Note that these only test flag
686 bits; setting flags should be done via setFlags(). */
687 bool isUncacheable() const { return _flags.isSet(UNCACHEABLE); }
688 bool isStrictlyOrdered() const { return _flags.isSet(STRICT_ORDER); }
689 bool isInstFetch() const { return _flags.isSet(INST_FETCH); }
690 bool isPrefetch() const { return _flags.isSet(PREFETCH); }
691 bool isLLSC() const { return _flags.isSet(LLSC); }
692 bool isPriv() const { return _flags.isSet(PRIVILEGED); }
693 bool isLockedRMW() const { return _flags.isSet(LOCKED_RMW); }
694 bool isSwap() const { return _flags.isSet(MEM_SWAP|MEM_SWAP_COND); }
695 bool isCondSwap() const { return _flags.isSet(MEM_SWAP_COND); }
696 bool isMmappedIpr() const { return _flags.isSet(MMAPPED_IPR); }
697 bool isSecure() const { return _flags.isSet(SECURE); }
698 bool isPTWalk() const { return _flags.isSet(PT_WALK); }
699 };
700
701 #endif // __MEM_REQUEST_HH__