arm: support 16kb vm granules
[gem5.git] / src / arch / arm / table_walker.hh
/*
 * Copyright (c) 2010-2014 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Ali Saidi
 *          Giacomo Gabrielli
 */

#ifndef __ARCH_ARM_TABLE_WALKER_HH__
#define __ARCH_ARM_TABLE_WALKER_HH__

#include <list>

#include "arch/arm/miscregs.hh"
#include "arch/arm/system.hh"
#include "arch/arm/tlb.hh"
#include "dev/dma_device.hh"
#include "mem/mem_object.hh"
#include "mem/request.hh"
#include "params/ArmTableWalker.hh"
#include "sim/eventq.hh"
#include "sim/fault_fwd.hh"

class ThreadContext;

namespace ArmISA {
class Translation;
class TLB;
class Stage2MMU;

class TableWalker : public MemObject
{
  public:
    class WalkerState;

    class DescriptorBase {
      public:
        /** Current lookup level for this descriptor */
        LookupLevel lookupLevel;

        virtual Addr pfn() const = 0;
        virtual TlbEntry::DomainType domain() const = 0;
        virtual bool xn() const = 0;
        virtual uint8_t ap() const = 0;
        virtual bool global(WalkerState *currState) const = 0;
        virtual uint8_t offsetBits() const = 0;
        virtual bool secure(bool have_security, WalkerState *currState) const = 0;
        virtual std::string dbgHeader() const = 0;
        virtual uint64_t getRawData() const = 0;
        virtual uint8_t texcb() const
        {
            panic("texcb() not implemented for this class\n");
        }
        virtual bool shareable() const
        {
            panic("shareable() not implemented for this class\n");
        }
    };

    class L1Descriptor : public DescriptorBase {
      public:
        /** Type of page table entry ARM DDI 0406B: B3-8 */
        enum EntryType {
            Ignore,
            PageTable,
            Section,
            Reserved
        };

        /** The raw bits of the entry */
        uint32_t data;

        /** This entry has been modified (access flag set) and needs to be
         * written back to memory */
        bool _dirty;

        /** Default ctor */
        L1Descriptor()
        {
            lookupLevel = L1;
        }

        virtual uint64_t getRawData() const
        {
            return (data);
        }

        virtual std::string dbgHeader() const
        {
            return "Inserting Section Descriptor into TLB\n";
        }

        virtual uint8_t offsetBits() const
        {
            return 20;
        }

        EntryType type() const
        {
            return (EntryType)(data & 0x3);
        }

        /** Is the page a Supersection (16 MB)? */
        bool supersection() const
        {
            return bits(data, 18);
        }

        /** Return the physical address of the entry, bits in position */
        Addr paddr() const
        {
            if (supersection())
                panic("Super sections not implemented\n");
            return mbits(data, 31, 20);
        }
        /** Return the physical address of the entry, bits in position */
        Addr paddr(Addr va) const
        {
            if (supersection())
                panic("Super sections not implemented\n");
            return mbits(data, 31, 20) | mbits(va, 19, 0);
        }


        /** Return the physical frame, bits shifted right */
        Addr pfn() const
        {
            if (supersection())
                panic("Super sections not implemented\n");
            return bits(data, 31, 20);
        }

        /** Is the translation global (no ASID used)? */
        bool global(WalkerState *currState) const
        {
            return !bits(data, 17);
        }

        /** Does the mapping disallow execution (XN bit set)? */
        bool xn() const
        {
            return bits(data, 4);
        }

        /** Three bit access protection flags */
        uint8_t ap() const
        {
            return (bits(data, 15) << 2) | bits(data, 11, 10);
        }

        /** Domain Client/Manager: ARM DDI 0406B: B3-31 */
        TlbEntry::DomainType domain() const
        {
            return static_cast<TlbEntry::DomainType>(bits(data, 8, 5));
        }

        /** Address of L2 descriptor if it exists */
        Addr l2Addr() const
        {
            return mbits(data, 31, 10);
        }

        /** Memory region attributes: ARM DDI 0406B: B3-32.
         * These bits are largely ignored by M5 and only used to
         * provide the illusion that the memory system cares about
         * anything but cacheable vs. uncacheable.
         */
        uint8_t texcb() const
        {
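            // Pack the attributes as {TEX[2:0], C, B}: B ends up in bit 0,
            // C in bit 1 and TEX in bits [4:2] of the returned value.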
            return bits(data, 2) | bits(data, 3) << 1 | bits(data, 14, 12) << 2;
        }

        /** If the section is shareable. See texcb() comment. */
        bool shareable() const
        {
            return bits(data, 16);
        }

        /** Set the access flag to record that this entry has been touched and
         * mark the entry as requiring a writeback in the future.
         */
        void setAp0()
        {
            data |= 1 << 10;
            _dirty = true;
        }

        /** This entry needs to be written back to memory */
        bool dirty() const
        {
            return _dirty;
        }

        /**
         * Returns true if this entry targets the secure physical address
         * map.
         */
        bool secure(bool have_security, WalkerState *currState) const
        {
            if (have_security) {
                if (type() == PageTable)
                    return !bits(data, 3);
                else
                    return !bits(data, 19);
            }
            return false;
        }
    };

    /** Level 2 page table descriptor */
    class L2Descriptor : public DescriptorBase {
      public:
        /** The raw bits of the entry. */
        uint32_t data;
        L1Descriptor *l1Parent;

        /** This entry has been modified (access flag set) and needs to be
         * written back to memory */
        bool _dirty;

        /** Default ctor */
        L2Descriptor()
        {
            lookupLevel = L2;
        }

        L2Descriptor(L1Descriptor &parent) : l1Parent(&parent)
        {
            lookupLevel = L2;
        }

        virtual uint64_t getRawData() const
        {
            return (data);
        }

        virtual std::string dbgHeader() const
        {
            return "Inserting L2 Descriptor into TLB\n";
        }

        virtual TlbEntry::DomainType domain() const
        {
            return l1Parent->domain();
        }

        bool secure(bool have_security, WalkerState *currState) const
        {
            return l1Parent->secure(have_security, currState);
        }

        virtual uint8_t offsetBits() const
        {
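            // A short-descriptor large page covers 64 KB (2^16 bytes), a
            // small page 4 KB (2^12 bytes).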
            return large() ? 16 : 12;
        }

        /** Is the entry invalid */
        bool invalid() const
        {
            return bits(data, 1, 0) == 0;
        }

        /** Is this a large page (64 KB) rather than a small page (4 KB)? */
        bool large() const
        {
            return bits(data, 1) == 0;
        }

        /** Is execution never allowed (XN) on this mapping? */
        bool xn() const
        {
            return large() ? bits(data, 15) : bits(data, 0);
        }

        /** Is the translation global (no ASID used)? */
        bool global(WalkerState *currState) const
        {
            return !bits(data, 11);
        }

        /** Three bit access protection flags */
        uint8_t ap() const
        {
            return bits(data, 5, 4) | (bits(data, 9) << 2);
        }

        /** Memory region attributes: ARM DDI 0406B: B3-32 */
        uint8_t texcb() const
        {
            return large() ?
                (bits(data, 2) | (bits(data, 3) << 1) | (bits(data, 14, 12) << 2)) :
                (bits(data, 2) | (bits(data, 3) << 1) | (bits(data, 8, 6) << 2));
        }

        /** Return the physical frame, bits shifted right */
        Addr pfn() const
        {
            return large() ? bits(data, 31, 16) : bits(data, 31, 12);
        }

        /** Return complete physical address given a VA */
        Addr paddr(Addr va) const
        {
            if (large())
                return mbits(data, 31, 16) | mbits(va, 15, 0);
            else
                return mbits(data, 31, 12) | mbits(va, 11, 0);
        }

        /** If the section is shareable. See texcb() comment. */
        bool shareable() const
        {
            return bits(data, 10);
        }

        /** Set the access flag to record that this entry has been touched and
         * mark the entry as requiring a writeback in the future.
         */
        void setAp0()
        {
            data |= 1 << 4;
            _dirty = true;
        }

        /** This entry needs to be written back to memory */
        bool dirty() const
        {
            return _dirty;
        }

    };

    // Granule sizes for AArch64 long descriptors
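    // Each enumerator holds the log2 of the granule size in bytes
    // (2^12 = 4 KB, 2^14 = 16 KB, 2^16 = 64 KB), so a GrainSize value can be
    // used directly as a bit width when computing offsets and table indices.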
    enum GrainSize {
        Grain4KB = 12,
        Grain16KB = 14,
        Grain64KB = 16,
        ReservedGrain = 0
    };

    /** Long-descriptor format (LPAE) */
    class LongDescriptor : public DescriptorBase {
      public:
        /** Descriptor type */
        enum EntryType {
            Invalid,
            Table,
            Block,
            Page
        };

        /** The raw bits of the entry */
        uint64_t data;

        /** This entry has been modified (access flag set) and needs to be
         * written back to memory */
        bool _dirty;

        virtual uint64_t getRawData() const
        {
            return (data);
        }

        virtual std::string dbgHeader() const
        {
            if (type() == LongDescriptor::Page) {
                assert(lookupLevel == L3);
                return "Inserting Page descriptor into TLB\n";
            } else {
                assert(lookupLevel < L3);
                return "Inserting Block descriptor into TLB\n";
            }
        }

        /**
         * Returns true if this entry targets the secure physical address
         * map.
         */
        bool secure(bool have_security, WalkerState *currState) const
        {
            assert(type() == Block || type() == Page);
            return have_security && (currState->secureLookup && !bits(data, 5));
        }

        /** True if the current lookup is performed in AArch64 state */
        bool aarch64;

        /** Width of the granule size in bits */
        GrainSize grainSize;

        /** Return the descriptor type */
        EntryType type() const
        {
            switch (bits(data, 1, 0)) {
              case 0x1:
                // In AArch64 blocks are not allowed at L0 for the 4 KB granule
                // and at L1 for 16/64 KB granules
                if (grainSize > Grain4KB)
                    return lookupLevel == L2 ? Block : Invalid;
                return lookupLevel == L0 || lookupLevel == L3 ? Invalid : Block;
              case 0x3:
                return lookupLevel == L3 ? Page : Table;
              default:
                return Invalid;
            }
        }

        /** Return the bit width of the page/block offset */
        uint8_t offsetBits() const
        {
            if (type() == Block) {
                switch (grainSize) {
                  case Grain4KB:
                    return lookupLevel == L1 ? 30 /* 1 GB */
                                             : 21 /* 2 MB */;
                  case Grain16KB:
                    return 25 /* 32 MB */;
                  case Grain64KB:
                    return 29 /* 512 MB */;
                  default:
                    panic("Invalid AArch64 VM granule size\n");
                }
            } else if (type() == Page) {
                switch (grainSize) {
                  case Grain4KB:
                  case Grain16KB:
                  case Grain64KB:
                    return grainSize; /* enum -> uint okay */
                  default:
                    panic("Invalid AArch64 VM granule size\n");
                }
            } else {
                panic("AArch64 page table entry must be block or page\n");
            }
        }

        /** Return the physical frame, bits shifted right */
        Addr pfn() const
        {
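            // The long-descriptor output address field is wider in AArch64
            // (bits [47:n], up to a 48-bit PA) than in AArch32 LPAE
            // (bits [39:n], a 40-bit PA).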
            if (aarch64)
                return bits(data, 47, offsetBits());
            return bits(data, 39, offsetBits());
        }

        /** Return the complete physical address given a VA */
        Addr paddr(Addr va) const
        {
            int n = offsetBits();
            if (aarch64)
                return mbits(data, 47, n) | mbits(va, n - 1, 0);
            return mbits(data, 39, n) | mbits(va, n - 1, 0);
        }

        /** Return the physical address of the entry */
        Addr paddr() const
        {
            if (aarch64)
                return mbits(data, 47, offsetBits());
            return mbits(data, 39, offsetBits());
        }

        /** Return the address of the next page table */
        Addr nextTableAddr() const
        {
            assert(type() == Table);
            if (aarch64)
                return mbits(data, 47, grainSize);
            else
                return mbits(data, 39, 12);
        }

        /** Return the address of the next descriptor */
        Addr nextDescAddr(Addr va) const
        {
            assert(type() == Table);
            Addr pa = 0;
            if (aarch64) {
                int stride = grainSize - 3;
                int va_lo = stride * (3 - (lookupLevel + 1)) + grainSize;
                int va_hi = va_lo + stride - 1;
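                // Each level resolves 'stride' VA bits: a table of
                // 2^grainSize bytes holds 2^(grainSize - 3) 8-byte entries.
                // For example, with a 16 KB granule (grainSize = 14) the
                // stride is 11, so a table descriptor read at L2 selects the
                // L3 entry using va[24:14].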
                pa = nextTableAddr() | (bits(va, va_hi, va_lo) << 3);
            } else {
                if (lookupLevel == L1)
                    pa = nextTableAddr() | (bits(va, 29, 21) << 3);
                else // lookupLevel == L2
                    pa = nextTableAddr() | (bits(va, 20, 12) << 3);
            }
            return pa;
        }

        /** Is execution never allowed (XN) on this mapping? */
        bool xn() const
        {
            assert(type() == Block || type() == Page);
            return bits(data, 54);
        }

        /** Is privileged execution never allowed (PXN) on this mapping?
         * (LPAE only) */
        bool pxn() const
        {
            assert(type() == Block || type() == Page);
            return bits(data, 53);
        }

        /** Contiguous hint bit. */
        bool contiguousHint() const
        {
            assert(type() == Block || type() == Page);
            return bits(data, 52);
        }

        /** Is the translation global (no ASID used)? */
        bool global(WalkerState *currState) const
        {
            assert(currState && (type() == Block || type() == Page));
            if (!currState->aarch64 && (currState->isSecure &&
                !currState->secureLookup)) {
                return false; // ARM ARM issue C B3.6.3
            } else if (currState->aarch64) {
                if (currState->el == EL2 || currState->el == EL3) {
                    return true;  // By default translations are treated as
                                  // global in AArch64 EL2 and EL3
                } else if (currState->isSecure && !currState->secureLookup) {
                    return false;
                }
            }
            return !bits(data, 11);
        }

        /** Returns true if the access flag (AF) is set. */
        bool af() const
        {
            assert(type() == Block || type() == Page);
            return bits(data, 10);
        }

        /** 2-bit shareability field */
        uint8_t sh() const
        {
            assert(type() == Block || type() == Page);
            return bits(data, 9, 8);
        }

        /** 2-bit access protection flags */
        uint8_t ap() const
        {
            assert(type() == Block || type() == Page);
            // Long descriptors only support the AP[2:1] scheme
            return bits(data, 7, 6);
        }

        /** Read/write access protection flag */
        bool rw() const
        {
            assert(type() == Block || type() == Page);
            return !bits(data, 7);
        }

        /** User/privileged level access protection flag */
        bool user() const
        {
            assert(type() == Block || type() == Page);
            return bits(data, 6);
        }

        /** Return the AP bits as compatible with the AP[2:0] format. Utility
         * function used to simplify the code in the TLB for performing
         * permission checks. */
        static uint8_t ap(bool rw, bool user)
        {
            return ((!rw) << 2) | (user << 1);
        }

        TlbEntry::DomainType domain() const
        {
            // Long-desc. format only supports Client domain
            assert(type() == Block || type() == Page);
            return TlbEntry::DomainType::Client;
        }

        /** Attribute index */
        uint8_t attrIndx() const
        {
            assert(type() == Block || type() == Page);
            return bits(data, 4, 2);
        }

        /** Memory attributes, only used by stage 2 translations */
        uint8_t memAttr() const
        {
            assert(type() == Block || type() == Page);
            return bits(data, 5, 2);
        }

        /** Set the access flag to record that this entry has been touched and
         * mark the entry as requiring a writeback in the future. */
        void setAf()
        {
            data |= 1 << 10;
            _dirty = true;
        }

        /** This entry needs to be written back to memory */
        bool dirty() const
        {
            return _dirty;
        }

        /** Whether the subsequent levels of lookup are secure */
        bool secureTable() const
        {
            assert(type() == Table);
            return !bits(data, 63);
        }

        /** Two bit access protection flags for subsequent levels of lookup */
        uint8_t apTable() const
        {
            assert(type() == Table);
            return bits(data, 62, 61);
        }

        /** R/W protection flag for subsequent levels of lookup */
        uint8_t rwTable() const
        {
            assert(type() == Table);
            return !bits(data, 62);
        }

        /** User/privileged mode protection flag for subsequent levels of
         * lookup */
        uint8_t userTable() const
        {
            assert(type() == Table);
            return !bits(data, 61);
        }

        /** Is execution never allowed on subsequent lookup levels? */
        bool xnTable() const
        {
            assert(type() == Table);
            return bits(data, 60);
        }

        /** Is privileged execution never allowed on subsequent lookup
         * levels? */
        bool pxnTable() const
        {
            assert(type() == Table);
            return bits(data, 59);
        }
    };

    class WalkerState
    {
      public:
        /** Thread context that we're doing the walk for */
        ThreadContext *tc;

        /** If the access is performed in AArch64 state */
        bool aarch64;

        /** Current exception level */
        ExceptionLevel el;

        /** Current physical address range in bits */
        int physAddrRange;

        /** Request that is currently being serviced */
        RequestPtr req;

        /** ASID that we're servicing the request under */
        uint16_t asid;
        uint8_t vmid;
        bool isHyp;

        /** Translation state for delayed requests */
        TLB::Translation *transState;

        /** The fault that we are going to return */
        Fault fault;

        /** The virtual address that is being translated with tagging removed. */
        Addr vaddr;

        /** The virtual address that is being translated */
        Addr vaddr_tainted;

        /** Cached copy of the sctlr as it existed when translation began */
        SCTLR sctlr;

        /** Cached copy of the scr as it existed when translation began */
        SCR scr;

        /** Cached copy of the cpsr as it existed when translation began */
        CPSR cpsr;

        /** Cached copy of ttbcr/tcr as it existed when translation began */
        union {
            TTBCR ttbcr; // AArch32 translations
            TCR tcr;     // AArch64 translations
        };

        /** Cached copy of the htcr as it existed when translation began. */
        HTCR htcr;

        /** Cached copy of the hcr as it existed when translation began. */
        HCR hcr;

        /** Cached copy of the vtcr as it existed when translation began. */
        VTCR_t vtcr;

        /** If the access is a write */
        bool isWrite;

        /** If the access is a fetch (for execution), meaning the no-exec
         * permission bits must be checked */
        bool isFetch;

        /** If the access comes from the secure state. */
        bool isSecure;

        /** Helper variables used to implement hierarchical access permissions
         * when the long-desc. format is used (LPAE only) */
        bool secureLookup;
        bool rwTable;
        bool userTable;
        bool xnTable;
        bool pxnTable;

        /** Flag indicating if a second stage of lookup is required */
        bool stage2Req;

        /** Indicates whether the translation has been passed onto the second
         * stage mmu, and no more work is required from the first stage.
         */
        bool doingStage2;

        /** A pointer to the stage 2 translation that's in progress */
        TLB::Translation *stage2Tran;

        /** If the mode is timing or atomic */
        bool timing;

        /** If the atomic mode should be functional */
        bool functional;

        /** Save mode for use in delayed response */
        BaseTLB::Mode mode;

        /** The translation type that has been requested */
        TLB::ArmTranslationType tranType;

        /** Short-format descriptors */
        L1Descriptor l1Desc;
        L2Descriptor l2Desc;

        /** Long-format descriptor (LPAE and AArch64) */
        LongDescriptor longDesc;

        /** Whether the response is delayed in timing mode due to additional
         * lookups */
        bool delayed;

        TableWalker *tableWalker;

        void doL1Descriptor();
        void doL2Descriptor();

        void doLongDescriptor();

        WalkerState();

        std::string name() const { return tableWalker->name(); }
    };

  protected:

    /**
     * A snooping DMA port that currently does nothing besides
     * extending the DMA port to accept snoops without complaining.
     */
    class SnoopingDmaPort : public DmaPort
    {

      protected:

        virtual void recvTimingSnoopReq(PacketPtr pkt)
        { }

        virtual Tick recvAtomicSnoop(PacketPtr pkt)
        { return 0; }

        virtual void recvFunctionalSnoop(PacketPtr pkt)
        { }

        virtual bool isSnooping() const { return true; }

      public:

        /**
         * A snooping DMA port merely calls the constructor of the DMA
         * port.
         */
        SnoopingDmaPort(MemObject *dev, System *s) :
            DmaPort(dev, s)
        { }
    };

    /** Queues of requests for all the different lookup levels */
    std::list<WalkerState *> stateQueues[MAX_LOOKUP_LEVELS];

    /** Queue of requests that are waiting because the walker is currently
     * busy. */
    std::list<WalkerState *> pendingQueue;


    /** Port to issue translation requests from */
    SnoopingDmaPort port;

    /** If we're draining keep the drain event around until we're drained */
    DrainManager *drainManager;

    /** The MMU to forward second stage lookups to */
    Stage2MMU *stage2Mmu;

    /** Indicates whether this table walker is part of the stage 2 mmu */
    const bool isStage2;

    /** TLB that is initiating these table walks */
    TLB *tlb;

    /** Cached copy of the sctlr as it existed when translation began */
    SCTLR sctlr;

    WalkerState *currState;

    /** If a timing translation is currently in progress */
    bool pending;

    /** Request id for requests generated by this walker */
    MasterID masterId;

    /** The number of walks belonging to squashed instructions that can be
     * removed from the pendingQueue per cycle. */
    unsigned numSquashable;

    /** Cached copies of system-level properties */
    bool haveSecurity;
    bool _haveLPAE;
    bool _haveVirtualization;
    uint8_t physAddrRange;
    bool _haveLargeAsid64;
    ArmSystem *armSys;

  public:
    typedef ArmTableWalkerParams Params;
    TableWalker(const Params *p);
    virtual ~TableWalker();

    const Params *
    params() const
    {
        return dynamic_cast<const Params *>(_params);
    }

    bool haveLPAE() const { return _haveLPAE; }
    bool haveVirtualization() const { return _haveVirtualization; }
    bool haveLargeAsid64() const { return _haveLargeAsid64; }
    /** Checks if all state is cleared and if so, completes drain */
    void completeDrain();
    unsigned int drain(DrainManager *dm);
    virtual void drainResume();
    virtual BaseMasterPort& getMasterPort(const std::string &if_name,
                                          PortID idx = InvalidPortID);

    /**
     * Allow the MMU (overseeing both stage 1 and stage 2 TLBs) to
     * access the table walker port through the TLB so that it can
     * orchestrate staged translations.
     *
     * @return Our DMA port
     */
    DmaPort& getWalkerPort() { return port; }

    Fault walk(RequestPtr req, ThreadContext *tc, uint16_t asid, uint8_t _vmid,
               bool _isHyp, TLB::Mode mode, TLB::Translation *_trans,
               bool timing, bool functional, bool secure,
               TLB::ArmTranslationType tranType);

    void setTlb(TLB *_tlb) { tlb = _tlb; }
    TLB* getTlb() { return tlb; }
    void setMMU(Stage2MMU *m) { stage2Mmu = m; }
    void memAttrs(ThreadContext *tc, TlbEntry &te, SCTLR sctlr,
                  uint8_t texcb, bool s);
    void memAttrsLPAE(ThreadContext *tc, TlbEntry &te,
                      LongDescriptor &lDescriptor);
    void memAttrsAArch64(ThreadContext *tc, TlbEntry &te, uint8_t attrIndx,
                         uint8_t sh);

    static LookupLevel toLookupLevel(uint8_t lookup_level_as_int);

  private:

    void doL1Descriptor();
    void doL1DescriptorWrapper();
    EventWrapper<TableWalker,
                 &TableWalker::doL1DescriptorWrapper> doL1DescEvent;

    void doL2Descriptor();
    void doL2DescriptorWrapper();
    EventWrapper<TableWalker,
                 &TableWalker::doL2DescriptorWrapper> doL2DescEvent;

    void doLongDescriptor();

    void doL0LongDescriptorWrapper();
    EventWrapper<TableWalker,
                 &TableWalker::doL0LongDescriptorWrapper> doL0LongDescEvent;
    void doL1LongDescriptorWrapper();
    EventWrapper<TableWalker,
                 &TableWalker::doL1LongDescriptorWrapper> doL1LongDescEvent;
    void doL2LongDescriptorWrapper();
    EventWrapper<TableWalker,
                 &TableWalker::doL2LongDescriptorWrapper> doL2LongDescEvent;
    void doL3LongDescriptorWrapper();
    EventWrapper<TableWalker,
                 &TableWalker::doL3LongDescriptorWrapper> doL3LongDescEvent;

    void doLongDescriptorWrapper(LookupLevel curr_lookup_level);

    bool fetchDescriptor(Addr descAddr, uint8_t *data, int numBytes,
                         Request::Flags flags, int queueIndex, Event *event,
                         void (TableWalker::*doDescriptor)());

    void insertTableEntry(DescriptorBase &descriptor, bool longDescriptor);

    Fault processWalk();
    Fault processWalkLPAE();
    static unsigned adjustTableSizeAArch64(unsigned tsz);
    /// Returns true if the address exceeds the range permitted by the
    /// system-wide setting or by the TCR_ELx IPS/PS setting
    static bool checkAddrSizeFaultAArch64(Addr addr, int currPhysAddrRange);
    Fault processWalkAArch64();
    void processWalkWrapper();
    EventWrapper<TableWalker, &TableWalker::processWalkWrapper> doProcessEvent;

    void nextWalk(ThreadContext *tc);
};

} // namespace ArmISA

#endif //__ARCH_ARM_TABLE_WALKER_HH__