/*
 * Copyright (c) 2010-2013 ARM Limited
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2001-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "arch/arm/faults.hh"
#include "arch/arm/pagetable.hh"
#include "arch/arm/system.hh"
#include "arch/arm/table_walker.hh"
#include "arch/arm/stage2_lookup.hh"
#include "arch/arm/stage2_mmu.hh"
#include "arch/arm/tlb.hh"
#include "arch/arm/utility.hh"
#include "base/inifile.hh"
#include "base/str.hh"
#include "base/trace.hh"
#include "cpu/base.hh"
#include "cpu/thread_context.hh"
#include "debug/Checkpoint.hh"
#include "debug/TLB.hh"
#include "debug/TLBVerbose.hh"
#include "mem/page_table.hh"
#include "params/ArmTLB.hh"
#include "sim/full_system.hh"
#include "sim/process.hh"

using namespace ArmISA;
TLB::TLB(const ArmTLBParams *p)
    : BaseTLB(p), table(new TlbEntry[p->size]), size(p->size),
      isStage2(p->is_stage2), stage2Req(false), _attr(0),
      directToStage2(false), tableWalker(p->walker), stage2Tlb(NULL),
      stage2Mmu(NULL), rangeMRU(1), bootUncacheability(false),
      miscRegValid(false), curTranType(NormalTran)
{
    tableWalker->setTlb(this);

    // Cache system-level properties
    haveLPAE = tableWalker->haveLPAE();
    haveVirtualization = tableWalker->haveVirtualization();
    haveLargeAsid64 = tableWalker->haveLargeAsid64();

    if (stage2Mmu && !isStage2)
        stage2Tlb = stage2Mmu->stage2Tlb();
TLB::setMMU(Stage2MMU *m)
    tableWalker->setMMU(m);

TLB::translateFunctional(ThreadContext *tc, Addr va, Addr &pa)
    if (directToStage2) {
        return stage2Tlb->translateFunctional(tc, va, pa);
    TlbEntry *e = lookup(va, asid, vmid, isHyp, isSecure, true, false,
                         aarch64 ? aarch64EL : EL1);

TLB::finalizePhysical(RequestPtr req, ThreadContext *tc, Mode mode) const
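
// The lookup walks the TLB array linearly, matching on address, ASID, VMID,
// hyp/security state and exception level; unless the access is functional,
// a hit is moved towards the front of the array to approximate LRU ordering.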
TLB::lookup(Addr va, uint16_t asn, uint8_t vmid, bool hyp, bool secure,
            bool functional, bool ignore_asn, uint8_t target_el)
    TlbEntry *retval = NULL;

    // Maintaining LRU array
    while (retval == NULL && x < size) {
        if ((!ignore_asn && table[x].match(va, asn, vmid, hyp, secure, false,
             target_el)) ||
            (ignore_asn && table[x].match(va, vmid, hyp, secure, target_el))) {
            // We only move the hit entry ahead when the position is higher
            // than rangeMRU
            if (x > rangeMRU && !functional) {
                TlbEntry tmp_entry = table[x];
                for (int i = x; i > 0; i--)
                    table[i] = table[i - 1];
                table[0] = tmp_entry;
    DPRINTF(TLBVerbose, "Lookup %#x, asn %#x -> %s vmn 0x%x hyp %d secure %d "
            "ppn %#x size: %#x pa: %#x ap:%d ns:%d nstid:%d g:%d asid: %d "
            "el: %d\n",
            va, asn, retval ? "hit" : "miss", vmid, hyp, secure,
            retval ? retval->pfn : 0, retval ? retval->size : 0,
            retval ? retval->pAddr(va) : 0, retval ? retval->ap : 0,
            retval ? retval->ns : 0, retval ? retval->nstid : 0,
            retval ? retval->global : 0, retval ? retval->asid : 0,
            retval ? retval->el : 0);
// insert a new TLB entry
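// New entries go in at the MRU position (index 0) and everything else shifts
// down one slot, so the entry at index size - 1 is the LRU victim that gets
// evicted.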
TLB::insert(Addr addr, TlbEntry &entry)
    DPRINTF(TLB, "Inserting entry into TLB with pfn:%#x size:%#x vpn: %#x"
            " asid:%d vmid:%d N:%d global:%d valid:%d nc:%d xn:%d"
            " ap:%#x domain:%#x ns:%d nstid:%d isHyp:%d\n", entry.pfn,
            entry.size, entry.vpn, entry.asid, entry.vmid, entry.N,
            entry.global, entry.valid, entry.nonCacheable, entry.xn,
            entry.ap, static_cast<uint8_t>(entry.domain), entry.ns, entry.nstid,
            entry.isHyp);

    if (table[size - 1].valid)
        DPRINTF(TLB, " - Replacing Valid entry %#x, asn %d vmn %d ppn %#x "
                "size: %#x ap:%d ns:%d nstid:%d g:%d isHyp:%d el: %d\n",
                table[size-1].vpn << table[size-1].N, table[size-1].asid,
                table[size-1].vmid, table[size-1].pfn << table[size-1].N,
                table[size-1].size, table[size-1].ap, table[size-1].ns,
                table[size-1].nstid, table[size-1].global, table[size-1].isHyp,
                table[size-1].el);
    // inserting to MRU position and evicting the LRU one
    for (int i = size - 1; i > 0; --i)
        table[i] = table[i-1];

    ppRefills->notify(1);

TLB::printTlb() const
    DPRINTF(TLB, "Current TLB contents:\n");
        DPRINTF(TLB, " * %s\n", te->print());
TLB::flushAllSecurity(bool secure_lookup, uint8_t target_el, bool ignore_el)
    DPRINTF(TLB, "Flushing all TLB entries (%s lookup)\n",
            (secure_lookup ? "secure" : "non-secure"));

        if (te->valid && secure_lookup == !te->nstid &&
            (te->vmid == vmid || secure_lookup) &&
            checkELMatch(target_el, te->el, ignore_el)) {
            DPRINTF(TLB, " - %s\n", te->print());

    // If there's a second stage TLB (and we're not it) then flush it as well
    // if we're currently in hyp mode
    if (!isStage2 && isHyp) {
        stage2Tlb->flushAllSecurity(secure_lookup, true);

TLB::flushAllNs(bool hyp, uint8_t target_el, bool ignore_el)
    DPRINTF(TLB, "Flushing all NS TLB entries (%s lookup)\n",
            (hyp ? "hyp" : "non-hyp"));

        if (te->valid && te->nstid && te->isHyp == hyp &&
            checkELMatch(target_el, te->el, ignore_el)) {
            DPRINTF(TLB, " - %s\n", te->print());

    // If there's a second stage TLB (and we're not it) then flush it as well
    if (!isStage2 && !hyp) {
        stage2Tlb->flushAllNs(false, true);

TLB::flushMvaAsid(Addr mva, uint64_t asn, bool secure_lookup, uint8_t target_el)
    DPRINTF(TLB, "Flushing TLB entries with mva: %#x, asid: %#x "
            "(%s lookup)\n", mva, asn, (secure_lookup ?
            "secure" : "non-secure"));
    _flushMva(mva, asn, secure_lookup, false, false, target_el);

TLB::flushAsid(uint64_t asn, bool secure_lookup, uint8_t target_el)
    DPRINTF(TLB, "Flushing TLB entries with asid: %#x (%s lookup)\n", asn,
            (secure_lookup ? "secure" : "non-secure"));

        if (te->valid && te->asid == asn && secure_lookup == !te->nstid &&
            (te->vmid == vmid || secure_lookup) &&
            checkELMatch(target_el, te->el, false)) {
            DPRINTF(TLB, " - %s\n", te->print());

TLB::flushMva(Addr mva, bool secure_lookup, bool hyp, uint8_t target_el)
    DPRINTF(TLB, "Flushing TLB entries with mva: %#x (%s lookup)\n", mva,
            (secure_lookup ? "secure" : "non-secure"));
    _flushMva(mva, 0xbeef, secure_lookup, hyp, true, target_el);

TLB::_flushMva(Addr mva, uint64_t asn, bool secure_lookup, bool hyp,
               bool ignore_asn, uint8_t target_el)
    // D5.7.2: Sign-extend address to 64 bits
    te = lookup(mva, asn, vmid, hyp, secure_lookup, false, ignore_asn,
                target_el);
        if (secure_lookup == !te->nstid) {
            DPRINTF(TLB, " - %s\n", te->print());
        te = lookup(mva, asn, vmid, hyp, secure_lookup, false, ignore_asn,
                    target_el);
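
// An entry tagged with EL2 or EL3 only matches an operation targeting that
// same exception level; EL0 and EL1 entries are treated as a single regime
// and match either.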
TLB::checkELMatch(uint8_t target_el, uint8_t tentry_el, bool ignore_el)
    if (target_el == 2 || target_el == 3) {
        elMatch = (tentry_el == target_el);
    } else {
        elMatch = (tentry_el == 0) || (tentry_el == 1);
    // We might have unserialized something or switched CPUs, so make
    // sure to re-read the misc regs.
    miscRegValid = false;
TLB::takeOverFrom(BaseTLB *_otlb)
    TLB *otlb = dynamic_cast<TLB *>(_otlb);
    /* Make sure we actually have a valid type */
        haveLPAE = otlb->haveLPAE;
        directToStage2 = otlb->directToStage2;
        stage2Req = otlb->stage2Req;
        bootUncacheability = otlb->bootUncacheability;

        /* Sync the stage2 MMU if they exist in both
         * the old CPU and the new */
            stage2Tlb && otlb->stage2Tlb) {
            stage2Tlb->takeOverFrom(otlb->stage2Tlb);
        panic("Incompatible TLB type!");
TLB::serialize(ostream &os)
    DPRINTF(Checkpoint, "Serializing Arm TLB\n");

    SERIALIZE_SCALAR(_attr);
    SERIALIZE_SCALAR(haveLPAE);
    SERIALIZE_SCALAR(directToStage2);
    SERIALIZE_SCALAR(stage2Req);
    SERIALIZE_SCALAR(bootUncacheability);

    int num_entries = size;
    SERIALIZE_SCALAR(num_entries);
    for (int i = 0; i < size; i++) {
        nameOut(os, csprintf("%s.TlbEntry%d", name(), i));
        table[i].serialize(os);

TLB::unserialize(Checkpoint *cp, const string &section)
    DPRINTF(Checkpoint, "Unserializing Arm TLB\n");

    UNSERIALIZE_SCALAR(_attr);
    UNSERIALIZE_SCALAR(haveLPAE);
    UNSERIALIZE_SCALAR(directToStage2);
    UNSERIALIZE_SCALAR(stage2Req);
    UNSERIALIZE_SCALAR(bootUncacheability);

    UNSERIALIZE_SCALAR(num_entries);
    for (int i = 0; i < min(size, num_entries); i++) {
        table[i].unserialize(cp, csprintf("%s.TlbEntry%d", section, i));
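
// Statistics: per access type (instruction fetch, read, write) hit/miss/access
// counters, plus counters for the various flush operations and fault causes.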
        .name(name() + ".inst_hits")
        .desc("ITB inst hits")

        .name(name() + ".inst_misses")
        .desc("ITB inst misses")

        .name(name() + ".inst_accesses")
        .desc("ITB inst accesses")

        .name(name() + ".read_hits")
        .desc("DTB read hits")

        .name(name() + ".read_misses")
        .desc("DTB read misses")

        .name(name() + ".read_accesses")
        .desc("DTB read accesses")

        .name(name() + ".write_hits")
        .desc("DTB write hits")

        .name(name() + ".write_misses")
        .desc("DTB write misses")

        .name(name() + ".write_accesses")
        .desc("DTB write accesses")

        .name(name() + ".hits")

        .name(name() + ".misses")

        .name(name() + ".accesses")
        .desc("DTB accesses")

        .name(name() + ".flush_tlb")
        .desc("Number of times complete TLB was flushed")

        .name(name() + ".flush_tlb_mva")
        .desc("Number of times TLB was flushed by MVA")

        .name(name() + ".flush_tlb_mva_asid")
        .desc("Number of times TLB was flushed by MVA & ASID")

        .name(name() + ".flush_tlb_asid")
        .desc("Number of times TLB was flushed by ASID")

        .name(name() + ".flush_entries")
        .desc("Number of entries that have been flushed from TLB")

        .name(name() + ".align_faults")
        .desc("Number of TLB faults due to alignment restrictions")

        .name(name() + ".prefetch_faults")
        .desc("Number of TLB faults due to prefetch")

        .name(name() + ".domain_faults")
        .desc("Number of TLB faults due to domain restrictions")

        .name(name() + ".perms_faults")
        .desc("Number of TLB faults due to permissions restrictions")
    instAccesses = instHits + instMisses;
    readAccesses = readHits + readMisses;
    writeAccesses = writeHits + writeMisses;
    hits = readHits + writeHits + instHits;
    misses = readMisses + writeMisses + instMisses;
    accesses = readAccesses + writeAccesses + instAccesses;
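
// ppRefills is a PMU probe point; insert() notifies it once per refill so
// probe listeners (e.g. a PMU model) can count TLB refills.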
TLB::regProbePoints()
    ppRefills.reset(new ProbePoints::PMU(getProbeManager(), "Refills"));
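
// translateSe handles syscall-emulation (SE) mode: alignment is checked and
// the address is then mapped through the process page table rather than the
// architectural MMU.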
TLB::translateSe(RequestPtr req, ThreadContext *tc, Mode mode,
                 Translation *translation, bool &delay, bool timing)
    Addr vaddr_tainted = req->getVaddr();
        vaddr = purifyTaggedAddr(vaddr_tainted, tc, aarch64EL);
        vaddr = vaddr_tainted;
    uint32_t flags = req->getFlags();

    bool is_fetch = (mode == Execute);
    bool is_write = (mode == Write);

        assert(flags & MustBeOne);
        if (sctlr.a || !(flags & AllowUnaligned)) {
            if (vaddr & mask(flags & AlignmentMask)) {
                // LPAE is always disabled in SE mode
                return std::make_shared<DataAbort>(
                    TlbEntry::DomainType::NoAccess, is_write,
                    ArmFault::AlignmentFault, isStage2,

    Process *p = tc->getProcessPtr();

    if (!p->pTable->translate(vaddr, paddr))
        return std::make_shared<GenericPageTableFault>(vaddr_tainted);
    req->setPaddr(paddr);
TLB::trickBoxCheck(RequestPtr req, Mode mode, TlbEntry::DomainType domain)

TLB::walkTrickBoxCheck(Addr pa, bool is_secure, Addr va, Addr sz, bool is_exec,
                       bool is_write, TlbEntry::DomainType domain, LookupLevel lookup_level)

TLB::checkPermissions(TlbEntry *te, RequestPtr req, Mode mode)
    Addr vaddr = req->getVaddr(); // 32-bit don't have to purify
    uint32_t flags = req->getFlags();
    bool is_fetch = (mode == Execute);
    bool is_write = (mode == Write);
    bool is_priv = isPriv && !(flags & UserMode);
    // Get the translation type from the actual table entry
    ArmFault::TranMethod tranMethod = te->longDescFormat ? ArmFault::LpaeTran
                                                         : ArmFault::VmsaTran;

    // If this is the second stage of translation and the request is for a
    // stage 1 page table walk then we need to check the HCR.PTW bit. This
    // allows us to generate a fault if the request targets an area marked
    // as a device or strongly ordered.
    if (isStage2 && req->isPTWalk() && hcr.ptw &&
        (te->mtype != TlbEntry::MemoryType::Normal)) {
        return std::make_shared<DataAbort>(
            vaddr, te->domain, is_write,
            ArmFault::PermissionLL + te->lookupLevel,
            isStage2, tranMethod);

    // Generate an alignment fault for unaligned data accesses to device or
    // strongly ordered memory
    if (te->mtype != TlbEntry::MemoryType::Normal) {
        if (vaddr & mask(flags & AlignmentMask)) {
            return std::make_shared<DataAbort>(
                vaddr, TlbEntry::DomainType::NoAccess, is_write,
                ArmFault::AlignmentFault, isStage2,

    if (te->nonCacheable) {
        // Prevent prefetching from I/O devices.
        if (req->isPrefetch()) {
            // Here we can safely use the fault status for the short
            // desc. format in all cases
            return std::make_shared<PrefetchAbort>(
                vaddr, ArmFault::PrefetchUncacheable,
                isStage2, tranMethod);

    if (!te->longDescFormat) {
        switch ((dacr >> (static_cast<uint8_t>(te->domain) * 2)) & 0x3) {
            DPRINTF(TLB, "TLB Fault: Data abort on domain. DACR: %#x"
                    " domain: %#x write:%d\n", dacr,
                    static_cast<uint8_t>(te->domain), is_write);
                return std::make_shared<PrefetchAbort>(
                    ArmFault::DomainLL + te->lookupLevel,
                    isStage2, tranMethod);
                return std::make_shared<DataAbort>(
                    vaddr, te->domain, is_write,
                    ArmFault::DomainLL + te->lookupLevel,
                    isStage2, tranMethod);
            // Continue with permissions check
            panic("UNPRED domain\n");

    // The 'ap' variable is AP[2:0] or {AP[2,1],1b'0}, i.e. always three bits
    uint8_t ap = te->longDescFormat ? te->ap << 1 : te->ap;
    uint8_t hap = te->hap;

    if (sctlr.afe == 1 || te->longDescFormat)

    bool isWritable = true;
    // If this is a stage 2 access (e.g. for reading stage 1 page table
    // entries) then don't perform the AP permissions check; we still do the
    // HAP check.
            DPRINTF(TLB, "Access permissions 0, checking rs:%#x\n",
            switch ((int)sctlr.rs) {
                    abt = is_write || !is_priv;
            abt = !is_priv && is_write;
            isWritable = is_priv;
720 panic("UNPRED premissions\n");
            abt = !is_priv || is_write;
            panic("Unknown permissions %#x\n", ap);

    bool hapAbt = is_write ? !(hap & 2) : !(hap & 1);
    bool xn = te->xn || (isWritable && sctlr.wxn) ||
        (ap == 3 && sctlr.uwxn && is_priv);
    if (is_fetch && (abt || xn ||
                     (te->longDescFormat && te->pxn && !is_priv) ||
                     (isSecure && te->ns && scr.sif))) {
        DPRINTF(TLB, "TLB Fault: Prefetch abort on permission check. AP:%d "
                "priv:%d write:%d ns:%d sif:%d sctlr.afe: %d \n",
                ap, is_priv, is_write, te->ns, scr.sif, sctlr.afe);
        return std::make_shared<PrefetchAbort>(
            ArmFault::PermissionLL + te->lookupLevel,
            isStage2, tranMethod);
    } else if (abt | hapAbt) {
        DPRINTF(TLB, "TLB Fault: Data abort on permission check. AP:%d priv:%d"
                " write:%d\n", ap, is_priv, is_write);
        return std::make_shared<DataAbort>(
            vaddr, te->domain, is_write,
            ArmFault::PermissionLL + te->lookupLevel,
            isStage2 | !abt, tranMethod);
TLB::checkPermissions64(TlbEntry *te, RequestPtr req, Mode mode,
                        ThreadContext *tc)
    Addr vaddr_tainted = req->getVaddr();
    Addr vaddr = purifyTaggedAddr(vaddr_tainted, tc, aarch64EL);

    uint32_t flags = req->getFlags();
    bool is_fetch = (mode == Execute);
    bool is_write = (mode == Write);
    bool is_priv M5_VAR_USED = isPriv && !(flags & UserMode);

    updateMiscReg(tc, curTranType);

    // If this is the second stage of translation and the request is for a
    // stage 1 page table walk then we need to check the HCR.PTW bit. This
    // allows us to generate a fault if the request targets an area marked
    // as a device or strongly ordered.
    if (isStage2 && req->isPTWalk() && hcr.ptw &&
        (te->mtype != TlbEntry::MemoryType::Normal)) {
        return std::make_shared<DataAbort>(
            vaddr_tainted, te->domain, is_write,
            ArmFault::PermissionLL + te->lookupLevel,
            isStage2, ArmFault::LpaeTran);

    // Generate an alignment fault for unaligned accesses to device or
    // strongly ordered memory
    if (te->mtype != TlbEntry::MemoryType::Normal) {
        if (vaddr & mask(flags & AlignmentMask)) {
            return std::make_shared<DataAbort>(
                TlbEntry::DomainType::NoAccess, is_write,
                ArmFault::AlignmentFault, isStage2,

    if (te->nonCacheable) {
        // Prevent prefetching from I/O devices.
        if (req->isPrefetch()) {
            // Here we can safely use the fault status for the short
            // desc. format in all cases
            return std::make_shared<PrefetchAbort>(
                ArmFault::PrefetchUncacheable,
                isStage2, ArmFault::LpaeTran);

    uint8_t ap = 0x3 & (te->ap); // 2-bit access protection field
    uint8_t pxn = te->pxn;
    bool r = !is_write && !is_fetch;

    DPRINTF(TLBVerbose, "Checking permissions: ap:%d, xn:%d, pxn:%d, r:%d, "
            "w:%d, x:%d\n", ap, xn, pxn, r, w, x);
829 panic("Virtualization in AArch64 state is not supported yet");
834 uint8_t perm
= (ap
<< 2) | (xn
<< 1) | pxn
;
844 grant
= r
|| w
|| (x
&& !sctlr
.wxn
);
865 uint8_t perm
= (ap
<< 2) | (xn
<< 1) | pxn
;
869 grant
= r
|| w
|| (x
&& !sctlr
.wxn
);
877 // regions that are writeable at EL0 should not be
901 uint8_t perm
= (ap
& 0x2) | xn
;
904 grant
= r
|| w
|| (x
&& !sctlr
.wxn
) ;
926 DPRINTF(TLB
, "TLB Fault: Prefetch abort on permission check. "
927 "AP:%d priv:%d write:%d ns:%d sif:%d "
929 ap
, is_priv
, is_write
, te
->ns
, scr
.sif
, sctlr
.afe
);
930 // Use PC value instead of vaddr because vaddr might be aligned to
931 // cache line and should not be the address reported in FAR
932 return std::make_shared
<PrefetchAbort
>(
934 ArmFault::PermissionLL
+ te
->lookupLevel
,
935 isStage2
, ArmFault::LpaeTran
);
938 DPRINTF(TLB
, "TLB Fault: Data abort on permission check. AP:%d "
939 "priv:%d write:%d\n", ap
, is_priv
, is_write
);
940 return std::make_shared
<DataAbort
>(
941 vaddr_tainted
, te
->domain
, is_write
,
942 ArmFault::PermissionLL
+ te
->lookupLevel
,
943 isStage2
, ArmFault::LpaeTran
);
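
// translateFs handles full-system (FS) mode: it covers the MMU-off case,
// otherwise fetches a translation via getResultTe(), applies memory
// attributes and alignment/permission checks, and sets delay when a timing
// table walk is still outstanding.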
TLB::translateFs(RequestPtr req, ThreadContext *tc, Mode mode,
                 Translation *translation, bool &delay, bool timing,
                 TLB::ArmTranslationType tranType, bool functional)
    // No such thing as a functional timing access
    assert(!(timing && functional));

    updateMiscReg(tc, tranType);

    Addr vaddr_tainted = req->getVaddr();
        vaddr = purifyTaggedAddr(vaddr_tainted, tc, aarch64EL);
        vaddr = vaddr_tainted;
    uint32_t flags = req->getFlags();

    bool is_fetch = (mode == Execute);
    bool is_write = (mode == Write);
    bool long_desc_format = aarch64 || (haveLPAE && ttbcr.eae);
    ArmFault::TranMethod tranMethod = long_desc_format ? ArmFault::LpaeTran
                                                       : ArmFault::VmsaTran;

    DPRINTF(TLBVerbose, "CPSR is priv:%d UserMode:%d secure:%d S1S2NsTran:%d\n",
            isPriv, flags & UserMode, isSecure, tranType & S1S2NsTran);

    DPRINTF(TLB, "translateFs addr %#x, mode %d, st2 %d, scr %#x sctlr %#x "
            "flags %#x tranType 0x%x\n", vaddr_tainted, mode, isStage2,
            scr, sctlr, flags, tranType);

    // Generate an alignment fault for unaligned PC
    if (aarch64 && is_fetch && (req->getPC() & mask(2))) {
        return std::make_shared<PCAlignmentFault>(req->getPC());
    // If this is a clrex instruction, provide a PA of 0 with no fault.
    // This will force the monitor to set the tracked address to 0,
    // a bit of a hack but this effectively clears this processor's monitor.
    if (flags & Request::CLEAR_LL) {
        // @todo: check implications of security extensions
        req->setFlags(Request::UNCACHEABLE);
        req->setFlags(Request::CLEAR_LL);

    if ((req->isInstFetch() && (!sctlr.i)) ||
        ((!req->isInstFetch()) && (!sctlr.c))) {
        req->setFlags(Request::UNCACHEABLE);

    assert(flags & MustBeOne);
    if (sctlr.a || !(flags & AllowUnaligned)) {
        if (vaddr & mask(flags & AlignmentMask)) {
            return std::make_shared<DataAbort>(
                TlbEntry::DomainType::NoAccess, is_write,
                ArmFault::AlignmentFault, isStage2,

    // If guest MMU is off or hcr.vm=0 go straight to stage2
    if ((isStage2 && !hcr.vm) || (!isStage2 && !sctlr.m)) {
        req->setPaddr(vaddr);
        // When the MMU is off the security attribute corresponds to the
        // security state of the processor
            req->setFlags(Request::SECURE);

        // @todo: double check this (ARM ARM issue C B3.2.1)
        if (long_desc_format || sctlr.tre == 0) {
            req->setFlags(Request::UNCACHEABLE);
            if (nmrr.ir0 == 0 || nmrr.or0 == 0 || prrr.tr0 != 0x2)
                req->setFlags(Request::UNCACHEABLE);

        // Set memory attributes
        temp_te.ns = !isSecure;
        if (isStage2 || hcr.dc == 0 || isSecure ||
            (isHyp && !(tranType & S1CTran))) {
            temp_te.mtype = is_fetch ? TlbEntry::MemoryType::Normal
                                     : TlbEntry::MemoryType::StronglyOrdered;
            temp_te.innerAttrs = 0x0;
            temp_te.outerAttrs = 0x0;
            temp_te.shareable = true;
            temp_te.outerShareable = true;
        } else {
            temp_te.mtype = TlbEntry::MemoryType::Normal;
            temp_te.innerAttrs = 0x3;
            temp_te.outerAttrs = 0x3;
            temp_te.shareable = false;
            temp_te.outerShareable = false;
        temp_te.setAttributes(long_desc_format);
        DPRINTF(TLBVerbose, "(No MMU) setting memory attributes: shareable: "
                "%d, innerAttrs: %d, outerAttrs: %d, isStage2: %d\n",
                temp_te.shareable, temp_te.innerAttrs, temp_te.outerAttrs,
        setAttr(temp_te.attributes);
        return trickBoxCheck(req, mode, TlbEntry::DomainType::NoAccess);

    DPRINTF(TLBVerbose, "Translating %s=%#x context=%d\n",
            isStage2 ? "IPA" : "VA", vaddr_tainted, asid);
    // Translation enabled

    TlbEntry *te = NULL;
    Fault fault = getResultTe(&te, req, tc, mode, translation, timing,
                              functional, &mergeTe);
    // only proceed if we have a valid table entry
    if ((te == NULL) && (fault == NoFault)) delay = true;
    // If we have the table entry, transfer some of the attributes to the
    // request that triggered the translation
        // Set memory attributes
        DPRINTF(TLBVerbose,
                "Setting memory attributes: shareable: %d, innerAttrs: %d, "
                "outerAttrs: %d, mtype: %d, isStage2: %d\n",
                te->shareable, te->innerAttrs, te->outerAttrs,
                static_cast<uint8_t>(te->mtype), isStage2);
        setAttr(te->attributes);
        if (te->nonCacheable) {
            req->setFlags(Request::UNCACHEABLE);

        if (!bootUncacheability &&
            ((ArmSystem*)tc->getSystemPtr())->adderBootUncacheable(vaddr)) {
            req->setFlags(Request::UNCACHEABLE);

        req->setPaddr(te->pAddr(vaddr));
        if (isSecure && !te->ns) {
            req->setFlags(Request::SECURE);

        if ((!is_fetch) && (vaddr & mask(flags & AlignmentMask)) &&
            (te->mtype != TlbEntry::MemoryType::Normal)) {
            // Unaligned accesses to Device memory should always cause an
            // abort regardless of sctlr.a
            return std::make_shared<DataAbort>(
                TlbEntry::DomainType::NoAccess, is_write,
                ArmFault::AlignmentFault, isStage2,

        // Check for a trickbox generated address fault
        if (fault == NoFault) {
            fault = trickBoxCheck(req, mode, te->domain);

    // Generate Illegal Inst Set State fault if IL bit is set in CPSR
    if (fault == NoFault) {
        CPSR cpsr = tc->readMiscReg(MISCREG_CPSR);
        if (aarch64 && is_fetch && cpsr.il == 1) {
            return std::make_shared<IllegalInstSetStateFault>();
TLB::translateAtomic(RequestPtr req, ThreadContext *tc, Mode mode,
                     TLB::ArmTranslationType tranType)
    updateMiscReg(tc, tranType);

    if (directToStage2) {
        return stage2Tlb->translateAtomic(req, tc, mode, tranType);

        fault = translateFs(req, tc, mode, NULL, delay, false, tranType);
        fault = translateSe(req, tc, mode, NULL, delay, false);

TLB::translateFunctional(RequestPtr req, ThreadContext *tc, Mode mode,
                         TLB::ArmTranslationType tranType)
    updateMiscReg(tc, tranType);

    if (directToStage2) {
        return stage2Tlb->translateFunctional(req, tc, mode, tranType);

        fault = translateFs(req, tc, mode, NULL, delay, false, tranType, true);
        fault = translateSe(req, tc, mode, NULL, delay, false);

TLB::translateTiming(RequestPtr req, ThreadContext *tc,
                     Translation *translation, Mode mode, TLB::ArmTranslationType tranType)
    updateMiscReg(tc, tranType);

    if (directToStage2) {
        return stage2Tlb->translateTiming(req, tc, translation, mode, tranType);

    assert(translation);

    return translateComplete(req, tc, translation, mode, tranType, isStage2);

TLB::translateComplete(RequestPtr req, ThreadContext *tc,
                       Translation *translation, Mode mode, TLB::ArmTranslationType tranType,
        fault = translateFs(req, tc, mode, translation, delay, true, tranType);
        fault = translateSe(req, tc, mode, translation, delay, true);
    DPRINTF(TLBVerbose, "Translation returning delay=%d fault=%d\n", delay, fault !=
    // If we have a translation, and we're not in the middle of doing a stage
    // 2 translation tell the translation that we've either finished or it's
    // going to take a while. By not doing this when we're in the middle of a
    // stage 2 translation we prevent marking the translation as delayed twice,
    // once when the translation starts and again when the stage 1 translation
    if (translation && (callFromS2 || !stage2Req || req->hasPaddr() || fault != NoFault)) {
            translation->finish(fault, req, tc, mode);
            translation->markDelayed();

TLB::getMasterPort()
    return &tableWalker->getMasterPort("port");

TLB::getWalkerPort()
    return tableWalker->getWalkerPort();
TLB::updateMiscReg(ThreadContext *tc, ArmTranslationType tranType)
    // check if the regs have changed, or the translation mode is different.
    // NOTE: the tran type doesn't affect stage 2 TLB's as they only handle
    // one type of translation anyway
    if (miscRegValid && ((tranType == curTranType) || isStage2)) {

    DPRINTF(TLBVerbose, "TLB variables changed!\n");
    CPSR cpsr = tc->readMiscReg(MISCREG_CPSR);
    // Dependencies: SCR/SCR_EL3, CPSR
    isSecure = inSecureState(tc);
    isSecure &= (tranType & HypMode) == 0;
    isSecure &= (tranType & S1S2NsTran) == 0;
    aarch64 = !cpsr.width;
    if (aarch64) {  // AArch64
        aarch64EL = (ExceptionLevel) (uint8_t) cpsr.el;
        switch (aarch64EL) {
            sctlr = tc->readMiscReg(MISCREG_SCTLR_EL1);
            ttbcr = tc->readMiscReg(MISCREG_TCR_EL1);
            uint64_t ttbr_asid = ttbcr.a1 ?
                tc->readMiscReg(MISCREG_TTBR1_EL1) :
                tc->readMiscReg(MISCREG_TTBR0_EL1);
            asid = bits(ttbr_asid,
                        (haveLargeAsid64 && ttbcr.as) ? 63 : 55, 48);
            sctlr = tc->readMiscReg(MISCREG_SCTLR_EL2);
            ttbcr = tc->readMiscReg(MISCREG_TCR_EL2);
            sctlr = tc->readMiscReg(MISCREG_SCTLR_EL3);
            ttbcr = tc->readMiscReg(MISCREG_TCR_EL3);
        scr = tc->readMiscReg(MISCREG_SCR_EL3);
        isPriv = aarch64EL != EL0;
        // @todo: modify this behaviour to support Virtualization in
        directToStage2 = false;
        sctlr = tc->readMiscReg(flattenMiscRegNsBanked(MISCREG_SCTLR, tc,
        ttbcr = tc->readMiscReg(flattenMiscRegNsBanked(MISCREG_TTBCR, tc,
        scr = tc->readMiscReg(MISCREG_SCR);
        isPriv = cpsr.mode != MODE_USER;
        if (haveLPAE && ttbcr.eae) {
            // Long-descriptor translation table format in use
            uint64_t ttbr_asid = tc->readMiscReg(
                flattenMiscRegNsBanked(ttbcr.a1 ? MISCREG_TTBR1
            asid = bits(ttbr_asid, 55, 48);
            // Short-descriptor translation table format in use
            CONTEXTIDR context_id = tc->readMiscReg(flattenMiscRegNsBanked(
                MISCREG_CONTEXTIDR, tc, !isSecure));
            asid = context_id.asid;
        prrr = tc->readMiscReg(flattenMiscRegNsBanked(MISCREG_PRRR, tc,
        nmrr = tc->readMiscReg(flattenMiscRegNsBanked(MISCREG_NMRR, tc,
        dacr = tc->readMiscReg(flattenMiscRegNsBanked(MISCREG_DACR, tc,
        hcr = tc->readMiscReg(MISCREG_HCR);

        if (haveVirtualization) {
            vmid = bits(tc->readMiscReg(MISCREG_VTTBR), 55, 48);
            isHyp = cpsr.mode == MODE_HYP;
            isHyp |= tranType & HypMode;
            isHyp &= (tranType & S1S2NsTran) == 0;
            isHyp &= (tranType & S1CTran) == 0;
                sctlr = tc->readMiscReg(MISCREG_HSCTLR);

            // Work out if we should skip the first stage of translation and go
            // directly to stage 2. This value is cached so we don't have to
            // compute it for every translation.
            stage2Req = hcr.vm && !isStage2 && !isHyp && !isSecure &&
                        !(tranType & S1CTran);
            directToStage2 = stage2Req && !sctlr.m;
            directToStage2 = false;

    miscRegValid = true;
    curTranType = tranType;
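
// getTE looks the address up in this TLB; on a miss it starts a hardware
// table walk (returning early in timing mode so the walk can complete
// asynchronously) and then repeats the lookup once the walk has filled the
// entry.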
TLB::getTE(TlbEntry **te, RequestPtr req, ThreadContext *tc, Mode mode,
           Translation *translation, bool timing, bool functional,
           bool is_secure, TLB::ArmTranslationType tranType)
    bool is_fetch = (mode == Execute);
    bool is_write = (mode == Write);

    Addr vaddr_tainted = req->getVaddr();

    ExceptionLevel target_el = aarch64 ? aarch64EL : EL1;
        vaddr = purifyTaggedAddr(vaddr_tainted, tc, target_el);
        vaddr = vaddr_tainted;

    *te = lookup(vaddr, asid, vmid, isHyp, is_secure, false, false, target_el);

        if (req->isPrefetch()) {
            // if the request is a prefetch don't attempt to fill the TLB or go
            // any further with the memory access (here we can safely use the
            // fault status for the short desc. format in all cases)
            return std::make_shared<PrefetchAbort>(
                vaddr_tainted, ArmFault::PrefetchTLBMiss, isStage2);
        // start translation table walk, pass variables rather than
        // re-retrieving them in the table walker for speed
        DPRINTF(TLB, "TLB Miss: Starting hardware table walker for %#x(%d:%d)\n",
                vaddr_tainted, asid, vmid);

        fault = tableWalker->walk(req, tc, asid, vmid, isHyp, mode,
                                  translation, timing, functional, is_secure,

        // for timing mode, return and wait for table walk,
        if (timing || fault != NoFault) {

        *te = lookup(vaddr, asid, vmid, isHyp, is_secure, false, false, target_el);
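
// getResultTe fetches the stage 1 entry and checks its permissions; when a
// stage 2 translation is also required it starts a Stage2LookUp, which either
// completes immediately or is told to delete itself once the stage 2 walk
// finishes.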
TLB::getResultTe(TlbEntry **te, RequestPtr req, ThreadContext *tc, Mode mode,
                 Translation *translation, bool timing, bool functional,
    TlbEntry *s1Te = NULL;

    Addr vaddr_tainted = req->getVaddr();

    // Get the stage 1 table entry
    fault = getTE(&s1Te, req, tc, mode, translation, timing, functional,
                  isSecure, curTranType);
    // only proceed if we have a valid table entry
    if ((s1Te != NULL) && (fault == NoFault)) {
        // Check stage 1 permissions before checking stage 2
            fault = checkPermissions64(s1Te, req, mode, tc);
            fault = checkPermissions(s1Te, req, mode);
        if (stage2Req & (fault == NoFault)) {
            Stage2LookUp *s2Lookup = new Stage2LookUp(this, stage2Tlb, *s1Te,
                req, translation, mode, timing, functional, curTranType);
            fault = s2Lookup->getTe(tc, mergeTe);
            if (s2Lookup->isComplete()) {
                // We've finished with the lookup so delete it
                // The lookup hasn't completed, so we can't delete it now. We
                // get round this by asking the object to self delete when the
                // translation is complete.
                s2Lookup->setSelfDelete();
        // This case deals with an S1 hit (or bypass), followed by
        // an S2 hit-but-perms issue
        DPRINTF(TLBVerbose, "s2TLB: reqVa %#x, reqPa %#x, fault %p\n",
                vaddr_tainted, req->hasPaddr() ? req->getPaddr() : ~0, fault);
        if (fault != NoFault) {
            ArmFault *armFault = reinterpret_cast<ArmFault *>(fault.get());
            armFault->annotate(ArmFault::S1PTW, false);
            armFault->annotate(ArmFault::OVA, vaddr_tainted);

ArmTLBParams::create()
    return new ArmISA::TLB(this);