/*
 * Copyright (c) 2010-2013 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2001-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "arch/arm/faults.hh"
#include "arch/arm/pagetable.hh"
#include "arch/arm/stage2_lookup.hh"
#include "arch/arm/stage2_mmu.hh"
#include "arch/arm/system.hh"
#include "arch/arm/table_walker.hh"
#include "arch/arm/tlb.hh"
#include "arch/arm/utility.hh"
#include "base/inifile.hh"
#include "base/str.hh"
#include "base/trace.hh"
#include "cpu/base.hh"
#include "cpu/thread_context.hh"
#include "debug/Checkpoint.hh"
#include "debug/TLB.hh"
#include "debug/TLBVerbose.hh"
#include "mem/page_table.hh"
#include "params/ArmTLB.hh"
#include "sim/full_system.hh"
#include "sim/process.hh"

using namespace std;
using namespace ArmISA;

TLB::TLB(const ArmTLBParams *p)
    : BaseTLB(p), table(new TlbEntry[p->size]), size(p->size),
      isStage2(p->is_stage2), tableWalker(p->walker), stage2Tlb(NULL),
      stage2Mmu(NULL), rangeMRU(1), bootUncacheability(false),
      miscRegValid(false), curTranType(NormalTran)
{
    tableWalker->setTlb(this);

    // Cache system-level properties
    haveLPAE = tableWalker->haveLPAE();
    haveVirtualization = tableWalker->haveVirtualization();
    haveLargeAsid64 = tableWalker->haveLargeAsid64();
}

void
TLB::init()
{
    if (stage2Mmu && !isStage2)
        stage2Tlb = stage2Mmu->stage2Tlb();
}

void
TLB::setMMU(Stage2MMU *m)
{
    stage2Mmu = m;
    tableWalker->setMMU(m);
}

bool
TLB::translateFunctional(ThreadContext *tc, Addr va, Addr &pa)
{
    updateMiscReg(tc);

    if (directToStage2) {
        assert(stage2Tlb);
        return stage2Tlb->translateFunctional(tc, va, pa);
    }

    TlbEntry *e = lookup(va, asid, vmid, isHyp, isSecure, true, false,
                         aarch64 ? aarch64EL : EL1);
    if (!e)
        return false;
    pa = e->pAddr(va);
    return true;
}
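
// A minimal usage sketch (hypothetical caller): this variant reports
// success/failure instead of raising a fault, so debug or syscall-emulation
// code can probe a mapping without side effects:
//
//     Addr pa;
//     if (tlb->translateFunctional(tc, vaddr, pa)) {
//         // pa now holds the physical address backing vaddr
//     }
//
// Note that the lookup above passes functional == true, so a probe never
// reorders the MRU state of the table.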

Fault
TLB::finalizePhysical(RequestPtr req, ThreadContext *tc, Mode mode) const
{
    return NoFault;
}

TlbEntry*
TLB::lookup(Addr va, uint16_t asn, uint8_t vmid, bool hyp, bool secure,
            bool functional, bool ignore_asn, uint8_t target_el)
{
    TlbEntry *retval = NULL;
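
    // Replacement policy sketch: the table is searched linearly from index
    // 0 and kept in roughly most-recently-used order. On a hit beyond
    // rangeMRU the entry is promoted to index 0 (shifting the others down),
    // so index size - 1 is always the eviction candidate; hits at or below
    // rangeMRU stay put to bound the cost of shifting.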

    // Maintaining LRU array
    int x = 0;
    while (retval == NULL && x < size) {
        if ((!ignore_asn && table[x].match(va, asn, vmid, hyp, secure, false,
             target_el)) ||
            (ignore_asn && table[x].match(va, vmid, hyp, secure, target_el))) {
            // We only move the hit entry ahead when the position is higher
            // than rangeMRU
            if (x > rangeMRU && !functional) {
                TlbEntry tmp_entry = table[x];
                for (int i = x; i > 0; i--)
                    table[i] = table[i - 1];
                table[0] = tmp_entry;
                retval = &table[0];
            } else {
                retval = &table[x];
            }
            break;
        }
        ++x;
    }

    DPRINTF(TLBVerbose, "Lookup %#x, asn %#x -> %s vmn 0x%x hyp %d secure %d "
            "ppn %#x size: %#x pa: %#x ap:%d ns:%d nstid:%d g:%d asid: %d "
            "el: %d\n",
            va, asn, retval ? "hit" : "miss", vmid, hyp, secure,
            retval ? retval->pfn : 0, retval ? retval->size : 0,
            retval ? retval->pAddr(va) : 0, retval ? retval->ap : 0,
            retval ? retval->ns : 0, retval ? retval->nstid : 0,
            retval ? retval->global : 0, retval ? retval->asid : 0,
            retval ? retval->el : 0);

    return retval;
}

// insert a new TLB entry
void
TLB::insert(Addr addr, TlbEntry &entry)
{
    DPRINTF(TLB, "Inserting entry into TLB with pfn:%#x size:%#x vpn: %#x"
            " asid:%d vmid:%d N:%d global:%d valid:%d nc:%d xn:%d"
            " ap:%#x domain:%#x ns:%d nstid:%d isHyp:%d\n", entry.pfn,
            entry.size, entry.vpn, entry.asid, entry.vmid, entry.N,
            entry.global, entry.valid, entry.nonCacheable, entry.xn,
            entry.ap, static_cast<uint8_t>(entry.domain), entry.ns,
            entry.nstid, entry.isHyp);

    if (table[size - 1].valid)
        DPRINTF(TLB, " - Replacing Valid entry %#x, asn %d vmn %d ppn %#x "
                "size: %#x ap:%d ns:%d nstid:%d g:%d isHyp:%d el: %d\n",
                table[size-1].vpn << table[size-1].N, table[size-1].asid,
                table[size-1].vmid, table[size-1].pfn << table[size-1].N,
                table[size-1].size, table[size-1].ap, table[size-1].ns,
                table[size-1].nstid, table[size-1].global,
                table[size-1].isHyp, table[size-1].el);

    // inserting to MRU position and evicting the LRU one
    for (int i = size - 1; i > 0; --i)
        table[i] = table[i-1];
    table[0] = entry;

    inserts++;
}
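
// Note on the fill path: insert() always places the new translation at the
// MRU position (index 0) after shifting everything else down, so the entry
// logged above as "Replacing Valid entry" (slot size - 1) is the one
// evicted. On a TLB miss, tableWalker->walk() builds the TlbEntry and
// eventually calls insert().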

void
TLB::printTlb() const
{
    int x = 0;
    TlbEntry *te;
    DPRINTF(TLB, "Current TLB contents:\n");
    while (x < size) {
        te = &table[x];
        if (te->valid)
            DPRINTF(TLB, " * %s\n", te->print());
        ++x;
    }
}

void
TLB::flushAllSecurity(bool secure_lookup, uint8_t target_el, bool ignore_el)
{
    DPRINTF(TLB, "Flushing all TLB entries (%s lookup)\n",
            (secure_lookup ? "secure" : "non-secure"));
    int x = 0;
    TlbEntry *te;
    while (x < size) {
        te = &table[x];
        if (te->valid && secure_lookup == !te->nstid &&
            (te->vmid == vmid || secure_lookup) &&
            checkELMatch(target_el, te->el, ignore_el)) {

            DPRINTF(TLB, " - %s\n", te->print());
            te->valid = false;
            flushedEntries++;
        }
        ++x;
    }

    flushTlb++;

    // If there's a second stage TLB (and we're not it) then flush it as well
    // if we're currently in hyp mode
    if (!isStage2 && isHyp) {
        stage2Tlb->flushAllSecurity(secure_lookup, true);
    }
}

void
TLB::flushAllNs(bool hyp, uint8_t target_el, bool ignore_el)
{
    DPRINTF(TLB, "Flushing all NS TLB entries (%s lookup)\n",
            (hyp ? "hyp" : "non-hyp"));
    int x = 0;
    TlbEntry *te;
    while (x < size) {
        te = &table[x];
        if (te->valid && te->nstid && te->isHyp == hyp &&
            checkELMatch(target_el, te->el, ignore_el)) {

            te->valid = false;
            DPRINTF(TLB, " - %s\n", te->print());
            flushedEntries++;
        }
        ++x;
    }

    flushTlb++;

    // If there's a second stage TLB (and we're not it) then flush it as well
    if (!isStage2 && !hyp) {
        stage2Tlb->flushAllNs(false, true);
    }
}

void
TLB::flushMvaAsid(Addr mva, uint64_t asn, bool secure_lookup,
                  uint8_t target_el)
{
    DPRINTF(TLB, "Flushing TLB entries with mva: %#x, asid: %#x "
            "(%s lookup)\n", mva, asn, (secure_lookup ?
            "secure" : "non-secure"));
    _flushMva(mva, asn, secure_lookup, false, false, target_el);
    flushTlbMvaAsid++;
}

void
TLB::flushAsid(uint64_t asn, bool secure_lookup, uint8_t target_el)
{
    DPRINTF(TLB, "Flushing TLB entries with asid: %#x (%s lookup)\n", asn,
            (secure_lookup ? "secure" : "non-secure"));

    int x = 0;
    TlbEntry *te;
    while (x < size) {
        te = &table[x];
        if (te->valid && te->asid == asn && secure_lookup == !te->nstid &&
            (te->vmid == vmid || secure_lookup) &&
            checkELMatch(target_el, te->el, false)) {

            te->valid = false;
            DPRINTF(TLB, " - %s\n", te->print());
            flushedEntries++;
        }
        ++x;
    }
    flushTlbAsid++;
}

void
TLB::flushMva(Addr mva, bool secure_lookup, bool hyp, uint8_t target_el)
{
    DPRINTF(TLB, "Flushing TLB entries with mva: %#x (%s lookup)\n", mva,
            (secure_lookup ? "secure" : "non-secure"));
    // The ASID is ignored here (ignore_asn == true), so a dummy value is
    // passed in its place
    _flushMva(mva, 0xbeef, secure_lookup, hyp, true, target_el);
    flushTlbMva++;
}

void
TLB::_flushMva(Addr mva, uint64_t asn, bool secure_lookup, bool hyp,
               bool ignore_asn, uint8_t target_el)
{
    TlbEntry *te;
    // D5.7.2: Sign-extend address to 64 bits
    mva = sext<56>(mva);
    te = lookup(mva, asn, vmid, hyp, secure_lookup, false, ignore_asn,
                target_el);
    while (te != NULL) {
        if (secure_lookup == !te->nstid) {
            DPRINTF(TLB, " - %s\n", te->print());
            te->valid = false;
            flushedEntries++;
        }
        te = lookup(mva, asn, vmid, hyp, secure_lookup, false, ignore_asn,
                    target_el);
    }
}

bool
TLB::checkELMatch(uint8_t target_el, uint8_t tentry_el, bool ignore_el)
{
    bool elMatch = true;
    if (!ignore_el) {
        if (target_el == 2 || target_el == 3) {
            elMatch = (tentry_el == target_el);
        } else {
            elMatch = (tentry_el == 0) || (tentry_el == 1);
        }
    }
    return elMatch;
}
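
// In other words: flushes targeting EL2 or EL3 only match entries installed
// at exactly that level, while EL0 and EL1 are treated as a single
// translation regime, so an entry at either level matches a flush targeting
// EL0/EL1 (unless ignore_el forces a match for everything).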

void
TLB::drainResume()
{
    // We might have unserialized something or switched CPUs, so make
    // sure to re-read the misc regs.
    miscRegValid = false;
}

void
TLB::serialize(ostream &os)
{
    DPRINTF(Checkpoint, "Serializing Arm TLB\n");

    SERIALIZE_SCALAR(_attr);
    SERIALIZE_SCALAR(haveLPAE);
    SERIALIZE_SCALAR(directToStage2);
    SERIALIZE_SCALAR(stage2Req);
    SERIALIZE_SCALAR(bootUncacheability);

    int num_entries = size;
    SERIALIZE_SCALAR(num_entries);
    for (int i = 0; i < size; i++) {
        nameOut(os, csprintf("%s.TlbEntry%d", name(), i));
        table[i].serialize(os);
    }
}

void
TLB::unserialize(Checkpoint *cp, const string &section)
{
    DPRINTF(Checkpoint, "Unserializing Arm TLB\n");

    UNSERIALIZE_SCALAR(_attr);
    UNSERIALIZE_SCALAR(haveLPAE);
    UNSERIALIZE_SCALAR(directToStage2);
    UNSERIALIZE_SCALAR(stage2Req);
    UNSERIALIZE_SCALAR(bootUncacheability);

    int num_entries;
    UNSERIALIZE_SCALAR(num_entries);
    for (int i = 0; i < min(size, num_entries); i++) {
        table[i].unserialize(cp, csprintf("%s.TlbEntry%d", section, i));
    }
}
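
// The resulting checkpoint contains one ini-style section per entry, e.g.
// (a sketch; the prefix depends on where this TLB sits in the hierarchy):
//
//     [system.cpu.dtb.TlbEntry0]
//     ...
//
// Using min(size, num_entries) on restore lets a checkpoint taken with a
// larger TLB be loaded into a smaller one; any surplus entries are simply
// dropped and will be refilled by table walks.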

void
TLB::regStats()
{
    instHits
        .name(name() + ".inst_hits")
        .desc("ITB inst hits")
        ;

    instMisses
        .name(name() + ".inst_misses")
        .desc("ITB inst misses")
        ;

    instAccesses
        .name(name() + ".inst_accesses")
        .desc("ITB inst accesses")
        ;

    readHits
        .name(name() + ".read_hits")
        .desc("DTB read hits")
        ;

    readMisses
        .name(name() + ".read_misses")
        .desc("DTB read misses")
        ;

    readAccesses
        .name(name() + ".read_accesses")
        .desc("DTB read accesses")
        ;

    writeHits
        .name(name() + ".write_hits")
        .desc("DTB write hits")
        ;

    writeMisses
        .name(name() + ".write_misses")
        .desc("DTB write misses")
        ;

    writeAccesses
        .name(name() + ".write_accesses")
        .desc("DTB write accesses")
        ;

    hits
        .name(name() + ".hits")
        .desc("DTB hits")
        ;

    misses
        .name(name() + ".misses")
        .desc("DTB misses")
        ;

    accesses
        .name(name() + ".accesses")
        .desc("DTB accesses")
        ;

    flushTlb
        .name(name() + ".flush_tlb")
        .desc("Number of times complete TLB was flushed")
        ;

    flushTlbMva
        .name(name() + ".flush_tlb_mva")
        .desc("Number of times TLB was flushed by MVA")
        ;

    flushTlbMvaAsid
        .name(name() + ".flush_tlb_mva_asid")
        .desc("Number of times TLB was flushed by MVA & ASID")
        ;

    flushTlbAsid
        .name(name() + ".flush_tlb_asid")
        .desc("Number of times TLB was flushed by ASID")
        ;

    flushedEntries
        .name(name() + ".flush_entries")
        .desc("Number of entries that have been flushed from TLB")
        ;

    alignFaults
        .name(name() + ".align_faults")
        .desc("Number of TLB faults due to alignment restrictions")
        ;

    prefetchFaults
        .name(name() + ".prefetch_faults")
        .desc("Number of TLB faults due to prefetch")
        ;

    domainFaults
        .name(name() + ".domain_faults")
        .desc("Number of TLB faults due to domain restrictions")
        ;

    permsFaults
        .name(name() + ".perms_faults")
        .desc("Number of TLB faults due to permissions restrictions")
        ;

    instAccesses = instHits + instMisses;
    readAccesses = readHits + readMisses;
    writeAccesses = writeHits + writeMisses;
    hits = readHits + writeHits + instHits;
    misses = readMisses + writeMisses + instMisses;
    accesses = readAccesses + writeAccesses + instAccesses;
}
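
// The assignments above do not copy values: instAccesses, hits, misses and
// friends are formula stats, so the sums are (re)evaluated from the
// component counters whenever statistics are dumped.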

Fault
TLB::translateSe(RequestPtr req, ThreadContext *tc, Mode mode,
                 Translation *translation, bool &delay, bool timing)
{
    updateMiscReg(tc);
    Addr vaddr_tainted = req->getVaddr();
    Addr vaddr = 0;
    if (aarch64)
        vaddr = purifyTaggedAddr(vaddr_tainted, tc, aarch64EL);
    else
        vaddr = vaddr_tainted;
    uint32_t flags = req->getFlags();

    bool is_fetch = (mode == Execute);
    bool is_write = (mode == Write);

    if (!is_fetch) {
        assert(flags & MustBeOne);
        if (sctlr.a || !(flags & AllowUnaligned)) {
            if (vaddr & mask(flags & AlignmentMask)) {
                // LPAE is always disabled in SE mode
                return new DataAbort(vaddr_tainted,
                                     TlbEntry::DomainType::NoAccess, is_write,
                                     ArmFault::AlignmentFault, isStage2,
                                     ArmFault::VmsaTran);
            }
        }
    }

    Addr paddr;
    Process *p = tc->getProcessPtr();

    if (!p->pTable->translate(vaddr, paddr))
        return Fault(new GenericPageTableFault(vaddr_tainted));
    req->setPaddr(paddr);

    return NoFault;
}
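
// Worked example for the alignment check above (a sketch, assuming the
// request encodes log2(access size) in its AlignmentMask bits): a 4-byte
// access carries 2, so mask(2) == 0x3; a load from vaddr 0x1002 then gives
// 0x1002 & 0x3 == 0x2 != 0 and raises an AlignmentFault, while a load from
// 0x1004 passes.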

Fault
TLB::trickBoxCheck(RequestPtr req, Mode mode, TlbEntry::DomainType domain)
{
    return NoFault;
}

Fault
TLB::walkTrickBoxCheck(Addr pa, bool is_secure, Addr va, Addr sz,
                       bool is_exec, bool is_write,
                       TlbEntry::DomainType domain, LookupLevel lookup_level)
{
    return NoFault;
}

Fault
TLB::checkPermissions(TlbEntry *te, RequestPtr req, Mode mode)
{
    Addr vaddr = req->getVaddr(); // 32-bit don't have to purify
    uint32_t flags = req->getFlags();
    bool is_fetch = (mode == Execute);
    bool is_write = (mode == Write);
    bool is_priv = isPriv && !(flags & UserMode);

    // Get the translation type from the actual table entry
    ArmFault::TranMethod tranMethod = te->longDescFormat ? ArmFault::LpaeTran
                                                         : ArmFault::VmsaTran;

    // If this is the second stage of translation and the request is for a
    // stage 1 page table walk then we need to check the HCR.PTW bit. This
    // allows us to generate a fault if the request targets an area marked
    // as a device or strongly ordered.
    if (isStage2 && req->isPTWalk() && hcr.ptw &&
        (te->mtype != TlbEntry::MemoryType::Normal)) {
        return new DataAbort(vaddr, te->domain, is_write,
                             ArmFault::PermissionLL + te->lookupLevel,
                             isStage2, tranMethod);
    }

    // Generate an alignment fault for unaligned data accesses to device or
    // strongly ordered memory
    if (!is_fetch) {
        if (te->mtype != TlbEntry::MemoryType::Normal) {
            if (vaddr & mask(flags & AlignmentMask)) {
                alignFaults++;
                return new DataAbort(vaddr, TlbEntry::DomainType::NoAccess,
                                     is_write, ArmFault::AlignmentFault,
                                     isStage2, tranMethod);
            }
        }
    }

    if (te->nonCacheable) {
        // Prevent prefetching from I/O devices.
        if (req->isPrefetch()) {
            // Here we can safely use the fault status for the short
            // desc. format in all cases
            return new PrefetchAbort(vaddr, ArmFault::PrefetchUncacheable,
                                     isStage2, tranMethod);
        }
    }

    if (!te->longDescFormat) {
        switch ((dacr >> (static_cast<uint8_t>(te->domain) * 2)) & 0x3) {
          case 0:
            domainFaults++;
            DPRINTF(TLB, "TLB Fault: Data abort on domain. DACR: %#x"
                    " domain: %#x write:%d\n", dacr,
                    static_cast<uint8_t>(te->domain), is_write);
            if (is_fetch)
                return new PrefetchAbort(vaddr,
                                         ArmFault::DomainLL + te->lookupLevel,
                                         isStage2, tranMethod);
            else
                return new DataAbort(vaddr, te->domain, is_write,
                                     ArmFault::DomainLL + te->lookupLevel,
                                     isStage2, tranMethod);
          case 1:
            // Continue with permissions check
            break;
          case 2:
            // UNPRED domain
            panic("UNPRED domain\n");
          case 3:
            // Manager domain: accesses are not checked against permissions
            return NoFault;
        }
    }

    // The 'ap' variable is AP[2:0] or {AP[2,1],1b'0}, i.e. always three bits
    uint8_t ap = te->longDescFormat ? te->ap << 1 : te->ap;
    uint8_t hap = te->hap;

    if (sctlr.afe == 1 || te->longDescFormat)
        ap |= 1;

    bool abt = false;
    bool isWritable = true;
    // If this is a stage 2 access (eg for reading stage 1 page table entries)
    // then don't perform the AP permissions check, we still do the HAP check
    if (isStage2) {
        // HAP is checked below
    } else {
        switch (ap) {
          case 0:
            DPRINTF(TLB, "Access permissions 0, checking rs:%#x\n",
                    (int)sctlr.rs);
            if (!sctlr.xp) {
                switch ((int)sctlr.rs) {
                  case 2:
                    abt = is_write;
                    break;
                  case 1:
                    abt = is_write || !is_priv;
                    break;
                  case 0:
                  case 3:
                  default:
                    abt = true;
                    break;
                }
            } else {
                abt = true;
            }
            break;
          case 1:
            abt = !is_priv;
            break;
          case 2:
            abt = !is_priv && is_write;
            isWritable = is_priv;
            break;
          case 3:
            abt = false;
            break;
          case 4:
            panic("UNPRED permissions\n");
          case 5:
            abt = !is_priv || is_write;
            isWritable = false;
            break;
          case 6:
          case 7:
            abt = is_write;
            isWritable = false;
            break;
          default:
            panic("Unknown permissions %#x\n", ap);
        }
    }

    bool hapAbt = is_write ? !(hap & 2) : !(hap & 1);
    bool xn = te->xn || (isWritable && sctlr.wxn) ||
              (ap == 3 && sctlr.uwxn && is_priv);
    if (is_fetch && (abt || xn ||
                     (te->longDescFormat && te->pxn && !is_priv) ||
                     (isSecure && te->ns && scr.sif))) {
        permsFaults++;
        DPRINTF(TLB, "TLB Fault: Prefetch abort on permission check. AP:%d "
                "priv:%d write:%d ns:%d sif:%d sctlr.afe: %d \n",
                ap, is_priv, is_write, te->ns, scr.sif, sctlr.afe);
        return new PrefetchAbort(vaddr,
                                 ArmFault::PermissionLL + te->lookupLevel,
                                 isStage2, tranMethod);
    } else if (abt | hapAbt) {
        permsFaults++;
        DPRINTF(TLB, "TLB Fault: Data abort on permission check. AP:%d priv:%d"
                " write:%d\n", ap, is_priv, is_write);
        return new DataAbort(vaddr, te->domain, is_write,
                             ArmFault::PermissionLL + te->lookupLevel,
                             isStage2 | !abt, tranMethod);
    }
    return NoFault;
}
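
// For reference, the short-descriptor AP encodings checked above map to the
// classic VMSA meanings (after the AFE/long-descriptor adjustment forces
// bit 0 set): 1 = privileged RW only, 2 = privileged RW / user RO,
// 3 = RW at any privilege, 5 = privileged RO only, 6/7 = RO at any
// privilege; 4 is UNPREDICTABLE, hence the panic above.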

Fault
TLB::checkPermissions64(TlbEntry *te, RequestPtr req, Mode mode,
                        ThreadContext *tc)
{
    assert(aarch64);

    Addr vaddr_tainted = req->getVaddr();
    Addr vaddr = purifyTaggedAddr(vaddr_tainted, tc, aarch64EL);

    uint32_t flags = req->getFlags();
    bool is_fetch = (mode == Execute);
    bool is_write = (mode == Write);
    bool is_priv M5_VAR_USED = isPriv && !(flags & UserMode);

    updateMiscReg(tc, curTranType);

    // If this is the second stage of translation and the request is for a
    // stage 1 page table walk then we need to check the HCR.PTW bit. This
    // allows us to generate a fault if the request targets an area marked
    // as a device or strongly ordered.
    if (isStage2 && req->isPTWalk() && hcr.ptw &&
        (te->mtype != TlbEntry::MemoryType::Normal)) {
        return new DataAbort(vaddr_tainted, te->domain, is_write,
                             ArmFault::PermissionLL + te->lookupLevel,
                             isStage2, ArmFault::LpaeTran);
    }

    // Generate an alignment fault for unaligned accesses to device or
    // strongly ordered memory
    if (!is_fetch) {
        if (te->mtype != TlbEntry::MemoryType::Normal) {
            if (vaddr & mask(flags & AlignmentMask)) {
                alignFaults++;
                return new DataAbort(vaddr_tainted,
                                     TlbEntry::DomainType::NoAccess, is_write,
                                     ArmFault::AlignmentFault, isStage2,
                                     ArmFault::LpaeTran);
            }
        }
    }

    if (te->nonCacheable) {
        // Prevent prefetching from I/O devices.
        if (req->isPrefetch()) {
            // Here we can safely use the fault status for the short
            // desc. format in all cases
            return new PrefetchAbort(vaddr_tainted,
                                     ArmFault::PrefetchUncacheable,
                                     isStage2, ArmFault::LpaeTran);
        }
    }

    uint8_t ap = 0x3 & (te->ap);  // 2-bit access protection field
    bool grant = false;

    uint8_t xn = te->xn;
    uint8_t pxn = te->pxn;
    bool r = !is_write && !is_fetch;
    bool w = is_write;
    bool x = is_fetch;
    DPRINTF(TLBVerbose, "Checking permissions: ap:%d, xn:%d, pxn:%d, r:%d, "
            "w:%d, x:%d\n", ap, xn, pxn, r, w, x);

    if (isStage2) {
        panic("Virtualization in AArch64 state is not supported yet");
    } else {
        switch (aarch64EL) {
          case EL0:
            {
                uint8_t perm = (ap << 2) | (xn << 1) | pxn;
                switch (perm) {
                  case 4:
                  case 5:
                    grant = r || w || (x && !sctlr.wxn);
                    break;
                  // ... (remaining EL0 encodings elided)
                  default:
                    grant = false;
                }
            }
            break;
          case EL1:
            {
                uint8_t perm = (ap << 2) | (xn << 1) | pxn;
                switch (perm) {
                  case 0:
                  case 2:
                    grant = r || w || (x && !sctlr.wxn);
                    break;
                  // regions that are writeable at EL0 should not be
                  // executed at EL1
                  // ... (remaining EL1 encodings elided)
                  default:
                    grant = false;
                }
            }
            break;
          case EL2:
          case EL3:
            {
                uint8_t perm = (ap & 0x2) | xn;
                switch (perm) {
                  case 0:
                    grant = r || w || (x && !sctlr.wxn);
                    break;
                  case 1:
                    grant = r || w;
                    break;
                  case 2:
                    grant = r || x;
                    break;
                  case 3:
                    grant = r;
                    break;
                  default:
                    grant = false;
                }
            }
            break;
        }
    }

    if (!grant) {
        if (is_fetch) {
            permsFaults++;
            DPRINTF(TLB, "TLB Fault: Prefetch abort on permission check. "
                    "AP:%d priv:%d write:%d ns:%d sif:%d "
                    "sctlr.afe: %d\n",
                    ap, is_priv, is_write, te->ns, scr.sif, sctlr.afe);
            // Use PC value instead of vaddr because vaddr might be aligned to
            // cache line and should not be the address reported in FAR
            return new PrefetchAbort(req->getPC(),
                                     ArmFault::PermissionLL + te->lookupLevel,
                                     isStage2, ArmFault::LpaeTran);
        } else {
            permsFaults++;
            DPRINTF(TLB, "TLB Fault: Data abort on permission check. AP:%d "
                    "priv:%d write:%d\n", ap, is_priv, is_write);
            return new DataAbort(vaddr_tainted, te->domain, is_write,
                                 ArmFault::PermissionLL + te->lookupLevel,
                                 isStage2, ArmFault::LpaeTran);
        }
    }

    return NoFault;
}
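
// The perm packing above is just arithmetic: perm = (ap << 2)|(xn << 1)|pxn,
// so e.g. AP = 0b01 with XN = 0 and PXN = 0 yields perm == 4, one of the
// cases granting r || w || (x && !sctlr.wxn), while setting XN flips bit 1
// and lands in an execute-never case. At EL2/EL3 only AP[2] and XN matter,
// hence the narrower (ap & 0x2) | xn key.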

Fault
TLB::translateFs(RequestPtr req, ThreadContext *tc, Mode mode,
                 Translation *translation, bool &delay, bool timing,
                 TLB::ArmTranslationType tranType, bool functional)
{
    // No such thing as a functional timing access
    assert(!(timing && functional));

    updateMiscReg(tc, tranType);

    Addr vaddr_tainted = req->getVaddr();
    Addr vaddr = 0;
    if (aarch64)
        vaddr = purifyTaggedAddr(vaddr_tainted, tc, aarch64EL);
    else
        vaddr = vaddr_tainted;
    uint32_t flags = req->getFlags();

    bool is_fetch = (mode == Execute);
    bool is_write = (mode == Write);
    bool long_desc_format = aarch64 || (haveLPAE && ttbcr.eae);
    ArmFault::TranMethod tranMethod = long_desc_format ? ArmFault::LpaeTran
                                                       : ArmFault::VmsaTran;

    DPRINTF(TLBVerbose, "CPSR is priv:%d UserMode:%d secure:%d S1S2NsTran:%d\n",
            isPriv, flags & UserMode, isSecure, tranType & S1S2NsTran);

    DPRINTF(TLB, "translateFs addr %#x, mode %d, st2 %d, scr %#x sctlr %#x "
            "flags %#x tranType 0x%x\n", vaddr_tainted, mode, isStage2,
            scr, sctlr, flags, tranType);

    // Generate an alignment fault for unaligned PC
    if (aarch64 && is_fetch && (req->getPC() & mask(2))) {
        return new PCAlignmentFault(req->getPC());
    }

    // If this is a clrex instruction, provide a PA of 0 with no fault
    // This will force the monitor to set the tracked address to 0
    // a bit of a hack but this effectively clears this processor's monitor
    if (flags & Request::CLEAR_LL) {
        // @todo: check implications of security extensions
        req->setPaddr(0);
        req->setFlags(Request::UNCACHEABLE);
        req->setFlags(Request::CLEAR_LL);
        return NoFault;
    }
    if ((req->isInstFetch() && (!sctlr.i)) ||
        ((!req->isInstFetch()) && (!sctlr.c))) {
        req->setFlags(Request::UNCACHEABLE);
    }
    if (!is_fetch) {
        assert(flags & MustBeOne);
        if (sctlr.a || !(flags & AllowUnaligned)) {
            if (vaddr & mask(flags & AlignmentMask)) {
                alignFaults++;
                return new DataAbort(vaddr_tainted,
                                     TlbEntry::DomainType::NoAccess, is_write,
                                     ArmFault::AlignmentFault, isStage2,
                                     tranMethod);
            }
        }
    }

    // If guest MMU is off or hcr.vm=0 go straight to stage2
    if ((isStage2 && !hcr.vm) || (!isStage2 && !sctlr.m)) {

        req->setPaddr(vaddr);
        // When the MMU is off the security attribute corresponds to the
        // security state of the processor
        if (isSecure)
            req->setFlags(Request::SECURE);

        // @todo: double check this (ARM ARM issue C B3.2.1)
        if (long_desc_format || sctlr.tre == 0) {
            req->setFlags(Request::UNCACHEABLE);
        } else {
            if (nmrr.ir0 == 0 || nmrr.or0 == 0 || prrr.tr0 != 0x2)
                req->setFlags(Request::UNCACHEABLE);
        }

        // Set memory attributes
        TlbEntry temp_te;
        temp_te.ns = !isSecure;
        if (isStage2 || hcr.dc == 0 || isSecure ||
            (isHyp && !(tranType & S1CTran))) {

            temp_te.mtype = is_fetch ? TlbEntry::MemoryType::Normal
                                     : TlbEntry::MemoryType::StronglyOrdered;
            temp_te.innerAttrs = 0x0;
            temp_te.outerAttrs = 0x0;
            temp_te.shareable = true;
            temp_te.outerShareable = true;
        } else {
            temp_te.mtype = TlbEntry::MemoryType::Normal;
            temp_te.innerAttrs = 0x3;
            temp_te.outerAttrs = 0x3;
            temp_te.shareable = false;
            temp_te.outerShareable = false;
        }
        temp_te.setAttributes(long_desc_format);
        DPRINTF(TLBVerbose, "(No MMU) setting memory attributes: "
                "shareable: %d, innerAttrs: %d, outerAttrs: %d, "
                "isStage2: %d\n",
                temp_te.shareable, temp_te.innerAttrs, temp_te.outerAttrs,
                isStage2);
        setAttr(temp_te.attributes);

        return trickBoxCheck(req, mode, TlbEntry::DomainType::NoAccess);
    }

    DPRINTF(TLBVerbose, "Translating %s=%#x context=%d\n",
            isStage2 ? "IPA" : "VA", vaddr_tainted, asid);
    // Translation enabled

    TlbEntry *te = NULL;
    TlbEntry mergeTe;
    Fault fault = getResultTe(&te, req, tc, mode, translation, timing,
                              functional, &mergeTe);
    // only proceed if we have a valid table entry
    if ((te == NULL) && (fault == NoFault)) delay = true;

    // If we have the table entry transfer some of the attributes to the
    // request that triggered the translation
    if (te != NULL) {
        // Set memory attributes
        DPRINTF(TLBVerbose,
                "Setting memory attributes: shareable: %d, innerAttrs: %d, "
                "outerAttrs: %d, mtype: %d, isStage2: %d\n",
                te->shareable, te->innerAttrs, te->outerAttrs,
                static_cast<uint8_t>(te->mtype), isStage2);
        setAttr(te->attributes);
        if (te->nonCacheable) {
            req->setFlags(Request::UNCACHEABLE);
        }

        if (!bootUncacheability &&
            ((ArmSystem*)tc->getSystemPtr())->adderBootUncacheable(vaddr)) {
            req->setFlags(Request::UNCACHEABLE);
        }

        req->setPaddr(te->pAddr(vaddr));
        if (isSecure && !te->ns) {
            req->setFlags(Request::SECURE);
        }
        if ((!is_fetch) && (vaddr & mask(flags & AlignmentMask)) &&
            (te->mtype != TlbEntry::MemoryType::Normal)) {
            // Unaligned accesses to Device memory should always cause an
            // abort regardless of sctlr.a
            alignFaults++;
            return new DataAbort(vaddr_tainted,
                                 TlbEntry::DomainType::NoAccess, is_write,
                                 ArmFault::AlignmentFault, isStage2,
                                 tranMethod);
        }

        // Check for a trickbox generated address fault
        if (fault == NoFault) {
            fault = trickBoxCheck(req, mode, te->domain);
        }
    }

    // Generate Illegal Inst Set State fault if IL bit is set in CPSR
    if (fault == NoFault) {
        CPSR cpsr = tc->readMiscReg(MISCREG_CPSR);
        if (aarch64 && is_fetch && cpsr.il == 1) {
            return new IllegalInstSetStateFault();
        }
    }

    return fault;
}
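
// Flow summary for translateFs(): special cases first (PC alignment,
// CLEAR_LL, cacheability of fetches and data), then the MMU-off path, which
// synthesizes memory attributes and leaves via the trickbox check; with
// translation enabled, getResultTe() performs the (possibly two-stage)
// lookup, the resulting entry's attributes are copied onto the request, and
// the trickbox and CPSR.IL checks run last.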

Fault
TLB::translateAtomic(RequestPtr req, ThreadContext *tc, Mode mode,
                     TLB::ArmTranslationType tranType)
{
    updateMiscReg(tc, tranType);

    if (directToStage2) {
        assert(stage2Tlb);
        return stage2Tlb->translateAtomic(req, tc, mode, tranType);
    }

    bool delay = false;
    Fault fault;
    if (FullSystem)
        fault = translateFs(req, tc, mode, NULL, delay, false, tranType);
    else
        fault = translateSe(req, tc, mode, NULL, delay, false);
    assert(!delay);
    return fault;
}

Fault
TLB::translateFunctional(RequestPtr req, ThreadContext *tc, Mode mode,
                         TLB::ArmTranslationType tranType)
{
    updateMiscReg(tc, tranType);

    if (directToStage2) {
        assert(stage2Tlb);
        return stage2Tlb->translateFunctional(req, tc, mode, tranType);
    }

    bool delay = false;
    Fault fault;
    if (FullSystem)
        fault = translateFs(req, tc, mode, NULL, delay, false, tranType,
                            true);
    else
        fault = translateSe(req, tc, mode, NULL, delay, false);
    assert(!delay);
    return fault;
}

Fault
TLB::translateTiming(RequestPtr req, ThreadContext *tc,
                     Translation *translation, Mode mode,
                     TLB::ArmTranslationType tranType)
{
    updateMiscReg(tc, tranType);

    if (directToStage2) {
        assert(stage2Tlb);
        return stage2Tlb->translateTiming(req, tc, translation, mode,
                                          tranType);
    }

    assert(translation);

    return translateComplete(req, tc, translation, mode, tranType, isStage2);
}

Fault
TLB::translateComplete(RequestPtr req, ThreadContext *tc,
                       Translation *translation, Mode mode,
                       TLB::ArmTranslationType tranType, bool callFromS2)
{
    bool delay = false;
    Fault fault;
    if (FullSystem)
        fault = translateFs(req, tc, mode, translation, delay, true,
                            tranType);
    else
        fault = translateSe(req, tc, mode, translation, delay, true);
    DPRINTF(TLBVerbose, "Translation returning delay=%d fault=%d\n", delay,
            fault != NoFault);
    // If we have a translation, and we're not in the middle of doing a stage
    // 2 translation, tell the translation that we've either finished or that
    // it's going to take a while. By not doing this when we're in the middle
    // of a stage 2 translation we prevent marking the translation as delayed
    // twice, once when the translation starts and again when the stage 1
    // translation completes.
    if (translation && (callFromS2 || !stage2Req || req->hasPaddr() ||
        fault != NoFault)) {
        if (!delay)
            translation->finish(fault, req, tc, mode);
        else
            translation->markDelayed();
    }
    return fault;
}

BaseMasterPort*
TLB::getMasterPort()
{
    return &tableWalker->getMasterPort("port");
}

DmaPort&
TLB::getWalkerPort()
{
    return tableWalker->getWalkerPort();
}

void
TLB::updateMiscReg(ThreadContext *tc, ArmTranslationType tranType)
{
    // check if the regs have changed, or the translation mode is different.
    // NOTE: the tran type doesn't affect stage 2 TLB's as they only handle
    // one type of translation anyway
    if (miscRegValid && ((tranType == curTranType) || isStage2)) {
        return;
    }

    DPRINTF(TLBVerbose, "TLB variables changed!\n");
    CPSR cpsr = tc->readMiscReg(MISCREG_CPSR);
    // Dependencies: SCR/SCR_EL3, CPSR
    isSecure = inSecureState(tc);
    isSecure &= (tranType & HypMode) == 0;
    isSecure &= (tranType & S1S2NsTran) == 0;
    aarch64 = !cpsr.width;
    if (aarch64) {  // AArch64
        aarch64EL = (ExceptionLevel) (uint8_t) cpsr.el;
        switch (aarch64EL) {
          case EL0:
          case EL1:
            {
                sctlr = tc->readMiscReg(MISCREG_SCTLR_EL1);
                ttbcr = tc->readMiscReg(MISCREG_TCR_EL1);
                uint64_t ttbr_asid = ttbcr.a1 ?
                    tc->readMiscReg(MISCREG_TTBR1_EL1) :
                    tc->readMiscReg(MISCREG_TTBR0_EL1);
                asid = bits(ttbr_asid,
                            (haveLargeAsid64 && ttbcr.as) ? 63 : 55, 48);
            }
            break;
          case EL2:
            sctlr = tc->readMiscReg(MISCREG_SCTLR_EL2);
            ttbcr = tc->readMiscReg(MISCREG_TCR_EL2);
            asid = -1;
            break;
          case EL3:
            sctlr = tc->readMiscReg(MISCREG_SCTLR_EL3);
            ttbcr = tc->readMiscReg(MISCREG_TCR_EL3);
            asid = -1;
            break;
        }
        scr = tc->readMiscReg(MISCREG_SCR_EL3);
        isPriv = aarch64EL != EL0;
        // @todo: modify this behaviour to support Virtualization in
        // AArch64
        vmid = 0;
        isHyp = false;
        directToStage2 = false;
        stage2Req = false;
    } else {  // AArch32
        sctlr = tc->readMiscReg(flattenMiscRegNsBanked(MISCREG_SCTLR, tc,
                                !isSecure));
        ttbcr = tc->readMiscReg(flattenMiscRegNsBanked(MISCREG_TTBCR, tc,
                                !isSecure));
        scr = tc->readMiscReg(MISCREG_SCR);
        isPriv = cpsr.mode != MODE_USER;
        if (haveLPAE && ttbcr.eae) {
            // Long-descriptor translation table format in use
            uint64_t ttbr_asid = tc->readMiscReg(
                flattenMiscRegNsBanked(ttbcr.a1 ? MISCREG_TTBR1
                                                : MISCREG_TTBR0,
                                       tc, !isSecure));
            asid = bits(ttbr_asid, 55, 48);
        } else {
            // Short-descriptor translation table format in use
            CONTEXTIDR context_id = tc->readMiscReg(flattenMiscRegNsBanked(
                MISCREG_CONTEXTIDR, tc, !isSecure));
            asid = context_id.asid;
        }
        prrr = tc->readMiscReg(flattenMiscRegNsBanked(MISCREG_PRRR, tc,
                               !isSecure));
        nmrr = tc->readMiscReg(flattenMiscRegNsBanked(MISCREG_NMRR, tc,
                               !isSecure));
        dacr = tc->readMiscReg(flattenMiscRegNsBanked(MISCREG_DACR, tc,
                               !isSecure));
        hcr = tc->readMiscReg(MISCREG_HCR);

        if (haveVirtualization) {
            vmid = bits(tc->readMiscReg(MISCREG_VTTBR), 55, 48);
            isHyp = cpsr.mode == MODE_HYP;
            isHyp |= tranType & HypMode;
            isHyp &= (tranType & S1S2NsTran) == 0;
            isHyp &= (tranType & S1CTran) == 0;
            if (isHyp) {
                sctlr = tc->readMiscReg(MISCREG_HSCTLR);
            }
            // Work out if we should skip the first stage of translation and go
            // directly to stage 2. This value is cached so we don't have to
            // compute it for every translation.
            stage2Req = hcr.vm && !isStage2 && !isHyp && !isSecure &&
                        !(tranType & S1CTran);
            directToStage2 = stage2Req && !sctlr.m;
        } else {
            vmid = 0;
            stage2Req = false;
            isHyp = false;
            directToStage2 = false;
        }
    }
    miscRegValid = true;
    curTranType = tranType;
}
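
// This cache is the reason flushes and drainResume() only need to clear
// miscRegValid: the next translation falls through the early-return guard at
// the top of updateMiscReg() and re-reads every register, including the
// stage 2 routing decision (stage2Req/directToStage2).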

Fault
TLB::getTE(TlbEntry **te, RequestPtr req, ThreadContext *tc, Mode mode,
           Translation *translation, bool timing, bool functional,
           bool is_secure, TLB::ArmTranslationType tranType)
{
    bool is_fetch = (mode == Execute);
    bool is_write = (mode == Write);

    Addr vaddr_tainted = req->getVaddr();
    Addr vaddr = 0;
    ExceptionLevel target_el = aarch64 ? aarch64EL : EL1;
    if (aarch64)
        vaddr = purifyTaggedAddr(vaddr_tainted, tc, target_el);
    else
        vaddr = vaddr_tainted;
    *te = lookup(vaddr, asid, vmid, isHyp, is_secure, false, false,
                 target_el);
    if (*te == NULL) {
        if (req->isPrefetch()) {
            // if the request is a prefetch don't attempt to fill the TLB or go
            // any further with the memory access (here we can safely use the
            // fault status for the short desc. format in all cases)
            prefetchFaults++;
            return new PrefetchAbort(vaddr_tainted,
                                     ArmFault::PrefetchTLBMiss, isStage2);
        }

        if (is_fetch)
            instMisses++;
        else if (is_write)
            writeMisses++;
        else
            readMisses++;

        // start translation table walk, pass variables rather than
        // re-retrieving in table walker for speed
        DPRINTF(TLB, "TLB Miss: Starting hardware table walker for "
                "%#x(%d:%d)\n", vaddr_tainted, asid, vmid);
        Fault fault;
        fault = tableWalker->walk(req, tc, asid, vmid, isHyp, mode,
                                  translation, timing, functional, is_secure,
                                  tranType);
        // for timing mode, return and wait for table walk,
        if (timing || fault != NoFault) {
            return fault;
        }

        *te = lookup(vaddr, asid, vmid, isHyp, is_secure, false, false,
                     target_el);
        if (!*te)
            printTlb();
        assert(*te);
    } else {
        if (is_fetch)
            instHits++;
        else if (is_write)
            writeHits++;
        else
            readHits++;
    }
    return NoFault;
}

Fault
TLB::getResultTe(TlbEntry **te, RequestPtr req, ThreadContext *tc, Mode mode,
                 Translation *translation, bool timing, bool functional,
                 TlbEntry *mergeTe)
{
    Fault fault;
    TlbEntry *s1Te = NULL;

    Addr vaddr_tainted = req->getVaddr();

    // Get the stage 1 table entry
    fault = getTE(&s1Te, req, tc, mode, translation, timing, functional,
                  isSecure, curTranType);
    // only proceed if we have a valid table entry
    if ((s1Te != NULL) && (fault == NoFault)) {
        // Check stage 1 permissions before checking stage 2
        if (aarch64)
            fault = checkPermissions64(s1Te, req, mode, tc);
        else
            fault = checkPermissions(s1Te, req, mode);
        if (stage2Req && (fault == NoFault)) {
            Stage2LookUp *s2Lookup = new Stage2LookUp(this, stage2Tlb, *s1Te,
                req, translation, mode, timing, functional, curTranType);
            fault = s2Lookup->getTe(tc, mergeTe);
            if (s2Lookup->isComplete()) {
                *te = mergeTe;
                // We've finished with the lookup so delete it
                delete s2Lookup;
            } else {
                // The lookup hasn't completed, so we can't delete it now. We
                // get round this by asking the object to self delete when the
                // translation is complete.
                s2Lookup->setSelfDelete();
            }
        } else {
            // This case deals with an S1 hit (or bypass), followed by
            // an S2 hit-but-perms issue
            if (isStage2) {
                DPRINTF(TLBVerbose, "s2TLB: reqVa %#x, reqPa %#x, fault %p\n",
                        vaddr_tainted,
                        req->hasPaddr() ? req->getPaddr() : ~0, fault);
                if (fault != NoFault) {
                    ArmFault *armFault =
                        reinterpret_cast<ArmFault *>(fault.get());
                    armFault->annotate(ArmFault::S1PTW, false);
                    armFault->annotate(ArmFault::OVA, vaddr_tainted);
                }
            }
            *te = s1Te;
        }
    }
    return fault;
}

ArmISA::TLB *
ArmTLBParams::create()
{
    return new ArmISA::TLB(this);
}