/*
 * Copyright (c) 2010-2013, 2016-2018 ARM Limited
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2001-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "arch/arm/tlb.hh"

#include "arch/arm/faults.hh"
#include "arch/arm/pagetable.hh"
#include "arch/arm/stage2_lookup.hh"
#include "arch/arm/stage2_mmu.hh"
#include "arch/arm/system.hh"
#include "arch/arm/table_walker.hh"
#include "arch/arm/utility.hh"
#include "arch/generic/mmapped_ipr.hh"
#include "base/inifile.hh"
#include "base/str.hh"
#include "base/trace.hh"
#include "cpu/base.hh"
#include "cpu/thread_context.hh"
#include "debug/Checkpoint.hh"
#include "debug/TLB.hh"
#include "debug/TLBVerbose.hh"
#include "mem/page_table.hh"
#include "mem/request.hh"
#include "params/ArmTLB.hh"
#include "sim/full_system.hh"
#include "sim/process.hh"
using namespace ArmISA;
TLB::TLB(const ArmTLBParams *p)
    : BaseTLB(p), table(new TlbEntry[p->size]), size(p->size),
      isStage2(p->is_stage2), stage2Req(false), stage2DescReq(false), _attr(0),
      directToStage2(false), tableWalker(p->walker), stage2Tlb(NULL),
      stage2Mmu(NULL), test(nullptr), rangeMRU(1),
      aarch64(false), aarch64EL(EL0), isPriv(false), isSecure(false),
      isHyp(false), asid(0), vmid(0), hcr(0), dacr(0),
      miscRegValid(false), miscRegContext(0), curTranType(NormalTran)
{
    const ArmSystem *sys = dynamic_cast<const ArmSystem *>(p->sys);

    tableWalker->setTlb(this);

    // Cache system-level properties
    haveLPAE = tableWalker->haveLPAE();
    haveVirtualization = tableWalker->haveVirtualization();
    haveLargeAsid64 = tableWalker->haveLargeAsid64();

    m5opRange = sys->m5opRange();
}
void
TLB::init()
{
    if (stage2Mmu && !isStage2)
        stage2Tlb = stage2Mmu->stage2Tlb();
}
void
TLB::setMMU(Stage2MMU *m, MasterID master_id)
{
    stage2Mmu = m;
    tableWalker->setMMU(m, master_id);
}
bool
TLB::translateFunctional(ThreadContext *tc, Addr va, Addr &pa)
{
    if (directToStage2) {
        return stage2Tlb->translateFunctional(tc, va, pa);
    }

    TlbEntry *e = lookup(va, asid, vmid, isHyp, isSecure, true, false,
                         aarch64 ? aarch64EL : EL1);
    if (!e)
        return false;
    pa = e->pAddr(va);
    return true;
}
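// Note on the variant above: this overload is a side-effect free VA-to-PA
// probe. It forwards straight to the stage 2 TLB when directToStage2 is set,
// and otherwise performs a functional lookup (no MRU reordering and no table
// walk) against the currently cached asid/vmid/security state, returning
// false on a miss instead of raising a fault.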
Fault
TLB::finalizePhysical(const RequestPtr &req,
                      ThreadContext *tc, Mode mode) const
{
    const Addr paddr = req->getPaddr();

    if (m5opRange.contains(paddr)) {
        req->setFlags(Request::MMAPPED_IPR | Request::GENERIC_IPR);
        req->setPaddr(GenericISA::iprAddressPseudoInst(
                          (paddr >> 8) & 0xFF, paddr & 0xFF));
    }

    return NoFault;
}
TlbEntry*
TLB::lookup(Addr va, uint16_t asn, uint8_t vmid, bool hyp, bool secure,
            bool functional, bool ignore_asn, uint8_t target_el)
{
    TlbEntry *retval = NULL;

    // Maintaining LRU array
    int x = 0;
    while (retval == NULL && x < size) {
        if ((!ignore_asn && table[x].match(va, asn, vmid, hyp, secure, false,
             target_el)) ||
            (ignore_asn && table[x].match(va, vmid, hyp, secure, target_el))) {
            // We only move the hit entry ahead when the position is higher
            // than rangeMRU
            if (x > rangeMRU && !functional) {
                TlbEntry tmp_entry = table[x];
                for (int i = x; i > 0; i--)
                    table[i] = table[i - 1];
                table[0] = tmp_entry;
                retval = &table[0];
            } else {
                retval = &table[x];
            }
            break;
        }
        ++x;
    }
    DPRINTF(TLBVerbose, "Lookup %#x, asn %#x -> %s vmn 0x%x hyp %d secure %d "
            "ppn %#x size: %#x pa: %#x ap:%d ns:%d nstid:%d g:%d asid: %d "
            "el: %d\n",
            va, asn, retval ? "hit" : "miss", vmid, hyp, secure,
            retval ? retval->pfn : 0, retval ? retval->size : 0,
            retval ? retval->pAddr(va) : 0, retval ? retval->ap : 0,
            retval ? retval->ns : 0, retval ? retval->nstid : 0,
            retval ? retval->global : 0, retval ? retval->asid : 0,
            retval ? retval->el : 0);

    return retval;
}
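// The replacement policy implemented by lookup() is effectively a
// pseudo-LRU: a hit found beyond rangeMRU is moved to the front of the
// array, so table[0] holds the most recently promoted entry and the entry
// at table[size - 1] is the eviction candidate. Functional lookups
// deliberately skip the reordering so that probes do not perturb the
// simulated replacement state.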
// insert a new TLB entry
void
TLB::insert(Addr addr, TlbEntry &entry)
{
    DPRINTF(TLB, "Inserting entry into TLB with pfn:%#x size:%#x vpn: %#x"
            " asid:%d vmid:%d N:%d global:%d valid:%d nc:%d xn:%d"
            " ap:%#x domain:%#x ns:%d nstid:%d isHyp:%d\n", entry.pfn,
            entry.size, entry.vpn, entry.asid, entry.vmid, entry.N,
            entry.global, entry.valid, entry.nonCacheable, entry.xn,
            entry.ap, static_cast<uint8_t>(entry.domain), entry.ns, entry.nstid,
            entry.isHyp);
    if (table[size - 1].valid)
        DPRINTF(TLB, " - Replacing Valid entry %#x, asn %d vmn %d ppn %#x "
                "size: %#x ap:%d ns:%d nstid:%d g:%d isHyp:%d el: %d\n",
                table[size-1].vpn << table[size-1].N, table[size-1].asid,
                table[size-1].vmid, table[size-1].pfn << table[size-1].N,
                table[size-1].size, table[size-1].ap, table[size-1].ns,
                table[size-1].nstid, table[size-1].global, table[size-1].isHyp,
                table[size-1].el);
    // inserting to MRU position and evicting the LRU one
    for (int i = size - 1; i > 0; --i)
        table[i] = table[i-1];
    table[0] = entry;

    ppRefills->notify(1);
}
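// insert() always fills the MRU slot: existing entries are shifted one
// position towards the tail, the entry previously held in table[size - 1]
// is dropped, and the "Refills" probe point is notified so PMU-style
// listeners can count TLB refills.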
void
TLB::printTlb() const
{
    DPRINTF(TLB, "Current TLB contents:\n");
    for (int x = 0; x < size; x++) {
        TlbEntry *te = &table[x];
        if (te->valid)
            DPRINTF(TLB, " * %s\n", te->print());
    }
}
void
TLB::flushAllSecurity(bool secure_lookup, uint8_t target_el, bool ignore_el)
{
    DPRINTF(TLB, "Flushing all TLB entries (%s lookup)\n",
            (secure_lookup ? "secure" : "non-secure"));
    int x = 0;
    TlbEntry *te;
    while (x < size) {
        te = &table[x];
        if (te->valid && secure_lookup == !te->nstid &&
            (te->vmid == vmid || secure_lookup) &&
            checkELMatch(target_el, te->el, ignore_el)) {

            DPRINTF(TLB, " - %s\n", te->print());
            te->valid = false;
            flushedEntries++;
        }
        ++x;
    }

    flushTlb++;

    // If there's a second stage TLB (and we're not it) then flush it as well
    // if we're currently in hyp mode
    if (!isStage2 && isHyp) {
        stage2Tlb->flushAllSecurity(secure_lookup, true);
    }
}
void
TLB::flushAllNs(bool hyp, uint8_t target_el, bool ignore_el)
{
    DPRINTF(TLB, "Flushing all NS TLB entries (%s lookup)\n",
            (hyp ? "hyp" : "non-hyp"));
    int x = 0;
    TlbEntry *te;
    while (x < size) {
        te = &table[x];
        if (te->valid && te->nstid && te->isHyp == hyp &&
            checkELMatch(target_el, te->el, ignore_el)) {

            DPRINTF(TLB, " - %s\n", te->print());
            te->valid = false;
            flushedEntries++;
        }
        ++x;
    }

    flushTlb++;

    // If there's a second stage TLB (and we're not it) then flush it as well
    if (!isStage2 && !hyp) {
        stage2Tlb->flushAllNs(false, true);
    }
}
void
TLB::flushMvaAsid(Addr mva, uint64_t asn, bool secure_lookup, uint8_t target_el)
{
    DPRINTF(TLB, "Flushing TLB entries with mva: %#x, asid: %#x "
            "(%s lookup)\n", mva, asn, (secure_lookup ?
            "secure" : "non-secure"));
    _flushMva(mva, asn, secure_lookup, false, false, target_el);
    flushTlbMvaAsid++;
}
void
TLB::flushAsid(uint64_t asn, bool secure_lookup, uint8_t target_el)
{
    DPRINTF(TLB, "Flushing TLB entries with asid: %#x (%s lookup)\n", asn,
            (secure_lookup ? "secure" : "non-secure"));

    int x = 0;
    TlbEntry *te;
    while (x < size) {
        te = &table[x];
        if (te->valid && te->asid == asn && secure_lookup == !te->nstid &&
            (te->vmid == vmid || secure_lookup) &&
            checkELMatch(target_el, te->el, false)) {

            te->valid = false;
            DPRINTF(TLB, " - %s\n", te->print());
            flushedEntries++;
        }
        ++x;
    }
    flushTlbAsid++;
}
void
TLB::flushMva(Addr mva, bool secure_lookup, bool hyp, uint8_t target_el)
{
    DPRINTF(TLB, "Flushing TLB entries with mva: %#x (%s lookup)\n", mva,
            (secure_lookup ? "secure" : "non-secure"));
    _flushMva(mva, 0xbeef, secure_lookup, hyp, true, target_el);
    flushTlbMva++;
}
void
TLB::_flushMva(Addr mva, uint64_t asn, bool secure_lookup, bool hyp,
               bool ignore_asn, uint8_t target_el)
{
    TlbEntry *te;
    // D5.7.2: Sign-extend address to 64 bits
    mva = sext<56>(mva);
    te = lookup(mva, asn, vmid, hyp, secure_lookup, false, ignore_asn,
                target_el);
    while (te != NULL) {
        if (secure_lookup == !te->nstid) {
            DPRINTF(TLB, " - %s\n", te->print());
            te->valid = false;
            flushedEntries++;
        }
        te = lookup(mva, asn, vmid, hyp, secure_lookup, false, ignore_asn,
                    target_el);
    }
}
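// _flushMva() repeats the lookup and invalidates the matching entry until no
// further match is found, so every alias of the MVA in the current regime is
// removed. Callers that flush by address only (flushMva() and flushIpaVmid())
// pass ignore_asn = true together with a placeholder ASN value (0xbeef),
// which is never actually compared against the entries.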
void
TLB::flushIpaVmid(Addr ipa, bool secure_lookup, bool hyp, uint8_t target_el)
{
    assert(!isStage2);
    stage2Tlb->_flushMva(ipa, 0xbeef, secure_lookup, hyp, true, target_el);
}
bool
TLB::checkELMatch(uint8_t target_el, uint8_t tentry_el, bool ignore_el)
{
    bool elMatch = true;
    if (!ignore_el) {
        if (target_el == 2 || target_el == 3) {
            elMatch = (tentry_el == target_el);
        } else {
            elMatch = (tentry_el == 0) || (tentry_el == 1);
        }
    }
    return elMatch;
}
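// checkELMatch() treats EL0 and EL1 entries as a single translation regime:
// a flush that targets EL0 or EL1 matches entries allocated at either level,
// while flushes targeting EL2 or EL3 only match entries from exactly that
// exception level (the ignore_el flag bypasses the check entirely).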
void
TLB::drainResume()
{
    // We might have unserialized something or switched CPUs, so make
    // sure to re-read the misc regs.
    miscRegValid = false;
}
void
TLB::takeOverFrom(BaseTLB *_otlb)
{
    TLB *otlb = dynamic_cast<TLB*>(_otlb);
    /* Make sure we actually have a valid type */
    if (otlb) {
        haveLPAE = otlb->haveLPAE;
        directToStage2 = otlb->directToStage2;
        stage2Req = otlb->stage2Req;
        stage2DescReq = otlb->stage2DescReq;

        /* Sync the stage2 MMU if they exist in both
         * the old CPU and the new
         */
        if (!isStage2 &&
            stage2Tlb && otlb->stage2Tlb) {
            stage2Tlb->takeOverFrom(otlb->stage2Tlb);
        }
    } else {
        panic("Incompatible TLB type!");
    }
}
void
TLB::serialize(CheckpointOut &cp) const
{
    DPRINTF(Checkpoint, "Serializing Arm TLB\n");

    SERIALIZE_SCALAR(_attr);
    SERIALIZE_SCALAR(haveLPAE);
    SERIALIZE_SCALAR(directToStage2);
    SERIALIZE_SCALAR(stage2Req);
    SERIALIZE_SCALAR(stage2DescReq);

    int num_entries = size;
    SERIALIZE_SCALAR(num_entries);
    for (int i = 0; i < size; i++)
        table[i].serializeSection(cp, csprintf("TlbEntry%d", i));
}
void
TLB::unserialize(CheckpointIn &cp)
{
    DPRINTF(Checkpoint, "Unserializing Arm TLB\n");

    UNSERIALIZE_SCALAR(_attr);
    UNSERIALIZE_SCALAR(haveLPAE);
    UNSERIALIZE_SCALAR(directToStage2);
    UNSERIALIZE_SCALAR(stage2Req);
    UNSERIALIZE_SCALAR(stage2DescReq);

    int num_entries;
    UNSERIALIZE_SCALAR(num_entries);
    for (int i = 0; i < min(size, num_entries); i++)
        table[i].unserializeSection(cp, csprintf("TlbEntry%d", i));
}
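// Each entry is checkpointed as its own "TlbEntry%d" section. Note that
// unserialize() restores only min(size, num_entries) entries, so a checkpoint
// taken with a larger TLB can still be loaded into a smaller one; the excess
// (colder) entries are silently dropped.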
void
TLB::regStats()
{
    instHits
        .name(name() + ".inst_hits")
        .desc("ITB inst hits")
        ;

    instMisses
        .name(name() + ".inst_misses")
        .desc("ITB inst misses")
        ;

    instAccesses
        .name(name() + ".inst_accesses")
        .desc("ITB inst accesses")
        ;

    readHits
        .name(name() + ".read_hits")
        .desc("DTB read hits")
        ;

    readMisses
        .name(name() + ".read_misses")
        .desc("DTB read misses")
        ;

    readAccesses
        .name(name() + ".read_accesses")
        .desc("DTB read accesses")
        ;

    writeHits
        .name(name() + ".write_hits")
        .desc("DTB write hits")
        ;

    writeMisses
        .name(name() + ".write_misses")
        .desc("DTB write misses")
        ;

    writeAccesses
        .name(name() + ".write_accesses")
        .desc("DTB write accesses")
        ;

    hits
        .name(name() + ".hits")
        ;

    misses
        .name(name() + ".misses")
        ;

    accesses
        .name(name() + ".accesses")
        .desc("DTB accesses")
        ;

    flushTlb
        .name(name() + ".flush_tlb")
        .desc("Number of times complete TLB was flushed")
        ;

    flushTlbMva
        .name(name() + ".flush_tlb_mva")
        .desc("Number of times TLB was flushed by MVA")
        ;

    flushTlbMvaAsid
        .name(name() + ".flush_tlb_mva_asid")
        .desc("Number of times TLB was flushed by MVA & ASID")
        ;

    flushTlbAsid
        .name(name() + ".flush_tlb_asid")
        .desc("Number of times TLB was flushed by ASID")
        ;

    flushedEntries
        .name(name() + ".flush_entries")
        .desc("Number of entries that have been flushed from TLB")
        ;

    alignFaults
        .name(name() + ".align_faults")
        .desc("Number of TLB faults due to alignment restrictions")
        ;

    prefetchFaults
        .name(name() + ".prefetch_faults")
        .desc("Number of TLB faults due to prefetch")
        ;

    domainFaults
        .name(name() + ".domain_faults")
        .desc("Number of TLB faults due to domain restrictions")
        ;

    permsFaults
        .name(name() + ".perms_faults")
        .desc("Number of TLB faults due to permissions restrictions")
        ;

    instAccesses = instHits + instMisses;
    readAccesses = readHits + readMisses;
    writeAccesses = writeHits + writeMisses;
    hits = readHits + writeHits + instHits;
    misses = readMisses + writeMisses + instMisses;
    accesses = readAccesses + writeAccesses + instAccesses;
}
void
TLB::regProbePoints()
{
    ppRefills.reset(new ProbePoints::PMU(getProbeManager(), "Refills"));
}
Fault
TLB::translateSe(const RequestPtr &req, ThreadContext *tc, Mode mode,
                 Translation *translation, bool &delay, bool timing)
{
    Addr vaddr_tainted = req->getVaddr();
    Addr vaddr = 0;
    if (aarch64)
        vaddr = purifyTaggedAddr(vaddr_tainted, tc, aarch64EL, ttbcr);
    else
        vaddr = vaddr_tainted;
    Request::Flags flags = req->getFlags();

    bool is_fetch = (mode == Execute);
    bool is_write = (mode == Write);

    if (!is_fetch) {
        assert(flags & MustBeOne);
        if (sctlr.a || !(flags & AllowUnaligned)) {
            if (vaddr & mask(flags & AlignmentMask)) {
                // LPAE is always disabled in SE mode
                return std::make_shared<DataAbort>(
                    vaddr_tainted,
                    TlbEntry::DomainType::NoAccess, is_write,
                    ArmFault::AlignmentFault, isStage2,
                    ArmFault::VmsaTran);
            }
        }
    }

    Addr paddr;
    Process *p = tc->getProcessPtr();

    if (!p->pTable->translate(vaddr, paddr))
        return std::make_shared<GenericPageTableFault>(vaddr_tainted);
    req->setPaddr(paddr);

    return finalizePhysical(req, tc, mode);
}
Fault
TLB::checkPermissions(TlbEntry *te, const RequestPtr &req, Mode mode)
{
    // a data cache maintenance instruction that operates by MVA does
    // not generate a Data Abort exception due to a Permission fault
    if (req->isCacheMaintenance()) {
        return NoFault;
    }
    Addr vaddr = req->getVaddr(); // 32-bit don't have to purify
    Request::Flags flags = req->getFlags();
    bool is_fetch = (mode == Execute);
    bool is_write = (mode == Write);
    bool is_priv = isPriv && !(flags & UserMode);

    // Get the translation type from the actual table entry
    ArmFault::TranMethod tranMethod = te->longDescFormat ? ArmFault::LpaeTran
                                                         : ArmFault::VmsaTran;
    // If this is the second stage of translation and the request is for a
    // stage 1 page table walk then we need to check the HCR.PTW bit. This
    // allows us to generate a fault if the request targets an area marked
    // as a device or strongly ordered.
    if (isStage2 && req->isPTWalk() && hcr.ptw &&
        (te->mtype != TlbEntry::MemoryType::Normal)) {
        return std::make_shared<DataAbort>(
            vaddr, te->domain, is_write,
            ArmFault::PermissionLL + te->lookupLevel,
            isStage2, tranMethod);
    }
    // Generate an alignment fault for unaligned data accesses to device or
    // strongly ordered memory
    if (!is_fetch) {
        if (te->mtype != TlbEntry::MemoryType::Normal) {
            if (vaddr & mask(flags & AlignmentMask)) {
                alignFaults++;
                return std::make_shared<DataAbort>(
                    vaddr, TlbEntry::DomainType::NoAccess, is_write,
                    ArmFault::AlignmentFault, isStage2,
                    tranMethod);
            }
        }
    }
    if (te->nonCacheable) {
        // Prevent prefetching from I/O devices.
        if (req->isPrefetch()) {
            // Here we can safely use the fault status for the short
            // desc. format in all cases
            return std::make_shared<PrefetchAbort>(
                vaddr, ArmFault::PrefetchUncacheable,
                isStage2, tranMethod);
        }
    }
    if (!te->longDescFormat) {
        switch ((dacr >> (static_cast<uint8_t>(te->domain) * 2)) & 0x3) {
          case 0:
            domainFaults++;
            DPRINTF(TLB, "TLB Fault: Data abort on domain. DACR: %#x"
                    " domain: %#x write:%d\n", dacr,
                    static_cast<uint8_t>(te->domain), is_write);
            if (is_fetch) {
                // Use PC value instead of vaddr because vaddr might
                // be aligned to cache line and should not be the
                // address reported in FAR
                return std::make_shared<PrefetchAbort>(
                    req->getPC(),
                    ArmFault::DomainLL + te->lookupLevel,
                    isStage2, tranMethod);
            } else
                return std::make_shared<DataAbort>(
                    vaddr, te->domain, is_write,
                    ArmFault::DomainLL + te->lookupLevel,
                    isStage2, tranMethod);
          case 1:
            // Continue with permissions check
            break;
          case 2:
            panic("UNPRED domain\n");
          case 3:
            return NoFault;
        }
    }
    // The 'ap' variable is AP[2:0] or {AP[2,1],1b'0}, i.e. always three bits
    uint8_t ap  = te->longDescFormat ? te->ap << 1 : te->ap;
    uint8_t hap = te->hap;

    if (sctlr.afe == 1 || te->longDescFormat)
        ap |= 1;

    bool abt;
    bool isWritable = true;
    // If this is a stage 2 access (e.g. for reading stage 1 page table
    // entries) then don't perform the AP permissions check, we still do the
    // HAP check

    DPRINTF(TLB, "Access permissions 0, checking rs:%#x\n",
            (int)sctlr.rs);

    switch ((int)sctlr.rs) {

        abt = is_write || !is_priv;

        abt = !is_priv && is_write;
        isWritable = is_priv;

        panic("UNPRED permissions\n");

        abt = !is_priv || is_write;

        panic("Unknown permissions %#x\n", ap);
    bool hapAbt = is_write ? !(hap & 2) : !(hap & 1);
    bool xn     = te->xn || (isWritable && sctlr.wxn) ||
        (ap == 3 && sctlr.uwxn && is_priv);
    if (is_fetch && (abt || xn ||
                     (te->longDescFormat && te->pxn && is_priv) ||
                     (isSecure && te->ns && scr.sif))) {
        permsFaults++;
        DPRINTF(TLB, "TLB Fault: Prefetch abort on permission check. AP:%d "
                "priv:%d write:%d ns:%d sif:%d sctlr.afe: %d \n",
                ap, is_priv, is_write, te->ns, scr.sif, sctlr.afe);
        // Use PC value instead of vaddr because vaddr might be aligned to
        // cache line and should not be the address reported in FAR
        return std::make_shared<PrefetchAbort>(
            req->getPC(),
            ArmFault::PermissionLL + te->lookupLevel,
            isStage2, tranMethod);
    } else if (abt | hapAbt) {
        permsFaults++;
        DPRINTF(TLB, "TLB Fault: Data abort on permission check. AP:%d priv:%d"
                " write:%d\n", ap, is_priv, is_write);
        return std::make_shared<DataAbort>(
            vaddr, te->domain, is_write,
            ArmFault::PermissionLL + te->lookupLevel,
            isStage2 | !abt, tranMethod);
    }
    return NoFault;
}
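// The "isStage2 | !abt" argument above encodes where the permission failure
// came from: if the AP check passed (abt is false) but hapAbt is set, the
// abort is reported as a stage 2 fault, since the HAP bits carry the
// hypervisor (stage 2) access permissions.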
Fault
TLB::checkPermissions64(TlbEntry *te, const RequestPtr &req, Mode mode,
                        ThreadContext *tc)
{
    // A data cache maintenance instruction that operates by VA does
    // not generate a Permission fault unless:
    // * It is a data cache invalidate (dc ivac) which requires write
    //   permissions to the VA, or
    // * It is executed from EL0
    if (req->isCacheClean() && aarch64EL != EL0 && !isStage2) {
        return NoFault;
    }
    Addr vaddr_tainted = req->getVaddr();
    Addr vaddr = purifyTaggedAddr(vaddr_tainted, tc, aarch64EL, ttbcr);

    Request::Flags flags = req->getFlags();
    bool is_fetch = (mode == Execute);
    // Cache clean operations require read permissions to the specified VA
    bool is_write = !req->isCacheClean() && mode == Write;
    bool is_priv M5_VAR_USED = isPriv && !(flags & UserMode);

    updateMiscReg(tc, curTranType);
    // If this is the second stage of translation and the request is for a
    // stage 1 page table walk then we need to check the HCR.PTW bit. This
    // allows us to generate a fault if the request targets an area marked
    // as a device or strongly ordered.
    if (isStage2 && req->isPTWalk() && hcr.ptw &&
        (te->mtype != TlbEntry::MemoryType::Normal)) {
        return std::make_shared<DataAbort>(
            vaddr_tainted, te->domain, is_write,
            ArmFault::PermissionLL + te->lookupLevel,
            isStage2, ArmFault::LpaeTran);
    }
    // Generate an alignment fault for unaligned accesses to device or
    // strongly ordered memory
    if (!is_fetch) {
        if (te->mtype != TlbEntry::MemoryType::Normal) {
            if (vaddr & mask(flags & AlignmentMask)) {
                alignFaults++;
                return std::make_shared<DataAbort>(
                    vaddr_tainted,
                    TlbEntry::DomainType::NoAccess, is_write,
                    ArmFault::AlignmentFault, isStage2,
                    ArmFault::LpaeTran);
            }
        }
    }
->nonCacheable
) {
839 // Prevent prefetching from I/O devices.
840 if (req
->isPrefetch()) {
841 // Here we can safely use the fault status for the short
842 // desc. format in all cases
843 return std::make_shared
<PrefetchAbort
>(
845 ArmFault::PrefetchUncacheable
,
846 isStage2
, ArmFault::LpaeTran
);
850 uint8_t ap
= 0x3 & (te
->ap
); // 2-bit access protection field
854 uint8_t pxn
= te
->pxn
;
855 bool r
= !is_write
&& !is_fetch
;
858 DPRINTF(TLBVerbose
, "Checking permissions: ap:%d, xn:%d, pxn:%d, r:%d, "
859 "w:%d, x:%d\n", ap
, xn
, pxn
, r
, w
, x
);
862 assert(ArmSystem::haveVirtualization(tc
) && aarch64EL
!= EL2
);
863 // In stage 2 we use the hypervisor access permission bits.
864 // The following permissions are described in ARM DDI 0487A.f
866 uint8_t hap
= 0x3 & te
->hap
;
868 // sctlr.wxn overrides the xn bit
869 grant
= !sctlr
.wxn
&& !xn
;
870 } else if (is_write
) {
879 uint8_t perm
= (ap
<< 2) | (xn
<< 1) | pxn
;
889 grant
= r
|| w
|| (x
&& !sctlr
.wxn
);
910 uint8_t perm
= (ap
<< 2) | (xn
<< 1) | pxn
;
914 grant
= r
|| w
|| (x
&& !sctlr
.wxn
);
922 // regions that are writeable at EL0 should not be
946 uint8_t perm
= (ap
& 0x2) | xn
;
949 grant
= r
|| w
|| (x
&& !sctlr
.wxn
) ;
    if (!grant) {
        if (is_fetch) {
            permsFaults++;
            DPRINTF(TLB, "TLB Fault: Prefetch abort on permission check. "
                    "AP:%d priv:%d write:%d ns:%d sif:%d "
                    "sctlr.afe: %d\n",
                    ap, is_priv, is_write, te->ns, scr.sif, sctlr.afe);
            // Use PC value instead of vaddr because vaddr might be aligned to
            // cache line and should not be the address reported in FAR
            return std::make_shared<PrefetchAbort>(
                req->getPC(),
                ArmFault::PermissionLL + te->lookupLevel,
                isStage2, ArmFault::LpaeTran);
        } else {
            permsFaults++;
            DPRINTF(TLB, "TLB Fault: Data abort on permission check. AP:%d "
                    "priv:%d write:%d\n", ap, is_priv, is_write);
            return std::make_shared<DataAbort>(
                vaddr_tainted, te->domain, is_write,
                ArmFault::PermissionLL + te->lookupLevel,
                isStage2, ArmFault::LpaeTran);
        }
    }

    return NoFault;
}
Fault
TLB::translateFs(const RequestPtr &req, ThreadContext *tc, Mode mode,
                 Translation *translation, bool &delay, bool timing,
                 TLB::ArmTranslationType tranType, bool functional)
{
    // No such thing as a functional timing access
    assert(!(timing && functional));

    updateMiscReg(tc, tranType);

    Addr vaddr_tainted = req->getVaddr();
    Addr vaddr = 0;
    if (aarch64)
        vaddr = purifyTaggedAddr(vaddr_tainted, tc, aarch64EL, ttbcr);
    else
        vaddr = vaddr_tainted;
    Request::Flags flags = req->getFlags();

    bool is_fetch = (mode == Execute);
    bool is_write = (mode == Write);
    bool long_desc_format = aarch64 || longDescFormatInUse(tc);
    ArmFault::TranMethod tranMethod = long_desc_format ? ArmFault::LpaeTran
                                                       : ArmFault::VmsaTran;
    DPRINTF(TLBVerbose, "CPSR is priv:%d UserMode:%d secure:%d S1S2NsTran:%d\n",
            isPriv, flags & UserMode, isSecure, tranType & S1S2NsTran);

    DPRINTF(TLB, "translateFs addr %#x, mode %d, st2 %d, scr %#x sctlr %#x "
            "flags %#lx tranType 0x%x\n", vaddr_tainted, mode, isStage2,
            scr, sctlr, flags, tranType);
    if ((req->isInstFetch() && (!sctlr.i)) ||
        ((!req->isInstFetch()) && (!sctlr.c))) {
        if (!req->isCacheMaintenance()) {
            req->setFlags(Request::UNCACHEABLE);
        }
        req->setFlags(Request::STRICT_ORDER);
    }
    if (!is_fetch) {
        assert(flags & MustBeOne);
        if (sctlr.a || !(flags & AllowUnaligned)) {
            if (vaddr & mask(flags & AlignmentMask)) {
                alignFaults++;
                return std::make_shared<DataAbort>(
                    vaddr_tainted,
                    TlbEntry::DomainType::NoAccess, is_write,
                    ArmFault::AlignmentFault, isStage2,
                    tranMethod);
            }
        }
    }
    // If guest MMU is off or hcr.vm=0 go straight to stage2
    if ((isStage2 && !hcr.vm) || (!isStage2 && !sctlr.m)) {
        req->setPaddr(vaddr);
        // When the MMU is off the security attribute corresponds to the
        // security state of the processor
        if (isSecure)
            req->setFlags(Request::SECURE);

        // @todo: double check this (ARM ARM issue C B3.2.1)
        if (long_desc_format || sctlr.tre == 0 || nmrr.ir0 == 0 ||
            nmrr.or0 == 0 || prrr.tr0 != 0x2) {
            if (!req->isCacheMaintenance()) {
                req->setFlags(Request::UNCACHEABLE);
            }
            req->setFlags(Request::STRICT_ORDER);
        }

        // Set memory attributes
        TlbEntry temp_te;
        temp_te.ns = !isSecure;
        if (isStage2 || hcr.dc == 0 || isSecure ||
            (isHyp && !(tranType & S1CTran))) {

            temp_te.mtype = is_fetch ? TlbEntry::MemoryType::Normal
                                     : TlbEntry::MemoryType::StronglyOrdered;
            temp_te.innerAttrs = 0x0;
            temp_te.outerAttrs = 0x0;
            temp_te.shareable  = true;
            temp_te.outerShareable = true;
        } else {
            temp_te.mtype = TlbEntry::MemoryType::Normal;
            temp_te.innerAttrs = 0x3;
            temp_te.outerAttrs = 0x3;
            temp_te.shareable  = false;
            temp_te.outerShareable = false;
        }
        temp_te.setAttributes(long_desc_format);
        DPRINTF(TLBVerbose, "(No MMU) setting memory attributes: shareable: "
                "%d, innerAttrs: %d, outerAttrs: %d, isStage2: %d\n",
                temp_te.shareable, temp_te.innerAttrs, temp_te.outerAttrs,
                isStage2);
        setAttr(temp_te.attributes);

        return testTranslation(req, mode, TlbEntry::DomainType::NoAccess);
    }
    DPRINTF(TLBVerbose, "Translating %s=%#x context=%d\n",
            isStage2 ? "IPA" : "VA", vaddr_tainted, asid);

    // Translation enabled
    TlbEntry *te = NULL;
    TlbEntry mergeTe;
    Fault fault = getResultTe(&te, req, tc, mode, translation, timing,
                              functional, &mergeTe);
    // only proceed if we have a valid table entry
    if ((te == NULL) && (fault == NoFault)) delay = true;
    // If we have the table entry transfer some of the attributes to the
    // request that triggered the translation
    if (te != NULL) {
        // Set memory attributes
        DPRINTF(TLBVerbose,
                "Setting memory attributes: shareable: %d, innerAttrs: %d, "
                "outerAttrs: %d, mtype: %d, isStage2: %d\n",
                te->shareable, te->innerAttrs, te->outerAttrs,
                static_cast<uint8_t>(te->mtype), isStage2);
        setAttr(te->attributes);

        if (te->nonCacheable && !req->isCacheMaintenance())
            req->setFlags(Request::UNCACHEABLE);

        // Require requests to be ordered if the request goes to
        // strongly ordered or device memory (i.e., anything other
        // than normal memory requires strict order).
        if (te->mtype != TlbEntry::MemoryType::Normal)
            req->setFlags(Request::STRICT_ORDER);

        Addr pa = te->pAddr(vaddr);
        req->setPaddr(pa);

        if (isSecure && !te->ns) {
            req->setFlags(Request::SECURE);
        }
        if ((!is_fetch) && (vaddr & mask(flags & AlignmentMask)) &&
            (te->mtype != TlbEntry::MemoryType::Normal)) {
            // Unaligned accesses to Device memory should always cause an
            // abort regardless of sctlr.a
            alignFaults++;
            return std::make_shared<DataAbort>(
                vaddr_tainted,
                TlbEntry::DomainType::NoAccess, is_write,
                ArmFault::AlignmentFault, isStage2,
                tranMethod);
        }

        // Check for a trickbox generated address fault
        if (fault == NoFault)
            fault = testTranslation(req, mode, te->domain);
    }

    if (fault == NoFault) {
        // Don't try to finalize a physical address unless the
        // translation has completed (i.e., there is a table entry).
        return te ? finalizePhysical(req, tc, mode) : NoFault;
    } else {
        return fault;
    }
}
Fault
TLB::translateAtomic(const RequestPtr &req, ThreadContext *tc, Mode mode,
                     TLB::ArmTranslationType tranType)
{
    updateMiscReg(tc, tranType);

    if (directToStage2) {
        return stage2Tlb->translateAtomic(req, tc, mode, tranType);
    }

    bool delay = false;
    Fault fault;
    if (FullSystem)
        fault = translateFs(req, tc, mode, NULL, delay, false, tranType);
    else
        fault = translateSe(req, tc, mode, NULL, delay, false);
    assert(!delay);
    return fault;
}
Fault
TLB::translateFunctional(const RequestPtr &req, ThreadContext *tc, Mode mode,
                         TLB::ArmTranslationType tranType)
{
    updateMiscReg(tc, tranType);

    if (directToStage2) {
        return stage2Tlb->translateFunctional(req, tc, mode, tranType);
    }

    bool delay = false;
    Fault fault;
    if (FullSystem)
        fault = translateFs(req, tc, mode, NULL, delay, false, tranType, true);
    else
        fault = translateSe(req, tc, mode, NULL, delay, false);
    assert(!delay);
    return fault;
}
void
TLB::translateTiming(const RequestPtr &req, ThreadContext *tc,
                     Translation *translation, Mode mode,
                     TLB::ArmTranslationType tranType)
{
    updateMiscReg(tc, tranType);

    if (directToStage2) {
        stage2Tlb->translateTiming(req, tc, translation, mode, tranType);
        return;
    }

    assert(translation);

    translateComplete(req, tc, translation, mode, tranType, isStage2);
}
Fault
TLB::translateComplete(const RequestPtr &req, ThreadContext *tc,
                       Translation *translation, Mode mode,
                       TLB::ArmTranslationType tranType, bool callFromS2)
{
    bool delay = false;
    Fault fault;
    if (FullSystem)
        fault = translateFs(req, tc, mode, translation, delay, true, tranType);
    else
        fault = translateSe(req, tc, mode, translation, delay, true);
    DPRINTF(TLBVerbose, "Translation returning delay=%d fault=%d\n", delay,
            fault != NoFault);
    // If we have a translation, and we're not in the middle of doing a stage
    // 2 translation tell the translation that we've either finished or it's
    // going to take a while. By not doing this when we're in the middle of a
    // stage 2 translation we prevent marking the translation as delayed twice,
    // once when the translation starts and again when the stage 1 translation
    // completes.
    if (translation && (callFromS2 || !stage2Req || req->hasPaddr() ||
        fault != NoFault)) {
        if (!delay)
            translation->finish(fault, req, tc, mode);
        else
            translation->markDelayed();
    }
    return fault;
}
1249 return &stage2Mmu
->getDMAPort();
1253 TLB::updateMiscReg(ThreadContext
*tc
, ArmTranslationType tranType
)
1255 // check if the regs have changed, or the translation mode is different.
1256 // NOTE: the tran type doesn't affect stage 2 TLB's as they only handle
1257 // one type of translation anyway
1258 if (miscRegValid
&& miscRegContext
== tc
->contextId() &&
1259 ((tranType
== curTranType
) || isStage2
)) {
1263 DPRINTF(TLBVerbose
, "TLB variables changed!\n");
1264 cpsr
= tc
->readMiscReg(MISCREG_CPSR
);
1266 // Dependencies: SCR/SCR_EL3, CPSR
1267 isSecure
= inSecureState(tc
) &&
1268 !(tranType
& HypMode
) && !(tranType
& S1S2NsTran
);
1270 aarch64EL
= tranTypeEL(cpsr
, tranType
);
1271 aarch64
= isStage2
?
1273 ELIs64(tc
, aarch64EL
== EL0
? EL1
: aarch64EL
);
1275 if (aarch64
) { // AArch64
1276 // determine EL we need to translate in
1277 switch (aarch64EL
) {
1281 sctlr
= tc
->readMiscReg(MISCREG_SCTLR_EL1
);
1282 ttbcr
= tc
->readMiscReg(MISCREG_TCR_EL1
);
1283 uint64_t ttbr_asid
= ttbcr
.a1
?
1284 tc
->readMiscReg(MISCREG_TTBR1_EL1
) :
1285 tc
->readMiscReg(MISCREG_TTBR0_EL1
);
1286 asid
= bits(ttbr_asid
,
1287 (haveLargeAsid64
&& ttbcr
.as
) ? 63 : 55, 48);
1291 sctlr
= tc
->readMiscReg(MISCREG_SCTLR_EL2
);
1292 ttbcr
= tc
->readMiscReg(MISCREG_TCR_EL2
);
1296 sctlr
= tc
->readMiscReg(MISCREG_SCTLR_EL3
);
1297 ttbcr
= tc
->readMiscReg(MISCREG_TCR_EL3
);
1301 hcr
= tc
->readMiscReg(MISCREG_HCR_EL2
);
1302 scr
= tc
->readMiscReg(MISCREG_SCR_EL3
);
1303 isPriv
= aarch64EL
!= EL0
;
1304 if (haveVirtualization
) {
1305 vmid
= bits(tc
->readMiscReg(MISCREG_VTTBR_EL2
), 55, 48);
1306 isHyp
= tranType
& HypMode
;
1307 isHyp
&= (tranType
& S1S2NsTran
) == 0;
1308 isHyp
&= (tranType
& S1CTran
) == 0;
1309 // Work out if we should skip the first stage of translation and go
1310 // directly to stage 2. This value is cached so we don't have to
1311 // compute it for every translation.
1312 stage2Req
= isStage2
||
1313 (hcr
.vm
&& !isHyp
&& !isSecure
&&
1314 !(tranType
& S1CTran
) && (aarch64EL
< EL2
) &&
1315 !(tranType
& S1E1Tran
)); // <--- FIX THIS HACK
1316 stage2DescReq
= isStage2
|| (hcr
.vm
&& !isHyp
&& !isSecure
&&
1318 directToStage2
= !isStage2
&& stage2Req
&& !sctlr
.m
;
1322 directToStage2
= false;
1324 stage2DescReq
= false;
1327 sctlr
= tc
->readMiscReg(snsBankedIndex(MISCREG_SCTLR
, tc
,
1329 ttbcr
= tc
->readMiscReg(snsBankedIndex(MISCREG_TTBCR
, tc
,
1331 scr
= tc
->readMiscReg(MISCREG_SCR
);
1332 isPriv
= cpsr
.mode
!= MODE_USER
;
1333 if (longDescFormatInUse(tc
)) {
1334 uint64_t ttbr_asid
= tc
->readMiscReg(
1335 snsBankedIndex(ttbcr
.a1
? MISCREG_TTBR1
:
1338 asid
= bits(ttbr_asid
, 55, 48);
1339 } else { // Short-descriptor translation table format in use
1340 CONTEXTIDR context_id
= tc
->readMiscReg(snsBankedIndex(
1341 MISCREG_CONTEXTIDR
, tc
,!isSecure
));
1342 asid
= context_id
.asid
;
1344 prrr
= tc
->readMiscReg(snsBankedIndex(MISCREG_PRRR
, tc
,
1346 nmrr
= tc
->readMiscReg(snsBankedIndex(MISCREG_NMRR
, tc
,
1348 dacr
= tc
->readMiscReg(snsBankedIndex(MISCREG_DACR
, tc
,
1350 hcr
= tc
->readMiscReg(MISCREG_HCR
);
1352 if (haveVirtualization
) {
1353 vmid
= bits(tc
->readMiscReg(MISCREG_VTTBR
), 55, 48);
1354 isHyp
= cpsr
.mode
== MODE_HYP
;
1355 isHyp
|= tranType
& HypMode
;
1356 isHyp
&= (tranType
& S1S2NsTran
) == 0;
1357 isHyp
&= (tranType
& S1CTran
) == 0;
1359 sctlr
= tc
->readMiscReg(MISCREG_HSCTLR
);
1361 // Work out if we should skip the first stage of translation and go
1362 // directly to stage 2. This value is cached so we don't have to
1363 // compute it for every translation.
1364 stage2Req
= hcr
.vm
&& !isStage2
&& !isHyp
&& !isSecure
&&
1365 !(tranType
& S1CTran
);
1366 stage2DescReq
= hcr
.vm
&& !isStage2
&& !isHyp
&& !isSecure
;
1367 directToStage2
= stage2Req
&& !sctlr
.m
;
1372 directToStage2
= false;
1373 stage2DescReq
= false;
1376 miscRegValid
= true;
1377 miscRegContext
= tc
->contextId();
1378 curTranType
= tranType
;
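// updateMiscReg() caches three related flags that are consulted on every
// translation: stage2Req (the current access needs a stage 2 lookup),
// stage2DescReq (the stage 1 descriptor fetches themselves, i.e. guest page
// table walks, need a stage 2 translation), and directToStage2 (stage 1 is
// disabled, so requests can be handed straight to the stage 2 TLB).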
ExceptionLevel
TLB::tranTypeEL(CPSR cpsr, ArmTranslationType type)
{
    switch (type) {

        return opModeToEL((OperatingMode)(uint8_t)cpsr.mode);

      default:
        panic("Unknown translation mode!\n");
    }
}
Fault
TLB::getTE(TlbEntry **te, const RequestPtr &req, ThreadContext *tc, Mode mode,
           Translation *translation, bool timing, bool functional,
           bool is_secure, TLB::ArmTranslationType tranType)
{
    bool is_fetch = (mode == Execute);
    bool is_write = (mode == Write);

    Addr vaddr_tainted = req->getVaddr();
    Addr vaddr = 0;
    ExceptionLevel target_el = aarch64 ? aarch64EL : EL1;
    if (aarch64)
        vaddr = purifyTaggedAddr(vaddr_tainted, tc, target_el, ttbcr);
    else
        vaddr = vaddr_tainted;
    *te = lookup(vaddr, asid, vmid, isHyp, is_secure, false, false, target_el);
    if (*te == NULL) {
        if (req->isPrefetch()) {
            // if the request is a prefetch don't attempt to fill the TLB or go
            // any further with the memory access (here we can safely use the
            // fault status for the short desc. format in all cases)
            prefetchFaults++;
            return std::make_shared<PrefetchAbort>(
                vaddr_tainted, ArmFault::PrefetchTLBMiss, isStage2);
        }

        // start translation table walk, pass variables rather than
        // re-retrieving in table walker for speed
        DPRINTF(TLB, "TLB Miss: Starting hardware table walker for %#x(%d:%d)\n",
                vaddr_tainted, asid, vmid);
        Fault fault = tableWalker->walk(req, tc, asid, vmid, isHyp, mode,
                                        translation, timing, functional,
                                        is_secure, tranType, stage2DescReq);
        // for timing mode, return and wait for table walk,
        if (timing || fault != NoFault) {
            return fault;
        }

        *te = lookup(vaddr, asid, vmid, isHyp, is_secure, false, false,
                     target_el);
    }
    return NoFault;
}
Fault
TLB::getResultTe(TlbEntry **te, const RequestPtr &req,
                 ThreadContext *tc, Mode mode,
                 Translation *translation, bool timing, bool functional,
                 TlbEntry *mergeTe)
{
    Fault fault;

    if (isStage2) {
        // We are already in the stage 2 TLB. Grab the table entry for stage
        // 2 only. We are here because stage 1 translation is disabled.
        TlbEntry *s2Te = NULL;
        // Get the stage 2 table entry
        fault = getTE(&s2Te, req, tc, mode, translation, timing, functional,
                      isSecure, curTranType);
        // Check permissions of stage 2
        if ((s2Te != NULL) && (fault == NoFault)) {
            *te = s2Te;
            if (aarch64)
                fault = checkPermissions64(s2Te, req, mode, tc);
            else
                fault = checkPermissions(s2Te, req, mode);
        }
        return fault;
    }

    TlbEntry *s1Te = NULL;

    Addr vaddr_tainted = req->getVaddr();

    // Get the stage 1 table entry
    fault = getTE(&s1Te, req, tc, mode, translation, timing, functional,
                  isSecure, curTranType);
    // only proceed if we have a valid table entry
    if ((s1Te != NULL) && (fault == NoFault)) {
        // Check stage 1 permissions before checking stage 2
        if (aarch64)
            fault = checkPermissions64(s1Te, req, mode, tc);
        else
            fault = checkPermissions(s1Te, req, mode);
        if (stage2Req & (fault == NoFault)) {
            Stage2LookUp *s2Lookup = new Stage2LookUp(this, stage2Tlb, *s1Te,
                req, translation, mode, timing, functional, curTranType);
            fault = s2Lookup->getTe(tc, mergeTe);
            if (s2Lookup->isComplete()) {
                *te = mergeTe;
                // We've finished with the lookup so delete it
                delete s2Lookup;
            } else {
                // The lookup hasn't completed, so we can't delete it now. We
                // get round this by asking the object to self delete when the
                // translation is complete.
                s2Lookup->setSelfDelete();
            }
        } else {
            // This case deals with an S1 hit (or bypass), followed by
            // an S2 hit-but-perms issue
            DPRINTF(TLBVerbose, "s2TLB: reqVa %#x, reqPa %#x, fault %p\n",
                    vaddr_tainted, req->hasPaddr() ? req->getPaddr() : ~0,
                    fault);
            if (fault != NoFault) {
                ArmFault *armFault = reinterpret_cast<ArmFault *>(fault.get());
                armFault->annotate(ArmFault::S1PTW, false);
                armFault->annotate(ArmFault::OVA, vaddr_tainted);
            }
            *te = s1Te;
        }
    }
    return fault;
}
void
TLB::setTestInterface(SimObject *_ti)
{
    if (!_ti) {
        test = nullptr;
    } else {
        TlbTestInterface *ti(dynamic_cast<TlbTestInterface *>(_ti));
        fatal_if(!ti, "%s is not a valid ARM TLB tester\n", _ti->name());
        test = ti;
    }
}
Fault
TLB::testTranslation(const RequestPtr &req, Mode mode,
                     TlbEntry::DomainType domain)
{
    if (!test || !req->hasSize() || req->getSize() == 0 ||
        req->isCacheMaintenance()) {
        return NoFault;
    } else {
        return test->translationCheck(req, isPriv, mode, domain);
    }
}
Fault
TLB::testWalk(Addr pa, Addr size, Addr va, bool is_secure, Mode mode,
              TlbEntry::DomainType domain, LookupLevel lookup_level)
{
    if (!test) {
        return NoFault;
    } else {
        return test->walkCheck(pa, size, va, is_secure, isPriv, mode,
                               domain, lookup_level);
    }
}
1582 ArmTLBParams::create()
1584 return new ArmISA::TLB(this);