/*
 * Copyright (c) 2010-2013, 2016-2017 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2001-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "arch/arm/tlb.hh"

#include <algorithm>
#include <memory>

#include "arch/arm/faults.hh"
#include "arch/arm/pagetable.hh"
#include "arch/arm/stage2_lookup.hh"
#include "arch/arm/stage2_mmu.hh"
#include "arch/arm/system.hh"
#include "arch/arm/table_walker.hh"
#include "arch/arm/utility.hh"
#include "base/inifile.hh"
#include "base/str.hh"
#include "base/trace.hh"
#include "cpu/base.hh"
#include "cpu/thread_context.hh"
#include "debug/Checkpoint.hh"
#include "debug/TLB.hh"
#include "debug/TLBVerbose.hh"
#include "mem/page_table.hh"
#include "mem/request.hh"
#include "params/ArmTLB.hh"
#include "sim/full_system.hh"
#include "sim/process.hh"
using namespace ArmISA;
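// The TLB proper is a flat array of TlbEntry objects ('table') that is
// searched linearly on lookup; hits found beyond the 'rangeMRU' index are
// promoted towards slot 0, so the array doubles as an MRU-ordered
// replacement list (see lookup() and insert() below).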
TLB::TLB(const ArmTLBParams *p)
    : BaseTLB(p), table(new TlbEntry[p->size]), size(p->size),
      isStage2(p->is_stage2), stage2Req(false), _attr(0),
      directToStage2(false), tableWalker(p->walker), stage2Tlb(NULL),
      stage2Mmu(NULL), test(nullptr), rangeMRU(1),
      aarch64(false), aarch64EL(EL0), isPriv(false), isSecure(false),
      isHyp(false), asid(0), vmid(0), dacr(0),
      miscRegValid(false), miscRegContext(0), curTranType(NormalTran)
{
    tableWalker->setTlb(this);

    // Cache system-level properties
    haveLPAE = tableWalker->haveLPAE();
    haveVirtualization = tableWalker->haveVirtualization();
    haveLargeAsid64 = tableWalker->haveLargeAsid64();
}

void
TLB::init()
{
    if (stage2Mmu && !isStage2)
        stage2Tlb = stage2Mmu->stage2Tlb();
}
void
TLB::setMMU(Stage2MMU *m, MasterID master_id)
{
    stage2Mmu = m;
    tableWalker->setMMU(m, master_id);
}
bool
TLB::translateFunctional(ThreadContext *tc, Addr va, Addr &pa)
{
    updateMiscReg(tc);

    if (directToStage2) {
        assert(stage2Tlb);
        return stage2Tlb->translateFunctional(tc, va, pa);
    }

    TlbEntry *e = lookup(va, asid, vmid, isHyp, isSecure, true, false,
                         aarch64 ? aarch64EL : EL1);
    if (!e)
        return false;
    pa = e->pAddr(va);
    return true;
}
Fault
TLB::finalizePhysical(RequestPtr req, ThreadContext *tc, Mode mode) const
{
    return NoFault;
}
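// Search the table for an entry matching the (va, asn, vmid, hyp, secure,
// target_el) tuple. When 'ignore_asn' is set the match is performed without
// the ASN, which is how flushes by MVA find every candidate entry.
// Functional lookups leave the MRU ordering untouched so debug and
// functional accesses do not perturb replacement state.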
TlbEntry*
TLB::lookup(Addr va, uint16_t asn, uint8_t vmid, bool hyp, bool secure,
            bool functional, bool ignore_asn, uint8_t target_el)
{
    TlbEntry *retval = NULL;

    // Maintaining LRU array
    int x = 0;
    while (retval == NULL && x < size) {
        if ((!ignore_asn && table[x].match(va, asn, vmid, hyp, secure, false,
             target_el)) ||
            (ignore_asn && table[x].match(va, vmid, hyp, secure, target_el))) {
            // We only move the hit entry ahead when the position is higher
            // than rangeMRU
            if (x > rangeMRU && !functional) {
                TlbEntry tmp_entry = table[x];
                for (int i = x; i > 0; i--)
                    table[i] = table[i - 1];
                table[0] = tmp_entry;
                retval = &table[0];
            } else {
                retval = &table[x];
            }
            break;
        }
        ++x;
    }

    DPRINTF(TLBVerbose, "Lookup %#x, asn %#x -> %s vmn 0x%x hyp %d secure %d "
            "ppn %#x size: %#x pa: %#x ap:%d ns:%d nstid:%d g:%d asid: %d "
            "el: %d\n",
            va, asn, retval ? "hit" : "miss", vmid, hyp, secure,
            retval ? retval->pfn : 0, retval ? retval->size : 0,
            retval ? retval->pAddr(va) : 0, retval ? retval->ap : 0,
            retval ? retval->ns : 0, retval ? retval->nstid : 0,
            retval ? retval->global : 0, retval ? retval->asid : 0,
            retval ? retval->el : 0);

    return retval;
}
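// insert() pairs with lookup(): new entries enter at the MRU slot and age
// towards the tail as other entries are promoted, so the entry at
// table[size - 1] is always the least-recently-used eviction candidate.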
// insert a new TLB entry
void
TLB::insert(Addr addr, TlbEntry &entry)
{
    DPRINTF(TLB, "Inserting entry into TLB with pfn:%#x size:%#x vpn: %#x"
            " asid:%d vmid:%d N:%d global:%d valid:%d nc:%d xn:%d"
            " ap:%#x domain:%#x ns:%d nstid:%d isHyp:%d\n", entry.pfn,
            entry.size, entry.vpn, entry.asid, entry.vmid, entry.N,
            entry.global, entry.valid, entry.nonCacheable, entry.xn,
            entry.ap, static_cast<uint8_t>(entry.domain), entry.ns,
            entry.nstid, entry.isHyp);

    if (table[size - 1].valid)
        DPRINTF(TLB, " - Replacing Valid entry %#x, asn %d vmn %d ppn %#x "
                "size: %#x ap:%d ns:%d nstid:%d g:%d isHyp:%d el: %d\n",
                table[size-1].vpn << table[size-1].N, table[size-1].asid,
                table[size-1].vmid, table[size-1].pfn << table[size-1].N,
                table[size-1].size, table[size-1].ap, table[size-1].ns,
                table[size-1].nstid, table[size-1].global, table[size-1].isHyp,
                table[size-1].el);

    // inserting to MRU position and evicting the LRU one
    for (int i = size - 1; i > 0; --i)
        table[i] = table[i-1];
    table[0] = entry;

    ppRefills->notify(1);
}
void
TLB::printTlb() const
{
    DPRINTF(TLB, "Current TLB contents:\n");
    for (int x = 0; x < size; x++) {
        const TlbEntry *te = &table[x];
        if (te->valid)
            DPRINTF(TLB, " * %s\n", te->print());
    }
}
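// The flush* family below implements the TLBI-style maintenance operations:
// each variant walks the table, invalidates the entries matching its
// criteria, and, where required, forwards the flush to the stage 2 TLB.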
void
TLB::flushAllSecurity(bool secure_lookup, uint8_t target_el, bool ignore_el)
{
    DPRINTF(TLB, "Flushing all TLB entries (%s lookup)\n",
            (secure_lookup ? "secure" : "non-secure"));
    for (int x = 0; x < size; x++) {
        TlbEntry *te = &table[x];
        if (te->valid && secure_lookup == !te->nstid &&
            (te->vmid == vmid || secure_lookup) &&
            checkELMatch(target_el, te->el, ignore_el)) {

            DPRINTF(TLB, " - %s\n", te->print());
            te->valid = false;
            flushedEntries++;
        }
    }

    flushTlb++;

    // If there's a second stage TLB (and we're not it) then flush it as well
    // if we're currently in hyp mode
    if (!isStage2 && isHyp) {
        stage2Tlb->flushAllSecurity(secure_lookup, true);
    }
}
void
TLB::flushAllNs(bool hyp, uint8_t target_el, bool ignore_el)
{
    DPRINTF(TLB, "Flushing all NS TLB entries (%s lookup)\n",
            (hyp ? "hyp" : "non-hyp"));
    for (int x = 0; x < size; x++) {
        TlbEntry *te = &table[x];
        if (te->valid && te->nstid && te->isHyp == hyp &&
            checkELMatch(target_el, te->el, ignore_el)) {

            DPRINTF(TLB, " - %s\n", te->print());
            te->valid = false;
            flushedEntries++;
        }
    }

    flushTlb++;

    // If there's a second stage TLB (and we're not it) then flush it as well
    if (!isStage2 && !hyp) {
        stage2Tlb->flushAllNs(false, true);
    }
}
void
TLB::flushMvaAsid(Addr mva, uint64_t asn, bool secure_lookup,
                  uint8_t target_el)
{
    DPRINTF(TLB, "Flushing TLB entries with mva: %#x, asid: %#x "
            "(%s lookup)\n", mva, asn, (secure_lookup ?
            "secure" : "non-secure"));
    _flushMva(mva, asn, secure_lookup, false, false, target_el);
    flushTlbMvaAsid++;
}
void
TLB::flushAsid(uint64_t asn, bool secure_lookup, uint8_t target_el)
{
    DPRINTF(TLB, "Flushing TLB entries with asid: %#x (%s lookup)\n", asn,
            (secure_lookup ? "secure" : "non-secure"));

    for (int x = 0; x < size; x++) {
        TlbEntry *te = &table[x];
        if (te->valid && te->asid == asn && secure_lookup == !te->nstid &&
            (te->vmid == vmid || secure_lookup) &&
            checkELMatch(target_el, te->el, false)) {

            te->valid = false;
            DPRINTF(TLB, " - %s\n", te->print());
            flushedEntries++;
        }
    }
    flushTlbAsid++;
}
void
TLB::flushMva(Addr mva, bool secure_lookup, bool hyp, uint8_t target_el)
{
    DPRINTF(TLB, "Flushing TLB entries with mva: %#x (%s lookup)\n", mva,
            (secure_lookup ? "secure" : "non-secure"));
    _flushMva(mva, 0xbeef, secure_lookup, hyp, true, target_el);
    flushTlbMva++;
}
void
TLB::_flushMva(Addr mva, uint64_t asn, bool secure_lookup, bool hyp,
               bool ignore_asn, uint8_t target_el)
{
    TlbEntry *te;
    // D5.7.2: Sign-extend address to 64 bits
    mva = sext<56>(mva);
    te = lookup(mva, asn, vmid, hyp, secure_lookup, false, ignore_asn,
                target_el);
    while (te != NULL) {
        if (secure_lookup == !te->nstid) {
            DPRINTF(TLB, " - %s\n", te->print());
            te->valid = false;
            flushedEntries++;
        }
        te = lookup(mva, asn, vmid, hyp, secure_lookup, false, ignore_asn,
                    target_el);
    }
}
void
TLB::flushIpaVmid(Addr ipa, bool secure_lookup, bool hyp, uint8_t target_el)
{
    assert(!isStage2);
    stage2Tlb->_flushMva(ipa, 0xbeef, secure_lookup, hyp, true, target_el);
}
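// An entry matches the requested exception level either exactly (EL2/EL3)
// or as part of the EL0/EL1 translation regime, which the two levels share.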
bool
TLB::checkELMatch(uint8_t target_el, uint8_t tentry_el, bool ignore_el)
{
    bool elMatch = true;
    if (!ignore_el) {
        if (target_el == 2 || target_el == 3) {
            elMatch = (tentry_el == target_el);
        } else {
            elMatch = (tentry_el == 0) || (tentry_el == 1);
        }
    }
    return elMatch;
}
void
TLB::drainResume()
{
    // We might have unserialized something or switched CPUs, so make
    // sure to re-read the misc regs.
    miscRegValid = false;
}
void
TLB::takeOverFrom(BaseTLB *_otlb)
{
    TLB *otlb = dynamic_cast<TLB*>(_otlb);
    /* Make sure we actually have a valid type */
    if (otlb) {
        _attr = otlb->_attr;
        haveLPAE = otlb->haveLPAE;
        directToStage2 = otlb->directToStage2;
        stage2Req = otlb->stage2Req;

        /* Sync the stage2 MMU if they exist in both
         * the old CPU and the new
         */
        if (!isStage2 &&
            stage2Tlb && otlb->stage2Tlb) {
            stage2Tlb->takeOverFrom(otlb->stage2Tlb);
        }
    } else {
        panic("Incompatible TLB type!");
    }
}
void
TLB::serialize(CheckpointOut &cp) const
{
    DPRINTF(Checkpoint, "Serializing Arm TLB\n");

    SERIALIZE_SCALAR(_attr);
    SERIALIZE_SCALAR(haveLPAE);
    SERIALIZE_SCALAR(directToStage2);
    SERIALIZE_SCALAR(stage2Req);

    int num_entries = size;
    SERIALIZE_SCALAR(num_entries);
    for (int i = 0; i < size; i++)
        table[i].serializeSection(cp, csprintf("TlbEntry%d", i));
}
void
TLB::unserialize(CheckpointIn &cp)
{
    DPRINTF(Checkpoint, "Unserializing Arm TLB\n");

    UNSERIALIZE_SCALAR(_attr);
    UNSERIALIZE_SCALAR(haveLPAE);
    UNSERIALIZE_SCALAR(directToStage2);
    UNSERIALIZE_SCALAR(stage2Req);

    int num_entries;
    UNSERIALIZE_SCALAR(num_entries);
    for (int i = 0; i < std::min(size, num_entries); i++)
        table[i].unserializeSection(cp, csprintf("TlbEntry%d", i));
}
void
TLB::regStats()
{
    instHits
        .name(name() + ".inst_hits")
        .desc("ITB inst hits")
        ;

    instMisses
        .name(name() + ".inst_misses")
        .desc("ITB inst misses")
        ;

    instAccesses
        .name(name() + ".inst_accesses")
        .desc("ITB inst accesses")
        ;

    readHits
        .name(name() + ".read_hits")
        .desc("DTB read hits")
        ;

    readMisses
        .name(name() + ".read_misses")
        .desc("DTB read misses")
        ;

    readAccesses
        .name(name() + ".read_accesses")
        .desc("DTB read accesses")
        ;

    writeHits
        .name(name() + ".write_hits")
        .desc("DTB write hits")
        ;

    writeMisses
        .name(name() + ".write_misses")
        .desc("DTB write misses")
        ;

    writeAccesses
        .name(name() + ".write_accesses")
        .desc("DTB write accesses")
        ;

    hits
        .name(name() + ".hits")
        .desc("DTB hits")
        ;

    misses
        .name(name() + ".misses")
        .desc("DTB misses")
        ;

    accesses
        .name(name() + ".accesses")
        .desc("DTB accesses")
        ;

    flushTlb
        .name(name() + ".flush_tlb")
        .desc("Number of times complete TLB was flushed")
        ;

    flushTlbMva
        .name(name() + ".flush_tlb_mva")
        .desc("Number of times TLB was flushed by MVA")
        ;

    flushTlbMvaAsid
        .name(name() + ".flush_tlb_mva_asid")
        .desc("Number of times TLB was flushed by MVA & ASID")
        ;

    flushTlbAsid
        .name(name() + ".flush_tlb_asid")
        .desc("Number of times TLB was flushed by ASID")
        ;

    flushedEntries
        .name(name() + ".flush_entries")
        .desc("Number of entries that have been flushed from TLB")
        ;

    alignFaults
        .name(name() + ".align_faults")
        .desc("Number of TLB faults due to alignment restrictions")
        ;

    prefetchFaults
        .name(name() + ".prefetch_faults")
        .desc("Number of TLB faults due to prefetch")
        ;

    domainFaults
        .name(name() + ".domain_faults")
        .desc("Number of TLB faults due to domain restrictions")
        ;

    permsFaults
        .name(name() + ".perms_faults")
        .desc("Number of TLB faults due to permissions restrictions")
        ;

    instAccesses = instHits + instMisses;
    readAccesses = readHits + readMisses;
    writeAccesses = writeHits + writeMisses;
    hits = readHits + writeHits + instHits;
    misses = readMisses + writeMisses + instMisses;
    accesses = readAccesses + writeAccesses + instAccesses;
}
void
TLB::regProbePoints()
{
    ppRefills.reset(new ProbePoints::PMU(getProbeManager(), "Refills"));
}
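// Syscall-emulation (SE) mode bypasses the hardware page-table walker
// entirely: after the alignment check the virtual address is translated
// through the process' page table, and a miss there is reported as a
// generic page-table fault.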
Fault
TLB::translateSe(RequestPtr req, ThreadContext *tc, Mode mode,
                 Translation *translation, bool &delay, bool timing)
{
    updateMiscReg(tc);
    Addr vaddr_tainted = req->getVaddr();
    Addr vaddr = 0;
    if (aarch64)
        vaddr = purifyTaggedAddr(vaddr_tainted, tc, aarch64EL, ttbcr);
    else
        vaddr = vaddr_tainted;
    Request::Flags flags = req->getFlags();

    bool is_fetch = (mode == Execute);
    bool is_write = (mode == Write);

    if (!is_fetch) {
        assert(flags & MustBeOne);
        if (sctlr.a || !(flags & AllowUnaligned)) {
            if (vaddr & mask(flags & AlignmentMask)) {
                // LPAE is always disabled in SE mode
                return std::make_shared<DataAbort>(
                    vaddr_tainted,
                    TlbEntry::DomainType::NoAccess, is_write,
                    ArmFault::AlignmentFault, isStage2,
                    ArmFault::VmsaTran);
            }
        }
    }

    Addr paddr;
    Process *p = tc->getProcessPtr();

    if (!p->pTable->translate(vaddr, paddr))
        return std::make_shared<GenericPageTableFault>(vaddr_tainted);
    req->setPaddr(paddr);

    return NoFault;
}
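// AArch32 permission check. Works on both the short and the long descriptor
// format: the domain check only applies to short descriptors, after which
// the AP/XN bits (and the HAP bits for stage 2) decide between no fault,
// a prefetch abort, and a data abort.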
Fault
TLB::checkPermissions(TlbEntry *te, RequestPtr req, Mode mode)
{
    Addr vaddr = req->getVaddr(); // 32-bit don't have to purify
    Request::Flags flags = req->getFlags();
    bool is_fetch  = (mode == Execute);
    bool is_write  = (mode == Write);
    bool is_priv   = isPriv && !(flags & UserMode);

    // Get the translation type from the actual table entry
    ArmFault::TranMethod tranMethod = te->longDescFormat ? ArmFault::LpaeTran
                                                         : ArmFault::VmsaTran;

    // If this is the second stage of translation and the request is for a
    // stage 1 page table walk then we need to check the HCR.PTW bit. This
    // allows us to generate a fault if the request targets an area marked
    // as a device or strongly ordered.
    if (isStage2 && req->isPTWalk() && hcr.ptw &&
        (te->mtype != TlbEntry::MemoryType::Normal)) {
        return std::make_shared<DataAbort>(
            vaddr, te->domain, is_write,
            ArmFault::PermissionLL + te->lookupLevel,
            isStage2, tranMethod);
    }

    // Generate an alignment fault for unaligned data accesses to device or
    // strongly ordered memory
    if (!is_fetch) {
        if (te->mtype != TlbEntry::MemoryType::Normal) {
            if (vaddr & mask(flags & AlignmentMask)) {
                alignFaults++;
                return std::make_shared<DataAbort>(
                    vaddr, TlbEntry::DomainType::NoAccess, is_write,
                    ArmFault::AlignmentFault, isStage2,
                    tranMethod);
            }
        }
    }

    if (te->nonCacheable) {
        // Prevent prefetching from I/O devices.
        if (req->isPrefetch()) {
            // Here we can safely use the fault status for the short
            // desc. format in all cases
            return std::make_shared<PrefetchAbort>(
                vaddr, ArmFault::PrefetchUncacheable,
                isStage2, tranMethod);
        }
    }

    if (!te->longDescFormat) {
        switch ((dacr >> (static_cast<uint8_t>(te->domain) * 2)) & 0x3) {
          case 0:
            domainFaults++;
            DPRINTF(TLB, "TLB Fault: Data abort on domain. DACR: %#x"
                    " domain: %#x write:%d\n", dacr,
                    static_cast<uint8_t>(te->domain), is_write);
            if (is_fetch) {
                // Use PC value instead of vaddr because vaddr might
                // be aligned to cache line and should not be the
                // address reported in FAR
                return std::make_shared<PrefetchAbort>(
                    req->getPC(),
                    ArmFault::DomainLL + te->lookupLevel,
                    isStage2, tranMethod);
            } else
                return std::make_shared<DataAbort>(
                    vaddr, te->domain, is_write,
                    ArmFault::DomainLL + te->lookupLevel,
                    isStage2, tranMethod);
          case 1:
            // Continue with permissions check
            break;
          case 2:
            panic("UNPRED domain\n");
          case 3:
            return NoFault;
        }
    }

    // The 'ap' variable is AP[2:0] or {AP[2,1],1b'0}, i.e. always three bits
    uint8_t ap  = te->longDescFormat ? te->ap << 1 : te->ap;
    uint8_t hap = te->hap;

    if (sctlr.afe == 1 || te->longDescFormat)
        ap |= 1;

    bool abt;
    bool isWritable = true;
    // If this is a stage 2 access (eg for reading stage 1 page table entries)
    // then don't perform the AP permissions check, we still do the HAP check
    if (isStage2) {
        abt = false;
    } else {
        switch (ap) {
          case 0:
            DPRINTF(TLB, "Access permissions 0, checking rs:%#x\n",
                    (int)sctlr.rs);
            if (!sctlr.xp) {
                switch ((int)sctlr.rs) {
                  case 2:
                    abt = is_write;
                    break;
                  case 1:
                    abt = is_write || !is_priv;
                    break;
                  case 0:
                  case 3:
                  default:
                    abt = true;
                    break;
                }
            } else {
                abt = true;
            }
            break;
          case 1:
            abt = !is_priv;
            break;
          case 2:
            abt = !is_priv && is_write;
            isWritable = is_priv;
            break;
          case 3:
            abt = false;
            break;
          case 4:
            panic("UNPRED permissions\n");
          case 5:
            abt = !is_priv || is_write;
            isWritable = false;
            break;
          case 6:
          case 7:
            abt        = is_write;
            isWritable = false;
            break;
          default:
            panic("Unknown permissions %#x\n", ap);
        }
    }

    bool hapAbt = is_write ? !(hap & 2) : !(hap & 1);
    bool xn     = te->xn || (isWritable && sctlr.wxn) ||
                            (ap == 3    && sctlr.uwxn && is_priv);
    if (is_fetch && (abt || xn ||
                     (te->longDescFormat && te->pxn && is_priv) ||
                     (isSecure && te->ns && scr.sif))) {
        permsFaults++;
        DPRINTF(TLB, "TLB Fault: Prefetch abort on permission check. AP:%d "
                "priv:%d write:%d ns:%d sif:%d sctlr.afe: %d\n",
                ap, is_priv, is_write, te->ns, scr.sif, sctlr.afe);
        // Use PC value instead of vaddr because vaddr might be aligned to
        // cache line and should not be the address reported in FAR
        return std::make_shared<PrefetchAbort>(
            req->getPC(),
            ArmFault::PermissionLL + te->lookupLevel,
            isStage2, tranMethod);
    } else if (abt || hapAbt) {
        permsFaults++;
        DPRINTF(TLB, "TLB Fault: Data abort on permission check. AP:%d priv:%d"
                " write:%d\n", ap, is_priv, is_write);
        return std::make_shared<DataAbort>(
            vaddr, te->domain, is_write,
            ArmFault::PermissionLL + te->lookupLevel,
            isStage2 | !abt, tranMethod);
    }
    return NoFault;
}
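// AArch64 permission check. Stage 2 translations are checked against the
// hypervisor access permission (HAP) bits; stage 1 translations decode the
// AP/XN/PXN bits into a grant decision that depends on the exception level
// the access is made from.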
Fault
TLB::checkPermissions64(TlbEntry *te, RequestPtr req, Mode mode,
                        ThreadContext *tc)
{
    assert(aarch64);

    Addr vaddr_tainted = req->getVaddr();
    Addr vaddr = purifyTaggedAddr(vaddr_tainted, tc, aarch64EL, ttbcr);

    Request::Flags flags = req->getFlags();
    bool is_fetch  = (mode == Execute);
    bool is_write  = (mode == Write);
    bool is_priv M5_VAR_USED = isPriv && !(flags & UserMode);

    updateMiscReg(tc, curTranType);

    // If this is the second stage of translation and the request is for a
    // stage 1 page table walk then we need to check the HCR.PTW bit. This
    // allows us to generate a fault if the request targets an area marked
    // as a device or strongly ordered.
    if (isStage2 && req->isPTWalk() && hcr.ptw &&
        (te->mtype != TlbEntry::MemoryType::Normal)) {
        return std::make_shared<DataAbort>(
            vaddr_tainted, te->domain, is_write,
            ArmFault::PermissionLL + te->lookupLevel,
            isStage2, ArmFault::LpaeTran);
    }

    // Generate an alignment fault for unaligned accesses to device or
    // strongly ordered memory
    if (!is_fetch) {
        if (te->mtype != TlbEntry::MemoryType::Normal) {
            if (vaddr & mask(flags & AlignmentMask)) {
                alignFaults++;
                return std::make_shared<DataAbort>(
                    vaddr_tainted,
                    TlbEntry::DomainType::NoAccess, is_write,
                    ArmFault::AlignmentFault, isStage2,
                    ArmFault::LpaeTran);
            }
        }
    }

    if (te->nonCacheable) {
        // Prevent prefetching from I/O devices.
        if (req->isPrefetch()) {
            // Here we can safely use the fault status for the short
            // desc. format in all cases
            return std::make_shared<PrefetchAbort>(
                vaddr_tainted,
                ArmFault::PrefetchUncacheable,
                isStage2, ArmFault::LpaeTran);
        }
    }

    uint8_t ap = 0x3 & (te->ap);  // 2-bit access protection field
    bool grant = false;

    uint8_t xn  = te->xn;
    uint8_t pxn = te->pxn;
    bool r = !is_write && !is_fetch;
    bool w = is_write;
    bool x = is_fetch;
    DPRINTF(TLBVerbose, "Checking permissions: ap:%d, xn:%d, pxn:%d, r:%d, "
            "w:%d, x:%d\n", ap, xn, pxn, r, w, x);

    if (isStage2) {
        assert(ArmSystem::haveVirtualization(tc) && aarch64EL != EL2);
        // In stage 2 we use the hypervisor access permission bits.
        // The following permissions are described in ARM DDI 0487A.f
        uint8_t hap = 0x3 & te->hap;

        if (is_fetch) {
            // sctlr.wxn overrides the xn bit
            grant = !sctlr.wxn && !xn;
        } else if (is_write) {
            grant = hap & 0x2;
        } else { // is_read
            grant = hap & 0x1;
        }
    } else {
        switch (aarch64EL) {
          case EL0:
            {
                uint8_t perm = (ap << 2) | (xn << 1) | pxn;
                switch (perm) {
                  case 0:
                  case 1:
                  case 8:
                  case 9:
                    grant = x;
                    break;
                  case 4:
                  case 5:
                    grant = r || w || (x && !sctlr.wxn);
                    break;
                  case 6:
                  case 7:
                    grant = r || w;
                    break;
                  case 12:
                  case 13:
                    grant = r || x;
                    break;
                  case 14:
                  case 15:
                    grant = r;
                    break;
                  default:
                    grant = false;
                }
            }
            break;
          case EL1:
            {
                uint8_t perm = (ap << 2) | (xn << 1) | pxn;
                switch (perm) {
                  case 0:
                  case 2:
                    grant = r || w || (x && !sctlr.wxn);
                    break;
                  case 1:
                  case 3:
                  case 4:
                  case 5:
                  case 6:
                  case 7:
                    // regions that are writeable at EL0 should not be
                    // executable at EL1
                    grant = r || w;
                    break;
                  case 8:
                  case 10:
                  case 12:
                  case 14:
                    grant = x;
                    break;
                  case 9:
                  case 11:
                  case 13:
                  case 15:
                    grant = r;
                    break;
                  default:
                    grant = false;
                }
            }
            break;
          case EL2:
          case EL3:
            {
                uint8_t perm = (ap & 0x2) | xn;
                switch (perm) {
                  case 0:
                    grant = r || w || (x && !sctlr.wxn);
                    break;
                  case 1:
                    grant = r || w;
                    break;
                  case 2:
                    grant = r || x;
                    break;
                  case 3:
                    grant = r;
                    break;
                  default:
                    grant = false;
                }
            }
            break;
        }
    }

    if (!grant) {
        if (is_fetch) {
            permsFaults++;
            DPRINTF(TLB, "TLB Fault: Prefetch abort on permission check. "
                    "AP:%d priv:%d write:%d ns:%d sif:%d "
                    "sctlr.afe: %d\n",
                    ap, is_priv, is_write, te->ns, scr.sif, sctlr.afe);
            // Use PC value instead of vaddr because vaddr might be aligned to
            // cache line and should not be the address reported in FAR
            return std::make_shared<PrefetchAbort>(
                req->getPC(),
                ArmFault::PermissionLL + te->lookupLevel,
                isStage2, ArmFault::LpaeTran);
        } else {
            permsFaults++;
            DPRINTF(TLB, "TLB Fault: Data abort on permission check. AP:%d "
                    "priv:%d write:%d\n", ap, is_priv, is_write);
            return std::make_shared<DataAbort>(
                vaddr_tainted, te->domain, is_write,
                ArmFault::PermissionLL + te->lookupLevel,
                isStage2, ArmFault::LpaeTran);
        }
    }

    return NoFault;
}
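// Full-system translation path. Handles the MMU-off case (attributes are
// synthesised from the security state and memory-type registers); otherwise
// it fetches a table entry via getResultTe() and transfers the entry's
// attributes onto the request.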
Fault
TLB::translateFs(RequestPtr req, ThreadContext *tc, Mode mode,
        Translation *translation, bool &delay, bool timing,
        TLB::ArmTranslationType tranType, bool functional)
{
    // No such thing as a functional timing access
    assert(!(timing && functional));

    updateMiscReg(tc, tranType);

    Addr vaddr_tainted = req->getVaddr();
    Addr vaddr = 0;
    if (aarch64)
        vaddr = purifyTaggedAddr(vaddr_tainted, tc, aarch64EL, ttbcr);
    else
        vaddr = vaddr_tainted;
    Request::Flags flags = req->getFlags();

    bool is_fetch  = (mode == Execute);
    bool is_write  = (mode == Write);
    bool long_desc_format = aarch64 || longDescFormatInUse(tc);
    ArmFault::TranMethod tranMethod = long_desc_format ? ArmFault::LpaeTran
                                                       : ArmFault::VmsaTran;

    DPRINTF(TLBVerbose, "CPSR is priv:%d UserMode:%d secure:%d S1S2NsTran:%d\n",
            isPriv, flags & UserMode, isSecure, tranType & S1S2NsTran);

    DPRINTF(TLB, "translateFs addr %#x, mode %d, st2 %d, scr %#x sctlr %#x "
            "flags %#lx tranType 0x%x\n", vaddr_tainted, mode, isStage2,
            scr, sctlr, flags, tranType);

    if ((req->isInstFetch() && (!sctlr.i)) ||
        ((!req->isInstFetch()) && (!sctlr.c))) {
        req->setFlags(Request::UNCACHEABLE | Request::STRICT_ORDER);
    }
    if (!is_fetch) {
        assert(flags & MustBeOne);
        if (sctlr.a || !(flags & AllowUnaligned)) {
            if (vaddr & mask(flags & AlignmentMask)) {
                alignFaults++;
                return std::make_shared<DataAbort>(
                    vaddr_tainted,
                    TlbEntry::DomainType::NoAccess, is_write,
                    ArmFault::AlignmentFault, isStage2,
                    tranMethod);
            }
        }
    }

    // If guest MMU is off or hcr.vm=0 go straight to stage2
    if ((isStage2 && !hcr.vm) || (!isStage2 && !sctlr.m)) {

        req->setPaddr(vaddr);
        // When the MMU is off the security attribute corresponds to the
        // security state of the processor
        if (isSecure)
            req->setFlags(Request::SECURE);

        // @todo: double check this (ARM ARM issue C B3.2.1)
        if (long_desc_format || sctlr.tre == 0) {
            req->setFlags(Request::UNCACHEABLE | Request::STRICT_ORDER);
        } else {
            if (nmrr.ir0 == 0 || nmrr.or0 == 0 || prrr.tr0 != 0x2)
                req->setFlags(Request::UNCACHEABLE | Request::STRICT_ORDER);
        }

        // Set memory attributes
        TlbEntry temp_te;
        temp_te.ns = !isSecure;
        if (isStage2 || hcr.dc == 0 || isSecure ||
            (isHyp && !(tranType & S1CTran))) {

            temp_te.mtype      = is_fetch ? TlbEntry::MemoryType::Normal
                                 : TlbEntry::MemoryType::StronglyOrdered;
            temp_te.innerAttrs = 0x0;
            temp_te.outerAttrs = 0x0;
            temp_te.shareable  = true;
            temp_te.outerShareable = true;
        } else {
            temp_te.mtype      = TlbEntry::MemoryType::Normal;
            temp_te.innerAttrs = 0x3;
            temp_te.outerAttrs = 0x3;
            temp_te.shareable  = false;
            temp_te.outerShareable = false;
        }
        temp_te.setAttributes(long_desc_format);
        DPRINTF(TLBVerbose, "(No MMU) setting memory attributes: shareable: "
                "%d, innerAttrs: %d, outerAttrs: %d, isStage2: %d\n",
                temp_te.shareable, temp_te.innerAttrs, temp_te.outerAttrs,
                isStage2);
        setAttr(temp_te.attributes);

        return testTranslation(req, mode, TlbEntry::DomainType::NoAccess);
    }

    DPRINTF(TLBVerbose, "Translating %s=%#x context=%d\n",
            isStage2 ? "IPA" : "VA", vaddr_tainted, asid);
    // Translation enabled

    TlbEntry *te = NULL;
    TlbEntry mergeTe;
    Fault fault = getResultTe(&te, req, tc, mode, translation, timing,
                              functional, &mergeTe);
    // only proceed if we have a valid table entry
    if ((te == NULL) && (fault == NoFault)) delay = true;

    // If we have the table entry transfer some of the attributes to the
    // request that triggered the translation
    if (te != NULL) {
        // Set memory attributes
        DPRINTF(TLBVerbose,
                "Setting memory attributes: shareable: %d, innerAttrs: %d, "
                "outerAttrs: %d, mtype: %d, isStage2: %d\n",
                te->shareable, te->innerAttrs, te->outerAttrs,
                static_cast<uint8_t>(te->mtype), isStage2);
        setAttr(te->attributes);

        if (te->nonCacheable)
            req->setFlags(Request::UNCACHEABLE);

        // Require requests to be ordered if the request goes to
        // strongly ordered or device memory (i.e., anything other
        // than normal memory requires strict order).
        if (te->mtype != TlbEntry::MemoryType::Normal)
            req->setFlags(Request::STRICT_ORDER);

        Addr pa = te->pAddr(vaddr);
        req->setPaddr(pa);

        if (isSecure && !te->ns) {
            req->setFlags(Request::SECURE);
        }
        if ((!is_fetch) && (vaddr & mask(flags & AlignmentMask)) &&
            (te->mtype != TlbEntry::MemoryType::Normal)) {
            // Unaligned accesses to Device memory should always cause an
            // abort regardless of sctlr.a
            alignFaults++;
            return std::make_shared<DataAbort>(
                vaddr_tainted,
                TlbEntry::DomainType::NoAccess, is_write,
                ArmFault::AlignmentFault, isStage2,
                tranMethod);
        }

        // Check for a trickbox generated address fault
        if (fault == NoFault)
            fault = testTranslation(req, mode, te->domain);
    }

    // Generate Illegal Inst Set State fault if IL bit is set in CPSR
    if (fault == NoFault) {
        if (aarch64 && is_fetch && cpsr.il == 1) {
            return std::make_shared<IllegalInstSetStateFault>();
        }
    }

    return fault;
}
Fault
TLB::translateAtomic(RequestPtr req, ThreadContext *tc, Mode mode,
    TLB::ArmTranslationType tranType)
{
    updateMiscReg(tc, tranType);

    if (directToStage2) {
        assert(stage2Tlb);
        return stage2Tlb->translateAtomic(req, tc, mode, tranType);
    }

    bool delay = false;
    Fault fault;
    if (FullSystem)
        fault = translateFs(req, tc, mode, NULL, delay, false, tranType);
    else
        fault = translateSe(req, tc, mode, NULL, delay, false);
    assert(!delay);
    return fault;
}
Fault
TLB::translateFunctional(RequestPtr req, ThreadContext *tc, Mode mode,
    TLB::ArmTranslationType tranType)
{
    updateMiscReg(tc, tranType);

    if (directToStage2) {
        assert(stage2Tlb);
        return stage2Tlb->translateFunctional(req, tc, mode, tranType);
    }

    bool delay = false;
    Fault fault;
    if (FullSystem)
        fault = translateFs(req, tc, mode, NULL, delay, false, tranType, true);
    else
        fault = translateSe(req, tc, mode, NULL, delay, false);
    assert(!delay);
    return fault;
}
Fault
TLB::translateTiming(RequestPtr req, ThreadContext *tc,
    Translation *translation, Mode mode, TLB::ArmTranslationType tranType)
{
    updateMiscReg(tc, tranType);

    if (directToStage2) {
        assert(stage2Tlb);
        return stage2Tlb->translateTiming(req, tc, translation, mode,
                                          tranType);
    }

    assert(translation);

    return translateComplete(req, tc, translation, mode, tranType, isStage2);
}
Fault
TLB::translateComplete(RequestPtr req, ThreadContext *tc,
        Translation *translation, Mode mode, TLB::ArmTranslationType tranType,
        bool callFromS2)
{
    bool delay = false;
    Fault fault;
    if (FullSystem)
        fault = translateFs(req, tc, mode, translation, delay, true, tranType);
    else
        fault = translateSe(req, tc, mode, translation, delay, true);
    DPRINTF(TLBVerbose, "Translation returning delay=%d fault=%d\n", delay,
            fault != NoFault);
    // If we have a translation, and we're not in the middle of doing a stage
    // 2 translation tell the translation that we've either finished or it's
    // going to take a while. By not doing this when we're in the middle of a
    // stage 2 translation we prevent marking the translation as delayed twice,
    // once when the translation starts and again when the stage 1 translation
    // completes.
    if (translation && (callFromS2 || !stage2Req || req->hasPaddr() ||
        fault != NoFault)) {
        if (!delay)
            translation->finish(fault, req, tc, mode);
        else
            translation->markDelayed();
    }
    return fault;
}
BaseMasterPort*
TLB::getMasterPort()
{
    return &stage2Mmu->getPort();
}
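// Snapshot the miscellaneous registers that the translation code depends
// on. The snapshot is cached and only re-read when the context or the
// translation type changes, or when drain/unserialize events clear
// miscRegValid.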
void
TLB::updateMiscReg(ThreadContext *tc, ArmTranslationType tranType)
{
    // check if the regs have changed, or the translation mode is different.
    // NOTE: the tran type doesn't affect stage 2 TLBs as they only handle
    // one type of translation anyway
    if (miscRegValid && miscRegContext == tc->contextId() &&
        ((tranType == curTranType) || isStage2)) {
        return;
    }

    DPRINTF(TLBVerbose, "TLB variables changed!\n");
    cpsr = tc->readMiscReg(MISCREG_CPSR);

    // Dependencies: SCR/SCR_EL3, CPSR
    isSecure = inSecureState(tc) &&
        !(tranType & HypMode) && !(tranType & S1S2NsTran);

    const OperatingMode op_mode = (OperatingMode) (uint8_t)cpsr.mode;
    aarch64 = opModeIs64(op_mode) ||
        (opModeToEL(op_mode) == EL0 && ELIs64(tc, EL1));

    if (aarch64) {  // AArch64
        // determine EL we need to translate in
        switch (tranType) {
          case S1E0Tran:
          case S12E0Tran:
            aarch64EL = EL0;
            break;
          case S1E1Tran:
          case S12E1Tran:
            aarch64EL = EL1;
            break;
          case S1E2Tran:
            aarch64EL = EL2;
            break;
          case S1E3Tran:
            aarch64EL = EL3;
            break;
          default:
            aarch64EL = (ExceptionLevel) (uint8_t) cpsr.el;
            break;
        }

        switch (aarch64EL) {
          case EL0:
          case EL1:
            {
                sctlr = tc->readMiscReg(MISCREG_SCTLR_EL1);
                ttbcr = tc->readMiscReg(MISCREG_TCR_EL1);
                uint64_t ttbr_asid = ttbcr.a1 ?
                    tc->readMiscReg(MISCREG_TTBR1_EL1) :
                    tc->readMiscReg(MISCREG_TTBR0_EL1);
                asid = bits(ttbr_asid,
                            (haveLargeAsid64 && ttbcr.as) ? 63 : 55, 48);
            }
            break;
          case EL2:
            sctlr = tc->readMiscReg(MISCREG_SCTLR_EL2);
            ttbcr = tc->readMiscReg(MISCREG_TCR_EL2);
            asid = -1;
            break;
          case EL3:
            sctlr = tc->readMiscReg(MISCREG_SCTLR_EL3);
            ttbcr = tc->readMiscReg(MISCREG_TCR_EL3);
            asid = -1;
            break;
        }
        hcr = tc->readMiscReg(MISCREG_HCR_EL2);
        scr = tc->readMiscReg(MISCREG_SCR_EL3);
        isPriv = aarch64EL != EL0;
        if (haveVirtualization) {
            vmid = bits(tc->readMiscReg(MISCREG_VTTBR_EL2), 55, 48);
            isHyp = tranType & HypMode;
            isHyp &= (tranType & S1S2NsTran) == 0;
            isHyp &= (tranType & S1CTran)    == 0;
            // Work out if we should skip the first stage of translation and go
            // directly to stage 2. This value is cached so we don't have to
            // compute it for every translation.
            stage2Req = isStage2 ||
                        (hcr.vm && !isHyp && !isSecure &&
                         !(tranType & S1CTran) && (aarch64EL < EL2) &&
                         !(tranType & S1E1Tran)); // <--- FIX THIS HACK
            directToStage2 = !isStage2 && stage2Req && !sctlr.m;
        } else {
            vmid           = 0;
            isHyp          = false;
            directToStage2 = false;
            stage2Req      = false;
        }
    } else {  // AArch32
        sctlr = tc->readMiscReg(flattenMiscRegNsBanked(MISCREG_SCTLR, tc,
                                !isSecure));
        ttbcr = tc->readMiscReg(flattenMiscRegNsBanked(MISCREG_TTBCR, tc,
                                !isSecure));
        scr = tc->readMiscReg(MISCREG_SCR);
        isPriv = cpsr.mode != MODE_USER;
        if (longDescFormatInUse(tc)) {
            uint64_t ttbr_asid = tc->readMiscReg(
                flattenMiscRegNsBanked(ttbcr.a1 ? MISCREG_TTBR1
                                                : MISCREG_TTBR0,
                                       tc, !isSecure));
            asid = bits(ttbr_asid, 55, 48);
        } else { // Short-descriptor translation table format in use
            CONTEXTIDR context_id = tc->readMiscReg(flattenMiscRegNsBanked(
                MISCREG_CONTEXTIDR, tc, !isSecure));
            asid = context_id.asid;
        }
        prrr = tc->readMiscReg(flattenMiscRegNsBanked(MISCREG_PRRR, tc,
                               !isSecure));
        nmrr = tc->readMiscReg(flattenMiscRegNsBanked(MISCREG_NMRR, tc,
                               !isSecure));
        dacr = tc->readMiscReg(flattenMiscRegNsBanked(MISCREG_DACR, tc,
                               !isSecure));
        hcr = tc->readMiscReg(MISCREG_HCR);

        if (haveVirtualization) {
            vmid   = bits(tc->readMiscReg(MISCREG_VTTBR), 55, 48);
            isHyp  = cpsr.mode == MODE_HYP;
            isHyp |=  tranType & HypMode;
            isHyp &= (tranType & S1S2NsTran) == 0;
            isHyp &= (tranType & S1CTran)    == 0;
            if (isHyp) {
                sctlr = tc->readMiscReg(MISCREG_HSCTLR);
            }
            // Work out if we should skip the first stage of translation and go
            // directly to stage 2. This value is cached so we don't have to
            // compute it for every translation.
            stage2Req      = hcr.vm && !isStage2 && !isHyp && !isSecure &&
                             !(tranType & S1CTran);
            directToStage2 = stage2Req && !sctlr.m;
        } else {
            vmid           = 0;
            stage2Req      = false;
            isHyp          = false;
            directToStage2 = false;
        }
    }
    miscRegValid = true;
    miscRegContext = tc->contextId();
    curTranType = tranType;
}
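// Look the address up in the TLB and, on a miss, start a hardware table
// walk. In timing mode the walk completes asynchronously and the caller is
// notified through the Translation object; in atomic/functional mode the
// walk finishes inline and the entry is looked up again afterwards.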
Fault
TLB::getTE(TlbEntry **te, RequestPtr req, ThreadContext *tc, Mode mode,
        Translation *translation, bool timing, bool functional,
        bool is_secure, TLB::ArmTranslationType tranType)
{
    bool is_fetch = (mode == Execute);
    bool is_write = (mode == Write);

    Addr vaddr_tainted = req->getVaddr();
    Addr vaddr = 0;
    ExceptionLevel target_el = aarch64 ? aarch64EL : EL1;
    if (aarch64) {
        vaddr = purifyTaggedAddr(vaddr_tainted, tc, target_el, ttbcr);
    } else {
        vaddr = vaddr_tainted;
    }
    *te = lookup(vaddr, asid, vmid, isHyp, is_secure, false, false, target_el);
    if (*te == NULL) {
        if (req->isPrefetch()) {
            // if the request is a prefetch don't attempt to fill the TLB or go
            // any further with the memory access (here we can safely use the
            // fault status for the short desc. format in all cases)
            prefetchFaults++;
            return std::make_shared<PrefetchAbort>(
                vaddr_tainted, ArmFault::PrefetchTLBMiss, isStage2);
        }

        if (is_fetch)
            instMisses++;
        else if (is_write)
            writeMisses++;
        else
            readMisses++;

        // start translation table walk, pass variables rather than
        // re-retrieving in table walker for speed
        DPRINTF(TLB, "TLB Miss: Starting hardware table walker for %#x(%d:%d)\n",
                vaddr_tainted, asid, vmid);
        Fault fault;
        fault = tableWalker->walk(req, tc, asid, vmid, isHyp, mode,
                                  translation, timing, functional, is_secure,
                                  tranType, stage2Req);
        // for timing mode, return and wait for table walk,
        if (timing || fault != NoFault) {
            return fault;
        }

        *te = lookup(vaddr, asid, vmid, isHyp, is_secure, false, false,
                     target_el);
        assert(*te);
    } else {
        if (is_fetch)
            instHits++;
        else if (is_write)
            writeHits++;
        else
            readHits++;
    }
    return NoFault;
}
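// Produce the table entry used for the final physical address and
// permission check. For a pure stage 2 TLB this is the stage 2 entry
// itself; otherwise the stage 1 entry is fetched and, when stage 2
// translation is active, merged with the stage 2 result via Stage2LookUp.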
Fault
TLB::getResultTe(TlbEntry **te, RequestPtr req, ThreadContext *tc, Mode mode,
        Translation *translation, bool timing, bool functional,
        TlbEntry *mergeTe)
{
    Fault fault;

    if (isStage2) {
        // We are already in the stage 2 TLB. Grab the table entry for stage
        // 2 only. We are here because stage 1 translation is disabled.
        TlbEntry *s2Te = NULL;
        // Get the stage 2 table entry
        fault = getTE(&s2Te, req, tc, mode, translation, timing, functional,
                      isSecure, curTranType);
        // Check permissions of stage 2
        if ((s2Te != NULL) && (fault == NoFault)) {
            if (aarch64)
                fault = checkPermissions64(s2Te, req, mode, tc);
            else
                fault = checkPermissions(s2Te, req, mode);
        }
        *te = s2Te;
        return fault;
    }

    TlbEntry *s1Te = NULL;

    Addr vaddr_tainted = req->getVaddr();

    // Get the stage 1 table entry
    fault = getTE(&s1Te, req, tc, mode, translation, timing, functional,
                  isSecure, curTranType);
    // only proceed if we have a valid table entry
    if ((s1Te != NULL) && (fault == NoFault)) {
        // Check stage 1 permissions before checking stage 2
        if (aarch64)
            fault = checkPermissions64(s1Te, req, mode, tc);
        else
            fault = checkPermissions(s1Te, req, mode);
        if (stage2Req && (fault == NoFault)) {
            Stage2LookUp *s2Lookup = new Stage2LookUp(this, stage2Tlb, *s1Te,
                req, translation, mode, timing, functional, curTranType);
            fault = s2Lookup->getTe(tc, mergeTe);
            if (s2Lookup->isComplete()) {
                *te = mergeTe;
                // We've finished with the lookup so delete it
                delete s2Lookup;
            } else {
                // The lookup hasn't completed, so we can't delete it now. We
                // get round this by asking the object to self delete when the
                // translation is complete.
                s2Lookup->setSelfDelete();
            }
        } else {
            // This case deals with an S1 hit (or bypass), followed by
            // an S2 hit-but-perms issue
            if (isStage2) {
                DPRINTF(TLBVerbose, "s2TLB: reqVa %#x, reqPa %#x, fault %p\n",
                        vaddr_tainted,
                        req->hasPaddr() ? req->getPaddr() : ~0, fault);
                if (fault != NoFault) {
                    ArmFault *armFault = reinterpret_cast<ArmFault *>(
                        fault.get());
                    armFault->annotate(ArmFault::S1PTW, false);
                    armFault->annotate(ArmFault::OVA, vaddr_tainted);
                }
            }
            *te = s1Te;
        }
    }
    return fault;
}
void
TLB::setTestInterface(SimObject *_ti)
{
    if (!_ti) {
        test = nullptr;
    } else {
        TlbTestInterface *ti(dynamic_cast<TlbTestInterface *>(_ti));
        fatal_if(!ti, "%s is not a valid ARM TLB tester\n", _ti->name());
        test = ti;
    }
}
Fault
TLB::testTranslation(RequestPtr req, Mode mode, TlbEntry::DomainType domain)
{
    if (!test || !req->hasSize() || req->getSize() == 0) {
        return NoFault;
    } else {
        return test->translationCheck(req, isPriv, mode, domain);
    }
}
Fault
TLB::testWalk(Addr pa, Addr size, Addr va, bool is_secure, Mode mode,
              TlbEntry::DomainType domain, LookupLevel lookup_level)
{
    if (!test) {
        return NoFault;
    } else {
        return test->walkCheck(pa, size, va, is_secure, isPriv, mode,
                               domain, lookup_level);
    }
}
ArmISA::TLB *
ArmTLBParams::create()
{
    return new ArmISA::TLB(this);
}