2 * Copyright (c) 2010-2013, 2016-2020 ARM Limited
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
14 * Copyright (c) 2001-2005 The Regents of The University of Michigan
15 * All rights reserved.
17 * Redistribution and use in source and binary forms, with or without
18 * modification, are permitted provided that the following conditions are
19 * met: redistributions of source code must retain the above copyright
20 * notice, this list of conditions and the following disclaimer;
21 * redistributions in binary form must reproduce the above copyright
22 * notice, this list of conditions and the following disclaimer in the
23 * documentation and/or other materials provided with the distribution;
24 * neither the name of the copyright holders nor the names of its
25 * contributors may be used to endorse or promote products derived from
26 * this software without specific prior written permission.
28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
29 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
30 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
31 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
32 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
33 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
34 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
35 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
36 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
37 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
38 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
41 #include "arch/arm/tlb.hh"
47 #include "arch/arm/faults.hh"
48 #include "arch/arm/isa.hh"
49 #include "arch/arm/pagetable.hh"
50 #include "arch/arm/self_debug.hh"
51 #include "arch/arm/stage2_lookup.hh"
52 #include "arch/arm/stage2_mmu.hh"
53 #include "arch/arm/system.hh"
54 #include "arch/arm/table_walker.hh"
55 #include "arch/arm/tlbi_op.hh"
56 #include "arch/arm/utility.hh"
57 #include "base/inifile.hh"
58 #include "base/str.hh"
59 #include "base/trace.hh"
60 #include "cpu/base.hh"
61 #include "cpu/thread_context.hh"
62 #include "debug/Checkpoint.hh"
63 #include "debug/TLB.hh"
64 #include "debug/TLBVerbose.hh"
65 #include "mem/packet_access.hh"
66 #include "mem/page_table.hh"
67 #include "mem/request.hh"
68 #include "params/ArmTLB.hh"
69 #include "sim/full_system.hh"
70 #include "sim/process.hh"
71 #include "sim/pseudo_inst.hh"
74 using namespace ArmISA
;
76 TLB::TLB(const ArmTLBParams
&p
)
77 : BaseTLB(p
), table(new TlbEntry
[p
.size
]), size(p
.size
),
78 isStage2(p
.is_stage2
), stage2Req(false), stage2DescReq(false), _attr(0),
79 directToStage2(false), tableWalker(p
.walker
), stage2Tlb(NULL
),
80 stage2Mmu(NULL
), test(nullptr), stats(this), rangeMRU(1),
81 aarch64(false), aarch64EL(EL0
), isPriv(false), isSecure(false),
82 isHyp(false), asid(0), vmid(0), hcr(0), dacr(0),
83 miscRegValid(false), miscRegContext(0), curTranType(NormalTran
)
85 const ArmSystem
*sys
= dynamic_cast<const ArmSystem
*>(p
.sys
);
87 tableWalker
->setTlb(this);
89 // Cache system-level properties
90 haveLPAE
= tableWalker
->haveLPAE();
91 haveVirtualization
= tableWalker
->haveVirtualization();
92 haveLargeAsid64
= tableWalker
->haveLargeAsid64();
93 physAddrRange
= tableWalker
->physAddrRange();
96 m5opRange
= sys
->m5opRange();
107 if (stage2Mmu
&& !isStage2
)
108 stage2Tlb
= stage2Mmu
->stage2Tlb();
112 TLB::setMMU(Stage2MMU
*m
, RequestorID requestor_id
)
115 tableWalker
->setMMU(m
, requestor_id
);
// Functional (side-effect-free) virtual-to-physical translation by VA.
// Delegates straight to the stage-2 TLB when directToStage2 is set;
// otherwise performs a lookup with functional=true so the LRU ordering
// is not disturbed. NOTE(review): this extract elides the tail of the
// function (presumably the hit/miss handling of 'e' and the write to
// 'pa') — confirm against the full source.
119 TLB::translateFunctional(ThreadContext
*tc
, Addr va
, Addr
&pa
)
// Whole translation regime is routed to stage 2 (e.g. hcr.dc / vm
// configurations set elsewhere); forward the request unchanged.
123 if (directToStage2
) {
125 return stage2Tlb
->translateFunctional(tc
, va
, pa
);
// Functional lookup in our own table; EL selection depends on whether
// we are currently executing in AArch64 state.
128 TlbEntry
*e
= lookup(va
, asid
, vmid
, isHyp
, isSecure
, true, false,
129 aarch64
? aarch64EL
: EL1
, false);
137 TLB::finalizePhysical(const RequestPtr
&req
,
138 ThreadContext
*tc
, Mode mode
) const
140 const Addr paddr
= req
->getPaddr();
142 if (m5opRange
.contains(paddr
)) {
144 PseudoInst::decodeAddrOffset(paddr
- m5opRange
.start(), func
);
145 req
->setLocalAccessor(
146 [func
, mode
](ThreadContext
*tc
, PacketPtr pkt
) -> Cycles
149 PseudoInst::pseudoInst
<PseudoInstABI
>(tc
, func
, ret
);
// Linear search of the TLB array for an entry matching the given
// VA/ASID/VMID/security/EL criteria, with an LRU-style promotion of
// non-functional hits toward the front of the array.
// NOTE(review): this is a line-mangled extract with elided lines (the
// loop-body tail, the hit assignment to 'retval' and the return are
// not visible) — annotations below describe only what is shown.
161 TLB::lookup(Addr va
, uint16_t asn
, uint8_t vmid
, bool hyp
, bool secure
,
162 bool functional
, bool ignore_asn
, ExceptionLevel target_el
,
// 'retval' holds the matching entry, or NULL if no entry matches.
166 TlbEntry
*retval
= NULL
;
168 // Maintaining LRU array
// Scan until a match is found or the whole table has been examined.
// Two match flavors: ASID-qualified, and ASID-ignoring (used e.g. by
// global flush-by-MVA paths).
170 while (retval
== NULL
&& x
< size
) {
171 if ((!ignore_asn
&& table
[x
].match(va
, asn
, vmid
, hyp
, secure
, false,
172 target_el
, in_host
)) ||
173 (ignore_asn
&& table
[x
].match(va
, vmid
, hyp
, secure
, target_el
,
175 // We only move the hit entry ahead when the position is higher
// Promote the hit entry to the MRU slot (index 0) by shifting the
// preceding entries down one place. Skipped for functional accesses
// so they do not perturb replacement state.
177 if (x
> rangeMRU
&& !functional
) {
178 TlbEntry tmp_entry
= table
[x
];
179 for (int i
= x
; i
> 0; i
--)
180 table
[i
] = table
[i
- 1];
181 table
[0] = tmp_entry
;
// Verbose trace of the lookup outcome; all entry fields are guarded
// with 'retval ? ... : 0' so a miss prints zeros.
191 DPRINTF(TLBVerbose
, "Lookup %#x, asn %#x -> %s vmn 0x%x hyp %d secure %d "
192 "ppn %#x size: %#x pa: %#x ap:%d ns:%d nstid:%d g:%d asid: %d "
194 va
, asn
, retval
? "hit" : "miss", vmid
, hyp
, secure
,
195 retval
? retval
->pfn
: 0, retval
? retval
->size
: 0,
196 retval
? retval
->pAddr(va
) : 0, retval
? retval
->ap
: 0,
197 retval
? retval
->ns
: 0, retval
? retval
->nstid
: 0,
198 retval
? retval
->global
: 0, retval
? retval
->asid
: 0,
199 retval
? retval
->el
: 0);
// Insert 'entry' at the MRU position (index 0), shifting every other
// entry down one slot and thereby evicting the entry at index size-1.
// NOTE(review): line-mangled extract — the final store of 'entry' into
// table[0] and the inserts/flushTlb stat updates are elided here.
204 // insert a new TLB entry
206 TLB::insert(Addr addr
, TlbEntry
&entry
)
// Trace every field of the incoming entry for debugging.
208 DPRINTF(TLB
, "Inserting entry into TLB with pfn:%#x size:%#x vpn: %#x"
209 " asid:%d vmid:%d N:%d global:%d valid:%d nc:%d xn:%d"
210 " ap:%#x domain:%#x ns:%d nstid:%d isHyp:%d\n", entry
.pfn
,
211 entry
.size
, entry
.vpn
, entry
.asid
, entry
.vmid
, entry
.N
,
212 entry
.global
, entry
.valid
, entry
.nonCacheable
, entry
.xn
,
213 entry
.ap
, static_cast<uint8_t>(entry
.domain
), entry
.ns
, entry
.nstid
,
// If the LRU slot still holds a valid entry, it is about to be
// silently evicted — log what is being replaced.
216 if (table
[size
- 1].valid
)
217 DPRINTF(TLB
, " - Replacing Valid entry %#x, asn %d vmn %d ppn %#x "
218 "size: %#x ap:%d ns:%d nstid:%d g:%d isHyp:%d el: %d\n",
219 table
[size
-1].vpn
<< table
[size
-1].N
, table
[size
-1].asid
,
220 table
[size
-1].vmid
, table
[size
-1].pfn
<< table
[size
-1].N
,
221 table
[size
-1].size
, table
[size
-1].ap
, table
[size
-1].ns
,
222 table
[size
-1].nstid
, table
[size
-1].global
, table
[size
-1].isHyp
,
225 //inserting to MRU position and evicting the LRU one
// Shift all entries down by one; slot 0 becomes free for the new entry.
227 for (int i
= size
- 1; i
> 0; --i
)
228 table
[i
] = table
[i
-1];
// Notify the PMU probe point that a TLB refill occurred.
232 ppRefills
->notify(1);
236 TLB::printTlb() const
240 DPRINTF(TLB
, "Current TLB contents:\n");
244 DPRINTF(TLB
, " * %s\n", te
->print());
252 DPRINTF(TLB
, "Flushing all TLB entries\n");
258 DPRINTF(TLB
, " - %s\n", te
->print());
260 stats
.flushedEntries
++;
266 // If there's a second stage TLB (and we're not it) then flush it as well
268 stage2Tlb
->flushAll();
// TLBIALL: invalidate all entries matching the operation's security
// state, VMID (unless EL2 is enabled) and target EL, then forward the
// flush to the stage-2 TLB when executing in hyp mode.
// NOTE(review): line-mangled extract — the iteration scaffolding and
// the entry-invalidate statement are elided.
273 TLB::flush(const TLBIALL
& tlbi_op
)
275 DPRINTF(TLB
, "Flushing all TLB entries (%s lookup)\n",
276 (tlbi_op
.secureLookup
? "secure" : "non-secure"));
// Per-entry match: EL must match, secure lookups only hit entries with
// nstid clear, and VMID must match unless EL2 is enabled for the op.
281 const bool el_match
= te
->checkELMatch(
282 tlbi_op
.targetEL
, tlbi_op
.inHost
);
283 if (te
->valid
&& tlbi_op
.secureLookup
== !te
->nstid
&&
284 (te
->vmid
== vmid
|| tlbi_op
.el2Enabled
) && el_match
) {
286 DPRINTF(TLB
, " - %s\n", te
->print());
288 stats
.flushedEntries
++;
295 // If there's a second stage TLB (and we're not it) then flush it as well
296 // if we're currently in hyp mode
297 if (!isStage2
&& isHyp
) {
298 stage2Tlb
->flush(tlbi_op
.makeStage2());
303 TLB::flush(const TLBIALLEL
&tlbi_op
)
305 DPRINTF(TLB
, "Flushing all TLB entries (%s lookup)\n",
306 (tlbi_op
.secureLookup
? "secure" : "non-secure"));
311 const bool el_match
= te
->checkELMatch(
312 tlbi_op
.targetEL
, tlbi_op
.inHost
);
313 if (te
->valid
&& tlbi_op
.secureLookup
== !te
->nstid
&& el_match
) {
315 DPRINTF(TLB
, " - %s\n", te
->print());
317 stats
.flushedEntries
++;
324 // If there's a second stage TLB (and we're not it)
325 // and if we're targeting EL1
326 // then flush it as well
327 if (!isStage2
&& tlbi_op
.targetEL
== EL1
) {
328 stage2Tlb
->flush(tlbi_op
.makeStage2());
333 TLB::flush(const TLBIALLN
&tlbi_op
)
335 bool hyp
= tlbi_op
.targetEL
== EL2
;
337 DPRINTF(TLB
, "Flushing all NS TLB entries (%s lookup)\n",
338 (hyp
? "hyp" : "non-hyp"));
343 const bool el_match
= te
->checkELMatch(tlbi_op
.targetEL
, false);
345 if (te
->valid
&& te
->nstid
&& te
->isHyp
== hyp
&& el_match
) {
347 DPRINTF(TLB
, " - %s\n", te
->print());
348 stats
.flushedEntries
++;
356 // If there's a second stage TLB (and we're not it) then flush it as well
357 if (!isStage2
&& !hyp
) {
358 stage2Tlb
->flush(tlbi_op
.makeStage2());
363 TLB::flush(const TLBIMVA
&tlbi_op
)
365 DPRINTF(TLB
, "Flushing TLB entries with mva: %#x, asid: %#x "
366 "(%s lookup)\n", tlbi_op
.addr
, tlbi_op
.asid
,
367 (tlbi_op
.secureLookup
? "secure" : "non-secure"));
368 _flushMva(tlbi_op
.addr
, tlbi_op
.asid
, tlbi_op
.secureLookup
, false,
369 tlbi_op
.targetEL
, tlbi_op
.inHost
);
370 stats
.flushTlbMvaAsid
++;
// TLBIASID: invalidate all entries carrying the given ASID, subject to
// security state, VMID (unless EL2 is enabled for the op) and EL match.
// NOTE(review): line-mangled extract — the iteration scaffolding and
// the entry-invalidate statement are elided.
374 TLB::flush(const TLBIASID
&tlbi_op
)
376 DPRINTF(TLB
, "Flushing TLB entries with asid: %#x (%s lookup)\n",
377 tlbi_op
.asid
, (tlbi_op
.secureLookup
? "secure" : "non-secure"));
// Entry matches when valid, ASIDs are equal, the security qualifier
// agrees with nstid, VMID matches (or EL2 is enabled), and the EL /
// host-regime check passes.
384 if (te
->valid
&& te
->asid
== tlbi_op
.asid
&&
385 tlbi_op
.secureLookup
== !te
->nstid
&&
386 (te
->vmid
== vmid
|| tlbi_op
.el2Enabled
) &&
387 te
->checkELMatch(tlbi_op
.targetEL
, tlbi_op
.inHost
)) {
390 DPRINTF(TLB
, " - %s\n", te
->print());
391 stats
.flushedEntries
++;
// Count ASID-flush operations (separate from per-entry counter above).
395 stats
.flushTlbAsid
++;
399 TLB::flush(const TLBIMVAA
&tlbi_op
) {
401 DPRINTF(TLB
, "Flushing TLB entries with mva: %#x (%s lookup)\n",
403 (tlbi_op
.secureLookup
? "secure" : "non-secure"));
404 _flushMva(tlbi_op
.addr
, 0xbeef, tlbi_op
.secureLookup
, true,
405 tlbi_op
.targetEL
, tlbi_op
.inHost
);
// Core flush-by-MVA helper shared by the TLBIMVA/TLBIMVAA/TLBIIPA
// front-ends: repeatedly look up the address and invalidate each hit
// whose security qualifier matches. 'ignore_asn' selects whether the
// ASID ('asn') participates in the match.
// NOTE(review): line-mangled extract — the sign-extension code, loop
// scaffolding and invalidate statement are elided; the second lookup
// at the bottom re-probes for further matching entries.
410 TLB::_flushMva(Addr mva
, uint64_t asn
, bool secure_lookup
,
411 bool ignore_asn
, ExceptionLevel target_el
, bool in_host
)
414 // D5.7.2: Sign-extend address to 64 bits
// EL2-targeted operations match hyp entries.
417 bool hyp
= target_el
== EL2
;
// Functional lookup (no LRU promotion) for the first candidate.
419 te
= lookup(mva
, asn
, vmid
, hyp
, secure_lookup
, false, ignore_asn
,
// Only invalidate entries whose NS-walk qualifier agrees with the
// requested security state.
422 if (secure_lookup
== !te
->nstid
) {
423 DPRINTF(TLB
, " - %s\n", te
->print());
425 stats
.flushedEntries
++;
// Probe again: multiple entries can match the same MVA (e.g. different
// sizes), so keep looking until no further hit is returned.
427 te
= lookup(mva
, asn
, vmid
, hyp
, secure_lookup
, false, ignore_asn
,
433 TLB::flush(const TLBIIPA
&tlbi_op
)
436 stage2Tlb
->_flushMva(tlbi_op
.addr
, 0xbeef, tlbi_op
.secureLookup
,
437 true, tlbi_op
.targetEL
, false);
443 // We might have unserialized something or switched CPUs, so make
444 // sure to re-read the misc regs.
445 miscRegValid
= false;
// CPU-switch handover: copy translation-relevant state from the TLB of
// the old CPU ('_otlb') into this one, recursing into the stage-2 TLB
// when both sides have one. Panics if the other TLB is not an ArmISA
// TLB (dynamic_cast returns null).
// NOTE(review): line-mangled extract — the null-check branch structure
// around the panic is elided.
449 TLB::takeOverFrom(BaseTLB
*_otlb
)
451 TLB
*otlb
= dynamic_cast<TLB
*>(_otlb
);
452 /* Make sure we actually have a valid type */
// Mirror the old TLB's cached configuration and stage-2 routing state.
455 haveLPAE
= otlb
->haveLPAE
;
456 directToStage2
= otlb
->directToStage2
;
457 stage2Req
= otlb
->stage2Req
;
458 stage2DescReq
= otlb
->stage2DescReq
;
460 /* Sync the stage2 MMU if they exist in both
461 * the old CPU and the new
464 stage2Tlb
&& otlb
->stage2Tlb
) {
465 stage2Tlb
->takeOverFrom(otlb
->stage2Tlb
);
// Reached when the dynamic_cast above failed.
468 panic("Incompatible TLB type!");
472 TLB::TlbStats::TlbStats(Stats::Group
*parent
)
473 : Stats::Group(parent
),
474 ADD_STAT(instHits
,"ITB inst hits"),
475 ADD_STAT(instMisses
, "ITB inst misses"),
476 ADD_STAT(readHits
, "DTB read hits"),
477 ADD_STAT(readMisses
, "DTB read misses"),
478 ADD_STAT(writeHits
, "DTB write hits"),
479 ADD_STAT(writeMisses
, "DTB write misses"),
480 ADD_STAT(inserts
, "Number of times an entry is inserted into the TLB"),
481 ADD_STAT(flushTlb
, "Number of times complete TLB was flushed"),
482 ADD_STAT(flushTlbMva
, "Number of times TLB was flushed by MVA"),
483 ADD_STAT(flushTlbMvaAsid
, "Number of times TLB was flushed by MVA & ASID"),
484 ADD_STAT(flushTlbAsid
, "Number of times TLB was flushed by ASID"),
485 ADD_STAT(flushedEntries
, "Number of entries that have been flushed"
487 ADD_STAT(alignFaults
, "Number of TLB faults due to alignment"
489 ADD_STAT(prefetchFaults
, "Number of TLB faults due to prefetch"),
490 ADD_STAT(domainFaults
, "Number of TLB faults due to domain restrictions"),
491 ADD_STAT(permsFaults
, "Number of TLB faults due to permissions"
493 ADD_STAT(readAccesses
, "DTB read accesses", readHits
+ readMisses
),
494 ADD_STAT(writeAccesses
, "DTB write accesses", writeHits
+ writeMisses
),
495 ADD_STAT(instAccesses
, "ITB inst accesses", instHits
+ instMisses
),
496 ADD_STAT(hits
, "Total TLB (inst and data) hits",
497 readHits
+ writeHits
+ instHits
),
498 ADD_STAT(misses
, "Total TLB (inst and data) misses",
499 readMisses
+ writeMisses
+ instMisses
),
500 ADD_STAT(accesses
, "Total TLB (inst and data) accesses",
501 readAccesses
+ writeAccesses
+ instAccesses
)
506 TLB::regProbePoints()
508 ppRefills
.reset(new ProbePoints::PMU(getProbeManager(), "Refills"));
512 TLB::translateSe(const RequestPtr
&req
, ThreadContext
*tc
, Mode mode
,
513 Translation
*translation
, bool &delay
, bool timing
)
516 Addr vaddr_tainted
= req
->getVaddr();
519 vaddr
= purifyTaggedAddr(vaddr_tainted
, tc
, aarch64EL
, (TCR
)ttbcr
,
522 vaddr
= vaddr_tainted
;
523 Request::Flags flags
= req
->getFlags();
525 bool is_fetch
= (mode
== Execute
);
526 bool is_write
= (mode
== Write
);
529 if (sctlr
.a
|| !(flags
& AllowUnaligned
)) {
530 if (vaddr
& mask(flags
& AlignmentMask
)) {
531 // LPAE is always disabled in SE mode
532 return std::make_shared
<DataAbort
>(
534 TlbEntry::DomainType::NoAccess
, is_write
,
535 ArmFault::AlignmentFault
, isStage2
,
542 Process
*p
= tc
->getProcessPtr();
544 if (!p
->pTable
->translate(vaddr
, paddr
))
545 return std::make_shared
<GenericPageTableFault
>(vaddr_tainted
);
546 req
->setPaddr(paddr
);
548 return finalizePhysical(req
, tc
, mode
);
552 TLB::checkPermissions(TlbEntry
*te
, const RequestPtr
&req
, Mode mode
)
554 // a data cache maintenance instruction that operates by MVA does
555 // not generate a Data Abort exeception due to a Permission fault
556 if (req
->isCacheMaintenance()) {
560 Addr vaddr
= req
->getVaddr(); // 32-bit don't have to purify
561 Request::Flags flags
= req
->getFlags();
562 bool is_fetch
= (mode
== Execute
);
563 bool is_write
= (mode
== Write
);
564 bool is_priv
= isPriv
&& !(flags
& UserMode
);
566 // Get the translation type from the actuall table entry
567 ArmFault::TranMethod tranMethod
= te
->longDescFormat
? ArmFault::LpaeTran
568 : ArmFault::VmsaTran
;
570 // If this is the second stage of translation and the request is for a
571 // stage 1 page table walk then we need to check the HCR.PTW bit. This
572 // allows us to generate a fault if the request targets an area marked
573 // as a device or strongly ordered.
574 if (isStage2
&& req
->isPTWalk() && hcr
.ptw
&&
575 (te
->mtype
!= TlbEntry::MemoryType::Normal
)) {
576 return std::make_shared
<DataAbort
>(
577 vaddr
, te
->domain
, is_write
,
578 ArmFault::PermissionLL
+ te
->lookupLevel
,
579 isStage2
, tranMethod
);
582 // Generate an alignment fault for unaligned data accesses to device or
583 // strongly ordered memory
585 if (te
->mtype
!= TlbEntry::MemoryType::Normal
) {
586 if (vaddr
& mask(flags
& AlignmentMask
)) {
588 return std::make_shared
<DataAbort
>(
589 vaddr
, TlbEntry::DomainType::NoAccess
, is_write
,
590 ArmFault::AlignmentFault
, isStage2
,
596 if (te
->nonCacheable
) {
597 // Prevent prefetching from I/O devices.
598 if (req
->isPrefetch()) {
599 // Here we can safely use the fault status for the short
600 // desc. format in all cases
601 return std::make_shared
<PrefetchAbort
>(
602 vaddr
, ArmFault::PrefetchUncacheable
,
603 isStage2
, tranMethod
);
607 if (!te
->longDescFormat
) {
608 switch ((dacr
>> (static_cast<uint8_t>(te
->domain
) * 2)) & 0x3) {
610 stats
.domainFaults
++;
611 DPRINTF(TLB
, "TLB Fault: Data abort on domain. DACR: %#x"
612 " domain: %#x write:%d\n", dacr
,
613 static_cast<uint8_t>(te
->domain
), is_write
);
615 // Use PC value instead of vaddr because vaddr might
616 // be aligned to cache line and should not be the
617 // address reported in FAR
618 return std::make_shared
<PrefetchAbort
>(
620 ArmFault::DomainLL
+ te
->lookupLevel
,
621 isStage2
, tranMethod
);
623 return std::make_shared
<DataAbort
>(
624 vaddr
, te
->domain
, is_write
,
625 ArmFault::DomainLL
+ te
->lookupLevel
,
626 isStage2
, tranMethod
);
628 // Continue with permissions check
631 panic("UNPRED domain\n");
637 // The 'ap' variable is AP[2:0] or {AP[2,1],1b'0}, i.e. always three bits
638 uint8_t ap
= te
->longDescFormat
? te
->ap
<< 1 : te
->ap
;
639 uint8_t hap
= te
->hap
;
641 if (sctlr
.afe
== 1 || te
->longDescFormat
)
645 bool isWritable
= true;
646 // If this is a stage 2 access (eg for reading stage 1 page table entries)
647 // then don't perform the AP permissions check, we stil do the HAP check
654 DPRINTF(TLB
, "Access permissions 0, checking rs:%#x\n",
657 switch ((int)sctlr
.rs
) {
662 abt
= is_write
|| !is_priv
;
678 abt
= !is_priv
&& is_write
;
679 isWritable
= is_priv
;
685 panic("UNPRED premissions\n");
687 abt
= !is_priv
|| is_write
;
696 panic("Unknown permissions %#x\n", ap
);
700 bool hapAbt
= is_write
? !(hap
& 2) : !(hap
& 1);
701 bool xn
= te
->xn
|| (isWritable
&& sctlr
.wxn
) ||
702 (ap
== 3 && sctlr
.uwxn
&& is_priv
);
703 if (is_fetch
&& (abt
|| xn
||
704 (te
->longDescFormat
&& te
->pxn
&& is_priv
) ||
705 (isSecure
&& te
->ns
&& scr
.sif
))) {
707 DPRINTF(TLB
, "TLB Fault: Prefetch abort on permission check. AP:%d "
708 "priv:%d write:%d ns:%d sif:%d sctlr.afe: %d \n",
709 ap
, is_priv
, is_write
, te
->ns
, scr
.sif
,sctlr
.afe
);
710 // Use PC value instead of vaddr because vaddr might be aligned to
711 // cache line and should not be the address reported in FAR
712 return std::make_shared
<PrefetchAbort
>(
714 ArmFault::PermissionLL
+ te
->lookupLevel
,
715 isStage2
, tranMethod
);
716 } else if (abt
| hapAbt
) {
718 DPRINTF(TLB
, "TLB Fault: Data abort on permission check. AP:%d priv:%d"
719 " write:%d\n", ap
, is_priv
, is_write
);
720 return std::make_shared
<DataAbort
>(
721 vaddr
, te
->domain
, is_write
,
722 ArmFault::PermissionLL
+ te
->lookupLevel
,
723 isStage2
| !abt
, tranMethod
);
730 TLB::checkPermissions64(TlbEntry
*te
, const RequestPtr
&req
, Mode mode
,
735 // A data cache maintenance instruction that operates by VA does
736 // not generate a Permission fault unless:
737 // * It is a data cache invalidate (dc ivac) which requires write
738 // permissions to the VA, or
739 // * It is executed from EL0
740 if (req
->isCacheClean() && aarch64EL
!= EL0
&& !isStage2
) {
744 Addr vaddr_tainted
= req
->getVaddr();
745 Addr vaddr
= purifyTaggedAddr(vaddr_tainted
, tc
, aarch64EL
, (TCR
)ttbcr
,
748 Request::Flags flags
= req
->getFlags();
749 bool is_fetch
= (mode
== Execute
);
750 // Cache clean operations require read permissions to the specified VA
751 bool is_write
= !req
->isCacheClean() && mode
== Write
;
752 bool is_atomic
= req
->isAtomic();
753 M5_VAR_USED
bool is_priv
= isPriv
&& !(flags
& UserMode
);
755 updateMiscReg(tc
, curTranType
);
757 // If this is the second stage of translation and the request is for a
758 // stage 1 page table walk then we need to check the HCR.PTW bit. This
759 // allows us to generate a fault if the request targets an area marked
760 // as a device or strongly ordered.
761 if (isStage2
&& req
->isPTWalk() && hcr
.ptw
&&
762 (te
->mtype
!= TlbEntry::MemoryType::Normal
)) {
763 return std::make_shared
<DataAbort
>(
764 vaddr_tainted
, te
->domain
, is_write
,
765 ArmFault::PermissionLL
+ te
->lookupLevel
,
766 isStage2
, ArmFault::LpaeTran
);
769 // Generate an alignment fault for unaligned accesses to device or
770 // strongly ordered memory
772 if (te
->mtype
!= TlbEntry::MemoryType::Normal
) {
773 if (vaddr
& mask(flags
& AlignmentMask
)) {
775 return std::make_shared
<DataAbort
>(
777 TlbEntry::DomainType::NoAccess
,
778 is_atomic
? false : is_write
,
779 ArmFault::AlignmentFault
, isStage2
,
785 if (te
->nonCacheable
) {
786 // Prevent prefetching from I/O devices.
787 if (req
->isPrefetch()) {
788 // Here we can safely use the fault status for the short
789 // desc. format in all cases
790 return std::make_shared
<PrefetchAbort
>(
792 ArmFault::PrefetchUncacheable
,
793 isStage2
, ArmFault::LpaeTran
);
797 uint8_t ap
= 0x3 & (te
->ap
); // 2-bit access protection field
800 bool wxn
= sctlr
.wxn
;
802 uint8_t pxn
= te
->pxn
;
803 bool r
= (!is_write
&& !is_fetch
);
807 if (ArmSystem::haveEL(tc
, EL3
) && isSecure
&& te
->ns
&& scr
.sif
)
810 // grant_read is used for faults from an atomic instruction that
811 // both reads and writes from a memory location. From a ISS point
812 // of view they count as read if a read to that address would have
813 // generated the fault; they count as writes otherwise
814 bool grant_read
= true;
815 DPRINTF(TLBVerbose
, "Checking permissions: ap:%d, xn:%d, pxn:%d, r:%d, "
816 "w:%d, x:%d, is_priv: %d, wxn: %d\n", ap
, xn
,
817 pxn
, r
, w
, x
, is_priv
, wxn
);
820 assert(ArmSystem::haveVirtualization(tc
) && aarch64EL
!= EL2
);
821 // In stage 2 we use the hypervisor access permission bits.
822 // The following permissions are described in ARM DDI 0487A.f
824 uint8_t hap
= 0x3 & te
->hap
;
825 grant_read
= hap
& 0x1;
827 // sctlr.wxn overrides the xn bit
829 } else if (is_atomic
) {
832 } else if (is_write
) {
841 grant_read
= ap
& 0x1;
842 uint8_t perm
= (ap
<< 2) | (xn
<< 1) | pxn
;
852 grant
= r
|| w
|| (x
&& !wxn
);
873 if (checkPAN(tc
, ap
, req
, mode
)) {
879 uint8_t perm
= (ap
<< 2) | (xn
<< 1) | pxn
;
883 grant
= r
|| w
|| (x
&& !wxn
);
891 // regions that are writeable at EL0 should not be
913 if (hcr
.e2h
&& checkPAN(tc
, ap
, req
, mode
)) {
921 uint8_t perm
= (ap
& 0x2) | xn
;
924 grant
= r
|| w
|| (x
&& !wxn
);
946 DPRINTF(TLB
, "TLB Fault: Prefetch abort on permission check. "
947 "AP:%d priv:%d write:%d ns:%d sif:%d "
949 ap
, is_priv
, is_write
, te
->ns
, scr
.sif
, sctlr
.afe
);
950 // Use PC value instead of vaddr because vaddr might be aligned to
951 // cache line and should not be the address reported in FAR
952 return std::make_shared
<PrefetchAbort
>(
954 ArmFault::PermissionLL
+ te
->lookupLevel
,
955 isStage2
, ArmFault::LpaeTran
);
958 DPRINTF(TLB
, "TLB Fault: Data abort on permission check. AP:%d "
959 "priv:%d write:%d\n", ap
, is_priv
, is_write
);
960 return std::make_shared
<DataAbort
>(
961 vaddr_tainted
, te
->domain
,
962 (is_atomic
&& !grant_read
) ? false : is_write
,
963 ArmFault::PermissionLL
+ te
->lookupLevel
,
964 isStage2
, ArmFault::LpaeTran
);
972 TLB::checkPAN(ThreadContext
*tc
, uint8_t ap
, const RequestPtr
&req
, Mode mode
)
974 // The PAN bit has no effect on:
975 // 1) Instruction accesses.
976 // 2) Data Cache instructions other than DC ZVA
977 // 3) Address translation instructions, other than ATS1E1RP and
978 // ATS1E1WP when ARMv8.2-ATS1E1 is implemented. (Unimplemented in
980 // 4) Unprivileged instructions (Unimplemented in gem5)
981 AA64MMFR1 mmfr1
= tc
->readMiscReg(MISCREG_ID_AA64MMFR1_EL1
);
982 if (mmfr1
.pan
&& cpsr
.pan
&& (ap
& 0x1) && mode
!= Execute
&&
983 (!req
->isCacheMaintenance() ||
984 (req
->getFlags() & Request::CACHE_BLOCK_ZERO
))) {
992 TLB::translateMmuOff(ThreadContext
*tc
, const RequestPtr
&req
, Mode mode
,
993 TLB::ArmTranslationType tranType
, Addr vaddr
, bool long_desc_format
)
995 bool is_fetch
= (mode
== Execute
);
996 bool is_atomic
= req
->isAtomic();
997 req
->setPaddr(vaddr
);
998 // When the MMU is off the security attribute corresponds to the
999 // security state of the processor
1001 req
->setFlags(Request::SECURE
);
1004 bool selbit
= bits(vaddr
, 55);
1005 TCR tcr1
= tc
->readMiscReg(MISCREG_TCR_EL1
);
1006 int topbit
= computeAddrTop(tc
, selbit
, is_fetch
, tcr1
, currEL(tc
));
1007 int addr_sz
= bits(vaddr
, topbit
, physAddrRange
);
1011 f
= std::make_shared
<PrefetchAbort
>(vaddr
,
1012 ArmFault::AddressSizeLL
, isStage2
, ArmFault::LpaeTran
);
1014 f
= std::make_shared
<DataAbort
>( vaddr
,
1015 TlbEntry::DomainType::NoAccess
,
1016 is_atomic
? false : mode
==Write
,
1017 ArmFault::AddressSizeLL
, isStage2
, ArmFault::LpaeTran
);
1022 // @todo: double check this (ARM ARM issue C B3.2.1)
1023 if (long_desc_format
|| sctlr
.tre
== 0 || nmrr
.ir0
== 0 ||
1024 nmrr
.or0
== 0 || prrr
.tr0
!= 0x2) {
1025 if (!req
->isCacheMaintenance()) {
1026 req
->setFlags(Request::UNCACHEABLE
);
1028 req
->setFlags(Request::STRICT_ORDER
);
1031 // Set memory attributes
1033 temp_te
.ns
= !isSecure
;
1034 bool dc
= (HaveVirtHostExt(tc
)
1035 && hcr
.e2h
== 1 && hcr
.tge
== 1) ? 0: hcr
.dc
;
1036 bool i_cacheability
= sctlr
.i
&& !sctlr
.m
;
1037 if (isStage2
|| !dc
|| isSecure
||
1038 (isHyp
&& !(tranType
& S1CTran
))) {
1040 temp_te
.mtype
= is_fetch
? TlbEntry::MemoryType::Normal
1041 : TlbEntry::MemoryType::StronglyOrdered
;
1042 temp_te
.innerAttrs
= i_cacheability
? 0x2: 0x0;
1043 temp_te
.outerAttrs
= i_cacheability
? 0x2: 0x0;
1044 temp_te
.shareable
= true;
1045 temp_te
.outerShareable
= true;
1047 temp_te
.mtype
= TlbEntry::MemoryType::Normal
;
1048 temp_te
.innerAttrs
= 0x3;
1049 temp_te
.outerAttrs
= 0x3;
1050 temp_te
.shareable
= false;
1051 temp_te
.outerShareable
= false;
1053 temp_te
.setAttributes(long_desc_format
);
1054 DPRINTF(TLBVerbose
, "(No MMU) setting memory attributes: shareable: "
1055 "%d, innerAttrs: %d, outerAttrs: %d, isStage2: %d\n",
1056 temp_te
.shareable
, temp_te
.innerAttrs
, temp_te
.outerAttrs
,
1058 setAttr(temp_te
.attributes
);
1060 return testTranslation(req
, mode
, TlbEntry::DomainType::NoAccess
);
1064 TLB::translateMmuOn(ThreadContext
* tc
, const RequestPtr
&req
, Mode mode
,
1065 Translation
*translation
, bool &delay
, bool timing
,
1066 bool functional
, Addr vaddr
,
1067 ArmFault::TranMethod tranMethod
)
1069 TlbEntry
*te
= NULL
;
1070 bool is_fetch
= (mode
== Execute
);
1073 Request::Flags flags
= req
->getFlags();
1074 Addr vaddr_tainted
= req
->getVaddr();
1076 Fault fault
= getResultTe(&te
, req
, tc
, mode
, translation
, timing
,
1077 functional
, &mergeTe
);
1078 // only proceed if we have a valid table entry
1079 if ((te
== NULL
) && (fault
== NoFault
)) delay
= true;
1081 // If we have the table entry transfer some of the attributes to the
1082 // request that triggered the translation
1084 // Set memory attributes
1086 "Setting memory attributes: shareable: %d, innerAttrs: %d, "
1087 "outerAttrs: %d, mtype: %d, isStage2: %d\n",
1088 te
->shareable
, te
->innerAttrs
, te
->outerAttrs
,
1089 static_cast<uint8_t>(te
->mtype
), isStage2
);
1090 setAttr(te
->attributes
);
1092 if (te
->nonCacheable
&& !req
->isCacheMaintenance())
1093 req
->setFlags(Request::UNCACHEABLE
);
1095 // Require requests to be ordered if the request goes to
1096 // strongly ordered or device memory (i.e., anything other
1097 // than normal memory requires strict order).
1098 if (te
->mtype
!= TlbEntry::MemoryType::Normal
)
1099 req
->setFlags(Request::STRICT_ORDER
);
1101 Addr pa
= te
->pAddr(vaddr
);
1104 if (isSecure
&& !te
->ns
) {
1105 req
->setFlags(Request::SECURE
);
1107 if (!is_fetch
&& fault
== NoFault
&&
1108 (vaddr
& mask(flags
& AlignmentMask
)) &&
1109 (te
->mtype
!= TlbEntry::MemoryType::Normal
)) {
1110 // Unaligned accesses to Device memory should always cause an
1111 // abort regardless of sctlr.a
1112 stats
.alignFaults
++;
1113 bool is_write
= (mode
== Write
);
1114 return std::make_shared
<DataAbort
>(
1116 TlbEntry::DomainType::NoAccess
, is_write
,
1117 ArmFault::AlignmentFault
, isStage2
,
1121 // Check for a trickbox generated address fault
1122 if (fault
== NoFault
)
1123 fault
= testTranslation(req
, mode
, te
->domain
);
1126 if (fault
== NoFault
) {
1127 // Don't try to finalize a physical address unless the
1128 // translation has completed (i.e., there is a table entry).
1129 return te
? finalizePhysical(req
, tc
, mode
) : NoFault
;
1136 TLB::translateFs(const RequestPtr
&req
, ThreadContext
*tc
, Mode mode
,
1137 Translation
*translation
, bool &delay
, bool timing
,
1138 TLB::ArmTranslationType tranType
, bool functional
)
1140 // No such thing as a functional timing access
1141 assert(!(timing
&& functional
));
1143 updateMiscReg(tc
, tranType
);
1145 Addr vaddr_tainted
= req
->getVaddr();
1148 vaddr
= purifyTaggedAddr(vaddr_tainted
, tc
, aarch64EL
, (TCR
)ttbcr
,
1151 vaddr
= vaddr_tainted
;
1152 Request::Flags flags
= req
->getFlags();
1154 bool is_fetch
= (mode
== Execute
);
1155 bool is_write
= (mode
== Write
);
1156 bool long_desc_format
= aarch64
|| longDescFormatInUse(tc
);
1157 ArmFault::TranMethod tranMethod
= long_desc_format
? ArmFault::LpaeTran
1158 : ArmFault::VmsaTran
;
1161 "CPSR is priv:%d UserMode:%d secure:%d S1S2NsTran:%d\n",
1162 isPriv
, flags
& UserMode
, isSecure
, tranType
& S1S2NsTran
);
1164 DPRINTF(TLB
, "translateFs addr %#x, mode %d, st2 %d, scr %#x sctlr %#x "
1165 "flags %#lx tranType 0x%x\n", vaddr_tainted
, mode
, isStage2
,
1166 scr
, sctlr
, flags
, tranType
);
1168 if ((req
->isInstFetch() && (!sctlr
.i
)) ||
1169 ((!req
->isInstFetch()) && (!sctlr
.c
))){
1170 if (!req
->isCacheMaintenance()) {
1171 req
->setFlags(Request::UNCACHEABLE
);
1173 req
->setFlags(Request::STRICT_ORDER
);
1176 if (sctlr
.a
|| !(flags
& AllowUnaligned
)) {
1177 if (vaddr
& mask(flags
& AlignmentMask
)) {
1178 stats
.alignFaults
++;
1179 return std::make_shared
<DataAbort
>(
1181 TlbEntry::DomainType::NoAccess
, is_write
,
1182 ArmFault::AlignmentFault
, isStage2
,
1189 if (HaveVirtHostExt(tc
) && hcr
.e2h
== 1 && hcr
.tge
==1)
1191 else if (hcr
.dc
== 1)
1194 Fault fault
= NoFault
;
1195 // If guest MMU is off or hcr.vm=0 go straight to stage2
1196 if ((isStage2
&& !vm
) || (!isStage2
&& !sctlr
.m
)) {
1197 fault
= translateMmuOff(tc
, req
, mode
, tranType
, vaddr
,
1200 DPRINTF(TLBVerbose
, "Translating %s=%#x context=%d\n",
1201 isStage2
? "IPA" : "VA", vaddr_tainted
, asid
);
1202 // Translation enabled
1203 fault
= translateMmuOn(tc
, req
, mode
, translation
, delay
, timing
,
1204 functional
, vaddr
, tranMethod
);
1207 // Check for Debug Exceptions
1208 SelfDebug
*sd
= ArmISA::ISA::getSelfDebug(tc
);
1210 if (sd
->enabled() && fault
== NoFault
) {
1211 fault
= sd
->testDebug(tc
, req
, mode
);
// Atomic-mode translation entry point: refresh cached misc registers,
// short-circuit to the stage-2 TLB when the regime demands it, then
// dispatch to the full-system (translateFs) or syscall-emulation
// (translateSe) path with timing=false.
// NOTE(review): line-mangled extract — the FullSystem/SE branch
// structure, the 'delay'/'fault' declarations and the return are
// elided.
1218 TLB::translateAtomic(const RequestPtr
&req
, ThreadContext
*tc
, Mode mode
,
1219 TLB::ArmTranslationType tranType
)
// Re-read CPSR/SCTLR etc. in case system state changed since the last
// translation.
1221 updateMiscReg(tc
, tranType
);
1223 if (directToStage2
) {
1225 return stage2Tlb
->translateAtomic(req
, tc
, mode
, tranType
);
// Full-system path: no translation object, non-timing.
1231 fault
= translateFs(req
, tc
, mode
, NULL
, delay
, false, tranType
);
// Syscall-emulation path: page-table based, non-timing.
1233 fault
= translateSe(req
, tc
, mode
, NULL
, delay
, false);
1239 TLB::translateFunctional(const RequestPtr
&req
, ThreadContext
*tc
, Mode mode
,
1240 TLB::ArmTranslationType tranType
)
1242 updateMiscReg(tc
, tranType
);
1244 if (directToStage2
) {
1246 return stage2Tlb
->translateFunctional(req
, tc
, mode
, tranType
);
1252 fault
= translateFs(req
, tc
, mode
, NULL
, delay
, false, tranType
, true);
1254 fault
= translateSe(req
, tc
, mode
, NULL
, delay
, false);
// Timing-mode translation entry point: refresh cached misc registers,
// delegate to the stage-2 TLB when the regime demands it, otherwise
// kick off translateComplete, which will call back into 'translation'
// when done (or mark it delayed).
// NOTE(review): line-mangled extract — the early return after the
// stage-2 delegation and any fault plumbing are elided.
1260 TLB::translateTiming(const RequestPtr
&req
, ThreadContext
*tc
,
1261 Translation
*translation
, Mode mode
, TLB::ArmTranslationType tranType
)
1263 updateMiscReg(tc
, tranType
);
1265 if (directToStage2
) {
1267 stage2Tlb
->translateTiming(req
, tc
, translation
, mode
, tranType
);
// Timing translations must always have a completion callback object.
1271 assert(translation
);
// callFromS2=isStage2 tells translateComplete whether we are the
// stage-2 TLB (affects when the callback is invoked).
1273 translateComplete(req
, tc
, translation
, mode
, tranType
, isStage2
);
1277 TLB::translateComplete(const RequestPtr
&req
, ThreadContext
*tc
,
1278 Translation
*translation
, Mode mode
, TLB::ArmTranslationType tranType
,
1284 fault
= translateFs(req
, tc
, mode
, translation
, delay
, true, tranType
);
1286 fault
= translateSe(req
, tc
, mode
, translation
, delay
, true);
1287 DPRINTF(TLBVerbose
, "Translation returning delay=%d fault=%d\n", delay
, fault
!=
1289 // If we have a translation, and we're not in the middle of doing a stage
1290 // 2 translation tell the translation that we've either finished or its
1291 // going to take a while. By not doing this when we're in the middle of a
1292 // stage 2 translation we prevent marking the translation as delayed twice,
1293 // one when the translation starts and again when the stage 1 translation
1296 if (translation
&& (callFromS2
|| !stage2Req
|| req
->hasPaddr() ||
1297 fault
!= NoFault
)) {
1299 translation
->finish(fault
, req
, tc
, mode
);
1301 translation
->markDelayed();
1307 TLB::getTableWalkerPort()
1309 return &stage2Mmu
->getDMAPort();
// Refresh the TLB's cached copies of the misc registers that control
// translation (CPSR, SCTLR/TCR/TTBR*, SCR, HCR, VTTBR, ...), and recompute
// the derived state: current EL, security state, hyp mode, ASID/VMID, and
// the stage-2 routing flags (stage2Req / stage2DescReq / directToStage2).
//
// NOTE(review): this extract is missing many structural lines (braces,
// `case` labels, `else` lines, parts of conditions). Code below is kept
// byte-identical to the extract; comments reconstruct intent from the
// visible fragments — verify against upstream gem5 before relying on them.
1313 TLB::updateMiscReg(ThreadContext
*tc
, ArmTranslationType tranType
)
1315 // check if the regs have changed, or the translation mode is different.
1316 // NOTE: the tran type doesn't affect stage 2 TLB's as they only handle
1317 // one type of translation anyway
// Fast path: cached state is still valid for this context and translation
// type (the early `return` line is missing from this extract).
1318 if (miscRegValid
&& miscRegContext
== tc
->contextId() &&
1319 ((tranType
== curTranType
) || isStage2
)) {
1323 DPRINTF(TLBVerbose
, "TLB variables changed!\n");
// Re-read everything, starting with the CPSR.
1324 cpsr
= tc
->readMiscReg(MISCREG_CPSR
)
1326 // Dependencies: SCR/SCR_EL3, CPSR
1327 isSecure
= ArmISA::isSecure(tc
) &&
1328 !(tranType
& HypMode
) && !(tranType
& S1S2NsTran
);
// Working exception level for this translation request.
1330 aarch64EL
= tranTypeEL(cpsr
, tranType
);
// Whether the regime we translate in is AArch64. The stage-2 (true) branch
// of this ternary is on a line missing from the extract — presumably it
// checks EL2's register width; TODO confirm.
1331 aarch64
= isStage2
?
1333 ELIs64(tc
, aarch64EL
== EL0
? EL1
: aarch64EL
);
1335 hcr
= tc
->readMiscReg(MISCREG_HCR_EL2
);
1336 if (aarch64
) { // AArch64
1337 // determine EL we need to translate in
1338 switch (aarch64EL
) {
// NOTE(review): the `case EL0:` label appears to be lost in this extract;
// the block below (VHE check on HCR_EL2.TGE/E2H) matches EL0 handling.
1340 if (HaveVirtHostExt(tc
) && hcr
.tge
== 1 && hcr
.e2h
== 1) {
1341 // VHE code for EL2&0 regime
// EL2&0 regime: translation controls come from the EL2 registers.
1342 sctlr
= tc
->readMiscReg(MISCREG_SCTLR_EL2
);
1343 ttbcr
= tc
->readMiscReg(MISCREG_TCR_EL2
);
// TCR.A1 selects which TTBR supplies the current ASID.
1344 uint64_t ttbr_asid
= ttbcr
.a1
?
1345 tc
->readMiscReg(MISCREG_TTBR1_EL2
) :
1346 tc
->readMiscReg(MISCREG_TTBR0_EL2
);
// ASID is 16 bits when large ASIDs are implemented and enabled, else 8.
1347 asid
= bits(ttbr_asid
,
1348 (haveLargeAsid64
&& ttbcr
.as
) ? 63 : 55, 48);
// Presumably the non-VHE EL0 path (`else` line lost): EL1 registers
// control EL0 translation.
1351 sctlr
= tc
->readMiscReg(MISCREG_SCTLR_EL1
);
1352 ttbcr
= tc
->readMiscReg(MISCREG_TCR_EL1
);
1353 uint64_t ttbr_asid
= ttbcr
.a1
?
1354 tc
->readMiscReg(MISCREG_TTBR1_EL1
) :
1355 tc
->readMiscReg(MISCREG_TTBR0_EL1
);
1356 asid
= bits(ttbr_asid
,
1357 (haveLargeAsid64
&& ttbcr
.as
) ? 63 : 55, 48);
// NOTE(review): presumably `case EL1:` (label lost) — same EL1 register
// selection as the EL0 non-VHE path above.
1363 sctlr
= tc
->readMiscReg(MISCREG_SCTLR_EL1
);
1364 ttbcr
= tc
->readMiscReg(MISCREG_TCR_EL1
);
1365 uint64_t ttbr_asid
= ttbcr
.a1
?
1366 tc
->readMiscReg(MISCREG_TTBR1_EL1
) :
1367 tc
->readMiscReg(MISCREG_TTBR0_EL1
);
1368 asid
= bits(ttbr_asid
,
1369 (haveLargeAsid64
&& ttbcr
.as
) ? 63 : 55, 48);
// NOTE(review): presumably `case EL2:` (label lost) — EL2 registers.
1373 sctlr
= tc
->readMiscReg(MISCREG_SCTLR_EL2
);
1374 ttbcr
= tc
->readMiscReg(MISCREG_TCR_EL2
);
1376 // VHE code for EL2&0 regime
1377 uint64_t ttbr_asid
= ttbcr
.a1
?
1378 tc
->readMiscReg(MISCREG_TTBR1_EL2
) :
1379 tc
->readMiscReg(MISCREG_TTBR0_EL2
);
1380 asid
= bits(ttbr_asid
,
1381 (haveLargeAsid64
&& ttbcr
.as
) ? 63 : 55, 48);
// NOTE(review): presumably `case EL3:` (label lost) — EL3 registers.
1387 sctlr
= tc
->readMiscReg(MISCREG_SCTLR_EL3
);
1388 ttbcr
= tc
->readMiscReg(MISCREG_TCR_EL3
);
// Common AArch64 state: secure configuration, privilege, virtualization.
1393 scr
= tc
->readMiscReg(MISCREG_SCR_EL3
);
1394 isPriv
= aarch64EL
!= EL0
;
1395 if (haveVirtualization
) {
// VMID comes from VTTBR_EL2[55:48].
1396 vmid
= bits(tc
->readMiscReg(MISCREG_VTTBR_EL2
), 55, 48);
// isHyp: at EL2, or explicitly requested via HypMode, but never for
// S1S2Ns / S1C translation types.
1397 isHyp
= aarch64EL
== EL2
;
1398 isHyp
|= tranType
& HypMode
;
1399 isHyp
&= (tranType
& S1S2NsTran
) == 0;
1400 isHyp
&= (tranType
& S1CTran
) == 0;
// VHE (E2H=1, TGE=1): host EL0/EL2 regime — the body of this branch is
// missing from the extract; presumably it forces the hyp/no-stage-2
// settings. TODO confirm.
1402 if (HaveVirtHostExt(tc
) && hcr
.e2h
== 1 && hcr
.tge
==1) {
1406 if (hcr
.e2h
== 1 && (aarch64EL
== EL2
1407 || (hcr
.tge
==1 && aarch64EL
== EL0
))) {
// EL2(&0) regime: no stage 2 translation applies.
1409 directToStage2
= false;
1411 stage2DescReq
= false;
1413 // Work out if we should skip the first stage of translation and go
1414 // directly to stage 2. This value is cached so we don't have to
1415 // compute it for every translation.
1416 bool sec
= !isSecure
|| (isSecure
&& IsSecureEL2Enabled(tc
));
// Stage 2 applies when virtualization is on (vm), below EL2, not hyp,
// and the request isn't a stage-1-only type.
1417 stage2Req
= isStage2
||
1418 (vm
&& !isHyp
&& sec
&&
1419 !(tranType
& S1CTran
) && (aarch64EL
< EL2
) &&
1420 !(tranType
& S1E1Tran
)); // <--- FIX THIS HACK
// NOTE(review): the continuation of this condition (original line 1422)
// is missing from the extract.
1421 stage2DescReq
= isStage2
|| (vm
&& !isHyp
&& sec
&&
// Go straight to stage 2 when stage 1 MMU is off (SCTLR.M == 0).
1423 directToStage2
= !isStage2
&& stage2Req
&& !sctlr
.m
;
// Presumably the no-virtualization `else` path (lines lost).
1428 directToStage2
= false;
1430 stage2DescReq
= false;
// AArch32 path: banked (secure/non-secure) copies of the translation
// registers. The banking-selector argument lines are missing from the
// extract.
1433 sctlr
= tc
->readMiscReg(snsBankedIndex(MISCREG_SCTLR
, tc
,
1435 ttbcr
= tc
->readMiscReg(snsBankedIndex(MISCREG_TTBCR
, tc
,
1437 scr
= tc
->readMiscReg(MISCREG_SCR
);
1438 isPriv
= cpsr
.mode
!= MODE_USER
;
1439 if (longDescFormatInUse(tc
)) {
// LPAE: ASID lives in the active TTBR (selected by TTBCR.A1).
1440 uint64_t ttbr_asid
= tc
->readMiscReg(
1441 snsBankedIndex(ttbcr
.a1
? MISCREG_TTBR1
:
1444 asid
= bits(ttbr_asid
, 55, 48);
1445 } else { // Short-descriptor translation table format in use
// Short descriptor: ASID comes from CONTEXTIDR.
1446 CONTEXTIDR context_id
= tc
->readMiscReg(snsBankedIndex(
1447 MISCREG_CONTEXTIDR
, tc
,!isSecure
));
1448 asid
= context_id
.asid
;
// Memory-attribute and domain registers (argument tails missing from
// the extract).
1450 prrr
= tc
->readMiscReg(snsBankedIndex(MISCREG_PRRR
, tc
,
1452 nmrr
= tc
->readMiscReg(snsBankedIndex(MISCREG_NMRR
, tc
,
1454 dacr
= tc
->readMiscReg(snsBankedIndex(MISCREG_DACR
, tc
,
1456 hcr
= tc
->readMiscReg(MISCREG_HCR
);
1458 if (haveVirtualization
) {
1459 vmid
= bits(tc
->readMiscReg(MISCREG_VTTBR
), 55, 48);
// Same isHyp derivation as the AArch64 path, from CPSR.mode.
1460 isHyp
= cpsr
.mode
== MODE_HYP
;
1461 isHyp
|= tranType
& HypMode
;
1462 isHyp
&= (tranType
& S1S2NsTran
) == 0;
1463 isHyp
&= (tranType
& S1CTran
) == 0;
// Hyp mode uses HSCTLR instead of the banked SCTLR (branch line lost).
1465 sctlr
= tc
->readMiscReg(MISCREG_HSCTLR
);
1467 // Work out if we should skip the first stage of translation and go
1468 // directly to stage 2. This value is cached so we don't have to
1469 // compute it for every translation.
1470 bool sec
= !isSecure
|| (isSecure
&& IsSecureEL2Enabled(tc
));
1471 stage2Req
= hcr
.vm
&& !isStage2
&& !isHyp
&& sec
&&
1472 !(tranType
& S1CTran
);
1473 stage2DescReq
= hcr
.vm
&& !isStage2
&& !isHyp
&& sec
;
1474 directToStage2
= stage2Req
&& !sctlr
.m
;
// Presumably the no-virtualization `else` path (lines lost).
1479 directToStage2
= false;
1480 stage2DescReq
= false;
// Mark the cached state valid for this context / translation type.
1483 miscRegValid
= true;
1484 miscRegContext
= tc
->contextId();
1485 curTranType
= tranType
;
1489 TLB::tranTypeEL(CPSR cpsr
, ArmTranslationType type
)
1510 return currEL(cpsr
);
1513 panic("Unknown translation mode!\n");
// Look up `vaddr` in this TLB and, on a miss, start a hardware table walk.
// On a hit *te points at the entry; on a timing-mode miss the walk result is
// delivered later through `translation`.
//
// NOTE(review): this extract is missing structural lines (braces, the
// miss-check `if`, stats branches, the final lookup/assert). Code is kept
// byte-identical; comments reconstruct intent from the visible fragments —
// verify against upstream gem5.
1518 TLB::getTE(TlbEntry
**te
, const RequestPtr
&req
, ThreadContext
*tc
, Mode mode
,
1519 Translation
*translation
, bool timing
, bool functional
,
1520 bool is_secure
, TLB::ArmTranslationType tranType
)
1522 // In a 2-stage system, the IPA->PA translation can be started via this
1523 // call so make sure the miscRegs are correct.
1525 updateMiscReg(tc
, tranType
);
// Classify the access; used below for the per-kind miss statistics.
1527 bool is_fetch
= (mode
== Execute
);
1528 bool is_write
= (mode
== Write
);
1530 Addr vaddr_tainted
= req
->getVaddr();
// Target EL: the current AArch64 EL, or EL1 for AArch32 regimes.
1532 ExceptionLevel target_el
= aarch64
? aarch64EL
: EL1
;
// AArch64: strip address tag bits (the trailing argument and the
// AArch32 `else` line are missing from this extract).
1534 vaddr
= purifyTaggedAddr(vaddr_tainted
, tc
, target_el
, (TCR
)ttbcr
,
1537 vaddr
= vaddr_tainted
;
// TLB lookup (the tail of the argument list is missing from the extract).
1539 *te
= lookup(vaddr
, asid
, vmid
, isHyp
, is_secure
, false, false, target_el
,
// Miss path (the enclosing `if (*te == NULL)` line is absent from the
// extract).
1542 if (req
->isPrefetch()) {
1543 // if the request is a prefetch don't attempt to fill the TLB or go
1544 // any further with the memory access (here we can safely use the
1545 // fault status for the short desc. format in all cases)
1546 stats
.prefetchFaults
++;
1547 return std::make_shared
<PrefetchAbort
>(
1548 vaddr_tainted
, ArmFault::PrefetchTLBMiss
, isStage2
);
// Miss statistics — the fetch/read branches around this write-miss
// counter are missing from the extract.
1554 stats
.writeMisses
++;
1558 // start translation table walk, pass variables rather than
1559 // re-retreaving in table walker for speed
1560 DPRINTF(TLB
, "TLB Miss: Starting hardware table walker for %#x(%d:%d)\n",
1561 vaddr_tainted
, asid
, vmid
);
// Kick off the table walk; in functional/atomic mode this fills the TLB
// synchronously.
1563 fault
= tableWalker
->walk(req
, tc
, asid
, vmid
, isHyp
, mode
,
1564 translation
, timing
, functional
, is_secure
,
1565 tranType
, stage2DescReq
);
1566 // for timing mode, return and wait for table walk,
1567 if (timing
|| fault
!= NoFault
) {
// Re-lookup after a completed (atomic/functional) walk; the entry is now
// expected to be present (argument tail missing from the extract).
1571 *te
= lookup(vaddr
, asid
, vmid
, isHyp
, is_secure
, false, false,
// Produce the final translation entry for a request: either the stage 2
// entry directly (when this is the stage 2 TLB), or the stage 1 entry —
// optionally merged with a stage 2 lookup when stage 2 is required.
//
// NOTE(review): this extract is missing structural lines (braces, `else`
// lines, parts of argument lists). Code is kept byte-identical; comments
// reconstruct intent from the visible fragments — verify against upstream.
1588 TLB::getResultTe(TlbEntry
**te
, const RequestPtr
&req
,
1589 ThreadContext
*tc
, Mode mode
,
1590 Translation
*translation
, bool timing
, bool functional
,
1596 // We are already in the stage 2 TLB. Grab the table entry for stage
1597 // 2 only. We are here because stage 1 translation is disabled.
1598 TlbEntry
*s2Te
= NULL
;
1599 // Get the stage 2 table entry
1600 fault
= getTE(&s2Te
, req
, tc
, mode
, translation
, timing
, functional
,
1601 isSecure
, curTranType
);
1602 // Check permissions of stage 2
1603 if ((s2Te
!= NULL
) && (fault
== NoFault
)) {
// AArch64 vs AArch32 permission check (the selecting `if`/`else` lines
// are missing from the extract).
1605 fault
= checkPermissions64(s2Te
, req
, mode
, tc
);
1607 fault
= checkPermissions(s2Te
, req
, mode
);
// Stage 1 path (the enclosing `else` structure is absent from the
// extract).
1613 TlbEntry
*s1Te
= NULL
;
1615 Addr vaddr_tainted
= req
->getVaddr();
1617 // Get the stage 1 table entry
1618 fault
= getTE(&s1Te
, req
, tc
, mode
, translation
, timing
, functional
,
1619 isSecure
, curTranType
);
1620 // only proceed if we have a valid table entry
1621 if ((s1Te
!= NULL
) && (fault
== NoFault
)) {
1622 // Check stage 1 permissions before checking stage 2
1624 fault
= checkPermissions64(s1Te
, req
, mode
, tc
);
1626 fault
= checkPermissions(s1Te
, req
, mode
);
// NOTE(review): bitwise `&` between bools — behaves like `&&` here but
// is almost certainly a typo for `&&`; confirm against upstream before
// changing.
1627 if (stage2Req
& (fault
== NoFault
)) {
// Merge the stage 1 entry with the stage 2 translation of its output
// address (the argument tail of this constructor call is missing).
1628 Stage2LookUp
*s2Lookup
= new Stage2LookUp(this, stage2Tlb
, *s1Te
,
1629 req
, translation
, mode
, timing
, functional
, isSecure
,
1631 fault
= s2Lookup
->getTe(tc
, mergeTe
);
1632 if (s2Lookup
->isComplete()) {
1634 // We've finished with the lookup so delete it
1637 // The lookup hasn't completed, so we can't delete it now. We
1638 // get round this by asking the object to self delete when the
1639 // translation is complete.
1640 s2Lookup
->setSelfDelete();
1643 // This case deals with an S1 hit (or bypass), followed by
1644 // an S2 hit-but-perms issue
1646 DPRINTF(TLBVerbose
, "s2TLB: reqVa %#x, reqPa %#x, fault %p\n",
1647 vaddr_tainted
, req
->hasPaddr() ? req
->getPaddr() : ~0, fault
);
1648 if (fault
!= NoFault
) {
// Annotate the fault so it reports the original (stage 1) VA rather
// than the intermediate PA. NOTE(review): reinterpret_cast on the
// shared_ptr payload — works because faults here are ArmFaults, but a
// static/dynamic cast would be safer; confirm before changing.
1649 ArmFault
*armFault
= reinterpret_cast<ArmFault
*>(fault
.get());
1650 armFault
->annotate(ArmFault::S1PTW
, false);
1651 armFault
->annotate(ArmFault::OVA
, vaddr_tainted
);
1661 TLB::setTestInterface(SimObject
*_ti
)
1666 TlbTestInterface
*ti(dynamic_cast<TlbTestInterface
*>(_ti
));
1667 fatal_if(!ti
, "%s is not a valid ARM TLB tester\n", _ti
->name());
1673 TLB::testTranslation(const RequestPtr
&req
, Mode mode
,
1674 TlbEntry::DomainType domain
)
1676 if (!test
|| !req
->hasSize() || req
->getSize() == 0 ||
1677 req
->isCacheMaintenance()) {
1680 return test
->translationCheck(req
, isPriv
, mode
, domain
);
1685 TLB::testWalk(Addr pa
, Addr size
, Addr va
, bool is_secure
, Mode mode
,
1686 TlbEntry::DomainType domain
, LookupLevel lookup_level
)
1691 return test
->walkCheck(pa
, size
, va
, is_secure
, isPriv
, mode
,
1692 domain
, lookup_level
);