2 * Copyright (c) 2010, 2012-2019 ARM Limited
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
14 * Redistribution and use in source and binary forms, with or without
15 * modification, are permitted provided that the following conditions are
16 * met: redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer;
18 * redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution;
21 * neither the name of the copyright holders nor the names of its
22 * contributors may be used to endorse or promote products derived from
23 * this software without specific prior written permission.
25 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
26 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
27 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
28 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
29 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
30 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
31 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
32 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
33 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
34 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
35 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
40 #include "arch/arm/table_walker.hh"
44 #include "arch/arm/faults.hh"
45 #include "arch/arm/stage2_mmu.hh"
46 #include "arch/arm/system.hh"
47 #include "arch/arm/tlb.hh"
48 #include "cpu/base.hh"
49 #include "cpu/thread_context.hh"
50 #include "debug/Checkpoint.hh"
51 #include "debug/Drain.hh"
52 #include "debug/TLB.hh"
53 #include "debug/TLBVerbose.hh"
54 #include "dev/dma_device.hh"
55 #include "sim/system.hh"
// Pull ArmISA names (MISCREG_* indices, EL0/EL1/..., helpers such as
// snsBankedIndex/ELIs64) into scope for the rest of this file.
// NOTE(review): extraction damage — the original file's line number ("57")
// is fused into the statement text; kept byte-identical.
57 using namespace ArmISA
;
// Constructor: wires up the walker's state, the per-level descriptor-fetch
// events, and caches system-level feature flags (security, LPAE,
// virtualization, PA range, large ASIDs) from the owning ArmSystem.
// NOTE(review): extraction damage — original line numbers are fused into the
// text and several lines (init-list colon, braces, the non-FS else branch
// guard) are missing from this view; code kept byte-identical.
59 TableWalker::TableWalker(const Params
*p
)
61 stage2Mmu(NULL
), port(NULL
), masterId(Request::invldMasterId
),
62 isStage2(p
->is_stage2
), tlb(NULL
),
63 currState(NULL
), pending(false),
64 numSquashable(p
->num_squash_per_cycle
),
66 pendingChangeTick(curTick()),
67 doL1DescEvent([this]{ doL1DescriptorWrapper(); }, name()),
68 doL2DescEvent([this]{ doL2DescriptorWrapper(); }, name()),
69 doL0LongDescEvent([this]{ doL0LongDescriptorWrapper(); }, name()),
70 doL1LongDescEvent([this]{ doL1LongDescriptorWrapper(); }, name()),
71 doL2LongDescEvent([this]{ doL2LongDescriptorWrapper(); }, name()),
72 doL3LongDescEvent([this]{ doL3LongDescriptorWrapper(); }, name()),
// Event table indexed by long-descriptor lookup level (L0..L3).
73 LongDescEventByLevel
{ &doL0LongDescEvent
, &doL1LongDescEvent
,
74 &doL2LongDescEvent
, &doL3LongDescEvent
},
75 doProcessEvent([this]{ processWalkWrapper(); }, name())
79 // Cache system-level properties
// The system pointer is downcast to ArmSystem to read ARM-specific
// feature queries; presumably guarded by an armSys NULL check on a
// missing line — TODO confirm against the full source.
81 ArmSystem
*armSys
= dynamic_cast<ArmSystem
*>(p
->sys
);
83 haveSecurity
= armSys
->haveSecurity();
84 _haveLPAE
= armSys
->haveLPAE();
85 _haveVirtualization
= armSys
->haveVirtualization();
86 physAddrRange
= armSys
->physAddrRange();
87 _haveLargeAsid64
= armSys
->haveLargeAsid64();
// Fallback path (branch header not visible here): without an ArmSystem
// all optional features are disabled.
89 haveSecurity
= _haveLPAE
= _haveVirtualization
= false;
90 _haveLargeAsid64
= false;
// Destructor. NOTE(review): the body is not visible in this extraction —
// do not assume it is empty; verify against the full source.
96 TableWalker::~TableWalker()
// Late-binding hook: record the stage-2 MMU's DMA port and the master ID
// this walker will stamp on its page-table memory requests.
// NOTE(review): extraction damage — fused line numbers, missing braces and
// the stage2Mmu assignment line; code kept byte-identical.
102 TableWalker::setMMU(Stage2MMU
*m
, MasterID master_id
)
105 port
= &m
->getDMAPort();
106 masterId
= master_id
;
// Init-time sanity checks (enclosing function header not visible in this
// extraction — presumably TableWalker::init(); TODO confirm): the walker
// cannot operate without its stage-2 MMU, DMA port, and TLB wired up.
112 fatal_if(!stage2Mmu
, "Table walker must have a valid stage-2 MMU\n");
113 fatal_if(!port
, "Table walker must have a valid port\n");
114 fatal_if(!tlb
, "Table walker must have a valid TLB\n");
// Port lookup: "port" resolves to the walker's DMA port, but accessing it
// through a stage-two walker is a fatal configuration error; anything else
// is delegated to ClockedObject::getPort().
// NOTE(review): extraction damage — the return of the port for the
// non-stage-2 case is among the missing lines; code kept byte-identical.
118 TableWalker::getPort(const std::string
&if_name
, PortID idx
)
120 if (if_name
== "port") {
124 fatal("Cannot access table walker port through stage-two walker\n");
127 return ClockedObject::getPort(if_name
, idx
);
// Default-construct a per-walk state record with every field in a neutral
// "no translation in flight" state (null pointers, zeroed registers, flags
// false, mode Read, normal translation type).
// NOTE(review): l2Desc is initialized from l1Desc — they share descriptor
// storage; presumably intentional aliasing, confirm against L2Descriptor.
130 TableWalker::WalkerState::WalkerState() :
131 tc(nullptr), aarch64(false), el(EL0
), physAddrRange(0), req(nullptr),
132 asid(0), vmid(0), isHyp(false), transState(nullptr),
133 vaddr(0), vaddr_tainted(0),
134 sctlr(0), scr(0), cpsr(0), tcr(0),
135 htcr(0), hcr(0), vtcr(0),
136 isWrite(false), isFetch(false), isSecure(false),
137 isUncacheable(false),
138 secureLookup(false), rwTable(false), userTable(false), xnTable(false),
139 pxnTable(false), hpd(false), stage2Req(false),
140 stage2Tran(nullptr), timing(false), functional(false),
141 mode(BaseTLB::Read
), tranType(TLB::NormalTran
), l2Desc(l1Desc
),
142 delayed(false), tableWalker(nullptr)
// Signal drain completion once we are draining and every per-level state
// queue (L0..L3) plus the pending queue is empty.
// NOTE(review): the signalDrainDone() call itself is on a line missing from
// this extraction; code kept byte-identical.
147 TableWalker::completeDrain()
149 if (drainState() == DrainState::Draining
&&
150 stateQueues
[L0
].empty() && stateQueues
[L1
].empty() &&
151 stateQueues
[L2
].empty() && stateQueues
[L3
].empty() &&
152 pendingQueue
.empty()) {
154 DPRINTF(Drain
, "TableWalker done draining, processing drain event\n");
// Drain check (function header not visible — presumably
// TableWalker::drain(); TODO confirm): report Draining while any per-level
// state queue or the pending queue still holds work, Drained otherwise.
162 bool state_queues_not_empty
= false;
164 for (int i
= 0; i
< MAX_LOOKUP_LEVELS
; ++i
) {
165 if (!stateQueues
[i
].empty()) {
166 state_queues_not_empty
= true;
// (break and closing braces are among the missing lines)
171 if (state_queues_not_empty
|| pendingQueue
.size()) {
172 DPRINTF(Drain
, "TableWalker not drained\n");
173 return DrainState::Draining
;
175 DPRINTF(Drain
, "TableWalker free, no need to drain\n");
176 return DrainState::Drained
;
// Resume after a drain: in timing mode a stale in-flight walk state must be
// dealt with (the handling body is on lines missing from this extraction).
181 TableWalker::drainResume()
183 if (params()->sys
->isTimingMode() && currState
) {
// Entry point for a translation table walk. Sets up (or reuses) a
// WalkerState from the request/thread-context arguments, snapshots the
// relevant system registers (SCTLR/TCR/TTBCR/HCR/HTCR/VTCR, banked by
// security state on AArch32), then dispatches to processWalkAArch64 /
// processWalkLPAE / processWalk — immediately in atomic/functional mode, or
// via the pending queue in timing mode.
// NOTE(review): extraction damage — fused line numbers; many lines (case
// labels, else branches, closing braces, the final return) are missing from
// this view; code kept byte-identical.
191 TableWalker::walk(const RequestPtr
&_req
, ThreadContext
*_tc
, uint16_t _asid
,
192 uint8_t _vmid
, bool _isHyp
, TLB::Mode _mode
,
193 TLB::Translation
*_trans
, bool _timing
, bool _functional
,
194 bool secure
, TLB::ArmTranslationType tranType
,
// A walk cannot be both functional and timing.
197 assert(!(_functional
&& _timing
));
200 WalkerState
*savedCurrState
= NULL
;
202 if (!currState
&& !_functional
) {
203 // For atomic mode, a new WalkerState instance should be only created
204 // once per TLB. For timing mode, a new instance is generated for every
206 DPRINTF(TLBVerbose
, "creating new instance of WalkerState\n");
208 currState
= new WalkerState();
209 currState
->tableWalker
= this;
210 } else if (_functional
) {
211 // If we are mixing functional mode with timing (or even
212 // atomic), we need to to be careful and clean up after
213 // ourselves to not risk getting into an inconsistent state.
214 DPRINTF(TLBVerbose
, "creating functional instance of WalkerState\n");
215 savedCurrState
= currState
;
216 currState
= new WalkerState();
217 currState
->tableWalker
= this;
218 } else if (_timing
) {
219 // This is a translation that was completed and then faulted again
220 // because some underlying parameters that affect the translation
221 // changed out from under us (e.g. asid). It will either be a
222 // misprediction, in which case nothing will happen or we'll use
223 // this fault to re-execute the faulting instruction which should clean
225 if (currState
->vaddr_tainted
== _req
->getVaddr()) {
226 ++statSquashedBefore
;
// Same VA re-walked while one is in flight: ask the CPU to re-execute.
227 return std::make_shared
<ReExec
>();
232 currState
->startTime
= curTick();
234 // ARM DDI 0487A.f (ARMv8 ARM) pg J8-5672
235 // aarch32/translation/translation/AArch32.TranslateAddress dictates
236 // even AArch32 EL0 will use AArch64 translation if EL1 is in AArch64.
239 currState
->aarch64
= ELIs64(_tc
, EL2
);
242 TLB::tranTypeEL(_tc
->readMiscReg(MISCREG_CPSR
), tranType
);
244 ELIs64(_tc
, currState
->el
== EL0
? EL1
: currState
->el
);
246 currState
->transState
= _trans
;
247 currState
->req
= _req
;
248 currState
->fault
= NoFault
;
249 currState
->asid
= _asid
;
250 currState
->vmid
= _vmid
;
251 currState
->isHyp
= _isHyp
;
252 currState
->timing
= _timing
;
253 currState
->functional
= _functional
;
254 currState
->mode
= _mode
;
255 currState
->tranType
= tranType
;
256 currState
->isSecure
= secure
;
257 currState
->physAddrRange
= physAddrRange
;
259 /** @todo These should be cached or grabbed from cached copies in
260 the TLB, all these miscreg reads are expensive */
261 currState
->vaddr_tainted
= currState
->req
->getVaddr();
// On AArch64 strip address tags (TBI) per current EL before walking.
262 if (currState
->aarch64
)
263 currState
->vaddr
= purifyTaggedAddr(currState
->vaddr_tainted
,
264 currState
->tc
, currState
->el
,
265 currState
->mode
==TLB::Execute
);
267 currState
->vaddr
= currState
->vaddr_tainted
;
269 if (currState
->aarch64
) {
271 currState
->sctlr
= currState
->tc
->readMiscReg(MISCREG_SCTLR_EL1
);
272 currState
->vtcr
= currState
->tc
->readMiscReg(MISCREG_VTCR_EL2
);
// Per-EL register selection (case labels are among the missing lines).
273 } else switch (currState
->el
) {
276 currState
->sctlr
= currState
->tc
->readMiscReg(MISCREG_SCTLR_EL1
);
277 currState
->tcr
= currState
->tc
->readMiscReg(MISCREG_TCR_EL1
);
280 assert(_haveVirtualization
);
281 currState
->sctlr
= currState
->tc
->readMiscReg(MISCREG_SCTLR_EL2
);
282 currState
->tcr
= currState
->tc
->readMiscReg(MISCREG_TCR_EL2
);
285 assert(haveSecurity
);
286 currState
->sctlr
= currState
->tc
->readMiscReg(MISCREG_SCTLR_EL3
);
287 currState
->tcr
= currState
->tc
->readMiscReg(MISCREG_TCR_EL3
);
290 panic("Invalid exception level");
293 currState
->hcr
= currState
->tc
->readMiscReg(MISCREG_HCR_EL2
);
// AArch32 path: security-banked copies of SCTLR/TTBCR plus hyp regs.
295 currState
->sctlr
= currState
->tc
->readMiscReg(snsBankedIndex(
296 MISCREG_SCTLR
, currState
->tc
, !currState
->isSecure
));
297 currState
->ttbcr
= currState
->tc
->readMiscReg(snsBankedIndex(
298 MISCREG_TTBCR
, currState
->tc
, !currState
->isSecure
));
299 currState
->htcr
= currState
->tc
->readMiscReg(MISCREG_HTCR
);
300 currState
->hcr
= currState
->tc
->readMiscReg(MISCREG_HCR
);
301 currState
->vtcr
= currState
->tc
->readMiscReg(MISCREG_VTCR
);
303 sctlr
= currState
->sctlr
;
305 currState
->isFetch
= (currState
->mode
== TLB::Execute
);
306 currState
->isWrite
= (currState
->mode
== TLB::Write
);
308 statRequestOrigin
[REQUESTED
][currState
->isFetch
]++;
// A stage-2 walk is needed only when requested and we are not already
// the stage-2 walker.
310 currState
->stage2Req
= _stage2Req
&& !isStage2
;
312 bool long_desc_format
= currState
->aarch64
|| _isHyp
|| isStage2
||
313 longDescFormatInUse(currState
->tc
);
315 if (long_desc_format
) {
316 // Helper variables used for hierarchical permissions
317 currState
->secureLookup
= currState
->isSecure
;
318 currState
->rwTable
= true;
319 currState
->userTable
= true;
320 currState
->xnTable
= false;
321 currState
->pxnTable
= false;
323 ++statWalksLongDescriptor
;
325 ++statWalksShortDescriptor
;
// Atomic/functional mode: run the whole walk right now.
328 if (!currState
->timing
) {
329 Fault fault
= NoFault
;
330 if (currState
->aarch64
)
331 fault
= processWalkAArch64();
332 else if (long_desc_format
)
333 fault
= processWalkLPAE();
335 fault
= processWalk();
337 // If this was a functional non-timing access restore state to
339 if (currState
->functional
) {
341 currState
= savedCurrState
;
// Timing mode: queue behind any walk already in flight.
346 if (pending
|| pendingQueue
.size()) {
347 pendingQueue
.push_back(currState
);
353 if (currState
->aarch64
)
354 return processWalkAArch64();
355 else if (long_desc_format
)
356 return processWalkLPAE();
358 return processWalk();
// Timing-mode dispatcher: pops the next queued walk, skips it if the
// originating instruction was squashed or a prior walk already filled the
// TLB (up to numSquashable per invocation), and otherwise starts the
// appropriate walk flavor, finishing the translation on fault.
// NOTE(review): extraction damage — fused line numbers; several lines
// (else branches, braces, rescheduling of doProcessEvent) are missing from
// this view; code kept byte-identical.
365 TableWalker::processWalkWrapper()
368 assert(pendingQueue
.size());
370 currState
= pendingQueue
.front();
372 // Check if a previous walk filled this request already
373 // @TODO Should this always be the TLB or should we look in the stage2 TLB?
374 TlbEntry
* te
= tlb
->lookup(currState
->vaddr
, currState
->asid
,
375 currState
->vmid
, currState
->isHyp
, currState
->isSecure
, true, false,
378 // Check if we still need to have a walk for this request. If the requesting
379 // instruction has been squashed, or a previous walk has filled the TLB with
380 // a match, we just want to get rid of the walk. The latter could happen
381 // when there are multiple outstanding misses to a single page and a
382 // previous request has been successfully translated.
383 if (!currState
->transState
->squashed() && !te
) {
384 // We've got a valid request, lets process it
386 pendingQueue
.pop_front();
387 // Keep currState in case one of the processWalk... calls NULLs it
388 WalkerState
*curr_state_copy
= currState
;
390 if (currState
->aarch64
)
391 f
= processWalkAArch64();
392 else if (longDescFormatInUse(currState
->tc
) ||
393 currState
->isHyp
|| isStage2
)
394 f
= processWalkLPAE();
// On fault, finish the translation through the saved state copy (the
// walk may have nulled currState) and free it.
399 curr_state_copy
->transState
->finish(f
, curr_state_copy
->req
,
400 curr_state_copy
->tc
, curr_state_copy
->mode
);
402 delete curr_state_copy
;
408 // If the instruction that we were translating for has been
409 // squashed we shouldn't bother.
410 unsigned num_squashed
= 0;
411 ThreadContext
*tc
= currState
->tc
;
412 while ((num_squashed
< numSquashable
) && currState
&&
413 (currState
->transState
->squashed() || te
)) {
414 pendingQueue
.pop_front();
416 statSquashedBefore
++;
418 DPRINTF(TLB
, "Squashing table walk for address %#x\n",
419 currState
->vaddr_tainted
);
421 if (currState
->transState
->squashed()) {
422 // finish the translation which will delete the translation object
423 currState
->transState
->finish(
424 std::make_shared
<UnimpFault
>("Squashed Inst"),
425 currState
->req
, currState
->tc
, currState
->mode
);
427 // translate the request now that we know it will work
428 statWalkServiceTime
.sample(curTick() - currState
->startTime
);
429 tlb
->translateTiming(currState
->req
, currState
->tc
,
430 currState
->transState
, currState
->mode
);
434 // delete the current request
437 // peak at the next one
438 if (pendingQueue
.size()) {
439 currState
= pendingQueue
.front();
440 te
= tlb
->lookup(currState
->vaddr
, currState
->asid
,
441 currState
->vmid
, currState
->isHyp
, currState
->isSecure
, true,
442 false, currState
->el
);
444 // Terminate the loop, nothing more to do
450 // if we still have pending translations, schedule more work
// AArch32 short-descriptor walk: select TTBR0/TTBR1 by TTBCR.N and the VA's
// top bits (faulting if the selected table is disabled via pd0/pd1 under
// the Security Extensions), compute the L1 descriptor address, run the
// trickbox check, and issue the L1 descriptor fetch.
// NOTE(review): extraction damage — fused line numbers; the ttbr
// declaration, several fault-constructor arguments, else branches and
// braces are missing from this view; code kept byte-identical.
456 TableWalker::processWalk()
460 // For short descriptors, translation configs are held in
// TTBR1 IRGN bits decide cacheability of the walk itself.
462 RegVal ttbr1
= currState
->tc
->readMiscReg(snsBankedIndex(
463 MISCREG_TTBR1
, currState
->tc
, !currState
->isSecure
));
465 const auto irgn0_mask
= 0x1;
466 const auto irgn1_mask
= 0x40;
467 currState
->isUncacheable
= (ttbr1
& (irgn0_mask
| irgn1_mask
)) == 0;
469 // If translation isn't enabled, we shouldn't be here
470 assert(currState
->sctlr
.m
|| isStage2
);
471 const bool is_atomic
= currState
->req
->isAtomic();
473 DPRINTF(TLB
, "Beginning table walk for address %#x, TTBCR: %#x, bits:%#x\n",
474 currState
->vaddr_tainted
, currState
->ttbcr
, mbits(currState
->vaddr
, 31,
475 32 - currState
->ttbcr
.n
));
477 statWalkWaitTime
.sample(curTick() - currState
->startTime
);
// TTBR0 covers the low region; TTBR1 the high region when TTBCR.N != 0.
479 if (currState
->ttbcr
.n
== 0 || !mbits(currState
->vaddr
, 31,
480 32 - currState
->ttbcr
.n
)) {
481 DPRINTF(TLB
, " - Selecting TTBR0\n");
482 // Check if table walk is allowed when Security Extensions are enabled
483 if (haveSecurity
&& currState
->ttbcr
.pd0
) {
484 if (currState
->isFetch
)
485 return std::make_shared
<PrefetchAbort
>(
486 currState
->vaddr_tainted
,
487 ArmFault::TranslationLL
+ L1
,
491 return std::make_shared
<DataAbort
>(
492 currState
->vaddr_tainted
,
493 TlbEntry::DomainType::NoAccess
,
494 is_atomic
? false : currState
->isWrite
,
495 ArmFault::TranslationLL
+ L1
, isStage2
,
498 ttbr
= currState
->tc
->readMiscReg(snsBankedIndex(
499 MISCREG_TTBR0
, currState
->tc
, !currState
->isSecure
));
501 DPRINTF(TLB
, " - Selecting TTBR1\n");
502 // Check if table walk is allowed when Security Extensions are enabled
503 if (haveSecurity
&& currState
->ttbcr
.pd1
) {
504 if (currState
->isFetch
)
505 return std::make_shared
<PrefetchAbort
>(
506 currState
->vaddr_tainted
,
507 ArmFault::TranslationLL
+ L1
,
511 return std::make_shared
<DataAbort
>(
512 currState
->vaddr_tainted
,
513 TlbEntry::DomainType::NoAccess
,
514 is_atomic
? false : currState
->isWrite
,
515 ArmFault::TranslationLL
+ L1
, isStage2
,
// TTBR1 lookups always behave as if TTBCR.N were zero.
519 currState
->ttbcr
.n
= 0;
522 Addr l1desc_addr
= mbits(ttbr
, 31, 14 - currState
->ttbcr
.n
) |
523 (bits(currState
->vaddr
, 31 - currState
->ttbcr
.n
, 20) << 2);
524 DPRINTF(TLB
, " - Descriptor at address %#x (%s)\n", l1desc_addr
,
525 currState
->isSecure
? "s" : "ns");
527 // Trickbox address check
529 f
= testWalk(l1desc_addr
, sizeof(uint32_t),
530 TlbEntry::DomainType::NoAccess
, L1
);
532 DPRINTF(TLB
, "Trickbox check caused fault on %#x\n", currState
->vaddr_tainted
);
533 if (currState
->timing
) {
535 nextWalk(currState
->tc
);
538 currState
->tc
= NULL
;
539 currState
->req
= NULL
;
544 Request::Flags flag
= Request::PT_WALK
;
545 if (currState
->sctlr
.c
== 0 || currState
->isUncacheable
) {
546 flag
.set(Request::UNCACHEABLE
);
549 if (currState
->isSecure
) {
550 flag
.set(Request::SECURE
);
554 delayed
= fetchDescriptor(l1desc_addr
, (uint8_t*)&currState
->l1Desc
.data
,
555 sizeof(uint32_t), flag
, L1
, &doL1DescEvent
,
556 &TableWalker::doL1Descriptor
);
558 f
= currState
->fault
;
// AArch32 long-descriptor (LPAE) walk: choose VTTBR/HTTBR/TTBR0/TTBR1 from
// the translation regime and the TTBCR.T0SZ/T1SZ region boundaries
// (ARM ARM issue C B3.6.4), pick the starting lookup level, compute the
// first descriptor address (B3.6.6), run the trickbox check and issue the
// descriptor fetch.
// NOTE(review): extraction damage — fused line numbers; declarations of
// tsz/n, several fault arguments, else branches and braces are missing
// from this view; code kept byte-identical.
565 TableWalker::processWalkLPAE()
567 Addr ttbr
, ttbr0_max
, ttbr1_min
, desc_addr
;
569 LookupLevel start_lookup_level
= L1
;
571 DPRINTF(TLB
, "Beginning table walk for address %#x, TTBCR: %#x\n",
572 currState
->vaddr_tainted
, currState
->ttbcr
);
574 statWalkWaitTime
.sample(curTick() - currState
->startTime
);
576 Request::Flags flag
= Request::PT_WALK
;
577 if (currState
->isSecure
)
578 flag
.set(Request::SECURE
);
580 // work out which base address register to use, if in hyp mode we always
// Stage-2 regime: VTTBR, with start level from VTCR.SL0.
583 DPRINTF(TLB
, " - Selecting VTTBR (long-desc.)\n");
584 ttbr
= currState
->tc
->readMiscReg(MISCREG_VTTBR
);
585 tsz
= sext
<4>(currState
->vtcr
.t0sz
);
586 start_lookup_level
= currState
->vtcr
.sl0
? L1
: L2
;
587 currState
->isUncacheable
= currState
->vtcr
.irgn0
== 0;
588 } else if (currState
->isHyp
) {
589 DPRINTF(TLB
, " - Selecting HTTBR (long-desc.)\n");
590 ttbr
= currState
->tc
->readMiscReg(MISCREG_HTTBR
);
591 tsz
= currState
->htcr
.t0sz
;
592 currState
->isUncacheable
= currState
->htcr
.irgn0
== 0;
594 assert(longDescFormatInUse(currState
->tc
));
596 // Determine boundaries of TTBR0/1 regions
597 if (currState
->ttbcr
.t0sz
)
598 ttbr0_max
= (1ULL << (32 - currState
->ttbcr
.t0sz
)) - 1;
599 else if (currState
->ttbcr
.t1sz
)
600 ttbr0_max
= (1ULL << 32) -
601 (1ULL << (32 - currState
->ttbcr
.t1sz
)) - 1;
603 ttbr0_max
= (1ULL << 32) - 1;
604 if (currState
->ttbcr
.t1sz
)
605 ttbr1_min
= (1ULL << 32) - (1ULL << (32 - currState
->ttbcr
.t1sz
));
607 ttbr1_min
= (1ULL << (32 - currState
->ttbcr
.t0sz
));
609 const bool is_atomic
= currState
->req
->isAtomic();
611 // The following code snippet selects the appropriate translation table base
612 // address (TTBR0 or TTBR1) and the appropriate starting lookup level
613 // depending on the address range supported by the translation table (ARM
614 // ARM issue C B3.6.4)
615 if (currState
->vaddr
<= ttbr0_max
) {
616 DPRINTF(TLB
, " - Selecting TTBR0 (long-desc.)\n");
617 // Check if table walk is allowed
618 if (currState
->ttbcr
.epd0
) {
619 if (currState
->isFetch
)
620 return std::make_shared
<PrefetchAbort
>(
621 currState
->vaddr_tainted
,
622 ArmFault::TranslationLL
+ L1
,
626 return std::make_shared
<DataAbort
>(
627 currState
->vaddr_tainted
,
628 TlbEntry::DomainType::NoAccess
,
629 is_atomic
? false : currState
->isWrite
,
630 ArmFault::TranslationLL
+ L1
,
634 ttbr
= currState
->tc
->readMiscReg(snsBankedIndex(
635 MISCREG_TTBR0
, currState
->tc
, !currState
->isSecure
));
636 tsz
= currState
->ttbcr
.t0sz
;
637 currState
->isUncacheable
= currState
->ttbcr
.irgn0
== 0;
638 if (ttbr0_max
< (1ULL << 30)) // Upper limit < 1 GB
639 start_lookup_level
= L2
;
640 } else if (currState
->vaddr
>= ttbr1_min
) {
641 DPRINTF(TLB
, " - Selecting TTBR1 (long-desc.)\n");
642 // Check if table walk is allowed
643 if (currState
->ttbcr
.epd1
) {
644 if (currState
->isFetch
)
645 return std::make_shared
<PrefetchAbort
>(
646 currState
->vaddr_tainted
,
647 ArmFault::TranslationLL
+ L1
,
651 return std::make_shared
<DataAbort
>(
652 currState
->vaddr_tainted
,
653 TlbEntry::DomainType::NoAccess
,
654 is_atomic
? false : currState
->isWrite
,
655 ArmFault::TranslationLL
+ L1
,
659 ttbr
= currState
->tc
->readMiscReg(snsBankedIndex(
660 MISCREG_TTBR1
, currState
->tc
, !currState
->isSecure
));
661 tsz
= currState
->ttbcr
.t1sz
;
662 currState
->isUncacheable
= currState
->ttbcr
.irgn1
== 0;
663 // Lower limit >= 3 GB
664 if (ttbr1_min
>= (1ULL << 31) + (1ULL << 30))
665 start_lookup_level
= L2
;
667 // Out of boundaries -> translation fault
668 if (currState
->isFetch
)
669 return std::make_shared
<PrefetchAbort
>(
670 currState
->vaddr_tainted
,
671 ArmFault::TranslationLL
+ L1
,
675 return std::make_shared
<DataAbort
>(
676 currState
->vaddr_tainted
,
677 TlbEntry::DomainType::NoAccess
,
678 is_atomic
? false : currState
->isWrite
,
679 ArmFault::TranslationLL
+ L1
,
680 isStage2
, ArmFault::LpaeTran
);
685 // Perform lookup (ARM ARM issue C B3.6.6)
686 if (start_lookup_level
== L1
) {
688 desc_addr
= mbits(ttbr
, 39, n
) |
689 (bits(currState
->vaddr
, n
+ 26, 30) << 3);
690 DPRINTF(TLB
, " - Descriptor at address %#x (%s) (long-desc.)\n",
691 desc_addr
, currState
->isSecure
? "s" : "ns");
693 // Skip first-level lookup
694 n
= (tsz
>= 2 ? 14 - tsz
: 12);
695 desc_addr
= mbits(ttbr
, 39, n
) |
696 (bits(currState
->vaddr
, n
+ 17, 21) << 3);
697 DPRINTF(TLB
, " - Descriptor at address %#x (%s) (long-desc.)\n",
698 desc_addr
, currState
->isSecure
? "s" : "ns");
701 // Trickbox address check
702 Fault f
= testWalk(desc_addr
, sizeof(uint64_t),
703 TlbEntry::DomainType::NoAccess
, start_lookup_level
);
705 DPRINTF(TLB
, "Trickbox check caused fault on %#x\n", currState
->vaddr_tainted
);
706 if (currState
->timing
) {
708 nextWalk(currState
->tc
);
711 currState
->tc
= NULL
;
712 currState
->req
= NULL
;
717 if (currState
->sctlr
.c
== 0 || currState
->isUncacheable
) {
718 flag
.set(Request::UNCACHEABLE
);
// LPAE on AArch32 always uses 4KB granules.
721 currState
->longDesc
.lookupLevel
= start_lookup_level
;
722 currState
->longDesc
.aarch64
= false;
723 currState
->longDesc
.grainSize
= Grain4KB
;
725 bool delayed
= fetchDescriptor(desc_addr
, (uint8_t*)&currState
->longDesc
.data
,
726 sizeof(uint64_t), flag
, start_lookup_level
,
727 LongDescEventByLevel
[start_lookup_level
],
728 &TableWalker::doLongDescriptor
);
730 f
= currState
->fault
;
// Clamp a computed AArch64 table size to the architectural min/max.
// NOTE(review): the body is not visible in this extraction — verify the
// clamp bounds against the full source.
737 TableWalker::adjustTableSizeAArch64(unsigned tsz
)
// True when an output address uses bits above the configured physical
// address range (i.e., an AArch64 Address Size fault should be raised).
// NOTE(review): the return-type line and braces are missing from this
// extraction; code kept byte-identical.
747 TableWalker::checkAddrSizeFaultAArch64(Addr addr
, int currPhysAddrRange
)
749 return (currPhysAddrRange
!= MaxPhysAddrRange
&&
750 bits(addr
, MaxPhysAddrRange
- 1, currPhysAddrRange
));
// AArch64 walk: per-EL (and stage-2) selection of TTBR, table size (tsz),
// granule (tg) and PA-size field (ps); VA range check with translation
// fault on out-of-range addresses; starting-lookup-level computation from
// the grain-size cutoff tables (ARM DDI 0487A pseudocode); base/descriptor
// address computation; address-size fault check; trickbox check; and the
// first descriptor fetch (queued in timing mode, inline otherwise).
// NOTE(review): extraction damage — fused line numbers; case labels,
// declarations of ttbr/tsz/ps/stride/fault handling branches and braces are
// missing from this view; code kept byte-identical.
754 TableWalker::processWalkAArch64()
756 assert(currState
->aarch64
);
758 DPRINTF(TLB
, "Beginning table walk for address %#llx, TCR: %#llx\n",
759 currState
->vaddr_tainted
, currState
->tcr
);
// TG0/TG1 encode granule size differently; two decode tables.
761 static const GrainSize GrainMap_tg0
[] =
762 { Grain4KB
, Grain64KB
, Grain16KB
, ReservedGrain
};
763 static const GrainSize GrainMap_tg1
[] =
764 { ReservedGrain
, Grain16KB
, Grain4KB
, Grain64KB
};
766 statWalkWaitTime
.sample(curTick() - currState
->startTime
);
768 // Determine TTBR, table size, granule size and phys. address range
771 GrainSize tg
= Grain4KB
; // grain size computed from tg* field
774 LookupLevel start_lookup_level
= MAX_LOOKUP_LEVELS
;
776 switch (currState
->el
) {
780 DPRINTF(TLB
, " - Selecting VTTBR0 (AArch64 stage 2)\n");
781 ttbr
= currState
->tc
->readMiscReg(MISCREG_VTTBR_EL2
);
782 tsz
= 64 - currState
->vtcr
.t0sz64
;
783 tg
= GrainMap_tg0
[currState
->vtcr
.tg0
];
784 // ARM DDI 0487A.f D7-2148
785 // The starting level of stage 2 translation depends on
786 // VTCR_EL2.SL0 and VTCR_EL2.TG0
787 LookupLevel __
= MAX_LOOKUP_LEVELS
; // invalid level
788 uint8_t sl_tg
= (currState
->vtcr
.sl0
<< 2) | currState
->vtcr
.tg0
;
789 static const LookupLevel SLL
[] = {
790 L2
, L3
, L3
, __
, // sl0 == 0
791 L1
, L2
, L2
, __
, // sl0 == 1, etc.
795 start_lookup_level
= SLL
[sl_tg
];
796 panic_if(start_lookup_level
== MAX_LOOKUP_LEVELS
,
797 "Cannot discern lookup level from vtcr.{sl0,tg0}");
798 ps
= currState
->vtcr
.ps
;
799 currState
->isUncacheable
= currState
->vtcr
.irgn0
== 0;
// EL0/EL1 regime: TTBR0/TTBR1 split on VA[63:48].
801 switch (bits(currState
->vaddr
, 63,48)) {
803 DPRINTF(TLB
, " - Selecting TTBR0 (AArch64)\n");
804 ttbr
= currState
->tc
->readMiscReg(MISCREG_TTBR0_EL1
);
805 tsz
= adjustTableSizeAArch64(64 - currState
->tcr
.t0sz
);
806 tg
= GrainMap_tg0
[currState
->tcr
.tg0
];
807 currState
->hpd
= currState
->tcr
.hpd0
;
808 currState
->isUncacheable
= currState
->tcr
.irgn0
== 0;
809 if (bits(currState
->vaddr
, 63, tsz
) != 0x0 ||
814 DPRINTF(TLB
, " - Selecting TTBR1 (AArch64)\n");
815 ttbr
= currState
->tc
->readMiscReg(MISCREG_TTBR1_EL1
);
816 tsz
= adjustTableSizeAArch64(64 - currState
->tcr
.t1sz
);
817 tg
= GrainMap_tg1
[currState
->tcr
.tg1
];
818 currState
->hpd
= currState
->tcr
.hpd1
;
819 currState
->isUncacheable
= currState
->tcr
.irgn1
== 0;
820 if (bits(currState
->vaddr
, 63, tsz
) != mask(64-tsz
) ||
825 // top two bytes must be all 0s or all 1s, else invalid addr
828 ps
= currState
->tcr
.ips
;
// EL2 regime: TTBR1_EL2 is only valid with HCR_EL2.E2H set.
832 switch(bits(currState
->vaddr
, 63,48)) {
834 DPRINTF(TLB
, " - Selecting TTBR0 (AArch64)\n");
835 ttbr
= currState
->tc
->readMiscReg(MISCREG_TTBR0_EL2
);
836 tsz
= adjustTableSizeAArch64(64 - currState
->tcr
.t0sz
);
837 tg
= GrainMap_tg0
[currState
->tcr
.tg0
];
838 currState
->hpd
= currState
->hcr
.e2h
?
839 currState
->tcr
.hpd0
: currState
->tcr
.hpd
;
840 currState
->isUncacheable
= currState
->tcr
.irgn0
== 0;
844 DPRINTF(TLB
, " - Selecting TTBR1 (AArch64)\n");
845 ttbr
= currState
->tc
->readMiscReg(MISCREG_TTBR1_EL2
);
846 tsz
= adjustTableSizeAArch64(64 - currState
->tcr
.t1sz
);
847 tg
= GrainMap_tg1
[currState
->tcr
.tg1
];
848 currState
->hpd
= currState
->tcr
.hpd1
;
849 currState
->isUncacheable
= currState
->tcr
.irgn1
== 0;
850 if (bits(currState
->vaddr
, 63, tsz
) != mask(64-tsz
) ||
851 currState
->tcr
.epd1
|| !currState
->hcr
.e2h
)
856 // invalid addr if top two bytes are not all 0s
859 ps
= currState
->tcr
.ps
;
// EL3 regime: only TTBR0_EL3, low VA half.
862 switch(bits(currState
->vaddr
, 63,48)) {
864 DPRINTF(TLB
, " - Selecting TTBR0 (AArch64)\n");
865 ttbr
= currState
->tc
->readMiscReg(MISCREG_TTBR0_EL3
);
866 tsz
= adjustTableSizeAArch64(64 - currState
->tcr
.t0sz
);
867 tg
= GrainMap_tg0
[currState
->tcr
.tg0
];
868 currState
->hpd
= currState
->tcr
.hpd
;
869 currState
->isUncacheable
= currState
->tcr
.irgn0
== 0;
872 // invalid addr if top two bytes are not all 0s
875 ps
= currState
->tcr
.ps
;
879 const bool is_atomic
= currState
->req
->isAtomic();
// VA out of configured range: level-0 translation fault.
883 if (currState
->isFetch
)
884 f
= std::make_shared
<PrefetchAbort
>(
885 currState
->vaddr_tainted
,
886 ArmFault::TranslationLL
+ L0
, isStage2
,
889 f
= std::make_shared
<DataAbort
>(
890 currState
->vaddr_tainted
,
891 TlbEntry::DomainType::NoAccess
,
892 is_atomic
? false : currState
->isWrite
,
893 ArmFault::TranslationLL
+ L0
,
894 isStage2
, ArmFault::LpaeTran
);
896 if (currState
->timing
) {
898 nextWalk(currState
->tc
);
901 currState
->tc
= NULL
;
902 currState
->req
= NULL
;
908 if (tg
== ReservedGrain
) {
909 warn_once("Reserved granule size requested; gem5's IMPLEMENTATION "
910 "DEFINED behavior takes this to mean 4KB granules\n");
914 // Determine starting lookup level
915 // See aarch64/translation/walk in Appendix G: ARMv8 Pseudocode Library
916 // in ARM DDI 0487A. These table values correspond to the cascading tests
917 // to compute the lookup level and are of the form
918 // (grain_size + N*stride), for N = {1, 2, 3}.
919 // A value of 64 will never succeed and a value of 0 will always succeed.
920 if (start_lookup_level
== MAX_LOOKUP_LEVELS
) {
922 GrainSize grain_size
;
923 unsigned lookup_level_cutoff
[MAX_LOOKUP_LEVELS
];
925 static const GrainMap GM
[] = {
926 { Grain4KB
, { 39, 30, 0, 0 } },
927 { Grain16KB
, { 47, 36, 25, 0 } },
928 { Grain64KB
, { 64, 42, 29, 0 } }
931 const unsigned *lookup
= NULL
; // points to a lookup_level_cutoff
933 for (unsigned i
= 0; i
< 3; ++i
) { // choose entry of GM[]
934 if (tg
== GM
[i
].grain_size
) {
935 lookup
= GM
[i
].lookup_level_cutoff
;
941 for (int L
= L0
; L
!= MAX_LOOKUP_LEVELS
; ++L
) {
942 if (tsz
> lookup
[L
]) {
943 start_lookup_level
= (LookupLevel
) L
;
947 panic_if(start_lookup_level
== MAX_LOOKUP_LEVELS
,
948 "Table walker couldn't find lookup level\n");
953 // Determine table base address
954 int base_addr_lo
= 3 + tsz
- stride
* (3 - start_lookup_level
) - tg
;
955 Addr base_addr
= mbits(ttbr
, 47, base_addr_lo
);
957 // Determine physical address size and raise an Address Size Fault if
959 int pa_range
= decodePhysAddrRange64(ps
);
960 // Clamp to lower limit
961 if (pa_range
> physAddrRange
)
962 currState
->physAddrRange
= physAddrRange
;
964 currState
->physAddrRange
= pa_range
;
965 if (checkAddrSizeFaultAArch64(base_addr
, currState
->physAddrRange
)) {
966 DPRINTF(TLB
, "Address size fault before any lookup\n");
968 if (currState
->isFetch
)
969 f
= std::make_shared
<PrefetchAbort
>(
970 currState
->vaddr_tainted
,
971 ArmFault::AddressSizeLL
+ start_lookup_level
,
975 f
= std::make_shared
<DataAbort
>(
976 currState
->vaddr_tainted
,
977 TlbEntry::DomainType::NoAccess
,
978 is_atomic
? false : currState
->isWrite
,
979 ArmFault::AddressSizeLL
+ start_lookup_level
,
984 if (currState
->timing
) {
986 nextWalk(currState
->tc
);
989 currState
->tc
= NULL
;
990 currState
->req
= NULL
;
996 // Determine descriptor address
997 Addr desc_addr
= base_addr
|
998 (bits(currState
->vaddr
, tsz
- 1,
999 stride
* (3 - start_lookup_level
) + tg
) << 3);
1001 // Trickbox address check
1002 Fault f
= testWalk(desc_addr
, sizeof(uint64_t),
1003 TlbEntry::DomainType::NoAccess
, start_lookup_level
);
1005 DPRINTF(TLB
, "Trickbox check caused fault on %#x\n", currState
->vaddr_tainted
);
1006 if (currState
->timing
) {
1008 nextWalk(currState
->tc
);
1011 currState
->tc
= NULL
;
1012 currState
->req
= NULL
;
1017 Request::Flags flag
= Request::PT_WALK
;
1018 if (currState
->sctlr
.c
== 0 || currState
->isUncacheable
) {
1019 flag
.set(Request::UNCACHEABLE
);
1022 if (currState
->isSecure
) {
1023 flag
.set(Request::SECURE
);
1026 currState
->longDesc
.lookupLevel
= start_lookup_level
;
1027 currState
->longDesc
.aarch64
= true;
1028 currState
->longDesc
.grainSize
= tg
;
1030 if (currState
->timing
) {
1031 fetchDescriptor(desc_addr
, (uint8_t*) &currState
->longDesc
.data
,
1032 sizeof(uint64_t), flag
, start_lookup_level
,
1033 LongDescEventByLevel
[start_lookup_level
], NULL
);
1035 fetchDescriptor(desc_addr
, (uint8_t*)&currState
->longDesc
.data
,
1036 sizeof(uint64_t), flag
, -1, NULL
,
1037 &TableWalker::doLongDescriptor
);
1038 f
= currState
->fault
;
1045 TableWalker::memAttrs(ThreadContext
*tc
, TlbEntry
&te
, SCTLR sctlr
,
1046 uint8_t texcb
, bool s
)
1048 // Note: tc and sctlr local variables are hiding tc and sctrl class
1050 DPRINTF(TLBVerbose
, "memAttrs texcb:%d s:%d\n", texcb
, s
);
1051 te
.shareable
= false; // default value
1052 te
.nonCacheable
= false;
1053 te
.outerShareable
= false;
1054 if (sctlr
.tre
== 0 || ((sctlr
.tre
== 1) && (sctlr
.m
== 0))) {
1056 case 0: // Stongly-ordered
1057 te
.nonCacheable
= true;
1058 te
.mtype
= TlbEntry::MemoryType::StronglyOrdered
;
1059 te
.shareable
= true;
1063 case 1: // Shareable Device
1064 te
.nonCacheable
= true;
1065 te
.mtype
= TlbEntry::MemoryType::Device
;
1066 te
.shareable
= true;
1070 case 2: // Outer and Inner Write-Through, no Write-Allocate
1071 te
.mtype
= TlbEntry::MemoryType::Normal
;
1074 te
.outerAttrs
= bits(texcb
, 1, 0);
1076 case 3: // Outer and Inner Write-Back, no Write-Allocate
1077 te
.mtype
= TlbEntry::MemoryType::Normal
;
1080 te
.outerAttrs
= bits(texcb
, 1, 0);
1082 case 4: // Outer and Inner Non-cacheable
1083 te
.nonCacheable
= true;
1084 te
.mtype
= TlbEntry::MemoryType::Normal
;
1087 te
.outerAttrs
= bits(texcb
, 1, 0);
1090 panic("Reserved texcb value!\n");
1092 case 6: // Implementation Defined
1093 panic("Implementation-defined texcb value!\n");
1095 case 7: // Outer and Inner Write-Back, Write-Allocate
1096 te
.mtype
= TlbEntry::MemoryType::Normal
;
1101 case 8: // Non-shareable Device
1102 te
.nonCacheable
= true;
1103 te
.mtype
= TlbEntry::MemoryType::Device
;
1104 te
.shareable
= false;
1108 case 9 ... 15: // Reserved
1109 panic("Reserved texcb value!\n");
1111 case 16 ... 31: // Cacheable Memory
1112 te
.mtype
= TlbEntry::MemoryType::Normal
;
1114 if (bits(texcb
, 1,0) == 0 || bits(texcb
, 3,2) == 0)
1115 te
.nonCacheable
= true;
1116 te
.innerAttrs
= bits(texcb
, 1, 0);
1117 te
.outerAttrs
= bits(texcb
, 3, 2);
1120 panic("More than 32 states for 5 bits?\n");
1124 PRRR prrr
= tc
->readMiscReg(snsBankedIndex(MISCREG_PRRR
,
1125 currState
->tc
, !currState
->isSecure
));
1126 NMRR nmrr
= tc
->readMiscReg(snsBankedIndex(MISCREG_NMRR
,
1127 currState
->tc
, !currState
->isSecure
));
1128 DPRINTF(TLBVerbose
, "memAttrs PRRR:%08x NMRR:%08x\n", prrr
, nmrr
);
1129 uint8_t curr_tr
= 0, curr_ir
= 0, curr_or
= 0;
1130 switch(bits(texcb
, 2,0)) {
1135 te
.outerShareable
= (prrr
.nos0
== 0);
1141 te
.outerShareable
= (prrr
.nos1
== 0);
1147 te
.outerShareable
= (prrr
.nos2
== 0);
1153 te
.outerShareable
= (prrr
.nos3
== 0);
1159 te
.outerShareable
= (prrr
.nos4
== 0);
1165 te
.outerShareable
= (prrr
.nos5
== 0);
1168 panic("Imp defined type\n");
1173 te
.outerShareable
= (prrr
.nos7
== 0);
1179 DPRINTF(TLBVerbose
, "StronglyOrdered\n");
1180 te
.mtype
= TlbEntry::MemoryType::StronglyOrdered
;
1181 te
.nonCacheable
= true;
1184 te
.shareable
= true;
1187 DPRINTF(TLBVerbose
, "Device ds1:%d ds0:%d s:%d\n",
1188 prrr
.ds1
, prrr
.ds0
, s
);
1189 te
.mtype
= TlbEntry::MemoryType::Device
;
1190 te
.nonCacheable
= true;
1194 te
.shareable
= true;
1196 te
.shareable
= true;
1199 DPRINTF(TLBVerbose
, "Normal ns1:%d ns0:%d s:%d\n",
1200 prrr
.ns1
, prrr
.ns0
, s
);
1201 te
.mtype
= TlbEntry::MemoryType::Normal
;
1203 te
.shareable
= true;
1205 te
.shareable
= true;
1208 panic("Reserved type");
1211 if (te
.mtype
== TlbEntry::MemoryType::Normal
){
1214 te
.nonCacheable
= true;
1230 te
.nonCacheable
= true;
1245 DPRINTF(TLBVerbose
, "memAttrs: shareable: %d, innerAttrs: %d, "
1247 te
.shareable
, te
.innerAttrs
, te
.outerAttrs
);
1248 te
.setAttributes(false);
1252 TableWalker::memAttrsLPAE(ThreadContext
*tc
, TlbEntry
&te
,
1253 LongDescriptor
&lDescriptor
)
1258 uint8_t sh
= lDescriptor
.sh();
1259 // Different format and source of attributes if this is a stage 2
1262 attr
= lDescriptor
.memAttr();
1263 uint8_t attr_3_2
= (attr
>> 2) & 0x3;
1264 uint8_t attr_1_0
= attr
& 0x3;
1266 DPRINTF(TLBVerbose
, "memAttrsLPAE MemAttr:%#x sh:%#x\n", attr
, sh
);
1268 if (attr_3_2
== 0) {
1269 te
.mtype
= attr_1_0
== 0 ? TlbEntry::MemoryType::StronglyOrdered
1270 : TlbEntry::MemoryType::Device
;
1272 te
.innerAttrs
= attr_1_0
== 0 ? 1 : 3;
1273 te
.nonCacheable
= true;
1275 te
.mtype
= TlbEntry::MemoryType::Normal
;
1276 te
.outerAttrs
= attr_3_2
== 1 ? 0 :
1277 attr_3_2
== 2 ? 2 : 1;
1278 te
.innerAttrs
= attr_1_0
== 1 ? 0 :
1279 attr_1_0
== 2 ? 6 : 5;
1280 te
.nonCacheable
= (attr_3_2
== 1) || (attr_1_0
== 1);
1283 uint8_t attrIndx
= lDescriptor
.attrIndx();
1285 // LPAE always uses remapping of memory attributes, irrespective of the
1286 // value of SCTLR.TRE
1287 MiscRegIndex reg
= attrIndx
& 0x4 ? MISCREG_MAIR1
: MISCREG_MAIR0
;
1288 int reg_as_int
= snsBankedIndex(reg
, currState
->tc
,
1289 !currState
->isSecure
);
1290 uint32_t mair
= currState
->tc
->readMiscReg(reg_as_int
);
1291 attr
= (mair
>> (8 * (attrIndx
% 4))) & 0xff;
1292 uint8_t attr_7_4
= bits(attr
, 7, 4);
1293 uint8_t attr_3_0
= bits(attr
, 3, 0);
1294 DPRINTF(TLBVerbose
, "memAttrsLPAE AttrIndx:%#x sh:%#x, attr %#x\n", attrIndx
, sh
, attr
);
1296 // Note: the memory subsystem only cares about the 'cacheable' memory
1297 // attribute. The other attributes are only used to fill the PAR register
1298 // accordingly to provide the illusion of full support
1299 te
.nonCacheable
= false;
1303 // Strongly-ordered or Device memory
1304 if (attr_3_0
== 0x0)
1305 te
.mtype
= TlbEntry::MemoryType::StronglyOrdered
;
1306 else if (attr_3_0
== 0x4)
1307 te
.mtype
= TlbEntry::MemoryType::Device
;
1309 panic("Unpredictable behavior\n");
1310 te
.nonCacheable
= true;
1314 // Normal memory, Outer Non-cacheable
1315 te
.mtype
= TlbEntry::MemoryType::Normal
;
1317 if (attr_3_0
== 0x4)
1318 // Inner Non-cacheable
1319 te
.nonCacheable
= true;
1320 else if (attr_3_0
< 0x8)
1321 panic("Unpredictable behavior\n");
1331 if (attr_7_4
& 0x4) {
1332 te
.outerAttrs
= (attr_7_4
& 1) ? 1 : 3;
1334 te
.outerAttrs
= 0x2;
1336 // Normal memory, Outer Cacheable
1337 te
.mtype
= TlbEntry::MemoryType::Normal
;
1338 if (attr_3_0
!= 0x4 && attr_3_0
< 0x8)
1339 panic("Unpredictable behavior\n");
1342 panic("Unpredictable behavior\n");
1348 te
.innerAttrs
= 0x1;
1351 te
.innerAttrs
= attr_7_4
== 0 ? 0x3 : 0;
1363 te
.innerAttrs
= attr_3_0
& 1 ? 0x5 : 0x7;
1366 panic("Unpredictable behavior\n");
1371 te
.outerShareable
= sh
== 2;
1372 te
.shareable
= (sh
& 0x2) ? true : false;
1373 te
.setAttributes(true);
1374 te
.attributes
|= (uint64_t) attr
<< 56;
1378 TableWalker::memAttrsAArch64(ThreadContext
*tc
, TlbEntry
&te
,
1379 LongDescriptor
&lDescriptor
)
1384 uint8_t sh
= lDescriptor
.sh();
1387 attr
= lDescriptor
.memAttr();
1388 uint8_t attr_hi
= (attr
>> 2) & 0x3;
1389 uint8_t attr_lo
= attr
& 0x3;
1391 DPRINTF(TLBVerbose
, "memAttrsAArch64 MemAttr:%#x sh:%#x\n", attr
, sh
);
1394 te
.mtype
= attr_lo
== 0 ? TlbEntry::MemoryType::StronglyOrdered
1395 : TlbEntry::MemoryType::Device
;
1397 te
.innerAttrs
= attr_lo
== 0 ? 1 : 3;
1398 te
.nonCacheable
= true;
1400 te
.mtype
= TlbEntry::MemoryType::Normal
;
1401 te
.outerAttrs
= attr_hi
== 1 ? 0 :
1402 attr_hi
== 2 ? 2 : 1;
1403 te
.innerAttrs
= attr_lo
== 1 ? 0 :
1404 attr_lo
== 2 ? 6 : 5;
1405 // Treat write-through memory as uncacheable, this is safe
1406 // but for performance reasons not optimal.
1407 te
.nonCacheable
= (attr_hi
== 1) || (attr_hi
== 2) ||
1408 (attr_lo
== 1) || (attr_lo
== 2);
1411 uint8_t attrIndx
= lDescriptor
.attrIndx();
1413 DPRINTF(TLBVerbose
, "memAttrsAArch64 AttrIndx:%#x sh:%#x\n", attrIndx
, sh
);
1417 switch (currState
->el
) {
1420 mair
= tc
->readMiscReg(MISCREG_MAIR_EL1
);
1423 mair
= tc
->readMiscReg(MISCREG_MAIR_EL2
);
1426 mair
= tc
->readMiscReg(MISCREG_MAIR_EL3
);
1429 panic("Invalid exception level");
1433 // Select attributes
1434 attr
= bits(mair
, 8 * attrIndx
+ 7, 8 * attrIndx
);
1435 attr_lo
= bits(attr
, 3, 0);
1436 attr_hi
= bits(attr
, 7, 4);
1439 te
.mtype
= attr_hi
== 0 ? TlbEntry::MemoryType::Device
: TlbEntry::MemoryType::Normal
;
1442 te
.nonCacheable
= false;
1443 if (te
.mtype
== TlbEntry::MemoryType::Device
) { // Device memory
1444 te
.nonCacheable
= true;
1446 // Treat write-through memory as uncacheable, this is safe
1447 // but for performance reasons not optimal.
1449 case 0x1 ... 0x3: // Normal Memory, Outer Write-through transient
1450 case 0x4: // Normal memory, Outer Non-cacheable
1451 case 0x8 ... 0xb: // Normal Memory, Outer Write-through non-transient
1452 te
.nonCacheable
= true;
1455 case 0x1 ... 0x3: // Normal Memory, Inner Write-through transient
1456 case 0x9 ... 0xb: // Normal Memory, Inner Write-through non-transient
1457 warn_if(!attr_hi
, "Unpredictable behavior");
1459 case 0x4: // Device-nGnRE memory or
1460 // Normal memory, Inner Non-cacheable
1461 case 0x8: // Device-nGRE memory or
1462 // Normal memory, Inner Write-through non-transient
1463 te
.nonCacheable
= true;
1466 te
.shareable
= sh
== 2;
1467 te
.outerShareable
= (sh
& 0x2) ? true : false;
1468 // Attributes formatted according to the 64-bit PAR
1469 te
.attributes
= ((uint64_t) attr
<< 56) |
1470 (1 << 11) | // LPAE bit
1471 (te
.ns
<< 9) | // NS bit
1477 TableWalker::doL1Descriptor()
1479 if (currState
->fault
!= NoFault
) {
1483 currState
->l1Desc
.data
= htog(currState
->l1Desc
.data
,
1484 byteOrder(currState
->tc
));
1486 DPRINTF(TLB
, "L1 descriptor for %#x is %#x\n",
1487 currState
->vaddr_tainted
, currState
->l1Desc
.data
);
1490 const bool is_atomic
= currState
->req
->isAtomic();
1492 switch (currState
->l1Desc
.type()) {
1493 case L1Descriptor::Ignore
:
1494 case L1Descriptor::Reserved
:
1495 if (!currState
->timing
) {
1496 currState
->tc
= NULL
;
1497 currState
->req
= NULL
;
1499 DPRINTF(TLB
, "L1 Descriptor Reserved/Ignore, causing fault\n");
1500 if (currState
->isFetch
)
1502 std::make_shared
<PrefetchAbort
>(
1503 currState
->vaddr_tainted
,
1504 ArmFault::TranslationLL
+ L1
,
1506 ArmFault::VmsaTran
);
1509 std::make_shared
<DataAbort
>(
1510 currState
->vaddr_tainted
,
1511 TlbEntry::DomainType::NoAccess
,
1512 is_atomic
? false : currState
->isWrite
,
1513 ArmFault::TranslationLL
+ L1
, isStage2
,
1514 ArmFault::VmsaTran
);
1516 case L1Descriptor::Section
:
1517 if (currState
->sctlr
.afe
&& bits(currState
->l1Desc
.ap(), 0) == 0) {
1518 /** @todo: check sctlr.ha (bit[17]) if Hardware Access Flag is
1519 * enabled if set, do l1.Desc.setAp0() instead of generating
1523 currState
->fault
= std::make_shared
<DataAbort
>(
1524 currState
->vaddr_tainted
,
1525 currState
->l1Desc
.domain(),
1526 is_atomic
? false : currState
->isWrite
,
1527 ArmFault::AccessFlagLL
+ L1
,
1529 ArmFault::VmsaTran
);
1531 if (currState
->l1Desc
.supersection()) {
1532 panic("Haven't implemented supersections\n");
1534 insertTableEntry(currState
->l1Desc
, false);
1536 case L1Descriptor::PageTable
:
1539 l2desc_addr
= currState
->l1Desc
.l2Addr() |
1540 (bits(currState
->vaddr
, 19, 12) << 2);
1541 DPRINTF(TLB
, "L1 descriptor points to page table at: %#x (%s)\n",
1542 l2desc_addr
, currState
->isSecure
? "s" : "ns");
1544 // Trickbox address check
1545 currState
->fault
= testWalk(l2desc_addr
, sizeof(uint32_t),
1546 currState
->l1Desc
.domain(), L2
);
1548 if (currState
->fault
) {
1549 if (!currState
->timing
) {
1550 currState
->tc
= NULL
;
1551 currState
->req
= NULL
;
1556 Request::Flags flag
= Request::PT_WALK
;
1558 if (currState
->sctlr
.c
== 0 || currState
->isUncacheable
) {
1559 flag
.set(Request::UNCACHEABLE
);
1562 if (currState
->isSecure
)
1563 flag
.set(Request::SECURE
);
1566 delayed
= fetchDescriptor(l2desc_addr
,
1567 (uint8_t*)&currState
->l2Desc
.data
,
1568 sizeof(uint32_t), flag
, -1, &doL2DescEvent
,
1569 &TableWalker::doL2Descriptor
);
1571 currState
->delayed
= true;
1577 panic("A new type in a 2 bit field?\n");
1582 TableWalker::generateLongDescFault(ArmFault::FaultSource src
)
1584 if (currState
->isFetch
) {
1585 return std::make_shared
<PrefetchAbort
>(
1586 currState
->vaddr_tainted
,
1587 src
+ currState
->longDesc
.lookupLevel
,
1589 ArmFault::LpaeTran
);
1591 return std::make_shared
<DataAbort
>(
1592 currState
->vaddr_tainted
,
1593 TlbEntry::DomainType::NoAccess
,
1594 currState
->req
->isAtomic() ? false : currState
->isWrite
,
1595 src
+ currState
->longDesc
.lookupLevel
,
1597 ArmFault::LpaeTran
);
1602 TableWalker::doLongDescriptor()
1604 if (currState
->fault
!= NoFault
) {
1608 currState
->longDesc
.data
= htog(currState
->longDesc
.data
,
1609 byteOrder(currState
->tc
));
1611 DPRINTF(TLB
, "L%d descriptor for %#llx is %#llx (%s)\n",
1612 currState
->longDesc
.lookupLevel
, currState
->vaddr_tainted
,
1613 currState
->longDesc
.data
,
1614 currState
->aarch64
? "AArch64" : "long-desc.");
1616 if ((currState
->longDesc
.type() == LongDescriptor::Block
) ||
1617 (currState
->longDesc
.type() == LongDescriptor::Page
)) {
1618 DPRINTF(TLBVerbose
, "Analyzing L%d descriptor: %#llx, pxn: %d, "
1619 "xn: %d, ap: %d, af: %d, type: %d\n",
1620 currState
->longDesc
.lookupLevel
,
1621 currState
->longDesc
.data
,
1622 currState
->longDesc
.pxn(),
1623 currState
->longDesc
.xn(),
1624 currState
->longDesc
.ap(),
1625 currState
->longDesc
.af(),
1626 currState
->longDesc
.type());
1628 DPRINTF(TLBVerbose
, "Analyzing L%d descriptor: %#llx, type: %d\n",
1629 currState
->longDesc
.lookupLevel
,
1630 currState
->longDesc
.data
,
1631 currState
->longDesc
.type());
1636 switch (currState
->longDesc
.type()) {
1637 case LongDescriptor::Invalid
:
1638 DPRINTF(TLB
, "L%d descriptor Invalid, causing fault type %d\n",
1639 currState
->longDesc
.lookupLevel
,
1640 ArmFault::TranslationLL
+ currState
->longDesc
.lookupLevel
);
1642 currState
->fault
= generateLongDescFault(ArmFault::TranslationLL
);
1643 if (!currState
->timing
) {
1644 currState
->tc
= NULL
;
1645 currState
->req
= NULL
;
1649 case LongDescriptor::Block
:
1650 case LongDescriptor::Page
:
1652 auto fault_source
= ArmFault::FaultSourceInvalid
;
1653 // Check for address size fault
1654 if (checkAddrSizeFaultAArch64(
1655 mbits(currState
->longDesc
.data
, MaxPhysAddrRange
- 1,
1656 currState
->longDesc
.offsetBits()),
1657 currState
->physAddrRange
)) {
1659 DPRINTF(TLB
, "L%d descriptor causing Address Size Fault\n",
1660 currState
->longDesc
.lookupLevel
);
1661 fault_source
= ArmFault::AddressSizeLL
;
1663 // Check for access fault
1664 } else if (currState
->longDesc
.af() == 0) {
1666 DPRINTF(TLB
, "L%d descriptor causing Access Fault\n",
1667 currState
->longDesc
.lookupLevel
);
1668 fault_source
= ArmFault::AccessFlagLL
;
1671 if (fault_source
!= ArmFault::FaultSourceInvalid
) {
1672 currState
->fault
= generateLongDescFault(fault_source
);
1674 insertTableEntry(currState
->longDesc
, true);
1678 case LongDescriptor::Table
:
1680 // Set hierarchical permission flags
1681 currState
->secureLookup
= currState
->secureLookup
&&
1682 currState
->longDesc
.secureTable();
1683 currState
->rwTable
= currState
->rwTable
&&
1684 (currState
->longDesc
.rwTable() || currState
->hpd
);
1685 currState
->userTable
= currState
->userTable
&&
1686 (currState
->longDesc
.userTable() || currState
->hpd
);
1687 currState
->xnTable
= currState
->xnTable
||
1688 (currState
->longDesc
.xnTable() && !currState
->hpd
);
1689 currState
->pxnTable
= currState
->pxnTable
||
1690 (currState
->longDesc
.pxnTable() && !currState
->hpd
);
1692 // Set up next level lookup
1693 Addr next_desc_addr
= currState
->longDesc
.nextDescAddr(
1696 DPRINTF(TLB
, "L%d descriptor points to L%d descriptor at: %#x (%s)\n",
1697 currState
->longDesc
.lookupLevel
,
1698 currState
->longDesc
.lookupLevel
+ 1,
1700 currState
->secureLookup
? "s" : "ns");
1702 // Check for address size fault
1703 if (currState
->aarch64
&& checkAddrSizeFaultAArch64(
1704 next_desc_addr
, currState
->physAddrRange
)) {
1705 DPRINTF(TLB
, "L%d descriptor causing Address Size Fault\n",
1706 currState
->longDesc
.lookupLevel
);
1708 currState
->fault
= generateLongDescFault(
1709 ArmFault::AddressSizeLL
);
1713 // Trickbox address check
1714 currState
->fault
= testWalk(
1715 next_desc_addr
, sizeof(uint64_t), TlbEntry::DomainType::Client
,
1716 toLookupLevel(currState
->longDesc
.lookupLevel
+1));
1718 if (currState
->fault
) {
1719 if (!currState
->timing
) {
1720 currState
->tc
= NULL
;
1721 currState
->req
= NULL
;
1726 Request::Flags flag
= Request::PT_WALK
;
1727 if (currState
->secureLookup
)
1728 flag
.set(Request::SECURE
);
1730 if (currState
->sctlr
.c
== 0 || currState
->isUncacheable
) {
1731 flag
.set(Request::UNCACHEABLE
);
1734 LookupLevel L
= currState
->longDesc
.lookupLevel
=
1735 (LookupLevel
) (currState
->longDesc
.lookupLevel
+ 1);
1736 Event
*event
= NULL
;
1739 assert(currState
->aarch64
);
1742 event
= LongDescEventByLevel
[L
];
1745 panic("Wrong lookup level in table walk\n");
1750 delayed
= fetchDescriptor(next_desc_addr
, (uint8_t*)&currState
->longDesc
.data
,
1751 sizeof(uint64_t), flag
, -1, event
,
1752 &TableWalker::doLongDescriptor
);
1754 currState
->delayed
= true;
1759 panic("A new type in a 2 bit field?\n");
1764 TableWalker::doL2Descriptor()
1766 if (currState
->fault
!= NoFault
) {
1770 currState
->l2Desc
.data
= htog(currState
->l2Desc
.data
,
1771 byteOrder(currState
->tc
));
1773 DPRINTF(TLB
, "L2 descriptor for %#x is %#x\n",
1774 currState
->vaddr_tainted
, currState
->l2Desc
.data
);
1777 const bool is_atomic
= currState
->req
->isAtomic();
1779 if (currState
->l2Desc
.invalid()) {
1780 DPRINTF(TLB
, "L2 descriptor invalid, causing fault\n");
1781 if (!currState
->timing
) {
1782 currState
->tc
= NULL
;
1783 currState
->req
= NULL
;
1785 if (currState
->isFetch
)
1786 currState
->fault
= std::make_shared
<PrefetchAbort
>(
1787 currState
->vaddr_tainted
,
1788 ArmFault::TranslationLL
+ L2
,
1790 ArmFault::VmsaTran
);
1792 currState
->fault
= std::make_shared
<DataAbort
>(
1793 currState
->vaddr_tainted
, currState
->l1Desc
.domain(),
1794 is_atomic
? false : currState
->isWrite
,
1795 ArmFault::TranslationLL
+ L2
,
1797 ArmFault::VmsaTran
);
1801 if (currState
->sctlr
.afe
&& bits(currState
->l2Desc
.ap(), 0) == 0) {
1802 /** @todo: check sctlr.ha (bit[17]) if Hardware Access Flag is enabled
1803 * if set, do l2.Desc.setAp0() instead of generating AccessFlag0
1805 DPRINTF(TLB
, "Generating access fault at L2, afe: %d, ap: %d\n",
1806 currState
->sctlr
.afe
, currState
->l2Desc
.ap());
1808 currState
->fault
= std::make_shared
<DataAbort
>(
1809 currState
->vaddr_tainted
,
1810 TlbEntry::DomainType::NoAccess
,
1811 is_atomic
? false : currState
->isWrite
,
1812 ArmFault::AccessFlagLL
+ L2
, isStage2
,
1813 ArmFault::VmsaTran
);
1816 insertTableEntry(currState
->l2Desc
, false);
1820 TableWalker::doL1DescriptorWrapper()
1822 currState
= stateQueues
[L1
].front();
1823 currState
->delayed
= false;
1824 // if there's a stage2 translation object we don't need it any more
1825 if (currState
->stage2Tran
) {
1826 delete currState
->stage2Tran
;
1827 currState
->stage2Tran
= NULL
;
1831 DPRINTF(TLBVerbose
, "L1 Desc object host addr: %p\n",&currState
->l1Desc
.data
);
1832 DPRINTF(TLBVerbose
, "L1 Desc object data: %08x\n",currState
->l1Desc
.data
);
1834 DPRINTF(TLBVerbose
, "calling doL1Descriptor for vaddr:%#x\n", currState
->vaddr_tainted
);
1837 stateQueues
[L1
].pop_front();
1838 // Check if fault was generated
1839 if (currState
->fault
!= NoFault
) {
1840 currState
->transState
->finish(currState
->fault
, currState
->req
,
1841 currState
->tc
, currState
->mode
);
1842 statWalksShortTerminatedAtLevel
[0]++;
1845 nextWalk(currState
->tc
);
1847 currState
->req
= NULL
;
1848 currState
->tc
= NULL
;
1849 currState
->delayed
= false;
1852 else if (!currState
->delayed
) {
1853 // delay is not set so there is no L2 to do
1854 // Don't finish the translation if a stage 2 look up is underway
1855 statWalkServiceTime
.sample(curTick() - currState
->startTime
);
1856 DPRINTF(TLBVerbose
, "calling translateTiming again\n");
1857 tlb
->translateTiming(currState
->req
, currState
->tc
,
1858 currState
->transState
, currState
->mode
);
1859 statWalksShortTerminatedAtLevel
[0]++;
1862 nextWalk(currState
->tc
);
1864 currState
->req
= NULL
;
1865 currState
->tc
= NULL
;
1866 currState
->delayed
= false;
1869 // need to do L2 descriptor
1870 stateQueues
[L2
].push_back(currState
);
1876 TableWalker::doL2DescriptorWrapper()
1878 currState
= stateQueues
[L2
].front();
1879 assert(currState
->delayed
);
1880 // if there's a stage2 translation object we don't need it any more
1881 if (currState
->stage2Tran
) {
1882 delete currState
->stage2Tran
;
1883 currState
->stage2Tran
= NULL
;
1886 DPRINTF(TLBVerbose
, "calling doL2Descriptor for vaddr:%#x\n",
1887 currState
->vaddr_tainted
);
1890 // Check if fault was generated
1891 if (currState
->fault
!= NoFault
) {
1892 currState
->transState
->finish(currState
->fault
, currState
->req
,
1893 currState
->tc
, currState
->mode
);
1894 statWalksShortTerminatedAtLevel
[1]++;
1896 statWalkServiceTime
.sample(curTick() - currState
->startTime
);
1897 DPRINTF(TLBVerbose
, "calling translateTiming again\n");
1898 tlb
->translateTiming(currState
->req
, currState
->tc
,
1899 currState
->transState
, currState
->mode
);
1900 statWalksShortTerminatedAtLevel
[1]++;
1904 stateQueues
[L2
].pop_front();
1906 nextWalk(currState
->tc
);
1908 currState
->req
= NULL
;
1909 currState
->tc
= NULL
;
1910 currState
->delayed
= false;
1917 TableWalker::doL0LongDescriptorWrapper()
1919 doLongDescriptorWrapper(L0
);
1923 TableWalker::doL1LongDescriptorWrapper()
1925 doLongDescriptorWrapper(L1
);
1929 TableWalker::doL2LongDescriptorWrapper()
1931 doLongDescriptorWrapper(L2
);
1935 TableWalker::doL3LongDescriptorWrapper()
1937 doLongDescriptorWrapper(L3
);
1941 TableWalker::doLongDescriptorWrapper(LookupLevel curr_lookup_level
)
1943 currState
= stateQueues
[curr_lookup_level
].front();
1944 assert(curr_lookup_level
== currState
->longDesc
.lookupLevel
);
1945 currState
->delayed
= false;
1947 // if there's a stage2 translation object we don't need it any more
1948 if (currState
->stage2Tran
) {
1949 delete currState
->stage2Tran
;
1950 currState
->stage2Tran
= NULL
;
1953 DPRINTF(TLBVerbose
, "calling doLongDescriptor for vaddr:%#x\n",
1954 currState
->vaddr_tainted
);
1957 stateQueues
[curr_lookup_level
].pop_front();
1959 if (currState
->fault
!= NoFault
) {
1960 // A fault was generated
1961 currState
->transState
->finish(currState
->fault
, currState
->req
,
1962 currState
->tc
, currState
->mode
);
1965 nextWalk(currState
->tc
);
1967 currState
->req
= NULL
;
1968 currState
->tc
= NULL
;
1969 currState
->delayed
= false;
1971 } else if (!currState
->delayed
) {
1972 // No additional lookups required
1973 DPRINTF(TLBVerbose
, "calling translateTiming again\n");
1974 statWalkServiceTime
.sample(curTick() - currState
->startTime
);
1975 tlb
->translateTiming(currState
->req
, currState
->tc
,
1976 currState
->transState
, currState
->mode
);
1977 statWalksLongTerminatedAtLevel
[(unsigned) curr_lookup_level
]++;
1980 nextWalk(currState
->tc
);
1982 currState
->req
= NULL
;
1983 currState
->tc
= NULL
;
1984 currState
->delayed
= false;
1987 if (curr_lookup_level
>= MAX_LOOKUP_LEVELS
- 1)
1988 panic("Max. number of lookups already reached in table walk\n");
1989 // Need to perform additional lookups
1990 stateQueues
[currState
->longDesc
.lookupLevel
].push_back(currState
);
1997 TableWalker::nextWalk(ThreadContext
*tc
)
1999 if (pendingQueue
.size())
2000 schedule(doProcessEvent
, clockEdge(Cycles(1)));
2006 TableWalker::fetchDescriptor(Addr descAddr
, uint8_t *data
, int numBytes
,
2007 Request::Flags flags
, int queueIndex
, Event
*event
,
2008 void (TableWalker::*doDescriptor
)())
2010 bool isTiming
= currState
->timing
;
2012 DPRINTF(TLBVerbose
, "Fetching descriptor at address: 0x%x stage2Req: %d\n",
2013 descAddr
, currState
->stage2Req
);
2015 // If this translation has a stage 2 then we know descAddr is an IPA and
2016 // needs to be translated before we can access the page table. Do that
2018 if (currState
->stage2Req
) {
2020 flags
= flags
| TLB::MustBeOne
;
2023 Stage2MMU::Stage2Translation
*tran
= new
2024 Stage2MMU::Stage2Translation(*stage2Mmu
, data
, event
,
2026 currState
->stage2Tran
= tran
;
2027 stage2Mmu
->readDataTimed(currState
->tc
, descAddr
, tran
, numBytes
,
2029 fault
= tran
->fault
;
2031 fault
= stage2Mmu
->readDataUntimed(currState
->tc
,
2032 currState
->vaddr
, descAddr
, data
, numBytes
, flags
,
2033 currState
->functional
);
2036 if (fault
!= NoFault
) {
2037 currState
->fault
= fault
;
2040 if (queueIndex
>= 0) {
2041 DPRINTF(TLBVerbose
, "Adding to walker fifo: queue size before adding: %d\n",
2042 stateQueues
[queueIndex
].size());
2043 stateQueues
[queueIndex
].push_back(currState
);
2047 (this->*doDescriptor
)();
2051 port
->dmaAction(MemCmd::ReadReq
, descAddr
, numBytes
, event
, data
,
2052 currState
->tc
->getCpuPtr()->clockPeriod(),flags
);
2053 if (queueIndex
>= 0) {
2054 DPRINTF(TLBVerbose
, "Adding to walker fifo: queue size before adding: %d\n",
2055 stateQueues
[queueIndex
].size());
2056 stateQueues
[queueIndex
].push_back(currState
);
2059 } else if (!currState
->functional
) {
2060 port
->dmaAction(MemCmd::ReadReq
, descAddr
, numBytes
, NULL
, data
,
2061 currState
->tc
->getCpuPtr()->clockPeriod(), flags
);
2062 (this->*doDescriptor
)();
2064 RequestPtr req
= std::make_shared
<Request
>(
2065 descAddr
, numBytes
, flags
, masterId
);
2067 req
->taskId(ContextSwitchTaskId::DMA
);
2068 PacketPtr pkt
= new Packet(req
, MemCmd::ReadReq
);
2069 pkt
->dataStatic(data
);
2070 port
->sendFunctional(pkt
);
2071 (this->*doDescriptor
)();
2079 TableWalker::insertTableEntry(DescriptorBase
&descriptor
, bool longDescriptor
)
2083 // Create and fill a new page table entry
2085 te
.longDescFormat
= longDescriptor
;
2086 te
.isHyp
= currState
->isHyp
;
2087 te
.asid
= currState
->asid
;
2088 te
.vmid
= currState
->vmid
;
2089 te
.N
= descriptor
.offsetBits();
2090 te
.vpn
= currState
->vaddr
>> te
.N
;
2091 te
.size
= (1<<te
.N
) - 1;
2092 te
.pfn
= descriptor
.pfn();
2093 te
.domain
= descriptor
.domain();
2094 te
.lookupLevel
= descriptor
.lookupLevel
;
2095 te
.ns
= !descriptor
.secure(haveSecurity
, currState
) || isStage2
;
2096 te
.nstid
= !currState
->isSecure
;
2097 te
.xn
= descriptor
.xn();
2098 if (currState
->aarch64
)
2099 te
.el
= currState
->el
;
2103 statPageSizes
[pageSizeNtoStatBin(te
.N
)]++;
2104 statRequestOrigin
[COMPLETED
][currState
->isFetch
]++;
2106 // ASID has no meaning for stage 2 TLB entries, so mark all stage 2 entries
2108 te
.global
= descriptor
.global(currState
) || isStage2
;
2109 if (longDescriptor
) {
2110 LongDescriptor lDescriptor
=
2111 dynamic_cast<LongDescriptor
&>(descriptor
);
2113 te
.xn
|= currState
->xnTable
;
2114 te
.pxn
= currState
->pxnTable
|| lDescriptor
.pxn();
2116 // this is actually the HAP field, but its stored in the same bit
2117 // possitions as the AP field in a stage 1 translation.
2118 te
.hap
= lDescriptor
.ap();
2120 te
.ap
= ((!currState
->rwTable
|| descriptor
.ap() >> 1) << 1) |
2121 (currState
->userTable
&& (descriptor
.ap() & 0x1));
2123 if (currState
->aarch64
)
2124 memAttrsAArch64(currState
->tc
, te
, lDescriptor
);
2126 memAttrsLPAE(currState
->tc
, te
, lDescriptor
);
2128 te
.ap
= descriptor
.ap();
2129 memAttrs(currState
->tc
, te
, currState
->sctlr
, descriptor
.texcb(),
2130 descriptor
.shareable());
2134 DPRINTF(TLB
, descriptor
.dbgHeader().c_str());
2135 DPRINTF(TLB
, " - N:%d pfn:%#x size:%#x global:%d valid:%d\n",
2136 te
.N
, te
.pfn
, te
.size
, te
.global
, te
.valid
);
2137 DPRINTF(TLB
, " - vpn:%#x xn:%d pxn:%d ap:%d domain:%d asid:%d "
2138 "vmid:%d hyp:%d nc:%d ns:%d\n", te
.vpn
, te
.xn
, te
.pxn
,
2139 te
.ap
, static_cast<uint8_t>(te
.domain
), te
.asid
, te
.vmid
, te
.isHyp
,
2140 te
.nonCacheable
, te
.ns
);
2141 DPRINTF(TLB
, " - domain from L%d desc:%d data:%#x\n",
2142 descriptor
.lookupLevel
, static_cast<uint8_t>(descriptor
.domain()),
2143 descriptor
.getRawData());
2145 // Insert the entry into the TLB
2146 tlb
->insert(currState
->vaddr
, te
);
2147 if (!currState
->timing
) {
2148 currState
->tc
= NULL
;
2149 currState
->req
= NULL
;
2153 ArmISA::TableWalker
*
2154 ArmTableWalkerParams::create()
2156 return new ArmISA::TableWalker(this);
2160 TableWalker::toLookupLevel(uint8_t lookup_level_as_int
)
2162 switch (lookup_level_as_int
) {
2170 panic("Invalid lookup level conversion");
2174 /* this method keeps track of the table walker queue's residency, so
2175 * needs to be called whenever requests start and complete. */
2177 TableWalker::pendingChange()
2179 unsigned n
= pendingQueue
.size();
2180 if ((currState
!= NULL
) && (currState
!= pendingQueue
.front())) {
2184 if (n
!= pendingReqs
) {
2185 Tick now
= curTick();
2186 statPendingWalks
.sample(pendingReqs
, now
- pendingChangeTick
);
2188 pendingChangeTick
= now
;
2193 TableWalker::testWalk(Addr pa
, Addr size
, TlbEntry::DomainType domain
,
2194 LookupLevel lookup_level
)
2196 return tlb
->testWalk(pa
, size
, currState
->vaddr
, currState
->isSecure
,
2197 currState
->mode
, domain
, lookup_level
);
2202 TableWalker::pageSizeNtoStatBin(uint8_t N
)
2204 /* for statPageSizes */
2206 case 12: return 0; // 4K
2207 case 14: return 1; // 16K (using 16K granule in v8-64)
2208 case 16: return 2; // 64K
2209 case 20: return 3; // 1M
2210 case 21: return 4; // 2M-LPAE
2211 case 24: return 5; // 16M
2212 case 25: return 6; // 32M (using 16K granule in v8-64)
2213 case 29: return 7; // 512M (using 64K granule in v8-64)
2214 case 30: return 8; // 1G-LPAE
2216 panic("unknown page size");
2222 TableWalker::regStats()
2224 ClockedObject::regStats();
2227 .name(name() + ".walks")
2228 .desc("Table walker walks requested")
2231 statWalksShortDescriptor
2232 .name(name() + ".walksShort")
2233 .desc("Table walker walks initiated with short descriptors")
2234 .flags(Stats::nozero
)
2237 statWalksLongDescriptor
2238 .name(name() + ".walksLong")
2239 .desc("Table walker walks initiated with long descriptors")
2240 .flags(Stats::nozero
)
2243 statWalksShortTerminatedAtLevel
2245 .name(name() + ".walksShortTerminationLevel")
2246 .desc("Level at which table walker walks "
2247 "with short descriptors terminate")
2248 .flags(Stats::nozero
)
2250 statWalksShortTerminatedAtLevel
.subname(0, "Level1");
2251 statWalksShortTerminatedAtLevel
.subname(1, "Level2");
2253 statWalksLongTerminatedAtLevel
2255 .name(name() + ".walksLongTerminationLevel")
2256 .desc("Level at which table walker walks "
2257 "with long descriptors terminate")
2258 .flags(Stats::nozero
)
2260 statWalksLongTerminatedAtLevel
.subname(0, "Level0");
2261 statWalksLongTerminatedAtLevel
.subname(1, "Level1");
2262 statWalksLongTerminatedAtLevel
.subname(2, "Level2");
2263 statWalksLongTerminatedAtLevel
.subname(3, "Level3");
2266 .name(name() + ".walksSquashedBefore")
2267 .desc("Table walks squashed before starting")
2268 .flags(Stats::nozero
)
2272 .name(name() + ".walksSquashedAfter")
2273 .desc("Table walks squashed after completion")
2274 .flags(Stats::nozero
)
2279 .name(name() + ".walkWaitTime")
2280 .desc("Table walker wait (enqueue to first request) latency")
2281 .flags(Stats::pdf
| Stats::nozero
| Stats::nonan
)
2286 .name(name() + ".walkCompletionTime")
2287 .desc("Table walker service (enqueue to completion) latency")
2288 .flags(Stats::pdf
| Stats::nozero
| Stats::nonan
)
2293 .name(name() + ".walksPending")
2294 .desc("Table walker pending requests distribution")
2295 .flags(Stats::pdf
| Stats::dist
| Stats::nozero
| Stats::nonan
)
2298 statPageSizes
// see DDI 0487A D4-1661
2300 .name(name() + ".walkPageSizes")
2301 .desc("Table walker page sizes translated")
2302 .flags(Stats::total
| Stats::pdf
| Stats::dist
| Stats::nozero
)
2304 statPageSizes
.subname(0, "4K");
2305 statPageSizes
.subname(1, "16K");
2306 statPageSizes
.subname(2, "64K");
2307 statPageSizes
.subname(3, "1M");
2308 statPageSizes
.subname(4, "2M");
2309 statPageSizes
.subname(5, "16M");
2310 statPageSizes
.subname(6, "32M");
2311 statPageSizes
.subname(7, "512M");
2312 statPageSizes
.subname(8, "1G");
2315 .init(2,2) // Instruction/Data, requests/completed
2316 .name(name() + ".walkRequestOrigin")
2317 .desc("Table walker requests started/completed, data/inst")
2318 .flags(Stats::total
)
2320 statRequestOrigin
.subname(0,"Requested");
2321 statRequestOrigin
.subname(1,"Completed");
2322 statRequestOrigin
.ysubname(0,"Data");
2323 statRequestOrigin
.ysubname(1,"Inst");