2 * Copyright (c) 2010, 2012-2014 ARM Limited
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
14 * Redistribution and use in source and binary forms, with or without
15 * modification, are permitted provided that the following conditions are
16 * met: redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer;
18 * redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution;
21 * neither the name of the copyright holders nor the names of its
22 * contributors may be used to endorse or promote products derived from
23 * this software without specific prior written permission.
25 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
26 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
27 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
28 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
29 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
30 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
31 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
32 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
33 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
34 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
35 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
43 #include "arch/arm/faults.hh"
44 #include "arch/arm/stage2_mmu.hh"
45 #include "arch/arm/system.hh"
46 #include "arch/arm/table_walker.hh"
47 #include "arch/arm/tlb.hh"
48 #include "cpu/base.hh"
49 #include "cpu/thread_context.hh"
50 #include "debug/Checkpoint.hh"
51 #include "debug/Drain.hh"
52 #include "debug/TLB.hh"
53 #include "debug/TLBVerbose.hh"
54 #include "sim/system.hh"
56 using namespace ArmISA
;
// Constructor: wires up the walker's DMA port, caches pointers handed in via
// the Params object, and initializes the per-lookup-level descriptor events.
// NOTE(review): this extraction is garbled and interior lines are elided
// (original line numbers jump 65->70 and 78->81), so the guard that selects
// between the armSys feature-query path and the all-disabled fallback path is
// not visible here -- comments below are best-effort.
58 TableWalker::TableWalker(const Params
*p
)
59 : MemObject(p
), port(this, p
->sys
), drainManager(NULL
),
60 stage2Mmu(NULL
), isStage2(p
->is_stage2
), tlb(NULL
),
61 currState(NULL
), pending(false), masterId(p
->sys
->getMasterId(name())),
62 numSquashable(p
->num_squash_per_cycle
),
63 doL1DescEvent(this), doL2DescEvent(this),
64 doL0LongDescEvent(this), doL1LongDescEvent(this), doL2LongDescEvent(this),
65 doL3LongDescEvent(this),
70 // Cache system-level properties
// Downcast the generic System to ArmSystem so ARM feature flags can be read.
72 armSys
= dynamic_cast<ArmSystem
*>(p
->sys
);
// Query the feature flags once at construction instead of on every walk.
74 haveSecurity
= armSys
->haveSecurity();
75 _haveLPAE
= armSys
->haveLPAE();
76 _haveVirtualization
= armSys
->haveVirtualization();
77 physAddrRange
= armSys
->physAddrRange();
78 _haveLargeAsid64
= armSys
->haveLargeAsid64();
// Fallback path (presumably taken when the dynamic_cast yields NULL --
// TODO confirm against the elided guard): disable all extensions.
81 haveSecurity
= _haveLPAE
= _haveVirtualization
= false;
82 _haveLargeAsid64
= false;
// Destructor. Its body is elided from this extraction, so no claims are made
// about what it releases.
88 TableWalker::~TableWalker()
// WalkerState constructor: the stage-2 translation pointer starts out NULL,
// and the L2 descriptor is constructed from the l1Desc member. The rest of
// the member-initializer list / body is elided from this extraction.
93 TableWalker::WalkerState::WalkerState() : stage2Tran(NULL
), l2Desc(l1Desc
)
// Report drain completion to the DrainManager once no walker state remains:
// the L1 and L2 state queues and the pending queue must all be empty before
// the walker moves to the Drained state and signals the manager.
98 TableWalker::completeDrain()
100 if (drainManager
&& stateQueues
[L1
].empty() && stateQueues
[L2
].empty() &&
101 pendingQueue
.empty()) {
// All outstanding walks have finished; transition state, then notify.
102 setDrainState(Drainable::Drained
);
103 DPRINTF(Drain
, "TableWalker done draining, processing drain event\n");
104 drainManager
->signalDrainDone();
// Drain entry point: first drains the DMA port, then checks whether the
// walker itself still has in-flight state. If any per-level state queue or
// the pending queue is non-empty, the walker enters Draining and (per the
// comment below) its own drain count is added to the port's; otherwise it is
// immediately Drained. Return statements are elided from this extraction.
110 TableWalker::drain(DrainManager
*dm
)
// Ask the port to drain first; count accumulates outstanding drainers.
112 unsigned int count
= port
.drain(dm
);
114 bool state_queues_not_empty
= false;
// Scan every lookup level's queue for outstanding walk state.
116 for (int i
= 0; i
< MAX_LOOKUP_LEVELS
; ++i
) {
117 if (!stateQueues
[i
].empty()) {
118 state_queues_not_empty
= true;
// Walks still outstanding: the walker itself must drain too.
123 if (state_queues_not_empty
|| pendingQueue
.size()) {
125 setDrainState(Drainable::Draining
);
126 DPRINTF(Drain
, "TableWalker not drained\n");
128 // return port drain count plus the table walker itself needs to drain
// Nothing outstanding: the walker is already drained.
131 setDrainState(Drainable::Drained
);
132 DPRINTF(Drain
, "TableWalker free, no need to drain\n");
134 // table walker is drained, but its ports may still need to be drained
// Resume after a drain: defer to the base class, then -- when the system is
// in timing mode and a walk was in flight (currState non-NULL) -- perform
// cleanup whose body is elided from this extraction.
140 TableWalker::drainResume()
142 Drainable::drainResume();
143 if (params()->sys
->isTimingMode() && currState
) {
// Return the walker's master port when asked for "port"; any other name is
// delegated to MemObject::getMasterPort. The "port" branch's return statement
// is elided from this extraction.
150 TableWalker::getMasterPort(const std::string
&if_name
, PortID idx
)
152 if (if_name
== "port") {
155 return MemObject::getMasterPort(if_name
, idx
);
159 TableWalker::walk(RequestPtr _req
, ThreadContext
*_tc
, uint16_t _asid
,
160 uint8_t _vmid
, bool _isHyp
, TLB::Mode _mode
,
161 TLB::Translation
*_trans
, bool _timing
, bool _functional
,
162 bool secure
, TLB::ArmTranslationType tranType
)
164 assert(!(_functional
&& _timing
));
165 WalkerState
*savedCurrState
= NULL
;
167 if (!currState
&& !_functional
) {
168 // For atomic mode, a new WalkerState instance should be only created
169 // once per TLB. For timing mode, a new instance is generated for every
171 DPRINTF(TLBVerbose
, "creating new instance of WalkerState\n");
173 currState
= new WalkerState();
174 currState
->tableWalker
= this;
175 } else if (_functional
) {
176 // If we are mixing functional mode with timing (or even
177 // atomic), we need to to be careful and clean up after
178 // ourselves to not risk getting into an inconsistent state.
179 DPRINTF(TLBVerbose
, "creating functional instance of WalkerState\n");
180 savedCurrState
= currState
;
181 currState
= new WalkerState();
182 currState
->tableWalker
= this;
183 } else if (_timing
) {
184 // This is a translation that was completed and then faulted again
185 // because some underlying parameters that affect the translation
186 // changed out from under us (e.g. asid). It will either be a
187 // misprediction, in which case nothing will happen or we'll use
188 // this fault to re-execute the faulting instruction which should clean
190 if (currState
->vaddr_tainted
== _req
->getVaddr()) {
191 return std::make_shared
<ReExec
>();
196 currState
->aarch64
= opModeIs64(currOpMode(_tc
));
197 currState
->el
= currEL(_tc
);
198 currState
->transState
= _trans
;
199 currState
->req
= _req
;
200 currState
->fault
= NoFault
;
201 currState
->asid
= _asid
;
202 currState
->vmid
= _vmid
;
203 currState
->isHyp
= _isHyp
;
204 currState
->timing
= _timing
;
205 currState
->functional
= _functional
;
206 currState
->mode
= _mode
;
207 currState
->tranType
= tranType
;
208 currState
->isSecure
= secure
;
209 currState
->physAddrRange
= physAddrRange
;
211 /** @todo These should be cached or grabbed from cached copies in
212 the TLB, all these miscreg reads are expensive */
213 currState
->vaddr_tainted
= currState
->req
->getVaddr();
214 if (currState
->aarch64
)
215 currState
->vaddr
= purifyTaggedAddr(currState
->vaddr_tainted
,
216 currState
->tc
, currState
->el
);
218 currState
->vaddr
= currState
->vaddr_tainted
;
220 if (currState
->aarch64
) {
221 switch (currState
->el
) {
224 currState
->sctlr
= currState
->tc
->readMiscReg(MISCREG_SCTLR_EL1
);
225 currState
->tcr
= currState
->tc
->readMiscReg(MISCREG_TCR_EL1
);
227 // @todo: uncomment this to enable Virtualization
229 // assert(haveVirtualization);
230 // currState->sctlr = currState->tc->readMiscReg(MISCREG_SCTLR_EL2);
231 // currState->tcr = currState->tc->readMiscReg(MISCREG_TCR_EL2);
234 assert(haveSecurity
);
235 currState
->sctlr
= currState
->tc
->readMiscReg(MISCREG_SCTLR_EL3
);
236 currState
->tcr
= currState
->tc
->readMiscReg(MISCREG_TCR_EL3
);
239 panic("Invalid exception level");
243 currState
->sctlr
= currState
->tc
->readMiscReg(flattenMiscRegNsBanked(
244 MISCREG_SCTLR
, currState
->tc
, !currState
->isSecure
));
245 currState
->ttbcr
= currState
->tc
->readMiscReg(flattenMiscRegNsBanked(
246 MISCREG_TTBCR
, currState
->tc
, !currState
->isSecure
));
247 currState
->htcr
= currState
->tc
->readMiscReg(MISCREG_HTCR
);
248 currState
->hcr
= currState
->tc
->readMiscReg(MISCREG_HCR
);
249 currState
->vtcr
= currState
->tc
->readMiscReg(MISCREG_VTCR
);
251 sctlr
= currState
->sctlr
;
253 currState
->isFetch
= (currState
->mode
== TLB::Execute
);
254 currState
->isWrite
= (currState
->mode
== TLB::Write
);
256 // We only do a second stage of translation if we're not secure, or in
257 // hyp mode, the second stage MMU is enabled, and this table walker
258 // instance is the first stage.
259 currState
->doingStage2
= false;
260 // @todo: for now disable this in AArch64 (HCR is not set)
261 currState
->stage2Req
= !currState
->aarch64
&& currState
->hcr
.vm
&&
262 !isStage2
&& !currState
->isSecure
&& !currState
->isHyp
;
264 bool long_desc_format
= currState
->aarch64
||
265 (_haveLPAE
&& currState
->ttbcr
.eae
) ||
268 if (long_desc_format
) {
269 // Helper variables used for hierarchical permissions
270 currState
->secureLookup
= currState
->isSecure
;
271 currState
->rwTable
= true;
272 currState
->userTable
= true;
273 currState
->xnTable
= false;
274 currState
->pxnTable
= false;
277 if (!currState
->timing
) {
278 Fault fault
= NoFault
;
279 if (currState
->aarch64
)
280 fault
= processWalkAArch64();
281 else if (long_desc_format
)
282 fault
= processWalkLPAE();
284 fault
= processWalk();
286 // If this was a functional non-timing access restore state to
288 if (currState
->functional
) {
290 currState
= savedCurrState
;
295 if (pending
|| pendingQueue
.size()) {
296 pendingQueue
.push_back(currState
);
300 if (currState
->aarch64
)
301 return processWalkAArch64();
302 else if (long_desc_format
)
303 return processWalkLPAE();
305 return processWalk();
312 TableWalker::processWalkWrapper()
315 assert(pendingQueue
.size());
316 currState
= pendingQueue
.front();
318 ExceptionLevel target_el
= EL0
;
319 if (currState
->aarch64
)
320 target_el
= currEL(currState
->tc
);
324 // Check if a previous walk filled this request already
325 // @TODO Should this always be the TLB or should we look in the stage2 TLB?
326 TlbEntry
* te
= tlb
->lookup(currState
->vaddr
, currState
->asid
,
327 currState
->vmid
, currState
->isHyp
, currState
->isSecure
, true, false,
330 // Check if we still need to have a walk for this request. If the requesting
331 // instruction has been squashed, or a previous walk has filled the TLB with
332 // a match, we just want to get rid of the walk. The latter could happen
333 // when there are multiple outstanding misses to a single page and a
334 // previous request has been successfully translated.
335 if (!currState
->transState
->squashed() && !te
) {
336 // We've got a valid request, lets process it
338 pendingQueue
.pop_front();
339 if (currState
->aarch64
)
340 processWalkAArch64();
341 else if ((_haveLPAE
&& currState
->ttbcr
.eae
) || currState
->isHyp
|| isStage2
)
349 // If the instruction that we were translating for has been
350 // squashed we shouldn't bother.
351 unsigned num_squashed
= 0;
352 ThreadContext
*tc
= currState
->tc
;
353 while ((num_squashed
< numSquashable
) && currState
&&
354 (currState
->transState
->squashed() || te
)) {
355 pendingQueue
.pop_front();
358 DPRINTF(TLB
, "Squashing table walk for address %#x\n",
359 currState
->vaddr_tainted
);
361 if (currState
->transState
->squashed()) {
362 // finish the translation which will delete the translation object
363 currState
->transState
->finish(
364 std::make_shared
<UnimpFault
>("Squashed Inst"),
365 currState
->req
, currState
->tc
, currState
->mode
);
367 // translate the request now that we know it will work
368 tlb
->translateTiming(currState
->req
, currState
->tc
,
369 currState
->transState
, currState
->mode
);
373 // delete the current request
376 // peak at the next one
377 if (pendingQueue
.size()) {
378 currState
= pendingQueue
.front();
379 te
= tlb
->lookup(currState
->vaddr
, currState
->asid
,
380 currState
->vmid
, currState
->isHyp
, currState
->isSecure
, true,
383 // Terminate the loop, nothing more to do
388 // if we've still got pending translations schedule more work
395 TableWalker::processWalk()
399 // If translation isn't enabled, we shouldn't be here
400 assert(currState
->sctlr
.m
|| isStage2
);
402 DPRINTF(TLB
, "Beginning table walk for address %#x, TTBCR: %#x, bits:%#x\n",
403 currState
->vaddr_tainted
, currState
->ttbcr
, mbits(currState
->vaddr
, 31,
404 32 - currState
->ttbcr
.n
));
406 if (currState
->ttbcr
.n
== 0 || !mbits(currState
->vaddr
, 31,
407 32 - currState
->ttbcr
.n
)) {
408 DPRINTF(TLB
, " - Selecting TTBR0\n");
409 // Check if table walk is allowed when Security Extensions are enabled
410 if (haveSecurity
&& currState
->ttbcr
.pd0
) {
411 if (currState
->isFetch
)
412 return std::make_shared
<PrefetchAbort
>(
413 currState
->vaddr_tainted
,
414 ArmFault::TranslationLL
+ L1
,
418 return std::make_shared
<DataAbort
>(
419 currState
->vaddr_tainted
,
420 TlbEntry::DomainType::NoAccess
, currState
->isWrite
,
421 ArmFault::TranslationLL
+ L1
, isStage2
,
424 ttbr
= currState
->tc
->readMiscReg(flattenMiscRegNsBanked(
425 MISCREG_TTBR0
, currState
->tc
, !currState
->isSecure
));
427 DPRINTF(TLB
, " - Selecting TTBR1\n");
428 // Check if table walk is allowed when Security Extensions are enabled
429 if (haveSecurity
&& currState
->ttbcr
.pd1
) {
430 if (currState
->isFetch
)
431 return std::make_shared
<PrefetchAbort
>(
432 currState
->vaddr_tainted
,
433 ArmFault::TranslationLL
+ L1
,
437 return std::make_shared
<DataAbort
>(
438 currState
->vaddr_tainted
,
439 TlbEntry::DomainType::NoAccess
, currState
->isWrite
,
440 ArmFault::TranslationLL
+ L1
, isStage2
,
443 ttbr
= currState
->tc
->readMiscReg(flattenMiscRegNsBanked(
444 MISCREG_TTBR1
, currState
->tc
, !currState
->isSecure
));
445 currState
->ttbcr
.n
= 0;
448 Addr l1desc_addr
= mbits(ttbr
, 31, 14 - currState
->ttbcr
.n
) |
449 (bits(currState
->vaddr
, 31 - currState
->ttbcr
.n
, 20) << 2);
450 DPRINTF(TLB
, " - Descriptor at address %#x (%s)\n", l1desc_addr
,
451 currState
->isSecure
? "s" : "ns");
453 // Trickbox address check
455 f
= tlb
->walkTrickBoxCheck(l1desc_addr
, currState
->isSecure
,
456 currState
->vaddr
, sizeof(uint32_t), currState
->isFetch
,
457 currState
->isWrite
, TlbEntry::DomainType::NoAccess
, L1
);
459 DPRINTF(TLB
, "Trickbox check caused fault on %#x\n", currState
->vaddr_tainted
);
460 if (currState
->timing
) {
462 nextWalk(currState
->tc
);
465 currState
->tc
= NULL
;
466 currState
->req
= NULL
;
471 Request::Flags flag
= 0;
472 if (currState
->sctlr
.c
== 0) {
473 flag
= Request::UNCACHEABLE
;
477 delayed
= fetchDescriptor(l1desc_addr
, (uint8_t*)&currState
->l1Desc
.data
,
478 sizeof(uint32_t), flag
, L1
, &doL1DescEvent
,
479 &TableWalker::doL1Descriptor
);
481 f
= currState
->fault
;
488 TableWalker::processWalkLPAE()
490 Addr ttbr
, ttbr0_max
, ttbr1_min
, desc_addr
;
492 LookupLevel start_lookup_level
= L1
;
494 DPRINTF(TLB
, "Beginning table walk for address %#x, TTBCR: %#x\n",
495 currState
->vaddr_tainted
, currState
->ttbcr
);
497 Request::Flags flag
= 0;
498 if (currState
->isSecure
)
499 flag
.set(Request::SECURE
);
501 // work out which base address register to use, if in hyp mode we always
504 DPRINTF(TLB
, " - Selecting VTTBR (long-desc.)\n");
505 ttbr
= currState
->tc
->readMiscReg(MISCREG_VTTBR
);
506 tsz
= sext
<4>(currState
->vtcr
.t0sz
);
507 start_lookup_level
= currState
->vtcr
.sl0
? L1
: L2
;
508 } else if (currState
->isHyp
) {
509 DPRINTF(TLB
, " - Selecting HTTBR (long-desc.)\n");
510 ttbr
= currState
->tc
->readMiscReg(MISCREG_HTTBR
);
511 tsz
= currState
->htcr
.t0sz
;
513 assert(_haveLPAE
&& currState
->ttbcr
.eae
);
515 // Determine boundaries of TTBR0/1 regions
516 if (currState
->ttbcr
.t0sz
)
517 ttbr0_max
= (1ULL << (32 - currState
->ttbcr
.t0sz
)) - 1;
518 else if (currState
->ttbcr
.t1sz
)
519 ttbr0_max
= (1ULL << 32) -
520 (1ULL << (32 - currState
->ttbcr
.t1sz
)) - 1;
522 ttbr0_max
= (1ULL << 32) - 1;
523 if (currState
->ttbcr
.t1sz
)
524 ttbr1_min
= (1ULL << 32) - (1ULL << (32 - currState
->ttbcr
.t1sz
));
526 ttbr1_min
= (1ULL << (32 - currState
->ttbcr
.t0sz
));
528 // The following code snippet selects the appropriate translation table base
529 // address (TTBR0 or TTBR1) and the appropriate starting lookup level
530 // depending on the address range supported by the translation table (ARM
531 // ARM issue C B3.6.4)
532 if (currState
->vaddr
<= ttbr0_max
) {
533 DPRINTF(TLB
, " - Selecting TTBR0 (long-desc.)\n");
534 // Check if table walk is allowed
535 if (currState
->ttbcr
.epd0
) {
536 if (currState
->isFetch
)
537 return std::make_shared
<PrefetchAbort
>(
538 currState
->vaddr_tainted
,
539 ArmFault::TranslationLL
+ L1
,
543 return std::make_shared
<DataAbort
>(
544 currState
->vaddr_tainted
,
545 TlbEntry::DomainType::NoAccess
,
547 ArmFault::TranslationLL
+ L1
,
551 ttbr
= currState
->tc
->readMiscReg(flattenMiscRegNsBanked(
552 MISCREG_TTBR0
, currState
->tc
, !currState
->isSecure
));
553 tsz
= currState
->ttbcr
.t0sz
;
554 if (ttbr0_max
< (1ULL << 30)) // Upper limit < 1 GB
555 start_lookup_level
= L2
;
556 } else if (currState
->vaddr
>= ttbr1_min
) {
557 DPRINTF(TLB
, " - Selecting TTBR1 (long-desc.)\n");
558 // Check if table walk is allowed
559 if (currState
->ttbcr
.epd1
) {
560 if (currState
->isFetch
)
561 return std::make_shared
<PrefetchAbort
>(
562 currState
->vaddr_tainted
,
563 ArmFault::TranslationLL
+ L1
,
567 return std::make_shared
<DataAbort
>(
568 currState
->vaddr_tainted
,
569 TlbEntry::DomainType::NoAccess
,
571 ArmFault::TranslationLL
+ L1
,
575 ttbr
= currState
->tc
->readMiscReg(flattenMiscRegNsBanked(
576 MISCREG_TTBR1
, currState
->tc
, !currState
->isSecure
));
577 tsz
= currState
->ttbcr
.t1sz
;
578 if (ttbr1_min
>= (1ULL << 31) + (1ULL << 30)) // Lower limit >= 3 GB
579 start_lookup_level
= L2
;
581 // Out of boundaries -> translation fault
582 if (currState
->isFetch
)
583 return std::make_shared
<PrefetchAbort
>(
584 currState
->vaddr_tainted
,
585 ArmFault::TranslationLL
+ L1
,
589 return std::make_shared
<DataAbort
>(
590 currState
->vaddr_tainted
,
591 TlbEntry::DomainType::NoAccess
,
592 currState
->isWrite
, ArmFault::TranslationLL
+ L1
,
593 isStage2
, ArmFault::LpaeTran
);
598 // Perform lookup (ARM ARM issue C B3.6.6)
599 if (start_lookup_level
== L1
) {
601 desc_addr
= mbits(ttbr
, 39, n
) |
602 (bits(currState
->vaddr
, n
+ 26, 30) << 3);
603 DPRINTF(TLB
, " - Descriptor at address %#x (%s) (long-desc.)\n",
604 desc_addr
, currState
->isSecure
? "s" : "ns");
606 // Skip first-level lookup
607 n
= (tsz
>= 2 ? 14 - tsz
: 12);
608 desc_addr
= mbits(ttbr
, 39, n
) |
609 (bits(currState
->vaddr
, n
+ 17, 21) << 3);
610 DPRINTF(TLB
, " - Descriptor at address %#x (%s) (long-desc.)\n",
611 desc_addr
, currState
->isSecure
? "s" : "ns");
614 // Trickbox address check
615 Fault f
= tlb
->walkTrickBoxCheck(desc_addr
, currState
->isSecure
,
616 currState
->vaddr
, sizeof(uint64_t), currState
->isFetch
,
617 currState
->isWrite
, TlbEntry::DomainType::NoAccess
,
620 DPRINTF(TLB
, "Trickbox check caused fault on %#x\n", currState
->vaddr_tainted
);
621 if (currState
->timing
) {
623 nextWalk(currState
->tc
);
626 currState
->tc
= NULL
;
627 currState
->req
= NULL
;
632 if (currState
->sctlr
.c
== 0) {
633 flag
= Request::UNCACHEABLE
;
636 if (currState
->isSecure
)
637 flag
.set(Request::SECURE
);
639 currState
->longDesc
.lookupLevel
= start_lookup_level
;
640 currState
->longDesc
.aarch64
= false;
641 currState
->longDesc
.grainSize
= Grain4KB
;
643 Event
*event
= start_lookup_level
== L1
? (Event
*) &doL1LongDescEvent
644 : (Event
*) &doL2LongDescEvent
;
646 bool delayed
= fetchDescriptor(desc_addr
, (uint8_t*)&currState
->longDesc
.data
,
647 sizeof(uint64_t), flag
, start_lookup_level
,
648 event
, &TableWalker::doLongDescriptor
);
650 f
= currState
->fault
;
// Clamp/adjust an AArch64 translation table size value (tsz). The body is
// elided from this extraction, so the exact bounds applied are not visible.
657 TableWalker::adjustTableSizeAArch64(unsigned tsz
)
// Address-size fault check for AArch64: true when the configured physical
// address range is narrower than the maximum and any address bit above that
// range (bits [MaxPhysAddrRange-1 : currPhysAddrRange]) is set.
667 TableWalker::checkAddrSizeFaultAArch64(Addr addr
, int currPhysAddrRange
)
669 return (currPhysAddrRange
!= MaxPhysAddrRange
&&
670 bits(addr
, MaxPhysAddrRange
- 1, currPhysAddrRange
));
674 TableWalker::processWalkAArch64()
676 assert(currState
->aarch64
);
678 DPRINTF(TLB
, "Beginning table walk for address %#llx, TCR: %#llx\n",
679 currState
->vaddr_tainted
, currState
->tcr
);
681 static const GrainSize GrainMapDefault
[] =
682 { Grain4KB
, Grain64KB
, Grain16KB
, ReservedGrain
};
683 static const GrainSize GrainMap_EL1_tg1
[] =
684 { ReservedGrain
, Grain16KB
, Grain4KB
, Grain64KB
};
686 // Determine TTBR, table size, granule size and phys. address range
689 GrainSize tg
= Grain4KB
; // grain size computed from tg* field
691 switch (currState
->el
) {
694 switch (bits(currState
->vaddr
, 63,48)) {
696 DPRINTF(TLB
, " - Selecting TTBR0 (AArch64)\n");
697 ttbr
= currState
->tc
->readMiscReg(MISCREG_TTBR0_EL1
);
698 tsz
= adjustTableSizeAArch64(64 - currState
->tcr
.t0sz
);
699 tg
= GrainMapDefault
[currState
->tcr
.tg0
];
700 if (bits(currState
->vaddr
, 63, tsz
) != 0x0 ||
705 DPRINTF(TLB
, " - Selecting TTBR1 (AArch64)\n");
706 ttbr
= currState
->tc
->readMiscReg(MISCREG_TTBR1_EL1
);
707 tsz
= adjustTableSizeAArch64(64 - currState
->tcr
.t1sz
);
708 tg
= GrainMap_EL1_tg1
[currState
->tcr
.tg1
];
709 if (bits(currState
->vaddr
, 63, tsz
) != mask(64-tsz
) ||
714 // top two bytes must be all 0s or all 1s, else invalid addr
717 ps
= currState
->tcr
.ips
;
721 switch(bits(currState
->vaddr
, 63,48)) {
723 DPRINTF(TLB
, " - Selecting TTBR0 (AArch64)\n");
724 if (currState
->el
== EL2
)
725 ttbr
= currState
->tc
->readMiscReg(MISCREG_TTBR0_EL2
);
727 ttbr
= currState
->tc
->readMiscReg(MISCREG_TTBR0_EL3
);
728 tsz
= adjustTableSizeAArch64(64 - currState
->tcr
.t0sz
);
729 tg
= GrainMapDefault
[currState
->tcr
.tg0
];
732 // invalid addr if top two bytes are not all 0s
735 ps
= currState
->tcr
.ips
;
741 if (currState
->isFetch
)
742 f
= std::make_shared
<PrefetchAbort
>(
743 currState
->vaddr_tainted
,
744 ArmFault::TranslationLL
+ L0
, isStage2
,
747 f
= std::make_shared
<DataAbort
>(
748 currState
->vaddr_tainted
,
749 TlbEntry::DomainType::NoAccess
,
751 ArmFault::TranslationLL
+ L0
,
752 isStage2
, ArmFault::LpaeTran
);
754 if (currState
->timing
) {
756 nextWalk(currState
->tc
);
759 currState
->tc
= NULL
;
760 currState
->req
= NULL
;
766 if (tg
== ReservedGrain
) {
767 warn_once("Reserved granule size requested; gem5's IMPLEMENTATION "
768 "DEFINED behavior takes this to mean 4KB granules\n");
773 LookupLevel start_lookup_level
= MAX_LOOKUP_LEVELS
;
775 // Determine starting lookup level
776 // See aarch64/translation/walk in Appendix G: ARMv8 Pseudocode Library
777 // in ARM DDI 0487A. These table values correspond to the cascading tests
778 // to compute the lookup level and are of the form
779 // (grain_size + N*stride), for N = {1, 2, 3}.
780 // A value of 64 will never succeed and a value of 0 will always succeed.
783 GrainSize grain_size
;
784 unsigned lookup_level_cutoff
[MAX_LOOKUP_LEVELS
];
786 static const GrainMap GM
[] = {
787 { Grain4KB
, { 39, 30, 0, 0 } },
788 { Grain16KB
, { 47, 36, 25, 0 } },
789 { Grain64KB
, { 64, 42, 29, 0 } }
792 const unsigned *lookup
= NULL
; // points to a lookup_level_cutoff
794 for (unsigned i
= 0; i
< 3; ++i
) { // choose entry of GM[]
795 if (tg
== GM
[i
].grain_size
) {
796 lookup
= GM
[i
].lookup_level_cutoff
;
802 for (int L
= L0
; L
!= MAX_LOOKUP_LEVELS
; ++L
) {
803 if (tsz
> lookup
[L
]) {
804 start_lookup_level
= (LookupLevel
) L
;
808 panic_if(start_lookup_level
== MAX_LOOKUP_LEVELS
,
809 "Table walker couldn't find lookup level\n");
812 // Determine table base address
813 int base_addr_lo
= 3 + tsz
- stride
* (3 - start_lookup_level
) - tg
;
814 Addr base_addr
= mbits(ttbr
, 47, base_addr_lo
);
816 // Determine physical address size and raise an Address Size Fault if
818 int pa_range
= decodePhysAddrRange64(ps
);
819 // Clamp to lower limit
820 if (pa_range
> physAddrRange
)
821 currState
->physAddrRange
= physAddrRange
;
823 currState
->physAddrRange
= pa_range
;
824 if (checkAddrSizeFaultAArch64(base_addr
, currState
->physAddrRange
)) {
825 DPRINTF(TLB
, "Address size fault before any lookup\n");
827 if (currState
->isFetch
)
828 f
= std::make_shared
<PrefetchAbort
>(
829 currState
->vaddr_tainted
,
830 ArmFault::AddressSizeLL
+ start_lookup_level
,
834 f
= std::make_shared
<DataAbort
>(
835 currState
->vaddr_tainted
,
836 TlbEntry::DomainType::NoAccess
,
838 ArmFault::AddressSizeLL
+ start_lookup_level
,
843 if (currState
->timing
) {
845 nextWalk(currState
->tc
);
848 currState
->tc
= NULL
;
849 currState
->req
= NULL
;
855 // Determine descriptor address
856 Addr desc_addr
= base_addr
|
857 (bits(currState
->vaddr
, tsz
- 1,
858 stride
* (3 - start_lookup_level
) + tg
) << 3);
860 // Trickbox address check
861 Fault f
= tlb
->walkTrickBoxCheck(desc_addr
, currState
->isSecure
,
862 currState
->vaddr
, sizeof(uint64_t), currState
->isFetch
,
863 currState
->isWrite
, TlbEntry::DomainType::NoAccess
,
866 DPRINTF(TLB
, "Trickbox check caused fault on %#x\n", currState
->vaddr_tainted
);
867 if (currState
->timing
) {
869 nextWalk(currState
->tc
);
872 currState
->tc
= NULL
;
873 currState
->req
= NULL
;
878 Request::Flags flag
= 0;
879 if (currState
->sctlr
.c
== 0) {
880 flag
= Request::UNCACHEABLE
;
883 currState
->longDesc
.lookupLevel
= start_lookup_level
;
884 currState
->longDesc
.aarch64
= true;
885 currState
->longDesc
.grainSize
= tg
;
887 if (currState
->timing
) {
889 switch (start_lookup_level
) {
891 event
= (Event
*) &doL0LongDescEvent
;
894 event
= (Event
*) &doL1LongDescEvent
;
897 event
= (Event
*) &doL2LongDescEvent
;
900 event
= (Event
*) &doL3LongDescEvent
;
903 panic("Invalid table lookup level");
906 port
.dmaAction(MemCmd::ReadReq
, desc_addr
, sizeof(uint64_t), event
,
907 (uint8_t*) &currState
->longDesc
.data
,
908 currState
->tc
->getCpuPtr()->clockPeriod(), flag
);
910 "Adding to walker fifo: queue size before adding: %d\n",
911 stateQueues
[start_lookup_level
].size());
912 stateQueues
[start_lookup_level
].push_back(currState
);
914 } else if (!currState
->functional
) {
915 port
.dmaAction(MemCmd::ReadReq
, desc_addr
, sizeof(uint64_t),
916 NULL
, (uint8_t*) &currState
->longDesc
.data
,
917 currState
->tc
->getCpuPtr()->clockPeriod(), flag
);
919 f
= currState
->fault
;
921 RequestPtr req
= new Request(desc_addr
, sizeof(uint64_t), flag
,
923 PacketPtr pkt
= new Packet(req
, MemCmd::ReadReq
);
924 pkt
->dataStatic((uint8_t*) &currState
->longDesc
.data
);
925 port
.sendFunctional(pkt
);
929 f
= currState
->fault
;
936 TableWalker::memAttrs(ThreadContext
*tc
, TlbEntry
&te
, SCTLR sctlr
,
937 uint8_t texcb
, bool s
)
939 // Note: tc and sctlr local variables are hiding tc and sctrl class
941 DPRINTF(TLBVerbose
, "memAttrs texcb:%d s:%d\n", texcb
, s
);
942 te
.shareable
= false; // default value
943 te
.nonCacheable
= false;
944 te
.outerShareable
= false;
945 if (sctlr
.tre
== 0 || ((sctlr
.tre
== 1) && (sctlr
.m
== 0))) {
947 case 0: // Stongly-ordered
948 te
.nonCacheable
= true;
949 te
.mtype
= TlbEntry::MemoryType::StronglyOrdered
;
954 case 1: // Shareable Device
955 te
.nonCacheable
= true;
956 te
.mtype
= TlbEntry::MemoryType::Device
;
961 case 2: // Outer and Inner Write-Through, no Write-Allocate
962 te
.mtype
= TlbEntry::MemoryType::Normal
;
965 te
.outerAttrs
= bits(texcb
, 1, 0);
967 case 3: // Outer and Inner Write-Back, no Write-Allocate
968 te
.mtype
= TlbEntry::MemoryType::Normal
;
971 te
.outerAttrs
= bits(texcb
, 1, 0);
973 case 4: // Outer and Inner Non-cacheable
974 te
.nonCacheable
= true;
975 te
.mtype
= TlbEntry::MemoryType::Normal
;
978 te
.outerAttrs
= bits(texcb
, 1, 0);
981 panic("Reserved texcb value!\n");
983 case 6: // Implementation Defined
984 panic("Implementation-defined texcb value!\n");
986 case 7: // Outer and Inner Write-Back, Write-Allocate
987 te
.mtype
= TlbEntry::MemoryType::Normal
;
992 case 8: // Non-shareable Device
993 te
.nonCacheable
= true;
994 te
.mtype
= TlbEntry::MemoryType::Device
;
995 te
.shareable
= false;
999 case 9 ... 15: // Reserved
1000 panic("Reserved texcb value!\n");
1002 case 16 ... 31: // Cacheable Memory
1003 te
.mtype
= TlbEntry::MemoryType::Normal
;
1005 if (bits(texcb
, 1,0) == 0 || bits(texcb
, 3,2) == 0)
1006 te
.nonCacheable
= true;
1007 te
.innerAttrs
= bits(texcb
, 1, 0);
1008 te
.outerAttrs
= bits(texcb
, 3, 2);
1011 panic("More than 32 states for 5 bits?\n");
1015 PRRR prrr
= tc
->readMiscReg(flattenMiscRegNsBanked(MISCREG_PRRR
,
1016 currState
->tc
, !currState
->isSecure
));
1017 NMRR nmrr
= tc
->readMiscReg(flattenMiscRegNsBanked(MISCREG_NMRR
,
1018 currState
->tc
, !currState
->isSecure
));
1019 DPRINTF(TLBVerbose
, "memAttrs PRRR:%08x NMRR:%08x\n", prrr
, nmrr
);
1020 uint8_t curr_tr
= 0, curr_ir
= 0, curr_or
= 0;
1021 switch(bits(texcb
, 2,0)) {
1026 te
.outerShareable
= (prrr
.nos0
== 0);
1032 te
.outerShareable
= (prrr
.nos1
== 0);
1038 te
.outerShareable
= (prrr
.nos2
== 0);
1044 te
.outerShareable
= (prrr
.nos3
== 0);
1050 te
.outerShareable
= (prrr
.nos4
== 0);
1056 te
.outerShareable
= (prrr
.nos5
== 0);
1059 panic("Imp defined type\n");
1064 te
.outerShareable
= (prrr
.nos7
== 0);
1070 DPRINTF(TLBVerbose
, "StronglyOrdered\n");
1071 te
.mtype
= TlbEntry::MemoryType::StronglyOrdered
;
1072 te
.nonCacheable
= true;
1075 te
.shareable
= true;
1078 DPRINTF(TLBVerbose
, "Device ds1:%d ds0:%d s:%d\n",
1079 prrr
.ds1
, prrr
.ds0
, s
);
1080 te
.mtype
= TlbEntry::MemoryType::Device
;
1081 te
.nonCacheable
= true;
1085 te
.shareable
= true;
1087 te
.shareable
= true;
1090 DPRINTF(TLBVerbose
, "Normal ns1:%d ns0:%d s:%d\n",
1091 prrr
.ns1
, prrr
.ns0
, s
);
1092 te
.mtype
= TlbEntry::MemoryType::Normal
;
1094 te
.shareable
= true;
1096 te
.shareable
= true;
1099 panic("Reserved type");
1102 if (te
.mtype
== TlbEntry::MemoryType::Normal
){
1105 te
.nonCacheable
= true;
1121 te
.nonCacheable
= true;
1136 DPRINTF(TLBVerbose
, "memAttrs: shareable: %d, innerAttrs: %d, "
1138 te
.shareable
, te
.innerAttrs
, te
.outerAttrs
);
1139 te
.setAttributes(false);
1143 TableWalker::memAttrsLPAE(ThreadContext
*tc
, TlbEntry
&te
,
1144 LongDescriptor
&lDescriptor
)
1149 uint8_t sh
= lDescriptor
.sh();
1150 // Different format and source of attributes if this is a stage 2
1153 attr
= lDescriptor
.memAttr();
1154 uint8_t attr_3_2
= (attr
>> 2) & 0x3;
1155 uint8_t attr_1_0
= attr
& 0x3;
1157 DPRINTF(TLBVerbose
, "memAttrsLPAE MemAttr:%#x sh:%#x\n", attr
, sh
);
1159 if (attr_3_2
== 0) {
1160 te
.mtype
= attr_1_0
== 0 ? TlbEntry::MemoryType::StronglyOrdered
1161 : TlbEntry::MemoryType::Device
;
1163 te
.innerAttrs
= attr_1_0
== 0 ? 1 : 3;
1164 te
.nonCacheable
= true;
1166 te
.mtype
= TlbEntry::MemoryType::Normal
;
1167 te
.outerAttrs
= attr_3_2
== 1 ? 0 :
1168 attr_3_2
== 2 ? 2 : 1;
1169 te
.innerAttrs
= attr_1_0
== 1 ? 0 :
1170 attr_1_0
== 2 ? 6 : 5;
1171 te
.nonCacheable
= (attr_3_2
== 1) || (attr_1_0
== 1);
1174 uint8_t attrIndx
= lDescriptor
.attrIndx();
1176 // LPAE always uses remapping of memory attributes, irrespective of the
1177 // value of SCTLR.TRE
1178 MiscRegIndex reg
= attrIndx
& 0x4 ? MISCREG_MAIR1
: MISCREG_MAIR0
;
1179 int reg_as_int
= flattenMiscRegNsBanked(reg
, currState
->tc
,
1180 !currState
->isSecure
);
1181 uint32_t mair
= currState
->tc
->readMiscReg(reg_as_int
);
1182 attr
= (mair
>> (8 * (attrIndx
% 4))) & 0xff;
1183 uint8_t attr_7_4
= bits(attr
, 7, 4);
1184 uint8_t attr_3_0
= bits(attr
, 3, 0);
1185 DPRINTF(TLBVerbose
, "memAttrsLPAE AttrIndx:%#x sh:%#x, attr %#x\n", attrIndx
, sh
, attr
);
1187 // Note: the memory subsystem only cares about the 'cacheable' memory
1188 // attribute. The other attributes are only used to fill the PAR register
1189 // accordingly to provide the illusion of full support
1190 te
.nonCacheable
= false;
1194 // Strongly-ordered or Device memory
1195 if (attr_3_0
== 0x0)
1196 te
.mtype
= TlbEntry::MemoryType::StronglyOrdered
;
1197 else if (attr_3_0
== 0x4)
1198 te
.mtype
= TlbEntry::MemoryType::Device
;
1200 panic("Unpredictable behavior\n");
1201 te
.nonCacheable
= true;
1205 // Normal memory, Outer Non-cacheable
1206 te
.mtype
= TlbEntry::MemoryType::Normal
;
1208 if (attr_3_0
== 0x4)
1209 // Inner Non-cacheable
1210 te
.nonCacheable
= true;
1211 else if (attr_3_0
< 0x8)
1212 panic("Unpredictable behavior\n");
1222 if (attr_7_4
& 0x4) {
1223 te
.outerAttrs
= (attr_7_4
& 1) ? 1 : 3;
1225 te
.outerAttrs
= 0x2;
1227 // Normal memory, Outer Cacheable
1228 te
.mtype
= TlbEntry::MemoryType::Normal
;
1229 if (attr_3_0
!= 0x4 && attr_3_0
< 0x8)
1230 panic("Unpredictable behavior\n");
1233 panic("Unpredictable behavior\n");
1239 te
.innerAttrs
= 0x1;
1242 te
.innerAttrs
= attr_7_4
== 0 ? 0x3 : 0;
1254 te
.innerAttrs
= attr_3_0
& 1 ? 0x5 : 0x7;
1257 panic("Unpredictable behavior\n");
1262 te
.outerShareable
= sh
== 2;
1263 te
.shareable
= (sh
& 0x2) ? true : false;
1264 te
.setAttributes(true);
1265 te
.attributes
|= (uint64_t) attr
<< 56;
1269 TableWalker::memAttrsAArch64(ThreadContext
*tc
, TlbEntry
&te
, uint8_t attrIndx
,
1272 DPRINTF(TLBVerbose
, "memAttrsAArch64 AttrIndx:%#x sh:%#x\n", attrIndx
, sh
);
1276 switch (currState
->el
) {
1279 mair
= tc
->readMiscReg(MISCREG_MAIR_EL1
);
1282 mair
= tc
->readMiscReg(MISCREG_MAIR_EL2
);
1285 mair
= tc
->readMiscReg(MISCREG_MAIR_EL3
);
1288 panic("Invalid exception level");
1292 // Select attributes
1293 uint8_t attr
= bits(mair
, 8 * attrIndx
+ 7, 8 * attrIndx
);
1294 uint8_t attr_lo
= bits(attr
, 3, 0);
1295 uint8_t attr_hi
= bits(attr
, 7, 4);
1298 te
.mtype
= attr_hi
== 0 ? TlbEntry::MemoryType::Device
: TlbEntry::MemoryType::Normal
;
1301 te
.nonCacheable
= false;
1302 if (te
.mtype
== TlbEntry::MemoryType::Device
|| // Device memory
1303 attr_hi
== 0x8 || // Normal memory, Outer Non-cacheable
1304 attr_lo
== 0x8) { // Normal memory, Inner Non-cacheable
1305 te
.nonCacheable
= true;
1308 te
.shareable
= sh
== 2;
1309 te
.outerShareable
= (sh
& 0x2) ? true : false;
1310 // Attributes formatted according to the 64-bit PAR
1311 te
.attributes
= ((uint64_t) attr
<< 56) |
1312 (1 << 11) | // LPAE bit
1313 (te
.ns
<< 9) | // NS bit
1318 TableWalker::doL1Descriptor()
1320 if (currState
->fault
!= NoFault
) {
1324 DPRINTF(TLB
, "L1 descriptor for %#x is %#x\n",
1325 currState
->vaddr_tainted
, currState
->l1Desc
.data
);
1328 switch (currState
->l1Desc
.type()) {
1329 case L1Descriptor::Ignore
:
1330 case L1Descriptor::Reserved
:
1331 if (!currState
->timing
) {
1332 currState
->tc
= NULL
;
1333 currState
->req
= NULL
;
1335 DPRINTF(TLB
, "L1 Descriptor Reserved/Ignore, causing fault\n");
1336 if (currState
->isFetch
)
1338 std::make_shared
<PrefetchAbort
>(
1339 currState
->vaddr_tainted
,
1340 ArmFault::TranslationLL
+ L1
,
1342 ArmFault::VmsaTran
);
1345 std::make_shared
<DataAbort
>(
1346 currState
->vaddr_tainted
,
1347 TlbEntry::DomainType::NoAccess
,
1349 ArmFault::TranslationLL
+ L1
, isStage2
,
1350 ArmFault::VmsaTran
);
1352 case L1Descriptor::Section
:
1353 if (currState
->sctlr
.afe
&& bits(currState
->l1Desc
.ap(), 0) == 0) {
1354 /** @todo: check sctlr.ha (bit[17]) if Hardware Access Flag is
1355 * enabled if set, do l1.Desc.setAp0() instead of generating
1359 currState
->fault
= std::make_shared
<DataAbort
>(
1360 currState
->vaddr_tainted
,
1361 currState
->l1Desc
.domain(),
1363 ArmFault::AccessFlagLL
+ L1
,
1365 ArmFault::VmsaTran
);
1367 if (currState
->l1Desc
.supersection()) {
1368 panic("Haven't implemented supersections\n");
1370 insertTableEntry(currState
->l1Desc
, false);
1372 case L1Descriptor::PageTable
:
1375 l2desc_addr
= currState
->l1Desc
.l2Addr() |
1376 (bits(currState
->vaddr
, 19, 12) << 2);
1377 DPRINTF(TLB
, "L1 descriptor points to page table at: %#x (%s)\n",
1378 l2desc_addr
, currState
->isSecure
? "s" : "ns");
1380 // Trickbox address check
1381 currState
->fault
= tlb
->walkTrickBoxCheck(
1382 l2desc_addr
, currState
->isSecure
, currState
->vaddr
,
1383 sizeof(uint32_t), currState
->isFetch
, currState
->isWrite
,
1384 currState
->l1Desc
.domain(), L2
);
1386 if (currState
->fault
) {
1387 if (!currState
->timing
) {
1388 currState
->tc
= NULL
;
1389 currState
->req
= NULL
;
1394 Request::Flags flag
= 0;
1395 if (currState
->isSecure
)
1396 flag
.set(Request::SECURE
);
1399 delayed
= fetchDescriptor(l2desc_addr
,
1400 (uint8_t*)&currState
->l2Desc
.data
,
1401 sizeof(uint32_t), flag
, -1, &doL2DescEvent
,
1402 &TableWalker::doL2Descriptor
);
1404 currState
->delayed
= true;
1410 panic("A new type in a 2 bit field?\n");
1415 TableWalker::doLongDescriptor()
1417 if (currState
->fault
!= NoFault
) {
1421 DPRINTF(TLB
, "L%d descriptor for %#llx is %#llx (%s)\n",
1422 currState
->longDesc
.lookupLevel
, currState
->vaddr_tainted
,
1423 currState
->longDesc
.data
,
1424 currState
->aarch64
? "AArch64" : "long-desc.");
1426 if ((currState
->longDesc
.type() == LongDescriptor::Block
) ||
1427 (currState
->longDesc
.type() == LongDescriptor::Page
)) {
1428 DPRINTF(TLBVerbose
, "Analyzing L%d descriptor: %#llx, pxn: %d, "
1429 "xn: %d, ap: %d, af: %d, type: %d\n",
1430 currState
->longDesc
.lookupLevel
,
1431 currState
->longDesc
.data
,
1432 currState
->longDesc
.pxn(),
1433 currState
->longDesc
.xn(),
1434 currState
->longDesc
.ap(),
1435 currState
->longDesc
.af(),
1436 currState
->longDesc
.type());
1438 DPRINTF(TLBVerbose
, "Analyzing L%d descriptor: %#llx, type: %d\n",
1439 currState
->longDesc
.lookupLevel
,
1440 currState
->longDesc
.data
,
1441 currState
->longDesc
.type());
1446 switch (currState
->longDesc
.type()) {
1447 case LongDescriptor::Invalid
:
1448 if (!currState
->timing
) {
1449 currState
->tc
= NULL
;
1450 currState
->req
= NULL
;
1453 DPRINTF(TLB
, "L%d descriptor Invalid, causing fault type %d\n",
1454 currState
->longDesc
.lookupLevel
,
1455 ArmFault::TranslationLL
+ currState
->longDesc
.lookupLevel
);
1456 if (currState
->isFetch
)
1457 currState
->fault
= std::make_shared
<PrefetchAbort
>(
1458 currState
->vaddr_tainted
,
1459 ArmFault::TranslationLL
+ currState
->longDesc
.lookupLevel
,
1461 ArmFault::LpaeTran
);
1463 currState
->fault
= std::make_shared
<DataAbort
>(
1464 currState
->vaddr_tainted
,
1465 TlbEntry::DomainType::NoAccess
,
1467 ArmFault::TranslationLL
+ currState
->longDesc
.lookupLevel
,
1469 ArmFault::LpaeTran
);
1471 case LongDescriptor::Block
:
1472 case LongDescriptor::Page
:
1476 // Check for address size fault
1477 if (checkAddrSizeFaultAArch64(
1478 mbits(currState
->longDesc
.data
, MaxPhysAddrRange
- 1,
1479 currState
->longDesc
.offsetBits()),
1480 currState
->physAddrRange
)) {
1482 DPRINTF(TLB
, "L%d descriptor causing Address Size Fault\n",
1483 currState
->longDesc
.lookupLevel
);
1484 // Check for access fault
1485 } else if (currState
->longDesc
.af() == 0) {
1487 DPRINTF(TLB
, "L%d descriptor causing Access Fault\n",
1488 currState
->longDesc
.lookupLevel
);
1492 if (currState
->isFetch
)
1493 currState
->fault
= std::make_shared
<PrefetchAbort
>(
1494 currState
->vaddr_tainted
,
1495 (aff
? ArmFault::AccessFlagLL
: ArmFault::AddressSizeLL
) +
1496 currState
->longDesc
.lookupLevel
,
1498 ArmFault::LpaeTran
);
1500 currState
->fault
= std::make_shared
<DataAbort
>(
1501 currState
->vaddr_tainted
,
1502 TlbEntry::DomainType::NoAccess
, currState
->isWrite
,
1503 (aff
? ArmFault::AccessFlagLL
: ArmFault::AddressSizeLL
) +
1504 currState
->longDesc
.lookupLevel
,
1506 ArmFault::LpaeTran
);
1508 insertTableEntry(currState
->longDesc
, true);
1512 case LongDescriptor::Table
:
1514 // Set hierarchical permission flags
1515 currState
->secureLookup
= currState
->secureLookup
&&
1516 currState
->longDesc
.secureTable();
1517 currState
->rwTable
= currState
->rwTable
&&
1518 currState
->longDesc
.rwTable();
1519 currState
->userTable
= currState
->userTable
&&
1520 currState
->longDesc
.userTable();
1521 currState
->xnTable
= currState
->xnTable
||
1522 currState
->longDesc
.xnTable();
1523 currState
->pxnTable
= currState
->pxnTable
||
1524 currState
->longDesc
.pxnTable();
1526 // Set up next level lookup
1527 Addr next_desc_addr
= currState
->longDesc
.nextDescAddr(
1530 DPRINTF(TLB
, "L%d descriptor points to L%d descriptor at: %#x (%s)\n",
1531 currState
->longDesc
.lookupLevel
,
1532 currState
->longDesc
.lookupLevel
+ 1,
1534 currState
->secureLookup
? "s" : "ns");
1536 // Check for address size fault
1537 if (currState
->aarch64
&& checkAddrSizeFaultAArch64(
1538 next_desc_addr
, currState
->physAddrRange
)) {
1539 DPRINTF(TLB
, "L%d descriptor causing Address Size Fault\n",
1540 currState
->longDesc
.lookupLevel
);
1541 if (currState
->isFetch
)
1542 currState
->fault
= std::make_shared
<PrefetchAbort
>(
1543 currState
->vaddr_tainted
,
1544 ArmFault::AddressSizeLL
1545 + currState
->longDesc
.lookupLevel
,
1547 ArmFault::LpaeTran
);
1549 currState
->fault
= std::make_shared
<DataAbort
>(
1550 currState
->vaddr_tainted
,
1551 TlbEntry::DomainType::NoAccess
, currState
->isWrite
,
1552 ArmFault::AddressSizeLL
1553 + currState
->longDesc
.lookupLevel
,
1555 ArmFault::LpaeTran
);
1559 // Trickbox address check
1560 currState
->fault
= tlb
->walkTrickBoxCheck(
1561 next_desc_addr
, currState
->vaddr
,
1562 currState
->vaddr
, sizeof(uint64_t),
1563 currState
->isFetch
, currState
->isWrite
,
1564 TlbEntry::DomainType::Client
,
1565 toLookupLevel(currState
->longDesc
.lookupLevel
+1));
1567 if (currState
->fault
) {
1568 if (!currState
->timing
) {
1569 currState
->tc
= NULL
;
1570 currState
->req
= NULL
;
1575 Request::Flags flag
= 0;
1576 if (currState
->secureLookup
)
1577 flag
.set(Request::SECURE
);
1579 currState
->longDesc
.lookupLevel
=
1580 (LookupLevel
) (currState
->longDesc
.lookupLevel
+ 1);
1581 Event
*event
= NULL
;
1582 switch (currState
->longDesc
.lookupLevel
) {
1584 assert(currState
->aarch64
);
1585 event
= &doL1LongDescEvent
;
1588 event
= &doL2LongDescEvent
;
1591 event
= &doL3LongDescEvent
;
1594 panic("Wrong lookup level in table walk\n");
1599 delayed
= fetchDescriptor(next_desc_addr
, (uint8_t*)&currState
->longDesc
.data
,
1600 sizeof(uint64_t), flag
, -1, event
,
1601 &TableWalker::doLongDescriptor
);
1603 currState
->delayed
= true;
1608 panic("A new type in a 2 bit field?\n");
1613 TableWalker::doL2Descriptor()
1615 if (currState
->fault
!= NoFault
) {
1619 DPRINTF(TLB
, "L2 descriptor for %#x is %#x\n",
1620 currState
->vaddr_tainted
, currState
->l2Desc
.data
);
1623 if (currState
->l2Desc
.invalid()) {
1624 DPRINTF(TLB
, "L2 descriptor invalid, causing fault\n");
1625 if (!currState
->timing
) {
1626 currState
->tc
= NULL
;
1627 currState
->req
= NULL
;
1629 if (currState
->isFetch
)
1630 currState
->fault
= std::make_shared
<PrefetchAbort
>(
1631 currState
->vaddr_tainted
,
1632 ArmFault::TranslationLL
+ L2
,
1634 ArmFault::VmsaTran
);
1636 currState
->fault
= std::make_shared
<DataAbort
>(
1637 currState
->vaddr_tainted
, currState
->l1Desc
.domain(),
1638 currState
->isWrite
, ArmFault::TranslationLL
+ L2
,
1640 ArmFault::VmsaTran
);
1644 if (currState
->sctlr
.afe
&& bits(currState
->l2Desc
.ap(), 0) == 0) {
1645 /** @todo: check sctlr.ha (bit[17]) if Hardware Access Flag is enabled
1646 * if set, do l2.Desc.setAp0() instead of generating AccessFlag0
1648 DPRINTF(TLB
, "Generating access fault at L2, afe: %d, ap: %d\n",
1649 currState
->sctlr
.afe
, currState
->l2Desc
.ap());
1651 currState
->fault
= std::make_shared
<DataAbort
>(
1652 currState
->vaddr_tainted
,
1653 TlbEntry::DomainType::NoAccess
, currState
->isWrite
,
1654 ArmFault::AccessFlagLL
+ L2
, isStage2
,
1655 ArmFault::VmsaTran
);
1658 insertTableEntry(currState
->l2Desc
, false);
1662 TableWalker::doL1DescriptorWrapper()
1664 currState
= stateQueues
[L1
].front();
1665 currState
->delayed
= false;
1666 // if there's a stage2 translation object we don't need it any more
1667 if (currState
->stage2Tran
) {
1668 delete currState
->stage2Tran
;
1669 currState
->stage2Tran
= NULL
;
1673 DPRINTF(TLBVerbose
, "L1 Desc object host addr: %p\n",&currState
->l1Desc
.data
);
1674 DPRINTF(TLBVerbose
, "L1 Desc object data: %08x\n",currState
->l1Desc
.data
);
1676 DPRINTF(TLBVerbose
, "calling doL1Descriptor for vaddr:%#x\n", currState
->vaddr_tainted
);
1679 stateQueues
[L1
].pop_front();
1681 // Check if fault was generated
1682 if (currState
->fault
!= NoFault
) {
1683 currState
->transState
->finish(currState
->fault
, currState
->req
,
1684 currState
->tc
, currState
->mode
);
1687 nextWalk(currState
->tc
);
1689 currState
->req
= NULL
;
1690 currState
->tc
= NULL
;
1691 currState
->delayed
= false;
1694 else if (!currState
->delayed
) {
1695 // delay is not set so there is no L2 to do
1696 // Don't finish the translation if a stage 2 look up is underway
1697 if (!currState
->doingStage2
) {
1698 DPRINTF(TLBVerbose
, "calling translateTiming again\n");
1699 currState
->fault
= tlb
->translateTiming(currState
->req
, currState
->tc
,
1700 currState
->transState
, currState
->mode
);
1704 nextWalk(currState
->tc
);
1706 currState
->req
= NULL
;
1707 currState
->tc
= NULL
;
1708 currState
->delayed
= false;
1711 // need to do L2 descriptor
1712 stateQueues
[L2
].push_back(currState
);
1718 TableWalker::doL2DescriptorWrapper()
1720 currState
= stateQueues
[L2
].front();
1721 assert(currState
->delayed
);
1722 // if there's a stage2 translation object we don't need it any more
1723 if (currState
->stage2Tran
) {
1724 delete currState
->stage2Tran
;
1725 currState
->stage2Tran
= NULL
;
1728 DPRINTF(TLBVerbose
, "calling doL2Descriptor for vaddr:%#x\n",
1729 currState
->vaddr_tainted
);
1732 // Check if fault was generated
1733 if (currState
->fault
!= NoFault
) {
1734 currState
->transState
->finish(currState
->fault
, currState
->req
,
1735 currState
->tc
, currState
->mode
);
1738 // Don't finish the translation if a stage 2 look up is underway
1739 if (!currState
->doingStage2
) {
1740 DPRINTF(TLBVerbose
, "calling translateTiming again\n");
1741 currState
->fault
= tlb
->translateTiming(currState
->req
,
1742 currState
->tc
, currState
->transState
, currState
->mode
);
1747 stateQueues
[L2
].pop_front();
1750 nextWalk(currState
->tc
);
1752 currState
->req
= NULL
;
1753 currState
->tc
= NULL
;
1754 currState
->delayed
= false;
1761 TableWalker::doL0LongDescriptorWrapper()
1763 doLongDescriptorWrapper(L0
);
1767 TableWalker::doL1LongDescriptorWrapper()
1769 doLongDescriptorWrapper(L1
);
1773 TableWalker::doL2LongDescriptorWrapper()
1775 doLongDescriptorWrapper(L2
);
1779 TableWalker::doL3LongDescriptorWrapper()
1781 doLongDescriptorWrapper(L3
);
1785 TableWalker::doLongDescriptorWrapper(LookupLevel curr_lookup_level
)
1787 currState
= stateQueues
[curr_lookup_level
].front();
1788 assert(curr_lookup_level
== currState
->longDesc
.lookupLevel
);
1789 currState
->delayed
= false;
1791 // if there's a stage2 translation object we don't need it any more
1792 if (currState
->stage2Tran
) {
1793 delete currState
->stage2Tran
;
1794 currState
->stage2Tran
= NULL
;
1797 DPRINTF(TLBVerbose
, "calling doLongDescriptor for vaddr:%#x\n",
1798 currState
->vaddr_tainted
);
1801 stateQueues
[curr_lookup_level
].pop_front();
1803 if (currState
->fault
!= NoFault
) {
1804 // A fault was generated
1805 currState
->transState
->finish(currState
->fault
, currState
->req
,
1806 currState
->tc
, currState
->mode
);
1809 nextWalk(currState
->tc
);
1811 currState
->req
= NULL
;
1812 currState
->tc
= NULL
;
1813 currState
->delayed
= false;
1815 } else if (!currState
->delayed
) {
1816 // No additional lookups required
1817 // Don't finish the translation if a stage 2 look up is underway
1818 if (!currState
->doingStage2
) {
1819 DPRINTF(TLBVerbose
, "calling translateTiming again\n");
1820 currState
->fault
= tlb
->translateTiming(currState
->req
, currState
->tc
,
1821 currState
->transState
,
1826 nextWalk(currState
->tc
);
1828 currState
->req
= NULL
;
1829 currState
->tc
= NULL
;
1830 currState
->delayed
= false;
1833 if (curr_lookup_level
>= MAX_LOOKUP_LEVELS
- 1)
1834 panic("Max. number of lookups already reached in table walk\n");
1835 // Need to perform additional lookups
1836 stateQueues
[currState
->longDesc
.lookupLevel
].push_back(currState
);
1843 TableWalker::nextWalk(ThreadContext
*tc
)
1845 if (pendingQueue
.size())
1846 schedule(doProcessEvent
, clockEdge(Cycles(1)));
1850 TableWalker::fetchDescriptor(Addr descAddr
, uint8_t *data
, int numBytes
,
1851 Request::Flags flags
, int queueIndex
, Event
*event
,
1852 void (TableWalker::*doDescriptor
)())
1854 bool isTiming
= currState
->timing
;
1856 // do the requests for the page table descriptors have to go through the
1858 if (currState
->stage2Req
) {
1860 flags
= flags
| TLB::MustBeOne
;
1863 Stage2MMU::Stage2Translation
*tran
= new
1864 Stage2MMU::Stage2Translation(*stage2Mmu
, data
, event
,
1866 currState
->stage2Tran
= tran
;
1867 stage2Mmu
->readDataTimed(currState
->tc
, descAddr
, tran
, numBytes
,
1869 fault
= tran
->fault
;
1871 fault
= stage2Mmu
->readDataUntimed(currState
->tc
,
1872 currState
->vaddr
, descAddr
, data
, numBytes
, flags
, masterId
,
1873 currState
->functional
);
1876 if (fault
!= NoFault
) {
1877 currState
->fault
= fault
;
1880 if (queueIndex
>= 0) {
1881 DPRINTF(TLBVerbose
, "Adding to walker fifo: queue size before adding: %d\n",
1882 stateQueues
[queueIndex
].size());
1883 stateQueues
[queueIndex
].push_back(currState
);
1887 (this->*doDescriptor
)();
1891 port
.dmaAction(MemCmd::ReadReq
, descAddr
, numBytes
, event
, data
,
1892 currState
->tc
->getCpuPtr()->clockPeriod(), flags
);
1893 if (queueIndex
>= 0) {
1894 DPRINTF(TLBVerbose
, "Adding to walker fifo: queue size before adding: %d\n",
1895 stateQueues
[queueIndex
].size());
1896 stateQueues
[queueIndex
].push_back(currState
);
1899 } else if (!currState
->functional
) {
1900 port
.dmaAction(MemCmd::ReadReq
, descAddr
, numBytes
, NULL
, data
,
1901 currState
->tc
->getCpuPtr()->clockPeriod(), flags
);
1902 (this->*doDescriptor
)();
1904 RequestPtr req
= new Request(descAddr
, numBytes
, flags
, masterId
);
1905 req
->taskId(ContextSwitchTaskId::DMA
);
1906 PacketPtr pkt
= new Packet(req
, MemCmd::ReadReq
);
1907 pkt
->dataStatic(data
);
1908 port
.sendFunctional(pkt
);
1909 (this->*doDescriptor
)();
1918 TableWalker::insertTableEntry(DescriptorBase
&descriptor
, bool longDescriptor
)
1922 // Create and fill a new page table entry
1924 te
.longDescFormat
= longDescriptor
;
1925 te
.isHyp
= currState
->isHyp
;
1926 te
.asid
= currState
->asid
;
1927 te
.vmid
= currState
->vmid
;
1928 te
.N
= descriptor
.offsetBits();
1929 te
.vpn
= currState
->vaddr
>> te
.N
;
1930 te
.size
= (1<<te
.N
) - 1;
1931 te
.pfn
= descriptor
.pfn();
1932 te
.domain
= descriptor
.domain();
1933 te
.lookupLevel
= descriptor
.lookupLevel
;
1934 te
.ns
= !descriptor
.secure(haveSecurity
, currState
) || isStage2
;
1935 te
.nstid
= !currState
->isSecure
;
1936 te
.xn
= descriptor
.xn();
1937 if (currState
->aarch64
)
1938 te
.el
= currState
->el
;
1942 // ASID has no meaning for stage 2 TLB entries, so mark all stage 2 entries
1944 te
.global
= descriptor
.global(currState
) || isStage2
;
1945 if (longDescriptor
) {
1946 LongDescriptor lDescriptor
=
1947 dynamic_cast<LongDescriptor
&>(descriptor
);
1949 te
.xn
|= currState
->xnTable
;
1950 te
.pxn
= currState
->pxnTable
|| lDescriptor
.pxn();
1952 // this is actually the HAP field, but its stored in the same bit
1953 // possitions as the AP field in a stage 1 translation.
1954 te
.hap
= lDescriptor
.ap();
1956 te
.ap
= ((!currState
->rwTable
|| descriptor
.ap() >> 1) << 1) |
1957 (currState
->userTable
&& (descriptor
.ap() & 0x1));
1959 if (currState
->aarch64
)
1960 memAttrsAArch64(currState
->tc
, te
, currState
->longDesc
.attrIndx(),
1961 currState
->longDesc
.sh());
1963 memAttrsLPAE(currState
->tc
, te
, lDescriptor
);
1965 te
.ap
= descriptor
.ap();
1966 memAttrs(currState
->tc
, te
, currState
->sctlr
, descriptor
.texcb(),
1967 descriptor
.shareable());
1971 DPRINTF(TLB
, descriptor
.dbgHeader().c_str());
1972 DPRINTF(TLB
, " - N:%d pfn:%#x size:%#x global:%d valid:%d\n",
1973 te
.N
, te
.pfn
, te
.size
, te
.global
, te
.valid
);
1974 DPRINTF(TLB
, " - vpn:%#x xn:%d pxn:%d ap:%d domain:%d asid:%d "
1975 "vmid:%d hyp:%d nc:%d ns:%d\n", te
.vpn
, te
.xn
, te
.pxn
,
1976 te
.ap
, static_cast<uint8_t>(te
.domain
), te
.asid
, te
.vmid
, te
.isHyp
,
1977 te
.nonCacheable
, te
.ns
);
1978 DPRINTF(TLB
, " - domain from L%d desc:%d data:%#x\n",
1979 descriptor
.lookupLevel
, static_cast<uint8_t>(descriptor
.domain()),
1980 descriptor
.getRawData());
1982 // Insert the entry into the TLB
1983 tlb
->insert(currState
->vaddr
, te
);
1984 if (!currState
->timing
) {
1985 currState
->tc
= NULL
;
1986 currState
->req
= NULL
;
1990 ArmISA::TableWalker
*
1991 ArmTableWalkerParams::create()
1993 return new ArmISA::TableWalker(this);
1997 TableWalker::toLookupLevel(uint8_t lookup_level_as_int
)
1999 switch (lookup_level_as_int
) {
2007 panic("Invalid lookup level conversion");