2 * Copyright (c) 2010, 2012-2013 ARM Limited
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
14 * Redistribution and use in source and binary forms, with or without
15 * modification, are permitted provided that the following conditions are
16 * met: redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer;
18 * redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution;
21 * neither the name of the copyright holders nor the names of its
22 * contributors may be used to endorse or promote products derived from
23 * this software without specific prior written permission.
25 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
26 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
27 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
28 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
29 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
30 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
31 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
32 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
33 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
34 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
35 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
41 #include "arch/arm/faults.hh"
42 #include "arch/arm/stage2_mmu.hh"
43 #include "arch/arm/system.hh"
44 #include "arch/arm/table_walker.hh"
45 #include "arch/arm/tlb.hh"
46 #include "cpu/base.hh"
47 #include "cpu/thread_context.hh"
48 #include "debug/Checkpoint.hh"
49 #include "debug/Drain.hh"
50 #include "debug/TLB.hh"
51 #include "debug/TLBVerbose.hh"
52 #include "sim/system.hh"
54 using namespace ArmISA
;
// Construct the table walker.  Wires the DMA port to the owning system,
// records whether this instance services stage-2 translations, and caches
// the system-wide ARM feature flags so they need not be re-queried on
// every walk.  NOTE(review): brace/blank lines of the original source are
// absent from this extract; only comment lines have been added here.
56 TableWalker::TableWalker(const Params
*p
)
57 : MemObject(p
), port(this, p
->sys
), drainManager(NULL
),
58 stage2Mmu(NULL
), isStage2(p
->is_stage2
), tlb(NULL
),
59 currState(NULL
), pending(false), masterId(p
->sys
->getMasterId(name())),
60 numSquashable(p
->num_squash_per_cycle
),
61 doL1DescEvent(this), doL2DescEvent(this),
62 doL0LongDescEvent(this), doL1LongDescEvent(this), doL2LongDescEvent(this),
63 doL3LongDescEvent(this),
68 // Cache system-level properties
// armSys is NULL when the owning System is not an ArmSystem; the feature
// queries below are presumably guarded by a null check on a line missing
// from this extract -- TODO confirm against the full source.
70 armSys
= dynamic_cast<ArmSystem
*>(p
->sys
);
// Cache the architectural feature set advertised by the system.
72 haveSecurity
= armSys
->haveSecurity();
73 _haveLPAE
= armSys
->haveLPAE();
74 _haveVirtualization
= armSys
->haveVirtualization();
75 physAddrRange
= armSys
->physAddrRange();
76 _haveLargeAsid64
= armSys
->haveLargeAsid64();
// Fallback (presumably the else branch of the missing null-armSys guard --
// TODO confirm): all optional architecture features default to disabled.
79 haveSecurity
= _haveLPAE
= _haveVirtualization
= false;
80 _haveLargeAsid64
= false;
86 TableWalker::~TableWalker()
91 TableWalker::WalkerState::WalkerState() : stage2Tran(NULL
), l2Desc(l1Desc
)
// Invoked when an in-flight walk finishes while a drain has been requested.
// Once no walker state remains queued at either short-descriptor lookup
// level and nothing is pending, mark this object Drained and notify the
// drain manager so system-wide draining can proceed.
96 TableWalker::completeDrain()
98 if (drainManager
&& stateQueues
[L1
].empty() && stateQueues
[L2
].empty() &&
99 pendingQueue
.empty()) {
100 setDrainState(Drainable::Drained
);
101 DPRINTF(Drain
, "TableWalker done draining, processing drain event\n");
// Tell the manager this Drainable has completed its drain.
102 drainManager
->signalDrainDone();
// Drain entry point: first drain the DMA port, then decide whether the
// walker itself still has outstanding work.  Returns the number of objects
// that still need to signal completion (port count, plus this walker when
// any state queue or the pending queue is non-empty).
108 TableWalker::drain(DrainManager
*dm
)
110 unsigned int count
= port
.drain(dm
);
112 bool state_queues_not_empty
= false;
// Scan every lookup level's queue for outstanding walk state.
114 for (int i
= 0; i
< MAX_LOOKUP_LEVELS
; ++i
) {
115 if (!stateQueues
[i
].empty()) {
116 state_queues_not_empty
= true;
// Outstanding walks: stay in Draining until completeDrain() fires.
121 if (state_queues_not_empty
|| pendingQueue
.size()) {
123 setDrainState(Drainable::Draining
);
124 DPRINTF(Drain
, "TableWalker not drained\n");
126 // return port drain count plus the table walker itself needs to drain
// No outstanding walks: the walker itself is immediately drained.
129 setDrainState(Drainable::Drained
);
130 DPRINTF(Drain
, "TableWalker free, no need to drain\n");
132 // table walker is drained, but its ports may still need to be drained
// Resume after a drain (e.g. checkpoint restore).  If the system runs in
// timing mode and a walk was in flight, special handling follows on lines
// missing from this extract -- presumably discarding the stale in-flight
// state, TODO confirm against the full source.
138 TableWalker::drainResume()
140 Drainable::drainResume();
141 if (params()->sys
->isTimingMode() && currState
) {
// Return the walker's master (DMA) port when asked for "port"; any other
// interface name is deferred to the MemObject base implementation.
148 TableWalker::getMasterPort(const std::string
&if_name
, PortID idx
)
150 if (if_name
== "port") {
153 return MemObject::getMasterPort(if_name
, idx
);
157 TableWalker::walk(RequestPtr _req
, ThreadContext
*_tc
, uint16_t _asid
,
158 uint8_t _vmid
, bool _isHyp
, TLB::Mode _mode
,
159 TLB::Translation
*_trans
, bool _timing
, bool _functional
,
160 bool secure
, TLB::ArmTranslationType tranType
)
162 assert(!(_functional
&& _timing
));
163 WalkerState
*savedCurrState
= NULL
;
165 if (!currState
&& !_functional
) {
166 // For atomic mode, a new WalkerState instance should be only created
167 // once per TLB. For timing mode, a new instance is generated for every
169 DPRINTF(TLBVerbose
, "creating new instance of WalkerState\n");
171 currState
= new WalkerState();
172 currState
->tableWalker
= this;
173 } else if (_functional
) {
174 // If we are mixing functional mode with timing (or even
175 // atomic), we need to to be careful and clean up after
176 // ourselves to not risk getting into an inconsistent state.
177 DPRINTF(TLBVerbose
, "creating functional instance of WalkerState\n");
178 savedCurrState
= currState
;
179 currState
= new WalkerState();
180 currState
->tableWalker
= this;
181 } else if (_timing
) {
182 // This is a translation that was completed and then faulted again
183 // because some underlying parameters that affect the translation
184 // changed out from under us (e.g. asid). It will either be a
185 // misprediction, in which case nothing will happen or we'll use
186 // this fault to re-execute the faulting instruction which should clean
188 if (currState
->vaddr_tainted
== _req
->getVaddr()) {
194 currState
->aarch64
= opModeIs64(currOpMode(_tc
));
195 currState
->el
= currEL(_tc
);
196 currState
->transState
= _trans
;
197 currState
->req
= _req
;
198 currState
->fault
= NoFault
;
199 currState
->asid
= _asid
;
200 currState
->vmid
= _vmid
;
201 currState
->isHyp
= _isHyp
;
202 currState
->timing
= _timing
;
203 currState
->functional
= _functional
;
204 currState
->mode
= _mode
;
205 currState
->tranType
= tranType
;
206 currState
->isSecure
= secure
;
207 currState
->physAddrRange
= physAddrRange
;
209 /** @todo These should be cached or grabbed from cached copies in
210 the TLB, all these miscreg reads are expensive */
211 currState
->vaddr_tainted
= currState
->req
->getVaddr();
212 if (currState
->aarch64
)
213 currState
->vaddr
= purifyTaggedAddr(currState
->vaddr_tainted
,
214 currState
->tc
, currState
->el
);
216 currState
->vaddr
= currState
->vaddr_tainted
;
218 if (currState
->aarch64
) {
219 switch (currState
->el
) {
222 currState
->sctlr
= currState
->tc
->readMiscReg(MISCREG_SCTLR_EL1
);
223 currState
->ttbcr
= currState
->tc
->readMiscReg(MISCREG_TCR_EL1
);
225 // @todo: uncomment this to enable Virtualization
227 // assert(haveVirtualization);
228 // currState->sctlr = currState->tc->readMiscReg(MISCREG_SCTLR_EL2);
229 // currState->ttbcr = currState->tc->readMiscReg(MISCREG_TCR_EL2);
232 assert(haveSecurity
);
233 currState
->sctlr
= currState
->tc
->readMiscReg(MISCREG_SCTLR_EL3
);
234 currState
->ttbcr
= currState
->tc
->readMiscReg(MISCREG_TCR_EL3
);
237 panic("Invalid exception level");
241 currState
->sctlr
= currState
->tc
->readMiscReg(flattenMiscRegNsBanked(
242 MISCREG_SCTLR
, currState
->tc
, !currState
->isSecure
));
243 currState
->ttbcr
= currState
->tc
->readMiscReg(flattenMiscRegNsBanked(
244 MISCREG_TTBCR
, currState
->tc
, !currState
->isSecure
));
245 currState
->htcr
= currState
->tc
->readMiscReg(MISCREG_HTCR
);
246 currState
->hcr
= currState
->tc
->readMiscReg(MISCREG_HCR
);
247 currState
->vtcr
= currState
->tc
->readMiscReg(MISCREG_VTCR
);
249 sctlr
= currState
->sctlr
;
251 currState
->isFetch
= (currState
->mode
== TLB::Execute
);
252 currState
->isWrite
= (currState
->mode
== TLB::Write
);
254 // We only do a second stage of translation if we're not secure, or in
255 // hyp mode, the second stage MMU is enabled, and this table walker
256 // instance is the first stage.
257 currState
->doingStage2
= false;
258 // @todo: for now disable this in AArch64 (HCR is not set)
259 currState
->stage2Req
= !currState
->aarch64
&& currState
->hcr
.vm
&&
260 !isStage2
&& !currState
->isSecure
&& !currState
->isHyp
;
262 bool long_desc_format
= currState
->aarch64
||
263 (_haveLPAE
&& currState
->ttbcr
.eae
) ||
266 if (long_desc_format
) {
267 // Helper variables used for hierarchical permissions
268 currState
->secureLookup
= currState
->isSecure
;
269 currState
->rwTable
= true;
270 currState
->userTable
= true;
271 currState
->xnTable
= false;
272 currState
->pxnTable
= false;
275 if (!currState
->timing
) {
276 Fault fault
= NoFault
;
277 if (currState
->aarch64
)
278 fault
= processWalkAArch64();
279 else if (long_desc_format
)
280 fault
= processWalkLPAE();
282 fault
= processWalk();
284 // If this was a functional non-timing access restore state to
286 if (currState
->functional
) {
288 currState
= savedCurrState
;
293 if (pending
|| pendingQueue
.size()) {
294 pendingQueue
.push_back(currState
);
298 if (currState
->aarch64
)
299 return processWalkAArch64();
300 else if (long_desc_format
)
301 return processWalkLPAE();
303 return processWalk();
310 TableWalker::processWalkWrapper()
313 assert(pendingQueue
.size());
314 currState
= pendingQueue
.front();
316 ExceptionLevel target_el
= EL0
;
317 if (currState
->aarch64
)
318 target_el
= currEL(currState
->tc
);
322 // Check if a previous walk filled this request already
323 // @TODO Should this always be the TLB or should we look in the stage2 TLB?
324 TlbEntry
* te
= tlb
->lookup(currState
->vaddr
, currState
->asid
,
325 currState
->vmid
, currState
->isHyp
, currState
->isSecure
, true, false,
328 // Check if we still need to have a walk for this request. If the requesting
329 // instruction has been squashed, or a previous walk has filled the TLB with
330 // a match, we just want to get rid of the walk. The latter could happen
331 // when there are multiple outstanding misses to a single page and a
332 // previous request has been successfully translated.
333 if (!currState
->transState
->squashed() && !te
) {
334 // We've got a valid request, lets process it
336 pendingQueue
.pop_front();
337 if (currState
->aarch64
)
338 processWalkAArch64();
339 else if ((_haveLPAE
&& currState
->ttbcr
.eae
) || currState
->isHyp
|| isStage2
)
347 // If the instruction that we were translating for has been
348 // squashed we shouldn't bother.
349 unsigned num_squashed
= 0;
350 ThreadContext
*tc
= currState
->tc
;
351 while ((num_squashed
< numSquashable
) && currState
&&
352 (currState
->transState
->squashed() || te
)) {
353 pendingQueue
.pop_front();
356 DPRINTF(TLB
, "Squashing table walk for address %#x\n",
357 currState
->vaddr_tainted
);
359 if (currState
->transState
->squashed()) {
360 // finish the translation which will delete the translation object
361 currState
->transState
->finish(new UnimpFault("Squashed Inst"),
362 currState
->req
, currState
->tc
, currState
->mode
);
364 // translate the request now that we know it will work
365 tlb
->translateTiming(currState
->req
, currState
->tc
,
366 currState
->transState
, currState
->mode
);
370 // delete the current request
373 // peak at the next one
374 if (pendingQueue
.size()) {
375 currState
= pendingQueue
.front();
376 te
= tlb
->lookup(currState
->vaddr
, currState
->asid
,
377 currState
->vmid
, currState
->isHyp
, currState
->isSecure
, true,
380 // Terminate the loop, nothing more to do
385 // if we've still got pending translations schedule more work
392 TableWalker::processWalk()
396 // If translation isn't enabled, we shouldn't be here
397 assert(currState
->sctlr
.m
|| isStage2
);
399 DPRINTF(TLB
, "Beginning table walk for address %#x, TTBCR: %#x, bits:%#x\n",
400 currState
->vaddr_tainted
, currState
->ttbcr
, mbits(currState
->vaddr
, 31,
401 32 - currState
->ttbcr
.n
));
403 if (currState
->ttbcr
.n
== 0 || !mbits(currState
->vaddr
, 31,
404 32 - currState
->ttbcr
.n
)) {
405 DPRINTF(TLB
, " - Selecting TTBR0\n");
406 // Check if table walk is allowed when Security Extensions are enabled
407 if (haveSecurity
&& currState
->ttbcr
.pd0
) {
408 if (currState
->isFetch
)
409 return new PrefetchAbort(currState
->vaddr_tainted
,
410 ArmFault::TranslationLL
+ L1
,
414 return new DataAbort(currState
->vaddr_tainted
,
415 TlbEntry::DomainType::NoAccess
, currState
->isWrite
,
416 ArmFault::TranslationLL
+ L1
, isStage2
,
419 ttbr
= currState
->tc
->readMiscReg(flattenMiscRegNsBanked(
420 MISCREG_TTBR0
, currState
->tc
, !currState
->isSecure
));
422 DPRINTF(TLB
, " - Selecting TTBR1\n");
423 // Check if table walk is allowed when Security Extensions are enabled
424 if (haveSecurity
&& currState
->ttbcr
.pd1
) {
425 if (currState
->isFetch
)
426 return new PrefetchAbort(currState
->vaddr_tainted
,
427 ArmFault::TranslationLL
+ L1
,
431 return new DataAbort(currState
->vaddr_tainted
,
432 TlbEntry::DomainType::NoAccess
, currState
->isWrite
,
433 ArmFault::TranslationLL
+ L1
, isStage2
,
436 ttbr
= currState
->tc
->readMiscReg(flattenMiscRegNsBanked(
437 MISCREG_TTBR1
, currState
->tc
, !currState
->isSecure
));
438 currState
->ttbcr
.n
= 0;
441 Addr l1desc_addr
= mbits(ttbr
, 31, 14 - currState
->ttbcr
.n
) |
442 (bits(currState
->vaddr
, 31 - currState
->ttbcr
.n
, 20) << 2);
443 DPRINTF(TLB
, " - Descriptor at address %#x (%s)\n", l1desc_addr
,
444 currState
->isSecure
? "s" : "ns");
446 // Trickbox address check
448 f
= tlb
->walkTrickBoxCheck(l1desc_addr
, currState
->isSecure
,
449 currState
->vaddr
, sizeof(uint32_t), currState
->isFetch
,
450 currState
->isWrite
, TlbEntry::DomainType::NoAccess
, L1
);
452 DPRINTF(TLB
, "Trickbox check caused fault on %#x\n", currState
->vaddr_tainted
);
453 if (currState
->timing
) {
455 nextWalk(currState
->tc
);
458 currState
->tc
= NULL
;
459 currState
->req
= NULL
;
464 Request::Flags flag
= 0;
465 if (currState
->sctlr
.c
== 0) {
466 flag
= Request::UNCACHEABLE
;
470 delayed
= fetchDescriptor(l1desc_addr
, (uint8_t*)&currState
->l1Desc
.data
,
471 sizeof(uint32_t), flag
, L1
, &doL1DescEvent
,
472 &TableWalker::doL1Descriptor
);
474 f
= currState
->fault
;
481 TableWalker::processWalkLPAE()
483 Addr ttbr
, ttbr0_max
, ttbr1_min
, desc_addr
;
485 LookupLevel start_lookup_level
= L1
;
487 DPRINTF(TLB
, "Beginning table walk for address %#x, TTBCR: %#x\n",
488 currState
->vaddr_tainted
, currState
->ttbcr
);
490 Request::Flags flag
= 0;
491 if (currState
->isSecure
)
492 flag
.set(Request::SECURE
);
494 // work out which base address register to use, if in hyp mode we always
497 DPRINTF(TLB
, " - Selecting VTTBR (long-desc.)\n");
498 ttbr
= currState
->tc
->readMiscReg(MISCREG_VTTBR
);
499 tsz
= sext
<4>(currState
->vtcr
.t0sz
);
500 start_lookup_level
= currState
->vtcr
.sl0
? L1
: L2
;
501 } else if (currState
->isHyp
) {
502 DPRINTF(TLB
, " - Selecting HTTBR (long-desc.)\n");
503 ttbr
= currState
->tc
->readMiscReg(MISCREG_HTTBR
);
504 tsz
= currState
->htcr
.t0sz
;
506 assert(_haveLPAE
&& currState
->ttbcr
.eae
);
508 // Determine boundaries of TTBR0/1 regions
509 if (currState
->ttbcr
.t0sz
)
510 ttbr0_max
= (1ULL << (32 - currState
->ttbcr
.t0sz
)) - 1;
511 else if (currState
->ttbcr
.t1sz
)
512 ttbr0_max
= (1ULL << 32) -
513 (1ULL << (32 - currState
->ttbcr
.t1sz
)) - 1;
515 ttbr0_max
= (1ULL << 32) - 1;
516 if (currState
->ttbcr
.t1sz
)
517 ttbr1_min
= (1ULL << 32) - (1ULL << (32 - currState
->ttbcr
.t1sz
));
519 ttbr1_min
= (1ULL << (32 - currState
->ttbcr
.t0sz
));
521 // The following code snippet selects the appropriate translation table base
522 // address (TTBR0 or TTBR1) and the appropriate starting lookup level
523 // depending on the address range supported by the translation table (ARM
524 // ARM issue C B3.6.4)
525 if (currState
->vaddr
<= ttbr0_max
) {
526 DPRINTF(TLB
, " - Selecting TTBR0 (long-desc.)\n");
527 // Check if table walk is allowed
528 if (currState
->ttbcr
.epd0
) {
529 if (currState
->isFetch
)
530 return new PrefetchAbort(currState
->vaddr_tainted
,
531 ArmFault::TranslationLL
+ L1
,
535 return new DataAbort(currState
->vaddr_tainted
,
536 TlbEntry::DomainType::NoAccess
,
538 ArmFault::TranslationLL
+ L1
,
542 ttbr
= currState
->tc
->readMiscReg(flattenMiscRegNsBanked(
543 MISCREG_TTBR0
, currState
->tc
, !currState
->isSecure
));
544 tsz
= currState
->ttbcr
.t0sz
;
545 if (ttbr0_max
< (1ULL << 30)) // Upper limit < 1 GB
546 start_lookup_level
= L2
;
547 } else if (currState
->vaddr
>= ttbr1_min
) {
548 DPRINTF(TLB
, " - Selecting TTBR1 (long-desc.)\n");
549 // Check if table walk is allowed
550 if (currState
->ttbcr
.epd1
) {
551 if (currState
->isFetch
)
552 return new PrefetchAbort(currState
->vaddr_tainted
,
553 ArmFault::TranslationLL
+ L1
,
557 return new DataAbort(currState
->vaddr_tainted
,
558 TlbEntry::DomainType::NoAccess
,
560 ArmFault::TranslationLL
+ L1
,
564 ttbr
= currState
->tc
->readMiscReg(flattenMiscRegNsBanked(
565 MISCREG_TTBR1
, currState
->tc
, !currState
->isSecure
));
566 tsz
= currState
->ttbcr
.t1sz
;
567 if (ttbr1_min
>= (1ULL << 31) + (1ULL << 30)) // Lower limit >= 3 GB
568 start_lookup_level
= L2
;
570 // Out of boundaries -> translation fault
571 if (currState
->isFetch
)
572 return new PrefetchAbort(currState
->vaddr_tainted
,
573 ArmFault::TranslationLL
+ L1
,
577 return new DataAbort(currState
->vaddr_tainted
,
578 TlbEntry::DomainType::NoAccess
,
579 currState
->isWrite
, ArmFault::TranslationLL
+ L1
,
580 isStage2
, ArmFault::LpaeTran
);
585 // Perform lookup (ARM ARM issue C B3.6.6)
586 if (start_lookup_level
== L1
) {
588 desc_addr
= mbits(ttbr
, 39, n
) |
589 (bits(currState
->vaddr
, n
+ 26, 30) << 3);
590 DPRINTF(TLB
, " - Descriptor at address %#x (%s) (long-desc.)\n",
591 desc_addr
, currState
->isSecure
? "s" : "ns");
593 // Skip first-level lookup
594 n
= (tsz
>= 2 ? 14 - tsz
: 12);
595 desc_addr
= mbits(ttbr
, 39, n
) |
596 (bits(currState
->vaddr
, n
+ 17, 21) << 3);
597 DPRINTF(TLB
, " - Descriptor at address %#x (%s) (long-desc.)\n",
598 desc_addr
, currState
->isSecure
? "s" : "ns");
601 // Trickbox address check
602 Fault f
= tlb
->walkTrickBoxCheck(desc_addr
, currState
->isSecure
,
603 currState
->vaddr
, sizeof(uint64_t), currState
->isFetch
,
604 currState
->isWrite
, TlbEntry::DomainType::NoAccess
,
607 DPRINTF(TLB
, "Trickbox check caused fault on %#x\n", currState
->vaddr_tainted
);
608 if (currState
->timing
) {
610 nextWalk(currState
->tc
);
613 currState
->tc
= NULL
;
614 currState
->req
= NULL
;
619 if (currState
->sctlr
.c
== 0) {
620 flag
= Request::UNCACHEABLE
;
623 if (currState
->isSecure
)
624 flag
.set(Request::SECURE
);
626 currState
->longDesc
.lookupLevel
= start_lookup_level
;
627 currState
->longDesc
.aarch64
= false;
628 currState
->longDesc
.largeGrain
= false;
629 currState
->longDesc
.grainSize
= 12;
631 Event
*event
= start_lookup_level
== L1
? (Event
*) &doL1LongDescEvent
632 : (Event
*) &doL2LongDescEvent
;
634 bool delayed
= fetchDescriptor(desc_addr
, (uint8_t*)&currState
->longDesc
.data
,
635 sizeof(uint64_t), flag
, start_lookup_level
,
636 event
, &TableWalker::doLongDescriptor
);
638 f
= currState
->fault
;
645 TableWalker::adjustTableSizeAArch64(unsigned tsz
)
// Returns true when an AArch64 Address Size Fault applies: the configured
// physical address range is narrower than the architectural maximum and
// 'addr' has one or more bits set at or above that configured range.
655 TableWalker::checkAddrSizeFaultAArch64(Addr addr
, int currPhysAddrRange
)
657 return (currPhysAddrRange
!= MaxPhysAddrRange
&&
658 bits(addr
, MaxPhysAddrRange
- 1, currPhysAddrRange
));
662 TableWalker::processWalkAArch64()
664 assert(currState
->aarch64
);
666 DPRINTF(TLB
, "Beginning table walk for address %#llx, TTBCR: %#llx\n",
667 currState
->vaddr_tainted
, currState
->ttbcr
);
669 // Determine TTBR, table size, granule size and phys. address range
672 bool large_grain
= false;
674 switch (currState
->el
) {
677 switch (bits(currState
->vaddr
, 63,48)) {
679 DPRINTF(TLB
, " - Selecting TTBR0 (AArch64)\n");
680 ttbr
= currState
->tc
->readMiscReg(MISCREG_TTBR0_EL1
);
681 tsz
= adjustTableSizeAArch64(64 - currState
->ttbcr
.t0sz
);
682 large_grain
= currState
->ttbcr
.tg0
;
683 if (bits(currState
->vaddr
, 63, tsz
) != 0x0 ||
684 currState
->ttbcr
.epd0
)
688 DPRINTF(TLB
, " - Selecting TTBR1 (AArch64)\n");
689 ttbr
= currState
->tc
->readMiscReg(MISCREG_TTBR1_EL1
);
690 tsz
= adjustTableSizeAArch64(64 - currState
->ttbcr
.t1sz
);
691 large_grain
= currState
->ttbcr
.tg1
;
692 if (bits(currState
->vaddr
, 63, tsz
) != mask(64-tsz
) ||
693 currState
->ttbcr
.epd1
)
697 // top two bytes must be all 0s or all 1s, else invalid addr
700 ps
= currState
->ttbcr
.ips
;
704 switch(bits(currState
->vaddr
, 63,48)) {
706 DPRINTF(TLB
, " - Selecting TTBR0 (AArch64)\n");
707 if (currState
->el
== EL2
)
708 ttbr
= currState
->tc
->readMiscReg(MISCREG_TTBR0_EL2
);
710 ttbr
= currState
->tc
->readMiscReg(MISCREG_TTBR0_EL3
);
711 tsz
= adjustTableSizeAArch64(64 - currState
->ttbcr
.t0sz
);
712 large_grain
= currState
->ttbcr
.tg0
;
715 // invalid addr if top two bytes are not all 0s
718 ps
= currState
->ttbcr
.ps
;
724 if (currState
->isFetch
)
725 f
= new PrefetchAbort(currState
->vaddr_tainted
,
726 ArmFault::TranslationLL
+ L0
, isStage2
,
729 f
= new DataAbort(currState
->vaddr_tainted
,
730 TlbEntry::DomainType::NoAccess
,
732 ArmFault::TranslationLL
+ L0
,
733 isStage2
, ArmFault::LpaeTran
);
735 if (currState
->timing
) {
737 nextWalk(currState
->tc
);
740 currState
->tc
= NULL
;
741 currState
->req
= NULL
;
747 // Determine starting lookup level
748 LookupLevel start_lookup_level
;
749 int grain_size
, stride
;
750 if (large_grain
) { // 64 KB granule
752 stride
= grain_size
- 3;
753 if (tsz
> grain_size
+ 2 * stride
)
754 start_lookup_level
= L1
;
755 else if (tsz
> grain_size
+ stride
)
756 start_lookup_level
= L2
;
758 start_lookup_level
= L3
;
759 } else { // 4 KB granule
761 stride
= grain_size
- 3;
762 if (tsz
> grain_size
+ 3 * stride
)
763 start_lookup_level
= L0
;
764 else if (tsz
> grain_size
+ 2 * stride
)
765 start_lookup_level
= L1
;
767 start_lookup_level
= L2
;
770 // Determine table base address
771 int base_addr_lo
= 3 + tsz
- stride
* (3 - start_lookup_level
) -
773 Addr base_addr
= mbits(ttbr
, 47, base_addr_lo
);
775 // Determine physical address size and raise an Address Size Fault if
777 int pa_range
= decodePhysAddrRange64(ps
);
778 // Clamp to lower limit
779 if (pa_range
> physAddrRange
)
780 currState
->physAddrRange
= physAddrRange
;
782 currState
->physAddrRange
= pa_range
;
783 if (checkAddrSizeFaultAArch64(base_addr
, currState
->physAddrRange
)) {
784 DPRINTF(TLB
, "Address size fault before any lookup\n");
786 if (currState
->isFetch
)
787 f
= new PrefetchAbort(currState
->vaddr_tainted
,
788 ArmFault::AddressSizeLL
+ start_lookup_level
,
792 f
= new DataAbort(currState
->vaddr_tainted
,
793 TlbEntry::DomainType::NoAccess
,
795 ArmFault::AddressSizeLL
+ start_lookup_level
,
800 if (currState
->timing
) {
802 nextWalk(currState
->tc
);
805 currState
->tc
= NULL
;
806 currState
->req
= NULL
;
812 // Determine descriptor address
813 Addr desc_addr
= base_addr
|
814 (bits(currState
->vaddr
, tsz
- 1,
815 stride
* (3 - start_lookup_level
) + grain_size
) << 3);
817 // Trickbox address check
818 Fault f
= tlb
->walkTrickBoxCheck(desc_addr
, currState
->isSecure
,
819 currState
->vaddr
, sizeof(uint64_t), currState
->isFetch
,
820 currState
->isWrite
, TlbEntry::DomainType::NoAccess
,
823 DPRINTF(TLB
, "Trickbox check caused fault on %#x\n", currState
->vaddr_tainted
);
824 if (currState
->timing
) {
826 nextWalk(currState
->tc
);
829 currState
->tc
= NULL
;
830 currState
->req
= NULL
;
835 Request::Flags flag
= 0;
836 if (currState
->sctlr
.c
== 0) {
837 flag
= Request::UNCACHEABLE
;
840 currState
->longDesc
.lookupLevel
= start_lookup_level
;
841 currState
->longDesc
.aarch64
= true;
842 currState
->longDesc
.largeGrain
= large_grain
;
843 currState
->longDesc
.grainSize
= grain_size
;
845 if (currState
->timing
) {
847 switch (start_lookup_level
) {
849 event
= (Event
*) &doL0LongDescEvent
;
852 event
= (Event
*) &doL1LongDescEvent
;
855 event
= (Event
*) &doL2LongDescEvent
;
858 event
= (Event
*) &doL3LongDescEvent
;
861 panic("Invalid table lookup level");
864 port
.dmaAction(MemCmd::ReadReq
, desc_addr
, sizeof(uint64_t), event
,
865 (uint8_t*) &currState
->longDesc
.data
,
866 currState
->tc
->getCpuPtr()->clockPeriod(), flag
);
868 "Adding to walker fifo: queue size before adding: %d\n",
869 stateQueues
[start_lookup_level
].size());
870 stateQueues
[start_lookup_level
].push_back(currState
);
872 } else if (!currState
->functional
) {
873 port
.dmaAction(MemCmd::ReadReq
, desc_addr
, sizeof(uint64_t),
874 NULL
, (uint8_t*) &currState
->longDesc
.data
,
875 currState
->tc
->getCpuPtr()->clockPeriod(), flag
);
877 f
= currState
->fault
;
879 RequestPtr req
= new Request(desc_addr
, sizeof(uint64_t), flag
,
881 PacketPtr pkt
= new Packet(req
, MemCmd::ReadReq
);
882 pkt
->dataStatic((uint8_t*) &currState
->longDesc
.data
);
883 port
.sendFunctional(pkt
);
887 f
= currState
->fault
;
894 TableWalker::memAttrs(ThreadContext
*tc
, TlbEntry
&te
, SCTLR sctlr
,
895 uint8_t texcb
, bool s
)
897 // Note: tc and sctlr local variables are hiding tc and sctrl class
899 DPRINTF(TLBVerbose
, "memAttrs texcb:%d s:%d\n", texcb
, s
);
900 te
.shareable
= false; // default value
901 te
.nonCacheable
= false;
902 te
.outerShareable
= false;
903 if (sctlr
.tre
== 0 || ((sctlr
.tre
== 1) && (sctlr
.m
== 0))) {
905 case 0: // Stongly-ordered
906 te
.nonCacheable
= true;
907 te
.mtype
= TlbEntry::MemoryType::StronglyOrdered
;
912 case 1: // Shareable Device
913 te
.nonCacheable
= true;
914 te
.mtype
= TlbEntry::MemoryType::Device
;
919 case 2: // Outer and Inner Write-Through, no Write-Allocate
920 te
.mtype
= TlbEntry::MemoryType::Normal
;
923 te
.outerAttrs
= bits(texcb
, 1, 0);
925 case 3: // Outer and Inner Write-Back, no Write-Allocate
926 te
.mtype
= TlbEntry::MemoryType::Normal
;
929 te
.outerAttrs
= bits(texcb
, 1, 0);
931 case 4: // Outer and Inner Non-cacheable
932 te
.nonCacheable
= true;
933 te
.mtype
= TlbEntry::MemoryType::Normal
;
936 te
.outerAttrs
= bits(texcb
, 1, 0);
939 panic("Reserved texcb value!\n");
941 case 6: // Implementation Defined
942 panic("Implementation-defined texcb value!\n");
944 case 7: // Outer and Inner Write-Back, Write-Allocate
945 te
.mtype
= TlbEntry::MemoryType::Normal
;
950 case 8: // Non-shareable Device
951 te
.nonCacheable
= true;
952 te
.mtype
= TlbEntry::MemoryType::Device
;
953 te
.shareable
= false;
957 case 9 ... 15: // Reserved
958 panic("Reserved texcb value!\n");
960 case 16 ... 31: // Cacheable Memory
961 te
.mtype
= TlbEntry::MemoryType::Normal
;
963 if (bits(texcb
, 1,0) == 0 || bits(texcb
, 3,2) == 0)
964 te
.nonCacheable
= true;
965 te
.innerAttrs
= bits(texcb
, 1, 0);
966 te
.outerAttrs
= bits(texcb
, 3, 2);
969 panic("More than 32 states for 5 bits?\n");
973 PRRR prrr
= tc
->readMiscReg(flattenMiscRegNsBanked(MISCREG_PRRR
,
974 currState
->tc
, !currState
->isSecure
));
975 NMRR nmrr
= tc
->readMiscReg(flattenMiscRegNsBanked(MISCREG_NMRR
,
976 currState
->tc
, !currState
->isSecure
));
977 DPRINTF(TLBVerbose
, "memAttrs PRRR:%08x NMRR:%08x\n", prrr
, nmrr
);
978 uint8_t curr_tr
= 0, curr_ir
= 0, curr_or
= 0;
979 switch(bits(texcb
, 2,0)) {
984 te
.outerShareable
= (prrr
.nos0
== 0);
990 te
.outerShareable
= (prrr
.nos1
== 0);
996 te
.outerShareable
= (prrr
.nos2
== 0);
1002 te
.outerShareable
= (prrr
.nos3
== 0);
1008 te
.outerShareable
= (prrr
.nos4
== 0);
1014 te
.outerShareable
= (prrr
.nos5
== 0);
1017 panic("Imp defined type\n");
1022 te
.outerShareable
= (prrr
.nos7
== 0);
1028 DPRINTF(TLBVerbose
, "StronglyOrdered\n");
1029 te
.mtype
= TlbEntry::MemoryType::StronglyOrdered
;
1030 te
.nonCacheable
= true;
1033 te
.shareable
= true;
1036 DPRINTF(TLBVerbose
, "Device ds1:%d ds0:%d s:%d\n",
1037 prrr
.ds1
, prrr
.ds0
, s
);
1038 te
.mtype
= TlbEntry::MemoryType::Device
;
1039 te
.nonCacheable
= true;
1043 te
.shareable
= true;
1045 te
.shareable
= true;
1048 DPRINTF(TLBVerbose
, "Normal ns1:%d ns0:%d s:%d\n",
1049 prrr
.ns1
, prrr
.ns0
, s
);
1050 te
.mtype
= TlbEntry::MemoryType::Normal
;
1052 te
.shareable
= true;
1054 te
.shareable
= true;
1057 panic("Reserved type");
1060 if (te
.mtype
== TlbEntry::MemoryType::Normal
){
1063 te
.nonCacheable
= true;
1079 te
.nonCacheable
= true;
1094 DPRINTF(TLBVerbose
, "memAttrs: shareable: %d, innerAttrs: %d, \
1096 te
.shareable
, te
.innerAttrs
, te
.outerAttrs
);
1097 te
.setAttributes(false);
1101 TableWalker::memAttrsLPAE(ThreadContext
*tc
, TlbEntry
&te
,
1102 LongDescriptor
&lDescriptor
)
1107 uint8_t sh
= lDescriptor
.sh();
1108 // Different format and source of attributes if this is a stage 2
1111 attr
= lDescriptor
.memAttr();
1112 uint8_t attr_3_2
= (attr
>> 2) & 0x3;
1113 uint8_t attr_1_0
= attr
& 0x3;
1115 DPRINTF(TLBVerbose
, "memAttrsLPAE MemAttr:%#x sh:%#x\n", attr
, sh
);
1117 if (attr_3_2
== 0) {
1118 te
.mtype
= attr_1_0
== 0 ? TlbEntry::MemoryType::StronglyOrdered
1119 : TlbEntry::MemoryType::Device
;
1121 te
.innerAttrs
= attr_1_0
== 0 ? 1 : 3;
1122 te
.nonCacheable
= true;
1124 te
.mtype
= TlbEntry::MemoryType::Normal
;
1125 te
.outerAttrs
= attr_3_2
== 1 ? 0 :
1126 attr_3_2
== 2 ? 2 : 1;
1127 te
.innerAttrs
= attr_1_0
== 1 ? 0 :
1128 attr_1_0
== 2 ? 6 : 5;
1129 te
.nonCacheable
= (attr_3_2
== 1) || (attr_1_0
== 1);
1132 uint8_t attrIndx
= lDescriptor
.attrIndx();
1134 // LPAE always uses remapping of memory attributes, irrespective of the
1135 // value of SCTLR.TRE
1136 int reg
= attrIndx
& 0x4 ? MISCREG_MAIR1
: MISCREG_MAIR0
;
1137 reg
= flattenMiscRegNsBanked(reg
, currState
->tc
, !currState
->isSecure
);
1138 uint32_t mair
= currState
->tc
->readMiscReg(reg
);
1139 attr
= (mair
>> (8 * (attrIndx
% 4))) & 0xff;
1140 uint8_t attr_7_4
= bits(attr
, 7, 4);
1141 uint8_t attr_3_0
= bits(attr
, 3, 0);
1142 DPRINTF(TLBVerbose
, "memAttrsLPAE AttrIndx:%#x sh:%#x, attr %#x\n", attrIndx
, sh
, attr
);
1144 // Note: the memory subsystem only cares about the 'cacheable' memory
1145 // attribute. The other attributes are only used to fill the PAR register
1146 // accordingly to provide the illusion of full support
1147 te
.nonCacheable
= false;
1151 // Strongly-ordered or Device memory
1152 if (attr_3_0
== 0x0)
1153 te
.mtype
= TlbEntry::MemoryType::StronglyOrdered
;
1154 else if (attr_3_0
== 0x4)
1155 te
.mtype
= TlbEntry::MemoryType::Device
;
1157 panic("Unpredictable behavior\n");
1158 te
.nonCacheable
= true;
1162 // Normal memory, Outer Non-cacheable
1163 te
.mtype
= TlbEntry::MemoryType::Normal
;
1165 if (attr_3_0
== 0x4)
1166 // Inner Non-cacheable
1167 te
.nonCacheable
= true;
1168 else if (attr_3_0
< 0x8)
1169 panic("Unpredictable behavior\n");
1179 if (attr_7_4
& 0x4) {
1180 te
.outerAttrs
= (attr_7_4
& 1) ? 1 : 3;
1182 te
.outerAttrs
= 0x2;
1184 // Normal memory, Outer Cacheable
1185 te
.mtype
= TlbEntry::MemoryType::Normal
;
1186 if (attr_3_0
!= 0x4 && attr_3_0
< 0x8)
1187 panic("Unpredictable behavior\n");
1190 panic("Unpredictable behavior\n");
1196 te
.innerAttrs
= 0x1;
1199 te
.innerAttrs
= attr_7_4
== 0 ? 0x3 : 0;
1211 te
.innerAttrs
= attr_3_0
& 1 ? 0x5 : 0x7;
1214 panic("Unpredictable behavior\n");
1219 te
.outerShareable
= sh
== 2;
1220 te
.shareable
= (sh
& 0x2) ? true : false;
1221 te
.setAttributes(true);
1222 te
.attributes
|= (uint64_t) attr
<< 56;
// Fill in the memory attributes of a TLB entry for an AArch64 translation:
// select the MAIR register for the current exception level, extract the
// 8-bit attribute field indexed by 'attrIndx', derive memory type and
// cacheability from it, apply shareability from 'sh', and encode the
// result in 64-bit PAR format.  NOTE(review): the case labels of the
// switch below are on lines missing from this extract.
1226 TableWalker::memAttrsAArch64(ThreadContext
*tc
, TlbEntry
&te
, uint8_t attrIndx
,
1229 DPRINTF(TLBVerbose
, "memAttrsAArch64 AttrIndx:%#x sh:%#x\n", attrIndx
, sh
);
// Pick the MAIR for the current EL (case labels missing from extract;
// presumably EL0/EL1 -> MAIR_EL1, EL2 -> MAIR_EL2, EL3 -> MAIR_EL3 --
// TODO confirm).
1233 switch (currState
->el
) {
1236 mair
= tc
->readMiscReg(MISCREG_MAIR_EL1
);
1239 mair
= tc
->readMiscReg(MISCREG_MAIR_EL2
);
1242 mair
= tc
->readMiscReg(MISCREG_MAIR_EL3
);
1245 panic("Invalid exception level");
1249 // Select attributes
// Each MAIR attribute entry is one byte; attrIndx selects which byte.
1250 uint8_t attr
= bits(mair
, 8 * attrIndx
+ 7, 8 * attrIndx
);
1251 uint8_t attr_lo
= bits(attr
, 3, 0);
1252 uint8_t attr_hi
= bits(attr
, 7, 4);
// attr[7:4] == 0 encodes Device memory; anything else is Normal.
1255 te
.mtype
= attr_hi
== 0 ? TlbEntry::MemoryType::Device
: TlbEntry::MemoryType::Normal
;
// The memory subsystem only distinguishes cacheable vs. non-cacheable:
// Device, Outer Non-cacheable (hi nibble 0x8) and Inner Non-cacheable
// (lo nibble 0x8) all map to nonCacheable.
1258 te
.nonCacheable
= false;
1259 if (te
.mtype
== TlbEntry::MemoryType::Device
|| // Device memory
1260 attr_hi
== 0x8 || // Normal memory, Outer Non-cacheable
1261 attr_lo
== 0x8) { // Normal memory, Inner Non-cacheable
1262 te
.nonCacheable
= true;
// sh == 2 -> Outer Shareable; bit 1 of sh set -> shareable at all.
1265 te
.shareable
= sh
== 2;
1266 te
.outerShareable
= (sh
& 0x2) ? true : false;
1267 // Attributes formatted according to the 64-bit PAR
1268 te
.attributes
= ((uint64_t) attr
<< 56) |
1269 (1 << 11) | // LPAE bit
1270 (te
.ns
<< 9) | // NS bit
1275 TableWalker::doL1Descriptor()
1277 if (currState
->fault
!= NoFault
) {
1281 DPRINTF(TLB
, "L1 descriptor for %#x is %#x\n",
1282 currState
->vaddr_tainted
, currState
->l1Desc
.data
);
1285 switch (currState
->l1Desc
.type()) {
1286 case L1Descriptor::Ignore
:
1287 case L1Descriptor::Reserved
:
1288 if (!currState
->timing
) {
1289 currState
->tc
= NULL
;
1290 currState
->req
= NULL
;
1292 DPRINTF(TLB
, "L1 Descriptor Reserved/Ignore, causing fault\n");
1293 if (currState
->isFetch
)
1295 new PrefetchAbort(currState
->vaddr_tainted
,
1296 ArmFault::TranslationLL
+ L1
,
1298 ArmFault::VmsaTran
);
1301 new DataAbort(currState
->vaddr_tainted
,
1302 TlbEntry::DomainType::NoAccess
,
1304 ArmFault::TranslationLL
+ L1
, isStage2
,
1305 ArmFault::VmsaTran
);
1307 case L1Descriptor::Section
:
1308 if (currState
->sctlr
.afe
&& bits(currState
->l1Desc
.ap(), 0) == 0) {
1309 /** @todo: check sctlr.ha (bit[17]) if Hardware Access Flag is
1310 * enabled if set, do l1.Desc.setAp0() instead of generating
1314 currState
->fault
= new DataAbort(currState
->vaddr_tainted
,
1315 currState
->l1Desc
.domain(),
1317 ArmFault::AccessFlagLL
+ L1
,
1319 ArmFault::VmsaTran
);
1321 if (currState
->l1Desc
.supersection()) {
1322 panic("Haven't implemented supersections\n");
1324 insertTableEntry(currState
->l1Desc
, false);
1326 case L1Descriptor::PageTable
:
1329 l2desc_addr
= currState
->l1Desc
.l2Addr() |
1330 (bits(currState
->vaddr
, 19, 12) << 2);
1331 DPRINTF(TLB
, "L1 descriptor points to page table at: %#x (%s)\n",
1332 l2desc_addr
, currState
->isSecure
? "s" : "ns");
1334 // Trickbox address check
1335 currState
->fault
= tlb
->walkTrickBoxCheck(
1336 l2desc_addr
, currState
->isSecure
, currState
->vaddr
,
1337 sizeof(uint32_t), currState
->isFetch
, currState
->isWrite
,
1338 currState
->l1Desc
.domain(), L2
);
1340 if (currState
->fault
) {
1341 if (!currState
->timing
) {
1342 currState
->tc
= NULL
;
1343 currState
->req
= NULL
;
1348 Request::Flags flag
= 0;
1349 if (currState
->isSecure
)
1350 flag
.set(Request::SECURE
);
1353 delayed
= fetchDescriptor(l2desc_addr
,
1354 (uint8_t*)&currState
->l2Desc
.data
,
1355 sizeof(uint32_t), flag
, -1, &doL2DescEvent
,
1356 &TableWalker::doL2Descriptor
);
1358 currState
->delayed
= true;
1364 panic("A new type in a 2 bit field?\n");
1369 TableWalker::doLongDescriptor()
1371 if (currState
->fault
!= NoFault
) {
1375 DPRINTF(TLB
, "L%d descriptor for %#llx is %#llx (%s)\n",
1376 currState
->longDesc
.lookupLevel
, currState
->vaddr_tainted
,
1377 currState
->longDesc
.data
,
1378 currState
->aarch64
? "AArch64" : "long-desc.");
1380 if ((currState
->longDesc
.type() == LongDescriptor::Block
) ||
1381 (currState
->longDesc
.type() == LongDescriptor::Page
)) {
1382 DPRINTF(TLBVerbose
, "Analyzing L%d descriptor: %#llx, pxn: %d, "
1383 "xn: %d, ap: %d, af: %d, type: %d\n",
1384 currState
->longDesc
.lookupLevel
,
1385 currState
->longDesc
.data
,
1386 currState
->longDesc
.pxn(),
1387 currState
->longDesc
.xn(),
1388 currState
->longDesc
.ap(),
1389 currState
->longDesc
.af(),
1390 currState
->longDesc
.type());
1392 DPRINTF(TLBVerbose
, "Analyzing L%d descriptor: %#llx, type: %d\n",
1393 currState
->longDesc
.lookupLevel
,
1394 currState
->longDesc
.data
,
1395 currState
->longDesc
.type());
1400 switch (currState
->longDesc
.type()) {
1401 case LongDescriptor::Invalid
:
1402 if (!currState
->timing
) {
1403 currState
->tc
= NULL
;
1404 currState
->req
= NULL
;
1407 DPRINTF(TLB
, "L%d descriptor Invalid, causing fault type %d\n",
1408 currState
->longDesc
.lookupLevel
,
1409 ArmFault::TranslationLL
+ currState
->longDesc
.lookupLevel
);
1410 if (currState
->isFetch
)
1411 currState
->fault
= new PrefetchAbort(
1412 currState
->vaddr_tainted
,
1413 ArmFault::TranslationLL
+ currState
->longDesc
.lookupLevel
,
1415 ArmFault::LpaeTran
);
1417 currState
->fault
= new DataAbort(
1418 currState
->vaddr_tainted
,
1419 TlbEntry::DomainType::NoAccess
,
1421 ArmFault::TranslationLL
+ currState
->longDesc
.lookupLevel
,
1423 ArmFault::LpaeTran
);
1425 case LongDescriptor::Block
:
1426 case LongDescriptor::Page
:
1430 // Check for address size fault
1431 if (checkAddrSizeFaultAArch64(
1432 mbits(currState
->longDesc
.data
, MaxPhysAddrRange
- 1,
1433 currState
->longDesc
.offsetBits()),
1434 currState
->physAddrRange
)) {
1436 DPRINTF(TLB
, "L%d descriptor causing Address Size Fault\n",
1437 currState
->longDesc
.lookupLevel
);
1438 // Check for access fault
1439 } else if (currState
->longDesc
.af() == 0) {
1441 DPRINTF(TLB
, "L%d descriptor causing Access Fault\n",
1442 currState
->longDesc
.lookupLevel
);
1446 if (currState
->isFetch
)
1447 currState
->fault
= new PrefetchAbort(
1448 currState
->vaddr_tainted
,
1449 (aff
? ArmFault::AccessFlagLL
: ArmFault::AddressSizeLL
) +
1450 currState
->longDesc
.lookupLevel
,
1452 ArmFault::LpaeTran
);
1454 currState
->fault
= new DataAbort(
1455 currState
->vaddr_tainted
,
1456 TlbEntry::DomainType::NoAccess
, currState
->isWrite
,
1457 (aff
? ArmFault::AccessFlagLL
: ArmFault::AddressSizeLL
) +
1458 currState
->longDesc
.lookupLevel
,
1460 ArmFault::LpaeTran
);
1462 insertTableEntry(currState
->longDesc
, true);
1466 case LongDescriptor::Table
:
1468 // Set hierarchical permission flags
1469 currState
->secureLookup
= currState
->secureLookup
&&
1470 currState
->longDesc
.secureTable();
1471 currState
->rwTable
= currState
->rwTable
&&
1472 currState
->longDesc
.rwTable();
1473 currState
->userTable
= currState
->userTable
&&
1474 currState
->longDesc
.userTable();
1475 currState
->xnTable
= currState
->xnTable
||
1476 currState
->longDesc
.xnTable();
1477 currState
->pxnTable
= currState
->pxnTable
||
1478 currState
->longDesc
.pxnTable();
1480 // Set up next level lookup
1481 Addr next_desc_addr
= currState
->longDesc
.nextDescAddr(
1484 DPRINTF(TLB
, "L%d descriptor points to L%d descriptor at: %#x (%s)\n",
1485 currState
->longDesc
.lookupLevel
,
1486 currState
->longDesc
.lookupLevel
+ 1,
1488 currState
->secureLookup
? "s" : "ns");
1490 // Check for address size fault
1491 if (currState
->aarch64
&& checkAddrSizeFaultAArch64(
1492 next_desc_addr
, currState
->physAddrRange
)) {
1493 DPRINTF(TLB
, "L%d descriptor causing Address Size Fault\n",
1494 currState
->longDesc
.lookupLevel
);
1495 if (currState
->isFetch
)
1496 currState
->fault
= new PrefetchAbort(
1497 currState
->vaddr_tainted
,
1498 ArmFault::AddressSizeLL
1499 + currState
->longDesc
.lookupLevel
,
1501 ArmFault::LpaeTran
);
1503 currState
->fault
= new DataAbort(
1504 currState
->vaddr_tainted
,
1505 TlbEntry::DomainType::NoAccess
, currState
->isWrite
,
1506 ArmFault::AddressSizeLL
1507 + currState
->longDesc
.lookupLevel
,
1509 ArmFault::LpaeTran
);
1513 // Trickbox address check
1514 currState
->fault
= tlb
->walkTrickBoxCheck(
1515 next_desc_addr
, currState
->vaddr
,
1516 currState
->vaddr
, sizeof(uint64_t),
1517 currState
->isFetch
, currState
->isWrite
,
1518 TlbEntry::DomainType::Client
,
1519 toLookupLevel(currState
->longDesc
.lookupLevel
+1));
1521 if (currState
->fault
) {
1522 if (!currState
->timing
) {
1523 currState
->tc
= NULL
;
1524 currState
->req
= NULL
;
1529 Request::Flags flag
= 0;
1530 if (currState
->secureLookup
)
1531 flag
.set(Request::SECURE
);
1533 currState
->longDesc
.lookupLevel
=
1534 (LookupLevel
) (currState
->longDesc
.lookupLevel
+ 1);
1535 Event
*event
= NULL
;
1536 switch (currState
->longDesc
.lookupLevel
) {
1538 assert(currState
->aarch64
);
1539 event
= &doL1LongDescEvent
;
1542 event
= &doL2LongDescEvent
;
1545 event
= &doL3LongDescEvent
;
1548 panic("Wrong lookup level in table walk\n");
1553 delayed
= fetchDescriptor(next_desc_addr
, (uint8_t*)&currState
->longDesc
.data
,
1554 sizeof(uint64_t), flag
, -1, event
,
1555 &TableWalker::doLongDescriptor
);
1557 currState
->delayed
= true;
1562 panic("A new type in a 2 bit field?\n");
1567 TableWalker::doL2Descriptor()
1569 if (currState
->fault
!= NoFault
) {
1573 DPRINTF(TLB
, "L2 descriptor for %#x is %#x\n",
1574 currState
->vaddr_tainted
, currState
->l2Desc
.data
);
1577 if (currState
->l2Desc
.invalid()) {
1578 DPRINTF(TLB
, "L2 descriptor invalid, causing fault\n");
1579 if (!currState
->timing
) {
1580 currState
->tc
= NULL
;
1581 currState
->req
= NULL
;
1583 if (currState
->isFetch
)
1585 new PrefetchAbort(currState
->vaddr_tainted
,
1586 ArmFault::TranslationLL
+ L2
,
1588 ArmFault::VmsaTran
);
1591 new DataAbort(currState
->vaddr_tainted
, currState
->l1Desc
.domain(),
1592 currState
->isWrite
, ArmFault::TranslationLL
+ L2
,
1594 ArmFault::VmsaTran
);
1598 if (currState
->sctlr
.afe
&& bits(currState
->l2Desc
.ap(), 0) == 0) {
1599 /** @todo: check sctlr.ha (bit[17]) if Hardware Access Flag is enabled
1600 * if set, do l2.Desc.setAp0() instead of generating AccessFlag0
1602 DPRINTF(TLB
, "Generating access fault at L2, afe: %d, ap: %d\n",
1603 currState
->sctlr
.afe
, currState
->l2Desc
.ap());
1606 new DataAbort(currState
->vaddr_tainted
,
1607 TlbEntry::DomainType::NoAccess
, currState
->isWrite
,
1608 ArmFault::AccessFlagLL
+ L2
, isStage2
,
1609 ArmFault::VmsaTran
);
1612 insertTableEntry(currState
->l2Desc
, false);
1616 TableWalker::doL1DescriptorWrapper()
1618 currState
= stateQueues
[L1
].front();
1619 currState
->delayed
= false;
1620 // if there's a stage2 translation object we don't need it any more
1621 if (currState
->stage2Tran
) {
1622 delete currState
->stage2Tran
;
1623 currState
->stage2Tran
= NULL
;
1627 DPRINTF(TLBVerbose
, "L1 Desc object host addr: %p\n",&currState
->l1Desc
.data
);
1628 DPRINTF(TLBVerbose
, "L1 Desc object data: %08x\n",currState
->l1Desc
.data
);
1630 DPRINTF(TLBVerbose
, "calling doL1Descriptor for vaddr:%#x\n", currState
->vaddr_tainted
);
1633 stateQueues
[L1
].pop_front();
1635 // Check if fault was generated
1636 if (currState
->fault
!= NoFault
) {
1637 currState
->transState
->finish(currState
->fault
, currState
->req
,
1638 currState
->tc
, currState
->mode
);
1641 nextWalk(currState
->tc
);
1643 currState
->req
= NULL
;
1644 currState
->tc
= NULL
;
1645 currState
->delayed
= false;
1648 else if (!currState
->delayed
) {
1649 // delay is not set so there is no L2 to do
1650 // Don't finish the translation if a stage 2 look up is underway
1651 if (!currState
->doingStage2
) {
1652 DPRINTF(TLBVerbose
, "calling translateTiming again\n");
1653 currState
->fault
= tlb
->translateTiming(currState
->req
, currState
->tc
,
1654 currState
->transState
, currState
->mode
);
1658 nextWalk(currState
->tc
);
1660 currState
->req
= NULL
;
1661 currState
->tc
= NULL
;
1662 currState
->delayed
= false;
1665 // need to do L2 descriptor
1666 stateQueues
[L2
].push_back(currState
);
1672 TableWalker::doL2DescriptorWrapper()
1674 currState
= stateQueues
[L2
].front();
1675 assert(currState
->delayed
);
1676 // if there's a stage2 translation object we don't need it any more
1677 if (currState
->stage2Tran
) {
1678 delete currState
->stage2Tran
;
1679 currState
->stage2Tran
= NULL
;
1682 DPRINTF(TLBVerbose
, "calling doL2Descriptor for vaddr:%#x\n",
1683 currState
->vaddr_tainted
);
1686 // Check if fault was generated
1687 if (currState
->fault
!= NoFault
) {
1688 currState
->transState
->finish(currState
->fault
, currState
->req
,
1689 currState
->tc
, currState
->mode
);
1692 // Don't finish the translation if a stage 2 look up is underway
1693 if (!currState
->doingStage2
) {
1694 DPRINTF(TLBVerbose
, "calling translateTiming again\n");
1695 currState
->fault
= tlb
->translateTiming(currState
->req
,
1696 currState
->tc
, currState
->transState
, currState
->mode
);
1701 stateQueues
[L2
].pop_front();
1704 nextWalk(currState
->tc
);
1706 currState
->req
= NULL
;
1707 currState
->tc
= NULL
;
1708 currState
->delayed
= false;
1715 TableWalker::doL0LongDescriptorWrapper()
1717 doLongDescriptorWrapper(L0
);
1721 TableWalker::doL1LongDescriptorWrapper()
1723 doLongDescriptorWrapper(L1
);
1727 TableWalker::doL2LongDescriptorWrapper()
1729 doLongDescriptorWrapper(L2
);
1733 TableWalker::doL3LongDescriptorWrapper()
1735 doLongDescriptorWrapper(L3
);
1739 TableWalker::doLongDescriptorWrapper(LookupLevel curr_lookup_level
)
1741 currState
= stateQueues
[curr_lookup_level
].front();
1742 assert(curr_lookup_level
== currState
->longDesc
.lookupLevel
);
1743 currState
->delayed
= false;
1745 // if there's a stage2 translation object we don't need it any more
1746 if (currState
->stage2Tran
) {
1747 delete currState
->stage2Tran
;
1748 currState
->stage2Tran
= NULL
;
1751 DPRINTF(TLBVerbose
, "calling doLongDescriptor for vaddr:%#x\n",
1752 currState
->vaddr_tainted
);
1755 stateQueues
[curr_lookup_level
].pop_front();
1757 if (currState
->fault
!= NoFault
) {
1758 // A fault was generated
1759 currState
->transState
->finish(currState
->fault
, currState
->req
,
1760 currState
->tc
, currState
->mode
);
1763 nextWalk(currState
->tc
);
1765 currState
->req
= NULL
;
1766 currState
->tc
= NULL
;
1767 currState
->delayed
= false;
1769 } else if (!currState
->delayed
) {
1770 // No additional lookups required
1771 // Don't finish the translation if a stage 2 look up is underway
1772 if (!currState
->doingStage2
) {
1773 DPRINTF(TLBVerbose
, "calling translateTiming again\n");
1774 currState
->fault
= tlb
->translateTiming(currState
->req
, currState
->tc
,
1775 currState
->transState
,
1780 nextWalk(currState
->tc
);
1782 currState
->req
= NULL
;
1783 currState
->tc
= NULL
;
1784 currState
->delayed
= false;
1787 if (curr_lookup_level
>= MAX_LOOKUP_LEVELS
- 1)
1788 panic("Max. number of lookups already reached in table walk\n");
1789 // Need to perform additional lookups
1790 stateQueues
[currState
->longDesc
.lookupLevel
].push_back(currState
);
1797 TableWalker::nextWalk(ThreadContext
*tc
)
1799 if (pendingQueue
.size())
1800 schedule(doProcessEvent
, clockEdge(Cycles(1)));
1804 TableWalker::fetchDescriptor(Addr descAddr
, uint8_t *data
, int numBytes
,
1805 Request::Flags flags
, int queueIndex
, Event
*event
,
1806 void (TableWalker::*doDescriptor
)())
1808 bool isTiming
= currState
->timing
;
1810 // do the requests for the page table descriptors have to go through the
1812 if (currState
->stage2Req
) {
1814 flags
= flags
| TLB::MustBeOne
;
1817 Stage2MMU::Stage2Translation
*tran
= new
1818 Stage2MMU::Stage2Translation(*stage2Mmu
, data
, event
,
1820 currState
->stage2Tran
= tran
;
1821 stage2Mmu
->readDataTimed(currState
->tc
, descAddr
, tran
, numBytes
,
1823 fault
= tran
->fault
;
1825 fault
= stage2Mmu
->readDataUntimed(currState
->tc
,
1826 currState
->vaddr
, descAddr
, data
, numBytes
, flags
, masterId
,
1827 currState
->functional
);
1830 if (fault
!= NoFault
) {
1831 currState
->fault
= fault
;
1834 if (queueIndex
>= 0) {
1835 DPRINTF(TLBVerbose
, "Adding to walker fifo: queue size before adding: %d\n",
1836 stateQueues
[queueIndex
].size());
1837 stateQueues
[queueIndex
].push_back(currState
);
1841 (this->*doDescriptor
)();
1845 port
.dmaAction(MemCmd::ReadReq
, descAddr
, numBytes
, event
, data
,
1846 currState
->tc
->getCpuPtr()->clockPeriod(), flags
);
1847 if (queueIndex
>= 0) {
1848 DPRINTF(TLBVerbose
, "Adding to walker fifo: queue size before adding: %d\n",
1849 stateQueues
[queueIndex
].size());
1850 stateQueues
[queueIndex
].push_back(currState
);
1853 } else if (!currState
->functional
) {
1854 port
.dmaAction(MemCmd::ReadReq
, descAddr
, numBytes
, NULL
, data
,
1855 currState
->tc
->getCpuPtr()->clockPeriod(), flags
);
1856 (this->*doDescriptor
)();
1858 RequestPtr req
= new Request(descAddr
, numBytes
, flags
, masterId
);
1859 req
->taskId(ContextSwitchTaskId::DMA
);
1860 PacketPtr pkt
= new Packet(req
, MemCmd::ReadReq
);
1861 pkt
->dataStatic(data
);
1862 port
.sendFunctional(pkt
);
1863 (this->*doDescriptor
)();
1872 TableWalker::insertTableEntry(DescriptorBase
&descriptor
, bool longDescriptor
)
1876 // Create and fill a new page table entry
1878 te
.longDescFormat
= longDescriptor
;
1879 te
.isHyp
= currState
->isHyp
;
1880 te
.asid
= currState
->asid
;
1881 te
.vmid
= currState
->vmid
;
1882 te
.N
= descriptor
.offsetBits();
1883 te
.vpn
= currState
->vaddr
>> te
.N
;
1884 te
.size
= (1<<te
.N
) - 1;
1885 te
.pfn
= descriptor
.pfn();
1886 te
.domain
= descriptor
.domain();
1887 te
.lookupLevel
= descriptor
.lookupLevel
;
1888 te
.ns
= !descriptor
.secure(haveSecurity
, currState
) || isStage2
;
1889 te
.nstid
= !currState
->isSecure
;
1890 te
.xn
= descriptor
.xn();
1891 if (currState
->aarch64
)
1892 te
.el
= currState
->el
;
1896 // ASID has no meaning for stage 2 TLB entries, so mark all stage 2 entries
1898 te
.global
= descriptor
.global(currState
) || isStage2
;
1899 if (longDescriptor
) {
1900 LongDescriptor lDescriptor
=
1901 dynamic_cast<LongDescriptor
&>(descriptor
);
1903 te
.xn
|= currState
->xnTable
;
1904 te
.pxn
= currState
->pxnTable
|| lDescriptor
.pxn();
1906 // this is actually the HAP field, but its stored in the same bit
1907 // possitions as the AP field in a stage 1 translation.
1908 te
.hap
= lDescriptor
.ap();
1910 te
.ap
= ((!currState
->rwTable
|| descriptor
.ap() >> 1) << 1) |
1911 (currState
->userTable
&& (descriptor
.ap() & 0x1));
1913 if (currState
->aarch64
)
1914 memAttrsAArch64(currState
->tc
, te
, currState
->longDesc
.attrIndx(),
1915 currState
->longDesc
.sh());
1917 memAttrsLPAE(currState
->tc
, te
, lDescriptor
);
1919 te
.ap
= descriptor
.ap();
1920 memAttrs(currState
->tc
, te
, currState
->sctlr
, descriptor
.texcb(),
1921 descriptor
.shareable());
1925 DPRINTF(TLB
, descriptor
.dbgHeader().c_str());
1926 DPRINTF(TLB
, " - N:%d pfn:%#x size:%#x global:%d valid:%d\n",
1927 te
.N
, te
.pfn
, te
.size
, te
.global
, te
.valid
);
1928 DPRINTF(TLB
, " - vpn:%#x xn:%d pxn:%d ap:%d domain:%d asid:%d "
1929 "vmid:%d hyp:%d nc:%d ns:%d\n", te
.vpn
, te
.xn
, te
.pxn
,
1930 te
.ap
, static_cast<uint8_t>(te
.domain
), te
.asid
, te
.vmid
, te
.isHyp
,
1931 te
.nonCacheable
, te
.ns
);
1932 DPRINTF(TLB
, " - domain from L%d desc:%d data:%#x\n",
1933 descriptor
.lookupLevel
, static_cast<uint8_t>(descriptor
.domain()),
1934 descriptor
.getRawData());
1936 // Insert the entry into the TLB
1937 tlb
->insert(currState
->vaddr
, te
);
1938 if (!currState
->timing
) {
1939 currState
->tc
= NULL
;
1940 currState
->req
= NULL
;
1944 ArmISA::TableWalker
*
1945 ArmTableWalkerParams::create()
1947 return new ArmISA::TableWalker(this);
1951 TableWalker::toLookupLevel(uint8_t lookup_level_as_int
)
1953 switch (lookup_level_as_int
) {
1961 panic("Invalid lookup level conversion");