misc: Replaced master/slave terminology
[gem5.git] / src / arch / arm / faults.cc
1 /*
2 * Copyright (c) 2010, 2012-2014, 2016-2019 ARM Limited
3 * All rights reserved
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
13 *
14 * Copyright (c) 2003-2005 The Regents of The University of Michigan
15 * Copyright (c) 2007-2008 The Florida State University
16 * All rights reserved.
17 *
18 * Redistribution and use in source and binary forms, with or without
19 * modification, are permitted provided that the following conditions are
20 * met: redistributions of source code must retain the above copyright
21 * notice, this list of conditions and the following disclaimer;
22 * redistributions in binary form must reproduce the above copyright
23 * notice, this list of conditions and the following disclaimer in the
24 * documentation and/or other materials provided with the distribution;
25 * neither the name of the copyright holders nor the names of its
26 * contributors may be used to endorse or promote products derived from
27 * this software without specific prior written permission.
28 *
29 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
30 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
31 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
32 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
33 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
34 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
35 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
36 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
37 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
38 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
39 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
40 */
41
42 #include "arch/arm/faults.hh"
43
44 #include "arch/arm/insts/static_inst.hh"
45 #include "arch/arm/interrupts.hh"
46 #include "arch/arm/isa.hh"
47 #include "arch/arm/self_debug.hh"
48 #include "arch/arm/system.hh"
49 #include "arch/arm/utility.hh"
50 #include "base/compiler.hh"
51 #include "base/trace.hh"
52 #include "cpu/base.hh"
53 #include "cpu/thread_context.hh"
54 #include "debug/Faults.hh"
55 #include "sim/full_system.hh"
56
57 namespace ArmISA
58 {
59
60 const uint32_t HighVecs = 0xFFFF0000;
61
// Per-FaultSource status encodings for the short-descriptor translation
// format. 0xff marks sources that have no encoding in this format
// (INVALID); presumably these are the FSR fault-status encodings — see
// the Arm ARM for the authoritative table.
uint8_t ArmFault::shortDescFaultSources[] = {
    0x01,  // AlignmentFault
    0x04,  // InstructionCacheMaintenance
    0xff,  // SynchExtAbtOnTranslTableWalkL0 (INVALID)
    0x0c,  // SynchExtAbtOnTranslTableWalkL1
    0x0e,  // SynchExtAbtOnTranslTableWalkL2
    0xff,  // SynchExtAbtOnTranslTableWalkL3 (INVALID)
    0xff,  // SynchPtyErrOnTranslTableWalkL0 (INVALID)
    0x1c,  // SynchPtyErrOnTranslTableWalkL1
    0x1e,  // SynchPtyErrOnTranslTableWalkL2
    0xff,  // SynchPtyErrOnTranslTableWalkL3 (INVALID)
    0xff,  // TranslationL0 (INVALID)
    0x05,  // TranslationL1
    0x07,  // TranslationL2
    0xff,  // TranslationL3 (INVALID)
    0xff,  // AccessFlagL0 (INVALID)
    0x03,  // AccessFlagL1
    0x06,  // AccessFlagL2
    0xff,  // AccessFlagL3 (INVALID)
    0xff,  // DomainL0 (INVALID)
    0x09,  // DomainL1
    0x0b,  // DomainL2
    0xff,  // DomainL3 (INVALID)
    0xff,  // PermissionL0 (INVALID)
    0x0d,  // PermissionL1
    0x0f,  // PermissionL2
    0xff,  // PermissionL3 (INVALID)
    0x02,  // DebugEvent
    0x08,  // SynchronousExternalAbort
    0x10,  // TLBConflictAbort
    0x19,  // SynchPtyErrOnMemoryAccess
    0x16,  // AsynchronousExternalAbort
    0x18,  // AsynchPtyErrOnMemoryAccess
    0xff,  // AddressSizeL0 (INVALID)
    0xff,  // AddressSizeL1 (INVALID)
    0xff,  // AddressSizeL2 (INVALID)
    0xff,  // AddressSizeL3 (INVALID)
    0x40,  // PrefetchTLBMiss
    0x80   // PrefetchUncacheable
};

// One entry per FaultSource; keep the table and the enum in lock step.
static_assert(sizeof(ArmFault::shortDescFaultSources) ==
              ArmFault::NumFaultSources,
              "Invalid size of ArmFault::shortDescFaultSources[]");
106
// Per-FaultSource status encodings for the long-descriptor (LPAE)
// translation format. 0xff marks sources with no encoding in this
// format (INVALID/RESERVED).
uint8_t ArmFault::longDescFaultSources[] = {
    0x21,  // AlignmentFault
    0xff,  // InstructionCacheMaintenance (INVALID)
    0xff,  // SynchExtAbtOnTranslTableWalkL0 (INVALID)
    0x15,  // SynchExtAbtOnTranslTableWalkL1
    0x16,  // SynchExtAbtOnTranslTableWalkL2
    0x17,  // SynchExtAbtOnTranslTableWalkL3
    0xff,  // SynchPtyErrOnTranslTableWalkL0 (INVALID)
    0x1d,  // SynchPtyErrOnTranslTableWalkL1
    0x1e,  // SynchPtyErrOnTranslTableWalkL2
    0x1f,  // SynchPtyErrOnTranslTableWalkL3
    0xff,  // TranslationL0 (INVALID)
    0x05,  // TranslationL1
    0x06,  // TranslationL2
    0x07,  // TranslationL3
    0xff,  // AccessFlagL0 (INVALID)
    0x09,  // AccessFlagL1
    0x0a,  // AccessFlagL2
    0x0b,  // AccessFlagL3
    0xff,  // DomainL0 (INVALID)
    0x3d,  // DomainL1
    0x3e,  // DomainL2
    0xff,  // DomainL3 (RESERVED)
    0xff,  // PermissionL0 (INVALID)
    0x0d,  // PermissionL1
    0x0e,  // PermissionL2
    0x0f,  // PermissionL3
    0x22,  // DebugEvent
    0x10,  // SynchronousExternalAbort
    0x30,  // TLBConflictAbort
    0x18,  // SynchPtyErrOnMemoryAccess
    0x11,  // AsynchronousExternalAbort
    0x19,  // AsynchPtyErrOnMemoryAccess
    0xff,  // AddressSizeL0 (INVALID)
    0xff,  // AddressSizeL1 (INVALID)
    0xff,  // AddressSizeL2 (INVALID)
    0xff,  // AddressSizeL3 (INVALID)
    0x40,  // PrefetchTLBMiss
    0x80   // PrefetchUncacheable
};

// One entry per FaultSource; keep the table and the enum in lock step.
static_assert(sizeof(ArmFault::longDescFaultSources) ==
              ArmFault::NumFaultSources,
              "Invalid size of ArmFault::longDescFaultSources[]");
151
// Per-FaultSource status encodings for AArch64. 0xff marks sources
// with no AArch64 encoding (INVALID).
uint8_t ArmFault::aarch64FaultSources[] = {
    0x21,  // AlignmentFault
    0xff,  // InstructionCacheMaintenance (INVALID)
    0x14,  // SynchExtAbtOnTranslTableWalkL0
    0x15,  // SynchExtAbtOnTranslTableWalkL1
    0x16,  // SynchExtAbtOnTranslTableWalkL2
    0x17,  // SynchExtAbtOnTranslTableWalkL3
    0x1c,  // SynchPtyErrOnTranslTableWalkL0
    0x1d,  // SynchPtyErrOnTranslTableWalkL1
    0x1e,  // SynchPtyErrOnTranslTableWalkL2
    0x1f,  // SynchPtyErrOnTranslTableWalkL3
    0x04,  // TranslationL0
    0x05,  // TranslationL1
    0x06,  // TranslationL2
    0x07,  // TranslationL3
    0x08,  // AccessFlagL0
    0x09,  // AccessFlagL1
    0x0a,  // AccessFlagL2
    0x0b,  // AccessFlagL3
    // @todo: Section & Page Domain Fault in AArch64?
    0xff,  // DomainL0 (INVALID)
    0xff,  // DomainL1 (INVALID)
    0xff,  // DomainL2 (INVALID)
    0xff,  // DomainL3 (INVALID)
    0x0c,  // PermissionL0
    0x0d,  // PermissionL1
    0x0e,  // PermissionL2
    0x0f,  // PermissionL3
    0x22,  // DebugEvent
    0x10,  // SynchronousExternalAbort
    0x30,  // TLBConflictAbort
    0x18,  // SynchPtyErrOnMemoryAccess
    0xff,  // AsynchronousExternalAbort (INVALID)
    0xff,  // AsynchPtyErrOnMemoryAccess (INVALID)
    0x00,  // AddressSizeL0
    0x01,  // AddressSizeL1
    0x02,  // AddressSizeL2
    0x03,  // AddressSizeL3
    0x40,  // PrefetchTLBMiss
    0x80   // PrefetchUncacheable
};

// One entry per FaultSource; keep the table and the enum in lock step.
static_assert(sizeof(ArmFault::aarch64FaultSources) ==
              ArmFault::NumFaultSources,
              "Invalid size of ArmFault::aarch64FaultSources[]");
197
// Static per-fault-type descriptor tables. These drive offset(),
// offset64(), ec(), etc. below.
// Fields: name, offset, cur{ELT,ELH}Offset, lowerEL{64,32}Offset, next mode,
//         {ARM, Thumb, ARM_ELR, Thumb_ELR} PC offset, hyp trap,
//         {A, F} disable, class, stat
template<> ArmFault::FaultVals ArmFaultVals<Reset>::vals(
    // Some dummy values (the reset vector has an IMPLEMENTATION DEFINED
    // location in AArch64)
    "Reset",                 0x000, 0x000, 0x000, 0x000, 0x000, MODE_SVC,
    0, 0, 0, 0, false, true,  true,  EC_UNKNOWN
);
template<> ArmFault::FaultVals ArmFaultVals<UndefinedInstruction>::vals(
    "Undefined Instruction", 0x004, 0x000, 0x200, 0x400, 0x600, MODE_UNDEFINED,
    4, 2, 0, 0, true,  false, false, EC_UNKNOWN
);
template<> ArmFault::FaultVals ArmFaultVals<SupervisorCall>::vals(
    "Supervisor Call",       0x008, 0x000, 0x200, 0x400, 0x600, MODE_SVC,
    4, 2, 4, 2, true,  false, false, EC_SVC_TO_HYP
);
template<> ArmFault::FaultVals ArmFaultVals<SecureMonitorCall>::vals(
    "Secure Monitor Call",   0x008, 0x000, 0x200, 0x400, 0x600, MODE_MON,
    4, 4, 4, 4, false, true,  true,  EC_SMC_TO_HYP
);
template<> ArmFault::FaultVals ArmFaultVals<HypervisorCall>::vals(
    "Hypervisor Call",       0x008, 0x000, 0x200, 0x400, 0x600, MODE_HYP,
    4, 4, 4, 4, true,  false, false, EC_HVC
);
template<> ArmFault::FaultVals ArmFaultVals<PrefetchAbort>::vals(
    "Prefetch Abort",        0x00C, 0x000, 0x200, 0x400, 0x600, MODE_ABORT,
    4, 4, 0, 0, true,  true,  false, EC_PREFETCH_ABORT_TO_HYP
);
template<> ArmFault::FaultVals ArmFaultVals<DataAbort>::vals(
    "Data Abort",            0x010, 0x000, 0x200, 0x400, 0x600, MODE_ABORT,
    8, 8, 0, 0, true,  true,  false, EC_DATA_ABORT_TO_HYP
);
template<> ArmFault::FaultVals ArmFaultVals<VirtualDataAbort>::vals(
    "Virtual Data Abort",    0x010, 0x000, 0x200, 0x400, 0x600, MODE_ABORT,
    8, 8, 0, 0, true,  true,  false, EC_INVALID
);
template<> ArmFault::FaultVals ArmFaultVals<HypervisorTrap>::vals(
    // @todo: double check these values
    "Hypervisor Trap",       0x014, 0x000, 0x200, 0x400, 0x600, MODE_HYP,
    0, 0, 0, 0, false, false, false, EC_UNKNOWN
);
template<> ArmFault::FaultVals ArmFaultVals<SecureMonitorTrap>::vals(
    "Secure Monitor Trap",   0x004, 0x000, 0x200, 0x400, 0x600, MODE_MON,
    4, 2, 0, 0, false, false, false, EC_UNKNOWN
);
template<> ArmFault::FaultVals ArmFaultVals<Interrupt>::vals(
    "IRQ",                   0x018, 0x080, 0x280, 0x480, 0x680, MODE_IRQ,
    4, 4, 0, 0, false, true,  false, EC_UNKNOWN
);
template<> ArmFault::FaultVals ArmFaultVals<VirtualInterrupt>::vals(
    "Virtual IRQ",           0x018, 0x080, 0x280, 0x480, 0x680, MODE_IRQ,
    4, 4, 0, 0, false, true,  false, EC_INVALID
);
template<> ArmFault::FaultVals ArmFaultVals<FastInterrupt>::vals(
    "FIQ",                   0x01C, 0x100, 0x300, 0x500, 0x700, MODE_FIQ,
    4, 4, 0, 0, false, true,  true,  EC_UNKNOWN
);
template<> ArmFault::FaultVals ArmFaultVals<VirtualFastInterrupt>::vals(
    "Virtual FIQ",           0x01C, 0x100, 0x300, 0x500, 0x700, MODE_FIQ,
    4, 4, 0, 0, false, true,  true,  EC_INVALID
);
template<> ArmFault::FaultVals ArmFaultVals<IllegalInstSetStateFault>::vals(
    "Illegal Inst Set State Fault",   0x004, 0x000, 0x200, 0x400, 0x600, MODE_UNDEFINED,
    4, 2, 0, 0, true, false, false, EC_ILLEGAL_INST
);
template<> ArmFault::FaultVals ArmFaultVals<SupervisorTrap>::vals(
    // Some dummy values (SupervisorTrap is AArch64-only)
    "Supervisor Trap",   0x014, 0x000, 0x200, 0x400, 0x600, MODE_SVC,
    0, 0, 0, 0, false, false, false, EC_UNKNOWN
);
template<> ArmFault::FaultVals ArmFaultVals<PCAlignmentFault>::vals(
    // Some dummy values (PCAlignmentFault is AArch64-only)
    "PC Alignment Fault",   0x000, 0x000, 0x200, 0x400, 0x600, MODE_SVC,
    0, 0, 0, 0, true, false, false, EC_PC_ALIGNMENT
);
template<> ArmFault::FaultVals ArmFaultVals<SPAlignmentFault>::vals(
    // Some dummy values (SPAlignmentFault is AArch64-only)
    "SP Alignment Fault",   0x000, 0x000, 0x200, 0x400, 0x600, MODE_SVC,
    0, 0, 0, 0, true, false, false, EC_STACK_PTR_ALIGNMENT
);
template<> ArmFault::FaultVals ArmFaultVals<SystemError>::vals(
    // Some dummy values (SError is AArch64-only)
    "SError",                0x000, 0x180, 0x380, 0x580, 0x780, MODE_SVC,
    0, 0, 0, 0, false, true,  true,  EC_SERROR
);
template<> ArmFault::FaultVals ArmFaultVals<SoftwareBreakpoint>::vals(
    // Some dummy values (SoftwareBreakpoint is AArch64-only)
    "Software Breakpoint",   0x000, 0x000, 0x200, 0x400, 0x600, MODE_SVC,
    0, 0, 0, 0, true, false, false, EC_SOFTWARE_BREAKPOINT
);
template<> ArmFault::FaultVals ArmFaultVals<HardwareBreakpoint>::vals(
    "Hardware Breakpoint",   0x000, 0x000, 0x200, 0x400, 0x600, MODE_SVC,
    0, 0, 0, 0, true, false, false, EC_HW_BREAKPOINT
);
template<> ArmFault::FaultVals ArmFaultVals<Watchpoint>::vals(
    "Watchpoint",   0x000, 0x000, 0x200, 0x400, 0x600, MODE_SVC,
    0, 0, 0, 0, true, false, false, EC_WATCHPOINT
);
template<> ArmFault::FaultVals ArmFaultVals<SoftwareStepFault>::vals(
    "SoftwareStep",   0x000, 0x000, 0x200, 0x400, 0x600, MODE_SVC,
    0, 0, 0, 0, true, false, false, EC_SOFTWARE_STEP
);
template<> ArmFault::FaultVals ArmFaultVals<ArmSev>::vals(
    // Some dummy values
    "ArmSev Flush",          0x000, 0x000, 0x000, 0x000, 0x000, MODE_SVC,
    0, 0, 0, 0, false, true,  true,  EC_UNKNOWN
);
306
307 Addr
308 ArmFault::getVector(ThreadContext *tc)
309 {
310 Addr base;
311
312 // Check for invalid modes
313 CPSR cpsr = tc->readMiscRegNoEffect(MISCREG_CPSR);
314 assert(ArmSystem::haveSecurity(tc) || cpsr.mode != MODE_MON);
315 assert(ArmSystem::haveVirtualization(tc) || cpsr.mode != MODE_HYP);
316
317 switch (cpsr.mode)
318 {
319 case MODE_MON:
320 base = tc->readMiscReg(MISCREG_MVBAR);
321 break;
322 case MODE_HYP:
323 base = tc->readMiscReg(MISCREG_HVBAR);
324 break;
325 default:
326 SCTLR sctlr = tc->readMiscReg(MISCREG_SCTLR);
327 if (sctlr.v) {
328 base = HighVecs;
329 } else {
330 base = ArmSystem::haveSecurity(tc) ?
331 tc->readMiscReg(MISCREG_VBAR) : 0;
332 }
333 break;
334 }
335
336 return base + offset(tc);
337 }
338
339 Addr
340 ArmFault::getVector64(ThreadContext *tc)
341 {
342 Addr vbar;
343 switch (toEL) {
344 case EL3:
345 assert(ArmSystem::haveSecurity(tc));
346 vbar = tc->readMiscReg(MISCREG_VBAR_EL3);
347 break;
348 case EL2:
349 assert(ArmSystem::haveVirtualization(tc));
350 vbar = tc->readMiscReg(MISCREG_VBAR_EL2);
351 break;
352 case EL1:
353 vbar = tc->readMiscReg(MISCREG_VBAR_EL1);
354 break;
355 default:
356 panic("Invalid target exception level");
357 break;
358 }
359 return vbar + offset64(tc);
360 }
361
362 MiscRegIndex
363 ArmFault::getSyndromeReg64() const
364 {
365 switch (toEL) {
366 case EL1:
367 return MISCREG_ESR_EL1;
368 case EL2:
369 return MISCREG_ESR_EL2;
370 case EL3:
371 return MISCREG_ESR_EL3;
372 default:
373 panic("Invalid exception level");
374 break;
375 }
376 }
377
378 MiscRegIndex
379 ArmFault::getFaultAddrReg64() const
380 {
381 switch (toEL) {
382 case EL1:
383 return MISCREG_FAR_EL1;
384 case EL2:
385 return MISCREG_FAR_EL2;
386 case EL3:
387 return MISCREG_FAR_EL3;
388 default:
389 panic("Invalid exception level");
390 break;
391 }
392 }
393
// Build the exception syndrome value (EC, IL, CV/COND, ISS fields) for
// this fault and write it into the given syndrome register (HSR or
// ESR_ELx).
void
ArmFault::setSyndrome(ThreadContext *tc, MiscRegIndex syndrome_reg)
{
    uint32_t value;
    uint32_t exc_class = (uint32_t) ec(tc);
    uint32_t issVal = iss();

    assert(!from64 || ArmSystem::highestELIs64(tc));

    // EC lives in bits [31:26] of the syndrome.
    value = exc_class << 26;

    // HSR.IL not valid for Prefetch Aborts (0x20, 0x21) and Data Aborts (0x24,
    // 0x25) for which the ISS information is not valid (ARMv7).
    // @todo: ARMv8 revises AArch32 functionality: when HSR.IL is not
    // valid it is treated as RES1.
    if (to64) {
        value |= 1 << 25;
    } else if ((bits(exc_class, 5, 3) != 4) ||
               (bits(exc_class, 2) && bits(issVal, 24))) {
        // IL = 1 for 32-bit (ARM or 32-bit Thumb) instructions.
        if (!machInst.thumb || machInst.bigThumb)
            value |= 1 << 25;
    }
    // Condition code valid for EC[5:4] nonzero
    if (!from64 && ((bits(exc_class, 5, 4) == 0) &&
                    (bits(exc_class, 3, 0) != 0))) {
        if (!machInst.thumb) {
            uint32_t cond;
            ConditionCode condCode = (ConditionCode) (uint32_t) machInst.condCode;
            // If its on unconditional instruction report with a cond code of
            // 0xE, ie the unconditional code
            cond = (condCode == COND_UC) ? COND_AL : condCode;
            value |= cond << 20;
            value |= 1 << 24;   // CV: the COND field above is valid
        }
        // Only ISS[19:0] is reported when COND/CV occupy [24:20].
        value |= bits(issVal, 19, 0);
    } else {
        value |= issVal;
    }
    tc->setMiscReg(syndrome_reg, value);
}
434
// Compute the fault routing state (fromEL/fromMode, toEL/toMode,
// from64/to64, hypRouted, span) from the current CPSR and the
// per-fault routing predicates. Must run before the fault state is
// consumed (see faultUpdated below).
void
ArmFault::update(ThreadContext *tc)
{
    CPSR cpsr = tc->readMiscReg(MISCREG_CPSR);

    // Determine source exception level and mode
    fromMode = (OperatingMode) (uint8_t) cpsr.mode;
    fromEL = opModeToEL(fromMode);
    if (opModeIs64(fromMode))
        from64 = true;

    // Determine target exception level (aarch64) or target execution
    // mode (aarch32). Monitor routing takes precedence over hyp
    // routing, which takes precedence over the fault's default mode.
    if (ArmSystem::haveSecurity(tc) && routeToMonitor(tc)) {
        toMode = MODE_MON;
        toEL = EL3;
    } else if (ArmSystem::haveVirtualization(tc) && routeToHyp(tc)) {
        toMode = MODE_HYP;
        toEL = EL2;
        hypRouted = true;
    } else {
        toMode = nextMode();
        toEL = opModeToEL(toMode);
    }

    // An exception is never taken to a lower exception level.
    if (fromEL > toEL)
        toEL = fromEL;

    // Check for Set Priviledge Access Never, if PAN is supported
    AA64MMFR1 mmfr1 = tc->readMiscReg(MISCREG_ID_AA64MMFR1_EL1);
    if (mmfr1.pan) {
        if (toEL == EL1) {
            const SCTLR sctlr = tc->readMiscReg(MISCREG_SCTLR_EL1);
            span = !sctlr.span;
        }

        const HCR hcr = tc->readMiscRegNoEffect(MISCREG_HCR_EL2);
        if (toEL == EL2 && hcr.e2h && hcr.tge) {
            const SCTLR sctlr = tc->readMiscReg(MISCREG_SCTLR_EL2);
            span = !sctlr.span;
        }
    }

    to64 = ELIs64(tc, toEL);

    // The fault specific informations have been updated; it is
    // now possible to use them inside the fault.
    faultUpdated = true;
}
484
// Take this fault on the given thread context: route it (update), and
// either hand off to the AArch64 entry path (invoke64) or perform the
// full AArch32 exception entry — save SPSR/LR, switch mode, mask
// interrupts, and jump to the vector.
void
ArmFault::invoke(ThreadContext *tc, const StaticInstPtr &inst)
{
    // Update fault state informations, like the starting mode (aarch32)
    // or EL (aarch64) and the ending mode or EL.
    // From the update function we are also evaluating if the fault must
    // be handled in AArch64 mode (to64).
    update(tc);

    if (to64) {
        // Invoke exception handler in AArch64 state
        invoke64(tc, inst);
        return;
    }

    // A debug vector-catch hit replaces normal exception entry.
    if (vectorCatch(tc, inst))
        return;

    // ARMv7 (ARM ARM issue C B1.9)

    bool have_security = ArmSystem::haveSecurity(tc);

    FaultBase::invoke(tc);
    if (!FullSystem)
        return;
    countStat()++;

    SCTLR sctlr = tc->readMiscReg(MISCREG_SCTLR);
    SCR scr = tc->readMiscReg(MISCREG_SCR);
    CPSR saved_cpsr = tc->readMiscReg(MISCREG_CPSR);
    // Fold the condition flags (held in separate CC registers) back
    // into the CPSR image that will be saved as the SPSR.
    saved_cpsr.nz = tc->readCCReg(CCREG_NZ);
    saved_cpsr.c = tc->readCCReg(CCREG_C);
    saved_cpsr.v = tc->readCCReg(CCREG_V);
    saved_cpsr.ge = tc->readCCReg(CCREG_GE);

    Addr curPc M5_VAR_USED = tc->pcState().pc();
    ITSTATE it = tc->pcState().itstate();
    saved_cpsr.it2 = it.top6;
    saved_cpsr.it1 = it.bottom2;

    // if we have a valid instruction then use it to annotate this fault with
    // extra information. This is used to generate the correct fault syndrome
    // information
    ArmStaticInst *arm_inst M5_VAR_USED = instrAnnotate(inst);

    // Ensure Secure state if initially in Monitor mode
    if (have_security && saved_cpsr.mode == MODE_MON) {
        // NOTE: this 'scr' deliberately shadows the outer one read
        // above; only the NS bit is rewritten here.
        SCR scr = tc->readMiscRegNoEffect(MISCREG_SCR);
        if (scr.ns) {
            scr.ns = 0;
            tc->setMiscRegNoEffect(MISCREG_SCR, scr);
        }
    }

    CPSR cpsr = tc->readMiscReg(MISCREG_CPSR);
    cpsr.mode = toMode;

    // some bits are set differently if we have been routed to hyp mode
    if (cpsr.mode == MODE_HYP) {
        SCTLR hsctlr = tc->readMiscReg(MISCREG_HSCTLR);
        cpsr.t = hsctlr.te;
        cpsr.e = hsctlr.ee;
        // A/I/F are only masked if the corresponding exception is not
        // routed to monitor mode by SCR.
        if (!scr.ea) {cpsr.a = 1;}
        if (!scr.fiq) {cpsr.f = 1;}
        if (!scr.irq) {cpsr.i = 1;}
    } else if (cpsr.mode == MODE_MON) {
        // Special case handling when entering monitor mode
        cpsr.t = sctlr.te;
        cpsr.e = sctlr.ee;
        cpsr.a = 1;
        cpsr.f = 1;
        cpsr.i = 1;
    } else {
        cpsr.t = sctlr.te;
        cpsr.e = sctlr.ee;

        // The *Disable functions are virtual and different per fault
        cpsr.a = cpsr.a | abortDisable(tc);
        cpsr.f = cpsr.f | fiqDisable(tc);
        cpsr.i = 1;
    }
    cpsr.it1 = cpsr.it2 = 0;
    cpsr.j = 0;
    cpsr.pan = span ? 1 : saved_cpsr.pan;
    tc->setMiscReg(MISCREG_CPSR, cpsr);

    // Make sure mailbox sets to one always
    tc->setMiscReg(MISCREG_SEV_MAILBOX, 1);

    // Clear the exclusive monitor
    tc->setMiscReg(MISCREG_LOCKFLAG, 0);

    // Save the preferred return address: ELR_hyp when entering hyp
    // mode, the banked LR otherwise.
    if (cpsr.mode == MODE_HYP) {
        tc->setMiscReg(MISCREG_ELR_HYP, curPc +
                (saved_cpsr.t ? thumbPcOffset(true) : armPcOffset(true)));
    } else {
        tc->setIntReg(INTREG_LR, curPc +
                (saved_cpsr.t ? thumbPcOffset(false) : armPcOffset(false)));
    }

    // Save the old CPSR into the banked SPSR of the target mode, and
    // write the syndrome register where the mode requires one.
    switch (cpsr.mode) {
      case MODE_FIQ:
        tc->setMiscReg(MISCREG_SPSR_FIQ, saved_cpsr);
        break;
      case MODE_IRQ:
        tc->setMiscReg(MISCREG_SPSR_IRQ, saved_cpsr);
        break;
      case MODE_SVC:
        tc->setMiscReg(MISCREG_SPSR_SVC, saved_cpsr);
        break;
      case MODE_MON:
        assert(have_security);
        tc->setMiscReg(MISCREG_SPSR_MON, saved_cpsr);
        break;
      case MODE_ABORT:
        tc->setMiscReg(MISCREG_SPSR_ABT, saved_cpsr);
        break;
      case MODE_UNDEFINED:
        tc->setMiscReg(MISCREG_SPSR_UND, saved_cpsr);
        if (ec(tc) != EC_UNKNOWN)
            setSyndrome(tc, MISCREG_HSR);
        break;
      case MODE_HYP:
        assert(ArmSystem::haveVirtualization(tc));
        tc->setMiscReg(MISCREG_SPSR_HYP, saved_cpsr);
        setSyndrome(tc, MISCREG_HSR);
        break;
      default:
        panic("unknown Mode\n");
    }

    Addr newPc = getVector(tc);
    DPRINTF(Faults, "Invoking Fault:%s cpsr:%#x PC:%#x lr:%#x newVec: %#x "
            "%s\n", name(), cpsr, curPc, tc->readIntReg(INTREG_LR),
            newPc, arm_inst ? csprintf("inst: %#x", arm_inst->encoding()) :
            std::string());
    PCState pc(newPc);
    pc.thumb(cpsr.t);
    pc.nextThumb(pc.thumb());
    pc.jazelle(cpsr.j);
    pc.nextJazelle(pc.jazelle());
    pc.aarch64(!cpsr.width);
    pc.nextAArch64(!cpsr.width);
    pc.illegalExec(false);
    tc->pcState(pc);
}
631
// AArch64 exception entry: save SPSR_ELx/ELR_ELx, update PSTATE for
// the target EL, jump to the vector and record the syndrome.
void
ArmFault::invoke64(ThreadContext *tc, const StaticInstPtr &inst)
{
    // Determine actual misc. register indices for ELR_ELx and SPSR_ELx
    MiscRegIndex elr_idx, spsr_idx;
    switch (toEL) {
      case EL1:
        elr_idx = MISCREG_ELR_EL1;
        spsr_idx = MISCREG_SPSR_EL1;
        break;
      case EL2:
        assert(ArmSystem::haveVirtualization(tc));
        elr_idx = MISCREG_ELR_EL2;
        spsr_idx = MISCREG_SPSR_EL2;
        break;
      case EL3:
        assert(ArmSystem::haveSecurity(tc));
        elr_idx = MISCREG_ELR_EL3;
        spsr_idx = MISCREG_SPSR_EL3;
        break;
      default:
        panic("Invalid target exception level");
        break;
    }

    // Save process state into SPSR_ELx
    CPSR cpsr = tc->readMiscReg(MISCREG_CPSR);
    CPSR spsr = cpsr;
    spsr.nz = tc->readCCReg(CCREG_NZ);
    spsr.c = tc->readCCReg(CCREG_C);
    spsr.v = tc->readCCReg(CCREG_V);
    spsr.ss = isResetSPSR() ? 0: cpsr.ss;
    if (from64) {
        // Force some bitfields to 0
        spsr.q = 0;
        spsr.it1 = 0;
        spsr.j = 0;
        spsr.ge = 0;
        spsr.it2 = 0;
        spsr.t = 0;
    } else {
        // AArch32 source: preserve GE flags and IT state.
        spsr.ge = tc->readCCReg(CCREG_GE);
        ITSTATE it = tc->pcState().itstate();
        spsr.it2 = it.top6;
        spsr.it1 = it.bottom2;
    }
    tc->setMiscReg(spsr_idx, spsr);

    // Save preferred return address into ELR_ELx
    Addr curr_pc = tc->pcState().pc();
    Addr ret_addr = curr_pc;
    if (from64)
        ret_addr += armPcElrOffset();
    else
        ret_addr += spsr.t ? thumbPcElrOffset() : armPcElrOffset();
    tc->setMiscReg(elr_idx, ret_addr);

    Addr vec_address = getVector64(tc);

    // Update process state
    OperatingMode64 mode = 0;
    mode.spX = 1;
    mode.el = toEL;
    mode.width = 0;
    cpsr.mode = mode;
    cpsr.daif = 0xf;   // mask D, A, I and F
    cpsr.il = 0;
    cpsr.ss = 0;
    cpsr.pan = span ? 1 : spsr.pan;
    tc->setMiscReg(MISCREG_CPSR, cpsr);

    // If we have a valid instruction then use it to annotate this fault with
    // extra information. This is used to generate the correct fault syndrome
    // information
    ArmStaticInst *arm_inst M5_VAR_USED = instrAnnotate(inst);

    // Set PC to start of exception handler
    Addr new_pc = purifyTaggedAddr(vec_address, tc, toEL, true);
    DPRINTF(Faults, "Invoking Fault (AArch64 target EL):%s cpsr:%#x PC:%#x "
            "elr:%#x newVec: %#x %s\n", name(), cpsr, curr_pc, ret_addr,
            new_pc, arm_inst ? csprintf("inst: %#x", arm_inst->encoding()) :
            std::string());
    PCState pc(new_pc);
    pc.aarch64(!cpsr.width);
    pc.nextAArch64(!cpsr.width);
    pc.illegalExec(false);
    pc.stepped(false);
    tc->pcState(pc);

    // Save exception syndrome
    if ((nextMode() != MODE_IRQ) && (nextMode() != MODE_FIQ))
        setSyndrome(tc, getSyndromeReg64());
}
725
726 bool
727 ArmFault::vectorCatch(ThreadContext *tc, const StaticInstPtr &inst)
728 {
729 SelfDebug *sd = ArmISA::ISA::getSelfDebug(tc);
730 VectorCatch* vc = sd->getVectorCatch(tc);
731 if (!vc->isVCMatch()) {
732 Fault fault = sd->testVectorCatch(tc, 0x0, this);
733 if (fault != NoFault)
734 fault->invoke(tc, inst);
735 return true;
736 }
737 return false;
738 }
739
740 ArmStaticInst *
741 ArmFault::instrAnnotate(const StaticInstPtr &inst)
742 {
743 if (inst) {
744 auto arm_inst = static_cast<ArmStaticInst *>(inst.get());
745 arm_inst->annotateFault(this);
746 return arm_inst;
747 } else {
748 return nullptr;
749 }
750 }
751
752 Addr
753 Reset::getVector(ThreadContext *tc)
754 {
755 Addr base;
756
757 // Check for invalid modes
758 CPSR M5_VAR_USED cpsr = tc->readMiscRegNoEffect(MISCREG_CPSR);
759 assert(ArmSystem::haveSecurity(tc) || cpsr.mode != MODE_MON);
760 assert(ArmSystem::haveVirtualization(tc) || cpsr.mode != MODE_HYP);
761
762 // RVBAR is aliased (implemented as) MVBAR in gem5, since the two
763 // are mutually exclusive; there is no need to check here for
764 // which register to use since they hold the same value
765 base = tc->readMiscReg(MISCREG_MVBAR);
766
767 return base + offset(tc);
768 }
769
// Handle a CPU reset: clear interrupts/arch state in full-system mode,
// then either perform the AArch32 reset entry (via ArmFault::invoke) or
// jump straight to the AArch64 implementation-defined reset address.
void
Reset::invoke(ThreadContext *tc, const StaticInstPtr &inst)
{
    if (FullSystem) {
        tc->getCpuPtr()->clearInterrupts(tc->threadId());
        tc->clearArchRegs();
    }
    if (!ArmSystem::highestELIs64(tc)) {
        ArmFault::invoke(tc, inst);
        tc->setMiscReg(MISCREG_VMPIDR,
                       getMPIDR(dynamic_cast<ArmSystem*>(tc->getSystemPtr()), tc));

        // Unless we have SMC code to get us there, boot in HYP!
        if (ArmSystem::haveVirtualization(tc) &&
            !ArmSystem::haveSecurity(tc)) {
            CPSR cpsr = tc->readMiscReg(MISCREG_CPSR);
            cpsr.mode = MODE_HYP;
            tc->setMiscReg(MISCREG_CPSR, cpsr);
        }
    } else {
        // Advance the PC to the IMPLEMENTATION DEFINED reset value
        PCState pc = ArmSystem::resetAddr(tc);
        pc.aarch64(true);
        pc.nextAArch64(true);
        tc->pcState(pc);
    }
}
797
798 void
799 UndefinedInstruction::invoke(ThreadContext *tc, const StaticInstPtr &inst)
800 {
801 if (FullSystem) {
802 ArmFault::invoke(tc, inst);
803 return;
804 }
805
806 // If the mnemonic isn't defined this has to be an unknown instruction.
807 assert(unknown || mnemonic != NULL);
808 auto arm_inst = static_cast<ArmStaticInst *>(inst.get());
809 if (disabled) {
810 panic("Attempted to execute disabled instruction "
811 "'%s' (inst 0x%08x)", mnemonic, arm_inst->encoding());
812 } else if (unknown) {
813 panic("Attempted to execute unknown instruction (inst 0x%08x)",
814 arm_inst->encoding());
815 } else {
816 panic("Attempted to execute unimplemented instruction "
817 "'%s' (inst 0x%08x)", mnemonic, arm_inst->encoding());
818 }
819 }
820
821 bool
822 UndefinedInstruction::routeToHyp(ThreadContext *tc) const
823 {
824 HCR hcr = tc->readMiscRegNoEffect(MISCREG_HCR);
825 return fromEL == EL2 ||
826 (EL2Enabled(tc) && (fromEL == EL0) && hcr.tge);
827 }
828
829 uint32_t
830 UndefinedInstruction::iss() const
831 {
832
833 // If UndefinedInstruction is routed to hypervisor, iss field is 0.
834 if (hypRouted) {
835 return 0;
836 }
837
838 if (overrideEc == EC_INVALID)
839 return issRaw;
840
841 uint32_t new_iss = 0;
842 uint32_t op0, op1, op2, CRn, CRm, Rt, dir;
843
844 dir = bits(machInst, 21, 21);
845 op0 = bits(machInst, 20, 19);
846 op1 = bits(machInst, 18, 16);
847 CRn = bits(machInst, 15, 12);
848 CRm = bits(machInst, 11, 8);
849 op2 = bits(machInst, 7, 5);
850 Rt = bits(machInst, 4, 0);
851
852 new_iss = op0 << 20 | op2 << 17 | op1 << 14 | CRn << 10 |
853 Rt << 5 | CRm << 1 | dir;
854
855 return new_iss;
856 }
857
858 void
859 SupervisorCall::invoke(ThreadContext *tc, const StaticInstPtr &inst)
860 {
861 if (FullSystem) {
862 ArmFault::invoke(tc, inst);
863 return;
864 }
865
866 // As of now, there isn't a 32 bit thumb version of this instruction.
867 assert(!machInst.bigThumb);
868 tc->syscall();
869
870 // Advance the PC since that won't happen automatically.
871 PCState pc = tc->pcState();
872 assert(inst);
873 inst->advancePC(pc);
874 tc->pcState(pc);
875 }
876
877 bool
878 SupervisorCall::routeToHyp(ThreadContext *tc) const
879 {
880 HCR hcr = tc->readMiscRegNoEffect(MISCREG_HCR);
881 return fromEL == EL2 ||
882 (EL2Enabled(tc) && fromEL == EL0 && hcr.tge);
883 }
884
885 ExceptionClass
886 SupervisorCall::ec(ThreadContext *tc) const
887 {
888 return (overrideEc != EC_INVALID) ? overrideEc :
889 (from64 ? EC_SVC_64 : vals.ec);
890 }
891
892 uint32_t
893 SupervisorCall::iss() const
894 {
895 // Even if we have a 24 bit imm from an arm32 instruction then we only use
896 // the bottom 16 bits for the ISS value (it doesn't hurt for AArch64 SVC).
897 return issRaw & 0xFFFF;
898 }
899
900 uint32_t
901 SecureMonitorCall::iss() const
902 {
903 if (from64)
904 return bits(machInst, 20, 5);
905 return 0;
906 }
907
908 ExceptionClass
909 UndefinedInstruction::ec(ThreadContext *tc) const
910 {
911 // If UndefinedInstruction is routed to hypervisor,
912 // HSR.EC field is 0.
913 if (hypRouted)
914 return EC_UNKNOWN;
915 else
916 return (overrideEc != EC_INVALID) ? overrideEc : vals.ec;
917 }
918
919
HypervisorCall::HypervisorCall(ExtMachInst _machInst, uint32_t _imm) :
    ArmFaultVals<HypervisorCall>(_machInst, _imm)
{
    // NOTE(review): presumably marks HVC as relevant to software-step
    // ("behavior step") handling — confirm against SelfDebug users.
    bStep = true;
}
925
926 bool
927 HypervisorCall::routeToMonitor(ThreadContext *tc) const
928 {
929 return from64 && fromEL == EL3;
930 }
931
932 bool
933 HypervisorCall::routeToHyp(ThreadContext *tc) const
934 {
935 return !from64 || fromEL != EL3;
936 }
937
938 ExceptionClass
939 HypervisorCall::ec(ThreadContext *tc) const
940 {
941 return from64 ? EC_HVC_64 : vals.ec;
942 }
943
944 ExceptionClass
945 HypervisorTrap::ec(ThreadContext *tc) const
946 {
947 return (overrideEc != EC_INVALID) ? overrideEc : vals.ec;
948 }
949
950 template<class T>
951 FaultOffset
952 ArmFaultVals<T>::offset(ThreadContext *tc)
953 {
954 bool isHypTrap = false;
955
956 // Normally we just use the exception vector from the table at the top if
957 // this file, however if this exception has caused a transition to hype
958 // mode, and its an exception type that would only do this if it has been
959 // trapped then we use the hyp trap vector instead of the normal vector
960 if (vals.hypTrappable) {
961 CPSR cpsr = tc->readMiscReg(MISCREG_CPSR);
962 if (cpsr.mode == MODE_HYP) {
963 CPSR spsr = tc->readMiscReg(MISCREG_SPSR_HYP);
964 isHypTrap = spsr.mode != MODE_HYP;
965 }
966 }
967 return isHypTrap ? 0x14 : vals.offset;
968 }
969
970 template<class T>
971 FaultOffset
972 ArmFaultVals<T>::offset64(ThreadContext *tc)
973 {
974 if (toEL == fromEL) {
975 if (opModeIsT(fromMode))
976 return vals.currELTOffset;
977 return vals.currELHOffset;
978 } else {
979 bool lower_32 = false;
980 if (toEL == EL3) {
981 if (EL2Enabled(tc))
982 lower_32 = ELIs32(tc, EL2);
983 else
984 lower_32 = ELIs32(tc, EL1);
985 } else if (ELIsInHost(tc, fromEL) && fromEL == EL0 && toEL == EL2) {
986 lower_32 = ELIs32(tc, EL0);
987 } else {
988 lower_32 = ELIs32(tc, static_cast<ExceptionLevel>(toEL - 1));
989 }
990
991 if (lower_32)
992 return vals.lowerEL32Offset;
993 return vals.lowerEL64Offset;
994 }
995 }
996
997 // void
998 // SupervisorCall::setSyndrome64(ThreadContext *tc, MiscRegIndex esr_idx)
999 // {
1000 // ESR esr = 0;
1001 // esr.ec = machInst.aarch64 ? SvcAArch64 : SvcAArch32;
1002 // esr.il = !machInst.thumb;
1003 // if (machInst.aarch64)
1004 // esr.imm16 = bits(machInst.instBits, 20, 5);
1005 // else if (machInst.thumb)
1006 // esr.imm16 = bits(machInst.instBits, 7, 0);
1007 // else
1008 // esr.imm16 = bits(machInst.instBits, 15, 0);
1009 // tc->setMiscReg(esr_idx, esr);
1010 // }
1011
1012 void
1013 SecureMonitorCall::invoke(ThreadContext *tc, const StaticInstPtr &inst)
1014 {
1015 if (FullSystem) {
1016 ArmFault::invoke(tc, inst);
1017 return;
1018 }
1019 }
1020
1021 ExceptionClass
1022 SecureMonitorCall::ec(ThreadContext *tc) const
1023 {
1024 return (from64 ? EC_SMC_64 : vals.ec);
1025 }
1026
1027 bool
1028 SupervisorTrap::routeToHyp(ThreadContext *tc) const
1029 {
1030 HCR hcr = tc->readMiscRegNoEffect(MISCREG_HCR_EL2);
1031 return EL2Enabled(tc) && currEL(tc) <= EL1 && hcr.tge;
1032 }
1033
1034 uint32_t
1035 SupervisorTrap::iss() const
1036 {
1037 // If SupervisorTrap is routed to hypervisor, iss field is 0.
1038 if (hypRouted) {
1039 return 0;
1040 }
1041 return issRaw;
1042 }
1043
1044 ExceptionClass
1045 SupervisorTrap::ec(ThreadContext *tc) const
1046 {
1047 if (hypRouted)
1048 return EC_UNKNOWN;
1049 else
1050 return (overrideEc != EC_INVALID) ? overrideEc : vals.ec;
1051 }
1052
1053 ExceptionClass
1054 SecureMonitorTrap::ec(ThreadContext *tc) const
1055 {
1056 return (overrideEc != EC_INVALID) ? overrideEc :
1057 (from64 ? EC_SMC_64 : vals.ec);
1058 }
1059
// Architecturally deliver an abort (prefetch or data): resolve the
// translation regime if still unknown, run the generic fault delivery,
// then update the fault status/address registers (AArch32 FSR/FAR/HFAR
// or AArch64 FAR_ELx/HPFAR_EL2).
template<class T>
void
AbortFault<T>::invoke(ThreadContext *tc, const StaticInstPtr &inst)
{
    if (tranMethod == ArmFault::UnknownTran) {
        tranMethod = longDescFormatInUse(tc) ? ArmFault::LpaeTran
                                             : ArmFault::VmsaTran;

        if ((tranMethod == ArmFault::VmsaTran) && this->routeToMonitor(tc)) {
            // See ARM ARM B3-1416
            bool override_LPAE = false;
            TTBCR ttbcr_s = tc->readMiscReg(MISCREG_TTBCR_S);
            TTBCR M5_VAR_USED ttbcr_ns = tc->readMiscReg(MISCREG_TTBCR_NS);
            if (ttbcr_s.eae) {
                override_LPAE = true;
            } else {
                // Unimplemented code option, not seen in testing. May need
                // extension according to the manual excerpt above.
                DPRINTF(Faults, "Warning: Incomplete translation method "
                        "override detected.\n");
            }
            if (override_LPAE)
                tranMethod = ArmFault::LpaeTran;
        }
    }

    // Taking the abort consumes any pending asynchronous external abort.
    if (source == ArmFault::AsynchronousExternalAbort) {
        tc->getCpuPtr()->clearInterrupt(tc->threadId(), INT_ABT, 0);
    }
    // Get effective fault source encoding
    CPSR cpsr = tc->readMiscReg(MISCREG_CPSR);

    // source must be determined BEFORE invoking generic routines which will
    // try to set hsr etc. and are based upon source!
    ArmFaultVals<T>::invoke(tc, inst);

    if (!this->to64) {  // AArch32
        FSR fsr = getFsr(tc);
        if (cpsr.mode == MODE_HYP) {
            tc->setMiscReg(T::HFarIndex, faultAddr);
        } else if (stage2) {
            // Stage 2: HPFAR holds the faulting IPA, HFAR the original VA.
            tc->setMiscReg(MISCREG_HPFAR, (faultAddr >> 8) & ~0xf);
            tc->setMiscReg(T::HFarIndex, OVAddr);
        } else if (debug > ArmFault::NODEBUG) {
            DBGDS32 Rext = tc->readMiscReg(MISCREG_DBGDSCRext);
            tc->setMiscReg(T::FarIndex, faultAddr);
            // MOE (method-of-entry) codes: 0x1 breakpoint, 0x5 vector
            // catch, 0xa watchpoint.
            if (debug == ArmFault::BRKPOINT){
                Rext.moe = 0x1;
            } else if (debug == ArmFault::VECTORCATCH){
                Rext.moe = 0x5;
            } else if (debug > ArmFault::VECTORCATCH) {
                Rext.moe = 0xa;
                fsr.cm = (debug == ArmFault::WPOINT_CM)? 1 : 0;
            }

            tc->setMiscReg(T::FsrIndex, fsr);
            tc->setMiscReg(MISCREG_DBGDSCRext, Rext);

        } else {
            tc->setMiscReg(T::FsrIndex, fsr);
            tc->setMiscReg(T::FarIndex, faultAddr);
        }
        DPRINTF(Faults, "Abort Fault source=%#x fsr=%#x faultAddr=%#x "\
                "tranMethod=%#x\n", source, fsr, faultAddr, tranMethod);
    } else {  // AArch64
        // Set the FAR register.  Nothing else to do if we are in AArch64 state
        // because the syndrome register has already been set inside invoke64()
        if (stage2) {
            // stage 2 fault, set HPFAR_EL2 to the faulting IPA
            // and FAR_EL2 to the Original VA
            tc->setMiscReg(AbortFault<T>::getFaultAddrReg64(), OVAddr);
            tc->setMiscReg(MISCREG_HPFAR_EL2, bits(faultAddr, 47, 12) << 4);

            DPRINTF(Faults, "Abort Fault (Stage 2) VA: 0x%x IPA: 0x%x\n",
                    OVAddr, faultAddr);
        } else {
            tc->setMiscReg(AbortFault<T>::getFaultAddrReg64(), faultAddr);
        }
    }
}
1140
// Encode the fault source into srcEncoded before delegating to the
// generic syndrome packing; a source with no valid encoding is a
// simulator bug.
template<class T>
void
AbortFault<T>::setSyndrome(ThreadContext *tc, MiscRegIndex syndrome_reg)
{
    srcEncoded = getFaultStatusCode(tc);
    if (srcEncoded == ArmFault::FaultSourceInvalid) {
        panic("Invalid fault source\n");
    }
    ArmFault::setSyndrome(tc, syndrome_reg);
}
1151
// Translate the architecture-independent fault source into the encoded
// fault status code (FSC) for the active format: short- or
// long-descriptor for AArch32, the unified AArch64 encoding otherwise.
// Requires the fault's internal state (to64, tranMethod) to have been
// updated first, as checked via faultUpdated.
template<class T>
uint8_t
AbortFault<T>::getFaultStatusCode(ThreadContext *tc) const
{

    panic_if(!this->faultUpdated,
             "Trying to use un-updated ArmFault internal variables\n");

    uint8_t fsc = 0;

    if (!this->to64) {
        // AArch32
        assert(tranMethod != ArmFault::UnknownTran);
        if (tranMethod == ArmFault::LpaeTran) {
            fsc = ArmFault::longDescFaultSources[source];
        } else {
            fsc = ArmFault::shortDescFaultSources[source];
        }
    } else {
        // AArch64
        fsc = ArmFault::aarch64FaultSources[source];
    }

    return fsc;
}
1177
// Build the AArch32 fault status register (DFSR/IFSR) image for this
// abort. The field layout differs between the long-descriptor (LPAE)
// and short-descriptor translation formats.
template<class T>
FSR
AbortFault<T>::getFsr(ThreadContext *tc) const
{
    FSR fsr = 0;

    auto fsc = getFaultStatusCode(tc);

    // AArch32
    assert(tranMethod != ArmFault::UnknownTran);
    if (tranMethod == ArmFault::LpaeTran) {
        // Long-descriptor: single status field plus the LPAE marker bit.
        fsr.status = fsc;
        fsr.lpae = 1;
    } else {
        // Short-descriptor: status split across FS[3:0]/FS[4], plus the
        // faulting memory domain.
        fsr.fsLow = bits(fsc, 3, 0);
        fsr.fsHigh = bits(fsc, 4);
        fsr.domain = static_cast<uint8_t>(domain);
    }

    // WnR: set when the faulting access was a write.
    fsr.wnr = (write ? 1 : 0);
    fsr.ext = 0;

    return fsr;
}
1202
1203 template<class T>
1204 bool
1205 AbortFault<T>::abortDisable(ThreadContext *tc)
1206 {
1207 if (ArmSystem::haveSecurity(tc)) {
1208 SCR scr = tc->readMiscRegNoEffect(MISCREG_SCR);
1209 return (!scr.ns || scr.aw);
1210 }
1211 return true;
1212 }
1213
// Attach extra source-specific information to the fault. Unknown IDs are
// ignored so callers can annotate generically.
template<class T>
void
AbortFault<T>::annotate(ArmFault::AnnotationIDs id, uint64_t val)
{
    switch (id)
    {
      case ArmFault::S1PTW:
        // Fault happened during a stage 1 page table walk.
        s1ptw = val;
        break;
      case ArmFault::OVA:
        // Original VA, reported alongside the IPA for stage 2 faults.
        OVAddr = val;
        break;

      // Just ignore unknown ID's
      default:
        break;
    }
}
1232
1233 template<class T>
1234 uint32_t
1235 AbortFault<T>::iss() const
1236 {
1237 uint32_t val;
1238
1239 val = srcEncoded & 0x3F;
1240 val |= write << 6;
1241 val |= s1ptw << 7;
1242 return (val);
1243 }
1244
1245 template<class T>
1246 bool
1247 AbortFault<T>::isMMUFault() const
1248 {
1249 // NOTE: Not relying on LL information being aligned to lowest bits here
1250 return
1251 (source == ArmFault::AlignmentFault) ||
1252 ((source >= ArmFault::TranslationLL) &&
1253 (source < ArmFault::TranslationLL + 4)) ||
1254 ((source >= ArmFault::AccessFlagLL) &&
1255 (source < ArmFault::AccessFlagLL + 4)) ||
1256 ((source >= ArmFault::DomainLL) &&
1257 (source < ArmFault::DomainLL + 4)) ||
1258 ((source >= ArmFault::PermissionLL) &&
1259 (source < ArmFault::PermissionLL + 4));
1260 }
1261
1262 template<class T>
1263 bool
1264 AbortFault<T>::getFaultVAddr(Addr &va) const
1265 {
1266 va = (stage2 ? OVAddr : faultAddr);
1267 return true;
1268 }
1269
// Exception class for a prefetch (instruction) abort.
ExceptionClass
PrefetchAbort::ec(ThreadContext *tc) const
{
    if (to64) {
        // AArch64
        if (toEL == fromEL)
            return EC_PREFETCH_ABORT_CURR_EL;
        else
            return EC_PREFETCH_ABORT_LOWER_EL;
    } else {
        // AArch32
        // Abort faults have different EC codes depending on whether
        // the fault originated within HYP mode, or not. So override
        // the method and add the extra adjustment of the EC value.

        ExceptionClass ec = ArmFaultVals<PrefetchAbort>::vals.ec;

        CPSR spsr = tc->readMiscReg(MISCREG_SPSR_HYP);
        if (spsr.mode == MODE_HYP) {
            // The +1 relies on the EC enum placing the from-HYP variant
            // immediately after the base value.
            ec = ((ExceptionClass) (((uint32_t) ec) + 1));
        }
        return ec;
    }
}
1294
1295 bool
1296 PrefetchAbort::routeToMonitor(ThreadContext *tc) const
1297 {
1298 SCR scr = 0;
1299 if (from64)
1300 scr = tc->readMiscRegNoEffect(MISCREG_SCR_EL3);
1301 else
1302 scr = tc->readMiscRegNoEffect(MISCREG_SCR);
1303
1304 return scr.ea && !isMMUFault();
1305 }
1306
// Decide whether this prefetch abort is taken to hyp mode / EL2.
bool
PrefetchAbort::routeToHyp(ThreadContext *tc) const
{
    bool toHyp;

    HCR hcr = tc->readMiscRegNoEffect(MISCREG_HCR);
    HDCR hdcr = tc->readMiscRegNoEffect(MISCREG_HDCR);

    // Stay in EL2 if the abort was taken from there.
    toHyp = fromEL == EL2;
    // Otherwise route up for stage 2 faults, when HCR.TGE re-routes
    // lower-EL exceptions, or for debug events trapped by HDCR.TDE.
    // NOTE(review): this uses haveEL(EL2) && !isSecure() while
    // DataAbort::routeToHyp uses EL2Enabled() — confirm whether the two
    // should be unified for the secure-EL2 case.
    toHyp |= ArmSystem::haveEL(tc, EL2) && !isSecure(tc) &&
             currEL(tc) <= EL1 && (hcr.tge || stage2 ||
                                   (source == DebugEvent && hdcr.tde));
    return toHyp;
}
1321
// Exception class for a data abort. Asynchronous external aborts must be
// delivered as SErrors in AArch64, never through this path.
ExceptionClass
DataAbort::ec(ThreadContext *tc) const
{
    if (to64) {
        // AArch64
        if (source == ArmFault::AsynchronousExternalAbort) {
            panic("Asynchronous External Abort should be handled with "
                  "SystemErrors (SErrors)!");
        }
        if (toEL == fromEL)
            return EC_DATA_ABORT_CURR_EL;
        else
            return EC_DATA_ABORT_LOWER_EL;
    } else {
        // AArch32
        // Abort faults have different EC codes depending on whether
        // the fault originated within HYP mode, or not. So override
        // the method and add the extra adjustment of the EC value.

        ExceptionClass ec = ArmFaultVals<DataAbort>::vals.ec;

        CPSR spsr = tc->readMiscReg(MISCREG_SPSR_HYP);
        if (spsr.mode == MODE_HYP) {
            // The +1 relies on the EC enum placing the from-HYP variant
            // immediately after the base value.
            ec = ((ExceptionClass) (((uint32_t) ec) + 1));
        }
        return ec;
    }
}
1350
1351 bool
1352 DataAbort::routeToMonitor(ThreadContext *tc) const
1353 {
1354 SCR scr = 0;
1355 if (from64)
1356 scr = tc->readMiscRegNoEffect(MISCREG_SCR_EL3);
1357 else
1358 scr = tc->readMiscRegNoEffect(MISCREG_SCR);
1359
1360 return scr.ea && !isMMUFault();
1361 }
1362
// Decide whether this data abort is taken to hyp mode / EL2.
bool
DataAbort::routeToHyp(ThreadContext *tc) const
{
    bool toHyp;

    HCR hcr = tc->readMiscRegNoEffect(MISCREG_HCR);
    HDCR hdcr = tc->readMiscRegNoEffect(MISCREG_HDCR);

    // AMO routes async external aborts to EL2; when TGE is set it is
    // treated as set unless VHE is implemented and E2H is also set.
    bool amo = hcr.amo;
    if (hcr.tge == 1)
        amo = (!HaveVirtHostExt(tc) || hcr.e2h == 0);

    // if in Hyp mode then stay in Hyp mode
    // Otherwise route up for: TGE, stage 2 faults, async external aborts
    // under AMO, EL0 alignment/sync-external aborts under TGE, and debug
    // events trapped by TDE or TGE.
    toHyp = fromEL == EL2 ||
        (EL2Enabled(tc) && fromEL <= EL1
         && (hcr.tge || stage2 ||
             ((source == AsynchronousExternalAbort) && amo) ||
             ((fromEL == EL0) && hcr.tge &&
              ((source == AlignmentFault) ||
               (source == SynchronousExternalAbort))) ||
             ((source == DebugEvent) && (hdcr.tde || hcr.tge))));
    return toHyp;
}
1386
// Build the data abort ISS: the generic abort fields plus CM (cache
// maintenance) and, when valid, the instruction syndrome fields
// (ISV, SAS, SSE, SRT, SF, AR).
uint32_t
DataAbort::iss() const
{
    uint32_t val;

    // Add on the data abort specific fields to the generic abort ISS value
    val = AbortFault<DataAbort>::iss();

    // CM: the access was a cache maintenance operation.
    val |= cm << 8;

    // ISS is valid if not caused by a stage 1 page table walk, and when taken
    // to AArch64 only when directed to EL2
    if (!s1ptw && stage2 && (!to64 || toEL == EL2)) {
        val |= isv << 24;
        if (isv) {
            val |= sas << 22;
            val |= sse << 21;
            val |= srt << 16;
            // AArch64 only. These assignments are safe on AArch32 as well
            // because these vars are initialized to false
            val |= sf << 15;
            val |= ar << 14;
        }
    }
    return (val);
}
1413
// Attach data-abort specific annotations. Any instruction-syndrome field
// (SAS/SSE/SRT/SF/AR) marks the syndrome as valid (ISV).
void
DataAbort::annotate(AnnotationIDs id, uint64_t val)
{
    AbortFault<DataAbort>::annotate(id, val);
    switch (id)
    {
      case SAS:
        // Access size.
        isv = true;
        sas = val;
        break;
      case SSE:
        // Sign extension of the loaded value.
        isv = true;
        sse = val;
        break;
      case SRT:
        // Syndrome register transfer (target register number).
        isv = true;
        srt = val;
        break;
      case SF:
        // Sixty-four bit register width.
        isv = true;
        sf = val;
        break;
      case AR:
        // Acquire/release semantics.
        isv = true;
        ar = val;
        break;
      case CM:
        // Cache maintenance operation.
        cm = val;
        break;
      case OFA:
        // Override the reported faulting address.
        faultAddr = val;
        break;
      // Just ignore unknown ID's
      default:
        break;
    }
}
1451
// Deliver a virtual data abort; taking it clears HCR.VA, the pending bit
// the hypervisor set to inject the abort.
void
VirtualDataAbort::invoke(ThreadContext *tc, const StaticInstPtr &inst)
{
    AbortFault<VirtualDataAbort>::invoke(tc, inst);
    HCR hcr = tc->readMiscRegNoEffect(MISCREG_HCR);
    hcr.va = 0;
    tc->setMiscRegNoEffect(MISCREG_HCR, hcr);
}
1460
1461 bool
1462 Interrupt::routeToMonitor(ThreadContext *tc) const
1463 {
1464 assert(ArmSystem::haveSecurity(tc));
1465 SCR scr = 0;
1466 if (from64)
1467 scr = tc->readMiscRegNoEffect(MISCREG_SCR_EL3);
1468 else
1469 scr = tc->readMiscRegNoEffect(MISCREG_SCR);
1470 return scr.irq;
1471 }
1472
1473 bool
1474 Interrupt::routeToHyp(ThreadContext *tc) const
1475 {
1476 HCR hcr = tc->readMiscRegNoEffect(MISCREG_HCR);
1477 return fromEL == EL2 ||
1478 (EL2Enabled(tc) && fromEL <= EL1 && (hcr.tge || hcr.imo));
1479 }
1480
1481 bool
1482 Interrupt::abortDisable(ThreadContext *tc)
1483 {
1484 if (ArmSystem::haveSecurity(tc)) {
1485 SCR scr = tc->readMiscRegNoEffect(MISCREG_SCR);
1486 return (!scr.ns || scr.aw);
1487 }
1488 return true;
1489 }
1490
// Trivial constructor: all behavior comes from the
// ArmFaultVals<VirtualInterrupt> static configuration.
VirtualInterrupt::VirtualInterrupt()
{}
1493
1494 bool
1495 FastInterrupt::routeToMonitor(ThreadContext *tc) const
1496 {
1497 assert(ArmSystem::haveSecurity(tc));
1498 SCR scr = 0;
1499 if (from64)
1500 scr = tc->readMiscRegNoEffect(MISCREG_SCR_EL3);
1501 else
1502 scr = tc->readMiscRegNoEffect(MISCREG_SCR);
1503 return scr.fiq;
1504 }
1505
1506 bool
1507 FastInterrupt::routeToHyp(ThreadContext *tc) const
1508 {
1509 HCR hcr = tc->readMiscRegNoEffect(MISCREG_HCR);
1510 return fromEL == EL2 ||
1511 (EL2Enabled(tc) && fromEL <= EL1 && (hcr.tge || hcr.fmo));
1512 }
1513
1514 bool
1515 FastInterrupt::abortDisable(ThreadContext *tc)
1516 {
1517 if (ArmSystem::haveSecurity(tc)) {
1518 SCR scr = tc->readMiscRegNoEffect(MISCREG_SCR);
1519 return (!scr.ns || scr.aw);
1520 }
1521 return true;
1522 }
1523
1524 bool
1525 FastInterrupt::fiqDisable(ThreadContext *tc)
1526 {
1527 if (ArmSystem::haveVirtualization(tc)) {
1528 return true;
1529 } else if (ArmSystem::haveSecurity(tc)) {
1530 SCR scr = tc->readMiscRegNoEffect(MISCREG_SCR);
1531 return (!scr.ns || scr.fw);
1532 }
1533 return true;
1534 }
1535
// Trivial constructor: all behavior comes from the
// ArmFaultVals<VirtualFastInterrupt> static configuration.
VirtualFastInterrupt::VirtualFastInterrupt()
{}
1538
// Deliver a PC alignment fault (AArch64 only) and report the misaligned
// PC through the FAR.
void
PCAlignmentFault::invoke(ThreadContext *tc, const StaticInstPtr &inst)
{
    ArmFaultVals<PCAlignmentFault>::invoke(tc, inst);
    assert(from64);
    // Set the FAR to the misaligned PC value.
    tc->setMiscReg(getFaultAddrReg64(), faultPC);
}
1547
1548 bool
1549 PCAlignmentFault::routeToHyp(ThreadContext *tc) const
1550 {
1551 HCR hcr = tc->readMiscRegNoEffect(MISCREG_HCR_EL2);
1552 return fromEL == EL2 || (EL2Enabled(tc) && fromEL <= EL1 && hcr.tge);
1553 }
1554
// Trivial constructor: all behavior comes from the
// ArmFaultVals<SPAlignmentFault> static configuration.
SPAlignmentFault::SPAlignmentFault()
{}
1557
1558 bool
1559 SPAlignmentFault::routeToHyp(ThreadContext *tc) const
1560 {
1561 assert(from64);
1562 HCR hcr = tc->readMiscRegNoEffect(MISCREG_HCR_EL2);
1563 return EL2Enabled(tc) && currEL(tc) <= EL1 && hcr.tge == 1;
1564 }
1565
// Trivial constructor: all behavior comes from the
// ArmFaultVals<SystemError> static configuration.
SystemError::SystemError()
{}
1568
// Deliver an SError; taking it consumes the pending asynchronous abort
// interrupt before the generic fault delivery runs.
void
SystemError::invoke(ThreadContext *tc, const StaticInstPtr &inst)
{
    tc->getCpuPtr()->clearInterrupt(tc->threadId(), INT_ABT, 0);
    ArmFault::invoke(tc, inst);
}
1575
1576 bool
1577 SystemError::routeToMonitor(ThreadContext *tc) const
1578 {
1579 assert(ArmSystem::haveSecurity(tc));
1580 assert(from64);
1581 SCR scr = tc->readMiscRegNoEffect(MISCREG_SCR_EL3);
1582 return scr.ea || fromEL == EL3;
1583 }
1584
1585 bool
1586 SystemError::routeToHyp(ThreadContext *tc) const
1587 {
1588 assert(from64);
1589
1590 HCR hcr = tc->readMiscRegNoEffect(MISCREG_HCR_EL2);
1591
1592 return fromEL == EL2 ||
1593 (EL2Enabled(tc) && fromEL <= EL1 && (hcr.tge || hcr.amo));
1594 }
1595
1596
// Carries the faulting instruction and a pre-computed ISS value through
// to the syndrome register.
SoftwareBreakpoint::SoftwareBreakpoint(ExtMachInst _mach_inst, uint32_t _iss)
    : ArmFaultVals<SoftwareBreakpoint>(_mach_inst, _iss)
{}
1600
1601 bool
1602 SoftwareBreakpoint::routeToHyp(ThreadContext *tc) const
1603 {
1604 const HCR hcr = tc->readMiscRegNoEffect(MISCREG_HCR_EL2);
1605 const HDCR mdcr = tc->readMiscRegNoEffect(MISCREG_MDCR_EL2);
1606
1607 return fromEL == EL2 ||
1608 (EL2Enabled(tc) && fromEL <= EL1 && (hcr.tge || mdcr.tde));
1609 }
1610
1611 ExceptionClass
1612 SoftwareBreakpoint::ec(ThreadContext *tc) const
1613 {
1614 return from64 ? EC_SOFTWARE_BREAKPOINT_64 : vals.ec;
1615 }
1616
// Records the breakpointed virtual address; no machine instruction is
// associated with this fault (0x0).
HardwareBreakpoint::HardwareBreakpoint(Addr _vaddr, uint32_t _iss)
    : ArmFaultVals<HardwareBreakpoint>(0x0, _iss), vAddr(_vaddr)
{}
1620
1621 bool
1622 HardwareBreakpoint::routeToHyp(ThreadContext *tc) const
1623 {
1624 const HCR hcr = tc->readMiscRegNoEffect(MISCREG_HCR_EL2);
1625 const HDCR mdcr = tc->readMiscRegNoEffect(MISCREG_MDCR_EL2);
1626
1627 return EL2Enabled(tc) && fromEL <= EL1 && (hcr.tge || mdcr.tde);
1628 }
1629
1630 ExceptionClass
1631 HardwareBreakpoint::ec(ThreadContext *tc) const
1632 {
1633 // AArch64
1634 if (toEL == fromEL)
1635 return EC_HW_BREAKPOINT_CURR_EL;
1636 else
1637 return EC_HW_BREAKPOINT_LOWER_EL;
1638 }
1639
// Deliver a hardware breakpoint, then overwrite the preferred return
// address so ELR_ELx points at the breakpointed instruction address.
void
HardwareBreakpoint::invoke(ThreadContext *tc, const StaticInstPtr &inst)
{

    ArmFaultVals<HardwareBreakpoint>::invoke(tc, inst);
    // Pick the ELR banked for the target exception level.
    MiscRegIndex elr_idx;
    switch (toEL) {
      case EL1:
        elr_idx = MISCREG_ELR_EL1;
        break;
      case EL2:
        assert(ArmSystem::haveVirtualization(tc));
        elr_idx = MISCREG_ELR_EL2;
        break;
      case EL3:
        assert(ArmSystem::haveSecurity(tc));
        elr_idx = MISCREG_ELR_EL3;
        break;
      default:
        panic("Invalid target exception level");
        break;
    }

    tc->setMiscReg(elr_idx, vAddr);

}
1666
// Records the watchpointed virtual address, whether the access was a
// write, and whether it was a cache maintenance operation (reported via
// the CM syndrome bit).
Watchpoint::Watchpoint(ExtMachInst _mach_inst, Addr _vaddr,
                       bool _write, bool _cm)
    : ArmFaultVals<Watchpoint>(_mach_inst), vAddr(_vaddr),
      write(_write), cm(_cm)
{}
1672
1673 uint32_t
1674 Watchpoint::iss() const
1675 {
1676 uint32_t iss = 0x0022;
1677 // NV
1678 // if (toEL == EL2)
1679 // iss |= 0x02000;
1680 if (cm)
1681 iss |= 0x00100;
1682 if (write)
1683 iss |= 0x00040;
1684 return iss;
1685 }
1686
// Deliver a watchpoint debug exception and report the watchpointed data
// address through the FAR.
void
Watchpoint::invoke(ThreadContext *tc, const StaticInstPtr &inst)
{
    ArmFaultVals<Watchpoint>::invoke(tc, inst);
    // Set the FAR to the watchpointed address.
    tc->setMiscReg(getFaultAddrReg64(), vAddr);

}
1695
1696 bool
1697 Watchpoint::routeToHyp(ThreadContext *tc) const
1698 {
1699 const HCR hcr = tc->readMiscRegNoEffect(MISCREG_HCR_EL2);
1700 const HDCR mdcr = tc->readMiscRegNoEffect(MISCREG_MDCR_EL2);
1701
1702 return fromEL == EL2 ||
1703 (EL2Enabled(tc) && fromEL <= EL1 && (hcr.tge || mdcr.tde));
1704 }
1705
// OFA (original faulting address) updates the reported watchpoint
// address; everything else is handled by the base class.
void
Watchpoint::annotate(AnnotationIDs id, uint64_t val)
{
    ArmFaultVals<Watchpoint>::annotate(id, val);
    switch (id)
    {
      case OFA:
        vAddr = val;
        break;
      // Just ignore unknown ID's
      default:
        break;
    }
}
1720
1721 ExceptionClass
1722 Watchpoint::ec(ThreadContext *tc) const
1723 {
1724 // AArch64
1725 if (toEL == fromEL)
1726 return EC_WATCHPOINT_CURR_EL;
1727 else
1728 return EC_WATCHPOINT_LOWER_EL;
1729 }
1730
// Software step debug exception; remembers whether the stepped
// instruction was a load-exclusive and whether an instruction was
// actually stepped (both reported through iss()).
SoftwareStepFault::SoftwareStepFault(ExtMachInst _mach_inst, bool is_ldx,
                                     bool _stepped)
    : ArmFaultVals<SoftwareStepFault>(_mach_inst), isldx(is_ldx),
      stepped(_stepped)
{
    // Mark this fault as a software-step event.
    bStep = true;
}
1738
1739 bool
1740 SoftwareStepFault::routeToHyp(ThreadContext *tc) const
1741 {
1742 const HCR hcr = tc->readMiscRegNoEffect(MISCREG_HCR_EL2);
1743 const HDCR mdcr = tc->readMiscRegNoEffect(MISCREG_MDCR_EL2);
1744
1745 return fromEL == EL2 ||
1746 (EL2Enabled(tc) && fromEL <= EL1 && (hcr.tge || mdcr.tde));
1747 }
1748
1749 ExceptionClass
1750 SoftwareStepFault::ec(ThreadContext *tc) const
1751 {
1752 // AArch64
1753 if (toEL == fromEL)
1754 return EC_SOFTWARE_STEP_CURR_EL;
1755 else
1756 return EC_SOFTWARE_STEP_LOWER_EL;
1757 }
1758
1759 uint32_t
1760 SoftwareStepFault::iss() const
1761 {
1762 uint32_t iss= 0x0022;
1763 if (stepped) {
1764 iss |= 0x1000000;
1765 }
1766
1767 if (isldx) {
1768 iss |= 0x40;
1769 }
1770
1771 return iss;
1772
1773 }
1774
// Handle an SEV wakeup (full-system only): latch the event into the SEV
// mailbox and clear the pending SEV interrupt; the pipeline continues
// since pcState is still valid.
void
ArmSev::invoke(ThreadContext *tc, const StaticInstPtr &inst) {
    DPRINTF(Faults, "Invoking ArmSev Fault\n");
    if (!FullSystem)
        return;

    // Set sev_mailbox to 1, clear the pending interrupt from remote
    // SEV execution and let pipeline continue as pcState is still
    // valid.
    tc->setMiscReg(MISCREG_SEV_MAILBOX, 1);
    tc->getCpuPtr()->clearInterrupt(tc->threadId(), INT_SEV, 0);
}
1787
// Instantiate all the templates to make the linker happy
// (explicit instantiation definitions: the out-of-line template member
// definitions above are emitted for exactly these types in this
// translation unit).
template class ArmFaultVals<Reset>;
template class ArmFaultVals<UndefinedInstruction>;
template class ArmFaultVals<SupervisorCall>;
template class ArmFaultVals<SecureMonitorCall>;
template class ArmFaultVals<HypervisorCall>;
template class ArmFaultVals<PrefetchAbort>;
template class ArmFaultVals<DataAbort>;
template class ArmFaultVals<VirtualDataAbort>;
template class ArmFaultVals<HypervisorTrap>;
template class ArmFaultVals<Interrupt>;
template class ArmFaultVals<VirtualInterrupt>;
template class ArmFaultVals<FastInterrupt>;
template class ArmFaultVals<VirtualFastInterrupt>;
template class ArmFaultVals<SupervisorTrap>;
template class ArmFaultVals<SecureMonitorTrap>;
template class ArmFaultVals<PCAlignmentFault>;
template class ArmFaultVals<SPAlignmentFault>;
template class ArmFaultVals<SystemError>;
template class ArmFaultVals<SoftwareBreakpoint>;
template class ArmFaultVals<HardwareBreakpoint>;
template class ArmFaultVals<Watchpoint>;
template class ArmFaultVals<SoftwareStepFault>;
template class ArmFaultVals<ArmSev>;
template class AbortFault<PrefetchAbort>;
template class AbortFault<DataAbort>;
template class AbortFault<VirtualDataAbort>;
1815
1816
// Trivial constructor: all behavior comes from the
// ArmFaultVals<IllegalInstSetStateFault> static configuration.
IllegalInstSetStateFault::IllegalInstSetStateFault()
{}
1819
1820 bool
1821 IllegalInstSetStateFault::routeToHyp(ThreadContext *tc) const
1822 {
1823 const HCR hcr = tc->readMiscRegNoEffect(MISCREG_HCR_EL2);
1824 return EL2Enabled(tc) && fromEL == EL0 && hcr.tge;
1825 }
1826
1827 bool
1828 getFaultVAddr(Fault fault, Addr &va)
1829 {
1830 auto arm_fault = dynamic_cast<ArmFault *>(fault.get());
1831
1832 if (arm_fault) {
1833 return arm_fault->getFaultVAddr(va);
1834 } else {
1835 auto pgt_fault = dynamic_cast<GenericPageTableFault *>(fault.get());
1836 if (pgt_fault) {
1837 va = pgt_fault->getFaultVAddr();
1838 return true;
1839 }
1840
1841 auto align_fault = dynamic_cast<GenericAlignmentFault *>(fault.get());
1842 if (align_fault) {
1843 va = align_fault->getFaultVAddr();
1844 return true;
1845 }
1846
1847 // Return false since it's not an address triggered exception
1848 return false;
1849 }
1850 }
1851
1852 } // namespace ArmISA