arch,sim: Convert clone to GuestABI and define a cloneBackwardsFunc.
[gem5.git] / src / arch / arm / isa.cc
1 /*
2 * Copyright (c) 2010-2020 ARM Limited
3 * All rights reserved
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
13 *
14 * Redistribution and use in source and binary forms, with or without
15 * modification, are permitted provided that the following conditions are
16 * met: redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer;
18 * redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution;
21 * neither the name of the copyright holders nor the names of its
22 * contributors may be used to endorse or promote products derived from
23 * this software without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
26 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
27 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
28 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
29 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
30 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
31 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
32 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
33 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
34 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
35 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 */
37
38 #include "arch/arm/isa.hh"
39
40 #include "arch/arm/faults.hh"
41 #include "arch/arm/interrupts.hh"
42 #include "arch/arm/pmu.hh"
43 #include "arch/arm/system.hh"
44 #include "arch/arm/tlb.hh"
45 #include "arch/arm/tlbi_op.hh"
46 #include "cpu/base.hh"
47 #include "cpu/checker/cpu.hh"
48 #include "debug/Arm.hh"
49 #include "debug/MiscRegs.hh"
50 #include "dev/arm/generic_timer.hh"
51 #include "dev/arm/gic_v3.hh"
52 #include "dev/arm/gic_v3_cpu_interface.hh"
53 #include "params/ArmISA.hh"
54 #include "sim/faults.hh"
55 #include "sim/stat_control.hh"
56 #include "sim/system.hh"
57
58 namespace ArmISA
59 {
60
61 ISA::ISA(Params *p) : BaseISA(p), system(NULL),
62 _decoderFlavor(p->decoderFlavor), _vecRegRenameMode(Enums::Full),
63 pmu(p->pmu), haveGICv3CPUInterface(false), impdefAsNop(p->impdef_nop),
64 afterStartup(false)
65 {
66 miscRegs[MISCREG_SCTLR_RST] = 0;
67
68 // Hook up a dummy device if we haven't been configured with a
69 // real PMU. By using a dummy device, we don't need to check that
70 * the PMU exists every time we try to access a PMU register.
71 if (!pmu)
72 pmu = &dummyDevice;
73
74 // Give all ISA devices a pointer to this ISA
75 pmu->setISA(this);
76
77 system = dynamic_cast<ArmSystem *>(p->system);
78
79 // Cache system-level properties
80 if (FullSystem && system) {
81 highestELIs64 = system->highestELIs64();
82 haveSecurity = system->haveSecurity();
83 haveLPAE = system->haveLPAE();
84 haveCrypto = system->haveCrypto();
85 haveVirtualization = system->haveVirtualization();
86 haveLargeAsid64 = system->haveLargeAsid64();
87 physAddrRange = system->physAddrRange();
88 haveSVE = system->haveSVE();
89 havePAN = system->havePAN();
90 sveVL = system->sveVL();
91 haveLSE = system->haveLSE();
92 } else {
93 highestELIs64 = true; // ArmSystem::highestELIs64 does the same
94 haveSecurity = haveLPAE = haveVirtualization = false;
95 haveCrypto = true;
96 haveLargeAsid64 = false;
97 physAddrRange = 32; // dummy value
98 haveSVE = true;
99 havePAN = false;
100 sveVL = p->sve_vl_se;
101 haveLSE = true;
102 }
103
104 // Initial rename mode depends on highestEL
105 const_cast<Enums::VecRegRenameMode&>(_vecRegRenameMode) =
106 highestELIs64 ? Enums::Full : Enums::Elem;
107
108 initializeMiscRegMetadata();
109 preUnflattenMiscReg();
110
111 clear();
112 }
113
114 std::vector<struct ISA::MiscRegLUTEntry> ISA::lookUpMiscReg(NUM_MISCREGS);
115
116 const ArmISAParams *
117 ISA::params() const
118 {
119 return dynamic_cast<const Params *>(_params);
120 }
121
122 void
123 ISA::clear(ThreadContext *tc)
124 {
125 clear();
126 // Invalidate cached copies of miscregs in the TLBs
127 getITBPtr(tc)->invalidateMiscReg();
128 getDTBPtr(tc)->invalidateMiscReg();
129 }
130
131 void
132 ISA::clear()
133 {
134 const Params *p(params());
135
136 SCTLR sctlr_rst = miscRegs[MISCREG_SCTLR_RST];
137 memset(miscRegs, 0, sizeof(miscRegs));
138
139 initID32(p);
140
141 // We always initialize the AArch64 ID registers even
142 // if we are in AArch32. This is done because in SE
143 // mode we don't know whether our ArmProcess is
144 // AArch32 or AArch64.
145 initID64(p);
146
147 // Start with an event in the mailbox
148 miscRegs[MISCREG_SEV_MAILBOX] = 1;
149
150 // Separate Instruction and Data TLBs
151 miscRegs[MISCREG_TLBTR] = 1;
152
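// The Media and VFP Feature Registers (MVFR0/MVFR1) below advertise
// the VFP/Advanced SIMD capabilities of the modelled FPU.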
153 MVFR0 mvfr0 = 0;
154 mvfr0.advSimdRegisters = 2;
155 mvfr0.singlePrecision = 2;
156 mvfr0.doublePrecision = 2;
157 mvfr0.vfpExceptionTrapping = 0;
158 mvfr0.divide = 1;
159 mvfr0.squareRoot = 1;
160 mvfr0.shortVectors = 1;
161 mvfr0.roundingModes = 1;
162 miscRegs[MISCREG_MVFR0] = mvfr0;
163
164 MVFR1 mvfr1 = 0;
165 mvfr1.flushToZero = 1;
166 mvfr1.defaultNaN = 1;
167 mvfr1.advSimdLoadStore = 1;
168 mvfr1.advSimdInteger = 1;
169 mvfr1.advSimdSinglePrecision = 1;
170 mvfr1.advSimdHalfPrecision = 1;
171 mvfr1.vfpHalfPrecision = 1;
172 miscRegs[MISCREG_MVFR1] = mvfr1;
173
174 // Reset values of PRRR and NMRR are implementation dependent
175
176 // @todo: PRRR and NMRR in secure state?
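// With TEX remap enabled, each PRRR.TRn field selects the memory type
// for one {TEX[0],C,B} encoding (0b00 strongly-ordered, 0b01 device,
// 0b10 normal), and the NMRR fields supply the matching inner/outer
// cacheability attributes (see ARM DDI 0406).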
177 miscRegs[MISCREG_PRRR_NS] =
178 (1 << 19) | // 19
179 (0 << 18) | // 18
180 (0 << 17) | // 17
181 (1 << 16) | // 16
182 (2 << 14) | // 15:14
183 (0 << 12) | // 13:12
184 (2 << 10) | // 11:10
185 (2 << 8) | // 9:8
186 (2 << 6) | // 7:6
187 (2 << 4) | // 5:4
188 (1 << 2) | // 3:2
189 0; // 1:0
190
191 miscRegs[MISCREG_NMRR_NS] =
192 (1 << 30) | // 31:30
193 (0 << 26) | // 27:26
194 (0 << 24) | // 25:24
195 (3 << 22) | // 23:22
196 (2 << 20) | // 21:20
197 (0 << 18) | // 19:18
198 (0 << 16) | // 17:16
199 (1 << 14) | // 15:14
200 (0 << 12) | // 13:12
201 (2 << 10) | // 11:10
202 (0 << 8) | // 9:8
203 (3 << 6) | // 7:6
204 (2 << 4) | // 5:4
205 (0 << 2) | // 3:2
206 0; // 1:0
207
208 if (FullSystem && system->highestELIs64()) {
209 // Initialize AArch64 state
210 clear64(p);
211 return;
212 }
213
214 // Initialize AArch32 state...
215 clear32(p, sctlr_rst);
216 }
217
218 void
219 ISA::clear32(const ArmISAParams *p, const SCTLR &sctlr_rst)
220 {
221 CPSR cpsr = 0;
222 cpsr.mode = MODE_USER;
223
224 if (FullSystem) {
225 miscRegs[MISCREG_MVBAR] = system->resetAddr();
226 }
227
228 miscRegs[MISCREG_CPSR] = cpsr;
229 updateRegMap(cpsr);
230
231 SCTLR sctlr = 0;
232 sctlr.te = (bool) sctlr_rst.te;
233 sctlr.nmfi = (bool) sctlr_rst.nmfi;
234 sctlr.v = (bool) sctlr_rst.v;
235 sctlr.u = 1;
236 sctlr.xp = 1;
237 sctlr.rao2 = 1;
238 sctlr.rao3 = 1;
239 sctlr.rao4 = 0xf; // SCTLR[6:3]
240 sctlr.uci = 1;
241 sctlr.dze = 1;
242 miscRegs[MISCREG_SCTLR_NS] = sctlr;
243 miscRegs[MISCREG_SCTLR_RST] = sctlr_rst;
244 miscRegs[MISCREG_HCPTR] = 0;
245
246 miscRegs[MISCREG_CPACR] = 0;
247
248 miscRegs[MISCREG_FPSID] = p->fpsid;
249
250 if (haveLPAE) {
251 TTBCR ttbcr = miscRegs[MISCREG_TTBCR_NS];
252 ttbcr.eae = 0;
253 miscRegs[MISCREG_TTBCR_NS] = ttbcr;
254 // Enforce consistency with system-level settings
255 miscRegs[MISCREG_ID_MMFR0] = (miscRegs[MISCREG_ID_MMFR0] & ~0xf) | 0x5;
256 }
257
258 if (haveSecurity) {
259 miscRegs[MISCREG_SCTLR_S] = sctlr;
260 miscRegs[MISCREG_SCR] = 0;
261 miscRegs[MISCREG_VBAR_S] = 0;
262 } else {
263 // we're always non-secure
264 miscRegs[MISCREG_SCR] = 1;
265 }
266
267 //XXX We need to initialize the rest of the state.
268 }
269
270 void
271 ISA::clear64(const ArmISAParams *p)
272 {
273 CPSR cpsr = 0;
274 Addr rvbar = system->resetAddr();
275 switch (system->highestEL()) {
276 // Set initial EL to highest implemented EL using associated stack
277 // pointer (SP_ELx); set RVBAR_ELx to implementation defined reset
278 // value
279 case EL3:
280 cpsr.mode = MODE_EL3H;
281 miscRegs[MISCREG_RVBAR_EL3] = rvbar;
282 break;
283 case EL2:
284 cpsr.mode = MODE_EL2H;
285 miscRegs[MISCREG_RVBAR_EL2] = rvbar;
286 break;
287 case EL1:
288 cpsr.mode = MODE_EL1H;
289 miscRegs[MISCREG_RVBAR_EL1] = rvbar;
290 break;
291 default:
292 panic("Invalid highest implemented exception level");
293 break;
294 }
295
296 // Initialize rest of CPSR
297 cpsr.daif = 0xf; // Mask all interrupts
298 cpsr.ss = 0;
299 cpsr.il = 0;
300 miscRegs[MISCREG_CPSR] = cpsr;
301 updateRegMap(cpsr);
302
303 // Initialize other control registers
304 miscRegs[MISCREG_MPIDR_EL1] = 0x80000000;
305 if (haveSecurity) {
306 miscRegs[MISCREG_SCTLR_EL3] = 0x30c50830;
307 miscRegs[MISCREG_SCR_EL3] = 0x00000030; // RES1 fields
308 } else if (haveVirtualization) {
309 // also MISCREG_SCTLR_EL2 (by mapping)
310 miscRegs[MISCREG_HSCTLR] = 0x30c50830;
311 } else {
312 // also MISCREG_SCTLR_EL1 (by mapping)
313 miscRegs[MISCREG_SCTLR_NS] = 0x30d00800 | 0x00050030; // RES1 | init
314 // Always non-secure
315 miscRegs[MISCREG_SCR_EL3] = 1;
316 }
317 }
318
319 void
320 ISA::initID32(const ArmISAParams *p)
321 {
322 // Initialize configurable default values
323
324 uint32_t midr;
325 if (p->midr != 0x0)
326 midr = p->midr;
327 else if (highestELIs64)
328 // Cortex-A57 TRM r0p0 MIDR
329 midr = 0x410fd070;
330 else
331 // Cortex-A15 TRM r0p0 MIDR
332 midr = 0x410fc0f0;
333
334 miscRegs[MISCREG_MIDR] = midr;
335 miscRegs[MISCREG_MIDR_EL1] = midr;
336 miscRegs[MISCREG_VPIDR] = midr;
337
338 miscRegs[MISCREG_ID_ISAR0] = p->id_isar0;
339 miscRegs[MISCREG_ID_ISAR1] = p->id_isar1;
340 miscRegs[MISCREG_ID_ISAR2] = p->id_isar2;
341 miscRegs[MISCREG_ID_ISAR3] = p->id_isar3;
342 miscRegs[MISCREG_ID_ISAR4] = p->id_isar4;
343 miscRegs[MISCREG_ID_ISAR5] = p->id_isar5;
344
345 miscRegs[MISCREG_ID_MMFR0] = p->id_mmfr0;
346 miscRegs[MISCREG_ID_MMFR1] = p->id_mmfr1;
347 miscRegs[MISCREG_ID_MMFR2] = p->id_mmfr2;
348 miscRegs[MISCREG_ID_MMFR3] = p->id_mmfr3;
349
350 miscRegs[MISCREG_ID_ISAR5] = insertBits(
351 miscRegs[MISCREG_ID_ISAR5], 19, 4,
352 haveCrypto ? 0x1112 : 0x0);
353 }
354
355 void
356 ISA::initID64(const ArmISAParams *p)
357 {
358 // Initialize configurable id registers
359 miscRegs[MISCREG_ID_AA64AFR0_EL1] = p->id_aa64afr0_el1;
360 miscRegs[MISCREG_ID_AA64AFR1_EL1] = p->id_aa64afr1_el1;
361 miscRegs[MISCREG_ID_AA64DFR0_EL1] =
362 (p->id_aa64dfr0_el1 & 0xfffffffffffff0ffULL) |
363 (p->pmu ? 0x0000000000000100ULL : 0); // Enable PMUv3
364
365 miscRegs[MISCREG_ID_AA64DFR1_EL1] = p->id_aa64dfr1_el1;
366 miscRegs[MISCREG_ID_AA64ISAR0_EL1] = p->id_aa64isar0_el1;
367 miscRegs[MISCREG_ID_AA64ISAR1_EL1] = p->id_aa64isar1_el1;
368 miscRegs[MISCREG_ID_AA64MMFR0_EL1] = p->id_aa64mmfr0_el1;
369 miscRegs[MISCREG_ID_AA64MMFR1_EL1] = p->id_aa64mmfr1_el1;
370 miscRegs[MISCREG_ID_AA64MMFR2_EL1] = p->id_aa64mmfr2_el1;
371
372 miscRegs[MISCREG_ID_DFR0_EL1] =
373 (p->pmu ? 0x03000000ULL : 0); // Enable PMUv3
374
375 miscRegs[MISCREG_ID_DFR0] = miscRegs[MISCREG_ID_DFR0_EL1];
376
377 // SVE
378 miscRegs[MISCREG_ID_AA64ZFR0_EL1] = 0; // SVEver 0
379 if (haveSecurity) {
380 miscRegs[MISCREG_ZCR_EL3] = sveVL - 1;
381 } else if (haveVirtualization) {
382 miscRegs[MISCREG_ZCR_EL2] = sveVL - 1;
383 } else {
384 miscRegs[MISCREG_ZCR_EL1] = sveVL - 1;
385 }
386
387 // Enforce consistency with system-level settings...
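// insertBits(reg, msb, lsb, val) returns reg with bits [msb:lsb]
// replaced by val, so each feature field can be patched without
// disturbing the rest of the ID register.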
388
389 // EL3
390 miscRegs[MISCREG_ID_AA64PFR0_EL1] = insertBits(
391 miscRegs[MISCREG_ID_AA64PFR0_EL1], 15, 12,
392 haveSecurity ? 0x2 : 0x0);
393 // EL2
394 miscRegs[MISCREG_ID_AA64PFR0_EL1] = insertBits(
395 miscRegs[MISCREG_ID_AA64PFR0_EL1], 11, 8,
396 haveVirtualization ? 0x2 : 0x0);
397 // SVE
398 miscRegs[MISCREG_ID_AA64PFR0_EL1] = insertBits(
399 miscRegs[MISCREG_ID_AA64PFR0_EL1], 35, 32,
400 haveSVE ? 0x1 : 0x0);
401 // Large ASID support
402 miscRegs[MISCREG_ID_AA64MMFR0_EL1] = insertBits(
403 miscRegs[MISCREG_ID_AA64MMFR0_EL1], 7, 4,
404 haveLargeAsid64 ? 0x2 : 0x0);
405 // Physical address size
406 miscRegs[MISCREG_ID_AA64MMFR0_EL1] = insertBits(
407 miscRegs[MISCREG_ID_AA64MMFR0_EL1], 3, 0,
408 encodePhysAddrRange64(physAddrRange));
409 // Crypto
410 miscRegs[MISCREG_ID_AA64ISAR0_EL1] = insertBits(
411 miscRegs[MISCREG_ID_AA64ISAR0_EL1], 19, 4,
412 haveCrypto ? 0x1112 : 0x0);
413 // LSE
414 miscRegs[MISCREG_ID_AA64ISAR0_EL1] = insertBits(
415 miscRegs[MISCREG_ID_AA64ISAR0_EL1], 23, 20,
416 haveLSE ? 0x2 : 0x0);
417 // PAN
418 miscRegs[MISCREG_ID_AA64MMFR1_EL1] = insertBits(
419 miscRegs[MISCREG_ID_AA64MMFR1_EL1], 23, 20,
420 havePAN ? 0x1 : 0x0);
421 }
422
423 void
424 ISA::startup(ThreadContext *tc)
425 {
426 pmu->setThreadContext(tc);
427
428 if (system) {
429 Gicv3 *gicv3 = dynamic_cast<Gicv3 *>(system->getGIC());
430 if (gicv3) {
431 haveGICv3CPUInterface = true;
432 gicv3CpuInterface.reset(gicv3->getCPUInterface(tc->contextId()));
433 gicv3CpuInterface->setISA(this);
434 gicv3CpuInterface->setThreadContext(tc);
435 }
436 }
437
438 afterStartup = true;
439 }
440
441
442 RegVal
443 ISA::readMiscRegNoEffect(int misc_reg) const
444 {
445 assert(misc_reg < NumMiscRegs);
446
447 const auto &reg = lookUpMiscReg[misc_reg]; // bit masks
448 const auto &map = getMiscIndices(misc_reg);
449 int lower = map.first, upper = map.second;
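// A 64-bit register may be split across two 32-bit entries: 'lower'
// holds bits [31:0] and 'upper' bits [63:32]; 'upper' is 0 when the
// register maps to a single entry.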
450 // NB!: apply architectural masks according to desired register,
451 // despite possibly getting value from different (mapped) register.
452 auto val = !upper ? miscRegs[lower] : ((miscRegs[lower] & mask(32))
453 |(miscRegs[upper] << 32));
454 if (val & reg.res0()) {
455 DPRINTF(MiscRegs, "Reading MiscReg %s with set res0 bits: %#x\n",
456 miscRegName[misc_reg], val & reg.res0());
457 }
458 if ((val & reg.res1()) != reg.res1()) {
459 DPRINTF(MiscRegs, "Reading MiscReg %s with clear res1 bits: %#x\n",
460 miscRegName[misc_reg], (val & reg.res1()) ^ reg.res1());
461 }
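// Force RAZ bits to read as zero and RAO bits to read as one,
// regardless of the value actually stored.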
462 return (val & ~reg.raz()) | reg.rao(); // enforce raz/rao
463 }
464
465
466 RegVal
467 ISA::readMiscReg(int misc_reg, ThreadContext *tc)
468 {
469 CPSR cpsr = 0;
470 PCState pc = 0;
471 SCR scr = 0;
472
473 if (misc_reg == MISCREG_CPSR) {
474 cpsr = miscRegs[misc_reg];
475 pc = tc->pcState();
476 cpsr.j = pc.jazelle() ? 1 : 0;
477 cpsr.t = pc.thumb() ? 1 : 0;
478 return cpsr;
479 }
480
481 #ifndef NDEBUG
482 if (!miscRegInfo[misc_reg][MISCREG_IMPLEMENTED]) {
483 if (miscRegInfo[misc_reg][MISCREG_WARN_NOT_FAIL])
484 warn("Unimplemented system register %s read.\n",
485 miscRegName[misc_reg]);
486 else
487 panic("Unimplemented system register %s read.\n",
488 miscRegName[misc_reg]);
489 }
490 #endif
491
492 switch (unflattenMiscReg(misc_reg)) {
493 case MISCREG_HCR:
494 case MISCREG_HCR2:
495 if (!haveVirtualization)
496 return 0;
497 break;
498 case MISCREG_CPACR:
499 {
500 const uint32_t ones = (uint32_t)(-1);
501 CPACR cpacrMask = 0;
502 // Only cp10, cp11, and ase are implemented, nothing else should
503 // be readable? (straight copy from the write code)
504 cpacrMask.cp10 = ones;
505 cpacrMask.cp11 = ones;
506 cpacrMask.asedis = ones;
507
508 // Security Extensions may limit the readability of CPACR
509 if (haveSecurity) {
510 scr = readMiscRegNoEffect(MISCREG_SCR);
511 cpsr = readMiscRegNoEffect(MISCREG_CPSR);
512 if (scr.ns && (cpsr.mode != MODE_MON) && ELIs32(tc, EL3)) {
513 NSACR nsacr = readMiscRegNoEffect(MISCREG_NSACR);
514 // NB: Skipping the full loop here
515 if (!nsacr.cp10) cpacrMask.cp10 = 0;
516 if (!nsacr.cp11) cpacrMask.cp11 = 0;
517 }
518 }
519 RegVal val = readMiscRegNoEffect(MISCREG_CPACR);
520 val &= cpacrMask;
521 DPRINTF(MiscRegs, "Reading misc reg %s: %#x\n",
522 miscRegName[misc_reg], val);
523 return val;
524 }
525 case MISCREG_MPIDR:
526 case MISCREG_MPIDR_EL1:
527 return readMPIDR(system, tc);
528 case MISCREG_VMPIDR:
529 case MISCREG_VMPIDR_EL2:
530 // top bit defined as RES1
531 return readMiscRegNoEffect(misc_reg) | 0x80000000;
532 case MISCREG_ID_AFR0: // not implemented, so alias MIDR
533 case MISCREG_REVIDR: // not implemented, so alias MIDR
534 case MISCREG_MIDR:
535 cpsr = readMiscRegNoEffect(MISCREG_CPSR);
536 scr = readMiscRegNoEffect(MISCREG_SCR);
537 if ((cpsr.mode == MODE_HYP) || inSecureState(scr, cpsr)) {
538 return readMiscRegNoEffect(misc_reg);
539 } else {
540 return readMiscRegNoEffect(MISCREG_VPIDR);
541 }
542 break;
543 case MISCREG_JOSCR: // Jazelle trivial implementation, RAZ/WI
544 case MISCREG_JMCR: // Jazelle trivial implementation, RAZ/WI
545 case MISCREG_JIDR: // Jazelle trivial implementation, RAZ/WI
546 case MISCREG_AIDR: // AUX ID set to 0
547 case MISCREG_TCMTR: // No TCM's
548 return 0;
549
550 case MISCREG_CLIDR:
551 warn_once("The clidr register always reports 0 caches.\n");
552 warn_once("clidr LoUIS field of 0b001 to match current "
553 "ARM implementations.\n");
554 return 0x00200000;
555 case MISCREG_CCSIDR:
556 warn_once("The ccsidr register isn't implemented and "
557 "always reads as 0.\n");
558 break;
559 case MISCREG_CTR: // AArch32, ARMv7, top bit set
560 case MISCREG_CTR_EL0: // AArch64
561 {
562 //all caches have the same line size in gem5
563 //4 byte words in ARM
564 unsigned lineSizeWords =
565 tc->getSystemPtr()->cacheLineSize() / 4;
566 unsigned log2LineSizeWords = 0;
567
568 while (lineSizeWords >>= 1) {
569 ++log2LineSizeWords;
570 }
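// e.g. with a 64-byte cache line: lineSizeWords = 16, so
// log2LineSizeWords = 4.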
571
572 CTR ctr = 0;
573 //log2 of minimum i-cache line size (words)
574 ctr.iCacheLineSize = log2LineSizeWords;
575 //b11 - gem5 uses pipt
576 ctr.l1IndexPolicy = 0x3;
577 //log2 of minimum d-cache line size (words)
578 ctr.dCacheLineSize = log2LineSizeWords;
579 //log2 of max reservation size (words)
580 ctr.erg = log2LineSizeWords;
581 //log2 of max writeback size (words)
582 ctr.cwg = log2LineSizeWords;
583 //b100 - gem5 format is ARMv7
584 ctr.format = 0x4;
585
586 return ctr;
587 }
588 case MISCREG_ACTLR:
589 warn("Not doing anything for miscreg ACTLR\n");
590 break;
591
592 case MISCREG_PMXEVTYPER_PMCCFILTR:
593 case MISCREG_PMINTENSET_EL1 ... MISCREG_PMOVSSET_EL0:
594 case MISCREG_PMEVCNTR0_EL0 ... MISCREG_PMEVTYPER5_EL0:
595 case MISCREG_PMCR ... MISCREG_PMOVSSET:
596 return pmu->readMiscReg(misc_reg);
597
598 case MISCREG_CPSR_Q:
599 panic("shouldn't be reading this register seperately\n");
600 case MISCREG_FPSCR_QC:
601 return readMiscRegNoEffect(MISCREG_FPSCR) & ~FpscrQcMask;
602 case MISCREG_FPSCR_EXC:
603 return readMiscRegNoEffect(MISCREG_FPSCR) & ~FpscrExcMask;
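// FPSR and FPCR are the AArch64 views of FPSCR: FPSR exposes the
// status flags, FPCR the control fields; both read the shared FPSCR
// storage through the masks below.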
604 case MISCREG_FPSR:
605 {
606 const uint32_t ones = (uint32_t)(-1);
607 FPSCR fpscrMask = 0;
608 fpscrMask.ioc = ones;
609 fpscrMask.dzc = ones;
610 fpscrMask.ofc = ones;
611 fpscrMask.ufc = ones;
612 fpscrMask.ixc = ones;
613 fpscrMask.idc = ones;
614 fpscrMask.qc = ones;
615 fpscrMask.v = ones;
616 fpscrMask.c = ones;
617 fpscrMask.z = ones;
618 fpscrMask.n = ones;
619 return readMiscRegNoEffect(MISCREG_FPSCR) & (uint32_t)fpscrMask;
620 }
621 case MISCREG_FPCR:
622 {
623 const uint32_t ones = (uint32_t)(-1);
624 FPSCR fpscrMask = 0;
625 fpscrMask.len = ones;
626 fpscrMask.fz16 = ones;
627 fpscrMask.stride = ones;
628 fpscrMask.rMode = ones;
629 fpscrMask.fz = ones;
630 fpscrMask.dn = ones;
631 fpscrMask.ahp = ones;
632 return readMiscRegNoEffect(MISCREG_FPSCR) & (uint32_t)fpscrMask;
633 }
634 case MISCREG_NZCV:
635 {
636 CPSR cpsr = 0;
637 cpsr.nz = tc->readCCReg(CCREG_NZ);
638 cpsr.c = tc->readCCReg(CCREG_C);
639 cpsr.v = tc->readCCReg(CCREG_V);
640 return cpsr;
641 }
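// The registers below (DAIF, SP_ELx, SPSEL, CurrentEL, PAN) are not
// stored separately; they are views of CPSR fields or of the banked
// stack pointer integer registers.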
642 case MISCREG_DAIF:
643 {
644 CPSR cpsr = 0;
645 cpsr.daif = (uint8_t) ((CPSR) miscRegs[MISCREG_CPSR]).daif;
646 return cpsr;
647 }
648 case MISCREG_SP_EL0:
649 {
650 return tc->readIntReg(INTREG_SP0);
651 }
652 case MISCREG_SP_EL1:
653 {
654 return tc->readIntReg(INTREG_SP1);
655 }
656 case MISCREG_SP_EL2:
657 {
658 return tc->readIntReg(INTREG_SP2);
659 }
660 case MISCREG_SPSEL:
661 {
662 return miscRegs[MISCREG_CPSR] & 0x1;
663 }
664 case MISCREG_CURRENTEL:
665 {
666 return miscRegs[MISCREG_CPSR] & 0xc;
667 }
668 case MISCREG_PAN:
669 {
670 return miscRegs[MISCREG_CPSR] & 0x400000;
671 }
672 case MISCREG_L2CTLR:
673 {
674 // mostly unimplemented, just set NumCPUs field from sim and return
675 L2CTLR l2ctlr = 0;
676 // b00: 1 CPU up to b11: 4 CPUs
677 l2ctlr.numCPUs = tc->getSystemPtr()->numContexts() - 1;
678 return l2ctlr;
679 }
680 case MISCREG_DBGDIDR:
681 /* For now just implement the version number.
682 * ARMv7, v7.1 Debug architecture (0b0101 --> 0x5)
683 */
684 return 0x5 << 16;
685 case MISCREG_DBGDSCRint:
686 return 0;
687 case MISCREG_ISR:
688 {
689 auto ic = dynamic_cast<ArmISA::Interrupts *>(
690 tc->getCpuPtr()->getInterruptController(tc->threadId()));
691 return ic->getISR(
692 readMiscRegNoEffect(MISCREG_HCR),
693 readMiscRegNoEffect(MISCREG_CPSR),
694 readMiscRegNoEffect(MISCREG_SCR));
695 }
696 case MISCREG_ISR_EL1:
697 {
698 auto ic = dynamic_cast<ArmISA::Interrupts *>(
699 tc->getCpuPtr()->getInterruptController(tc->threadId()));
700 return ic->getISR(
701 readMiscRegNoEffect(MISCREG_HCR_EL2),
702 readMiscRegNoEffect(MISCREG_CPSR),
703 readMiscRegNoEffect(MISCREG_SCR_EL3));
704 }
705 case MISCREG_DCZID_EL0:
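// DCZID_EL0.BS is the log2 of the block size in (4-byte) words:
// 2^4 words = 64 bytes.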
706 return 0x04; // DC ZVA clear 64-byte chunks
707 case MISCREG_HCPTR:
708 {
709 RegVal val = readMiscRegNoEffect(misc_reg);
710 // The trap bit associated with CP14 is defined as RAZ
711 val &= ~(1 << 14);
712 // If a CP bit in NSACR is 0 then the corresponding bit in
713 // HCPTR is RAO/WI
714 bool secure_lookup = haveSecurity &&
715 inSecureState(readMiscRegNoEffect(MISCREG_SCR),
716 readMiscRegNoEffect(MISCREG_CPSR));
717 if (!secure_lookup) {
718 RegVal mask = readMiscRegNoEffect(MISCREG_NSACR);
719 val |= (mask ^ 0x7FFF) & 0xBFFF;
720 }
721 // Set the bits for unimplemented coprocessors to RAO/WI
722 val |= 0x33FF;
723 return (val);
724 }
725 case MISCREG_HDFAR: // alias for secure DFAR
726 return readMiscRegNoEffect(MISCREG_DFAR_S);
727 case MISCREG_HIFAR: // alias for secure IFAR
728 return readMiscRegNoEffect(MISCREG_IFAR_S);
729
730 case MISCREG_ID_PFR0:
731 // !ThumbEE | !Jazelle | Thumb | ARM
732 return 0x00000031;
733 case MISCREG_ID_PFR1:
734 { // Timer | Virt | !M Profile | TrustZone | ARMv4
735 bool haveTimer = (system->getGenericTimer() != NULL);
736 return 0x00000001
737 | (haveSecurity ? 0x00000010 : 0x0)
738 | (haveVirtualization ? 0x00001000 : 0x0)
739 | (haveTimer ? 0x00010000 : 0x0);
740 }
741 case MISCREG_ID_AA64PFR0_EL1:
742 return 0x0000000000000002 | // AArch{64,32} supported at EL0
743 0x0000000000000020 | // EL1
744 (haveVirtualization ? 0x0000000000000200 : 0) | // EL2
745 (haveSecurity ? 0x0000000000002000 : 0) | // EL3
746 (haveSVE ? 0x0000000100000000 : 0) | // SVE
747 (haveGICv3CPUInterface ? 0x0000000001000000 : 0);
748 case MISCREG_ID_AA64PFR1_EL1:
749 return 0; // bits [63:0] RES0 (reserved for future use)
750
751 // Generic Timer registers
752 case MISCREG_CNTFRQ ... MISCREG_CNTVOFF:
753 case MISCREG_CNTFRQ_EL0 ... MISCREG_CNTVOFF_EL2:
754 return getGenericTimer(tc).readMiscReg(misc_reg);
755
756 case MISCREG_ICC_AP0R0 ... MISCREG_ICH_LRC15:
757 case MISCREG_ICC_PMR_EL1 ... MISCREG_ICC_IGRPEN1_EL3:
758 case MISCREG_ICH_AP0R0_EL2 ... MISCREG_ICH_LR15_EL2:
759 return getGICv3CPUInterface(tc).readMiscReg(misc_reg);
760
761 default:
762 break;
763
764 }
765 return readMiscRegNoEffect(misc_reg);
766 }
767
768 void
769 ISA::setMiscRegNoEffect(int misc_reg, RegVal val)
770 {
771 assert(misc_reg < NumMiscRegs);
772
773 const auto &reg = lookUpMiscReg[misc_reg]; // bit masks
774 const auto &map = getMiscIndices(misc_reg);
775 int lower = map.first, upper = map.second;
776
777 auto v = (val & ~reg.wi()) | reg.rao();
778 if (upper > 0) {
779 miscRegs[lower] = bits(v, 31, 0);
780 miscRegs[upper] = bits(v, 63, 32);
781 DPRINTF(MiscRegs, "Writing to misc reg %d (%d:%d) : %#x\n",
782 misc_reg, lower, upper, v);
783 } else {
784 miscRegs[lower] = v;
785 DPRINTF(MiscRegs, "Writing to misc reg %d (%d) : %#x\n",
786 misc_reg, lower, v);
787 }
788 }
789
790 void
791 ISA::setMiscReg(int misc_reg, RegVal val, ThreadContext *tc)
792 {
793
794 RegVal newVal = val;
795 bool secure_lookup;
796 SCR scr;
797
798 if (misc_reg == MISCREG_CPSR) {
799 updateRegMap(val);
800
801
802 CPSR old_cpsr = miscRegs[MISCREG_CPSR];
803 int old_mode = old_cpsr.mode;
804 CPSR cpsr = val;
805 if (old_mode != cpsr.mode || cpsr.il != old_cpsr.il) {
806 getITBPtr(tc)->invalidateMiscReg();
807 getDTBPtr(tc)->invalidateMiscReg();
808 }
809
810 if (cpsr.pan != old_cpsr.pan) {
811 getDTBPtr(tc)->invalidateMiscReg();
812 }
813
814 DPRINTF(Arm, "Updating CPSR from %#x to %#x f:%d i:%d a:%d mode:%#x\n",
815 miscRegs[misc_reg], cpsr, cpsr.f, cpsr.i, cpsr.a, cpsr.mode);
816 PCState pc = tc->pcState();
817 pc.nextThumb(cpsr.t);
818 pc.nextJazelle(cpsr.j);
819 pc.illegalExec(cpsr.il == 1);
820
821 tc->getDecoderPtr()->setSveLen((getCurSveVecLenInBits(tc) >> 7) - 1);
822
823 // Follow slightly different semantics if a CheckerCPU object
824 // is connected
825 CheckerCPU *checker = tc->getCheckerCpuPtr();
826 if (checker) {
827 tc->pcStateNoRecord(pc);
828 } else {
829 tc->pcState(pc);
830 }
831 } else {
832 #ifndef NDEBUG
833 if (!miscRegInfo[misc_reg][MISCREG_IMPLEMENTED]) {
834 if (miscRegInfo[misc_reg][MISCREG_WARN_NOT_FAIL])
835 warn("Unimplemented system register %s write with %#x.\n",
836 miscRegName[misc_reg], val);
837 else
838 panic("Unimplemented system register %s write with %#x.\n",
839 miscRegName[misc_reg], val);
840 }
841 #endif
842 switch (unflattenMiscReg(misc_reg)) {
843 case MISCREG_CPACR:
844 {
845
846 const uint32_t ones = (uint32_t)(-1);
847 CPACR cpacrMask = 0;
848 // Only cp10, cp11, and ase are implemented, nothing else should
849 // be writable
850 cpacrMask.cp10 = ones;
851 cpacrMask.cp11 = ones;
852 cpacrMask.asedis = ones;
853
854 // Security Extensions may limit the writability of CPACR
855 if (haveSecurity) {
856 scr = readMiscRegNoEffect(MISCREG_SCR);
857 CPSR cpsr = readMiscRegNoEffect(MISCREG_CPSR);
858 if (scr.ns && (cpsr.mode != MODE_MON) && ELIs32(tc, EL3)) {
859 NSACR nsacr = readMiscRegNoEffect(MISCREG_NSACR);
860 // NB: Skipping the full loop here
861 if (!nsacr.cp10) cpacrMask.cp10 = 0;
862 if (!nsacr.cp11) cpacrMask.cp11 = 0;
863 }
864 }
865
866 RegVal old_val = readMiscRegNoEffect(MISCREG_CPACR);
867 newVal &= cpacrMask;
868 newVal |= old_val & ~cpacrMask;
869 DPRINTF(MiscRegs, "Writing misc reg %s: %#x\n",
870 miscRegName[misc_reg], newVal);
871 }
872 break;
873 case MISCREG_CPACR_EL1:
874 {
875 const uint32_t ones = (uint32_t)(-1);
876 CPACR cpacrMask = 0;
877 cpacrMask.tta = ones;
878 cpacrMask.fpen = ones;
879 if (haveSVE) {
880 cpacrMask.zen = ones;
881 }
882 newVal &= cpacrMask;
883 DPRINTF(MiscRegs, "Writing misc reg %s: %#x\n",
884 miscRegName[misc_reg], newVal);
885 }
886 break;
887 case MISCREG_CPTR_EL2:
888 {
889 const uint32_t ones = (uint32_t)(-1);
890 CPTR cptrMask = 0;
891 cptrMask.tcpac = ones;
892 cptrMask.tta = ones;
893 cptrMask.tfp = ones;
894 if (haveSVE) {
895 cptrMask.tz = ones;
896 }
897 newVal &= cptrMask;
898 cptrMask = 0;
899 cptrMask.res1_13_12_el2 = ones;
900 cptrMask.res1_7_0_el2 = ones;
901 if (!haveSVE) {
902 cptrMask.res1_8_el2 = ones;
903 }
904 cptrMask.res1_9_el2 = ones;
905 newVal |= cptrMask;
906 DPRINTF(MiscRegs, "Writing misc reg %s: %#x\n",
907 miscRegName[misc_reg], newVal);
908 }
909 break;
910 case MISCREG_CPTR_EL3:
911 {
912 const uint32_t ones = (uint32_t)(-1);
913 CPTR cptrMask = 0;
914 cptrMask.tcpac = ones;
915 cptrMask.tta = ones;
916 cptrMask.tfp = ones;
917 if (haveSVE) {
918 cptrMask.ez = ones;
919 }
920 newVal &= cptrMask;
921 DPRINTF(MiscRegs, "Writing misc reg %s: %#x\n",
922 miscRegName[misc_reg], newVal);
923 }
924 break;
925 case MISCREG_CSSELR:
926 warn_once("The csselr register isn't implemented.\n");
927 return;
928
929 case MISCREG_DC_ZVA_Xt:
930 warn("Calling DC ZVA! Not Implemeted! Expect WEIRD results\n");
931 return;
932
933 case MISCREG_FPSCR:
934 {
935 const uint32_t ones = (uint32_t)(-1);
936 FPSCR fpscrMask = 0;
937 fpscrMask.ioc = ones;
938 fpscrMask.dzc = ones;
939 fpscrMask.ofc = ones;
940 fpscrMask.ufc = ones;
941 fpscrMask.ixc = ones;
942 fpscrMask.idc = ones;
943 fpscrMask.ioe = ones;
944 fpscrMask.dze = ones;
945 fpscrMask.ofe = ones;
946 fpscrMask.ufe = ones;
947 fpscrMask.ixe = ones;
948 fpscrMask.ide = ones;
949 fpscrMask.len = ones;
950 fpscrMask.fz16 = ones;
951 fpscrMask.stride = ones;
952 fpscrMask.rMode = ones;
953 fpscrMask.fz = ones;
954 fpscrMask.dn = ones;
955 fpscrMask.ahp = ones;
956 fpscrMask.qc = ones;
957 fpscrMask.v = ones;
958 fpscrMask.c = ones;
959 fpscrMask.z = ones;
960 fpscrMask.n = ones;
961 newVal = (newVal & (uint32_t)fpscrMask) |
962 (readMiscRegNoEffect(MISCREG_FPSCR) &
963 ~(uint32_t)fpscrMask);
964 tc->getDecoderPtr()->setContext(newVal);
965 }
966 break;
967 case MISCREG_FPSR:
968 {
969 const uint32_t ones = (uint32_t)(-1);
970 FPSCR fpscrMask = 0;
971 fpscrMask.ioc = ones;
972 fpscrMask.dzc = ones;
973 fpscrMask.ofc = ones;
974 fpscrMask.ufc = ones;
975 fpscrMask.ixc = ones;
976 fpscrMask.idc = ones;
977 fpscrMask.qc = ones;
978 fpscrMask.v = ones;
979 fpscrMask.c = ones;
980 fpscrMask.z = ones;
981 fpscrMask.n = ones;
982 newVal = (newVal & (uint32_t)fpscrMask) |
983 (readMiscRegNoEffect(MISCREG_FPSCR) &
984 ~(uint32_t)fpscrMask);
985 misc_reg = MISCREG_FPSCR;
986 }
987 break;
988 case MISCREG_FPCR:
989 {
990 const uint32_t ones = (uint32_t)(-1);
991 FPSCR fpscrMask = 0;
992 fpscrMask.len = ones;
993 fpscrMask.fz16 = ones;
994 fpscrMask.stride = ones;
995 fpscrMask.rMode = ones;
996 fpscrMask.fz = ones;
997 fpscrMask.dn = ones;
998 fpscrMask.ahp = ones;
999 newVal = (newVal & (uint32_t)fpscrMask) |
1000 (readMiscRegNoEffect(MISCREG_FPSCR) &
1001 ~(uint32_t)fpscrMask);
1002 misc_reg = MISCREG_FPSCR;
1003 }
1004 break;
1005 case MISCREG_CPSR_Q:
1006 {
1007 assert(!(newVal & ~CpsrMaskQ));
1008 newVal = readMiscRegNoEffect(MISCREG_CPSR) | newVal;
1009 misc_reg = MISCREG_CPSR;
1010 }
1011 break;
1012 case MISCREG_FPSCR_QC:
1013 {
1014 newVal = readMiscRegNoEffect(MISCREG_FPSCR) |
1015 (newVal & FpscrQcMask);
1016 misc_reg = MISCREG_FPSCR;
1017 }
1018 break;
1019 case MISCREG_FPSCR_EXC:
1020 {
1021 newVal = readMiscRegNoEffect(MISCREG_FPSCR) |
1022 (newVal & FpscrExcMask);
1023 misc_reg = MISCREG_FPSCR;
1024 }
1025 break;
1026 case MISCREG_FPEXC:
1027 {
1028 // vfpv3 architecture, section B.6.1 of DDI04068
1029 // bit 29 - valid only if fpexc[31] is 0
1030 const uint32_t fpexcMask = 0x60000000;
1031 newVal = (newVal & fpexcMask) |
1032 (readMiscRegNoEffect(MISCREG_FPEXC) & ~fpexcMask);
1033 }
1034 break;
1035 case MISCREG_HCR:
1036 case MISCREG_HCR2:
1037 if (!haveVirtualization)
1038 return;
1039 break;
1040 case MISCREG_IFSR:
1041 {
1042 // ARM ARM (ARM DDI 0406C.b) B4.1.96
1043 const uint32_t ifsrMask =
1044 mask(31, 13) | mask(11, 11) | mask(8, 6);
1045 newVal = newVal & ~ifsrMask;
1046 }
1047 break;
1048 case MISCREG_DFSR:
1049 {
1050 // ARM ARM (ARM DDI 0406C.b) B4.1.52
1051 const uint32_t dfsrMask = mask(31, 14) | mask(8, 8);
1052 newVal = newVal & ~dfsrMask;
1053 }
1054 break;
1055 case MISCREG_AMAIR0:
1056 case MISCREG_AMAIR1:
1057 {
1058 // ARM ARM (ARM DDI 0406C.b) B4.1.5
1059 // Valid only with LPAE
1060 if (!haveLPAE)
1061 return;
1062 DPRINTF(MiscRegs, "Writing AMAIR: %#x\n", newVal);
1063 }
1064 break;
1065 case MISCREG_SCR:
1066 getITBPtr(tc)->invalidateMiscReg();
1067 getDTBPtr(tc)->invalidateMiscReg();
1068 break;
1069 case MISCREG_SCTLR:
1070 {
1071 DPRINTF(MiscRegs, "Writing SCTLR: %#x\n", newVal);
1072 scr = readMiscRegNoEffect(MISCREG_SCR);
1073
1074 MiscRegIndex sctlr_idx;
1075 if (haveSecurity && !highestELIs64 && !scr.ns) {
1076 sctlr_idx = MISCREG_SCTLR_S;
1077 } else {
1078 sctlr_idx = MISCREG_SCTLR_NS;
1079 }
1080
1081 SCTLR sctlr = miscRegs[sctlr_idx];
1082 SCTLR new_sctlr = newVal;
1083 new_sctlr.nmfi = ((bool)sctlr.nmfi) && !haveVirtualization;
1084 miscRegs[sctlr_idx] = (RegVal)new_sctlr;
1085 getITBPtr(tc)->invalidateMiscReg();
1086 getDTBPtr(tc)->invalidateMiscReg();
1087 }
1088 case MISCREG_MIDR:
1089 case MISCREG_ID_PFR0:
1090 case MISCREG_ID_PFR1:
1091 case MISCREG_ID_DFR0:
1092 case MISCREG_ID_MMFR0:
1093 case MISCREG_ID_MMFR1:
1094 case MISCREG_ID_MMFR2:
1095 case MISCREG_ID_MMFR3:
1096 case MISCREG_ID_ISAR0:
1097 case MISCREG_ID_ISAR1:
1098 case MISCREG_ID_ISAR2:
1099 case MISCREG_ID_ISAR3:
1100 case MISCREG_ID_ISAR4:
1101 case MISCREG_ID_ISAR5:
1102
1103 case MISCREG_MPIDR:
1104 case MISCREG_FPSID:
1105 case MISCREG_TLBTR:
1106 case MISCREG_MVFR0:
1107 case MISCREG_MVFR1:
1108
1109 case MISCREG_ID_AA64AFR0_EL1:
1110 case MISCREG_ID_AA64AFR1_EL1:
1111 case MISCREG_ID_AA64DFR0_EL1:
1112 case MISCREG_ID_AA64DFR1_EL1:
1113 case MISCREG_ID_AA64ISAR0_EL1:
1114 case MISCREG_ID_AA64ISAR1_EL1:
1115 case MISCREG_ID_AA64MMFR0_EL1:
1116 case MISCREG_ID_AA64MMFR1_EL1:
1117 case MISCREG_ID_AA64MMFR2_EL1:
1118 case MISCREG_ID_AA64PFR0_EL1:
1119 case MISCREG_ID_AA64PFR1_EL1:
1120 // ID registers are constants.
1121 return;
1122
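// For the TLBI operations below, tlbiOp(tc) applies the invalidation
// to this core only, while the Inner Shareable variants use
// tlbiOp.broadcast(tc), which is meant to reach the other thread
// contexts in the system as well.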
1123 // TLB Invalidate All
1124 case MISCREG_TLBIALL: // TLBI all entries, EL0&1,
1125 {
1126 assert32(tc);
1127 scr = readMiscReg(MISCREG_SCR, tc);
1128
1129 TLBIALL tlbiOp(EL1, haveSecurity && !scr.ns);
1130 tlbiOp(tc);
1131 return;
1132 }
1133 // TLB Invalidate All, Inner Shareable
1134 case MISCREG_TLBIALLIS:
1135 {
1136 assert32(tc);
1137 scr = readMiscReg(MISCREG_SCR, tc);
1138
1139 TLBIALL tlbiOp(EL1, haveSecurity && !scr.ns);
1140 tlbiOp.broadcast(tc);
1141 return;
1142 }
1143 // Instruction TLB Invalidate All
1144 case MISCREG_ITLBIALL:
1145 {
1146 assert32(tc);
1147 scr = readMiscReg(MISCREG_SCR, tc);
1148
1149 ITLBIALL tlbiOp(EL1, haveSecurity && !scr.ns);
1150 tlbiOp(tc);
1151 return;
1152 }
1153 // Data TLB Invalidate All
1154 case MISCREG_DTLBIALL:
1155 {
1156 assert32(tc);
1157 scr = readMiscReg(MISCREG_SCR, tc);
1158
1159 DTLBIALL tlbiOp(EL1, haveSecurity && !scr.ns);
1160 tlbiOp(tc);
1161 return;
1162 }
1163 // TLB Invalidate by VA
1164 // mcr tlbimval(is) invalidates all matching entries
1165 // regardless of the level of lookup, since in gem5 we cache
1166 // only the last level of lookup in the TLB.
1167 case MISCREG_TLBIMVA:
1168 case MISCREG_TLBIMVAL:
1169 {
1170 assert32(tc);
1171 scr = readMiscReg(MISCREG_SCR, tc);
1172
1173 TLBIMVA tlbiOp(EL1,
1174 haveSecurity && !scr.ns,
1175 mbits(newVal, 31, 12),
1176 bits(newVal, 7,0));
1177
1178 tlbiOp(tc);
1179 return;
1180 }
1181 // TLB Invalidate by VA, Inner Shareable
1182 case MISCREG_TLBIMVAIS:
1183 case MISCREG_TLBIMVALIS:
1184 {
1185 assert32(tc);
1186 scr = readMiscReg(MISCREG_SCR, tc);
1187
1188 TLBIMVA tlbiOp(EL1,
1189 haveSecurity && !scr.ns,
1190 mbits(newVal, 31, 12),
1191 bits(newVal, 7,0));
1192
1193 tlbiOp.broadcast(tc);
1194 return;
1195 }
1196 // TLB Invalidate by ASID match
1197 case MISCREG_TLBIASID:
1198 {
1199 assert32(tc);
1200 scr = readMiscReg(MISCREG_SCR, tc);
1201
1202 TLBIASID tlbiOp(EL1,
1203 haveSecurity && !scr.ns,
1204 bits(newVal, 7,0));
1205
1206 tlbiOp(tc);
1207 return;
1208 }
1209 // TLB Invalidate by ASID match, Inner Shareable
1210 case MISCREG_TLBIASIDIS:
1211 {
1212 assert32(tc);
1213 scr = readMiscReg(MISCREG_SCR, tc);
1214
1215 TLBIASID tlbiOp(EL1,
1216 haveSecurity && !scr.ns,
1217 bits(newVal, 7,0));
1218
1219 tlbiOp.broadcast(tc);
1220 return;
1221 }
1222 // mcr tlbimvaal(is) invalidates all matching entries
1223 // regardless of the level of lookup, since in gem5 we cache
1224 // only the last level of lookup in the TLB.
1225 // TLB Invalidate by VA, All ASID
1226 case MISCREG_TLBIMVAA:
1227 case MISCREG_TLBIMVAAL:
1228 {
1229 assert32(tc);
1230 scr = readMiscReg(MISCREG_SCR, tc);
1231
1232 TLBIMVAA tlbiOp(EL1, haveSecurity && !scr.ns,
1233 mbits(newVal, 31,12));
1234
1235 tlbiOp(tc);
1236 return;
1237 }
1238 // TLB Invalidate by VA, All ASID, Inner Shareable
1239 case MISCREG_TLBIMVAAIS:
1240 case MISCREG_TLBIMVAALIS:
1241 {
1242 assert32(tc);
1243 scr = readMiscReg(MISCREG_SCR, tc);
1244
1245 TLBIMVAA tlbiOp(EL1, haveSecurity && !scr.ns,
1246 mbits(newVal, 31,12));
1247
1248 tlbiOp.broadcast(tc);
1249 return;
1250 }
1251 // mcr tlbimvalh(is) invalidates all matching entries
1252 // regardless of the level of lookup, since in gem5 we cache
1253 // only the last level of lookup in the TLB.
1254 // TLB Invalidate by VA, Hyp mode
1255 case MISCREG_TLBIMVAH:
1256 case MISCREG_TLBIMVALH:
1257 {
1258 assert32(tc);
1259 scr = readMiscReg(MISCREG_SCR, tc);
1260
1261 TLBIMVAA tlbiOp(EL2, haveSecurity && !scr.ns,
1262 mbits(newVal, 31,12));
1263
1264 tlbiOp(tc);
1265 return;
1266 }
1267 // TLB Invalidate by VA, Hyp mode, Inner Shareable
1268 case MISCREG_TLBIMVAHIS:
1269 case MISCREG_TLBIMVALHIS:
1270 {
1271 assert32(tc);
1272 scr = readMiscReg(MISCREG_SCR, tc);
1273
1274 TLBIMVAA tlbiOp(EL2, haveSecurity && !scr.ns,
1275 mbits(newVal, 31,12));
1276
1277 tlbiOp.broadcast(tc);
1278 return;
1279 }
1280 // mcr tlbiipas2l(is) invalidates all matching entries
1281 // regardless of the level of lookup, since in gem5 we cache
1282 // only the last level of lookup in the TLB.
1283 // TLB Invalidate by Intermediate Physical Address, Stage 2
1284 case MISCREG_TLBIIPAS2:
1285 case MISCREG_TLBIIPAS2L:
1286 {
1287 assert32(tc);
1288 scr = readMiscReg(MISCREG_SCR, tc);
1289
1290 TLBIIPA tlbiOp(EL1,
1291 haveSecurity && !scr.ns,
1292 static_cast<Addr>(bits(newVal, 35, 0)) << 12);
1293
1294 tlbiOp(tc);
1295 return;
1296 }
1297 // TLB Invalidate by Intermediate Physical Address, Stage 2,
1298 // Inner Shareable
1299 case MISCREG_TLBIIPAS2IS:
1300 case MISCREG_TLBIIPAS2LIS:
1301 {
1302 assert32(tc);
1303 scr = readMiscReg(MISCREG_SCR, tc);
1304
1305 TLBIIPA tlbiOp(EL1,
1306 haveSecurity && !scr.ns,
1307 static_cast<Addr>(bits(newVal, 35, 0)) << 12);
1308
1309 tlbiOp.broadcast(tc);
1310 return;
1311 }
1312 // Instruction TLB Invalidate by VA
1313 case MISCREG_ITLBIMVA:
1314 {
1315 assert32(tc);
1316 scr = readMiscReg(MISCREG_SCR, tc);
1317
1318 ITLBIMVA tlbiOp(EL1,
1319 haveSecurity && !scr.ns,
1320 mbits(newVal, 31, 12),
1321 bits(newVal, 7,0));
1322
1323 tlbiOp(tc);
1324 return;
1325 }
1326 // Data TLB Invalidate by VA
1327 case MISCREG_DTLBIMVA:
1328 {
1329 assert32(tc);
1330 scr = readMiscReg(MISCREG_SCR, tc);
1331
1332 DTLBIMVA tlbiOp(EL1,
1333 haveSecurity && !scr.ns,
1334 mbits(newVal, 31, 12),
1335 bits(newVal, 7,0));
1336
1337 tlbiOp(tc);
1338 return;
1339 }
1340 // Instruction TLB Invalidate by ASID match
1341 case MISCREG_ITLBIASID:
1342 {
1343 assert32(tc);
1344 scr = readMiscReg(MISCREG_SCR, tc);
1345
1346 ITLBIASID tlbiOp(EL1,
1347 haveSecurity && !scr.ns,
1348 bits(newVal, 7,0));
1349
1350 tlbiOp(tc);
1351 return;
1352 }
1353 // Data TLB Invalidate by ASID match
1354 case MISCREG_DTLBIASID:
1355 {
1356 assert32(tc);
1357 scr = readMiscReg(MISCREG_SCR, tc);
1358
1359 DTLBIASID tlbiOp(EL1,
1360 haveSecurity && !scr.ns,
1361 bits(newVal, 7,0));
1362
1363 tlbiOp(tc);
1364 return;
1365 }
1366 // TLB Invalidate All, Non-Secure Non-Hyp
1367 case MISCREG_TLBIALLNSNH:
1368 {
1369 assert32(tc);
1370
1371 TLBIALLN tlbiOp(EL1);
1372 tlbiOp(tc);
1373 return;
1374 }
1375 // TLB Invalidate All, Non-Secure Non-Hyp, Inner Shareable
1376 case MISCREG_TLBIALLNSNHIS:
1377 {
1378 assert32(tc);
1379
1380 TLBIALLN tlbiOp(EL1);
1381 tlbiOp.broadcast(tc);
1382 return;
1383 }
1384 // TLB Invalidate All, Hyp mode
1385 case MISCREG_TLBIALLH:
1386 {
1387 assert32(tc);
1388
1389 TLBIALLN tlbiOp(EL2);
1390 tlbiOp(tc);
1391 return;
1392 }
1393 // TLB Invalidate All, Hyp mode, Inner Shareable
1394 case MISCREG_TLBIALLHIS:
1395 {
1396 assert32(tc);
1397
1398 TLBIALLN tlbiOp(EL2);
1399 tlbiOp.broadcast(tc);
1400 return;
1401 }
1402 // AArch64 TLB Invalidate All, EL3
1403 case MISCREG_TLBI_ALLE3:
1404 {
1405 assert64(tc);
1406
1407 TLBIALL tlbiOp(EL3, true);
1408 tlbiOp(tc);
1409 return;
1410 }
1411 // AArch64 TLB Invalidate All, EL3, Inner Shareable
1412 case MISCREG_TLBI_ALLE3IS:
1413 {
1414 assert64(tc);
1415
1416 TLBIALL tlbiOp(EL3, true);
1417 tlbiOp.broadcast(tc);
1418 return;
1419 }
1420 // AArch64 TLB Invalidate All, EL2, Inner Shareable
1421 case MISCREG_TLBI_ALLE2:
1422 case MISCREG_TLBI_ALLE2IS:
1423 {
1424 assert64(tc);
1425 scr = readMiscReg(MISCREG_SCR, tc);
1426
1427 TLBIALL tlbiOp(EL2, haveSecurity && !scr.ns);
1428 tlbiOp(tc);
1429 return;
1430 }
1431 // AArch64 TLB Invalidate All, EL1
1432 case MISCREG_TLBI_ALLE1:
1433 case MISCREG_TLBI_VMALLE1:
1434 case MISCREG_TLBI_VMALLS12E1:
1435 // @todo: handle VMID and stage 2 to enable Virtualization
1436 {
1437 assert64(tc);
1438 scr = readMiscReg(MISCREG_SCR, tc);
1439
1440 TLBIALL tlbiOp(EL1, haveSecurity && !scr.ns);
1441 tlbiOp(tc);
1442 return;
1443 }
1444 // AArch64 TLB Invalidate All, EL1, Inner Shareable
1445 case MISCREG_TLBI_ALLE1IS:
1446 case MISCREG_TLBI_VMALLE1IS:
1447 case MISCREG_TLBI_VMALLS12E1IS:
1448 // @todo: handle VMID and stage 2 to enable Virtualization
1449 {
1450 assert64(tc);
1451 scr = readMiscReg(MISCREG_SCR, tc);
1452
1453 TLBIALL tlbiOp(EL1, haveSecurity && !scr.ns);
1454 tlbiOp.broadcast(tc);
1455 return;
1456 }
1457 // VAEx(IS) and VALEx(IS) are the same because TLBs only
1458 // store entries from the last level of translation
1459 // table walks
1460 // @todo: handle VMID to enable Virtualization
1461 // AArch64 TLB Invalidate by VA, EL3
1462 case MISCREG_TLBI_VAE3_Xt:
1463 case MISCREG_TLBI_VALE3_Xt:
1464 {
1465 assert64(tc);
1466
1467 TLBIMVA tlbiOp(EL3, true,
1468 static_cast<Addr>(bits(newVal, 43, 0)) << 12,
1469 0xbeef);
1470 tlbiOp(tc);
1471 return;
1472 }
1473 // AArch64 TLB Invalidate by VA, EL3, Inner Shareable
1474 case MISCREG_TLBI_VAE3IS_Xt:
1475 case MISCREG_TLBI_VALE3IS_Xt:
1476 {
1477 assert64(tc);
1478
1479 TLBIMVA tlbiOp(EL3, true,
1480 static_cast<Addr>(bits(newVal, 43, 0)) << 12,
1481 0xbeef);
1482
1483 tlbiOp.broadcast(tc);
1484 return;
1485 }
1486 // AArch64 TLB Invalidate by VA, EL2
1487 case MISCREG_TLBI_VAE2_Xt:
1488 case MISCREG_TLBI_VALE2_Xt:
1489 {
1490 assert64(tc);
1491 scr = readMiscReg(MISCREG_SCR, tc);
1492
1493 TLBIMVA tlbiOp(EL2, haveSecurity && !scr.ns,
1494 static_cast<Addr>(bits(newVal, 43, 0)) << 12,
1495 0xbeef);
1496 tlbiOp(tc);
1497 return;
1498 }
1499 // AArch64 TLB Invalidate by VA, EL2, Inner Shareable
1500 case MISCREG_TLBI_VAE2IS_Xt:
1501 case MISCREG_TLBI_VALE2IS_Xt:
1502 {
1503 assert64(tc);
1504 scr = readMiscReg(MISCREG_SCR, tc);
1505
1506 TLBIMVA tlbiOp(EL2, haveSecurity && !scr.ns,
1507 static_cast<Addr>(bits(newVal, 43, 0)) << 12,
1508 0xbeef);
1509
1510 tlbiOp.broadcast(tc);
1511 return;
1512 }
1513 // AArch64 TLB Invalidate by VA, EL1
1514 case MISCREG_TLBI_VAE1_Xt:
1515 case MISCREG_TLBI_VALE1_Xt:
1516 {
1517 assert64(tc);
1518 scr = readMiscReg(MISCREG_SCR, tc);
1519 auto asid = haveLargeAsid64 ? bits(newVal, 63, 48) :
1520 bits(newVal, 55, 48);
1521
1522 TLBIMVA tlbiOp(EL1, haveSecurity && !scr.ns,
1523 static_cast<Addr>(bits(newVal, 43, 0)) << 12,
1524 asid);
1525
1526 tlbiOp(tc);
1527 return;
1528 }
1529 // AArch64 TLB Invalidate by VA, EL1, Inner Shareable
1530 case MISCREG_TLBI_VAE1IS_Xt:
1531 case MISCREG_TLBI_VALE1IS_Xt:
1532 {
1533 assert64(tc);
1534 scr = readMiscReg(MISCREG_SCR, tc);
1535 auto asid = haveLargeAsid64 ? bits(newVal, 63, 48) :
1536 bits(newVal, 55, 48);
1537
1538 TLBIMVA tlbiOp(EL1, haveSecurity && !scr.ns,
1539 static_cast<Addr>(bits(newVal, 43, 0)) << 12,
1540 asid);
1541
1542 tlbiOp.broadcast(tc);
1543 return;
1544 }
1545 // AArch64 TLB Invalidate by ASID, EL1
1546 // @todo: handle VMID to enable Virtualization
1547 case MISCREG_TLBI_ASIDE1_Xt:
1548 {
1549 assert64(tc);
1550 scr = readMiscReg(MISCREG_SCR, tc);
1551 auto asid = haveLargeAsid64 ? bits(newVal, 63, 48) :
1552 bits(newVal, 55, 48);
1553
1554 TLBIASID tlbiOp(EL1, haveSecurity && !scr.ns, asid);
1555 tlbiOp(tc);
1556 return;
1557 }
1558 // AArch64 TLB Invalidate by ASID, EL1, Inner Shareable
1559 case MISCREG_TLBI_ASIDE1IS_Xt:
1560 {
1561 assert64(tc);
1562 scr = readMiscReg(MISCREG_SCR, tc);
1563 auto asid = haveLargeAsid64 ? bits(newVal, 63, 48) :
1564 bits(newVal, 55, 48);
1565
1566 TLBIASID tlbiOp(EL1, haveSecurity && !scr.ns, asid);
1567 tlbiOp.broadcast(tc);
1568 return;
1569 }
1570 // VAAE1(IS) and VAALE1(IS) are the same because TLBs only store
1571 // entries from the last level of translation table walks
1572 // AArch64 TLB Invalidate by VA, All ASID, EL1
1573 case MISCREG_TLBI_VAAE1_Xt:
1574 case MISCREG_TLBI_VAALE1_Xt:
1575 {
1576 assert64(tc);
1577 scr = readMiscReg(MISCREG_SCR, tc);
1578
1579 TLBIMVAA tlbiOp(EL1, haveSecurity && !scr.ns,
1580 static_cast<Addr>(bits(newVal, 43, 0)) << 12);
1581
1582 tlbiOp(tc);
1583 return;
1584 }
1585 // AArch64 TLB Invalidate by VA, All ASID, EL1, Inner Shareable
1586 case MISCREG_TLBI_VAAE1IS_Xt:
1587 case MISCREG_TLBI_VAALE1IS_Xt:
1588 {
1589 assert64(tc);
1590 scr = readMiscReg(MISCREG_SCR, tc);
1591
1592 TLBIMVAA tlbiOp(EL1, haveSecurity && !scr.ns,
1593 static_cast<Addr>(bits(newVal, 43, 0)) << 12);
1594
1595 tlbiOp.broadcast(tc);
1596 return;
1597 }
1598 // AArch64 TLB Invalidate by Intermediate Physical Address,
1599 // Stage 2, EL1
1600 case MISCREG_TLBI_IPAS2E1_Xt:
1601 case MISCREG_TLBI_IPAS2LE1_Xt:
1602 {
1603 assert64(tc);
1604 scr = readMiscReg(MISCREG_SCR, tc);
1605
1606 TLBIIPA tlbiOp(EL1, haveSecurity && !scr.ns,
1607 static_cast<Addr>(bits(newVal, 35, 0)) << 12);
1608
1609 tlbiOp(tc);
1610 return;
1611 }
1612 // AArch64 TLB Invalidate by Intermediate Physical Address,
1613 // Stage 2, EL1, Inner Shareable
1614 case MISCREG_TLBI_IPAS2E1IS_Xt:
1615 case MISCREG_TLBI_IPAS2LE1IS_Xt:
1616 {
1617 assert64(tc);
1618 scr = readMiscReg(MISCREG_SCR, tc);
1619
1620 TLBIIPA tlbiOp(EL1, haveSecurity && !scr.ns,
1621 static_cast<Addr>(bits(newVal, 35, 0)) << 12);
1622
1623 tlbiOp.broadcast(tc);
1624 return;
1625 }
1626 case MISCREG_ACTLR:
1627 warn("Not doing anything for write of miscreg ACTLR\n");
1628 break;
1629
1630 case MISCREG_PMXEVTYPER_PMCCFILTR:
1631 case MISCREG_PMINTENSET_EL1 ... MISCREG_PMOVSSET_EL0:
1632 case MISCREG_PMEVCNTR0_EL0 ... MISCREG_PMEVTYPER5_EL0:
1633 case MISCREG_PMCR ... MISCREG_PMOVSSET:
1634 pmu->setMiscReg(misc_reg, newVal);
1635 break;
1636
1637
1638 case MISCREG_HSTR: // TJDBX, now redefined to be RES0
1639 {
1640 HSTR hstrMask = 0;
1641 hstrMask.tjdbx = 1;
1642 newVal &= ~((uint32_t) hstrMask);
1643 break;
1644 }
1645 case MISCREG_HCPTR:
1646 {
1647 // If a CP bit in NSACR is 0 then the corresponding bit in
1648 // HCPTR is RAO/WI. Same applies to NSASEDIS
1649 secure_lookup = haveSecurity &&
1650 inSecureState(readMiscRegNoEffect(MISCREG_SCR),
1651 readMiscRegNoEffect(MISCREG_CPSR));
1652 if (!secure_lookup) {
1653 RegVal oldValue = readMiscRegNoEffect(MISCREG_HCPTR);
1654 RegVal mask =
1655 (readMiscRegNoEffect(MISCREG_NSACR) ^ 0x7FFF) & 0xBFFF;
1656 newVal = (newVal & ~mask) | (oldValue & mask);
1657 }
1658 break;
1659 }
1660 case MISCREG_HDFAR: // alias for secure DFAR
1661 misc_reg = MISCREG_DFAR_S;
1662 break;
1663 case MISCREG_HIFAR: // alias for secure IFAR
1664 misc_reg = MISCREG_IFAR_S;
1665 break;
1666 case MISCREG_ATS1CPR:
1667 case MISCREG_ATS1CPW:
1668 case MISCREG_ATS1CUR:
1669 case MISCREG_ATS1CUW:
1670 case MISCREG_ATS12NSOPR:
1671 case MISCREG_ATS12NSOPW:
1672 case MISCREG_ATS12NSOUR:
1673 case MISCREG_ATS12NSOUW:
1674 case MISCREG_ATS1HR:
1675 case MISCREG_ATS1HW:
1676 {
1677 Request::Flags flags = 0;
1678 BaseTLB::Mode mode = BaseTLB::Read;
1679 TLB::ArmTranslationType tranType = TLB::NormalTran;
1680 Fault fault;
1681 switch(misc_reg) {
1682 case MISCREG_ATS1CPR:
1683 flags = TLB::MustBeOne;
1684 tranType = TLB::S1CTran;
1685 mode = BaseTLB::Read;
1686 break;
1687 case MISCREG_ATS1CPW:
1688 flags = TLB::MustBeOne;
1689 tranType = TLB::S1CTran;
1690 mode = BaseTLB::Write;
1691 break;
1692 case MISCREG_ATS1CUR:
1693 flags = TLB::MustBeOne | TLB::UserMode;
1694 tranType = TLB::S1CTran;
1695 mode = BaseTLB::Read;
1696 break;
1697 case MISCREG_ATS1CUW:
1698 flags = TLB::MustBeOne | TLB::UserMode;
1699 tranType = TLB::S1CTran;
1700 mode = BaseTLB::Write;
1701 break;
1702 case MISCREG_ATS12NSOPR:
1703 if (!haveSecurity)
1704 panic("Security Extensions required for ATS12NSOPR");
1705 flags = TLB::MustBeOne;
1706 tranType = TLB::S1S2NsTran;
1707 mode = BaseTLB::Read;
1708 break;
1709 case MISCREG_ATS12NSOPW:
1710 if (!haveSecurity)
1711 panic("Security Extensions required for ATS12NSOPW");
1712 flags = TLB::MustBeOne;
1713 tranType = TLB::S1S2NsTran;
1714 mode = BaseTLB::Write;
1715 break;
1716 case MISCREG_ATS12NSOUR:
1717 if (!haveSecurity)
1718 panic("Security Extensions required for ATS12NSOUR");
1719 flags = TLB::MustBeOne | TLB::UserMode;
1720 tranType = TLB::S1S2NsTran;
1721 mode = BaseTLB::Read;
1722 break;
1723 case MISCREG_ATS12NSOUW:
1724 if (!haveSecurity)
1725 panic("Security Extensions required for ATS12NSOUW");
1726 flags = TLB::MustBeOne | TLB::UserMode;
1727 tranType = TLB::S1S2NsTran;
1728 mode = BaseTLB::Write;
1729 break;
1730 case MISCREG_ATS1HR: // only really useful from secure mode.
1731 flags = TLB::MustBeOne;
1732 tranType = TLB::HypMode;
1733 mode = BaseTLB::Read;
1734 break;
1735 case MISCREG_ATS1HW:
1736 flags = TLB::MustBeOne;
1737 tranType = TLB::HypMode;
1738 mode = BaseTLB::Write;
1739 break;
1740 }
1741 // If we're in timing mode, doing the translation in
1742 // functional mode slightly distorts the performance
1743 // results obtained from simulations. The translation should be
1744 // done in the same mode the core is running in. NOTE: This
1745 // can't be an atomic translation because that causes problems
1746 // with unexpected atomic snoop requests.
1747 warn("Translating via %s in functional mode! Fix Me!\n",
1748 miscRegName[misc_reg]);
1749
1750 auto req = std::make_shared<Request>(
1751 val, 0, flags, Request::funcMasterId,
1752 tc->pcState().pc(), tc->contextId());
1753
1754 fault = getDTBPtr(tc)->translateFunctional(
1755 req, tc, mode, tranType);
1756
1757 TTBCR ttbcr = readMiscRegNoEffect(MISCREG_TTBCR);
1758 HCR hcr = readMiscRegNoEffect(MISCREG_HCR);
1759
1760 RegVal newVal;
1761 if (fault == NoFault) {
1762 Addr paddr = req->getPaddr();
1763 if (haveLPAE && (ttbcr.eae || tranType & TLB::HypMode ||
1764 ((tranType & TLB::S1S2NsTran) && hcr.vm) )) {
1765 newVal = (paddr & mask(39, 12)) |
1766 (getDTBPtr(tc)->getAttr());
1767 } else {
1768 newVal = (paddr & 0xfffff000) |
1769 (getDTBPtr(tc)->getAttr());
1770 }
1771 DPRINTF(MiscRegs,
1772 "MISCREG: Translated addr 0x%08x: PAR: 0x%08x\n",
1773 val, newVal);
1774 } else {
1775 ArmFault *armFault = static_cast<ArmFault *>(fault.get());
1776 armFault->update(tc);
1777 // Set fault bit and FSR
1778 FSR fsr = armFault->getFsr(tc);
1779
1780 newVal = ((fsr >> 9) & 1) << 11;
1781 if (newVal) {
1782 // LPAE - rearrange fault status
1783 newVal |= ((fsr >> 0) & 0x3f) << 1;
1784 } else {
1785 // VMSA - rearrange fault status
1786 newVal |= ((fsr >> 0) & 0xf) << 1;
1787 newVal |= ((fsr >> 10) & 0x1) << 5;
1788 newVal |= ((fsr >> 12) & 0x1) << 6;
1789 }
1790 newVal |= 0x1; // F bit
1791 newVal |= ((armFault->iss() >> 7) & 0x1) << 8;
1792 newVal |= armFault->isStage2() ? 0x200 : 0;
1793 DPRINTF(MiscRegs,
1794 "MISCREG: Translated addr 0x%08x fault fsr %#x: PAR: 0x%08x\n",
1795 val, fsr, newVal);
1796 }
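// Whether the translation succeeded or faulted, the outcome is
// reported through PAR: either the translated physical address plus
// memory attributes, or the fault status encoding assembled above.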
1797 setMiscRegNoEffect(MISCREG_PAR, newVal);
1798 return;
1799 }
1800 case MISCREG_TTBCR:
1801 {
1802 TTBCR ttbcr = readMiscRegNoEffect(MISCREG_TTBCR);
1803 const uint32_t ones = (uint32_t)(-1);
1804 TTBCR ttbcrMask = 0;
1805 TTBCR ttbcrNew = newVal;
1806
1807 // ARM DDI 0406C.b, ARMv7-32
1808 ttbcrMask.n = ones; // T0SZ
1809 if (haveSecurity) {
1810 ttbcrMask.pd0 = ones;
1811 ttbcrMask.pd1 = ones;
1812 }
1813 ttbcrMask.epd0 = ones;
1814 ttbcrMask.irgn0 = ones;
1815 ttbcrMask.orgn0 = ones;
1816 ttbcrMask.sh0 = ones;
1817 ttbcrMask.ps = ones; // T1SZ
1818 ttbcrMask.a1 = ones;
1819 ttbcrMask.epd1 = ones;
1820 ttbcrMask.irgn1 = ones;
1821 ttbcrMask.orgn1 = ones;
1822 ttbcrMask.sh1 = ones;
1823 if (haveLPAE)
1824 ttbcrMask.eae = ones;
1825
1826 if (haveLPAE && ttbcrNew.eae) {
1827 newVal = newVal & ttbcrMask;
1828 } else {
1829 newVal = (newVal & ttbcrMask) | (ttbcr & (~ttbcrMask));
1830 }
1831 // Invalidate TLB MiscReg
1832 getITBPtr(tc)->invalidateMiscReg();
1833 getDTBPtr(tc)->invalidateMiscReg();
1834 break;
1835 }
1836 case MISCREG_TTBR0:
1837 case MISCREG_TTBR1:
1838 {
1839 TTBCR ttbcr = readMiscRegNoEffect(MISCREG_TTBCR);
1840 if (haveLPAE) {
1841 if (ttbcr.eae) {
1842 // ARMv7: bits 63-56 and 47-40 reserved, UNK/SBZP
1843 // ARMv8 AArch32: bits 63-56 only
1844 uint64_t ttbrMask = mask(63,56) | mask(47,40);
1845 newVal = (newVal & (~ttbrMask));
1846 }
1847 }
1848 // Invalidate TLB MiscReg
1849 getITBPtr(tc)->invalidateMiscReg();
1850 getDTBPtr(tc)->invalidateMiscReg();
1851 break;
1852 }
1853 case MISCREG_SCTLR_EL1:
1854 case MISCREG_CONTEXTIDR:
1855 case MISCREG_PRRR:
1856 case MISCREG_NMRR:
1857 case MISCREG_MAIR0:
1858 case MISCREG_MAIR1:
1859 case MISCREG_DACR:
1860 case MISCREG_VTTBR:
1861 case MISCREG_SCR_EL3:
1862 case MISCREG_HCR_EL2:
1863 case MISCREG_TCR_EL1:
1864 case MISCREG_TCR_EL2:
1865 case MISCREG_TCR_EL3:
1866 case MISCREG_SCTLR_EL2:
1867 case MISCREG_SCTLR_EL3:
1868 case MISCREG_HSCTLR:
1869 case MISCREG_TTBR0_EL1:
1870 case MISCREG_TTBR1_EL1:
1871 case MISCREG_TTBR0_EL2:
1872 case MISCREG_TTBR1_EL2:
1873 case MISCREG_TTBR0_EL3:
1874 getITBPtr(tc)->invalidateMiscReg();
1875 getDTBPtr(tc)->invalidateMiscReg();
1876 break;
1877 case MISCREG_NZCV:
1878 {
1879 CPSR cpsr = val;
1880
1881 tc->setCCReg(CCREG_NZ, cpsr.nz);
1882 tc->setCCReg(CCREG_C, cpsr.c);
1883 tc->setCCReg(CCREG_V, cpsr.v);
1884 }
1885 break;
1886 case MISCREG_DAIF:
1887 {
1888 CPSR cpsr = miscRegs[MISCREG_CPSR];
1889 cpsr.daif = (uint8_t) ((CPSR) newVal).daif;
1890 newVal = cpsr;
1891 misc_reg = MISCREG_CPSR;
1892 }
1893 break;
1894 case MISCREG_SP_EL0:
1895 tc->setIntReg(INTREG_SP0, newVal);
1896 break;
1897 case MISCREG_SP_EL1:
1898 tc->setIntReg(INTREG_SP1, newVal);
1899 break;
1900 case MISCREG_SP_EL2:
1901 tc->setIntReg(INTREG_SP2, newVal);
1902 break;
1903 case MISCREG_SPSEL:
1904 {
1905 CPSR cpsr = miscRegs[MISCREG_CPSR];
1906 cpsr.sp = (uint8_t) ((CPSR) newVal).sp;
1907 newVal = cpsr;
1908 misc_reg = MISCREG_CPSR;
1909 }
1910 break;
1911 case MISCREG_CURRENTEL:
1912 {
1913 CPSR cpsr = miscRegs[MISCREG_CPSR];
1914 cpsr.el = (uint8_t) ((CPSR) newVal).el;
1915 newVal = cpsr;
1916 misc_reg = MISCREG_CPSR;
1917 }
1918 break;
1919 case MISCREG_PAN:
1920 {
1921 // PAN affects data accesses
1922 getDTBPtr(tc)->invalidateMiscReg();
1923
1924 CPSR cpsr = miscRegs[MISCREG_CPSR];
1925 cpsr.pan = (uint8_t) ((CPSR) newVal).pan;
1926 newVal = cpsr;
1927 misc_reg = MISCREG_CPSR;
1928 }
1929 break;
1930 case MISCREG_AT_S1E1R_Xt:
1931 case MISCREG_AT_S1E1W_Xt:
1932 case MISCREG_AT_S1E0R_Xt:
1933 case MISCREG_AT_S1E0W_Xt:
1934 case MISCREG_AT_S1E2R_Xt:
1935 case MISCREG_AT_S1E2W_Xt:
1936 case MISCREG_AT_S12E1R_Xt:
1937 case MISCREG_AT_S12E1W_Xt:
1938 case MISCREG_AT_S12E0R_Xt:
1939 case MISCREG_AT_S12E0W_Xt:
1940 case MISCREG_AT_S1E3R_Xt:
1941 case MISCREG_AT_S1E3W_Xt:
1942 {
1943 RequestPtr req = std::make_shared<Request>();
1944 Request::Flags flags = 0;
1945 BaseTLB::Mode mode = BaseTLB::Read;
1946 TLB::ArmTranslationType tranType = TLB::NormalTran;
1947 Fault fault;
1948 switch(misc_reg) {
1949 case MISCREG_AT_S1E1R_Xt:
1950 flags = TLB::MustBeOne;
1951 tranType = TLB::S1E1Tran;
1952 mode = BaseTLB::Read;
1953 break;
1954 case MISCREG_AT_S1E1W_Xt:
1955 flags = TLB::MustBeOne;
1956 tranType = TLB::S1E1Tran;
1957 mode = BaseTLB::Write;
1958 break;
1959 case MISCREG_AT_S1E0R_Xt:
1960 flags = TLB::MustBeOne | TLB::UserMode;
1961 tranType = TLB::S1E0Tran;
1962 mode = BaseTLB::Read;
1963 break;
1964 case MISCREG_AT_S1E0W_Xt:
1965 flags = TLB::MustBeOne | TLB::UserMode;
1966 tranType = TLB::S1E0Tran;
1967 mode = BaseTLB::Write;
1968 break;
1969 case MISCREG_AT_S1E2R_Xt:
1970 flags = TLB::MustBeOne;
1971 tranType = TLB::S1E2Tran;
1972 mode = BaseTLB::Read;
1973 break;
1974 case MISCREG_AT_S1E2W_Xt:
1975 flags = TLB::MustBeOne;
1976 tranType = TLB::S1E2Tran;
1977 mode = BaseTLB::Write;
1978 break;
1979 case MISCREG_AT_S12E0R_Xt:
1980 flags = TLB::MustBeOne | TLB::UserMode;
1981 tranType = TLB::S12E0Tran;
1982 mode = BaseTLB::Read;
1983 break;
1984 case MISCREG_AT_S12E0W_Xt:
1985 flags = TLB::MustBeOne | TLB::UserMode;
1986 tranType = TLB::S12E0Tran;
1987 mode = BaseTLB::Write;
1988 break;
1989 case MISCREG_AT_S12E1R_Xt:
1990 flags = TLB::MustBeOne;
1991 tranType = TLB::S12E1Tran;
1992 mode = BaseTLB::Read;
1993 break;
1994 case MISCREG_AT_S12E1W_Xt:
1995 flags = TLB::MustBeOne;
1996 tranType = TLB::S12E1Tran;
1997 mode = BaseTLB::Write;
1998 break;
1999 case MISCREG_AT_S1E3R_Xt:
2000 flags = TLB::MustBeOne;
2001 tranType = TLB::S1E3Tran;
2002 mode = BaseTLB::Read;
2003 break;
2004 case MISCREG_AT_S1E3W_Xt:
2005 flags = TLB::MustBeOne;
2006 tranType = TLB::S1E3Tran;
2007 mode = BaseTLB::Write;
2008 break;
2009 }
2010 // If we're in timing mode, doing the translation in
2011 // functional mode slightly distorts the performance
2012 // results obtained from simulations. The translation should be
2013 // done in the same mode the core is running in. NOTE: This
2014 // can't be an atomic translation because that causes problems
2015 // with unexpected atomic snoop requests.
2016 warn("Translating via %s in functional mode! Fix Me!\n",
2017 miscRegName[misc_reg]);
2018
2019 req->setVirt(val, 0, flags, Request::funcMasterId,
2020 tc->pcState().pc());
2021 req->setContext(tc->contextId());
2022 fault = getDTBPtr(tc)->translateFunctional(req, tc, mode,
2023 tranType);
2024
2025 RegVal newVal;
2026 if (fault == NoFault) {
2027 Addr paddr = req->getPaddr();
2028 uint64_t attr = getDTBPtr(tc)->getAttr();
2029 uint64_t attr1 = attr >> 56;
2030 if (!attr1 || attr1 == 0x44) {
2031 attr |= 0x100;
2032 attr &= ~ uint64_t(0x80);
2033 }
2034 newVal = (paddr & mask(47, 12)) | attr;
2035 DPRINTF(MiscRegs,
2036 "MISCREG: Translated addr %#x: PAR_EL1: %#xx\n",
2037 val, newVal);
2038 } else {
2039 ArmFault *armFault = static_cast<ArmFault *>(fault.get());
2040 armFault->update(tc);
2041 // Set fault bit and FSR
2042 FSR fsr = armFault->getFsr(tc);
2043
2044 CPSR cpsr = tc->readMiscReg(MISCREG_CPSR);
2045 if (cpsr.width) { // AArch32
2046 newVal = ((fsr >> 9) & 1) << 11;
2047 // rearrange fault status
2048 newVal |= ((fsr >> 0) & 0x3f) << 1;
2049 newVal |= 0x1; // F bit
2050 newVal |= ((armFault->iss() >> 7) & 0x1) << 8;
2051 newVal |= armFault->isStage2() ? 0x200 : 0;
2052 } else { // AArch64
2053 newVal = 1; // F bit
2054 newVal |= fsr << 1; // FST
2055 // TODO: DDI 0487A.f D7-2083, AbortFault's s1ptw bit.
2056 newVal |= armFault->isStage2() ? 1 << 8 : 0; // PTW
2057 newVal |= armFault->isStage2() ? 1 << 9 : 0; // S
2058 newVal |= 1 << 11; // RES1
2059 }
2060 DPRINTF(MiscRegs,
2061 "MISCREG: Translated addr %#x fault fsr %#x: PAR: %#x\n",
2062 val, fsr, newVal);
2063 }
2064 setMiscRegNoEffect(MISCREG_PAR_EL1, newVal);
2065 return;
2066 }
2067 case MISCREG_SPSR_EL3:
2068 case MISCREG_SPSR_EL2:
2069 case MISCREG_SPSR_EL1:
2070 {
2071 RegVal spsr_mask = havePAN ?
2072 ~(0x5 << 21) : ~(0x7 << 21);
2073
2074 newVal = val & spsr_mask;
2075 break;
2076 }
2077 case MISCREG_L2CTLR:
2078 warn("miscreg L2CTLR (%s) written with %#x. ignored...\n",
2079 miscRegName[misc_reg], uint32_t(val));
2080 break;
2081
2082 // Generic Timer registers
2083 case MISCREG_CNTFRQ ... MISCREG_CNTVOFF:
2084 case MISCREG_CNTFRQ_EL0 ... MISCREG_CNTVOFF_EL2:
2085 getGenericTimer(tc).setMiscReg(misc_reg, newVal);
2086 break;
2087 case MISCREG_ICC_AP0R0 ... MISCREG_ICH_LRC15:
2088 case MISCREG_ICC_PMR_EL1 ... MISCREG_ICC_IGRPEN1_EL3:
2089 case MISCREG_ICH_AP0R0_EL2 ... MISCREG_ICH_LR15_EL2:
2090 getGICv3CPUInterface(tc).setMiscReg(misc_reg, newVal);
2091 return;
2092 case MISCREG_ZCR_EL3:
2093 case MISCREG_ZCR_EL2:
2094 case MISCREG_ZCR_EL1:
2095 tc->getDecoderPtr()->setSveLen(
2096 (getCurSveVecLenInBits(tc) >> 7) - 1);
2097 break;
2098 }
2099 }
2100 setMiscRegNoEffect(misc_reg, newVal);
2101 }
2102
2103 BaseISADevice &
2104 ISA::getGenericTimer(ThreadContext *tc)
2105 {
2106 // We only need to create an ISA interface the first time we try
2107 // to access the timer.
2108 if (timer)
2109 return *timer.get();
2110
2111 assert(system);
2112 GenericTimer *generic_timer(system->getGenericTimer());
2113 if (!generic_timer) {
2114 panic("Trying to get a generic timer from a system that hasn't "
2115 "been configured to use a generic timer.\n");
2116 }
2117
2118 timer.reset(new GenericTimerISA(*generic_timer, tc->contextId()));
2119 timer->setThreadContext(tc);
2120
2121 return *timer.get();
2122 }
2123
2124 BaseISADevice &
2125 ISA::getGICv3CPUInterface(ThreadContext *tc)
2126 {
2127 panic_if(!gicv3CpuInterface, "GICV3 cpu interface is not registered!");
2128 return *gicv3CpuInterface.get();
2129 }
2130
2131 unsigned
2132 ISA::getCurSveVecLenInBits(ThreadContext *tc) const
2133 {
2134 if (!FullSystem) {
2135 return sveVL * 128;
2136 }
2137
2138 panic_if(!tc,
2139 "A ThreadContext is needed to determine the SVE vector length "
2140 "in full-system mode");
2141
2142 CPSR cpsr = miscRegs[MISCREG_CPSR];
2143 ExceptionLevel el = (ExceptionLevel) (uint8_t) cpsr.el;
2144
2145 unsigned len = 0;
2146
2147 if (el == EL1 || (el == EL0 && !ELIsInHost(tc, el))) {
2148 len = static_cast<ZCR>(miscRegs[MISCREG_ZCR_EL1]).len;
2149 }
2150
2151 if (el == EL2 || (el == EL0 && ELIsInHost(tc, el))) {
2152 len = static_cast<ZCR>(miscRegs[MISCREG_ZCR_EL2]).len;
2153 } else if (haveVirtualization && !inSecureState(tc) &&
2154 (el == EL0 || el == EL1)) {
2155 len = std::min(
2156 len,
2157 static_cast<unsigned>(
2158 static_cast<ZCR>(miscRegs[MISCREG_ZCR_EL2]).len));
2159 }
2160
2161 if (el == EL3) {
2162 len = static_cast<ZCR>(miscRegs[MISCREG_ZCR_EL3]).len;
2163 } else if (haveSecurity) {
2164 len = std::min(
2165 len,
2166 static_cast<unsigned>(
2167 static_cast<ZCR>(miscRegs[MISCREG_ZCR_EL3]).len));
2168 }
2169
2170 len = std::min(len, sveVL - 1);
2171
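// e.g. with ZCR_ELx.LEN = 3 and sveVL = 4 this yields min(3, 3) + 1 = 4
// quadwords, i.e. a 512-bit vector length.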
2172 return (len + 1) * 128;
2173 }
2174
2175 void
2176 ISA::zeroSveVecRegUpperPart(VecRegContainer &vc, unsigned eCount)
2177 {
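// Keep the bottom 128 bits (the AdvSIMD view of the register) and zero
// the remaining 64-bit elements of the SVE Z register.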
2178 auto vv = vc.as<uint64_t>();
2179 for (int i = 2; i < eCount; ++i) {
2180 vv[i] = 0;
2181 }
2182 }
2183
2184 ISA::MiscRegLUTEntryInitializer::chain
2185 ISA::MiscRegLUTEntryInitializer::highest(ArmSystem *const sys) const
2186 {
2187 switch (FullSystem ? sys->highestEL() : EL1) {
2188 case EL0:
2189 case EL1: priv(); break;
2190 case EL2: hyp(); break;
2191 case EL3: mon(); break;
2192 }
2193 return *this;
2194 }
2195
2196 } // namespace ArmISA
2197
2198 ArmISA::ISA *
2199 ArmISAParams::create()
2200 {
2201 return new ArmISA::ISA(this);
2202 }