# 4K | 64K | !16K | !BigEndEL0 | !SNSMem | !BigEnd | 8b ASID | 40b PA
id_aa64mmfr0_el1 = Param.UInt64(0x0000000000f00002,
"AArch64 Memory Model Feature Register 0")
- # PAN | HPDS
- id_aa64mmfr1_el1 = Param.UInt64(0x0000000000101000,
+ # PAN | HPDS | VHE
+ id_aa64mmfr1_el1 = Param.UInt64(0x0000000000101100,
"AArch64 Memory Model Feature Register 1")
id_aa64mmfr2_el1 = Param.UInt64(0x0000000000000000,
"AArch64 Memory Model Feature Register 2")
uint32_t &immediate) const
{
const CPTR cptr = tc->readMiscReg(MISCREG_CPTR_EL2);
+ const SCTLR sctlr = tc->readMiscReg(MISCREG_SCTLR_EL1);
+ const SCTLR sctlr2 = tc->readMiscReg(MISCREG_SCTLR_EL2);
const HCR hcr = tc->readMiscReg(MISCREG_HCR_EL2);
const SCR scr = tc->readMiscReg(MISCREG_SCR_EL3);
- const CPSR cpsr = tc->readMiscReg(MISCREG_CPSR);
+ const HDCR mdcr = tc->readMiscReg(MISCREG_MDCR_EL2);
bool trap_to_hyp = false;
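+ // Each case now checks EL2Enabled() explicitly rather than relying on
+ // a single !inSecureState() guard, since SCR_EL3.EEL2 can enable EL2
+ // in Secure state.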
- if (!inSecureState(scr, cpsr) && (el != EL2)) {
- switch (misc_reg) {
- // FP/SIMD regs
- case MISCREG_FPCR:
- case MISCREG_FPSR:
- case MISCREG_FPEXC32_EL2:
- trap_to_hyp = cptr.tfp;
+ switch (misc_reg) {
+ case MISCREG_IMPDEF_UNIMPL:
+ trap_to_hyp = EL2Enabled(tc) && hcr.tidcp && el == EL1;
+ break;
+ // GICv3 regs
+ case MISCREG_ICC_SGI0R_EL1:
+ {
+ auto *isa = static_cast<ArmISA::ISA *>(tc->getIsaPtr());
+ if (isa->haveGICv3CpuIfc())
+ trap_to_hyp = EL2Enabled(tc) && hcr.fmo && el == EL1;
+ }
+ break;
+ case MISCREG_ICC_SGI1R_EL1:
+ case MISCREG_ICC_ASGI1R_EL1:
+ {
+ auto *isa = static_cast<ArmISA::ISA *>(tc->getIsaPtr());
+ if (isa->haveGICv3CpuIfc())
+ trap_to_hyp = EL2Enabled(tc) && hcr.imo && el == EL1;
+ }
+ break;
+ case MISCREG_FPCR:
+ case MISCREG_FPSR:
+ case MISCREG_FPEXC32_EL2:
+ {
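+ // With HCR_EL2.E2H set, CPTR_EL2 uses the CPACR-style FPEN field
+ // (0b00 and 0b10 trap) instead of the TFP bit.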
+ bool from_el2 = (el == EL2) && (scr.ns || scr.eel2) &&
+ ELIs64(tc, EL2) &&
+ ((!hcr.e2h && cptr.tfp) ||
+ (hcr.e2h && (cptr.fpen == 0x0 ||
+ cptr.fpen == 0x2)));
+ bool from_el1 = (el == EL1) && hcr.nv &&
+ (!hcr.e2h || (hcr.e2h && !hcr.tge));
+ trap_to_hyp = from_el2 || from_el1;
ec = EC_TRAPPED_SIMD_FP;
immediate = 0x1E00000;
- break;
- // CPACR
- case MISCREG_CPACR_EL1:
- trap_to_hyp = cptr.tcpac && el == EL1;
- break;
- // Virtual memory control regs
- case MISCREG_SCTLR_EL1:
- case MISCREG_TTBR0_EL1:
- case MISCREG_TTBR1_EL1:
- case MISCREG_TCR_EL1:
- case MISCREG_ESR_EL1:
- case MISCREG_FAR_EL1:
- case MISCREG_AFSR0_EL1:
- case MISCREG_AFSR1_EL1:
- case MISCREG_MAIR_EL1:
- case MISCREG_AMAIR_EL1:
- case MISCREG_CONTEXTIDR_EL1:
- trap_to_hyp =
- ((hcr.trvm && miscRead) || (hcr.tvm && !miscRead)) &&
- el == EL1;
- break;
- // TLB maintenance instructions
- case MISCREG_TLBI_VMALLE1:
- case MISCREG_TLBI_VAE1_Xt:
- case MISCREG_TLBI_ASIDE1_Xt:
- case MISCREG_TLBI_VAAE1_Xt:
- case MISCREG_TLBI_VALE1_Xt:
- case MISCREG_TLBI_VAALE1_Xt:
- case MISCREG_TLBI_VMALLE1IS:
- case MISCREG_TLBI_VAE1IS_Xt:
- case MISCREG_TLBI_ASIDE1IS_Xt:
- case MISCREG_TLBI_VAAE1IS_Xt:
- case MISCREG_TLBI_VALE1IS_Xt:
- case MISCREG_TLBI_VAALE1IS_Xt:
- trap_to_hyp = hcr.ttlb && el == EL1;
- break;
- // Cache maintenance instructions to the point of unification
- case MISCREG_IC_IVAU_Xt:
- case MISCREG_ICIALLU:
- case MISCREG_ICIALLUIS:
- case MISCREG_DC_CVAU_Xt:
- trap_to_hyp = hcr.tpu && el <= EL1;
- break;
- // Data/Unified cache maintenance instructions to the
- // point of coherency
- case MISCREG_DC_IVAC_Xt:
- case MISCREG_DC_CIVAC_Xt:
- case MISCREG_DC_CVAC_Xt:
- trap_to_hyp = hcr.tpc && el <= EL1;
- break;
- // Data/Unified cache maintenance instructions by set/way
- case MISCREG_DC_ISW_Xt:
- case MISCREG_DC_CSW_Xt:
- case MISCREG_DC_CISW_Xt:
- trap_to_hyp = hcr.tsw && el == EL1;
- break;
- // ACTLR
- case MISCREG_ACTLR_EL1:
- trap_to_hyp = hcr.tacr && el == EL1;
- break;
-
- case MISCREG_APDAKeyHi_EL1:
- case MISCREG_APDAKeyLo_EL1:
- case MISCREG_APDBKeyHi_EL1:
- case MISCREG_APDBKeyLo_EL1:
- case MISCREG_APGAKeyHi_EL1:
- case MISCREG_APGAKeyLo_EL1:
- case MISCREG_APIAKeyHi_EL1:
- case MISCREG_APIAKeyLo_EL1:
- case MISCREG_APIBKeyHi_EL1:
- case MISCREG_APIBKeyLo_EL1:
- trap_to_hyp = el==EL1 && hcr.apk == 0;
- break;
- // @todo: Trap implementation-dependent functionality based on
- // hcr.tidcp
+ }
+ break;
+ case MISCREG_CPACR_EL1:
+ trap_to_hyp = EL2Enabled(tc) && (el == EL1) && cptr.tcpac;
+ break;
+ case MISCREG_SCTLR_EL1:
+ case MISCREG_TTBR0_EL1:
+ case MISCREG_TTBR1_EL1:
+ case MISCREG_TCR_EL1:
+ case MISCREG_ESR_EL1:
+ case MISCREG_FAR_EL1:
+ case MISCREG_AFSR0_EL1:
+ case MISCREG_AFSR1_EL1:
+ case MISCREG_MAIR_EL1:
+ case MISCREG_AMAIR_EL1:
+ case MISCREG_CONTEXTIDR_EL1:
+ {
+ bool tvm = miscRead ? hcr.trvm : hcr.tvm;
+ trap_to_hyp = EL2Enabled(tc) && (el == EL1) && tvm;
+ }
+ break;
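+ // Accesses to the *_EL12 aliases from EL1 trap when nested
+ // virtualization is active (HCR_EL2.NV) unless NV2 memory-backed
+ // redirection applies.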
+ case MISCREG_CPACR_EL12:
+ case MISCREG_SCTLR_EL12:
+ case MISCREG_TTBR0_EL12:
+ case MISCREG_TTBR1_EL12:
+ case MISCREG_TCR_EL12:
+ case MISCREG_ESR_EL12:
+ case MISCREG_FAR_EL12:
+ case MISCREG_AFSR0_EL12:
+ case MISCREG_AFSR1_EL12:
+ case MISCREG_MAIR_EL12:
+ case MISCREG_AMAIR_EL12:
+ case MISCREG_CONTEXTIDR_EL12:
+ case MISCREG_SPSR_EL12:
+ case MISCREG_ELR_EL12:
+ case MISCREG_VBAR_EL12:
+ trap_to_hyp = EL2Enabled(tc) && (el == EL1) &&
+ (hcr.nv && (hcr.nv1 || !hcr.nv2));
+ break;
+ case MISCREG_TLBI_VMALLE1:
+ case MISCREG_TLBI_VAE1_Xt:
+ case MISCREG_TLBI_ASIDE1_Xt:
+ case MISCREG_TLBI_VAAE1_Xt:
+ case MISCREG_TLBI_VALE1_Xt:
+ case MISCREG_TLBI_VAALE1_Xt:
+// case MISCREG_TLBI_RVAE1:
+// case MISCREG_TLBI_RVAAE1:
+// case MISCREG_TLBI_RVALE1:
+// case MISCREG_TLBI_RVAALE1:
+ case MISCREG_TLBI_VMALLE1IS:
+ case MISCREG_TLBI_VAE1IS_Xt:
+ case MISCREG_TLBI_ASIDE1IS_Xt:
+ case MISCREG_TLBI_VAAE1IS_Xt:
+ case MISCREG_TLBI_VALE1IS_Xt:
+ case MISCREG_TLBI_VAALE1IS_Xt:
+// case MISCREG_TLBI_RVAE1IS:
+// case MISCREG_TLBI_RVAAE1IS:
+// case MISCREG_TLBI_RVALE1IS:
+// case MISCREG_TLBI_RVAALE1IS:
+// case MISCREG_TLBI_VMALLE1OS:
+// case MISCREG_TLBI_VAE1OS:
+// case MISCREG_TLBI_ASIDE1OS:
+// case MISCREG_TLBI_VAAE1OS:
+// case MISCREG_TLBI_VALE1OS:
+// case MISCREG_TLBI_VAALE1OS:
+// case MISCREG_TLBI_RVAE1OS:
+// case MISCREG_TLBI_RVAAE1OS:
+// case MISCREG_TLBI_RVALE1OS:
+// case MISCREG_TLBI_RVAALE1OS:
+ trap_to_hyp = EL2Enabled(tc) && (el == EL1) && hcr.ttlb;
+ break;
+ case MISCREG_IC_IVAU_Xt:
+ case MISCREG_ICIALLU:
+ case MISCREG_ICIALLUIS:
+ trap_to_hyp = (el == EL1) && EL2Enabled(tc) && hcr.tpu;
+ break;
+ case MISCREG_DC_CVAU_Xt:
+ {
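+ // EL0 accesses: in the host regime (E2H && TGE) the trap is gated
+ // by SCTLR_EL2.UCI; otherwise SCTLR_EL1.UCI and HCR_EL2.TPU decide
+ // whether the operation traps to EL2.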
+ const bool el2_en = EL2Enabled(tc);
+ if (el == EL0 && el2_en) {
+ const bool in_host = hcr.e2h && hcr.tge;
+ const bool general_trap = el2_en && !in_host && hcr.tge &&
+ !sctlr.uci;
+ const bool tpu_trap = el2_en && !in_host && hcr.tpu;
+ const bool host_trap = el2_en && in_host && !sctlr2.uci;
+ trap_to_hyp = general_trap || tpu_trap || host_trap;
+ } else if (el == EL1 && el2_en) {
+ trap_to_hyp = hcr.tpu;
+ }
+ }
+ break;
+ case MISCREG_DC_IVAC_Xt:
+ trap_to_hyp = EL2Enabled(tc) && el == EL1 && hcr.tpc;
+ break;
+ case MISCREG_DC_CVAC_Xt:
+// case MISCREG_DC_CVAP_Xt:
+ case MISCREG_DC_CIVAC_Xt:
+ {
+ const bool el2_en = EL2Enabled(tc);
+ if (el == EL0 && el2_en) {
- // ID regs, group 3
- case MISCREG_ID_PFR0_EL1:
- case MISCREG_ID_PFR1_EL1:
- case MISCREG_ID_DFR0_EL1:
- case MISCREG_ID_AFR0_EL1:
- case MISCREG_ID_MMFR0_EL1:
- case MISCREG_ID_MMFR1_EL1:
- case MISCREG_ID_MMFR2_EL1:
- case MISCREG_ID_MMFR3_EL1:
- case MISCREG_ID_ISAR0_EL1:
- case MISCREG_ID_ISAR1_EL1:
- case MISCREG_ID_ISAR2_EL1:
- case MISCREG_ID_ISAR3_EL1:
- case MISCREG_ID_ISAR4_EL1:
- case MISCREG_ID_ISAR5_EL1:
- case MISCREG_MVFR0_EL1:
- case MISCREG_MVFR1_EL1:
- case MISCREG_MVFR2_EL1:
- case MISCREG_ID_AA64PFR0_EL1:
- case MISCREG_ID_AA64PFR1_EL1:
- case MISCREG_ID_AA64DFR0_EL1:
- case MISCREG_ID_AA64DFR1_EL1:
- case MISCREG_ID_AA64ISAR0_EL1:
- case MISCREG_ID_AA64ISAR1_EL1:
- case MISCREG_ID_AA64MMFR0_EL1:
- case MISCREG_ID_AA64MMFR1_EL1:
- case MISCREG_ID_AA64MMFR2_EL1:
- case MISCREG_ID_AA64AFR0_EL1:
- case MISCREG_ID_AA64AFR1_EL1:
- assert(miscRead);
- trap_to_hyp = hcr.tid3 && el == EL1;
- break;
- // ID regs, group 2
- case MISCREG_CTR_EL0:
- case MISCREG_CCSIDR_EL1:
- case MISCREG_CLIDR_EL1:
- case MISCREG_CSSELR_EL1:
- trap_to_hyp = hcr.tid2 && el <= EL1;
- break;
- // ID regs, group 1
- case MISCREG_AIDR_EL1:
- case MISCREG_REVIDR_EL1:
- assert(miscRead);
- trap_to_hyp = hcr.tid1 && el == EL1;
- break;
- case MISCREG_IMPDEF_UNIMPL:
- trap_to_hyp = hcr.tidcp && el == EL1;
- break;
- // GICv3 regs
- case MISCREG_ICC_SGI0R_EL1:
- {
- auto *isa = static_cast<ArmISA::ISA *>(tc->getIsaPtr());
- if (isa->haveGICv3CpuIfc())
- trap_to_hyp = hcr.fmo && el == EL1;
+ const bool in_host = hcr.e2h && hcr.tge;
+ const bool general_trap = el2_en && !in_host && hcr.tge &&
+ !sctlr.uci;
+ const bool tpc_trap = el2_en && !in_host && hcr.tpc;
+ const bool host_trap = el2_en && in_host && !sctlr2.uci;
+ trap_to_hyp = general_trap || tpc_trap || host_trap;
+ } else if (el == EL1 && el2_en) {
+ trap_to_hyp = hcr.tpc;
+ }
+ }
+ break;
+ case MISCREG_DC_ISW_Xt:
+ case MISCREG_DC_CSW_Xt:
+ case MISCREG_DC_CISW_Xt:
+ trap_to_hyp = EL2Enabled(tc) && (el == EL1) && hcr.tsw;
+ break;
+ case MISCREG_ACTLR_EL1:
+ trap_to_hyp = EL2Enabled(tc) && (el == EL1) && hcr.tacr;
+ break;
+ case MISCREG_APDAKeyHi_EL1:
+ case MISCREG_APDAKeyLo_EL1:
+ case MISCREG_APDBKeyHi_EL1:
+ case MISCREG_APDBKeyLo_EL1:
+ case MISCREG_APGAKeyHi_EL1:
+ case MISCREG_APGAKeyLo_EL1:
+ case MISCREG_APIAKeyHi_EL1:
+ case MISCREG_APIAKeyLo_EL1:
+ case MISCREG_APIBKeyHi_EL1:
+ case MISCREG_APIBKeyLo_EL1:
+ trap_to_hyp = EL2Enabled(tc) && el == EL1 && !hcr.apk;
+ break;
+ case MISCREG_ID_PFR0_EL1:
+ case MISCREG_ID_PFR1_EL1:
+ //case MISCREG_ID_PFR2_EL1:
+ case MISCREG_ID_DFR0_EL1:
+ case MISCREG_ID_AFR0_EL1:
+ case MISCREG_ID_MMFR0_EL1:
+ case MISCREG_ID_MMFR1_EL1:
+ case MISCREG_ID_MMFR2_EL1:
+ case MISCREG_ID_MMFR3_EL1:
+ //case MISCREG_ID_MMFR4_EL1:
+ case MISCREG_ID_ISAR0_EL1:
+ case MISCREG_ID_ISAR1_EL1:
+ case MISCREG_ID_ISAR2_EL1:
+ case MISCREG_ID_ISAR3_EL1:
+ case MISCREG_ID_ISAR4_EL1:
+ case MISCREG_ID_ISAR5_EL1:
+ case MISCREG_MVFR0_EL1:
+ case MISCREG_MVFR1_EL1:
+ case MISCREG_MVFR2_EL1:
+ case MISCREG_ID_AA64PFR0_EL1:
+ case MISCREG_ID_AA64PFR1_EL1:
+ case MISCREG_ID_AA64DFR0_EL1:
+ case MISCREG_ID_AA64DFR1_EL1:
+ case MISCREG_ID_AA64ISAR0_EL1:
+ case MISCREG_ID_AA64ISAR1_EL1:
+ case MISCREG_ID_AA64MMFR0_EL1:
+ case MISCREG_ID_AA64MMFR1_EL1:
+ case MISCREG_ID_AA64MMFR2_EL1:
+ case MISCREG_ID_AA64AFR0_EL1:
+ case MISCREG_ID_AA64AFR1_EL1:
+ trap_to_hyp = EL2Enabled(tc) && el == EL1 && hcr.tid3;
+ break;
+ case MISCREG_CTR_EL0:
+ {
+ const bool el2_en = EL2Enabled(tc);
+ if (el == EL0 && el2_en) {
+ const bool in_host = hcr.e2h && hcr.tge;
+ const bool general_trap = el2_en && !in_host && hcr.tge &&
+ !sctlr.uct;
+ const bool tid_trap = el2_en && !in_host && hcr.tid2;
+ const bool host_trap = el2_en && in_host && !sctlr2.uct;
+ trap_to_hyp = general_trap || tid_trap || host_trap;
+ } else if (el == EL1 && el2_en) {
+ trap_to_hyp = hcr.tid2;
}
- break;
- case MISCREG_ICC_SGI1R_EL1:
- case MISCREG_ICC_ASGI1R_EL1:
- {
- auto *isa = static_cast<ArmISA::ISA *>(tc->getIsaPtr());
- if (isa->haveGICv3CpuIfc())
- trap_to_hyp = hcr.imo && el == EL1;
+ }
+ break;
+ case MISCREG_CCSIDR_EL1:
+// case MISCREG_CCSIDR2_EL1:
+ case MISCREG_CLIDR_EL1:
+ case MISCREG_CSSELR_EL1:
+ trap_to_hyp = EL2Enabled(tc) && (el == EL1) && hcr.tid2;
+ break;
+ case MISCREG_AIDR_EL1:
+ case MISCREG_REVIDR_EL1:
+ trap_to_hyp = EL2Enabled(tc) && (el == EL1) && hcr.tid1;
+ break;
+ // Generic Timer
+ case MISCREG_CNTFRQ_EL0 ... MISCREG_CNTVOFF_EL2:
+ trap_to_hyp = el <= EL1 &&
+ isGenericTimerSystemAccessTrapEL2(misc_reg, tc);
+ break;
+ case MISCREG_DAIF:
+ trap_to_hyp = EL2Enabled(tc) && el == EL0 &&
+ (hcr.tge && (hcr.e2h || !sctlr.uma));
+ break;
+ case MISCREG_SPSR_EL1:
+ case MISCREG_ELR_EL1:
+ case MISCREG_VBAR_EL1:
+ trap_to_hyp = EL2Enabled(tc) && (el == EL1) && hcr.nv1 && !hcr.nv2;
+ break;
+ case MISCREG_HCR_EL2:
+ case MISCREG_HSTR_EL2:
+ case MISCREG_SP_EL1:
+ case MISCREG_TPIDR_EL2:
+ case MISCREG_VTCR_EL2:
+ case MISCREG_VTTBR_EL2:
+ trap_to_hyp = EL2Enabled(tc) && (el == EL1) && hcr.nv && !hcr.nv2;
+ break;
+// case MISCREG_AT_S1E1WP_Xt:
+// case MISCREG_AT_S1E1RP_Xt:
+ case MISCREG_AT_S1E1R_Xt:
+ case MISCREG_AT_S1E1W_Xt:
+ case MISCREG_AT_S1E0W_Xt:
+ case MISCREG_AT_S1E0R_Xt:
+ trap_to_hyp = EL2Enabled(tc) && (el == EL1) && hcr.at;
+ break;
+ case MISCREG_ACTLR_EL2:
+ case MISCREG_AFSR0_EL2:
+ case MISCREG_AFSR1_EL2:
+ case MISCREG_AMAIR_EL2:
+ case MISCREG_CONTEXTIDR_EL2:
+ case MISCREG_CPTR_EL2:
+ case MISCREG_DACR32_EL2:
+ case MISCREG_ESR_EL2:
+ case MISCREG_FAR_EL2:
+ case MISCREG_HACR_EL2:
+ case MISCREG_HPFAR_EL2:
+ case MISCREG_MAIR_EL2:
+// case MISCREG_RMR_EL2:
+ case MISCREG_SCTLR_EL2:
+ case MISCREG_TCR_EL2:
+ case MISCREG_TTBR0_EL2:
+ case MISCREG_TTBR1_EL2:
+ case MISCREG_VBAR_EL2:
+ case MISCREG_VMPIDR_EL2:
+ case MISCREG_VPIDR_EL2:
+ case MISCREG_TLBI_ALLE1:
+ case MISCREG_TLBI_ALLE1IS:
+// case MISCREG_TLBI_ALLE1OS:
+ case MISCREG_TLBI_ALLE2:
+ case MISCREG_TLBI_ALLE2IS:
+// case MISCREG_TLBI_ALLE2OS:
+ case MISCREG_TLBI_IPAS2E1_Xt:
+ case MISCREG_TLBI_IPAS2E1IS_Xt:
+// case MISCREG_TLBI_IPAS2E1OS:
+ case MISCREG_TLBI_IPAS2LE1_Xt:
+ case MISCREG_TLBI_IPAS2LE1IS_Xt:
+// case MISCREG_TLBI_IPAS2LE1OS:
+// case MISCREG_TLBI_RIPAS2E1:
+// case MISCREG_TLBI_RIPAS2E1IS:
+// case MISCREG_TLBI_RIPAS2E1OS:
+// case MISCREG_TLBI_RIPAS2LE1:
+// case MISCREG_TLBI_RIPAS2LE1IS:
+// case MISCREG_TLBI_RIPAS2LE1OS:
+// case MISCREG_TLBI_RVAE2:
+// case MISCREG_TLBI_RVAE2IS:
+// case MISCREG_TLBI_RVAE2OS:
+// case MISCREG_TLBI_RVALE2:
+// case MISCREG_TLBI_RVALE2IS:
+// case MISCREG_TLBI_RVALE2OS:
+ case MISCREG_TLBI_VAE2_Xt:
+ case MISCREG_TLBI_VAE2IS_Xt:
+// case MISCREG_TLBI_VAE2OS:
+ case MISCREG_TLBI_VALE2_Xt:
+ case MISCREG_TLBI_VALE2IS_Xt:
+// case MISCREG_TLBI_VALE2OS:
+ case MISCREG_TLBI_VMALLS12E1:
+ case MISCREG_TLBI_VMALLS12E1IS:
+// case MISCREG_TLBI_VMALLS12E1OS:
+ case MISCREG_AT_S1E2W_Xt:
+ case MISCREG_AT_S1E2R_Xt:
+ case MISCREG_AT_S12E1R_Xt:
+ case MISCREG_AT_S12E1W_Xt:
+ case MISCREG_AT_S12E0W_Xt:
+ case MISCREG_AT_S12E0R_Xt:
+ case MISCREG_SPSR_UND:
+ case MISCREG_SPSR_IRQ:
+ case MISCREG_SPSR_FIQ:
+ case MISCREG_SPSR_ABT:
+ case MISCREG_SPSR_EL2:
+ case MISCREG_ELR_EL2:
+ case MISCREG_IFSR32_EL2:
+ case MISCREG_DBGVCR32_EL2:
+ case MISCREG_MDCR_EL2:
+ trap_to_hyp = EL2Enabled(tc) && (el == EL1) && hcr.nv;
+ break;
+// case MISCREG_VSTTBR_EL2:
+// case MISCREG_VSTCR_EL2:
+// trap_to_hyp = (el == EL1) && !scr.ns && scr.eel2 && ELIs64(tc, EL2)
+// && !hcr.nv2 && hcr.nv && (!hcr.e2h || (hcr.e2h && !hcr.tge));
+// break;
+
+ //case MISCREG_LORC_EL1:
+ //case MISCREG_LOREA_EL1:
+ //case MISCREG_LORID_EL1:
+ //case MISCREG_LORN_EL1:
+ //case MISCREG_LORSA_EL1:
+ // trap_to_hyp = (el == EL1) && (scr.ns || scr.eel2) && ELIs64(tc, EL2)
+ // && hcr.tlor && (!hcr.e2h || (hcr.e2h && !hcr.tge));
+ // break;
+
+ case MISCREG_DC_ZVA_Xt:
+ {
+ const bool el2_en = EL2Enabled(tc);
+ if (el == EL0 && el2_en) {
+ const bool in_host = hcr.e2h && hcr.tge;
+ const bool general_trap = el2_en && !in_host && hcr.tge &&
+ !sctlr.dze;
+ const bool tdz_trap = el2_en && !in_host && hcr.tdz;
+ const bool host_trap = el2_en && in_host && !sctlr2.dze;
+ trap_to_hyp = general_trap || tdz_trap || host_trap;
+ } else if (el == EL1 && el2_en) {
+ trap_to_hyp = hcr.tdz;
}
- break;
- // Generic Timer
- case MISCREG_CNTFRQ_EL0 ... MISCREG_CNTVOFF_EL2:
- trap_to_hyp = el <= EL1 &&
- isGenericTimerSystemAccessTrapEL2(misc_reg, tc);
- break;
- default:
- break;
}
+ break;
+ case MISCREG_DBGBVR0_EL1:
+ case MISCREG_DBGBVR1_EL1:
+ case MISCREG_DBGBVR2_EL1:
+ case MISCREG_DBGBVR3_EL1:
+ case MISCREG_DBGBVR4_EL1:
+ case MISCREG_DBGBVR5_EL1:
+ case MISCREG_DBGBVR6_EL1:
+ case MISCREG_DBGBVR7_EL1:
+ case MISCREG_DBGBVR8_EL1:
+ case MISCREG_DBGBVR9_EL1:
+ case MISCREG_DBGBVR10_EL1:
+ case MISCREG_DBGBVR11_EL1:
+ case MISCREG_DBGBVR12_EL1:
+ case MISCREG_DBGBVR13_EL1:
+ case MISCREG_DBGBVR14_EL1:
+ case MISCREG_DBGBVR15_EL1:
+ case MISCREG_DBGBCR0_EL1:
+ case MISCREG_DBGBCR1_EL1:
+ case MISCREG_DBGBCR2_EL1:
+ case MISCREG_DBGBCR3_EL1:
+ case MISCREG_DBGBCR4_EL1:
+ case MISCREG_DBGBCR5_EL1:
+ case MISCREG_DBGBCR6_EL1:
+ case MISCREG_DBGBCR7_EL1:
+ case MISCREG_DBGBCR8_EL1:
+ case MISCREG_DBGBCR9_EL1:
+ case MISCREG_DBGBCR10_EL1:
+ case MISCREG_DBGBCR11_EL1:
+ case MISCREG_DBGBCR12_EL1:
+ case MISCREG_DBGBCR13_EL1:
+ case MISCREG_DBGBCR14_EL1:
+ case MISCREG_DBGBCR15_EL1:
+ case MISCREG_DBGWVR0_EL1:
+ case MISCREG_DBGWVR1_EL1:
+ case MISCREG_DBGWVR2_EL1:
+ case MISCREG_DBGWVR3_EL1:
+ case MISCREG_DBGWVR4_EL1:
+ case MISCREG_DBGWVR5_EL1:
+ case MISCREG_DBGWVR6_EL1:
+ case MISCREG_DBGWVR7_EL1:
+ case MISCREG_DBGWVR8_EL1:
+ case MISCREG_DBGWVR9_EL1:
+ case MISCREG_DBGWVR10_EL1:
+ case MISCREG_DBGWVR11_EL1:
+ case MISCREG_DBGWVR12_EL1:
+ case MISCREG_DBGWVR13_EL1:
+ case MISCREG_DBGWVR14_EL1:
+ case MISCREG_DBGWVR15_EL1:
+ case MISCREG_DBGWCR0_EL1:
+ case MISCREG_DBGWCR1_EL1:
+ case MISCREG_DBGWCR2_EL1:
+ case MISCREG_DBGWCR3_EL1:
+ case MISCREG_DBGWCR4_EL1:
+ case MISCREG_DBGWCR5_EL1:
+ case MISCREG_DBGWCR6_EL1:
+ case MISCREG_DBGWCR7_EL1:
+ case MISCREG_DBGWCR8_EL1:
+ case MISCREG_DBGWCR9_EL1:
+ case MISCREG_DBGWCR10_EL1:
+ case MISCREG_DBGWCR11_EL1:
+ case MISCREG_DBGWCR12_EL1:
+ case MISCREG_DBGWCR13_EL1:
+ case MISCREG_DBGWCR14_EL1:
+ case MISCREG_DBGWCR15_EL1:
+ case MISCREG_MDCCINT_EL1:
+ trap_to_hyp = EL2Enabled(tc) && (el == EL1) && mdcr.tda;
+ break;
+ case MISCREG_ZCR_EL1:
+ {
+ bool from_el1 = (el == EL1) && EL2Enabled(tc) &&
+ ELIs64(tc, EL2) && ((!hcr.e2h && cptr.tz) ||
+ (hcr.e2h && ((cptr.zen & 0x1) == 0x0)));
+ bool from_el2 = (el == EL2) && ((!hcr.e2h && cptr.tz) ||
+ (hcr.e2h && ((cptr.zen & 0x1) == 0x0)));
+ trap_to_hyp = from_el1 || from_el2;
+ }
+ ec = EC_TRAPPED_SVE;
+ immediate = 0;
+ break;
+ case MISCREG_ZCR_EL2:
+ {
+ bool from_el1 = (el == EL1) && EL2Enabled(tc) && hcr.nv;
+ bool from_el2 = (el == EL2) && ((!hcr.e2h && cptr.tz) ||
+ (hcr.e2h && ((cptr.zen & 0x1) == 0x0)));
+ trap_to_hyp = from_el1 || from_el2;
+ ec = from_el1 ? EC_TRAPPED_MSR_MRS_64 : EC_TRAPPED_SVE;
+ }
+ immediate = 0;
+ break;
+ default:
+ break;
}
return trap_to_hyp;
}
{
const CPTR cptr = tc->readMiscReg(MISCREG_CPTR_EL3);
const SCR scr = tc->readMiscReg(MISCREG_SCR_EL3);
+ const HDCR mdcr = tc->readMiscReg(MISCREG_MDCR_EL3);
+ const HCR hcr = tc->readMiscReg(MISCREG_HCR_EL2);
bool trap_to_mon = false;
switch (misc_reg) {
case MISCREG_FPCR:
case MISCREG_FPSR:
case MISCREG_FPEXC32_EL2:
- trap_to_mon = cptr.tfp;
+ trap_to_mon = cptr.tfp && ELIs64(tc, EL3);
ec = EC_TRAPPED_SIMD_FP;
immediate = 0x1E00000;
break;
// CPACR, CPTR
+ case MISCREG_CPACR_EL12:
+ trap_to_mon = ((el == EL2 && cptr.tcpac && ELIs64(tc, EL3)) ||
+ (el == EL1 && cptr.tcpac && ELIs64(tc, EL3) &&
+ (!hcr.nv2 || hcr.nv1 || !hcr.nv)));
+ break;
case MISCREG_CPACR_EL1:
- if (el == EL1 || el == EL2) {
- trap_to_mon = cptr.tcpac;
- }
+ trap_to_mon = el <= EL2 && cptr.tcpac && ELIs64(tc, EL3);
break;
case MISCREG_CPTR_EL2:
if (el == EL2) {
trap_to_mon = cptr.tcpac;
}
break;
+// case MISCREG_LORC_EL1:
+// case MISCREG_LOREA_EL1:
+// case MISCREG_LORID_EL1:
+// case MISCREG_LORN_EL1:
+// case MISCREG_LORSA_EL1:
+// trap_to_mon = (el <= EL2) && scr.ns && ELIs64(tc, EL3)
+// && hcr.tlor && (!hcr.e2h || (hcr.e2h && !hcr.tge));
+// break;
+ case MISCREG_MDCCSR_EL0:
+ trap_to_mon = (el <= EL2) && ELIs64(tc, EL3) && mdcr.tda == 0x1;
+ break;
case MISCREG_APDAKeyHi_EL1:
case MISCREG_APDAKeyLo_EL1:
case MISCREG_APDBKeyHi_EL1:
case MISCREG_APIAKeyLo_EL1:
case MISCREG_APIBKeyHi_EL1:
case MISCREG_APIBKeyLo_EL1:
- trap_to_mon = (el==EL1 || el==EL2) && scr.apk==0 && ELIs64(tc, EL3);
+ trap_to_mon = (el == EL1 || el == EL2) && scr.apk == 0 &&
+ ELIs64(tc, EL3);
break;
// Generic Timer
case MISCREG_CNTFRQ_EL0 ... MISCREG_CNTVOFF_EL2:
trap_to_mon = el == EL1 &&
isGenericTimerSystemAccessTrapEL3(misc_reg, tc);
break;
+ case MISCREG_DBGBVR0_EL1:
+ case MISCREG_DBGBVR1_EL1:
+ case MISCREG_DBGBVR2_EL1:
+ case MISCREG_DBGBVR3_EL1:
+ case MISCREG_DBGBVR4_EL1:
+ case MISCREG_DBGBVR5_EL1:
+ case MISCREG_DBGBVR6_EL1:
+ case MISCREG_DBGBVR7_EL1:
+ case MISCREG_DBGBVR8_EL1:
+ case MISCREG_DBGBVR9_EL1:
+ case MISCREG_DBGBVR10_EL1:
+ case MISCREG_DBGBVR11_EL1:
+ case MISCREG_DBGBVR12_EL1:
+ case MISCREG_DBGBVR13_EL1:
+ case MISCREG_DBGBVR14_EL1:
+ case MISCREG_DBGBVR15_EL1:
+ case MISCREG_DBGBCR0_EL1:
+ case MISCREG_DBGBCR1_EL1:
+ case MISCREG_DBGBCR2_EL1:
+ case MISCREG_DBGBCR3_EL1:
+ case MISCREG_DBGBCR4_EL1:
+ case MISCREG_DBGBCR5_EL1:
+ case MISCREG_DBGBCR6_EL1:
+ case MISCREG_DBGBCR7_EL1:
+ case MISCREG_DBGBCR8_EL1:
+ case MISCREG_DBGBCR9_EL1:
+ case MISCREG_DBGBCR10_EL1:
+ case MISCREG_DBGBCR11_EL1:
+ case MISCREG_DBGBCR12_EL1:
+ case MISCREG_DBGBCR13_EL1:
+ case MISCREG_DBGBCR14_EL1:
+ case MISCREG_DBGBCR15_EL1:
+ case MISCREG_DBGVCR32_EL2:
+ case MISCREG_DBGWVR0_EL1:
+ case MISCREG_DBGWVR1_EL1:
+ case MISCREG_DBGWVR2_EL1:
+ case MISCREG_DBGWVR3_EL1:
+ case MISCREG_DBGWVR4_EL1:
+ case MISCREG_DBGWVR5_EL1:
+ case MISCREG_DBGWVR6_EL1:
+ case MISCREG_DBGWVR7_EL1:
+ case MISCREG_DBGWVR8_EL1:
+ case MISCREG_DBGWVR9_EL1:
+ case MISCREG_DBGWVR10_EL1:
+ case MISCREG_DBGWVR11_EL1:
+ case MISCREG_DBGWVR12_EL1:
+ case MISCREG_DBGWVR13_EL1:
+ case MISCREG_DBGWVR14_EL1:
+ case MISCREG_DBGWVR15_EL1:
+ case MISCREG_DBGWCR0_EL1:
+ case MISCREG_DBGWCR1_EL1:
+ case MISCREG_DBGWCR2_EL1:
+ case MISCREG_DBGWCR3_EL1:
+ case MISCREG_DBGWCR4_EL1:
+ case MISCREG_DBGWCR5_EL1:
+ case MISCREG_DBGWCR6_EL1:
+ case MISCREG_DBGWCR7_EL1:
+ case MISCREG_DBGWCR8_EL1:
+ case MISCREG_DBGWCR9_EL1:
+ case MISCREG_DBGWCR10_EL1:
+ case MISCREG_DBGWCR11_EL1:
+ case MISCREG_DBGWCR12_EL1:
+ case MISCREG_DBGWCR13_EL1:
+ case MISCREG_DBGWCR14_EL1:
+ case MISCREG_DBGWCR15_EL1:
+ case MISCREG_MDCCINT_EL1:
+ case MISCREG_MDCR_EL2:
+ trap_to_mon = ELIs64(tc, EL3) && mdcr.tda && (el == EL2);
+ break;
+ case MISCREG_ZCR_EL1:
+ trap_to_mon = !cptr.ez && ((el == EL3) ||
+ ((el <= EL2) && ArmSystem::haveEL(tc, EL3) && ELIs64(tc, EL3)));
+ ec = EC_TRAPPED_SVE;
+ immediate = 0;
+ break;
+ case MISCREG_ZCR_EL2:
+ trap_to_mon = !cptr.ez && ((el == EL3) ||
+ ((el == EL2) && ArmSystem::haveEL(tc, EL3) && ELIs64(tc, EL3)));
+ ec = EC_TRAPPED_SVE;
+ immediate = 0;
+ break;
+ case MISCREG_ZCR_EL3:
+ trap_to_mon = !cptr.ez && (el == EL3);
+ ec = EC_TRAPPED_SVE;
+ immediate = 0;
+ break;
default:
break;
}
Fault
ArmStaticInst::checkFPAdvSIMDTrap64(ThreadContext *tc, CPSR cpsr) const
{
- if (ArmSystem::haveVirtualization(tc) && !inSecureState(tc)) {
- HCPTR cptrEnCheck = tc->readMiscReg(MISCREG_CPTR_EL2);
- if (cptrEnCheck.tfp)
+ if (currEL(tc) <= EL2 && EL2Enabled(tc)) {
+ bool trap_el2 = false;
+ CPTR cptr_en_check = tc->readMiscReg(MISCREG_CPTR_EL2);
+ HCR hcr = tc->readMiscReg(MISCREG_HCR_EL2);
+ if (HaveVirtHostExt(tc) && hcr.e2h == 0x1) {
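+ // E2H form of CPTR_EL2 (CPACR-like): FPEN 0b00/0b10 trap FP/SIMD
+ // use; 0b01 traps EL0 only when HCR_EL2.TGE is set.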
+ switch (cptr_en_check.fpen) {
+ case 0:
+ case 2:
+ trap_el2 = !(currEL(tc) == EL1 && hcr.tge == 1);
+ break;
+ case 1:
+ trap_el2 = (currEL(tc) == EL0 && hcr.tge == 1);
+ break;
+ default:
+ trap_el2 = false;
+ break;
+ }
+ } else if (cptr_en_check.tfp) {
+ trap_el2 = true;
+ }
+
+ if (trap_el2) {
return advSIMDFPAccessTrap64(EL2);
+ }
}
if (ArmSystem::haveSecurity(tc)) {
- HCPTR cptrEnCheck = tc->readMiscReg(MISCREG_CPTR_EL3);
- if (cptrEnCheck.tfp)
+ CPTR cptr_en_check = tc->readMiscReg(MISCREG_CPTR_EL3);
+ if (cptr_en_check.tfp) {
return advSIMDFPAccessTrap64(EL3);
+ }
}
return NoFault;
}
if (have_security && ELIs64(tc, EL3)) {
- HCPTR cptrEnCheck = tc->readMiscReg(MISCREG_CPTR_EL3);
- if (cptrEnCheck.tfp)
+ HCPTR cptr_en_check = tc->readMiscReg(MISCREG_CPTR_EL3);
+ if (cptr_en_check.tfp)
return advSIMDFPAccessTrap64(EL3);
}
// Check if access disabled in CPTR_EL2
if (el <= EL2 && EL2Enabled(tc)) {
CPTR cptr_en_check = tc->readMiscReg(MISCREG_CPTR_EL2);
- if (cptr_en_check.tz)
- return sveAccessTrap(EL2);
- if (cptr_en_check.tfp)
- return advSIMDFPAccessTrap64(EL2);
+ HCR hcr = tc->readMiscReg(MISCREG_HCR_EL2);
+ if (HaveVirtHostExt(tc) && hcr.e2h) {
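+ // E2H form of CPTR_EL2 (CPACR-like): ZEN/FPEN 0b00/0b10 trap, and
+ // 0b01 additionally traps EL0 when HCR_EL2.TGE is set.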
+ if (((cptr_en_check.zen & 0x1) == 0x0) ||
+ (cptr_en_check.zen == 0x1 && el == EL0 &&
+ hcr.tge == 0x1)) {
+ return sveAccessTrap(EL2);
+ }
+ if (((cptr_en_check.fpen & 0x1) == 0x0) ||
+ (cptr_en_check.fpen == 0x1 && el == EL0 &&
+ hcr.tge == 0x1)) {
+ return advSIMDFPAccessTrap64(EL2);
+ }
+ } else {
+ if (cptr_en_check.tz == 1)
+ return sveAccessTrap(EL2);
+ if (cptr_en_check.tfp == 1)
+ return advSIMDFPAccessTrap64(EL2);
+ }
}
// Check if access disabled in CPTR_EL3
CPSR cpsr = tc->readMiscReg(MISCREG_CPSR);
+ bool no_vhe = !HaveVirtHostExt(tc);
+ bool amo, fmo, imo;
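+ // With HCR_EL2.TGE set, the AMO/FMO/IMO routing bits behave as 1
+ // unless the VHE host regime is active (E2H set), where they behave
+ // as 0.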
+ if (hcr.tge == 1) {
+ amo = (no_vhe || hcr.e2h == 0);
+ fmo = (no_vhe || hcr.e2h == 0);
+ imo = (no_vhe || hcr.e2h == 0);
+ } else {
+ amo = hcr.amo;
+ fmo = hcr.fmo;
+ imo = hcr.imo;
+ }
+
bool isHypMode = currEL(tc) == EL2;
bool isSecure = inSecureState(tc);
- bool allowVIrq = !cpsr.i && hcr.imo && !isSecure && !isHypMode;
- bool allowVFiq = !cpsr.f && hcr.fmo && !isSecure && !isHypMode;
- bool allowVAbort = !cpsr.a && hcr.amo && !isSecure && !isHypMode;
+ bool allowVIrq = !cpsr.i && imo && !isSecure && !isHypMode;
+ bool allowVFiq = !cpsr.f && fmo && !isSecure && !isHypMode;
+ bool allowVAbort = !cpsr.a && amo && !isSecure && !isHypMode;
if ( !(intStatus || (hcr.vi && allowVIrq) || (hcr.vf && allowVFiq) ||
(hcr.va && allowVAbort)) )
HCR hcr = tc->readMiscReg(MISCREG_HCR);
CPSR cpsr = tc->readMiscReg(MISCREG_CPSR);
+ bool no_vhe = !HaveVirtHostExt(tc);
+ bool amo, fmo, imo;
+ if (hcr.tge == 1) {
+ amo = (no_vhe || hcr.e2h == 0);
+ fmo = (no_vhe || hcr.e2h == 0);
+ imo = (no_vhe || hcr.e2h == 0);
+ } else {
+ amo = hcr.amo;
+ fmo = hcr.fmo;
+ imo = hcr.imo;
+ }
+
// Calculate a few temp vars so we can work out if there's a pending
// virtual interrupt, and if its allowed to happen
// ARM ARM Issue C section B1.9.9, B1.9.11, and B1.9.13
bool isHypMode = currEL(tc) == EL2;
bool isSecure = inSecureState(tc);
- bool allowVIrq = !cpsr.i && hcr.imo && !isSecure && !isHypMode;
- bool allowVFiq = !cpsr.f && hcr.fmo && !isSecure && !isHypMode;
- bool allowVAbort = !cpsr.a && hcr.amo && !isSecure && !isHypMode;
+ bool allowVIrq = !cpsr.i && imo && !isSecure && !isHypMode;
+ bool allowVFiq = !cpsr.f && fmo && !isSecure && !isHypMode;
+ bool allowVAbort = !cpsr.a && amo && !isSecure && !isHypMode;
bool take_irq = takeInt(INT_IRQ);
bool take_fiq = takeInt(INT_FIQ);
miscRegName[misc_reg]);
}
#endif
+ misc_reg = redirectRegVHE(tc, misc_reg);
switch (unflattenMiscReg(misc_reg)) {
case MISCREG_HCR:
miscRegName[misc_reg], val);
}
#endif
+ misc_reg = redirectRegVHE(tc, misc_reg);
+
switch (unflattenMiscReg(misc_reg)) {
case MISCREG_CPACR:
{
}
// AArch64 TLB Invalidate All, EL1
case MISCREG_TLBI_ALLE1:
- case MISCREG_TLBI_VMALLE1:
case MISCREG_TLBI_VMALLS12E1:
// @todo: handle VMID and stage 2 to enable Virtualization
{
tlbiOp(tc);
return;
}
+ case MISCREG_TLBI_VMALLE1:
+ // @todo: handle VMID and stage 2 to enable Virtualization
+ {
+ assert64();
+ scr = readMiscReg(MISCREG_SCR);
+
+ HCR hcr = readMiscReg(MISCREG_HCR_EL2);
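+ // In the VHE host regime (TGE && E2H), TLBI VMALLE1 targets the
+ // EL2&0 translation regime instead of EL1&0.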
+ bool is_host = (hcr.tge && hcr.e2h);
+ ExceptionLevel target_el = is_host ? EL2 : EL1;
+ TLBIALL tlbiOp(target_el, haveSecurity && !scr.ns);
+ tlbiOp(tc);
+ return;
+ }
// AArch64 TLB Invalidate All, EL1, Inner Shareable
case MISCREG_TLBI_ALLE1IS:
- case MISCREG_TLBI_VMALLE1IS:
case MISCREG_TLBI_VMALLS12E1IS:
// @todo: handle VMID and stage 2 to enable Virtualization
{
tlbiOp.broadcast(tc);
return;
}
+ case MISCREG_TLBI_VMALLE1IS:
+ // @todo: handle VMID and stage 2 to enable Virtualization
+ {
+ assert64();
+ scr = readMiscReg(MISCREG_SCR);
+
+ HCR hcr = readMiscReg(MISCREG_HCR_EL2);
+ bool is_host = (hcr.tge && hcr.e2h);
+ ExceptionLevel target_el = is_host ? EL2 : EL1;
+ TLBIALL tlbiOp(target_el, haveSecurity && !scr.ns);
+ tlbiOp.broadcast(tc);
+ return;
+ }
// VAEx(IS) and VALEx(IS) are the same because TLBs
// only store entries
// from the last level of translation table walks
auto asid = haveLargeAsid64 ? bits(newVal, 63, 48) :
bits(newVal, 55, 48);
- TLBIMVA tlbiOp(EL1, haveSecurity && !scr.ns,
+ HCR hcr = readMiscReg(MISCREG_HCR_EL2);
+ bool is_host = (hcr.tge && hcr.e2h);
+ ExceptionLevel target_el = is_host ? EL2 : EL1;
+ TLBIMVA tlbiOp(target_el, haveSecurity && !scr.ns,
static_cast<Addr>(bits(newVal, 43, 0)) << 12,
asid);
auto asid = haveLargeAsid64 ? bits(newVal, 63, 48) :
bits(newVal, 55, 48);
- TLBIMVA tlbiOp(EL1, haveSecurity && !scr.ns,
- static_cast<Addr>(bits(newVal, 43, 0)) << 12,
- asid);
+ HCR hcr = readMiscReg(MISCREG_HCR_EL2);
+ bool is_host = (hcr.tge && hcr.e2h);
+ ExceptionLevel target_el = is_host ? EL2 : EL1;
+ TLBIMVA tlbiOp(target_el, haveSecurity && !scr.ns,
+ static_cast<Addr>(bits(newVal, 43, 0)) << 12,
+ asid);
tlbiOp.broadcast(tc);
return;
auto asid = haveLargeAsid64 ? bits(newVal, 63, 48) :
bits(newVal, 55, 48);
- TLBIASID tlbiOp(EL1, haveSecurity && !scr.ns, asid);
+ HCR hcr = readMiscReg(MISCREG_HCR_EL2);
+ bool is_host = (hcr.tge && hcr.e2h);
+ ExceptionLevel target_el = is_host ? EL2 : EL1;
+ TLBIASID tlbiOp(target_el, haveSecurity && !scr.ns, asid);
tlbiOp(tc);
return;
}
auto asid = haveLargeAsid64 ? bits(newVal, 63, 48) :
bits(newVal, 55, 48);
- TLBIASID tlbiOp(EL1, haveSecurity && !scr.ns, asid);
+ HCR hcr = readMiscReg(MISCREG_HCR_EL2);
+ bool is_host = (hcr.tge && hcr.e2h);
+ ExceptionLevel target_el = is_host ? EL2 : EL1;
+ TLBIASID tlbiOp(target_el, haveSecurity && !scr.ns, asid);
tlbiOp.broadcast(tc);
return;
}
assert64();
scr = readMiscReg(MISCREG_SCR);
- TLBIMVAA tlbiOp(EL1, haveSecurity && !scr.ns,
+ HCR hcr = readMiscReg(MISCREG_HCR_EL2);
+ bool is_host = (hcr.tge && hcr.e2h);
+ ExceptionLevel target_el = is_host ? EL2 : EL1;
+ TLBIMVAA tlbiOp(target_el, haveSecurity && !scr.ns,
static_cast<Addr>(bits(newVal, 43, 0)) << 12);
tlbiOp(tc);
assert64();
scr = readMiscReg(MISCREG_SCR);
- TLBIMVAA tlbiOp(EL1, haveSecurity && !scr.ns,
+ HCR hcr = readMiscReg(MISCREG_HCR_EL2);
+ bool is_host = (hcr.tge && hcr.e2h);
+ ExceptionLevel target_el = is_host ? EL2 : EL1;
+ TLBIMVAA tlbiOp(target_el, haveSecurity && !scr.ns,
static_cast<Addr>(bits(newVal, 43, 0)) << 12);
tlbiOp.broadcast(tc);
return flat_idx;
}
+ /**
+ * Returns the EL2 register equivalent of the given encoding when VHE
+ * is implemented, HCR_EL2.E2H is enabled and execution is at EL2;
+ * e.g. an access naming SCTLR_EL1 is redirected to SCTLR_EL2.
+ */
+ int
+ redirectRegVHE(ThreadContext * tc, int misc_reg)
+ {
+ const HCR hcr = readMiscRegNoEffect(MISCREG_HCR_EL2);
+ if (hcr.e2h == 0x0 || currEL(tc) != EL2)
+ return misc_reg;
+ SCR scr = readMiscRegNoEffect(MISCREG_SCR_EL3);
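+ // Secure EL2 is not implemented yet, so sec_el2 is forced to false
+ // and the CNTHPS/CNTHVS aliases below are never selected.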
+ bool sec_el2 = scr.eel2 && false;
+ switch (misc_reg) {
+ case MISCREG_SPSR_EL1:
+ return MISCREG_SPSR_EL2;
+ case MISCREG_ELR_EL1:
+ return MISCREG_ELR_EL2;
+ case MISCREG_SCTLR_EL1:
+ return MISCREG_SCTLR_EL2;
+ case MISCREG_CPACR_EL1:
+ return MISCREG_CPTR_EL2;
+ // case :
+ // return MISCREG_TRFCR_EL2;
+ case MISCREG_TTBR0_EL1:
+ return MISCREG_TTBR0_EL2;
+ case MISCREG_TTBR1_EL1:
+ return MISCREG_TTBR1_EL2;
+ case MISCREG_TCR_EL1:
+ return MISCREG_TCR_EL2;
+ case MISCREG_AFSR0_EL1:
+ return MISCREG_AFSR0_EL2;
+ case MISCREG_AFSR1_EL1:
+ return MISCREG_AFSR1_EL2;
+ case MISCREG_ESR_EL1:
+ return MISCREG_ESR_EL2;
+ case MISCREG_FAR_EL1:
+ return MISCREG_FAR_EL2;
+ case MISCREG_MAIR_EL1:
+ return MISCREG_MAIR_EL2;
+ case MISCREG_AMAIR_EL1:
+ return MISCREG_AMAIR_EL2;
+ case MISCREG_VBAR_EL1:
+ return MISCREG_VBAR_EL2;
+ case MISCREG_CONTEXTIDR_EL1:
+ return MISCREG_CONTEXTIDR_EL2;
+ case MISCREG_CNTKCTL_EL1:
+ return MISCREG_CNTHCTL_EL2;
+ case MISCREG_CNTP_TVAL_EL0:
+ return sec_el2 ? MISCREG_CNTHPS_TVAL_EL2 :
+ MISCREG_CNTHP_TVAL_EL2;
+ case MISCREG_CNTP_CTL_EL0:
+ return sec_el2 ? MISCREG_CNTHPS_CTL_EL2 :
+ MISCREG_CNTHP_CTL_EL2;
+ case MISCREG_CNTP_CVAL_EL0:
+ return sec_el2 ? MISCREG_CNTHPS_CVAL_EL2 :
+ MISCREG_CNTHP_CVAL_EL2;
+ case MISCREG_CNTV_TVAL_EL0:
+ return sec_el2 ? MISCREG_CNTHVS_TVAL_EL2 :
+ MISCREG_CNTHV_TVAL_EL2;
+ case MISCREG_CNTV_CTL_EL0:
+ return sec_el2 ? MISCREG_CNTHVS_CTL_EL2 :
+ MISCREG_CNTHV_CTL_EL2;
+ case MISCREG_CNTV_CVAL_EL0:
+ return sec_el2 ? MISCREG_CNTHVS_CVAL_EL2 :
+ MISCREG_CNTHV_CVAL_EL2;
+ default:
+ return misc_reg;
+ }
+ /* should not be reachable */
+ return misc_reg;
+ }
+
int
snsBankedIndex64(MiscRegIndex reg, bool ns) const
{
# Add memory request flags where necessary
if self.user:
- self.memFlags.append("ArmISA::TLB::UserMode")
+ self.memFlags.append("userFlag")
if self.flavor == "dprefetch":
self.memFlags.append("Request::PREFETCH")
eaCode += self.offset
eaCode += ";"
+ if self.user:
+ eaCode += " uint8_t userFlag = 0;\n"\
+ " if(isUnpriviledgeAccess(xc->tcBase()))\n"\
+ " userFlag = ArmISA::TLB::UserMode;"
+
self.codeBlobs["ea_code"] = eaCode
def emitHelper(self, base='Memory64', wbDecl=None):
# Add memory request flags where necessary
if self.user:
- self.memFlags.append("ArmISA::TLB::UserMode")
+ self.memFlags.append("userFlag")
if self.flavor in ("relexp", "exp"):
# For exclusive pair ops alignment check is based on total size
eaCode += self.offset
eaCode += ";"
+ if self.user:
+ eaCode += " uint8_t userFlag = 0;\n"\
+ " if(isUnpriviledgeAccess(xc->tcBase()))\n"\
+ " userFlag = ArmISA::TLB::UserMode;"
self.codeBlobs["ea_code"] = eaCode
}
break;
case 5:
+ /* op0: 3 Crn:1 op1:5 */
switch (crm) {
+ case 0:
+ switch (op2) {
+ case 0:
+ return MISCREG_SCTLR_EL12;
+ case 2:
+ return MISCREG_CPACR_EL12;
+ }
+ break;
case 2:
switch (op2) {
case 0:
break;
}
break;
+ case 5:
+ /* op0: 3 Crn:2 op1:5 */
+ switch (crm) {
+ case 0:
+ switch (op2) {
+ case 0:
+ return MISCREG_TTBR0_EL12;
+ case 1:
+ return MISCREG_TTBR1_EL12;
+ case 2:
+ return MISCREG_TCR_EL12;
+ }
+ break;
+ }
+ break;
case 6:
switch (crm) {
case 0:
break;
}
break;
+ case 5:
+ switch (crm) {
+ case 0:
+ switch (op2) {
+ case 0:
+ return MISCREG_SPSR_EL12;
+ case 1:
+ return MISCREG_ELR_EL12;
+ }
+ break;
+ }
+ break;
case 6:
switch (crm) {
case 0:
break;
}
break;
+ case 5:
+ switch (crm) {
+ case 1:
+ switch (op2) {
+ case 0:
+ return MISCREG_AFSR0_EL12;
+ case 1:
+ return MISCREG_AFSR1_EL12;
+ }
+ break;
+ case 2:
+ switch (op2) {
+ case 0:
+ return MISCREG_ESR_EL12;
+ }
+ break;
+ }
+ break;
case 6:
switch (crm) {
case 1:
break;
}
break;
+ case 5:
+ switch (crm) {
+ case 0:
+ switch (op2) {
+ case 0:
+ return MISCREG_FAR_EL12;
+ }
+ break;
+ }
+ break;
case 6:
switch (crm) {
case 0:
break;
}
break;
+ case 5:
+ switch (crm) {
+ case 2:
+ switch (op2) {
+ case 0:
+ return MISCREG_MAIR_EL12;
+ }
+ break;
+ case 3:
+ switch (op2) {
+ case 0:
+ return MISCREG_AMAIR_EL12;
+ }
+ break;
+ }
+ break;
case 6:
switch (crm) {
case 2:
break;
}
break;
+ case 5:
+ switch (crm) {
+ case 0:
+ switch (op2) {
+ case 0:
+ return MISCREG_VBAR_EL12;
+ }
+ break;
+ }
+ break;
case 6:
switch (crm) {
case 0:
break;
}
break;
+ case 5:
+ switch (crm) {
+ case 0:
+ switch (op2) {
+ case 1:
+ return MISCREG_CONTEXTIDR_EL12;
+ }
+ break;
+ }
+ break;
case 6:
switch (crm) {
case 0:
.allPrivileges().exceptUserMode()
.mapsTo(MISCREG_DBGWCR15);
InitReg(MISCREG_MDCCSR_EL0)
- .allPrivileges().monSecureWrite(0).monNonSecureWrite(0)
+ .allPrivileges().writes(0)
.mapsTo(MISCREG_DBGDSCRint);
InitReg(MISCREG_MDDTR_EL0)
.allPrivileges();
| (nTLSMD ? 0 : 0x8000000)
| (LSMAOE ? 0 : 0x10000000))
.mapsTo(MISCREG_SCTLR_NS);
+ InitReg(MISCREG_SCTLR_EL12)
+ .allPrivileges().exceptUserMode()
+ .res0(0x20440 | (EnDB ? 0 : 0x2000)
+ | (IESB ? 0 : 0x200000)
+ | (EnDA ? 0 : 0x8000000)
+ | (EnIB ? 0 : 0x40000000)
+ | (EnIA ? 0 : 0x80000000))
+ .res1(0x500800 | (SPAN ? 0 : 0x800000)
+ | (nTLSMD ? 0 : 0x8000000)
+ | (LSMAOE ? 0 : 0x10000000))
+ .mapsTo(MISCREG_SCTLR_EL1);
InitReg(MISCREG_ACTLR_EL1)
.allPrivileges().exceptUserMode()
.mapsTo(MISCREG_ACTLR_NS);
InitReg(MISCREG_CPACR_EL1)
.allPrivileges().exceptUserMode()
.mapsTo(MISCREG_CPACR);
+ InitReg(MISCREG_CPACR_EL12)
+ .allPrivileges().exceptUserMode()
+ .mapsTo(MISCREG_CPACR_EL1);
InitReg(MISCREG_SCTLR_EL2)
.hyp().mon()
.res0(0x0512c7c0 | (EnDB ? 0 : 0x2000)
InitReg(MISCREG_TTBR0_EL1)
.allPrivileges().exceptUserMode()
.mapsTo(MISCREG_TTBR0_NS);
+ InitReg(MISCREG_TTBR0_EL12)
+ .allPrivileges().exceptUserMode()
+ .mapsTo(MISCREG_TTBR0_EL1);
InitReg(MISCREG_TTBR1_EL1)
.allPrivileges().exceptUserMode()
.mapsTo(MISCREG_TTBR1_NS);
+ InitReg(MISCREG_TTBR1_EL12)
+ .allPrivileges().exceptUserMode()
+ .mapsTo(MISCREG_TTBR1_EL1);
InitReg(MISCREG_TCR_EL1)
.allPrivileges().exceptUserMode()
.mapsTo(MISCREG_TTBCR_NS);
+ InitReg(MISCREG_TCR_EL12)
+ .allPrivileges().exceptUserMode()
+ .mapsTo(MISCREG_TTBCR_NS);
InitReg(MISCREG_TTBR0_EL2)
.hyp().mon()
.mapsTo(MISCREG_HTTBR);
InitReg(MISCREG_SPSR_EL1)
.allPrivileges().exceptUserMode()
.mapsTo(MISCREG_SPSR_SVC); // NAM C5.2.17 SPSR_EL1
+ InitReg(MISCREG_SPSR_EL12)
+ .allPrivileges().exceptUserMode()
+ .mapsTo(MISCREG_SPSR_SVC);
InitReg(MISCREG_ELR_EL1)
.allPrivileges().exceptUserMode();
+ InitReg(MISCREG_ELR_EL12)
+ .allPrivileges().exceptUserMode()
+ .mapsTo(MISCREG_ELR_EL1);
InitReg(MISCREG_SP_EL0)
.allPrivileges().exceptUserMode();
InitReg(MISCREG_SPSEL)
InitReg(MISCREG_AFSR0_EL1)
.allPrivileges().exceptUserMode()
.mapsTo(MISCREG_ADFSR_NS);
+ InitReg(MISCREG_AFSR0_EL12)
+ .allPrivileges().exceptUserMode()
+ .mapsTo(MISCREG_ADFSR_NS);
InitReg(MISCREG_AFSR1_EL1)
.allPrivileges().exceptUserMode()
.mapsTo(MISCREG_AIFSR_NS);
+ InitReg(MISCREG_AFSR1_EL12)
+ .allPrivileges().exceptUserMode()
+ .mapsTo(MISCREG_AIFSR_NS);
InitReg(MISCREG_ESR_EL1)
.allPrivileges().exceptUserMode();
+ InitReg(MISCREG_ESR_EL12)
+ .allPrivileges().exceptUserMode()
+ .mapsTo(MISCREG_ESR_EL1);
InitReg(MISCREG_IFSR32_EL2)
.hyp().mon()
.mapsTo(MISCREG_IFSR_NS);
InitReg(MISCREG_FAR_EL1)
.allPrivileges().exceptUserMode()
.mapsTo(MISCREG_DFAR_NS, MISCREG_IFAR_NS);
+ InitReg(MISCREG_FAR_EL12)
+ .allPrivileges().exceptUserMode()
+ .mapsTo(MISCREG_DFAR_NS, MISCREG_IFAR_NS);
InitReg(MISCREG_FAR_EL2)
.hyp().mon()
.mapsTo(MISCREG_HDFAR, MISCREG_HIFAR);
InitReg(MISCREG_MAIR_EL1)
.allPrivileges().exceptUserMode()
.mapsTo(MISCREG_PRRR_NS, MISCREG_NMRR_NS);
+ InitReg(MISCREG_MAIR_EL12)
+ .allPrivileges().exceptUserMode()
+ .mapsTo(MISCREG_PRRR_NS, MISCREG_NMRR_NS);
InitReg(MISCREG_AMAIR_EL1)
.allPrivileges().exceptUserMode()
.mapsTo(MISCREG_AMAIR0_NS, MISCREG_AMAIR1_NS);
+ InitReg(MISCREG_AMAIR_EL12)
+ .allPrivileges().exceptUserMode()
+ .mapsTo(MISCREG_AMAIR0_NS, MISCREG_AMAIR1_NS);
InitReg(MISCREG_MAIR_EL2)
.hyp().mon()
.mapsTo(MISCREG_HMAIR0, MISCREG_HMAIR1);
InitReg(MISCREG_VBAR_EL1)
.allPrivileges().exceptUserMode()
.mapsTo(MISCREG_VBAR_NS);
+ InitReg(MISCREG_VBAR_EL12)
+ .allPrivileges().exceptUserMode()
+ .mapsTo(MISCREG_VBAR_NS);
InitReg(MISCREG_RVBAR_EL1)
.allPrivileges().exceptUserMode().writes(0);
InitReg(MISCREG_ISR_EL1)
InitReg(MISCREG_CONTEXTIDR_EL1)
.allPrivileges().exceptUserMode()
.mapsTo(MISCREG_CONTEXTIDR_NS);
+ InitReg(MISCREG_CONTEXTIDR_EL12)
+ .allPrivileges().exceptUserMode()
+ .mapsTo(MISCREG_CONTEXTIDR_NS);
InitReg(MISCREG_TPIDR_EL1)
.allPrivileges().exceptUserMode()
.mapsTo(MISCREG_TPIDRPRW_NS);
.hyp()
.res0(0xffffffff00000000)
.mapsTo(MISCREG_CNTHP_TVAL);
- // IF Armv8.1-VHE
+ InitReg(MISCREG_CNTHPS_CTL_EL2)
+ .mon()
+ .hyp()
+ .res0(0xfffffffffffffff8)
+ .unimplemented();
+ InitReg(MISCREG_CNTHPS_CVAL_EL2)
+ .mon()
+ .hyp()
+ .res0(0xfffffffffffffff8)
+ .unimplemented();
+ InitReg(MISCREG_CNTHPS_TVAL_EL2)
+ .mon()
+ .hyp()
+ .res0(0xfffffffffffffff8)
+ .unimplemented();
InitReg(MISCREG_CNTHV_CTL_EL2)
.mon()
.hyp()
.mon()
.hyp()
.res0(0xffffffff00000000);
+ InitReg(MISCREG_CNTHVS_CTL_EL2)
+ .mon()
+ .hyp()
+ .res0(0xfffffffffffffff8)
+ .unimplemented();
+ InitReg(MISCREG_CNTHVS_CVAL_EL2)
+ .mon()
+ .hyp()
+ .res0(0xfffffffffffffff8)
+ .unimplemented();
+ InitReg(MISCREG_CNTHVS_TVAL_EL2)
+ .mon()
+ .hyp()
+ .res0(0xfffffffffffffff8)
+ .unimplemented();
- // ENDIF Armv8.1-VHE
InitReg(MISCREG_CNTVOFF_EL2)
.mon()
InitReg(MISCREG_ZCR_EL2)
.hyp().mon();
InitReg(MISCREG_ZCR_EL12)
- .unimplemented().warnNotFail();
+ .allPrivileges().exceptUserMode()
+ .mapsTo(MISCREG_ZCR_EL1);
InitReg(MISCREG_ZCR_EL1)
.allPrivileges().exceptUserMode();
MISCREG_VPIDR_EL2,
MISCREG_VMPIDR_EL2,
MISCREG_SCTLR_EL1,
+ MISCREG_SCTLR_EL12,
MISCREG_ACTLR_EL1,
MISCREG_CPACR_EL1,
+ MISCREG_CPACR_EL12,
MISCREG_SCTLR_EL2,
MISCREG_ACTLR_EL2,
MISCREG_HCR_EL2,
MISCREG_CPTR_EL3,
MISCREG_MDCR_EL3,
MISCREG_TTBR0_EL1,
+ MISCREG_TTBR0_EL12,
MISCREG_TTBR1_EL1,
+ MISCREG_TTBR1_EL12,
MISCREG_TCR_EL1,
+ MISCREG_TCR_EL12,
MISCREG_TTBR0_EL2,
MISCREG_TCR_EL2,
MISCREG_VTTBR_EL2,
MISCREG_TCR_EL3,
MISCREG_DACR32_EL2,
MISCREG_SPSR_EL1,
+ MISCREG_SPSR_EL12,
MISCREG_ELR_EL1,
+ MISCREG_ELR_EL12,
MISCREG_SP_EL0,
MISCREG_SPSEL,
MISCREG_CURRENTEL,
MISCREG_ELR_EL3,
MISCREG_SP_EL2,
MISCREG_AFSR0_EL1,
+ MISCREG_AFSR0_EL12,
MISCREG_AFSR1_EL1,
+ MISCREG_AFSR1_EL12,
MISCREG_ESR_EL1,
+ MISCREG_ESR_EL12,
MISCREG_IFSR32_EL2,
MISCREG_AFSR0_EL2,
MISCREG_AFSR1_EL2,
MISCREG_AFSR1_EL3,
MISCREG_ESR_EL3,
MISCREG_FAR_EL1,
+ MISCREG_FAR_EL12,
MISCREG_FAR_EL2,
MISCREG_HPFAR_EL2,
MISCREG_FAR_EL3,
MISCREG_PMUSERENR_EL0,
MISCREG_PMOVSSET_EL0,
MISCREG_MAIR_EL1,
+ MISCREG_MAIR_EL12,
MISCREG_AMAIR_EL1,
+ MISCREG_AMAIR_EL12,
MISCREG_MAIR_EL2,
MISCREG_AMAIR_EL2,
MISCREG_MAIR_EL3,
MISCREG_L2CTLR_EL1,
MISCREG_L2ECTLR_EL1,
MISCREG_VBAR_EL1,
+ MISCREG_VBAR_EL12,
MISCREG_RVBAR_EL1,
MISCREG_ISR_EL1,
MISCREG_VBAR_EL2,
MISCREG_RVBAR_EL3,
MISCREG_RMR_EL3,
MISCREG_CONTEXTIDR_EL1,
+ MISCREG_CONTEXTIDR_EL12,
MISCREG_TPIDR_EL1,
MISCREG_TPIDR_EL0,
MISCREG_TPIDRRO_EL0,
MISCREG_CNTHP_CTL_EL2,
MISCREG_CNTHP_CVAL_EL2,
MISCREG_CNTHP_TVAL_EL2,
+ MISCREG_CNTHPS_CTL_EL2,
+ MISCREG_CNTHPS_CVAL_EL2,
+ MISCREG_CNTHPS_TVAL_EL2,
// IF Armv8.1-VHE
MISCREG_CNTHV_CTL_EL2,
MISCREG_CNTHV_CVAL_EL2,
MISCREG_CNTHV_TVAL_EL2,
+ MISCREG_CNTHVS_CTL_EL2,
+ MISCREG_CNTHVS_CVAL_EL2,
+ MISCREG_CNTHVS_TVAL_EL2,
// ENDIF Armv8.1-VHE
MISCREG_CNTVOFF_EL2,
// END Generic Timer (AArch64)
"vpidr_el2",
"vmpidr_el2",
"sctlr_el1",
+ "sctlr_el12",
"actlr_el1",
"cpacr_el1",
+ "cpacr_el12",
"sctlr_el2",
"actlr_el2",
"hcr_el2",
"cptr_el3",
"mdcr_el3",
"ttbr0_el1",
+ "ttbr0_el12",
"ttbr1_el1",
+ "ttbr1_el12",
"tcr_el1",
+ "tcr_el12",
"ttbr0_el2",
"tcr_el2",
"vttbr_el2",
"tcr_el3",
"dacr32_el2",
"spsr_el1",
+ "spsr_el12",
"elr_el1",
+ "elr_el12",
"sp_el0",
"spsel",
"currentel",
"elr_el3",
"sp_el2",
"afsr0_el1",
+ "afsr0_el12",
"afsr1_el1",
+ "afsr1_el12",
"esr_el1",
+ "esr_el12",
"ifsr32_el2",
"afsr0_el2",
"afsr1_el2",
"afsr1_el3",
"esr_el3",
"far_el1",
+ "far_el12",
"far_el2",
"hpfar_el2",
"far_el3",
"pmuserenr_el0",
"pmovsset_el0",
"mair_el1",
+ "mair_el12",
"amair_el1",
+ "amair_el12",
"mair_el2",
"amair_el2",
"mair_el3",
"l2ctlr_el1",
"l2ectlr_el1",
"vbar_el1",
+ "vbar_el12",
"rvbar_el1",
"isr_el1",
"vbar_el2",
"rvbar_el3",
"rmr_el3",
"contextidr_el1",
+ "contextidr_el12",
"tpidr_el1",
"tpidr_el0",
"tpidrro_el0",
"cnthp_ctl_el2",
"cnthp_cval_el2",
"cnthp_tval_el2",
+ "cnthps_ctl_el2",
+ "cnthps_cval_el2",
+ "cnthps_tval_el2",
"cnthv_ctl_el2",
"cnthv_cval_el2",
"cnthv_tval_el2",
+ "cnthvs_ctl_el2",
+ "cnthvs_cval_el2",
+ "cnthvs_tval_el2",
"cntvoff_el2",
"pmevcntr0_el0",
"pmevcntr1_el0",
BitUnion32(CPTR)
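+ // Fields of the CPACR-like layout that CPTR_EL2 takes when
+ // HCR_EL2.E2H is set; they overlap the E2H==0 bit positions
+ // (e.g. fpen[21:20] overlaps tta[20]).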
Bitfield<31> tcpac;
+ Bitfield<30> tam;
+ Bitfield<28> tta_e2h;
+ Bitfield<21, 20> fpen;
Bitfield<20> tta;
+ Bitfield<17, 16> zen;
Bitfield<13, 12> res1_13_12_el2;
Bitfield<10> tfp;
Bitfield<9> res1_9_el2;
bool
match(Addr va, uint8_t _vmid, bool hypLookUp, bool secure_lookup,
- ExceptionLevel target_el) const
+ ExceptionLevel target_el, bool in_host) const
{
- return match(va, 0, _vmid, hypLookUp, secure_lookup, true, target_el);
+ return match(va, 0, _vmid, hypLookUp, secure_lookup, true,
+ target_el, in_host);
}
bool
match(Addr va, uint16_t asn, uint8_t _vmid, bool hypLookUp,
- bool secure_lookup, bool ignore_asn, ExceptionLevel target_el) const
+ bool secure_lookup, bool ignore_asn, ExceptionLevel target_el,
+ bool in_host) const
{
bool match = false;
Addr v = vpn << N;
-
if (valid && va >= v && va <= v + size && (secure_lookup == !nstid) &&
(hypLookUp == isHyp))
{
- match = checkELMatch(target_el);
+ match = checkELMatch(target_el, in_host);
if (match && !ignore_asn) {
match = global || (asn == asid);
}
bool
- checkELMatch(ExceptionLevel target_el) const
+ checkELMatch(ExceptionLevel target_el, bool in_host) const
{
- if (target_el == EL2 || target_el == EL3) {
- return (el == target_el);
- } else {
- return (el == EL0) || (el == EL1);
+ switch (target_el) {
+ case EL3:
+ return el == EL3;
+ case EL2:
+ {
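+ // EL0 entries can belong to the EL2&0 (host) translation regime;
+ // they match an EL2 lookup only when in_host is set.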
+ return el == EL2 || (el == EL0 && in_host);
+ }
+ case EL1:
+ case EL0:
+ return (el == EL0) || (el == EL1);
+ default:
+ return false;
}
}
bool from_link)
{
bool v = false;
- switch (ctr.bt)
- {
- case 0x0:
- v = testAddrMatch(tc, pc, ctr.bas);
- break;
- case 0x1:
- v = testAddrMatch(tc, pc, ctr.bas); // linked
- if (v){
- v = (conf->getBrkPoint(ctr.lbn))->testLinkedBk(tc, pc, el);
- }
- break;
- case 0x2:
+ switch (ctr.bt) {
+ case 0x0:
+ v = testAddrMatch(tc, pc, ctr.bas);
+ break;
+
+ case 0x1:
+ v = testAddrMatch(tc, pc, ctr.bas); // linked
+ if (v) {
+ v = (conf->getBrkPoint(ctr.lbn))->testLinkedBk(tc, pc, el);
+ }
+ break;
+
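+ // BT=0x2/0x3 are context-ID matches: in the VHE host regime the
+ // comparison uses CONTEXTIDR_EL2, otherwise CONTEXTIDR_EL1.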
+ case 0x2:
+ {
+ bool host = ELIsInHost(tc, el);
+ v = testContextMatch(tc, !host, true);
+ }
+ break;
+
+ case 0x3:
+ if (from_link) {
+ bool host = ELIsInHost(tc, el);
+ v = testContextMatch(tc, !host, true);
+ }
+ break;
+
+ case 0x4:
+ v = testAddrMissMatch(tc, pc, ctr.bas);
+ break;
+
+ case 0x5:
+ v = testAddrMissMatch(tc, pc, ctr.bas); // linked
+ if (v && !from_link)
+ v = v && (conf->getBrkPoint(ctr.lbn))->testLinkedBk(tc, pc, el);
+ break;
+
+ case 0x6:
+ if (HaveVirtHostExt(tc) && !ELIsInHost(tc, el))
+ v = testContextMatch(tc, true);
+ break;
+
+ case 0x7:
+ if (HaveVirtHostExt(tc) && !ELIsInHost(tc, el) && from_link)
v = testContextMatch(tc, true);
- break;
- case 0x3:
- if (from_link){
- v = testContextMatch(tc, true); //linked
- }
- break;
- case 0x4:
- v = testAddrMissMatch(tc, pc, ctr.bas);
- break;
- case 0x5:
- v = testAddrMissMatch(tc, pc, ctr.bas); // linked
- if (v && !from_link)
- v = v && (conf->getBrkPoint(ctr.lbn))->testLinkedBk(tc,
- pc, el);
- break;
- case 0x6:
- // VHE not implemented
- // v = testContextMatch(tc, true);
- break;
- case 0x7:
- // VHE not implemented
- // if (from_link)
- // v = testContextMatch(tc, true);
- break;
- case 0x8:
+ break;
+
+ case 0x8:
+ if (ArmSystem::haveEL(tc, EL2) && !ELIsInHost(tc, el)) {
v = testVMIDMatch(tc);
- break;
- case 0x9:
- if (from_link && ArmSystem::haveEL(tc, EL2)){
- v = testVMIDMatch(tc); // linked
- }
- break;
- case 0xa:
- if (ArmSystem::haveEL(tc, EL2)){
- v = testContextMatch(tc, true);
- if (v && !from_link)
- v = v && testVMIDMatch(tc);
- }
- break;
- case 0xb:
- if (from_link && ArmSystem::haveEL(tc, EL2)){
- v = testContextMatch(tc, true);
- v = v && testVMIDMatch(tc);
- }
- break;
- case 0xc:
- // VHE not implemented
- // v = testContextMatch(tc, false); // CONTEXTIDR_EL2
- break;
- case 0xd:
- // VHE not implemented
- // if (from_link)
- // v = testContextMatch(tc, false);
- // CONTEXTIDR_EL2 AND LINKED
+ }
+ break;
- break;
- case 0xe:
- // VHE not implemented
- // v = testContextMatch(tc, true); // CONTEXTIDR_EL1
- // v = v && testContextMatch(tc, false); // CONTEXTIDR_EL2
- break;
- case 0xf:
- // VHE not implemented
- // if (from_link){
- // v = testContextMatch(tc, true); // CONTEXTIDR_EL1
- // v = v && testContextMatch(tc, false); // CONTEXTIDR_EL2
- // }
- break;
+ case 0x9:
+ if (from_link && ArmSystem::haveEL(tc, EL2) &&
+ !ELIsInHost(tc, el)) {
+ v = testVMIDMatch(tc);
+ }
+ break;
+
+ case 0xa:
+ if (ArmSystem::haveEL(tc, EL2) && !ELIsInHost(tc, el)) {
+ v = testContextMatch(tc, true);
+ if (v && !from_link)
+ v = v && testVMIDMatch(tc);
+ }
+ break;
+ case 0xb:
+ if (from_link && ArmSystem::haveEL(tc, EL2) &&
+ !ELIsInHost(tc, el)) {
+ v = testContextMatch(tc, true);
+ v = v && testVMIDMatch(tc);
+ }
+ break;
+
+ case 0xc:
+ if (HaveVirtHostExt(tc) && !inSecureState(tc))
+ v = testContextMatch(tc, false);
+ break;
+
+ case 0xd:
+ if (HaveVirtHostExt(tc) && from_link && !inSecureState(tc))
+ v = testContextMatch(tc, false);
+ break;
+
+ case 0xe:
+ if (HaveVirtHostExt(tc) && !ELIsInHost(tc, el)
+ && !inSecureState(tc)) {
+ v = testContextMatch(tc, true); // CONTEXTIDR_EL1
+ v = v && testContextMatch(tc, false); // CONTEXTIDR_EL2
+ }
+ break;
+ case 0xf:
+ if (HaveVirtHostExt(tc) && !ELIsInHost(tc, el) && from_link
+ && !inSecureState(tc)) {
+ v = testContextMatch(tc, true); // CONTEXTIDR_EL1
+ v = v && testContextMatch(tc, false); // CONTEXTIDR_EL2
+ }
+ break;
+ default:
+ break;
}
return v;
}
bool
BrkPoint::testContextMatch(ThreadContext *tc, bool ctx1)
+{
+ return testContextMatch(tc, ctx1, ctx1);
+}
+
+bool
+BrkPoint::testContextMatch(ThreadContext *tc, bool ctx1, bool low_ctx)
{
if (!isCntxtAware)
return false;
return false;
}
- RegVal ctxid = tc->readMiscReg(miscridx);
- RegVal v = getContextfromReg(tc, ctx1);
+ RegVal ctxid = bits(tc->readMiscReg(miscridx), 31, 0);
+ RegVal v = getContextfromReg(tc, low_ctx);
return (v == ctxid);
}
public:
bool testAddrMatch(ThreadContext *tc, Addr pc, uint8_t bas);
bool testAddrMissMatch(ThreadContext *tc, Addr pc, uint8_t bas);
+ bool testContextMatch(ThreadContext *tc, bool ctx1, bool low_ctx);
bool testContextMatch(ThreadContext *tc, bool ctx1);
bool testVMIDMatch(ThreadContext *tc);
currState->vaddr = currState->vaddr_tainted;
if (currState->aarch64) {
+ currState->hcr = currState->tc->readMiscReg(MISCREG_HCR_EL2);
if (isStage2) {
currState->sctlr = currState->tc->readMiscReg(MISCREG_SCTLR_EL1);
currState->vtcr = currState->tc->readMiscReg(MISCREG_VTCR_EL2);
} else switch (currState->el) {
case EL0:
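+ // In the host regime (HCR_EL2.{E2H,TGE} set), EL0 uses the EL2&0
+ // translation controls rather than the EL1&0 ones.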
+ if (HaveVirtHostExt(currState->tc) &&
+ currState->hcr.tge == 1 && currState->hcr.e2h == 1) {
+ currState->sctlr = currState->tc->readMiscReg(MISCREG_SCTLR_EL2);
+ currState->tcr = currState->tc->readMiscReg(MISCREG_TCR_EL2);
+ } else {
+ currState->sctlr = currState->tc->readMiscReg(MISCREG_SCTLR_EL1);
+ currState->tcr = currState->tc->readMiscReg(MISCREG_TCR_EL1);
+ }
+ break;
case EL1:
currState->sctlr = currState->tc->readMiscReg(MISCREG_SCTLR_EL1);
currState->tcr = currState->tc->readMiscReg(MISCREG_TCR_EL1);
panic("Invalid exception level");
break;
}
- currState->hcr = currState->tc->readMiscReg(MISCREG_HCR_EL2);
} else {
currState->sctlr = currState->tc->readMiscReg(snsBankedIndex(
MISCREG_SCTLR, currState->tc, !currState->isSecure));
// @TODO Should this always be the TLB or should we look in the stage2 TLB?
TlbEntry* te = tlb->lookup(currState->vaddr, currState->asid,
currState->vmid, currState->isHyp, currState->isSecure, true, false,
- currState->el);
+ currState->el, false);
// Check if we still need to have a walk for this request. If the requesting
// instruction has been squashed, or a previous walk has filled the TLB with
currState = pendingQueue.front();
te = tlb->lookup(currState->vaddr, currState->asid,
currState->vmid, currState->isHyp, currState->isSecure, true,
- false, currState->el);
+ false, currState->el, false);
} else {
// Terminate the loop, nothing more to do
currState = NULL;
switch (currState->el) {
case EL0:
+ {
+ Addr ttbr0;
+ Addr ttbr1;
+ if (HaveVirtHostExt(currState->tc) &&
+ currState->hcr.tge == 1 && currState->hcr.e2h == 1) {
+ // VHE code for EL2&0 regime
+ ttbr0 = currState->tc->readMiscReg(MISCREG_TTBR0_EL2);
+ ttbr1 = currState->tc->readMiscReg(MISCREG_TTBR1_EL2);
+ } else {
+ ttbr0 = currState->tc->readMiscReg(MISCREG_TTBR0_EL1);
+ ttbr1 = currState->tc->readMiscReg(MISCREG_TTBR1_EL1);
+ }
+ switch (bits(currState->vaddr, 63,48)) {
+ case 0:
+ DPRINTF(TLB, " - Selecting TTBR0 (AArch64)\n");
+ ttbr = ttbr0;
+ tsz = adjustTableSizeAArch64(64 - currState->tcr.t0sz);
+ tg = GrainMap_tg0[currState->tcr.tg0];
+ currState->hpd = currState->tcr.hpd0;
+ currState->isUncacheable = currState->tcr.irgn0 == 0;
+ if (bits(currState->vaddr, 63, tsz) != 0x0 ||
+ currState->tcr.epd0)
+ fault = true;
+ break;
+ case 0xffff:
+ DPRINTF(TLB, " - Selecting TTBR1 (AArch64)\n");
+ ttbr = ttbr1;
+ tsz = adjustTableSizeAArch64(64 - currState->tcr.t1sz);
+ tg = GrainMap_tg1[currState->tcr.tg1];
+ currState->hpd = currState->tcr.hpd1;
+ currState->isUncacheable = currState->tcr.irgn1 == 0;
+ if (bits(currState->vaddr, 63, tsz) != mask(64 - tsz) ||
+ currState->tcr.epd1)
+ fault = true;
+ break;
+ default:
+ // top two bytes must be all 0s or all 1s, else invalid addr
+ fault = true;
+ }
+ ps = currState->tcr.ips;
+ }
+ break;
case EL1:
if (isStage2) {
DPRINTF(TLB, " - Selecting VTTBR0 (AArch64 stage 2)\n");
case EL2:
switch(bits(currState->vaddr, 63,48)) {
case 0:
- DPRINTF(TLB, " - Selecting TTBR0 (AArch64)\n");
+ DPRINTF(TLB, " - Selecting TTBR0_EL2 (AArch64)\n");
ttbr = currState->tc->readMiscReg(MISCREG_TTBR0_EL2);
tsz = adjustTableSizeAArch64(64 - currState->tcr.t0sz);
tg = GrainMap_tg0[currState->tcr.tg0];
break;
case 0xffff:
- DPRINTF(TLB, " - Selecting TTBR1 (AArch64)\n");
+ DPRINTF(TLB, " - Selecting TTBR1_EL2 (AArch64)\n");
ttbr = currState->tc->readMiscReg(MISCREG_TTBR1_EL2);
tsz = adjustTableSizeAArch64(64 - currState->tcr.t1sz);
tg = GrainMap_tg1[currState->tcr.tg1];
// invalid addr if top two bytes are not all 0s
fault = true;
}
- ps = currState->tcr.ps;
+ ps = currState->hcr.e2h ? currState->tcr.ips : currState->tcr.ps;
break;
case EL3:
switch(bits(currState->vaddr, 63,48)) {
case 0:
- DPRINTF(TLB, " - Selecting TTBR0 (AArch64)\n");
+ DPRINTF(TLB, " - Selecting TTBR0_EL3 (AArch64)\n");
ttbr = currState->tc->readMiscReg(MISCREG_TTBR0_EL3);
tsz = adjustTableSizeAArch64(64 - currState->tcr.t0sz);
tg = GrainMap_tg0[currState->tcr.tg0];
uint8_t attrIndx = lDescriptor.attrIndx();
DPRINTF(TLBVerbose, "memAttrsAArch64 AttrIndx:%#x sh:%#x\n", attrIndx, sh);
+ ExceptionLevel regime = s1TranslationRegime(tc, currState->el);
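+ // The stage 1 translation regime selects the MAIR: EL0 resolves to
+ // either the EL1&0 or the EL2&0 regime depending on HCR_EL2.{E2H,TGE}.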
// Select MAIR
uint64_t mair;
- switch (currState->el) {
+ switch (regime) {
case EL0:
case EL1:
mair = tc->readMiscReg(MISCREG_MAIR_EL1);
}
TlbEntry *e = lookup(va, asid, vmid, isHyp, isSecure, true, false,
- aarch64 ? aarch64EL : EL1);
+ aarch64 ? aarch64EL : EL1, false);
if (!e)
return false;
pa = e->pAddr(va);
TlbEntry*
TLB::lookup(Addr va, uint16_t asn, uint8_t vmid, bool hyp, bool secure,
- bool functional, bool ignore_asn, ExceptionLevel target_el)
+ bool functional, bool ignore_asn, ExceptionLevel target_el,
+ bool in_host)
{
TlbEntry *retval = NULL;
int x = 0;
while (retval == NULL && x < size) {
if ((!ignore_asn && table[x].match(va, asn, vmid, hyp, secure, false,
- target_el)) ||
- (ignore_asn && table[x].match(va, vmid, hyp, secure, target_el))) {
+ target_el, in_host)) ||
+ (ignore_asn && table[x].match(va, vmid, hyp, secure, target_el,
+ in_host))) {
// We only move the hit entry ahead when the position is higher
// than rangeMRU
if (x > rangeMRU && !functional) {
void
TLB::flushAllSecurity(bool secure_lookup, ExceptionLevel target_el,
- bool ignore_el)
+ bool ignore_el, bool in_host)
{
DPRINTF(TLB, "Flushing all TLB entries (%s lookup)\n",
(secure_lookup ? "secure" : "non-secure"));
while (x < size) {
te = &table[x];
const bool el_match = ignore_el ?
- true : te->checkELMatch(target_el);
-
+ true : te->checkELMatch(target_el, in_host);
if (te->valid && secure_lookup == !te->nstid &&
(te->vmid == vmid || secure_lookup) && el_match) {
// If there's a second stage TLB (and we're not it) then flush it as well
// if we're currently in hyp mode
if (!isStage2 && isHyp) {
- stage2Tlb->flushAllSecurity(secure_lookup, EL1, true);
+ stage2Tlb->flushAllSecurity(secure_lookup, EL1, true, false);
}
}
while (x < size) {
te = &table[x];
const bool el_match = ignore_el ?
- true : te->checkELMatch(target_el);
+ true : te->checkELMatch(target_el, false);
if (te->valid && te->nstid && te->isHyp == hyp && el_match) {
void
TLB::flushMvaAsid(Addr mva, uint64_t asn, bool secure_lookup,
- ExceptionLevel target_el)
+ ExceptionLevel target_el, bool in_host)
{
DPRINTF(TLB, "Flushing TLB entries with mva: %#x, asid: %#x "
"(%s lookup)\n", mva, asn, (secure_lookup ?
"secure" : "non-secure"));
- _flushMva(mva, asn, secure_lookup, false, target_el);
+ _flushMva(mva, asn, secure_lookup, false, target_el, in_host);
flushTlbMvaAsid++;
}
void
-TLB::flushAsid(uint64_t asn, bool secure_lookup, ExceptionLevel target_el)
+TLB::flushAsid(uint64_t asn, bool secure_lookup, ExceptionLevel target_el,
+ bool in_host)
{
DPRINTF(TLB, "Flushing TLB entries with asid: %#x (%s lookup)\n", asn,
(secure_lookup ? "secure" : "non-secure"));
te = &table[x];
if (te->valid && te->asid == asn && secure_lookup == !te->nstid &&
(te->vmid == vmid || secure_lookup) &&
- te->checkELMatch(target_el)) {
+ te->checkELMatch(target_el, in_host)) {
te->valid = false;
DPRINTF(TLB, " - %s\n", te->print());
}
void
-TLB::flushMva(Addr mva, bool secure_lookup, ExceptionLevel target_el)
-{
+TLB::flushMva(Addr mva, bool secure_lookup, ExceptionLevel target_el,
+ bool in_host)
+{
DPRINTF(TLB, "Flushing TLB entries with mva: %#x (%s lookup)\n", mva,
(secure_lookup ? "secure" : "non-secure"));
- _flushMva(mva, 0xbeef, secure_lookup, true, target_el);
+ _flushMva(mva, 0xbeef, secure_lookup, true, target_el, in_host);
flushTlbMva++;
}
void
TLB::_flushMva(Addr mva, uint64_t asn, bool secure_lookup,
- bool ignore_asn, ExceptionLevel target_el)
+ bool ignore_asn, ExceptionLevel target_el, bool in_host)
{
TlbEntry *te;
// D5.7.2: Sign-extend address to 64 bits
bool hyp = target_el == EL2;
te = lookup(mva, asn, vmid, hyp, secure_lookup, false, ignore_asn,
- target_el);
+ target_el, in_host);
while (te != NULL) {
if (secure_lookup == !te->nstid) {
DPRINTF(TLB, " - %s\n", te->print());
flushedEntries++;
}
te = lookup(mva, asn, vmid, hyp, secure_lookup, false, ignore_asn,
- target_el);
+ target_el, in_host);
}
}
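Every flush entry point above now threads `in_host` down to the entry match. A minimal caller sketch, assuming a hypothetical `tlb` pointer and address, with the VHE host state HCR_EL2.E2H = HCR_EL2.TGE = 1:

    // Hedged sketch (hypothetical caller, not part of the patch): with E2H
    // and TGE both set, a TLBI by VA issued from the host targets the EL2&0
    // regime, so the flush is qualified with in_host = true.
    tlb->flushMva(/* mva */ 0xffff0000, /* secure_lookup */ false,
                  /* target_el */ EL1, /* in_host */ true);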
TLB::flushIpaVmid(Addr ipa, bool secure_lookup, ExceptionLevel target_el)
{
assert(!isStage2);
- stage2Tlb->_flushMva(ipa, 0xbeef, secure_lookup, true, target_el);
+ stage2Tlb->_flushMva(ipa, 0xbeef, secure_lookup, true, target_el, false);
}
void
uint8_t ap = 0x3 & (te->ap); // 2-bit access protection field
bool grant = false;
+ bool wxn = sctlr.wxn;
uint8_t xn = te->xn;
uint8_t pxn = te->pxn;
- bool r = !is_write && !is_fetch;
+ bool r = (!is_write && !is_fetch);
bool w = is_write;
bool x = is_fetch;
// generated the fault; they count as writes otherwise
bool grant_read = true;
DPRINTF(TLBVerbose, "Checking permissions: ap:%d, xn:%d, pxn:%d, r:%d, "
- "w:%d, x:%d\n", ap, xn, pxn, r, w, x);
+ "w:%d, x:%d, is_priv: %d, wxn: %d\n", ap, xn,
+ pxn, r, w, x, is_priv, wxn);
if (isStage2) {
assert(ArmSystem::haveVirtualization(tc) && aarch64EL != EL2);
grant_read = hap & 0x1;
if (is_fetch) {
// sctlr.wxn overrides the xn bit
- grant = !sctlr.wxn && !xn;
+ grant = !wxn && !xn;
+ } else if (is_atomic) {
+ grant = r && w;
+ grant_read = r;
} else if (is_write) {
grant = hap & 0x2;
} else { // is_read
break;
case 4:
case 5:
- grant = r || w || (x && !sctlr.wxn);
+ grant = r || w || (x && !wxn);
break;
case 6:
case 7:
switch (perm) {
case 0:
case 2:
- grant = r || w || (x && !sctlr.wxn);
+ grant = r || w || (x && !wxn);
break;
case 1:
case 3:
uint8_t perm = (ap & 0x2) | xn;
switch (perm) {
case 0:
- grant = r || w || (x && !sctlr.wxn) ;
+ grant = r || w || (x && !wxn);
break;
case 1:
grant = r || w;
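The repeated `x && !wxn` terms above implement SCTLR.WXN, which forces any writable region to be treated as execute-never. A standalone sketch of that rule, with the region's write permission `w` and execute-never bit `xn` as assumed inputs:

    // Sketch (not part of the patch): execution is granted only if the
    // region is not execute-never and, when SCTLR.WXN is set, not writable.
    inline bool
    grantExecute(bool w, bool xn, bool wxn)
    {
        return !xn && !(wxn && w);
    }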
// Set memory attributes
TlbEntry temp_te;
temp_te.ns = !isSecure;
- if (isStage2 || hcr.dc == 0 || isSecure ||
+    bool dc = (HaveVirtHostExt(tc) &&
+               hcr.e2h == 1 && hcr.tge == 1) ? false : hcr.dc;
+ bool i_cacheability = sctlr.i && !sctlr.m;
+ if (isStage2 || !dc || isSecure ||
(isHyp && !(tranType & S1CTran))) {
temp_te.mtype = is_fetch ? TlbEntry::MemoryType::Normal
: TlbEntry::MemoryType::StronglyOrdered;
- temp_te.innerAttrs = 0x0;
- temp_te.outerAttrs = 0x0;
+        temp_te.innerAttrs = i_cacheability ? 0x2 : 0x0;
+        temp_te.outerAttrs = i_cacheability ? 0x2 : 0x0;
temp_te.shareable = true;
temp_te.outerShareable = true;
} else {
if (isSecure && !te->ns) {
req->setFlags(Request::SECURE);
}
- if ((!is_fetch) && (vaddr & mask(flags & AlignmentMask)) &&
+ if (!is_fetch && fault == NoFault &&
+ (vaddr & mask(flags & AlignmentMask)) &&
(te->mtype != TlbEntry::MemoryType::Normal)) {
// Unaligned accesses to Device memory should always cause an
// abort regardless of sctlr.a
}
}
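The next hunk derives an effective stage 2 enable from HCR_EL2: with VHE, E2H and TGE together force VM to behave as 0, while DC forces it to behave as 1. A consolidated sketch of that rule (helper name assumed, not part of the patch):

    // Sketch (assumed helper): the effective HCR_EL2.VM used by the
    // translation path below.
    static bool
    effectiveVm(ThreadContext *tc, HCR hcr)
    {
        if (HaveVirtHostExt(tc) && hcr.e2h == 1 && hcr.tge == 1)
            return false;  // EL2&0 host regime: stage 2 is disabled
        if (hcr.dc == 1)
            return true;   // default cacheability implies stage 2 behavior
        return hcr.vm;
    }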
+ bool vm = hcr.vm;
+    if (HaveVirtHostExt(tc) && hcr.e2h == 1 && hcr.tge == 1)
+ vm = 0;
+ else if (hcr.dc == 1)
+ vm = 1;
+
Fault fault = NoFault;
// If guest MMU is off or hcr.vm=0 go straight to stage2
- if ((isStage2 && !hcr.vm) || (!isStage2 && !sctlr.m)) {
+ if ((isStage2 && !vm) || (!isStage2 && !sctlr.m)) {
fault = translateMmuOff(tc, req, mode, tranType, vaddr,
long_desc_format);
} else {
ELIs64(tc, EL2) :
ELIs64(tc, aarch64EL == EL0 ? EL1 : aarch64EL);
+ hcr = tc->readMiscReg(MISCREG_HCR_EL2);
if (aarch64) { // AArch64
// determine EL we need to translate in
switch (aarch64EL) {
case EL0:
+ if (HaveVirtHostExt(tc) && hcr.tge == 1 && hcr.e2h == 1) {
+                // VHE: EL0 runs in the EL2&0 (host) translation regime
+ sctlr = tc->readMiscReg(MISCREG_SCTLR_EL2);
+ ttbcr = tc->readMiscReg(MISCREG_TCR_EL2);
+ uint64_t ttbr_asid = ttbcr.a1 ?
+ tc->readMiscReg(MISCREG_TTBR1_EL2) :
+ tc->readMiscReg(MISCREG_TTBR0_EL2);
+ asid = bits(ttbr_asid,
+ (haveLargeAsid64 && ttbcr.as) ? 63 : 55, 48);
+            } else {
+ sctlr = tc->readMiscReg(MISCREG_SCTLR_EL1);
+ ttbcr = tc->readMiscReg(MISCREG_TCR_EL1);
+ uint64_t ttbr_asid = ttbcr.a1 ?
+ tc->readMiscReg(MISCREG_TTBR1_EL1) :
+ tc->readMiscReg(MISCREG_TTBR0_EL1);
+ asid = bits(ttbr_asid,
+ (haveLargeAsid64 && ttbcr.as) ? 63 : 55, 48);
+            }
+ break;
case EL1:
{
sctlr = tc->readMiscReg(MISCREG_SCTLR_EL1);
case EL2:
sctlr = tc->readMiscReg(MISCREG_SCTLR_EL2);
ttbcr = tc->readMiscReg(MISCREG_TCR_EL2);
- asid = -1;
+ if (hcr.e2h == 1) {
+            // VHE: EL2 runs in the EL2&0 (host) translation regime
+ uint64_t ttbr_asid = ttbcr.a1 ?
+ tc->readMiscReg(MISCREG_TTBR1_EL2) :
+ tc->readMiscReg(MISCREG_TTBR0_EL2);
+ asid = bits(ttbr_asid,
+ (haveLargeAsid64 && ttbcr.as) ? 63 : 55, 48);
+ } else {
+ asid = -1;
+ }
break;
case EL3:
sctlr = tc->readMiscReg(MISCREG_SCTLR_EL3);
asid = -1;
break;
}
- hcr = tc->readMiscReg(MISCREG_HCR_EL2);
+
scr = tc->readMiscReg(MISCREG_SCR_EL3);
isPriv = aarch64EL != EL0;
if (haveVirtualization) {
- vmid = bits(tc->readMiscReg(MISCREG_VTTBR_EL2), 55, 48);
+ vmid = bits(tc->readMiscReg(MISCREG_VTTBR_EL2), 55, 48);
isHyp = aarch64EL == EL2;
isHyp |= tranType & HypMode;
isHyp &= (tranType & S1S2NsTran) == 0;
isHyp &= (tranType & S1CTran) == 0;
+
+        if (hcr.e2h == 1 && (aarch64EL == EL2
+            || (hcr.tge == 1 && aarch64EL == EL0))) {
+ isHyp = true;
+ directToStage2 = false;
+ stage2Req = false;
+ stage2DescReq = false;
+ } else {
// Work out if we should skip the first stage of translation and go
// directly to stage 2. This value is cached so we don't have to
// compute it for every translation.
- stage2Req = isStage2 ||
- (hcr.vm && !isHyp && !isSecure &&
- !(tranType & S1CTran) && (aarch64EL < EL2) &&
- !(tranType & S1E1Tran)); // <--- FIX THIS HACK
- stage2DescReq = isStage2 || (hcr.vm && !isHyp && !isSecure &&
- (aarch64EL < EL2));
- directToStage2 = !isStage2 && stage2Req && !sctlr.m;
+ bool vm = hcr.vm;
+ if (HaveVirtHostExt(tc) && hcr.e2h == 1 && hcr.tge == 1) {
+ vm = 0;
+ }
+
+ stage2Req = isStage2 ||
+ (vm && !isHyp && !isSecure &&
+ !(tranType & S1CTran) && (aarch64EL < EL2) &&
+ !(tranType & S1E1Tran)); // <--- FIX THIS HACK
+ stage2DescReq = isStage2 || (vm && !isHyp && !isSecure &&
+ (aarch64EL < EL2));
+ directToStage2 = !isStage2 && stage2Req && !sctlr.m;
+ }
} else {
vmid = 0;
isHyp = false;
} else {
vaddr = vaddr_tainted;
}
- *te = lookup(vaddr, asid, vmid, isHyp, is_secure, false, false, target_el);
+ *te = lookup(vaddr, asid, vmid, isHyp, is_secure, false, false, target_el,
+ false);
if (*te == NULL) {
if (req->isPrefetch()) {
// if the request is a prefetch don't attempt to fill the TLB or go
return fault;
}
- *te = lookup(vaddr, asid, vmid, isHyp, is_secure, false, false, target_el);
+ *te = lookup(vaddr, asid, vmid, isHyp, is_secure, false, false,
+ target_el, false);
if (!*te)
printTlb();
assert(*te);
*/
TlbEntry *lookup(Addr vpn, uint16_t asn, uint8_t vmid, bool hyp,
bool secure, bool functional,
- bool ignore_asn, ExceptionLevel target_el);
+ bool ignore_asn, ExceptionLevel target_el,
+ bool in_host);
virtual ~TLB();
* @param secure_lookup if the operation affects the secure world
*/
void flushAllSecurity(bool secure_lookup, ExceptionLevel target_el,
- bool ignore_el = false);
+ bool ignore_el = false, bool in_host = false);
/** Remove all entries in the non secure world, depending on whether they
* were allocated in hyp mode or not
*/
void flushAll() override
{
- flushAllSecurity(false, EL0, true);
- flushAllSecurity(true, EL0, true);
+ flushAllSecurity(false, EL0, true, false);
+ flushAllSecurity(true, EL0, true, false);
}
/** Remove any entries that match both a va and asn
* @param secure_lookup if the operation affects the secure world
*/
void flushMvaAsid(Addr mva, uint64_t asn, bool secure_lookup,
- ExceptionLevel target_el);
+ ExceptionLevel target_el, bool in_host = false);
/** Remove any entries that match the asn
* @param asn contextid/asn to flush on match
* @param secure_lookup if the operation affects the secure world
*/
void flushAsid(uint64_t asn, bool secure_lookup,
- ExceptionLevel target_el);
+ ExceptionLevel target_el, bool in_host = false);
/** Remove all entries that match the va regardless of asn
* @param mva address to flush from cache
* @param secure_lookup if the operation affects the secure world
*/
- void flushMva(Addr mva, bool secure_lookup, ExceptionLevel target_el);
+ void flushMva(Addr mva, bool secure_lookup, ExceptionLevel target_el,
+ bool in_host = false);
/**
* Invalidate all entries in the stage 2 TLB that match the given ipa
* @param asn contextid/asn to flush on match
* @param secure_lookup if the operation affects the secure world
* @param ignore_asn if the flush should ignore the asn
+ * @param in_host true if the flush targets the EL2&0 (host) translation
+ *        regime of VHE, i.e. when hcr.e2h == 1 and hcr.tge == 1
*/
void _flushMva(Addr mva, uint64_t asn, bool secure_lookup,
- bool ignore_asn, ExceptionLevel target_el);
+ bool ignore_asn, ExceptionLevel target_el,
+ bool in_host);
public: /* Testing */
Fault testTranslation(const RequestPtr &req, Mode mode,
void
TLBIALL::operator()(ThreadContext* tc)
{
- getITBPtr(tc)->flushAllSecurity(secureLookup, targetEL);
- getDTBPtr(tc)->flushAllSecurity(secureLookup, targetEL);
+ HCR hcr = tc->readMiscReg(MISCREG_HCR_EL2);
+ bool in_host = (hcr.tge == 1 && hcr.e2h == 1);
+ getITBPtr(tc)->flushAllSecurity(secureLookup, targetEL, in_host);
+ getDTBPtr(tc)->flushAllSecurity(secureLookup, targetEL, in_host);
// If CheckerCPU is connected, need to notify it of a flush
CheckerCPU *checker = tc->getCheckerCpuPtr();
if (checker) {
getITBPtr(checker)->flushAllSecurity(secureLookup,
- targetEL);
+ targetEL, in_host);
getDTBPtr(checker)->flushAllSecurity(secureLookup,
- targetEL);
+ targetEL, in_host);
}
}
void
TLBIASID::operator()(ThreadContext* tc)
{
- getITBPtr(tc)->flushAsid(asid, secureLookup, targetEL);
- getDTBPtr(tc)->flushAsid(asid, secureLookup, targetEL);
+ HCR hcr = tc->readMiscReg(MISCREG_HCR_EL2);
+ bool in_host = (hcr.tge == 1 && hcr.e2h == 1);
+ getITBPtr(tc)->flushAsid(asid, secureLookup, targetEL, in_host);
+ getDTBPtr(tc)->flushAsid(asid, secureLookup, targetEL, in_host);
CheckerCPU *checker = tc->getCheckerCpuPtr();
if (checker) {
- getITBPtr(checker)->flushAsid(asid, secureLookup, targetEL);
- getDTBPtr(checker)->flushAsid(asid, secureLookup, targetEL);
+ getITBPtr(checker)->flushAsid(asid, secureLookup, targetEL, in_host);
+ getDTBPtr(checker)->flushAsid(asid, secureLookup, targetEL, in_host);
}
}
void
TLBIMVAA::operator()(ThreadContext* tc)
{
- getITBPtr(tc)->flushMva(addr, secureLookup, targetEL);
- getDTBPtr(tc)->flushMva(addr, secureLookup, targetEL);
+ HCR hcr = tc->readMiscReg(MISCREG_HCR_EL2);
+ bool in_host = (hcr.tge == 1 && hcr.e2h == 1);
+ getITBPtr(tc)->flushMva(addr, secureLookup, targetEL, in_host);
+ getDTBPtr(tc)->flushMva(addr, secureLookup, targetEL, in_host);
CheckerCPU *checker = tc->getCheckerCpuPtr();
if (checker) {
- getITBPtr(checker)->flushMva(addr, secureLookup, targetEL);
- getDTBPtr(checker)->flushMva(addr, secureLookup, targetEL);
+ getITBPtr(checker)->flushMva(addr, secureLookup, targetEL, in_host);
+ getDTBPtr(checker)->flushMva(addr, secureLookup, targetEL, in_host);
}
}
void
TLBIMVA::operator()(ThreadContext* tc)
{
+ HCR hcr = tc->readMiscReg(MISCREG_HCR_EL2);
+ bool in_host = (hcr.tge == 1 && hcr.e2h == 1);
getITBPtr(tc)->flushMvaAsid(addr, asid,
- secureLookup, targetEL);
+ secureLookup, targetEL, in_host);
getDTBPtr(tc)->flushMvaAsid(addr, asid,
- secureLookup, targetEL);
+ secureLookup, targetEL, in_host);
CheckerCPU *checker = tc->getCheckerCpuPtr();
if (checker) {
getITBPtr(checker)->flushMvaAsid(
- addr, asid, secureLookup, targetEL);
+ addr, asid, secureLookup, targetEL, in_host);
getDTBPtr(checker)->flushMvaAsid(
- addr, asid, secureLookup, targetEL);
+ addr, asid, secureLookup, targetEL, in_host);
}
}
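All four TLBI operators recompute `in_host` from HCR_EL2 in exactly the same way; a natural follow-up (a sketch only, not part of this patch) would hoist the test into a shared helper:

    // Sketch (assumed helper): true when the EL2&0 (host) regime of VHE is
    // active, matching the test duplicated in the operators above.
    static bool
    tlbiInHost(ThreadContext *tc)
    {
        const HCR hcr = tc->readMiscReg(MISCREG_HCR_EL2);
        return hcr.tge == 1 && hcr.e2h == 1;
    }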
return el;
else if (ArmSystem::haveEL(tc, EL3) && ELIs32(tc, EL3) && scr.ns == 0)
return EL3;
- else if (ArmSystem::haveVirtualization(tc) && ELIsInHost(tc, el))
+ else if (HaveVirtHostExt(tc) && ELIsInHost(tc, el))
return EL2;
else
return EL1;
ELIsInHost(ThreadContext *tc, ExceptionLevel el)
{
const HCR hcr = tc->readMiscReg(MISCREG_HCR_EL2);
- return ((IsSecureEL2Enabled(tc) || !isSecureBelowEL3(tc)) &&
+ return (ArmSystem::haveEL(tc, EL2) &&
+ (IsSecureEL2Enabled(tc) || !isSecureBelowEL3(tc)) &&
HaveVirtHostExt(tc) && !ELIs32(tc, EL2) && hcr.e2h == 1 &&
(el == EL2 || (el == EL0 && hcr.tge == 1)));
}
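For illustration, with EL2 implemented and using AArch64, the lower levels non-secure, and HCR_EL2.E2H set, the predicate behaves as follows (hypothetical thread context `tc`):

    bool host_el2 = ELIsInHost(tc, EL2); // true: EL2 is part of the regime
    bool host_el0 = ELIsInHost(tc, EL0); // true only if HCR_EL2.TGE is set
    bool host_el1 = ELIsInHost(tc, EL1); // false: EL1 is never "in host"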
bool aarch32_below_el3 = (have_el3 && scr.rw == 0);
HCR hcr = tc->readMiscReg(MISCREG_HCR_EL2);
+    bool secEL2 = false; // Secure EL2 is not implemented yet
bool aarch32_at_el1 = (aarch32_below_el3
- || (have_el2
- && !secure && hcr.rw == 0));
+ || (have_el2
+ && (secEL2 || !isSecureBelowEL3(tc))
+ && hcr.rw == 0 && !(hcr.e2h && hcr.tge
+ && HaveVirtHostExt(tc))));
// Only know if EL0 using AArch32 from PSTATE
if (el == EL0 && !aarch32_at_el1) {
case EL2:
{
TCR tcr = tc->readMiscReg(MISCREG_TCR_EL2);
- if (ArmSystem::haveVirtualization(tc) && ELIsInHost(tc, el)) {
+ if (HaveVirtHostExt(tc) && ELIsInHost(tc, el)) {
tbi = selbit? tcr.tbi1 : tcr.tbi0;
tbid = selbit? tcr.tbid1 : tcr.tbid0;
} else {
TCR tcr, bool isInstr)
{
bool selbit = bits(addr, 55);
-// TCR tcr = tc->readMiscReg(MISCREG_TCR_EL1);
int topbit = computeAddrTop(tc, selbit, isInstr, tcr, el);
if (topbit == 63) {
return (ok);
}
+bool
+isUnpriviledgeAccess(ThreadContext *tc)
+{
+ const HCR hcr = tc->readMiscReg(MISCREG_HCR_EL2);
+ // NV Extension not implemented yet
+ bool have_nv_ext = false;
+ bool unpriv_el1 = currEL(tc) == EL1 &&
+ !(ArmSystem::haveVirtualization(tc) &&
+ have_nv_ext && hcr.nv == 1 && hcr.nv1 == 1);
+ bool unpriv_el2 = ArmSystem::haveEL(tc, EL2) && HaveVirtHostExt(tc) &&
+ currEL(tc) == EL2 && hcr.e2h == 1 && hcr.tge == 1;
+
+ // User Access override, or UAO not implemented yet.
+ bool user_access_override = false;
+ return (unpriv_el1 || unpriv_el2) && !user_access_override;
+}
+
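A plausible consumer of this predicate (an assumption; the call sites are outside this excerpt) is the unprivileged load/store family, which must fall back to EL0 permissions when it holds:

    // Hedged usage sketch: an LDTR/STTR-style access consults the predicate
    // before its permission check.
    if (isUnpriviledgeAccess(tc)) {
        // check permissions as if the access were made from EL0
    }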
bool
SPAlignmentCheckEnabled(ThreadContext* tc)
{
+ ExceptionLevel regime = s1TranslationRegime(tc, currEL(tc));
+
switch (currEL(tc)) {
case EL3:
return ((SCTLR) tc->readMiscReg(MISCREG_SCTLR_EL3)).sa;
case EL1:
return ((SCTLR) tc->readMiscReg(MISCREG_SCTLR_EL1)).sa;
case EL0:
- return ((SCTLR) tc->readMiscReg(MISCREG_SCTLR_EL1)).sa0;
+ {
+            SCTLR sc = (regime == EL2) ? tc->readMiscReg(MISCREG_SCTLR_EL2) :
+                                         tc->readMiscReg(MISCREG_SCTLR_EL1);
+ return sc.sa0;
+ }
default:
panic("Invalid exception level");
break;
return isBigEndian64(tc) ? BigEndianByteOrder : LittleEndianByteOrder;
};
-}
+bool isUnpriviledgeAccess(ThreadContext *tc);
+}
#endif