From: Jordi Vaquero
Date: Sat, 20 Jun 2020 12:22:03 +0000 (+0200)
Subject: arch-arm: Implement ARM8.1-VHE feature
X-Git-Tag: v20.1.0.0~379
X-Git-Url: https://git.libre-soc.org/?a=commitdiff_plain;h=980888eb81635dbca40b11fe557be3fb1da37573;p=gem5.git

arch-arm: Implement ARM8.1-VHE feature

This commit implements the Armv8.1 VHE (Virtualization Host Extensions)
feature. It consists of four parts:

1. Register declaration/initialization and register redirection from EL1
   to EL2: miscregs.cc/hh, miscregs_types.hh, isa.cc, utility.cc/hh
2. Definition of the new EL2&0 translation regime: tlb.cc/hh,
   table_walker.cc, pagetable.hh, tlbi_op.hh, isa.cc (for the TLB
   invalidation functions)
3. Self Debug adaptation for VHE: self_debug.cc
4. Effects on AMO/IMO/FMO interrupt routing: faults.cc, interrupts.hh

JIRA: https://gem5.atlassian.net/browse/GEM5-682

Change-Id: I478389322c295b1ec560571071626373a8c2af61
Reviewed-on: https://gem5-review.googlesource.com/c/public/gem5/+/31177
Maintainer: Giacomo Travaglini
Reviewed-by: Giacomo Travaglini
Tested-by: kokoro
---
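The core of part 1 is a simple redirection rule: when a thread runs at EL2 with HCR_EL2.E2H set, system-register accesses that name certain EL1 registers are routed to their EL2 counterparts instead. The short standalone sketch below is illustrative C++, not gem5 code; the enum values, the three-entry table and the function shape are simplified assumptions modelled on the ISA::redirectRegVHE() helper this patch adds to src/arch/arm/isa.hh:

// Standalone illustration (not gem5 code) of the VHE redirection rule:
// at EL2 with HCR_EL2.E2H == 1, an access naming one of the listed EL1
// registers really touches its EL2 counterpart.
#include <cassert>

enum MiscReg { SCTLR_EL1, SCTLR_EL2, CPACR_EL1, CPTR_EL2, TTBR0_EL1, TTBR0_EL2 };
enum ExceptionLevel { EL0, EL1, EL2, EL3 };

MiscReg redirectRegVHE(MiscReg reg, ExceptionLevel curr_el, bool e2h)
{
    // Redirection only applies in the EL2&0 regime, i.e. E2H set and
    // currently executing at EL2; otherwise the access is left alone.
    if (!e2h || curr_el != EL2)
        return reg;
    switch (reg) {
      case SCTLR_EL1: return SCTLR_EL2;
      case CPACR_EL1: return CPTR_EL2;
      case TTBR0_EL1: return TTBR0_EL2;
      default:        return reg;
    }
}

int main()
{
    // An MRS/MSR naming SCTLR_EL1, executed at EL2 with E2H=1, really
    // accesses SCTLR_EL2; at EL1 (or with E2H=0) it does not.
    assert(redirectRegVHE(SCTLR_EL1, EL2, true) == SCTLR_EL2);
    assert(redirectRegVHE(SCTLR_EL1, EL1, true) == SCTLR_EL1);
    return 0;
}

The helper in the actual patch covers a much longer register list, including the EL0 timer registers (CNTP_*/CNTV_*), which are redirected to CNTHP_*/CNTHV_* or, for secure EL2, to the CNTHPS_*/CNTHVS_* registers introduced here.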
diff --git a/src/arch/arm/ArmISA.py b/src/arch/arm/ArmISA.py
index b2513f7cf..f581d4f1c 100644
--- a/src/arch/arm/ArmISA.py
+++ b/src/arch/arm/ArmISA.py
@@ -108,8 +108,8 @@ class ArmISA(BaseISA):
     # 4K | 64K | !16K | !BigEndEL0 | !SNSMem | !BigEnd | 8b ASID | 40b PA
     id_aa64mmfr0_el1 = Param.UInt64(0x0000000000f00002,
         "AArch64 Memory Model Feature Register 0")
-    # PAN | HPDS
-    id_aa64mmfr1_el1 = Param.UInt64(0x0000000000101000,
+    # PAN | HPDS | VHE
+    id_aa64mmfr1_el1 = Param.UInt64(0x0000000000101100,
         "AArch64 Memory Model Feature Register 1")
     id_aa64mmfr2_el1 = Param.UInt64(0x0000000000000000,
         "AArch64 Memory Model Feature Register 2")
diff --git a/src/arch/arm/insts/misc64.cc b/src/arch/arm/insts/misc64.cc
index f9f00f06a..cdf3ece11 100644
--- a/src/arch/arm/insts/misc64.cc
+++ b/src/arch/arm/insts/misc64.cc
@@ -179,169 +179,459 @@ MiscRegOp64::checkEL2Trap(ThreadContext *tc, const MiscRegIndex misc_reg,
                           uint32_t &immediate) const
 {
     const CPTR cptr = tc->readMiscReg(MISCREG_CPTR_EL2);
+    const SCTLR sctlr = tc->readMiscReg(MISCREG_SCTLR_EL1);
+    const SCTLR sctlr2 = tc->readMiscReg(MISCREG_SCTLR_EL2);
     const HCR hcr = tc->readMiscReg(MISCREG_HCR_EL2);
     const SCR scr = tc->readMiscReg(MISCREG_SCR_EL3);
-    const CPSR cpsr = tc->readMiscReg(MISCREG_CPSR);
+    const HDCR mdcr = tc->readMiscReg(MISCREG_MDCR_EL3);
     bool trap_to_hyp = false;
-    if (!inSecureState(scr, cpsr) && (el != EL2)) {
-        switch (misc_reg) {
-          // FP/SIMD regs
-          case MISCREG_FPCR:
-          case MISCREG_FPSR:
-          case MISCREG_FPEXC32_EL2:
-            trap_to_hyp = cptr.tfp;
+    switch (misc_reg) {
+      case MISCREG_IMPDEF_UNIMPL:
+        trap_to_hyp = EL2Enabled(tc) && hcr.tidcp && el == EL1;
+        break;
+      // GICv3 regs
+      case MISCREG_ICC_SGI0R_EL1:
+        {
+            auto *isa = static_cast(tc->getIsaPtr());
+            if (isa->haveGICv3CpuIfc())
+                trap_to_hyp = EL2Enabled(tc) && hcr.fmo && el == EL1;
+        }
+        break;
+      case MISCREG_ICC_SGI1R_EL1:
+      case MISCREG_ICC_ASGI1R_EL1:
+        {
+            auto *isa = static_cast(tc->getIsaPtr());
+            if (isa->haveGICv3CpuIfc())
+                trap_to_hyp = EL2Enabled(tc) && hcr.imo && el == EL1;
+        }
+        break;
+      case MISCREG_FPCR:
+      case MISCREG_FPSR:
+      case MISCREG_FPEXC32_EL2:
+        {
+            bool from_el2 = (el == EL2) && (scr.ns || scr.eel2) &&
+                            ELIs64(tc,EL2) &&
+                            ((!hcr.e2h && cptr.tfp) ||
+                             (hcr.e2h && (cptr.fpen == 0x0 ||
+                                          cptr.fpen == 0xa)));
+            bool from_el1 = (el == EL1) && hcr.nv &&
+                            (!hcr.e2h || (hcr.e2h && !hcr.tge));
+            trap_to_hyp = from_el2 || from_el1;
             ec = EC_TRAPPED_SIMD_FP;
             immediate = 0x1E00000;
-            break;
-          // CPACR
-          case MISCREG_CPACR_EL1:
-            trap_to_hyp = cptr.tcpac && el == EL1;
-            break;
-          // Virtual memory control regs
-          case MISCREG_SCTLR_EL1:
-          case MISCREG_TTBR0_EL1:
-          case MISCREG_TTBR1_EL1:
-          case MISCREG_TCR_EL1:
-          case MISCREG_ESR_EL1:
-          case MISCREG_FAR_EL1:
-          case MISCREG_AFSR0_EL1:
-          case MISCREG_AFSR1_EL1:
-          case MISCREG_MAIR_EL1:
-          case MISCREG_AMAIR_EL1:
-          case MISCREG_CONTEXTIDR_EL1:
-            trap_to_hyp =
-                ((hcr.trvm && miscRead) || (hcr.tvm && !miscRead)) &&
-                el == EL1;
-            break;
-          // TLB maintenance instructions
-          case MISCREG_TLBI_VMALLE1:
-          case MISCREG_TLBI_VAE1_Xt:
-          case MISCREG_TLBI_ASIDE1_Xt:
-          case MISCREG_TLBI_VAAE1_Xt:
-          case MISCREG_TLBI_VALE1_Xt:
-          case MISCREG_TLBI_VAALE1_Xt:
-          case MISCREG_TLBI_VMALLE1IS:
-          case MISCREG_TLBI_VAE1IS_Xt:
-          case MISCREG_TLBI_ASIDE1IS_Xt:
-          case MISCREG_TLBI_VAAE1IS_Xt:
-          case MISCREG_TLBI_VALE1IS_Xt:
-          case MISCREG_TLBI_VAALE1IS_Xt:
-            trap_to_hyp = hcr.ttlb && el == EL1;
-            break;
-          // Cache maintenance instructions to the point of unification
-          case MISCREG_IC_IVAU_Xt:
-          case MISCREG_ICIALLU:
-          case MISCREG_ICIALLUIS:
-          case MISCREG_DC_CVAU_Xt:
-            trap_to_hyp = hcr.tpu && el <= EL1;
-            break;
-          // Data/Unified cache maintenance instructions to the
-          // point of coherency
-          case MISCREG_DC_IVAC_Xt:
-          case MISCREG_DC_CIVAC_Xt:
-          case MISCREG_DC_CVAC_Xt:
-            trap_to_hyp = hcr.tpc && el <= EL1;
-            break;
-          // Data/Unified cache maintenance instructions by set/way
-          case MISCREG_DC_ISW_Xt:
-          case MISCREG_DC_CSW_Xt:
-          case MISCREG_DC_CISW_Xt:
-            trap_to_hyp = hcr.tsw && el == EL1;
-            break;
-          // ACTLR
-          case MISCREG_ACTLR_EL1:
-            trap_to_hyp = hcr.tacr && el == EL1;
-            break;
-
-          case MISCREG_APDAKeyHi_EL1:
-          case MISCREG_APDAKeyLo_EL1:
-          case MISCREG_APDBKeyHi_EL1:
-          case MISCREG_APDBKeyLo_EL1:
-          case MISCREG_APGAKeyHi_EL1:
-          case MISCREG_APGAKeyLo_EL1:
-          case MISCREG_APIAKeyHi_EL1:
-          case MISCREG_APIAKeyLo_EL1:
-          case MISCREG_APIBKeyHi_EL1:
-          case MISCREG_APIBKeyLo_EL1:
-            trap_to_hyp = el==EL1 && hcr.apk == 0;
-            break;
-          // @todo: Trap implementation-dependent functionality based on
-          // hcr.tidcp
+        }
+        break;
+      case MISCREG_CPACR_EL1:
+        trap_to_hyp = EL2Enabled(tc) && (el == EL1) && cptr.tcpac;
+        break;
+      case MISCREG_SCTLR_EL1:
+      case MISCREG_TTBR0_EL1:
+      case MISCREG_TTBR1_EL1:
+      case MISCREG_TCR_EL1:
+      case MISCREG_ESR_EL1:
+      case MISCREG_FAR_EL1:
+      case MISCREG_AFSR0_EL1:
+      case MISCREG_AFSR1_EL1:
+      case MISCREG_MAIR_EL1:
+      case MISCREG_AMAIR_EL1:
+      case MISCREG_CONTEXTIDR_EL1:
+        {
+            bool tvm = miscRead?
hcr.trvm: hcr.tvm; + trap_to_hyp = EL2Enabled(tc) && (el == EL1) && tvm; + } + break; + case MISCREG_CPACR_EL12: + case MISCREG_SCTLR_EL12: + case MISCREG_TTBR0_EL12: + case MISCREG_TTBR1_EL12: + case MISCREG_TCR_EL12: + case MISCREG_ESR_EL12: + case MISCREG_FAR_EL12: + case MISCREG_AFSR0_EL12: + case MISCREG_AFSR1_EL12: + case MISCREG_MAIR_EL12: + case MISCREG_AMAIR_EL12: + case MISCREG_CONTEXTIDR_EL12: + case MISCREG_SPSR_EL12: + case MISCREG_ELR_EL12: + case MISCREG_VBAR_EL12: + trap_to_hyp = EL2Enabled(tc) && (el == EL1) && + (hcr.nv && (hcr.nv1 || !hcr.nv2)); + break; + case MISCREG_TLBI_VMALLE1: + case MISCREG_TLBI_VAE1_Xt: + case MISCREG_TLBI_ASIDE1_Xt: + case MISCREG_TLBI_VAAE1_Xt: + case MISCREG_TLBI_VALE1_Xt: + case MISCREG_TLBI_VAALE1_Xt: +// case MISCREG_TLBI_RVAE1: +// case MISCREG_TLBI_RVAAE1: +// case MISCREG_TLBI_RVALE1: +// case MISCREG_TLBI_RVAALE1: + case MISCREG_TLBI_VMALLE1IS: + case MISCREG_TLBI_VAE1IS_Xt: + case MISCREG_TLBI_ASIDE1IS_Xt: + case MISCREG_TLBI_VAAE1IS_Xt: + case MISCREG_TLBI_VALE1IS_Xt: + case MISCREG_TLBI_VAALE1IS_Xt: +// case MISCREG_TLBI_RVAE1IS: +// case MISCREG_TLBI_RVAAE1IS: +// case MISCREG_TLBI_RVALE1IS: +// case MISCREG_TLBI_RVAALE1IS: +// case MISCREG_TLBI_VMALLE1OS: +// case MISCREG_TLBI_VAE1OS: +// case MISCREG_TLBI_ASIDE1OS: +// case MISCREG_TLBI_VAAE1OS: +// case MISCREG_TLBI_VALE1OS: +// case MISCREG_TLBI_VAALE1OS: +// case MISCREG_TLBI_RVAE1OS: +// case MISCREG_TLBI_RVAAE1OS: +// case MISCREG_TLBI_RVALE1OS: +// case MISCREG_TLBI_RVAALE1OS: + trap_to_hyp = EL2Enabled(tc) && (el == EL1) && hcr.ttlb; + break; + case MISCREG_IC_IVAU_Xt: + case MISCREG_ICIALLU: + case MISCREG_ICIALLUIS: + trap_to_hyp = (el == EL1) && EL2Enabled(tc) && hcr.tpu; + break; + case MISCREG_DC_CVAU_Xt: + { + const bool el2_en = EL2Enabled(tc); + if (el == EL0 && el2_en) { + const bool in_host = hcr.e2h && hcr.tge; + const bool general_trap = el2_en && !in_host && hcr.tge && + !sctlr.uci; + const bool tpu_trap = el2_en && !in_host && hcr.tpu; + const bool host_trap = el2_en && in_host && !sctlr2.uci; + trap_to_hyp = general_trap || tpu_trap || host_trap; + } + else if (el == EL1 && el2_en) { + trap_to_hyp = hcr.tpu; + } + } + break; + case MISCREG_DC_IVAC_Xt: + trap_to_hyp = EL2Enabled(tc) && el == EL1 && hcr.tpc; + break; + case MISCREG_DC_CVAC_Xt: +// case MISCREG_DC_CVAP_Xt: + case MISCREG_DC_CIVAC_Xt: + { + const bool el2_en = EL2Enabled(tc); + if (el == EL0 && el2_en) { - // ID regs, group 3 - case MISCREG_ID_PFR0_EL1: - case MISCREG_ID_PFR1_EL1: - case MISCREG_ID_DFR0_EL1: - case MISCREG_ID_AFR0_EL1: - case MISCREG_ID_MMFR0_EL1: - case MISCREG_ID_MMFR1_EL1: - case MISCREG_ID_MMFR2_EL1: - case MISCREG_ID_MMFR3_EL1: - case MISCREG_ID_ISAR0_EL1: - case MISCREG_ID_ISAR1_EL1: - case MISCREG_ID_ISAR2_EL1: - case MISCREG_ID_ISAR3_EL1: - case MISCREG_ID_ISAR4_EL1: - case MISCREG_ID_ISAR5_EL1: - case MISCREG_MVFR0_EL1: - case MISCREG_MVFR1_EL1: - case MISCREG_MVFR2_EL1: - case MISCREG_ID_AA64PFR0_EL1: - case MISCREG_ID_AA64PFR1_EL1: - case MISCREG_ID_AA64DFR0_EL1: - case MISCREG_ID_AA64DFR1_EL1: - case MISCREG_ID_AA64ISAR0_EL1: - case MISCREG_ID_AA64ISAR1_EL1: - case MISCREG_ID_AA64MMFR0_EL1: - case MISCREG_ID_AA64MMFR1_EL1: - case MISCREG_ID_AA64MMFR2_EL1: - case MISCREG_ID_AA64AFR0_EL1: - case MISCREG_ID_AA64AFR1_EL1: - assert(miscRead); - trap_to_hyp = hcr.tid3 && el == EL1; - break; - // ID regs, group 2 - case MISCREG_CTR_EL0: - case MISCREG_CCSIDR_EL1: - case MISCREG_CLIDR_EL1: - case MISCREG_CSSELR_EL1: - trap_to_hyp = hcr.tid2 && el <= EL1; - break; - // ID 
regs, group 1 - case MISCREG_AIDR_EL1: - case MISCREG_REVIDR_EL1: - assert(miscRead); - trap_to_hyp = hcr.tid1 && el == EL1; - break; - case MISCREG_IMPDEF_UNIMPL: - trap_to_hyp = hcr.tidcp && el == EL1; - break; - // GICv3 regs - case MISCREG_ICC_SGI0R_EL1: - { - auto *isa = static_cast(tc->getIsaPtr()); - if (isa->haveGICv3CpuIfc()) - trap_to_hyp = hcr.fmo && el == EL1; + const bool in_host = hcr.e2h && hcr.tge; + const bool general_trap = el2_en && !in_host && hcr.tge && + !sctlr.uci; + const bool tpc_trap = el2_en && !in_host && hcr.tpc; + const bool host_trap = el2_en && in_host && !sctlr2.uci; + trap_to_hyp = general_trap || tpc_trap || host_trap; + } else if (el == EL1 && el2_en) { + trap_to_hyp = hcr.tpc; + } + } + break; + case MISCREG_DC_ISW_Xt: + case MISCREG_DC_CSW_Xt: + case MISCREG_DC_CISW_Xt: + trap_to_hyp = EL2Enabled(tc) && (el == EL1) && hcr.tsw; + break; + case MISCREG_ACTLR_EL1: + trap_to_hyp = EL2Enabled (tc) && (el == EL1) && hcr.tacr; + break; + case MISCREG_APDAKeyHi_EL1: + case MISCREG_APDAKeyLo_EL1: + case MISCREG_APDBKeyHi_EL1: + case MISCREG_APDBKeyLo_EL1: + case MISCREG_APGAKeyHi_EL1: + case MISCREG_APGAKeyLo_EL1: + case MISCREG_APIAKeyHi_EL1: + case MISCREG_APIAKeyLo_EL1: + case MISCREG_APIBKeyHi_EL1: + case MISCREG_APIBKeyLo_EL1: + trap_to_hyp = EL2Enabled(tc) && el == EL1 && !hcr.apk; + break; + case MISCREG_ID_PFR0_EL1: + case MISCREG_ID_PFR1_EL1: + //case MISCREG_ID_PFR2_EL1: + case MISCREG_ID_DFR0_EL1: + case MISCREG_ID_AFR0_EL1: + case MISCREG_ID_MMFR0_EL1: + case MISCREG_ID_MMFR1_EL1: + case MISCREG_ID_MMFR2_EL1: + case MISCREG_ID_MMFR3_EL1: + //case MISCREG_ID_MMFR4_EL1: + case MISCREG_ID_ISAR0_EL1: + case MISCREG_ID_ISAR1_EL1: + case MISCREG_ID_ISAR2_EL1: + case MISCREG_ID_ISAR3_EL1: + case MISCREG_ID_ISAR4_EL1: + case MISCREG_ID_ISAR5_EL1: + case MISCREG_MVFR0_EL1: + case MISCREG_MVFR1_EL1: + case MISCREG_MVFR2_EL1: + case MISCREG_ID_AA64PFR0_EL1: + case MISCREG_ID_AA64PFR1_EL1: + case MISCREG_ID_AA64DFR0_EL1: + case MISCREG_ID_AA64DFR1_EL1: + case MISCREG_ID_AA64ISAR0_EL1: + case MISCREG_ID_AA64ISAR1_EL1: + case MISCREG_ID_AA64MMFR0_EL1: + case MISCREG_ID_AA64MMFR1_EL1: + case MISCREG_ID_AA64MMFR2_EL1: + case MISCREG_ID_AA64AFR0_EL1: + case MISCREG_ID_AA64AFR1_EL1: + trap_to_hyp = EL2Enabled(tc) && el == EL1 && hcr.tid3; + break; + case MISCREG_CTR_EL0: + { + const bool el2_en = EL2Enabled(tc); + if (el == EL0 && el2_en) { + const bool in_host = hcr.e2h && hcr.tge; + const bool general_trap = el2_en && !in_host && hcr.tge && + !sctlr.uct; + const bool tid_trap = el2_en && !in_host && hcr.tid2; + const bool host_trap = el2_en && in_host && !sctlr2.uct; + trap_to_hyp = general_trap || tid_trap || host_trap; + } else if (el == EL1 && el2_en) { + trap_to_hyp = hcr.tid2; } - break; - case MISCREG_ICC_SGI1R_EL1: - case MISCREG_ICC_ASGI1R_EL1: - { - auto *isa = static_cast(tc->getIsaPtr()); - if (isa->haveGICv3CpuIfc()) - trap_to_hyp = hcr.imo && el == EL1; + } + break; + case MISCREG_CCSIDR_EL1: +// case MISCREG_CCSIDR2_EL1: + case MISCREG_CLIDR_EL1: + case MISCREG_CSSELR_EL1: + trap_to_hyp = EL2Enabled(tc) && (el == EL1) && hcr.tid2; + break; + case MISCREG_AIDR_EL1: + case MISCREG_REVIDR_EL1: + trap_to_hyp = EL2Enabled(tc) && (el == EL1) && hcr.tid1; + break; + // Generic Timer + case MISCREG_CNTFRQ_EL0 ... 
MISCREG_CNTVOFF_EL2: + trap_to_hyp = el <= EL1 && + isGenericTimerSystemAccessTrapEL2(misc_reg, tc); + break; + case MISCREG_DAIF: + trap_to_hyp = EL2Enabled(tc) && el == EL0 && + (hcr.tge && (hcr.e2h || !sctlr.uma)); + break; + case MISCREG_SPSR_EL1: + case MISCREG_ELR_EL1: + case MISCREG_VBAR_EL1: + trap_to_hyp = EL2Enabled(tc) && (el == EL1) && hcr.nv1 && !hcr.nv2; + break; + case MISCREG_HCR_EL2: + case MISCREG_HSTR_EL2: + case MISCREG_SP_EL1: + case MISCREG_TPIDR_EL2: + case MISCREG_VTCR_EL2: + case MISCREG_VTTBR_EL2: + trap_to_hyp = EL2Enabled(tc) && (el == EL1) && hcr.nv && !hcr.nv2; + break; +// case MISCREG_AT_S1E1WP_Xt: +// case MISCREG_AT_S1E1RP_Xt: + case MISCREG_AT_S1E1R_Xt: + case MISCREG_AT_S1E1W_Xt: + case MISCREG_AT_S1E0W_Xt: + case MISCREG_AT_S1E0R_Xt: + trap_to_hyp = EL2Enabled(tc) && (el == EL1) && hcr.at; + break; + case MISCREG_ACTLR_EL2: + case MISCREG_AFSR0_EL2: + case MISCREG_AFSR1_EL2: + case MISCREG_AMAIR_EL2: + case MISCREG_CONTEXTIDR_EL2: + case MISCREG_CPTR_EL2: + case MISCREG_DACR32_EL2: + case MISCREG_ESR_EL2: + case MISCREG_FAR_EL2: + case MISCREG_HACR_EL2: + case MISCREG_HPFAR_EL2: + case MISCREG_MAIR_EL2: +// case MISCREG_RMR_EL2: + case MISCREG_SCTLR_EL2: + case MISCREG_TCR_EL2: + case MISCREG_TTBR0_EL2: + case MISCREG_TTBR1_EL2: + case MISCREG_VBAR_EL2: + case MISCREG_VMPIDR_EL2: + case MISCREG_VPIDR_EL2: + case MISCREG_TLBI_ALLE1: + case MISCREG_TLBI_ALLE1IS: +// case MISCREG_TLBI_ALLE1OS: + case MISCREG_TLBI_ALLE2: + case MISCREG_TLBI_ALLE2IS: +// case MISCREG_TLBI_ALLE2OS: + case MISCREG_TLBI_IPAS2E1_Xt: + case MISCREG_TLBI_IPAS2E1IS_Xt: +// case MISCREG_TLBI_IPAS2E1OS: + case MISCREG_TLBI_IPAS2LE1_Xt: + case MISCREG_TLBI_IPAS2LE1IS_Xt: +// case MISCREG_TLBI_IPAS2LE1OS: +// case MISCREG_TLBI_RIPAS2E1: +// case MISCREG_TLBI_RIPAS2E1IS: +// case MISCREG_TLBI_RIPAS2E1OS: +// case MISCREG_TLBI_RIPAS2LE1: +// case MISCREG_TLBI_RIPAS2LE1IS: +// case MISCREG_TLBI_RIPAS2LE1OS: +// case MISCREG_TLBI_RVAE2: +// case MISCREG_TLBI_RVAE2IS: +// case MISCREG_TLBI_RVAE2OS: +// case MISCREG_TLBI_RVALE2: +// case MISCREG_TLBI_RVALE2IS: +// case MISCREG_TLBI_RVALE2OS: + case MISCREG_TLBI_VAE2_Xt: + case MISCREG_TLBI_VAE2IS_Xt: +// case MISCREG_TLBI_VAE2OS: + case MISCREG_TLBI_VALE2_Xt: + case MISCREG_TLBI_VALE2IS_Xt: +// case MISCREG_TLBI_VALE2OS: + case MISCREG_TLBI_VMALLS12E1: + case MISCREG_TLBI_VMALLS12E1IS: +// case MISCREG_TLBI_VMALLS12E1OS: + case MISCREG_AT_S1E2W_Xt: + case MISCREG_AT_S1E2R_Xt: + case MISCREG_AT_S12E1R_Xt: + case MISCREG_AT_S12E1W_Xt: + case MISCREG_AT_S12E0W_Xt: + case MISCREG_AT_S12E0R_Xt: + case MISCREG_SPSR_UND: + case MISCREG_SPSR_IRQ: + case MISCREG_SPSR_FIQ: + case MISCREG_SPSR_ABT: + case MISCREG_SPSR_EL2: + case MISCREG_ELR_EL2: + case MISCREG_IFSR32_EL2: + case MISCREG_DBGVCR32_EL2: + case MISCREG_MDCR_EL2: + trap_to_hyp = EL2Enabled(tc) && (el == EL1) && hcr.nv; + break; +// case MISCREG_VSTTBR_EL2: +// case MISCREG_VSTCR_EL2: +// trap_to_hyp = (el == EL1) && !scr.ns && scr.eel2 && ELIs64(tc,EL2) +// && !hcr.nv2 && hcr.nv && (!hcr.e2h|| (hcr.e2h && !hcr.tge)); +// break; + + //case MISCREG_LORC_EL1: + //case MISCREG_LOREA_EL1: + //case MISCREG_LORID_EL1: + //case MISCREG_LORN_EL1: + //case MISCREG_LORSA_EL1: + // trap_to_hyp = (el == EL1) && (scr.ns || scr.eel2) && ELIs64(tc,EL2) + // && hcr.tlor && (!hcr.e2h || (hcr.e2h && !hcr.tge)); + // break; + + case MISCREG_DC_ZVA_Xt: + { + const bool el2_en = EL2Enabled(tc); + if (el == EL0 && el2_en) { + const bool in_host = hcr.e2h && hcr.tge; + const bool general_trap = el2_en && !in_host 
&& hcr.tge && + !sctlr.dze; + const bool tdz_trap = el2_en && !in_host && hcr.tdz; + const bool host_trap = el2_en && in_host && !sctlr2.dze; + trap_to_hyp = general_trap || tdz_trap || host_trap; + } else if (el == EL1 && el2_en) { + trap_to_hyp = hcr.tdz; } - break; - // Generic Timer - case MISCREG_CNTFRQ_EL0 ... MISCREG_CNTVOFF_EL2: - trap_to_hyp = el <= EL1 && - isGenericTimerSystemAccessTrapEL2(misc_reg, tc); - break; - default: - break; } + break; + case MISCREG_DBGBVR0_EL1: + case MISCREG_DBGBVR1_EL1: + case MISCREG_DBGBVR2_EL1: + case MISCREG_DBGBVR3_EL1: + case MISCREG_DBGBVR4_EL1: + case MISCREG_DBGBVR5_EL1: + case MISCREG_DBGBVR6_EL1: + case MISCREG_DBGBVR7_EL1: + case MISCREG_DBGBVR8_EL1: + case MISCREG_DBGBVR9_EL1: + case MISCREG_DBGBVR10_EL1: + case MISCREG_DBGBVR11_EL1: + case MISCREG_DBGBVR12_EL1: + case MISCREG_DBGBVR13_EL1: + case MISCREG_DBGBVR14_EL1: + case MISCREG_DBGBVR15_EL1: + case MISCREG_DBGBCR0_EL1: + case MISCREG_DBGBCR1_EL1: + case MISCREG_DBGBCR2_EL1: + case MISCREG_DBGBCR3_EL1: + case MISCREG_DBGBCR4_EL1: + case MISCREG_DBGBCR5_EL1: + case MISCREG_DBGBCR6_EL1: + case MISCREG_DBGBCR7_EL1: + case MISCREG_DBGBCR8_EL1: + case MISCREG_DBGBCR9_EL1: + case MISCREG_DBGBCR10_EL1: + case MISCREG_DBGBCR11_EL1: + case MISCREG_DBGBCR12_EL1: + case MISCREG_DBGBCR13_EL1: + case MISCREG_DBGBCR14_EL1: + case MISCREG_DBGBCR15_EL1: + case MISCREG_DBGWVR0_EL1: + case MISCREG_DBGWVR1_EL1: + case MISCREG_DBGWVR2_EL1: + case MISCREG_DBGWVR3_EL1: + case MISCREG_DBGWVR4_EL1: + case MISCREG_DBGWVR5_EL1: + case MISCREG_DBGWVR6_EL1: + case MISCREG_DBGWVR7_EL1: + case MISCREG_DBGWVR8_EL1: + case MISCREG_DBGWVR9_EL1: + case MISCREG_DBGWVR10_EL1: + case MISCREG_DBGWVR11_EL1: + case MISCREG_DBGWVR12_EL1: + case MISCREG_DBGWVR13_EL1: + case MISCREG_DBGWVR14_EL1: + case MISCREG_DBGWVR15_EL1: + case MISCREG_DBGWCR0_EL1: + case MISCREG_DBGWCR1_EL1: + case MISCREG_DBGWCR2_EL1: + case MISCREG_DBGWCR3_EL1: + case MISCREG_DBGWCR4_EL1: + case MISCREG_DBGWCR5_EL1: + case MISCREG_DBGWCR6_EL1: + case MISCREG_DBGWCR7_EL1: + case MISCREG_DBGWCR8_EL1: + case MISCREG_DBGWCR9_EL1: + case MISCREG_DBGWCR10_EL1: + case MISCREG_DBGWCR11_EL1: + case MISCREG_DBGWCR12_EL1: + case MISCREG_DBGWCR13_EL1: + case MISCREG_DBGWCR14_EL1: + case MISCREG_DBGWCR15_EL1: + case MISCREG_MDCCINT_EL1: + trap_to_hyp = EL2Enabled(tc) && (el == EL1) && mdcr.tda; + break; + case MISCREG_ZCR_EL1: + { + bool from_el1 = (el == EL1) && EL2Enabled(tc) && + ELIs64(tc, EL2) && ((!hcr.e2h && cptr.tz) || + (hcr.e2h && ((cptr.zen & 0x1) == 0x0))); + bool from_el2 = (el == EL2) && ((!hcr.e2h && cptr.tz) || + (hcr.e2h && ((cptr.zen & 0x1) == 0x0))); + trap_to_hyp = from_el1 || from_el2; + } + ec = EC_TRAPPED_SVE; + immediate = 0; + break; + case MISCREG_ZCR_EL2: + { + bool from_el1 = (el == EL1) && EL2Enabled(tc) && hcr.nv; + bool from_el2 = (el == EL2) && ((!hcr.e2h && cptr.tz) || + (hcr.e2h && ((cptr.zen & 0x1) == 0x0))); + trap_to_hyp = from_el1 || from_el2; + ec = from_el1 ? 
EC_TRAPPED_MSR_MRS_64: EC_TRAPPED_SVE; + } + immediate = 0; + break; + default: + break; } return trap_to_hyp; } @@ -353,6 +643,8 @@ MiscRegOp64::checkEL3Trap(ThreadContext *tc, const MiscRegIndex misc_reg, { const CPTR cptr = tc->readMiscReg(MISCREG_CPTR_EL3); const SCR scr = tc->readMiscReg(MISCREG_SCR_EL3); + const HDCR mdcr = tc->readMiscReg(MISCREG_MDCR_EL3); + const HCR hcr = tc->readMiscReg(MISCREG_HCR_EL2); bool trap_to_mon = false; switch (misc_reg) { @@ -360,21 +652,35 @@ MiscRegOp64::checkEL3Trap(ThreadContext *tc, const MiscRegIndex misc_reg, case MISCREG_FPCR: case MISCREG_FPSR: case MISCREG_FPEXC32_EL2: - trap_to_mon = cptr.tfp; + trap_to_mon = cptr.tfp && ELIs64(tc, EL3); ec = EC_TRAPPED_SIMD_FP; immediate = 0x1E00000; break; // CPACR, CPTR + case MISCREG_CPACR_EL12: + trap_to_mon = ((el == EL2 && cptr.tcpac && ELIs64(tc, EL3)) || + (el == EL1 && cptr.tcpac && ELIs64(tc, EL3) && + (!hcr.nv2 || hcr.nv1 || !hcr.nv))) ; + break; case MISCREG_CPACR_EL1: - if (el == EL1 || el == EL2) { - trap_to_mon = cptr.tcpac; - } + trap_to_mon = el <= EL2 && cptr.tcpac && ELIs64(tc, EL3); break; case MISCREG_CPTR_EL2: if (el == EL2) { trap_to_mon = cptr.tcpac; } break; +// case MISCREG_LORC_EL1: +// case MISCREG_LOREA_EL1: +// case MISCREG_LORID_EL1: +// case MISCREG_LORN_EL1: +// case MISCREG_LORSA_EL1: +// trap_to_mon = (el <= EL2) && scr.ns && ELIs64(tc,EL3) +// && hcr.tlor && (!hcr.e2h || (hcr.e2h && !hcr.tge)); +// break; + case MISCREG_MDCCSR_EL0: + trap_to_mon = (el <= EL2) && ELIs64(tc, EL3) && mdcr.tda == 0x1; + break; case MISCREG_APDAKeyHi_EL1: case MISCREG_APDAKeyLo_EL1: case MISCREG_APDBKeyHi_EL1: @@ -385,13 +691,100 @@ MiscRegOp64::checkEL3Trap(ThreadContext *tc, const MiscRegIndex misc_reg, case MISCREG_APIAKeyLo_EL1: case MISCREG_APIBKeyHi_EL1: case MISCREG_APIBKeyLo_EL1: - trap_to_mon = (el==EL1 || el==EL2) && scr.apk==0 && ELIs64(tc, EL3); + trap_to_mon = (el == EL1 || el == EL2) && scr.apk == 0 && + ELIs64(tc, EL3); break; // Generic Timer case MISCREG_CNTFRQ_EL0 ... 
MISCREG_CNTVOFF_EL2: trap_to_mon = el == EL1 && isGenericTimerSystemAccessTrapEL3(misc_reg, tc); break; + case MISCREG_DBGBVR0_EL1: + case MISCREG_DBGBVR1_EL1: + case MISCREG_DBGBVR2_EL1: + case MISCREG_DBGBVR3_EL1: + case MISCREG_DBGBVR4_EL1: + case MISCREG_DBGBVR5_EL1: + case MISCREG_DBGBVR6_EL1: + case MISCREG_DBGBVR7_EL1: + case MISCREG_DBGBVR8_EL1: + case MISCREG_DBGBVR9_EL1: + case MISCREG_DBGBVR10_EL1: + case MISCREG_DBGBVR11_EL1: + case MISCREG_DBGBVR12_EL1: + case MISCREG_DBGBVR13_EL1: + case MISCREG_DBGBVR14_EL1: + case MISCREG_DBGBVR15_EL1: + case MISCREG_DBGBCR0_EL1: + case MISCREG_DBGBCR1_EL1: + case MISCREG_DBGBCR2_EL1: + case MISCREG_DBGBCR3_EL1: + case MISCREG_DBGBCR4_EL1: + case MISCREG_DBGBCR5_EL1: + case MISCREG_DBGBCR6_EL1: + case MISCREG_DBGBCR7_EL1: + case MISCREG_DBGBCR8_EL1: + case MISCREG_DBGBCR9_EL1: + case MISCREG_DBGBCR10_EL1: + case MISCREG_DBGBCR11_EL1: + case MISCREG_DBGBCR12_EL1: + case MISCREG_DBGBCR13_EL1: + case MISCREG_DBGBCR14_EL1: + case MISCREG_DBGBCR15_EL1: + case MISCREG_DBGVCR32_EL2: + case MISCREG_DBGWVR0_EL1: + case MISCREG_DBGWVR1_EL1: + case MISCREG_DBGWVR2_EL1: + case MISCREG_DBGWVR3_EL1: + case MISCREG_DBGWVR4_EL1: + case MISCREG_DBGWVR5_EL1: + case MISCREG_DBGWVR6_EL1: + case MISCREG_DBGWVR7_EL1: + case MISCREG_DBGWVR8_EL1: + case MISCREG_DBGWVR9_EL1: + case MISCREG_DBGWVR10_EL1: + case MISCREG_DBGWVR11_EL1: + case MISCREG_DBGWVR12_EL1: + case MISCREG_DBGWVR13_EL1: + case MISCREG_DBGWVR14_EL1: + case MISCREG_DBGWVR15_EL1: + case MISCREG_DBGWCR0_EL1: + case MISCREG_DBGWCR1_EL1: + case MISCREG_DBGWCR2_EL1: + case MISCREG_DBGWCR3_EL1: + case MISCREG_DBGWCR4_EL1: + case MISCREG_DBGWCR5_EL1: + case MISCREG_DBGWCR6_EL1: + case MISCREG_DBGWCR7_EL1: + case MISCREG_DBGWCR8_EL1: + case MISCREG_DBGWCR9_EL1: + case MISCREG_DBGWCR10_EL1: + case MISCREG_DBGWCR11_EL1: + case MISCREG_DBGWCR12_EL1: + case MISCREG_DBGWCR13_EL1: + case MISCREG_DBGWCR14_EL1: + case MISCREG_DBGWCR15_EL1: + case MISCREG_MDCCINT_EL1: + case MISCREG_MDCR_EL2: + trap_to_mon = ELIs64(tc, EL3) && mdcr.tda && (el == EL2); + break; + case MISCREG_ZCR_EL1: + trap_to_mon = !cptr.ez && ((el == EL3) || + ((el <= EL2) && ArmSystem::haveEL(tc,EL3) && ELIs64(tc, EL3))); + ec = EC_TRAPPED_SVE; + immediate = 0; + break; + case MISCREG_ZCR_EL2: + trap_to_mon = !cptr.ez && ((el == EL3) || + ((el == EL2) && ArmSystem::haveEL(tc,EL3) && ELIs64(tc, EL3))); + ec = EC_TRAPPED_SVE; + immediate = 0; + break; + case MISCREG_ZCR_EL3: + trap_to_mon = !cptr.ez && (el == EL3); + ec = EC_TRAPPED_SVE; + immediate = 0; + break; default: break; } diff --git a/src/arch/arm/insts/static_inst.cc b/src/arch/arm/insts/static_inst.cc index f18898790..0cbd77645 100644 --- a/src/arch/arm/insts/static_inst.cc +++ b/src/arch/arm/insts/static_inst.cc @@ -672,16 +672,37 @@ ArmStaticInst::advSIMDFPAccessTrap64(ExceptionLevel el) const Fault ArmStaticInst::checkFPAdvSIMDTrap64(ThreadContext *tc, CPSR cpsr) const { - if (ArmSystem::haveVirtualization(tc) && !inSecureState(tc)) { - HCPTR cptrEnCheck = tc->readMiscReg(MISCREG_CPTR_EL2); - if (cptrEnCheck.tfp) + if (currEL(tc) <= EL2 && EL2Enabled(tc)) { + bool trap_el2 = false; + CPTR cptr_en_check = tc->readMiscReg(MISCREG_CPTR_EL2); + HCR hcr = tc->readMiscReg(MISCREG_HCR_EL2); + if (HaveVirtHostExt(tc) && hcr.e2h == 0x1) { + switch (cptr_en_check.fpen) { + case 0: + case 2: + trap_el2 = !(currEL(tc) == EL1 && hcr.tge == 1); + break; + case 1: + trap_el2 = (currEL(tc) == EL0 && hcr.tge == 1); + break; + default: + trap_el2 = false; + break; + } + } else if (cptr_en_check.tfp) { 
+ trap_el2 = true; + } + + if (trap_el2) { return advSIMDFPAccessTrap64(EL2); + } } if (ArmSystem::haveSecurity(tc)) { - HCPTR cptrEnCheck = tc->readMiscReg(MISCREG_CPTR_EL3); - if (cptrEnCheck.tfp) + CPTR cptr_en_check = tc->readMiscReg(MISCREG_CPTR_EL3); + if (cptr_en_check.tfp) { return advSIMDFPAccessTrap64(EL3); + } } return NoFault; @@ -768,8 +789,8 @@ ArmStaticInst::checkAdvSIMDOrFPEnabled32(ThreadContext *tc, } if (have_security && ELIs64(tc, EL3)) { - HCPTR cptrEnCheck = tc->readMiscReg(MISCREG_CPTR_EL3); - if (cptrEnCheck.tfp) + HCPTR cptr_en_check = tc->readMiscReg(MISCREG_CPTR_EL3); + if (cptr_en_check.tfp) return advSIMDFPAccessTrap64(EL3); } @@ -1008,10 +1029,24 @@ ArmStaticInst::checkSveEnabled(ThreadContext *tc, CPSR cpsr, CPACR cpacr) const // Check if access disabled in CPTR_EL2 if (el <= EL2 && EL2Enabled(tc)) { CPTR cptr_en_check = tc->readMiscReg(MISCREG_CPTR_EL2); - if (cptr_en_check.tz) - return sveAccessTrap(EL2); - if (cptr_en_check.tfp) - return advSIMDFPAccessTrap64(EL2); + HCR hcr = tc->readMiscReg(MISCREG_HCR_EL2); + if (HaveVirtHostExt(tc) && hcr.e2h) { + if (((cptr_en_check.zen & 0x1) == 0x0) || + (cptr_en_check.zen == 0x1 && el == EL0 && + hcr.tge == 0x1)) { + return sveAccessTrap(EL2); + } + if (((cptr_en_check.fpen & 0x1) == 0x0) || + (cptr_en_check.fpen == 0x1 && el == EL0 && + hcr.tge == 0x1)) { + return advSIMDFPAccessTrap64(EL2); + } + } else { + if (cptr_en_check.tz == 1) + return sveAccessTrap(EL2); + if (cptr_en_check.tfp == 1) + return advSIMDFPAccessTrap64(EL2); + } } // Check if access disabled in CPTR_EL3 diff --git a/src/arch/arm/interrupts.hh b/src/arch/arm/interrupts.hh index 8e78f966a..99a149264 100644 --- a/src/arch/arm/interrupts.hh +++ b/src/arch/arm/interrupts.hh @@ -132,11 +132,23 @@ class Interrupts : public BaseInterrupts CPSR cpsr = tc->readMiscReg(MISCREG_CPSR); + bool no_vhe = !HaveVirtHostExt(tc); + bool amo, fmo, imo; + if (hcr.tge == 1){ + amo = (no_vhe || hcr.e2h == 0); + fmo = (no_vhe || hcr.e2h == 0); + imo = (no_vhe || hcr.e2h == 0); + } else { + amo = hcr.amo; + fmo = hcr.fmo; + imo = hcr.imo; + } + bool isHypMode = currEL(tc) == EL2; bool isSecure = inSecureState(tc); - bool allowVIrq = !cpsr.i && hcr.imo && !isSecure && !isHypMode; - bool allowVFiq = !cpsr.f && hcr.fmo && !isSecure && !isHypMode; - bool allowVAbort = !cpsr.a && hcr.amo && !isSecure && !isHypMode; + bool allowVIrq = !cpsr.i && imo && !isSecure && !isHypMode; + bool allowVFiq = !cpsr.f && fmo && !isSecure && !isHypMode; + bool allowVAbort = !cpsr.a && amo && !isSecure && !isHypMode; if ( !(intStatus || (hcr.vi && allowVIrq) || (hcr.vf && allowVFiq) || (hcr.va && allowVAbort)) ) @@ -219,14 +231,26 @@ class Interrupts : public BaseInterrupts HCR hcr = tc->readMiscReg(MISCREG_HCR); CPSR cpsr = tc->readMiscReg(MISCREG_CPSR); + bool no_vhe = !HaveVirtHostExt(tc); + bool amo, fmo, imo; + if (hcr.tge == 1){ + amo = (no_vhe || hcr.e2h == 0); + fmo = (no_vhe || hcr.e2h == 0); + imo = (no_vhe || hcr.e2h == 0); + } else { + amo = hcr.amo; + fmo = hcr.fmo; + imo = hcr.imo; + } + // Calculate a few temp vars so we can work out if there's a pending // virtual interrupt, and if its allowed to happen // ARM ARM Issue C section B1.9.9, B1.9.11, and B1.9.13 bool isHypMode = currEL(tc) == EL2; bool isSecure = inSecureState(tc); - bool allowVIrq = !cpsr.i && hcr.imo && !isSecure && !isHypMode; - bool allowVFiq = !cpsr.f && hcr.fmo && !isSecure && !isHypMode; - bool allowVAbort = !cpsr.a && hcr.amo && !isSecure && !isHypMode; + bool allowVIrq = !cpsr.i && imo && !isSecure && 
!isHypMode; + bool allowVFiq = !cpsr.f && fmo && !isSecure && !isHypMode; + bool allowVAbort = !cpsr.a && amo && !isSecure && !isHypMode; bool take_irq = takeInt(INT_IRQ); bool take_fiq = takeInt(INT_FIQ); diff --git a/src/arch/arm/isa.cc b/src/arch/arm/isa.cc index 82d936924..b71ee6384 100644 --- a/src/arch/arm/isa.cc +++ b/src/arch/arm/isa.cc @@ -507,6 +507,7 @@ ISA::readMiscReg(int misc_reg) miscRegName[misc_reg]); } #endif + misc_reg = redirectRegVHE(tc, misc_reg); switch (unflattenMiscReg(misc_reg)) { case MISCREG_HCR: @@ -859,6 +860,8 @@ ISA::setMiscReg(int misc_reg, RegVal val) miscRegName[misc_reg], val); } #endif + misc_reg = redirectRegVHE(tc, misc_reg); + switch (unflattenMiscReg(misc_reg)) { case MISCREG_CPACR: { @@ -1707,7 +1710,6 @@ ISA::setMiscReg(int misc_reg, RegVal val) } // AArch64 TLB Invalidate All, EL1 case MISCREG_TLBI_ALLE1: - case MISCREG_TLBI_VMALLE1: case MISCREG_TLBI_VMALLS12E1: // @todo: handle VMID and stage 2 to enable Virtualization { @@ -1718,9 +1720,21 @@ ISA::setMiscReg(int misc_reg, RegVal val) tlbiOp(tc); return; } + case MISCREG_TLBI_VMALLE1: + // @todo: handle VMID and stage 2 to enable Virtualization + { + assert64(); + scr = readMiscReg(MISCREG_SCR); + + HCR hcr = readMiscReg(MISCREG_HCR_EL2); + bool is_host = (hcr.tge && hcr.e2h); + ExceptionLevel target_el = is_host ? EL2 : EL1; + TLBIALL tlbiOp(target_el, haveSecurity && !scr.ns); + tlbiOp(tc); + return; + } // AArch64 TLB Invalidate All, EL1, Inner Shareable case MISCREG_TLBI_ALLE1IS: - case MISCREG_TLBI_VMALLE1IS: case MISCREG_TLBI_VMALLS12E1IS: // @todo: handle VMID and stage 2 to enable Virtualization { @@ -1731,6 +1745,19 @@ ISA::setMiscReg(int misc_reg, RegVal val) tlbiOp.broadcast(tc); return; } + case MISCREG_TLBI_VMALLE1IS: + // @todo: handle VMID and stage 2 to enable Virtualization + { + assert64(); + scr = readMiscReg(MISCREG_SCR); + + HCR hcr = readMiscReg(MISCREG_HCR_EL2); + bool is_host = (hcr.tge && hcr.e2h); + ExceptionLevel target_el = is_host ? EL2 : EL1; + TLBIALL tlbiOp(target_el, haveSecurity && !scr.ns); + tlbiOp.broadcast(tc); + return; + } // VAEx(IS) and VALEx(IS) are the same because TLBs // only store entries // from the last level of translation table walks @@ -1796,7 +1823,10 @@ ISA::setMiscReg(int misc_reg, RegVal val) auto asid = haveLargeAsid64 ? bits(newVal, 63, 48) : bits(newVal, 55, 48); - TLBIMVA tlbiOp(EL1, haveSecurity && !scr.ns, + HCR hcr = readMiscReg(MISCREG_HCR_EL2); + bool is_host = (hcr.tge && hcr.e2h); + ExceptionLevel target_el = is_host ? EL2 : EL1; + TLBIMVA tlbiOp(target_el, haveSecurity && !scr.ns, static_cast(bits(newVal, 43, 0)) << 12, asid); @@ -1812,9 +1842,12 @@ ISA::setMiscReg(int misc_reg, RegVal val) auto asid = haveLargeAsid64 ? bits(newVal, 63, 48) : bits(newVal, 55, 48); - TLBIMVA tlbiOp(EL1, haveSecurity && !scr.ns, - static_cast(bits(newVal, 43, 0)) << 12, - asid); + HCR hcr = readMiscReg(MISCREG_HCR_EL2); + bool is_host = (hcr.tge && hcr.e2h); + ExceptionLevel target_el = is_host ? EL2 : EL1; + TLBIMVA tlbiOp(target_el, haveSecurity && !scr.ns, + static_cast(bits(newVal, 43, 0)) << 12, + asid); tlbiOp.broadcast(tc); return; @@ -1828,7 +1861,10 @@ ISA::setMiscReg(int misc_reg, RegVal val) auto asid = haveLargeAsid64 ? bits(newVal, 63, 48) : bits(newVal, 55, 48); - TLBIASID tlbiOp(EL1, haveSecurity && !scr.ns, asid); + HCR hcr = readMiscReg(MISCREG_HCR_EL2); + bool is_host = (hcr.tge && hcr.e2h); + ExceptionLevel target_el = is_host ? 
EL2 : EL1; + TLBIASID tlbiOp(target_el, haveSecurity && !scr.ns, asid); tlbiOp(tc); return; } @@ -1840,7 +1876,10 @@ ISA::setMiscReg(int misc_reg, RegVal val) auto asid = haveLargeAsid64 ? bits(newVal, 63, 48) : bits(newVal, 55, 48); - TLBIASID tlbiOp(EL1, haveSecurity && !scr.ns, asid); + HCR hcr = readMiscReg(MISCREG_HCR_EL2); + bool is_host = (hcr.tge && hcr.e2h); + ExceptionLevel target_el = is_host ? EL2 : EL1; + TLBIASID tlbiOp(target_el, haveSecurity && !scr.ns, asid); tlbiOp.broadcast(tc); return; } @@ -1853,7 +1892,10 @@ ISA::setMiscReg(int misc_reg, RegVal val) assert64(); scr = readMiscReg(MISCREG_SCR); - TLBIMVAA tlbiOp(EL1, haveSecurity && !scr.ns, + HCR hcr = readMiscReg(MISCREG_HCR_EL2); + bool is_host = (hcr.tge && hcr.e2h); + ExceptionLevel target_el = is_host ? EL2 : EL1; + TLBIMVAA tlbiOp(target_el, haveSecurity && !scr.ns, static_cast(bits(newVal, 43, 0)) << 12); tlbiOp(tc); @@ -1866,7 +1908,10 @@ ISA::setMiscReg(int misc_reg, RegVal val) assert64(); scr = readMiscReg(MISCREG_SCR); - TLBIMVAA tlbiOp(EL1, haveSecurity && !scr.ns, + HCR hcr = readMiscReg(MISCREG_HCR_EL2); + bool is_host = (hcr.tge && hcr.e2h); + ExceptionLevel target_el = is_host ? EL2 : EL1; + TLBIMVAA tlbiOp(target_el, haveSecurity && !scr.ns, static_cast(bits(newVal, 43, 0)) << 12); tlbiOp.broadcast(tc); diff --git a/src/arch/arm/isa.hh b/src/arch/arm/isa.hh index be57f4115..00c29bc77 100644 --- a/src/arch/arm/isa.hh +++ b/src/arch/arm/isa.hh @@ -684,6 +684,78 @@ namespace ArmISA return flat_idx; } + /** + * Returns the enconcing equivalent when VHE is implemented and + * HCR_EL2.E2H is enabled and executing at EL2 + */ + int + redirectRegVHE(ThreadContext * tc, int misc_reg) + { + const HCR hcr = readMiscRegNoEffect(MISCREG_HCR_EL2); + if (hcr.e2h == 0x0 || currEL(tc) != EL2) + return misc_reg; + SCR scr = readMiscRegNoEffect(MISCREG_SCR_EL3); + bool sec_el2 = scr.eel2 && false; + switch(misc_reg) { + case MISCREG_SPSR_EL1: + return MISCREG_SPSR_EL2; + case MISCREG_ELR_EL1: + return MISCREG_ELR_EL2; + case MISCREG_SCTLR_EL1: + return MISCREG_SCTLR_EL2; + case MISCREG_CPACR_EL1: + return MISCREG_CPTR_EL2; + // case : + // return MISCREG_TRFCR_EL2; + case MISCREG_TTBR0_EL1: + return MISCREG_TTBR0_EL2; + case MISCREG_TTBR1_EL1: + return MISCREG_TTBR1_EL2; + case MISCREG_TCR_EL1: + return MISCREG_TCR_EL2; + case MISCREG_AFSR0_EL1: + return MISCREG_AFSR0_EL2; + case MISCREG_AFSR1_EL1: + return MISCREG_AFSR1_EL2; + case MISCREG_ESR_EL1: + return MISCREG_ESR_EL2; + case MISCREG_FAR_EL1: + return MISCREG_FAR_EL2; + case MISCREG_MAIR_EL1: + return MISCREG_MAIR_EL2; + case MISCREG_AMAIR_EL1: + return MISCREG_AMAIR_EL2; + case MISCREG_VBAR_EL1: + return MISCREG_VBAR_EL2; + case MISCREG_CONTEXTIDR_EL1: + return MISCREG_CONTEXTIDR_EL2; + case MISCREG_CNTKCTL_EL1: + return MISCREG_CNTHCTL_EL2; + case MISCREG_CNTP_TVAL_EL0: + return sec_el2? MISCREG_CNTHPS_TVAL_EL2: + MISCREG_CNTHP_TVAL_EL2; + case MISCREG_CNTP_CTL_EL0: + return sec_el2? MISCREG_CNTHPS_CTL_EL2: + MISCREG_CNTHP_CTL_EL2; + case MISCREG_CNTP_CVAL_EL0: + return sec_el2? MISCREG_CNTHPS_CVAL_EL2: + MISCREG_CNTHP_CVAL_EL2; + case MISCREG_CNTV_TVAL_EL0: + return sec_el2? MISCREG_CNTHVS_TVAL_EL2: + MISCREG_CNTHV_TVAL_EL2; + case MISCREG_CNTV_CTL_EL0: + return sec_el2? MISCREG_CNTHVS_CTL_EL2: + MISCREG_CNTHV_CTL_EL2; + case MISCREG_CNTV_CVAL_EL0: + return sec_el2? 
MISCREG_CNTHVS_CVAL_EL2: + MISCREG_CNTHV_CVAL_EL2; + default: + return misc_reg; + } + /*should not be accessible */ + return misc_reg; + } + int snsBankedIndex64(MiscRegIndex reg, bool ns) const { diff --git a/src/arch/arm/isa/insts/ldr64.isa b/src/arch/arm/isa/insts/ldr64.isa index a2c1bae86..c9db19017 100644 --- a/src/arch/arm/isa/insts/ldr64.isa +++ b/src/arch/arm/isa/insts/ldr64.isa @@ -64,7 +64,7 @@ let {{ # Add memory request flags where necessary if self.user: - self.memFlags.append("ArmISA::TLB::UserMode") + self.memFlags.append("userFlag") if self.flavor == "dprefetch": self.memFlags.append("Request::PREFETCH") @@ -135,6 +135,11 @@ let {{ eaCode += self.offset eaCode += ";" + if self.user: + eaCode += " uint8_t userFlag = 0;\n"\ + " if(isUnpriviledgeAccess(xc->tcBase()))\n"\ + " userFlag = ArmISA::TLB::UserMode;" + self.codeBlobs["ea_code"] = eaCode def emitHelper(self, base='Memory64', wbDecl=None): diff --git a/src/arch/arm/isa/insts/str64.isa b/src/arch/arm/isa/insts/str64.isa index 7ad1cad2c..ebdad361f 100644 --- a/src/arch/arm/isa/insts/str64.isa +++ b/src/arch/arm/isa/insts/str64.isa @@ -62,7 +62,7 @@ let {{ # Add memory request flags where necessary if self.user: - self.memFlags.append("ArmISA::TLB::UserMode") + self.memFlags.append("userFlag") if self.flavor in ("relexp", "exp"): # For exclusive pair ops alignment check is based on total size @@ -135,6 +135,10 @@ let {{ eaCode += self.offset eaCode += ";" + if self.user: + eaCode += " uint8_t userFlag = 0;\n"\ + " if(isUnpriviledgeAccess(xc->tcBase()))\n"\ + " userFlag = ArmISA::TLB::UserMode;" self.codeBlobs["ea_code"] = eaCode diff --git a/src/arch/arm/miscregs.cc b/src/arch/arm/miscregs.cc index 525fbcdde..5aa5adab2 100644 --- a/src/arch/arm/miscregs.cc +++ b/src/arch/arm/miscregs.cc @@ -2233,7 +2233,16 @@ decodeAArch64SysReg(unsigned op0, unsigned op1, } break; case 5: + /* op0: 3 Crn:1 op1:5 */ switch (crm) { + case 0: + switch (op2) { + case 0: + return MISCREG_SCTLR_EL12; + case 2: + return MISCREG_CPACR_EL12; + } + break; case 2: switch (op2) { case 0: @@ -2349,6 +2358,21 @@ decodeAArch64SysReg(unsigned op0, unsigned op1, break; } break; + case 5: + /* op0: 3 Crn:2 op1:5 */ + switch (crm) { + case 0: + switch (op2) { + case 0: + return MISCREG_TTBR0_EL12; + case 1: + return MISCREG_TTBR1_EL12; + case 2: + return MISCREG_TCR_EL12; + } + break; + } + break; case 6: switch (crm) { case 0: @@ -2471,6 +2495,18 @@ decodeAArch64SysReg(unsigned op0, unsigned op1, break; } break; + case 5: + switch (crm) { + case 0: + switch (op2) { + case 0: + return MISCREG_SPSR_EL12; + case 1: + return MISCREG_ELR_EL12; + } + break; + } + break; case 6: switch (crm) { case 0: @@ -2571,6 +2607,24 @@ decodeAArch64SysReg(unsigned op0, unsigned op1, break; } break; + case 5: + switch (crm) { + case 1: + switch (op2) { + case 0: + return MISCREG_AFSR0_EL12; + case 1: + return MISCREG_AFSR1_EL12; + } + break; + case 2: + switch (op2) { + case 0: + return MISCREG_ESR_EL12; + } + break; + } + break; case 6: switch (crm) { case 1: @@ -2615,6 +2669,16 @@ decodeAArch64SysReg(unsigned op0, unsigned op1, break; } break; + case 5: + switch (crm) { + case 0: + switch (op2) { + case 0: + return MISCREG_FAR_EL12; + } + break; + } + break; case 6: switch (crm) { case 0: @@ -2733,6 +2797,22 @@ decodeAArch64SysReg(unsigned op0, unsigned op1, break; } break; + case 5: + switch (crm) { + case 2: + switch (op2) { + case 0: + return MISCREG_MAIR_EL12; + } + break; + case 3: + switch (op2) { + case 0: + return MISCREG_AMAIR_EL12; + } + break; + } + break; case 
6: switch (crm) { case 2: @@ -2958,6 +3038,16 @@ decodeAArch64SysReg(unsigned op0, unsigned op1, break; } break; + case 5: + switch (crm) { + case 0: + switch (op2) { + case 0: + return MISCREG_VBAR_EL12; + } + break; + } + break; case 6: switch (crm) { case 0: @@ -3022,6 +3112,16 @@ decodeAArch64SysReg(unsigned op0, unsigned op1, break; } break; + case 5: + switch (crm) { + case 0: + switch (op2) { + case 1: + return MISCREG_CONTEXTIDR_EL12; + } + break; + } + break; case 6: switch (crm) { case 0: @@ -4512,7 +4612,8 @@ ISA::initializeMiscRegMetadata() .allPrivileges().exceptUserMode() .mapsTo(MISCREG_DBGWCR15); InitReg(MISCREG_MDCCSR_EL0) - .allPrivileges().monSecureWrite(0).monNonSecureWrite(0) + .allPrivileges().writes(0) + //monSecureWrite(0).monNonSecureWrite(0) .mapsTo(MISCREG_DBGDSCRint); InitReg(MISCREG_MDDTR_EL0) .allPrivileges(); @@ -4679,12 +4780,26 @@ ISA::initializeMiscRegMetadata() | (nTLSMD ? 0 : 0x8000000) | (LSMAOE ? 0 : 0x10000000)) .mapsTo(MISCREG_SCTLR_NS); + InitReg(MISCREG_SCTLR_EL12) + .allPrivileges().exceptUserMode() + .res0( 0x20440 | (EnDB ? 0 : 0x2000) + | (IESB ? 0 : 0x200000) + | (EnDA ? 0 : 0x8000000) + | (EnIB ? 0 : 0x40000000) + | (EnIA ? 0 : 0x80000000)) + .res1(0x500800 | (SPAN ? 0 : 0x800000) + | (nTLSMD ? 0 : 0x8000000) + | (LSMAOE ? 0 : 0x10000000)) + .mapsTo(MISCREG_SCTLR_EL1); InitReg(MISCREG_ACTLR_EL1) .allPrivileges().exceptUserMode() .mapsTo(MISCREG_ACTLR_NS); InitReg(MISCREG_CPACR_EL1) .allPrivileges().exceptUserMode() .mapsTo(MISCREG_CPACR); + InitReg(MISCREG_CPACR_EL12) + .allPrivileges().exceptUserMode() + .mapsTo(MISCREG_CPACR_EL1); InitReg(MISCREG_SCTLR_EL2) .hyp().mon() .res0(0x0512c7c0 | (EnDB ? 0 : 0x2000) @@ -4736,12 +4851,21 @@ ISA::initializeMiscRegMetadata() InitReg(MISCREG_TTBR0_EL1) .allPrivileges().exceptUserMode() .mapsTo(MISCREG_TTBR0_NS); + InitReg(MISCREG_TTBR0_EL12) + .allPrivileges().exceptUserMode() + .mapsTo(MISCREG_TTBR0_EL1); InitReg(MISCREG_TTBR1_EL1) .allPrivileges().exceptUserMode() .mapsTo(MISCREG_TTBR1_NS); + InitReg(MISCREG_TTBR1_EL12) + .allPrivileges().exceptUserMode() + .mapsTo(MISCREG_TTBR1_EL1); InitReg(MISCREG_TCR_EL1) .allPrivileges().exceptUserMode() .mapsTo(MISCREG_TTBCR_NS); + InitReg(MISCREG_TCR_EL12) + .allPrivileges().exceptUserMode() + .mapsTo(MISCREG_TTBCR_NS); InitReg(MISCREG_TTBR0_EL2) .hyp().mon() .mapsTo(MISCREG_HTTBR); @@ -4766,8 +4890,14 @@ ISA::initializeMiscRegMetadata() InitReg(MISCREG_SPSR_EL1) .allPrivileges().exceptUserMode() .mapsTo(MISCREG_SPSR_SVC); // NAM C5.2.17 SPSR_EL1 + InitReg(MISCREG_SPSR_EL12) + .allPrivileges().exceptUserMode() + .mapsTo(MISCREG_SPSR_SVC); InitReg(MISCREG_ELR_EL1) .allPrivileges().exceptUserMode(); + InitReg(MISCREG_ELR_EL12) + .allPrivileges().exceptUserMode() + .mapsTo(MISCREG_ELR_EL1); InitReg(MISCREG_SP_EL0) .allPrivileges().exceptUserMode(); InitReg(MISCREG_SPSEL) @@ -4814,11 +4944,20 @@ ISA::initializeMiscRegMetadata() InitReg(MISCREG_AFSR0_EL1) .allPrivileges().exceptUserMode() .mapsTo(MISCREG_ADFSR_NS); + InitReg(MISCREG_AFSR0_EL12) + .allPrivileges().exceptUserMode() + .mapsTo(MISCREG_ADFSR_NS); InitReg(MISCREG_AFSR1_EL1) .allPrivileges().exceptUserMode() .mapsTo(MISCREG_AIFSR_NS); + InitReg(MISCREG_AFSR1_EL12) + .allPrivileges().exceptUserMode() + .mapsTo(MISCREG_AIFSR_NS); InitReg(MISCREG_ESR_EL1) .allPrivileges().exceptUserMode(); + InitReg(MISCREG_ESR_EL12) + .allPrivileges().exceptUserMode() + .mapsTo(MISCREG_ESR_EL1); InitReg(MISCREG_IFSR32_EL2) .hyp().mon() .mapsTo(MISCREG_IFSR_NS); @@ -4842,6 +4981,9 @@ ISA::initializeMiscRegMetadata() 
InitReg(MISCREG_FAR_EL1) .allPrivileges().exceptUserMode() .mapsTo(MISCREG_DFAR_NS, MISCREG_IFAR_NS); + InitReg(MISCREG_FAR_EL12) + .allPrivileges().exceptUserMode() + .mapsTo(MISCREG_DFAR_NS, MISCREG_IFAR_NS); InitReg(MISCREG_FAR_EL2) .hyp().mon() .mapsTo(MISCREG_HDFAR, MISCREG_HIFAR); @@ -5023,9 +5165,15 @@ ISA::initializeMiscRegMetadata() InitReg(MISCREG_MAIR_EL1) .allPrivileges().exceptUserMode() .mapsTo(MISCREG_PRRR_NS, MISCREG_NMRR_NS); + InitReg(MISCREG_MAIR_EL12) + .allPrivileges().exceptUserMode() + .mapsTo(MISCREG_PRRR_NS, MISCREG_NMRR_NS); InitReg(MISCREG_AMAIR_EL1) .allPrivileges().exceptUserMode() .mapsTo(MISCREG_AMAIR0_NS, MISCREG_AMAIR1_NS); + InitReg(MISCREG_AMAIR_EL12) + .allPrivileges().exceptUserMode() + .mapsTo(MISCREG_AMAIR0_NS, MISCREG_AMAIR1_NS); InitReg(MISCREG_MAIR_EL2) .hyp().mon() .mapsTo(MISCREG_HMAIR0, MISCREG_HMAIR1); @@ -5043,6 +5191,9 @@ ISA::initializeMiscRegMetadata() InitReg(MISCREG_VBAR_EL1) .allPrivileges().exceptUserMode() .mapsTo(MISCREG_VBAR_NS); + InitReg(MISCREG_VBAR_EL12) + .allPrivileges().exceptUserMode() + .mapsTo(MISCREG_VBAR_NS); InitReg(MISCREG_RVBAR_EL1) .allPrivileges().exceptUserMode().writes(0); InitReg(MISCREG_ISR_EL1) @@ -5062,6 +5213,9 @@ ISA::initializeMiscRegMetadata() InitReg(MISCREG_CONTEXTIDR_EL1) .allPrivileges().exceptUserMode() .mapsTo(MISCREG_CONTEXTIDR_NS); + InitReg(MISCREG_CONTEXTIDR_EL12) + .allPrivileges().exceptUserMode() + .mapsTo(MISCREG_CONTEXTIDR_NS); InitReg(MISCREG_TPIDR_EL1) .allPrivileges().exceptUserMode() .mapsTo(MISCREG_TPIDRPRW_NS); @@ -5180,7 +5334,21 @@ ISA::initializeMiscRegMetadata() .hyp() .res0(0xffffffff00000000) .mapsTo(MISCREG_CNTHP_TVAL); - // IF Armv8.1-VHE + InitReg(MISCREG_CNTHPS_CTL_EL2) + .mon() + .hyp() + .res0(0xfffffffffffffff8) + .unimplemented(); + InitReg(MISCREG_CNTHPS_CVAL_EL2) + .mon() + .hyp() + .res0(0xfffffffffffffff8) + .unimplemented(); + InitReg(MISCREG_CNTHPS_TVAL_EL2) + .mon() + .hyp() + .res0(0xfffffffffffffff8) + .unimplemented(); InitReg(MISCREG_CNTHV_CTL_EL2) .mon() .hyp() @@ -5192,6 +5360,21 @@ ISA::initializeMiscRegMetadata() .mon() .hyp() .res0(0xffffffff00000000); + InitReg(MISCREG_CNTHVS_CTL_EL2) + .mon() + .hyp() + .res0(0xfffffffffffffff8) + .unimplemented(); + InitReg(MISCREG_CNTHVS_CVAL_EL2) + .mon() + .hyp() + .res0(0xfffffffffffffff8) + .unimplemented(); + InitReg(MISCREG_CNTHVS_TVAL_EL2) + .mon() + .hyp() + .res0(0xfffffffffffffff8) + .unimplemented(); // ENDIF Armv8.1-VHE InitReg(MISCREG_CNTVOFF_EL2) .mon() @@ -5746,7 +5929,8 @@ ISA::initializeMiscRegMetadata() InitReg(MISCREG_ZCR_EL2) .hyp().mon(); InitReg(MISCREG_ZCR_EL12) - .unimplemented().warnNotFail(); + .allPrivileges().exceptUserMode() + .mapsTo(MISCREG_ZCR_EL1); InitReg(MISCREG_ZCR_EL1) .allPrivileges().exceptUserMode(); diff --git a/src/arch/arm/miscregs.hh b/src/arch/arm/miscregs.hh index ac5d68fa5..5b09f3407 100644 --- a/src/arch/arm/miscregs.hh +++ b/src/arch/arm/miscregs.hh @@ -569,8 +569,10 @@ namespace ArmISA MISCREG_VPIDR_EL2, MISCREG_VMPIDR_EL2, MISCREG_SCTLR_EL1, + MISCREG_SCTLR_EL12, MISCREG_ACTLR_EL1, MISCREG_CPACR_EL1, + MISCREG_CPACR_EL12, MISCREG_SCTLR_EL2, MISCREG_ACTLR_EL2, MISCREG_HCR_EL2, @@ -585,8 +587,11 @@ namespace ArmISA MISCREG_CPTR_EL3, MISCREG_MDCR_EL3, MISCREG_TTBR0_EL1, + MISCREG_TTBR0_EL12, MISCREG_TTBR1_EL1, + MISCREG_TTBR1_EL12, MISCREG_TCR_EL1, + MISCREG_TCR_EL12, MISCREG_TTBR0_EL2, MISCREG_TCR_EL2, MISCREG_VTTBR_EL2, @@ -595,7 +600,9 @@ namespace ArmISA MISCREG_TCR_EL3, MISCREG_DACR32_EL2, MISCREG_SPSR_EL1, + MISCREG_SPSR_EL12, MISCREG_ELR_EL1, + MISCREG_ELR_EL12, 
MISCREG_SP_EL0, MISCREG_SPSEL, MISCREG_CURRENTEL, @@ -616,8 +623,11 @@ namespace ArmISA MISCREG_ELR_EL3, MISCREG_SP_EL2, MISCREG_AFSR0_EL1, + MISCREG_AFSR0_EL12, MISCREG_AFSR1_EL1, + MISCREG_AFSR1_EL12, MISCREG_ESR_EL1, + MISCREG_ESR_EL12, MISCREG_IFSR32_EL2, MISCREG_AFSR0_EL2, MISCREG_AFSR1_EL2, @@ -627,6 +637,7 @@ namespace ArmISA MISCREG_AFSR1_EL3, MISCREG_ESR_EL3, MISCREG_FAR_EL1, + MISCREG_FAR_EL12, MISCREG_FAR_EL2, MISCREG_HPFAR_EL2, MISCREG_FAR_EL3, @@ -703,7 +714,9 @@ namespace ArmISA MISCREG_PMUSERENR_EL0, MISCREG_PMOVSSET_EL0, MISCREG_MAIR_EL1, + MISCREG_MAIR_EL12, MISCREG_AMAIR_EL1, + MISCREG_AMAIR_EL12, MISCREG_MAIR_EL2, MISCREG_AMAIR_EL2, MISCREG_MAIR_EL3, @@ -711,6 +724,7 @@ namespace ArmISA MISCREG_L2CTLR_EL1, MISCREG_L2ECTLR_EL1, MISCREG_VBAR_EL1, + MISCREG_VBAR_EL12, MISCREG_RVBAR_EL1, MISCREG_ISR_EL1, MISCREG_VBAR_EL2, @@ -719,6 +733,7 @@ namespace ArmISA MISCREG_RVBAR_EL3, MISCREG_RMR_EL3, MISCREG_CONTEXTIDR_EL1, + MISCREG_CONTEXTIDR_EL12, MISCREG_TPIDR_EL1, MISCREG_TPIDR_EL0, MISCREG_TPIDRRO_EL0, @@ -749,10 +764,16 @@ namespace ArmISA MISCREG_CNTHP_CTL_EL2, MISCREG_CNTHP_CVAL_EL2, MISCREG_CNTHP_TVAL_EL2, + MISCREG_CNTHPS_CTL_EL2, + MISCREG_CNTHPS_CVAL_EL2, + MISCREG_CNTHPS_TVAL_EL2, // IF Armv8.1-VHE MISCREG_CNTHV_CTL_EL2, MISCREG_CNTHV_CVAL_EL2, MISCREG_CNTHV_TVAL_EL2, + MISCREG_CNTHVS_CTL_EL2, + MISCREG_CNTHVS_CVAL_EL2, + MISCREG_CNTHVS_TVAL_EL2, // ENDIF Armv8.1-VHE MISCREG_CNTVOFF_EL2, // END Generic Timer (AArch64) @@ -1647,8 +1668,10 @@ namespace ArmISA "vpidr_el2", "vmpidr_el2", "sctlr_el1", + "sctlr_el12", "actlr_el1", "cpacr_el1", + "cpacr_el12", "sctlr_el2", "actlr_el2", "hcr_el2", @@ -1663,8 +1686,11 @@ namespace ArmISA "cptr_el3", "mdcr_el3", "ttbr0_el1", + "ttbr0_el12", "ttbr1_el1", + "ttbr1_el12", "tcr_el1", + "tcr_el12", "ttbr0_el2", "tcr_el2", "vttbr_el2", @@ -1673,7 +1699,9 @@ namespace ArmISA "tcr_el3", "dacr32_el2", "spsr_el1", + "spsr_el12", "elr_el1", + "elr_el12", "sp_el0", "spsel", "currentel", @@ -1694,8 +1722,11 @@ namespace ArmISA "elr_el3", "sp_el2", "afsr0_el1", + "afsr0_el12", "afsr1_el1", + "afsr1_el12", "esr_el1", + "esr_el12", "ifsr32_el2", "afsr0_el2", "afsr1_el2", @@ -1705,6 +1736,7 @@ namespace ArmISA "afsr1_el3", "esr_el3", "far_el1", + "far_el12", "far_el2", "hpfar_el2", "far_el3", @@ -1781,7 +1813,9 @@ namespace ArmISA "pmuserenr_el0", "pmovsset_el0", "mair_el1", + "mair_el12", "amair_el1", + "amair_el12", "mair_el2", "amair_el2", "mair_el3", @@ -1789,6 +1823,7 @@ namespace ArmISA "l2ctlr_el1", "l2ectlr_el1", "vbar_el1", + "vbar_el12", "rvbar_el1", "isr_el1", "vbar_el2", @@ -1797,6 +1832,7 @@ namespace ArmISA "rvbar_el3", "rmr_el3", "contextidr_el1", + "contextidr_el12", "tpidr_el1", "tpidr_el0", "tpidrro_el0", @@ -1826,9 +1862,15 @@ namespace ArmISA "cnthp_ctl_el2", "cnthp_cval_el2", "cnthp_tval_el2", + "cnthps_ctl_el2", + "cnthps_cval_el2", + "cnthps_tval_el2", "cnthv_ctl_el2", "cnthv_cval_el2", "cnthv_tval_el2", + "cnthvs_ctl_el2", + "cnthvs_cval_el2", + "cnthvs_tval_el2", "cntvoff_el2", "pmevcntr0_el0", "pmevcntr1_el0", diff --git a/src/arch/arm/miscregs_types.hh b/src/arch/arm/miscregs_types.hh index 3578f58c5..f6bfee4c3 100644 --- a/src/arch/arm/miscregs_types.hh +++ b/src/arch/arm/miscregs_types.hh @@ -662,7 +662,11 @@ namespace ArmISA BitUnion32(CPTR) Bitfield<31> tcpac; + Bitfield<30> tam; + Bitfield<28> tta_e2h; + Bitfield<21, 20> fpen; Bitfield<20> tta; + Bitfield<17, 16> zen; Bitfield<13, 12> res1_13_12_el2; Bitfield<10> tfp; Bitfield<9> res1_9_el2; diff --git a/src/arch/arm/pagetable.hh b/src/arch/arm/pagetable.hh index 
1d18d2151..b91d3de01 100644 --- a/src/arch/arm/pagetable.hh +++ b/src/arch/arm/pagetable.hh @@ -188,22 +188,23 @@ struct TlbEntry : public Serializable bool match(Addr va, uint8_t _vmid, bool hypLookUp, bool secure_lookup, - ExceptionLevel target_el) const + ExceptionLevel target_el, bool in_host) const { - return match(va, 0, _vmid, hypLookUp, secure_lookup, true, target_el); + return match(va, 0, _vmid, hypLookUp, secure_lookup, true, + target_el, in_host); } bool match(Addr va, uint16_t asn, uint8_t _vmid, bool hypLookUp, - bool secure_lookup, bool ignore_asn, ExceptionLevel target_el) const + bool secure_lookup, bool ignore_asn, ExceptionLevel target_el, + bool in_host) const { bool match = false; Addr v = vpn << N; - if (valid && va >= v && va <= v + size && (secure_lookup == !nstid) && (hypLookUp == isHyp)) { - match = checkELMatch(target_el); + match = checkELMatch(target_el, in_host); if (match && !ignore_asn) { match = global || (asn == asid); @@ -216,12 +217,20 @@ struct TlbEntry : public Serializable } bool - checkELMatch(ExceptionLevel target_el) const + checkELMatch(ExceptionLevel target_el, bool in_host) const { - if (target_el == EL2 || target_el == EL3) { - return (el == target_el); - } else { - return (el == EL0) || (el == EL1); + switch (target_el) { + case EL3: + return el == EL3; + case EL2: + { + return el == EL2 || (el == EL0 && in_host); + } + case EL1: + case EL0: + return (el == EL0) || (el == EL1); + default: + return false; } } diff --git a/src/arch/arm/self_debug.cc b/src/arch/arm/self_debug.cc index 94d2f231a..790e04bf6 100644 --- a/src/arch/arm/self_debug.cc +++ b/src/arch/arm/self_debug.cc @@ -222,87 +222,106 @@ BrkPoint::test(ThreadContext *tc, Addr pc, ExceptionLevel el, DBGBCR ctr, bool from_link) { bool v = false; - switch (ctr.bt) - { - case 0x0: - v = testAddrMatch(tc, pc, ctr.bas); - break; - case 0x1: - v = testAddrMatch(tc, pc, ctr.bas); // linked - if (v){ - v = (conf->getBrkPoint(ctr.lbn))->testLinkedBk(tc, pc, el); - } - break; - case 0x2: + switch (ctr.bt) { + case 0x0: + v = testAddrMatch(tc, pc, ctr.bas); + break; + + case 0x1: + v = testAddrMatch(tc, pc, ctr.bas); // linked + if (v) { + v = (conf->getBrkPoint(ctr.lbn))->testLinkedBk(tc, pc, el); + } + break; + + case 0x2: + { + bool host = ELIsInHost(tc, el); + v = testContextMatch(tc, !host, true); + } + break; + + case 0x3: + if (from_link){ + bool host = ELIsInHost(tc, el); + v = testContextMatch(tc, !host, true); + } + break; + + case 0x4: + v = testAddrMissMatch(tc, pc, ctr.bas); + break; + + case 0x5: + v = testAddrMissMatch(tc, pc, ctr.bas); // linked + if (v && !from_link) + v = v && (conf->getBrkPoint(ctr.lbn))->testLinkedBk(tc, pc, el); + break; + + case 0x6: + if (HaveVirtHostExt(tc) && !ELIsInHost(tc, el)) + v = testContextMatch(tc, true); + break; + + case 0x7: + if (HaveVirtHostExt(tc) && !ELIsInHost(tc, el) && from_link) v = testContextMatch(tc, true); - break; - case 0x3: - if (from_link){ - v = testContextMatch(tc, true); //linked - } - break; - case 0x4: - v = testAddrMissMatch(tc, pc, ctr.bas); - break; - case 0x5: - v = testAddrMissMatch(tc, pc, ctr.bas); // linked - if (v && !from_link) - v = v && (conf->getBrkPoint(ctr.lbn))->testLinkedBk(tc, - pc, el); - break; - case 0x6: - // VHE not implemented - // v = testContextMatch(tc, true); - break; - case 0x7: - // VHE not implemented - // if (from_link) - // v = testContextMatch(tc, true); - break; - case 0x8: + break; + + case 0x8: + if (ArmSystem::haveEL(tc, EL2) && !ELIsInHost(tc, el)) { v = testVMIDMatch(tc); - break; - 
case 0x9: - if (from_link && ArmSystem::haveEL(tc, EL2)){ - v = testVMIDMatch(tc); // linked - } - break; - case 0xa: - if (ArmSystem::haveEL(tc, EL2)){ - v = testContextMatch(tc, true); - if (v && !from_link) - v = v && testVMIDMatch(tc); - } - break; - case 0xb: - if (from_link && ArmSystem::haveEL(tc, EL2)){ - v = testContextMatch(tc, true); - v = v && testVMIDMatch(tc); - } - break; - case 0xc: - // VHE not implemented - // v = testContextMatch(tc, false); // CONTEXTIDR_EL2 - break; - case 0xd: - // VHE not implemented - // if (from_link) - // v = testContextMatch(tc, false); - // CONTEXTIDR_EL2 AND LINKED + } + break; - break; - case 0xe: - // VHE not implemented - // v = testContextMatch(tc, true); // CONTEXTIDR_EL1 - // v = v && testContextMatch(tc, false); // CONTEXTIDR_EL2 - break; - case 0xf: - // VHE not implemented - // if (from_link){ - // v = testContextMatch(tc, true); // CONTEXTIDR_EL1 - // v = v && testContextMatch(tc, false); // CONTEXTIDR_EL2 - // } - break; + case 0x9: + if (from_link && ArmSystem::haveEL(tc, EL2) && + !ELIsInHost(tc, el)) { + v = testVMIDMatch(tc); + } + break; + + case 0xa: + if (ArmSystem::haveEL(tc, EL2) && !ELIsInHost(tc, el)) { + v = testContextMatch(tc, true); + if (v && !from_link) + v = v && testVMIDMatch(tc); + } + break; + case 0xb: + if (from_link && ArmSystem::haveEL(tc, EL2) && + !ELIsInHost(tc, el)) { + v = testContextMatch(tc, true); + v = v && testVMIDMatch(tc); + } + break; + + case 0xc: + if (HaveVirtHostExt(tc) && !inSecureState(tc)) + v = testContextMatch(tc, false); + break; + + case 0xd: + if (HaveVirtHostExt(tc) && from_link && !inSecureState(tc)) + v = testContextMatch(tc, false); + break; + + case 0xe: + if (HaveVirtHostExt(tc) && !ELIsInHost(tc, el) + && !inSecureState(tc) ) { + v = testContextMatch(tc, true); // CONTEXTIDR_EL1 + v = v && testContextMatch(tc, false); // CONTEXTIDR_EL2 + } + break; + case 0xf: + if (HaveVirtHostExt(tc) && !ELIsInHost(tc, el) && from_link + && !inSecureState(tc) ) { + v = testContextMatch(tc, true); // CONTEXTIDR_EL1 + v = v && testContextMatch(tc, false); // CONTEXTIDR_EL2 + } + break; + default: + break; } return v; } @@ -403,6 +422,12 @@ BrkPoint::testAddrMissMatch(ThreadContext *tc, Addr in_pc, uint8_t bas) bool BrkPoint::testContextMatch(ThreadContext *tc, bool ctx1) +{ + return testContextMatch(tc, ctx1, ctx1); +} + +bool +BrkPoint::testContextMatch(ThreadContext *tc, bool ctx1, bool low_ctx) { if (!isCntxtAware) return false; @@ -420,8 +445,8 @@ BrkPoint::testContextMatch(ThreadContext *tc, bool ctx1) return false; } - RegVal ctxid = tc->readMiscReg(miscridx); - RegVal v = getContextfromReg(tc, ctx1); + RegVal ctxid = bits(tc->readMiscReg(miscridx), 31, 0); + RegVal v = getContextfromReg(tc, low_ctx); return (v == ctxid); } diff --git a/src/arch/arm/self_debug.hh b/src/arch/arm/self_debug.hh index 9dd6e228e..a1c03eafd 100644 --- a/src/arch/arm/self_debug.hh +++ b/src/arch/arm/self_debug.hh @@ -108,6 +108,7 @@ class BrkPoint public: bool testAddrMatch(ThreadContext *tc, Addr pc, uint8_t bas); bool testAddrMissMatch(ThreadContext *tc, Addr pc, uint8_t bas); + bool testContextMatch(ThreadContext *tc, bool ctx1, bool low_ctx); bool testContextMatch(ThreadContext *tc, bool ctx1); bool testVMIDMatch(ThreadContext *tc); diff --git a/src/arch/arm/table_walker.cc b/src/arch/arm/table_walker.cc index 51bb8ec13..aa3dbf6e2 100644 --- a/src/arch/arm/table_walker.cc +++ b/src/arch/arm/table_walker.cc @@ -264,11 +264,21 @@ TableWalker::walk(const RequestPtr &_req, ThreadContext *_tc, uint16_t _asid, 
currState->vaddr = currState->vaddr_tainted; if (currState->aarch64) { + currState->hcr = currState->tc->readMiscReg(MISCREG_HCR_EL2); if (isStage2) { currState->sctlr = currState->tc->readMiscReg(MISCREG_SCTLR_EL1); currState->vtcr = currState->tc->readMiscReg(MISCREG_VTCR_EL2); } else switch (currState->el) { case EL0: + if (HaveVirtHostExt(currState->tc) && + currState->hcr.tge == 1 && currState->hcr.e2h ==1) { + currState->sctlr = currState->tc->readMiscReg(MISCREG_SCTLR_EL2); + currState->tcr = currState->tc->readMiscReg(MISCREG_TCR_EL2); + } else { + currState->sctlr = currState->tc->readMiscReg(MISCREG_SCTLR_EL1); + currState->tcr = currState->tc->readMiscReg(MISCREG_TCR_EL1); + } + break; case EL1: currState->sctlr = currState->tc->readMiscReg(MISCREG_SCTLR_EL1); currState->tcr = currState->tc->readMiscReg(MISCREG_TCR_EL1); @@ -287,7 +297,6 @@ TableWalker::walk(const RequestPtr &_req, ThreadContext *_tc, uint16_t _asid, panic("Invalid exception level"); break; } - currState->hcr = currState->tc->readMiscReg(MISCREG_HCR_EL2); } else { currState->sctlr = currState->tc->readMiscReg(snsBankedIndex( MISCREG_SCTLR, currState->tc, !currState->isSecure)); @@ -370,7 +379,7 @@ TableWalker::processWalkWrapper() // @TODO Should this always be the TLB or should we look in the stage2 TLB? TlbEntry* te = tlb->lookup(currState->vaddr, currState->asid, currState->vmid, currState->isHyp, currState->isSecure, true, false, - currState->el); + currState->el, false); // Check if we still need to have a walk for this request. If the requesting // instruction has been squashed, or a previous walk has filled the TLB with @@ -436,7 +445,7 @@ TableWalker::processWalkWrapper() currState = pendingQueue.front(); te = tlb->lookup(currState->vaddr, currState->asid, currState->vmid, currState->isHyp, currState->isSecure, true, - false, currState->el); + false, currState->el, false); } else { // Terminate the loop, nothing more to do currState = NULL; @@ -772,6 +781,48 @@ TableWalker::processWalkAArch64() switch (currState->el) { case EL0: + { + Addr ttbr0; + Addr ttbr1; + if (HaveVirtHostExt(currState->tc) && + currState->hcr.tge==1 && currState->hcr.e2h == 1) { + // VHE code for EL2&0 regime + ttbr0 = currState->tc->readMiscReg(MISCREG_TTBR0_EL2); + ttbr1 = currState->tc->readMiscReg(MISCREG_TTBR1_EL2); + } else { + ttbr0 = currState->tc->readMiscReg(MISCREG_TTBR0_EL1); + ttbr1 = currState->tc->readMiscReg(MISCREG_TTBR1_EL1); + } + switch (bits(currState->vaddr, 63,48)) { + case 0: + DPRINTF(TLB, " - Selecting TTBR0 (AArch64)\n"); + ttbr = ttbr0; + tsz = adjustTableSizeAArch64(64 - currState->tcr.t0sz); + tg = GrainMap_tg0[currState->tcr.tg0]; + currState->hpd = currState->tcr.hpd0; + currState->isUncacheable = currState->tcr.irgn0 == 0; + if (bits(currState->vaddr, 63, tsz) != 0x0 || + currState->tcr.epd0) + fault = true; + break; + case 0xffff: + DPRINTF(TLB, " - Selecting TTBR1 (AArch64)\n"); + ttbr = ttbr1; + tsz = adjustTableSizeAArch64(64 - currState->tcr.t1sz); + tg = GrainMap_tg1[currState->tcr.tg1]; + currState->hpd = currState->tcr.hpd1; + currState->isUncacheable = currState->tcr.irgn1 == 0; + if (bits(currState->vaddr, 63, tsz) != mask(64-tsz) || + currState->tcr.epd1) + fault = true; + break; + default: + // top two bytes must be all 0s or all 1s, else invalid addr + fault = true; + } + ps = currState->tcr.ips; + } + break; case EL1: if (isStage2) { DPRINTF(TLB, " - Selecting VTTBR0 (AArch64 stage 2)\n"); @@ -828,7 +879,7 @@ TableWalker::processWalkAArch64() case EL2: switch(bits(currState->vaddr, 
63,48)) { case 0: - DPRINTF(TLB, " - Selecting TTBR0 (AArch64)\n"); + DPRINTF(TLB, " - Selecting TTBR0_EL2 (AArch64)\n"); ttbr = currState->tc->readMiscReg(MISCREG_TTBR0_EL2); tsz = adjustTableSizeAArch64(64 - currState->tcr.t0sz); tg = GrainMap_tg0[currState->tcr.tg0]; @@ -838,7 +889,7 @@ TableWalker::processWalkAArch64() break; case 0xffff: - DPRINTF(TLB, " - Selecting TTBR1 (AArch64)\n"); + DPRINTF(TLB, " - Selecting TTBR1_EL2 (AArch64)\n"); ttbr = currState->tc->readMiscReg(MISCREG_TTBR1_EL2); tsz = adjustTableSizeAArch64(64 - currState->tcr.t1sz); tg = GrainMap_tg1[currState->tcr.tg1]; @@ -853,12 +904,12 @@ TableWalker::processWalkAArch64() // invalid addr if top two bytes are not all 0s fault = true; } - ps = currState->tcr.ps; + ps = currState->hcr.e2h ? currState->tcr.ips: currState->tcr.ps; break; case EL3: switch(bits(currState->vaddr, 63,48)) { case 0: - DPRINTF(TLB, " - Selecting TTBR0 (AArch64)\n"); + DPRINTF(TLB, " - Selecting TTBR0_EL3 (AArch64)\n"); ttbr = currState->tc->readMiscReg(MISCREG_TTBR0_EL3); tsz = adjustTableSizeAArch64(64 - currState->tcr.t0sz); tg = GrainMap_tg0[currState->tcr.tg0]; @@ -1408,10 +1459,11 @@ TableWalker::memAttrsAArch64(ThreadContext *tc, TlbEntry &te, uint8_t attrIndx = lDescriptor.attrIndx(); DPRINTF(TLBVerbose, "memAttrsAArch64 AttrIndx:%#x sh:%#x\n", attrIndx, sh); + ExceptionLevel regime = s1TranslationRegime(tc, currState->el); // Select MAIR uint64_t mair; - switch (currState->el) { + switch (regime) { case EL0: case EL1: mair = tc->readMiscReg(MISCREG_MAIR_EL1); diff --git a/src/arch/arm/tlb.cc b/src/arch/arm/tlb.cc index ca9784935..0c001b0bc 100644 --- a/src/arch/arm/tlb.cc +++ b/src/arch/arm/tlb.cc @@ -124,7 +124,7 @@ TLB::translateFunctional(ThreadContext *tc, Addr va, Addr &pa) } TlbEntry *e = lookup(va, asid, vmid, isHyp, isSecure, true, false, - aarch64 ? aarch64EL : EL1); + aarch64 ? aarch64EL : EL1, false); if (!e) return false; pa = e->pAddr(va); @@ -157,7 +157,8 @@ TLB::finalizePhysical(const RequestPtr &req, TlbEntry* TLB::lookup(Addr va, uint16_t asn, uint8_t vmid, bool hyp, bool secure, - bool functional, bool ignore_asn, ExceptionLevel target_el) + bool functional, bool ignore_asn, ExceptionLevel target_el, + bool in_host) { TlbEntry *retval = NULL; @@ -166,8 +167,9 @@ TLB::lookup(Addr va, uint16_t asn, uint8_t vmid, bool hyp, bool secure, int x = 0; while (retval == NULL && x < size) { if ((!ignore_asn && table[x].match(va, asn, vmid, hyp, secure, false, - target_el)) || - (ignore_asn && table[x].match(va, vmid, hyp, secure, target_el))) { + target_el, in_host)) || + (ignore_asn && table[x].match(va, vmid, hyp, secure, target_el, + in_host))) { // We only move the hit entry ahead when the position is higher // than rangeMRU if (x > rangeMRU && !functional) { @@ -244,7 +246,7 @@ TLB::printTlb() const void TLB::flushAllSecurity(bool secure_lookup, ExceptionLevel target_el, - bool ignore_el) + bool ignore_el, bool in_host) { DPRINTF(TLB, "Flushing all TLB entries (%s lookup)\n", (secure_lookup ? "secure" : "non-secure")); @@ -253,8 +255,7 @@ TLB::flushAllSecurity(bool secure_lookup, ExceptionLevel target_el, while (x < size) { te = &table[x]; const bool el_match = ignore_el ? 
- true : te->checkELMatch(target_el); - + true : te->checkELMatch(target_el, in_host); if (te->valid && secure_lookup == !te->nstid && (te->vmid == vmid || secure_lookup) && el_match) { @@ -270,7 +271,7 @@ TLB::flushAllSecurity(bool secure_lookup, ExceptionLevel target_el, // If there's a second stage TLB (and we're not it) then flush it as well // if we're currently in hyp mode if (!isStage2 && isHyp) { - stage2Tlb->flushAllSecurity(secure_lookup, EL1, true); + stage2Tlb->flushAllSecurity(secure_lookup, EL1, true, false); } } @@ -286,7 +287,7 @@ TLB::flushAllNs(ExceptionLevel target_el, bool ignore_el) while (x < size) { te = &table[x]; const bool el_match = ignore_el ? - true : te->checkELMatch(target_el); + true : te->checkELMatch(target_el, false); if (te->valid && te->nstid && te->isHyp == hyp && el_match) { @@ -307,17 +308,18 @@ TLB::flushAllNs(ExceptionLevel target_el, bool ignore_el) void TLB::flushMvaAsid(Addr mva, uint64_t asn, bool secure_lookup, - ExceptionLevel target_el) + ExceptionLevel target_el, bool in_host) { DPRINTF(TLB, "Flushing TLB entries with mva: %#x, asid: %#x " "(%s lookup)\n", mva, asn, (secure_lookup ? "secure" : "non-secure")); - _flushMva(mva, asn, secure_lookup, false, target_el); + _flushMva(mva, asn, secure_lookup, false, target_el, in_host); flushTlbMvaAsid++; } void -TLB::flushAsid(uint64_t asn, bool secure_lookup, ExceptionLevel target_el) +TLB::flushAsid(uint64_t asn, bool secure_lookup, ExceptionLevel target_el, + bool in_host) { DPRINTF(TLB, "Flushing TLB entries with asid: %#x (%s lookup)\n", asn, (secure_lookup ? "secure" : "non-secure")); @@ -329,7 +331,7 @@ TLB::flushAsid(uint64_t asn, bool secure_lookup, ExceptionLevel target_el) te = &table[x]; if (te->valid && te->asid == asn && secure_lookup == !te->nstid && (te->vmid == vmid || secure_lookup) && - te->checkELMatch(target_el)) { + te->checkELMatch(target_el, in_host)) { te->valid = false; DPRINTF(TLB, " - %s\n", te->print()); @@ -341,17 +343,18 @@ TLB::flushAsid(uint64_t asn, bool secure_lookup, ExceptionLevel target_el) } void -TLB::flushMva(Addr mva, bool secure_lookup, ExceptionLevel target_el) -{ +TLB::flushMva(Addr mva, bool secure_lookup, ExceptionLevel target_el, + bool in_host) { + DPRINTF(TLB, "Flushing TLB entries with mva: %#x (%s lookup)\n", mva, (secure_lookup ? 
"secure" : "non-secure")); - _flushMva(mva, 0xbeef, secure_lookup, true, target_el); + _flushMva(mva, 0xbeef, secure_lookup, true, target_el, in_host); flushTlbMva++; } void TLB::_flushMva(Addr mva, uint64_t asn, bool secure_lookup, - bool ignore_asn, ExceptionLevel target_el) + bool ignore_asn, ExceptionLevel target_el, bool in_host) { TlbEntry *te; // D5.7.2: Sign-extend address to 64 bits @@ -360,7 +363,7 @@ TLB::_flushMva(Addr mva, uint64_t asn, bool secure_lookup, bool hyp = target_el == EL2; te = lookup(mva, asn, vmid, hyp, secure_lookup, false, ignore_asn, - target_el); + target_el, in_host); while (te != NULL) { if (secure_lookup == !te->nstid) { DPRINTF(TLB, " - %s\n", te->print()); @@ -368,7 +371,7 @@ TLB::_flushMva(Addr mva, uint64_t asn, bool secure_lookup, flushedEntries++; } te = lookup(mva, asn, vmid, hyp, secure_lookup, false, ignore_asn, - target_el); + target_el, in_host); } } @@ -376,7 +379,7 @@ void TLB::flushIpaVmid(Addr ipa, bool secure_lookup, ExceptionLevel target_el) { assert(!isStage2); - stage2Tlb->_flushMva(ipa, 0xbeef, secure_lookup, true, target_el); + stage2Tlb->_flushMva(ipa, 0xbeef, secure_lookup, true, target_el, false); } void @@ -823,9 +826,10 @@ TLB::checkPermissions64(TlbEntry *te, const RequestPtr &req, Mode mode, uint8_t ap = 0x3 & (te->ap); // 2-bit access protection field bool grant = false; + bool wxn = sctlr.wxn; uint8_t xn = te->xn; uint8_t pxn = te->pxn; - bool r = !is_write && !is_fetch; + bool r = (!is_write && !is_fetch); bool w = is_write; bool x = is_fetch; @@ -835,7 +839,8 @@ TLB::checkPermissions64(TlbEntry *te, const RequestPtr &req, Mode mode, // generated the fault; they count as writes otherwise bool grant_read = true; DPRINTF(TLBVerbose, "Checking permissions: ap:%d, xn:%d, pxn:%d, r:%d, " - "w:%d, x:%d\n", ap, xn, pxn, r, w, x); + "w:%d, x:%d, is_priv: %d, wxn: %d\n", ap, xn, + pxn, r, w, x, is_priv, wxn); if (isStage2) { assert(ArmSystem::haveVirtualization(tc) && aarch64EL != EL2); @@ -846,7 +851,10 @@ TLB::checkPermissions64(TlbEntry *te, const RequestPtr &req, Mode mode, grant_read = hap & 0x1; if (is_fetch) { // sctlr.wxn overrides the xn bit - grant = !sctlr.wxn && !xn; + grant = !wxn && !xn; + } else if (is_atomic) { + grant = r && w; + grant_read = r; } else if (is_write) { grant = hap & 0x2; } else { // is_read @@ -867,7 +875,7 @@ TLB::checkPermissions64(TlbEntry *te, const RequestPtr &req, Mode mode, break; case 4: case 5: - grant = r || w || (x && !sctlr.wxn); + grant = r || w || (x && !wxn); break; case 6: case 7: @@ -898,7 +906,7 @@ TLB::checkPermissions64(TlbEntry *te, const RequestPtr &req, Mode mode, switch (perm) { case 0: case 2: - grant = r || w || (x && !sctlr.wxn); + grant = r || w || (x && !wxn); break; case 1: case 3: @@ -939,7 +947,7 @@ TLB::checkPermissions64(TlbEntry *te, const RequestPtr &req, Mode mode, uint8_t perm = (ap & 0x2) | xn; switch (perm) { case 0: - grant = r || w || (x && !sctlr.wxn) ; + grant = r || w || (x && !wxn); break; case 1: grant = r || w; @@ -1049,13 +1057,16 @@ TLB::translateMmuOff(ThreadContext *tc, const RequestPtr &req, Mode mode, // Set memory attributes TlbEntry temp_te; temp_te.ns = !isSecure; - if (isStage2 || hcr.dc == 0 || isSecure || + bool dc = (HaveVirtHostExt(tc) + && hcr.e2h == 1 && hcr.tge == 1) ? 0: hcr.dc; + bool i_cacheability = sctlr.i && !sctlr.m; + if (isStage2 || !dc || isSecure || (isHyp && !(tranType & S1CTran))) { temp_te.mtype = is_fetch ? 
TlbEntry::MemoryType::Normal : TlbEntry::MemoryType::StronglyOrdered; - temp_te.innerAttrs = 0x0; - temp_te.outerAttrs = 0x0; + temp_te.innerAttrs = i_cacheability? 0x2: 0x0; + temp_te.outerAttrs = i_cacheability? 0x2: 0x0; temp_te.shareable = true; temp_te.outerShareable = true; } else { @@ -1119,7 +1130,8 @@ TLB::translateMmuOn(ThreadContext* tc, const RequestPtr &req, Mode mode, if (isSecure && !te->ns) { req->setFlags(Request::SECURE); } - if ((!is_fetch) && (vaddr & mask(flags & AlignmentMask)) && + if (!is_fetch && fault == NoFault && + (vaddr & mask(flags & AlignmentMask)) && (te->mtype != TlbEntry::MemoryType::Normal)) { // Unaligned accesses to Device memory should always cause an // abort regardless of sctlr.a @@ -1199,9 +1211,15 @@ TLB::translateFs(const RequestPtr &req, ThreadContext *tc, Mode mode, } } + bool vm = hcr.vm; + if (HaveVirtHostExt(tc) && hcr.e2h == 1 && hcr.tge ==1) + vm = 0; + else if (hcr.dc == 1) + vm = 1; + Fault fault = NoFault; // If guest MMU is off or hcr.vm=0 go straight to stage2 - if ((isStage2 && !hcr.vm) || (!isStage2 && !sctlr.m)) { + if ((isStage2 && !vm) || (!isStage2 && !sctlr.m)) { fault = translateMmuOff(tc, req, mode, tranType, vaddr, long_desc_format); } else { @@ -1341,10 +1359,32 @@ TLB::updateMiscReg(ThreadContext *tc, ArmTranslationType tranType) ELIs64(tc, EL2) : ELIs64(tc, aarch64EL == EL0 ? EL1 : aarch64EL); + hcr = tc->readMiscReg(MISCREG_HCR_EL2); if (aarch64) { // AArch64 // determine EL we need to translate in switch (aarch64EL) { case EL0: + if (HaveVirtHostExt(tc) && hcr.tge == 1 && hcr.e2h == 1) { + // VHE code for EL2&0 regime + sctlr = tc->readMiscReg(MISCREG_SCTLR_EL2); + ttbcr = tc->readMiscReg(MISCREG_TCR_EL2); + uint64_t ttbr_asid = ttbcr.a1 ? + tc->readMiscReg(MISCREG_TTBR1_EL2) : + tc->readMiscReg(MISCREG_TTBR0_EL2); + asid = bits(ttbr_asid, + (haveLargeAsid64 && ttbcr.as) ? 63 : 55, 48); + + } else { + sctlr = tc->readMiscReg(MISCREG_SCTLR_EL1); + ttbcr = tc->readMiscReg(MISCREG_TCR_EL1); + uint64_t ttbr_asid = ttbcr.a1 ? + tc->readMiscReg(MISCREG_TTBR1_EL1) : + tc->readMiscReg(MISCREG_TTBR0_EL1); + asid = bits(ttbr_asid, + (haveLargeAsid64 && ttbcr.as) ? 63 : 55, 48); + + } + break; case EL1: { sctlr = tc->readMiscReg(MISCREG_SCTLR_EL1); @@ -1359,7 +1399,16 @@ TLB::updateMiscReg(ThreadContext *tc, ArmTranslationType tranType) case EL2: sctlr = tc->readMiscReg(MISCREG_SCTLR_EL2); ttbcr = tc->readMiscReg(MISCREG_TCR_EL2); - asid = -1; + if (hcr.e2h == 1) { + // VHE code for EL2&0 regime + uint64_t ttbr_asid = ttbcr.a1 ? + tc->readMiscReg(MISCREG_TTBR1_EL2) : + tc->readMiscReg(MISCREG_TTBR0_EL2); + asid = bits(ttbr_asid, + (haveLargeAsid64 && ttbcr.as) ? 63 : 55, 48); + } else { + asid = -1; + } break; case EL3: sctlr = tc->readMiscReg(MISCREG_SCTLR_EL3); @@ -1367,25 +1416,39 @@ TLB::updateMiscReg(ThreadContext *tc, ArmTranslationType tranType) asid = -1; break; } - hcr = tc->readMiscReg(MISCREG_HCR_EL2); + scr = tc->readMiscReg(MISCREG_SCR_EL3); isPriv = aarch64EL != EL0; if (haveVirtualization) { - vmid = bits(tc->readMiscReg(MISCREG_VTTBR_EL2), 55, 48); + vmid = bits(tc->readMiscReg(MISCREG_VTTBR_EL2), 55, 48); isHyp = aarch64EL == EL2; isHyp |= tranType & HypMode; isHyp &= (tranType & S1S2NsTran) == 0; isHyp &= (tranType & S1CTran) == 0; + + if (hcr.e2h == 1 && (aarch64EL == EL2 + || (hcr.tge ==1 && aarch64EL == EL0))) { + isHyp = true; + directToStage2 = false; + stage2Req = false; + stage2DescReq = false; + } else { // Work out if we should skip the first stage of translation and go // directly to stage 2. 
This value is cached so we don't have to // compute it for every translation. - stage2Req = isStage2 || - (hcr.vm && !isHyp && !isSecure && - !(tranType & S1CTran) && (aarch64EL < EL2) && - !(tranType & S1E1Tran)); // <--- FIX THIS HACK - stage2DescReq = isStage2 || (hcr.vm && !isHyp && !isSecure && - (aarch64EL < EL2)); - directToStage2 = !isStage2 && stage2Req && !sctlr.m; + bool vm = hcr.vm; + if (HaveVirtHostExt(tc) && hcr.e2h == 1 && hcr.tge == 1) { + vm = 0; + } + + stage2Req = isStage2 || + (vm && !isHyp && !isSecure && + !(tranType & S1CTran) && (aarch64EL < EL2) && + !(tranType & S1E1Tran)); // <--- FIX THIS HACK + stage2DescReq = isStage2 || (vm && !isHyp && !isSecure && + (aarch64EL < EL2)); + directToStage2 = !isStage2 && stage2Req && !sctlr.m; + } } else { vmid = 0; isHyp = false; @@ -1499,7 +1562,8 @@ TLB::getTE(TlbEntry **te, const RequestPtr &req, ThreadContext *tc, Mode mode, } else { vaddr = vaddr_tainted; } - *te = lookup(vaddr, asid, vmid, isHyp, is_secure, false, false, target_el); + *te = lookup(vaddr, asid, vmid, isHyp, is_secure, false, false, target_el, + false); if (*te == NULL) { if (req->isPrefetch()) { // if the request is a prefetch don't attempt to fill the TLB or go @@ -1530,7 +1594,8 @@ TLB::getTE(TlbEntry **te, const RequestPtr &req, ThreadContext *tc, Mode mode, return fault; } - *te = lookup(vaddr, asid, vmid, isHyp, is_secure, false, false, target_el); + *te = lookup(vaddr, asid, vmid, isHyp, is_secure, false, false, + target_el, false); if (!*te) printTlb(); assert(*te); diff --git a/src/arch/arm/tlb.hh b/src/arch/arm/tlb.hh index 767222a24..24faecfd6 100644 --- a/src/arch/arm/tlb.hh +++ b/src/arch/arm/tlb.hh @@ -206,7 +206,8 @@ class TLB : public BaseTLB */ TlbEntry *lookup(Addr vpn, uint16_t asn, uint8_t vmid, bool hyp, bool secure, bool functional, - bool ignore_asn, ExceptionLevel target_el); + bool ignore_asn, ExceptionLevel target_el, + bool in_host); virtual ~TLB(); @@ -246,7 +247,7 @@ class TLB : public BaseTLB * @param secure_lookup if the operation affects the secure world */ void flushAllSecurity(bool secure_lookup, ExceptionLevel target_el, - bool ignore_el = false); + bool ignore_el = false, bool in_host = false); /** Remove all entries in the non secure world, depending on whether they * were allocated in hyp mode or not @@ -259,8 +260,8 @@ class TLB : public BaseTLB */ void flushAll() override { - flushAllSecurity(false, EL0, true); - flushAllSecurity(true, EL0, true); + flushAllSecurity(false, EL0, true, false); + flushAllSecurity(true, EL0, true, false); } /** Remove any entries that match both a va and asn @@ -269,20 +270,21 @@ class TLB : public BaseTLB * @param secure_lookup if the operation affects the secure world */ void flushMvaAsid(Addr mva, uint64_t asn, bool secure_lookup, - ExceptionLevel target_el); + ExceptionLevel target_el, bool in_host = false); /** Remove any entries that match the asn * @param asn contextid/asn to flush on match * @param secure_lookup if the operation affects the secure world */ void flushAsid(uint64_t asn, bool secure_lookup, - ExceptionLevel target_el); + ExceptionLevel target_el, bool in_host = false); /** Remove all entries that match the va regardless of asn * @param mva address to flush from cache * @param secure_lookup if the operation affects the secure world */ - void flushMva(Addr mva, bool secure_lookup, ExceptionLevel target_el); + void flushMva(Addr mva, bool secure_lookup, ExceptionLevel target_el, + bool in_host = false); /** * Invalidate all entries in the stage 2 TLB that match the 
given ipa @@ -447,9 +449,11 @@ private: * @param asn contextid/asn to flush on match * @param secure_lookup if the operation affects the secure world * @param ignore_asn if the flush should ignore the asn + * @param in_host if hcr.e2h == 1 and hcr.tge == 1 for VHE. */ void _flushMva(Addr mva, uint64_t asn, bool secure_lookup, - bool ignore_asn, ExceptionLevel target_el); + bool ignore_asn, ExceptionLevel target_el, + bool in_host); public: /* Testing */ Fault testTranslation(const RequestPtr &req, Mode mode, diff --git a/src/arch/arm/tlbi_op.cc b/src/arch/arm/tlbi_op.cc index 3530a90ef..f3b9bd121 100644 --- a/src/arch/arm/tlbi_op.cc +++ b/src/arch/arm/tlbi_op.cc @@ -45,16 +45,18 @@ namespace ArmISA { void TLBIALL::operator()(ThreadContext* tc) { - getITBPtr(tc)->flushAllSecurity(secureLookup, targetEL); - getDTBPtr(tc)->flushAllSecurity(secureLookup, targetEL); + HCR hcr = tc->readMiscReg(MISCREG_HCR_EL2); + bool in_host = (hcr.tge == 1 && hcr.e2h == 1); + getITBPtr(tc)->flushAllSecurity(secureLookup, targetEL, in_host); + getDTBPtr(tc)->flushAllSecurity(secureLookup, targetEL, in_host); // If CheckerCPU is connected, need to notify it of a flush CheckerCPU *checker = tc->getCheckerCpuPtr(); if (checker) { getITBPtr(checker)->flushAllSecurity(secureLookup, - targetEL); + targetEL, in_host); getDTBPtr(checker)->flushAllSecurity(secureLookup, - targetEL); + targetEL, in_host); } } @@ -73,12 +75,14 @@ DTLBIALL::operator()(ThreadContext* tc) void TLBIASID::operator()(ThreadContext* tc) { - getITBPtr(tc)->flushAsid(asid, secureLookup, targetEL); - getDTBPtr(tc)->flushAsid(asid, secureLookup, targetEL); + HCR hcr = tc->readMiscReg(MISCREG_HCR_EL2); + bool in_host = (hcr.tge == 1 && hcr.e2h == 1); + getITBPtr(tc)->flushAsid(asid, secureLookup, targetEL, in_host); + getDTBPtr(tc)->flushAsid(asid, secureLookup, targetEL, in_host); CheckerCPU *checker = tc->getCheckerCpuPtr(); if (checker) { - getITBPtr(checker)->flushAsid(asid, secureLookup, targetEL); - getDTBPtr(checker)->flushAsid(asid, secureLookup, targetEL); + getITBPtr(checker)->flushAsid(asid, secureLookup, targetEL, in_host); + getDTBPtr(checker)->flushAsid(asid, secureLookup, targetEL, in_host); } } @@ -110,30 +114,34 @@ TLBIALLN::operator()(ThreadContext* tc) void TLBIMVAA::operator()(ThreadContext* tc) { - getITBPtr(tc)->flushMva(addr, secureLookup, targetEL); - getDTBPtr(tc)->flushMva(addr, secureLookup, targetEL); + HCR hcr = tc->readMiscReg(MISCREG_HCR_EL2); + bool in_host = (hcr.tge == 1 && hcr.e2h == 1); + getITBPtr(tc)->flushMva(addr, secureLookup, targetEL, in_host); + getDTBPtr(tc)->flushMva(addr, secureLookup, targetEL, in_host); CheckerCPU *checker = tc->getCheckerCpuPtr(); if (checker) { - getITBPtr(checker)->flushMva(addr, secureLookup, targetEL); - getDTBPtr(checker)->flushMva(addr, secureLookup, targetEL); + getITBPtr(checker)->flushMva(addr, secureLookup, targetEL, in_host); + getDTBPtr(checker)->flushMva(addr, secureLookup, targetEL, in_host); } } void TLBIMVA::operator()(ThreadContext* tc) { + HCR hcr = tc->readMiscReg(MISCREG_HCR_EL2); + bool in_host = (hcr.tge == 1 && hcr.e2h == 1); getITBPtr(tc)->flushMvaAsid(addr, asid, - secureLookup, targetEL); + secureLookup, targetEL, in_host); getDTBPtr(tc)->flushMvaAsid(addr, asid, - secureLookup, targetEL); + secureLookup, targetEL, in_host); CheckerCPU *checker = tc->getCheckerCpuPtr(); if (checker) { getITBPtr(checker)->flushMvaAsid( - addr, asid, secureLookup, targetEL); + addr, asid, secureLookup, targetEL, in_host); getDTBPtr(checker)->flushMvaAsid( - addr, asid, 
secureLookup, targetEL); + addr, asid, secureLookup, targetEL, in_host); } } diff --git a/src/arch/arm/utility.cc b/src/arch/arm/utility.cc index e72c2301a..07740a21c 100644 --- a/src/arch/arm/utility.cc +++ b/src/arch/arm/utility.cc @@ -327,7 +327,7 @@ s1TranslationRegime(ThreadContext* tc, ExceptionLevel el) return el; else if (ArmSystem::haveEL(tc, EL3) && ELIs32(tc, EL3) && scr.ns == 0) return EL3; - else if (ArmSystem::haveVirtualization(tc) && ELIsInHost(tc, el)) + else if (HaveVirtHostExt(tc) && ELIsInHost(tc, el)) return EL2; else return EL1; @@ -380,7 +380,8 @@ bool ELIsInHost(ThreadContext *tc, ExceptionLevel el) { const HCR hcr = tc->readMiscReg(MISCREG_HCR_EL2); - return ((IsSecureEL2Enabled(tc) || !isSecureBelowEL3(tc)) && + return (ArmSystem::haveEL(tc, EL2) && + (IsSecureEL2Enabled(tc) || !isSecureBelowEL3(tc)) && HaveVirtHostExt(tc) && !ELIs32(tc, EL2) && hcr.e2h == 1 && (el == EL2 || (el == EL0 && hcr.tge == 1))); } @@ -416,9 +417,12 @@ ELStateUsingAArch32K(ThreadContext *tc, ExceptionLevel el, bool secure) bool aarch32_below_el3 = (have_el3 && scr.rw == 0); HCR hcr = tc->readMiscReg(MISCREG_HCR_EL2); + bool secEL2 = false; bool aarch32_at_el1 = (aarch32_below_el3 - || (have_el2 - && !secure && hcr.rw == 0)); + || (have_el2 + && (secEL2 || !isSecureBelowEL3(tc)) + && hcr.rw == 0 && !(hcr.e2h && hcr.tge + && HaveVirtHostExt(tc)))); // Only know if EL0 using AArch32 from PSTATE if (el == EL0 && !aarch32_at_el1) { @@ -497,7 +501,7 @@ computeAddrTop(ThreadContext *tc, bool selbit, bool isInstr, case EL2: { TCR tcr = tc->readMiscReg(MISCREG_TCR_EL2); - if (ArmSystem::haveVirtualization(tc) && ELIsInHost(tc, el)) { + if (HaveVirtHostExt(tc) && ELIsInHost(tc, el)) { tbi = selbit? tcr.tbi1 : tcr.tbi0; tbid = selbit? tcr.tbid1 : tcr.tbid0; } else { @@ -526,7 +530,6 @@ purifyTaggedAddr(Addr addr, ThreadContext *tc, ExceptionLevel el, TCR tcr, bool isInstr) { bool selbit = bits(addr, 55); -// TCR tcr = tc->readMiscReg(MISCREG_TCR_EL1); int topbit = computeAddrTop(tc, selbit, isInstr, tcr, el); if (topbit == 63) { @@ -1304,9 +1307,28 @@ decodeMrsMsrBankedReg(uint8_t sysM, bool r, bool &isIntReg, int ®Idx, return (ok); } +bool +isUnpriviledgeAccess(ThreadContext * tc) +{ + const HCR hcr = tc->readMiscReg(MISCREG_HCR_EL2); + // NV Extension not implemented yet + bool have_nv_ext = false; + bool unpriv_el1 = currEL(tc) == EL1 && + !(ArmSystem::haveVirtualization(tc) && + have_nv_ext && hcr.nv == 1 && hcr.nv1 == 1); + bool unpriv_el2 = ArmSystem::haveEL(tc, EL2) && HaveVirtHostExt(tc) && + currEL(tc) == EL2 && hcr.e2h == 1 && hcr.tge == 1; + + // User Access override, or UAO not implemented yet. + bool user_access_override = false; + return (unpriv_el1 || unpriv_el2) && !user_access_override; +} + bool SPAlignmentCheckEnabled(ThreadContext* tc) { + ExceptionLevel regime = s1TranslationRegime(tc, currEL(tc)); + switch (currEL(tc)) { case EL3: return ((SCTLR) tc->readMiscReg(MISCREG_SCTLR_EL3)).sa; @@ -1315,7 +1337,11 @@ SPAlignmentCheckEnabled(ThreadContext* tc) case EL1: return ((SCTLR) tc->readMiscReg(MISCREG_SCTLR_EL1)).sa; case EL0: - return ((SCTLR) tc->readMiscReg(MISCREG_SCTLR_EL1)).sa0; + { + SCTLR sc = (regime == EL2) ? 
tc->readMiscReg(MISCREG_SCTLR_EL2): + tc->readMiscReg(MISCREG_SCTLR_EL1); + return sc.sa0; + } default: panic("Invalid exception level"); break; diff --git a/src/arch/arm/utility.hh b/src/arch/arm/utility.hh index b61fc2020..efe9e0808 100644 --- a/src/arch/arm/utility.hh +++ b/src/arch/arm/utility.hh @@ -450,6 +450,7 @@ inline ByteOrder byteOrder(const ThreadContext *tc) return isBigEndian64(tc) ? BigEndianByteOrder : LittleEndianByteOrder; }; -} +bool isUnpriviledgeAccess(ThreadContext * tc); +} #endif
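
Note on the recurring EL2&0 ("in host") checks in this patch: ELIsInHost(), the new in_host argument threaded through the TLB lookup/flush paths, and the hcr.e2h/hcr.tge tests in the table walker all reduce to the same predicate on HCR_EL2. The standalone sketch below is illustrative only (it is not part of the patch and deliberately omits the security-state, EL2-presence and AArch32 qualifiers that the real ELIsInHost() also evaluates); the enum and struct are placeholders, not gem5 types.

// Illustrative sketch of the VHE EL2&0 regime checks used above.
// Field names mirror HCR_EL2.E2H and HCR_EL2.TGE.
#include <cstdint>

enum El { EL0 = 0, EL1 = 1, EL2 = 2, EL3 = 3 };

struct HcrEl2 { bool e2h; bool tge; };

// An exception level is "in host" (EL2&0 translation regime) when VHE is
// enabled (E2H == 1) and the level is EL2 itself, or EL0 with TGE == 1.
bool elIsInHost(El el, const HcrEl2 &hcr)
{
    return hcr.e2h && (el == EL2 || (el == EL0 && hcr.tge));
}

// Stage-1 register selection mirroring the table-walker/TLB changes above:
// EL0 accesses in the EL2&0 regime use the *_EL2 translation registers.
const char *stage1Regs(El el, const HcrEl2 &hcr)
{
    return elIsInHost(el, hcr) ? "SCTLR_EL2 / TCR_EL2 / TTBRx_EL2"
                               : "SCTLR_EL1 / TCR_EL1 / TTBRx_EL1";
}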
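
With VHE, translations in the EL2&0 regime carry an ASID, which is why TLB::updateMiscReg() above now extracts one from TTBR0_EL2/TTBR1_EL2 instead of forcing asid = -1 at EL2. The following minimal sketch follows the same TCR.A1 / TCR.AS selection used in that hunk; the helper name and parameters are illustrative, not gem5's bits() utility.

// Illustrative only: ASID selection for the EL2&0 regime.
// TCR.A1 chooses TTBR1 over TTBR0; with 16-bit ASIDs enabled
// (haveLargeAsid64 && TCR.AS) bits [63:48] are used, otherwise [55:48].
#include <cstdint>

uint16_t selectAsid(uint64_t ttbr0, uint64_t ttbr1, bool a1, bool asid16)
{
    const uint64_t ttbr = a1 ? ttbr1 : ttbr0;
    const int top = asid16 ? 63 : 55;          // inclusive top bit
    const int width = top - 48 + 1;            // 16 or 8 bits
    return static_cast<uint16_t>((ttbr >> 48) & ((1ULL << width) - 1));
}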