/*
- * Copyright (c) 2010 ARM Limited
+ * Copyright (c) 2010, 2012-2013, 2016-2019 ARM Limited
* All rights reserved
*
* The license below extends only to copyright in the software and shall
#ifndef __ARCH_ARM_UTILITY_HH__
#define __ARCH_ARM_UTILITY_HH__
+#include "arch/arm/isa_traits.hh"
#include "arch/arm/miscregs.hh"
#include "arch/arm/types.hh"
-#include "base/hashmap.hh"
+#include "base/logging.hh"
#include "base/trace.hh"
#include "base/types.hh"
+#include "cpu/static_inst.hh"
#include "cpu/thread_context.hh"
-namespace __hash_namespace {
- template<>
- struct hash<ArmISA::ExtMachInst> : public hash<uint32_t> {
- size_t operator()(const ArmISA::ExtMachInst &emi) const {
- return hash<uint32_t>::operator()((uint32_t)emi);
- };
- };
-}
+class ArmSystem;
namespace ArmISA {
- inline bool
- testPredicate(CPSR cpsr, ConditionCode code)
+inline PCState
+buildRetPC(const PCState &curPC, const PCState &callPC)
+{
+ PCState retPC = callPC;
+ retPC.uEnd();
+ return retPC;
+}
+
+inline bool
+testPredicate(uint32_t nz, uint32_t c, uint32_t v, ConditionCode code)
+{
+ bool n = (nz & 0x2);
+ bool z = (nz & 0x1);
+
+ switch (code)
{
- switch (code)
- {
- case COND_EQ: return cpsr.z;
- case COND_NE: return !cpsr.z;
- case COND_CS: return cpsr.c;
- case COND_CC: return !cpsr.c;
- case COND_MI: return cpsr.n;
- case COND_PL: return !cpsr.n;
- case COND_VS: return cpsr.v;
- case COND_VC: return !cpsr.v;
- case COND_HI: return (cpsr.c && !cpsr.z);
- case COND_LS: return !(cpsr.c && !cpsr.z);
- case COND_GE: return !(cpsr.n ^ cpsr.v);
- case COND_LT: return (cpsr.n ^ cpsr.v);
- case COND_GT: return !(cpsr.n ^ cpsr.v || cpsr.z);
- case COND_LE: return (cpsr.n ^ cpsr.v || cpsr.z);
- case COND_AL: return true;
- case COND_UC: return true;
- default:
- panic("Unhandled predicate condition: %d\n", code);
- }
+ case COND_EQ: return z;
+ case COND_NE: return !z;
+ case COND_CS: return c;
+ case COND_CC: return !c;
+ case COND_MI: return n;
+ case COND_PL: return !n;
+ case COND_VS: return v;
+ case COND_VC: return !v;
+ case COND_HI: return (c && !z);
+ case COND_LS: return !(c && !z);
+ case COND_GE: return !(n ^ v);
+ case COND_LT: return (n ^ v);
+ case COND_GT: return !(n ^ v || z);
+ case COND_LE: return (n ^ v || z);
+ case COND_AL: return true;
+ case COND_UC: return true;
+ default:
+ panic("Unhandled predicate condition: %d\n", code);
}
+}
- /**
- * Function to insure ISA semantics about 0 registers.
- * @param tc The thread context.
- */
- template <class TC>
- void zeroRegisters(TC *tc);
+/**
+ * Function to ensure ISA semantics about 0 registers.
+ * @param tc The thread context.
+ */
+template <class TC>
+void zeroRegisters(TC *tc);
- // Instruction address compression hooks
- static inline Addr realPCToFetchPC(const Addr &addr) {
- return addr;
- }
+inline void startupCPU(ThreadContext *tc, int cpuId)
+{
+ tc->activate();
+}
- static inline Addr fetchPCToRealPC(const Addr &addr) {
- return addr;
- }
+void copyRegs(ThreadContext *src, ThreadContext *dest);
- // the size of "fetched" instructions
- static inline size_t fetchInstSize() {
- return sizeof(MachInst);
- }
+static inline void
+copyMiscRegs(ThreadContext *src, ThreadContext *dest)
+{
+ panic("Copy Misc. Regs Not Implemented Yet\n");
+}
- static inline MachInst makeRegisterCopy(int dest, int src) {
- panic("makeRegisterCopy not implemented");
- return 0;
- }
+void initCPU(ThreadContext *tc, int cpuId);
- inline void startupCPU(ThreadContext *tc, int cpuId)
- {
- tc->activate(0);
- }
+/** Send an event (SEV) to a specific PE if there isn't
+ * already a pending event */
+void sendEvent(ThreadContext *tc);
- template <class XC>
- Fault
- checkFpEnableFault(XC *xc)
- {
- return NoFault;
- }
+static inline bool
+inUserMode(CPSR cpsr)
+{
+ return cpsr.mode == MODE_USER || cpsr.mode == MODE_EL0T;
+}
- static inline void
- copyRegs(ThreadContext *src, ThreadContext *dest)
- {
- panic("Copy Regs Not Implemented Yet\n");
- }
+static inline bool
+inUserMode(ThreadContext *tc)
+{
+ return inUserMode(tc->readMiscRegNoEffect(MISCREG_CPSR));
+}
- static inline void
- copyMiscRegs(ThreadContext *src, ThreadContext *dest)
- {
- panic("Copy Misc. Regs Not Implemented Yet\n");
- }
+static inline bool
+inPrivilegedMode(CPSR cpsr)
+{
+ return !inUserMode(cpsr);
+}
- void initCPU(ThreadContext *tc, int cpuId);
-
- static inline bool
- inUserMode(ThreadContext *tc)
- {
- return (tc->readMiscRegNoEffect(MISCREG_CPSR) & 0x1f) == MODE_USER;
+static inline bool
+inPrivilegedMode(ThreadContext *tc)
+{
+ return !inUserMode(tc);
+}
+
+bool inAArch64(ThreadContext *tc);
+
+static inline OperatingMode
+currOpMode(ThreadContext *tc)
+{
+ CPSR cpsr = tc->readMiscReg(MISCREG_CPSR);
+ return (OperatingMode) (uint8_t) cpsr.mode;
+}
+
+static inline ExceptionLevel
+currEL(ThreadContext *tc)
+{
+ return opModeToEL(currOpMode(tc));
+}
+
+inline ExceptionLevel
+currEL(CPSR cpsr)
+{
+ return opModeToEL((OperatingMode) (uint8_t)cpsr.mode);
+}
+
+bool HaveVirtHostExt(ThreadContext *tc);
+bool HaveSecureEL2Ext(ThreadContext *tc);
+bool IsSecureEL2Enabled(ThreadContext *tc);
+bool EL2Enabled(ThreadContext *tc);
+
+/**
+ * This function checks whether selected EL provided as an argument
+ * is using the AArch32 ISA. This information might be unavailable
+ * at the current EL status: it hence returns a pair of boolean values:
+ * a first boolean, true if information is available (known),
+ * and a second one, true if EL is using AArch32, false for AArch64.
+ *
+ * @param tc The thread context.
+ * @param el The target exception level.
+ * @retval known is FALSE for EL0 if the current Exception level
+ * is not EL0 and EL1 is using AArch64, since it cannot
+ * determine the state of EL0; TRUE otherwise.
+ * @retval aarch32 is TRUE if the specified Exception level is using AArch32;
+ * FALSE otherwise.
+ */
+std::pair<bool, bool>
+ELUsingAArch32K(ThreadContext *tc, ExceptionLevel el);
+
+bool ELIs32(ThreadContext *tc, ExceptionLevel el);
+
+bool ELIs64(ThreadContext *tc, ExceptionLevel el);
+
+/**
+ * Returns true if the current exception level `el` is executing a Host OS or
+ * an application of a Host OS (Armv8.1 Virtualization Host Extensions).
+ */
+bool ELIsInHost(ThreadContext *tc, ExceptionLevel el);
+
+bool isBigEndian64(ThreadContext *tc);
+
+/**
+ * badMode32 checks whether the execution mode provided as an argument is
+ * valid and implemented for AArch32
+ *
+ * @param tc ThreadContext
+ * @param mode OperatingMode to check
+ * @return false if mode is valid and implemented, true otherwise
+ */
+bool badMode32(ThreadContext *tc, OperatingMode mode);
+
+/**
+ * badMode checks whether the execution mode provided as an argument is
+ * valid and implemented.
+ *
+ * @param tc ThreadContext
+ * @param mode OperatingMode to check
+ * @return false if mode is valid and implemented, true otherwise
+ */
+bool badMode(ThreadContext *tc, OperatingMode mode);
+
+static inline uint8_t
+itState(CPSR psr)
+{
+ ITSTATE it = 0;
+ it.top6 = psr.it2;
+ it.bottom2 = psr.it1;
+
+ return (uint8_t)it;
+}
+
+/**
+ * Removes the tag from tagged addresses if that mode is enabled.
+ * @param addr The address to be purified.
+ * @param tc The thread context.
+ * @param el The controlled exception level.
+ * @return The purified address.
+ */
+Addr purifyTaggedAddr(Addr addr, ThreadContext *tc, ExceptionLevel el,
+ TTBCR tcr);
+Addr purifyTaggedAddr(Addr addr, ThreadContext *tc, ExceptionLevel el);
+
+static inline bool
+inSecureState(SCR scr, CPSR cpsr)
+{
+ switch ((OperatingMode) (uint8_t) cpsr.mode) {
+ case MODE_MON:
+ case MODE_EL3T:
+ case MODE_EL3H:
+ return true;
+ case MODE_HYP:
+ case MODE_EL2T:
+ case MODE_EL2H:
+ return false;
+ default:
+ return !scr.ns;
}
+}
-uint64_t getArgument(ThreadContext *tc, int number, bool fp);
-
-Fault setCp15Register(uint32_t &Rd, int CRn, int opc1, int CRm, int opc2);
-Fault readCp15Register(uint32_t &Rd, int CRn, int opc1, int CRm, int opc2);
+bool inSecureState(ThreadContext *tc);
+
+/**
+ * Return TRUE if an Exception level below EL3 is in Secure state.
+ * Differs from inSecureState in that it ignores the current EL
+ * or Mode in considering security state.
+ */
+inline bool isSecureBelowEL3(ThreadContext *tc);
+
+bool longDescFormatInUse(ThreadContext *tc);
+
+/** This helper function is either returning the value of
+ * MPIDR_EL1 (by calling getMPIDR), or it is issuing a read
+ * to VMPIDR_EL2 (as it happens in virtualized systems) */
+RegVal readMPIDR(ArmSystem *arm_sys, ThreadContext *tc);
+
+/** This helper function is returning the value of MPIDR_EL1 */
+RegVal getMPIDR(ArmSystem *arm_sys, ThreadContext *tc);
+
+static inline uint32_t
+mcrMrcIssBuild(bool isRead, uint32_t crm, IntRegIndex rt, uint32_t crn,
+ uint32_t opc1, uint32_t opc2)
+{
+ return (isRead << 0) |
+ (crm << 1) |
+ (rt << 5) |
+ (crn << 10) |
+ (opc1 << 14) |
+ (opc2 << 17);
+}
+
+static inline void
+mcrMrcIssExtract(uint32_t iss, bool &isRead, uint32_t &crm, IntRegIndex &rt,
+ uint32_t &crn, uint32_t &opc1, uint32_t &opc2)
+{
+ isRead = (iss >> 0) & 0x1;
+ crm = (iss >> 1) & 0xF;
+ rt = (IntRegIndex) ((iss >> 5) & 0xF);
+ crn = (iss >> 10) & 0xF;
+ opc1 = (iss >> 14) & 0x7;
+ opc2 = (iss >> 17) & 0x7;
+}
+
+static inline uint32_t
+mcrrMrrcIssBuild(bool isRead, uint32_t crm, IntRegIndex rt, IntRegIndex rt2,
+ uint32_t opc1)
+{
+ return (isRead << 0) |
+ (crm << 1) |
+ (rt << 5) |
+ (rt2 << 10) |
+ (opc1 << 16);
+}
+static inline uint32_t
+msrMrs64IssBuild(bool isRead, uint32_t op0, uint32_t op1, uint32_t crn,
+ uint32_t crm, uint32_t op2, IntRegIndex rt)
+{
+ return isRead |
+ (crm << 1) |
+ (rt << 5) |
+ (crn << 10) |
+ (op1 << 14) |
+ (op2 << 17) |
+ (op0 << 20);
+}
+
+bool
+mcrMrc15TrapToHyp(const MiscRegIndex miscReg, ThreadContext *tc, uint32_t iss);
+
+bool
+mcrMrc14TrapToHyp(const MiscRegIndex miscReg, HCR hcr, CPSR cpsr, SCR scr,
+ HDCR hdcr, HSTR hstr, HCPTR hcptr, uint32_t iss);
+bool
+mcrrMrrc15TrapToHyp(const MiscRegIndex miscReg, CPSR cpsr, SCR scr, HSTR hstr,
+ HCR hcr, uint32_t iss);
+
+bool SPAlignmentCheckEnabled(ThreadContext* tc);
+
+uint64_t getArgument(ThreadContext *tc, int &number, uint16_t size, bool fp);
+
+void skipFunction(ThreadContext *tc);
+
+inline void
+advancePC(PCState &pc, const StaticInstPtr &inst)
+{
+ inst->advancePC(pc);
+}
+
+Addr truncPage(Addr addr);
+Addr roundPage(Addr addr);
+
+inline uint64_t
+getExecutingAsid(ThreadContext *tc)
+{
+ return tc->readMiscReg(MISCREG_CONTEXTIDR);
+}
+
+// Decodes the register index to access based on the fields used in a MSR
+// or MRS instruction
+bool
+decodeMrsMsrBankedReg(uint8_t sysM, bool r, bool &isIntReg, int ®Idx,
+ CPSR cpsr, SCR scr, NSACR nsacr,
+ bool checkSecurity = true);
+
+// This wrapper function is used to turn the register index into a source
+// parameter for the instruction. See Operands.isa
+static inline int
+decodeMrsMsrBankedIntRegIndex(uint8_t sysM, bool r)
+{
+ int regIdx;
+ bool isIntReg;
+ bool validReg;
+
+ validReg = decodeMrsMsrBankedReg(sysM, r, isIntReg, regIdx, 0, 0, 0, false);
+ return (validReg && isIntReg) ? regIdx : INTREG_DUMMY;
+}
+
+/**
+ * Returns the n. of PA bits corresponding to the specified encoding.
+ */
+int decodePhysAddrRange64(uint8_t pa_enc);
+
+/**
+ * Returns the encoding corresponding to the specified n. of PA bits.
+ */
+uint8_t encodePhysAddrRange64(int pa_size);
+
+inline ByteOrder byteOrder(ThreadContext *tc)
+{
+ return isBigEndian64(tc) ? BigEndianByteOrder : LittleEndianByteOrder;
};
+}
#endif