return False
if isa == "arm":
- return host_isa == "armv7l"
+ return host_isa in ("armv7l", "aarch64")
elif isa == "x86":
if host_isa != "x86_64":
return False
("atomic", "AtomicSimpleCPU"),
("minor", "MinorCPU"),
("detailed", "DerivO3CPU"),
- ("kvm", ("ArmKvmCPU", "X86KvmCPU")),
+ ("kvm", ("ArmKvmCPU", "ArmV8KvmCPU", "X86KvmCPU")),
]
# Filtered list of aliases. Only aliases for existing CPUs exist in
--- /dev/null
+# Copyright (c) 2015 ARM Limited
+# All rights reserved.
+#
+# The license below extends only to copyright in the software and shall
+# not be construed as granting a license to any other intellectual
+# property including but not limited to intellectual property relating
+# to a hardware implementation of the functionality of the software
+# licensed hereunder. You may use the software subject to the license
+# terms below provided that you ensure that this notice is replicated
+# unmodified and in its entirety in all distributions of the software,
+# modified or unmodified, in source code or in binary form.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met: redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer;
+# redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution;
+# neither the name of the copyright holders nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# Authors: Andreas Sandberg
+
+from m5.params import *
+from BaseArmKvmCPU import BaseArmKvmCPU
+
+class ArmV8KvmCPU(BaseArmKvmCPU):
+ type = 'ArmV8KvmCPU'
+ cxx_header = "arch/arm/kvm/armv8_cpu.hh"
--- /dev/null
+# Copyright (c) 2015 ARM Limited
+# All rights reserved.
+#
+# The license below extends only to copyright in the software and shall
+# not be construed as granting a license to any other intellectual
+# property including but not limited to intellectual property relating
+# to a hardware implementation of the functionality of the software
+# licensed hereunder. You may use the software subject to the license
+# terms below provided that you ensure that this notice is replicated
+# unmodified and in its entirety in all distributions of the software,
+# modified or unmodified, in source code or in binary form.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met: redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer;
+# redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution;
+# neither the name of the copyright holders nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# Authors: Andreas Sandberg
+
+from m5.params import *
+from BaseKvmCPU import BaseKvmCPU
+
+class BaseArmKvmCPU(BaseKvmCPU):
+ type = 'BaseArmKvmCPU'
+ cxx_header = "arch/arm/kvm/base_cpu.hh"
+ abstract = True
SimObject('KvmGic.py')
Source('gic.cc')
+SimObject('BaseArmKvmCPU.py')
+Source('base_cpu.cc')
+
if host_isa == "armv7l":
SimObject('ArmKvmCPU.py')
Source('arm_cpu.cc')
+elif host_isa == "aarch64":
+ SimObject('ArmV8KvmCPU.py')
+ Source('armv8_cpu.cc')
--- /dev/null
+/*
+ * Copyright (c) 2015 ARM Limited
+ * All rights reserved
+ *
+ * The license below extends only to copyright in the software and shall
+ * not be construed as granting a license to any other intellectual
+ * property including but not limited to intellectual property relating
+ * to a hardware implementation of the functionality of the software
+ * licensed hereunder. You may use the software subject to the license
+ * terms below provided that you ensure that this notice is replicated
+ * unmodified and in its entirety in all distributions of the software,
+ * modified or unmodified, in source code or in binary form.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Andreas Sandberg
+ */
+
+#include "arch/arm/kvm/armv8_cpu.hh"
+
+#include <linux/kvm.h>
+
+#include "debug/KvmContext.hh"
+#include "params/ArmV8KvmCPU.hh"
+
+using namespace ArmISA;
+
+// Unlike gem5, kvm doesn't count the SP as a normal integer register,
+// which means we only have 31 normal integer registers.
+constexpr static unsigned NUM_XREGS = NUM_ARCH_INTREGS - 1;
+static_assert(NUM_XREGS == 31, "Unexpected number of aarch64 int. regs.");
+
+// The KVM interface accesses vector registers of 4 single precision
+// floats instead of individual registers.
+constexpr static unsigned NUM_QREGS = NumFloatV8ArchRegs / 4;
+static_assert(NUM_QREGS == 32, "Unexpected number of aarch64 vector regs.");
+
+#define EXTRACT_FIELD(v, name) \
+ (((v) & name ## _MASK) >> name ## _SHIFT)
+
+#define CORE_REG(name, size) \
+ (KVM_REG_ARM64 | KVM_REG_ARM_CORE | \
+ KVM_REG_SIZE_ ## size | \
+ KVM_REG_ARM_CORE_REG(name))
+
+#define INT_REG(name) CORE_REG(name, U64)
+#define SIMD_REG(name) CORE_REG(name, U128)
+
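+// Compute the KVM_{GET,SET}_ONE_REG ID of integer register Xn. The
+// IDs of consecutive X registers are assumed to be evenly spaced, so
+// the ID of Xn is extrapolated from the IDs of X0 and X1.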
+constexpr uint64_t
+kvmXReg(const int num)
+{
+ return INT_REG(regs.regs[0]) +
+ (INT_REG(regs.regs[1]) - INT_REG(regs.regs[0])) * num;
+}
+
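+// Compute the KVM_{GET,SET}_ONE_REG ID of the 128-bit SIMD/FP
+// register Qn, extrapolated from the IDs of vregs[0] and vregs[1].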
+constexpr uint64_t
+kvmFPReg(const int num)
+{
+ return SIMD_REG(fp_regs.vregs[0]) +
+ (SIMD_REG(fp_regs.vregs[1]) - SIMD_REG(fp_regs.vregs[0])) * num;
+}
+
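+// Helper union used to move a KVM vector register to and from gem5's
+// 32-bit float register file one word at a time.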
+union KvmFPReg {
+ union {
+ uint32_t i;
+ float f;
+ } s[4];
+
+ union {
+ uint64_t i;
+ double f;
+ } d[2];
+
+ uint8_t data[32];
+};
+
+#define FP_REGS_PER_VFP_REG 4
+static_assert(sizeof(FloatRegBits) == 4, "Unexpected float reg size");
+
+const std::vector<ArmV8KvmCPU::IntRegInfo> ArmV8KvmCPU::intRegMap = {
+ { INT_REG(regs.sp), INTREG_SP0, "SP(EL0)" },
+ { INT_REG(sp_el1), INTREG_SP1, "SP(EL1)" },
+};
+
+const std::vector<ArmV8KvmCPU::MiscRegInfo> ArmV8KvmCPU::miscRegMap = {
+ MiscRegInfo(INT_REG(regs.pstate), MISCREG_CPSR, "PSTATE"),
+ MiscRegInfo(INT_REG(elr_el1), MISCREG_ELR_EL1, "ELR(EL1)"),
+ MiscRegInfo(INT_REG(spsr[KVM_SPSR_EL1]), MISCREG_SPSR_EL1, "SPSR(EL1)"),
+ MiscRegInfo(INT_REG(spsr[KVM_SPSR_ABT]), MISCREG_SPSR_ABT, "SPSR(ABT)"),
+ MiscRegInfo(INT_REG(spsr[KVM_SPSR_UND]), MISCREG_SPSR_UND, "SPSR(UND)"),
+ MiscRegInfo(INT_REG(spsr[KVM_SPSR_IRQ]), MISCREG_SPSR_IRQ, "SPSR(IRQ)"),
+ MiscRegInfo(INT_REG(spsr[KVM_SPSR_FIQ]), MISCREG_SPSR_FIQ, "SPSR(FIQ)"),
+ MiscRegInfo(INT_REG(fp_regs.fpsr), MISCREG_FPSR, "FPSR"),
+ MiscRegInfo(INT_REG(fp_regs.fpcr), MISCREG_FPCR, "FPCR"),
+};
+
+ArmV8KvmCPU::ArmV8KvmCPU(ArmV8KvmCPUParams *params)
+ : BaseArmKvmCPU(params)
+{
+}
+
+ArmV8KvmCPU::~ArmV8KvmCPU()
+{
+}
+
+void
+ArmV8KvmCPU::dump()
+{
+ inform("Integer registers:\n");
+ inform(" PC: %s\n", getAndFormatOneReg(INT_REG(regs.pc)));
+ for (int i = 0; i < NUM_XREGS; ++i)
+ inform(" X%i: %s\n", i, getAndFormatOneReg(kvmXReg(i)));
+
+ for (int i = 0; i < NUM_QREGS; ++i)
+ inform(" Q%i: %s\n", i, getAndFormatOneReg(kvmFPReg(i)));
+
+ for (const auto &ri : intRegMap)
+ inform(" %s: %s\n", ri.name, getAndFormatOneReg(ri.kvm));
+
+ for (const auto &ri : miscRegMap)
+ inform(" %s: %s\n", ri.name, getAndFormatOneReg(ri.kvm));
+
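+    // Walk the full register list reported by KVM and print anything
+    // that has not already been covered above (system registers and
+    // demuxed registers such as CCSIDR).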
+    for (const auto &reg : getRegList()) {
+ const uint64_t arch(reg & KVM_REG_ARCH_MASK);
+ if (arch != KVM_REG_ARM64) {
+ inform("0x%x: %s\n", reg, getAndFormatOneReg(reg));
+ continue;
+ }
+
+ const uint64_t type(reg & KVM_REG_ARM_COPROC_MASK);
+ switch (type) {
+ case KVM_REG_ARM_CORE:
+ // These have already been printed
+ break;
+
+ case KVM_REG_ARM64_SYSREG: {
+ const uint64_t op0(EXTRACT_FIELD(reg, KVM_REG_ARM64_SYSREG_OP0));
+ const uint64_t op1(EXTRACT_FIELD(reg, KVM_REG_ARM64_SYSREG_OP1));
+ const uint64_t crn(EXTRACT_FIELD(reg, KVM_REG_ARM64_SYSREG_CRN));
+ const uint64_t crm(EXTRACT_FIELD(reg, KVM_REG_ARM64_SYSREG_CRM));
+ const uint64_t op2(EXTRACT_FIELD(reg, KVM_REG_ARM64_SYSREG_OP2));
+ const MiscRegIndex idx(
+ decodeAArch64SysReg(op0, op1, crn, crm, op2));
+
+ inform(" %s (op0: %i, op1: %i, crn: %i, crm: %i, op2: %i): %s",
+ miscRegName[idx], op0, op1, crn, crm, op2,
+ getAndFormatOneReg(reg));
+ } break;
+
+ case KVM_REG_ARM_DEMUX: {
+ const uint64_t id(EXTRACT_FIELD(reg, KVM_REG_ARM_DEMUX_ID));
+ const uint64_t val(EXTRACT_FIELD(reg, KVM_REG_ARM_DEMUX_VAL));
+ if (id == KVM_REG_ARM_DEMUX_ID_CCSIDR) {
+ inform(" CSSIDR[%i]: %s\n", val,
+ getAndFormatOneReg(reg));
+ } else {
+ inform(" UNKNOWN[%i:%i]: %s\n", id, val,
+ getAndFormatOneReg(reg));
+ }
+ } break;
+
+ default:
+ inform("0x%x: %s\n", reg, getAndFormatOneReg(reg));
+ }
+ }
+}
+
+void
+ArmV8KvmCPU::updateKvmState()
+{
+ DPRINTF(KvmContext, "In updateKvmState():\n");
+ for (const auto &ri : miscRegMap) {
+ const uint64_t value(tc->readMiscReg(ri.idx));
+ DPRINTF(KvmContext, " %s := 0x%x\n", ri.name, value);
+ setOneReg(ri.kvm, value);
+ }
+
+ for (int i = 0; i < NUM_XREGS; ++i) {
+ const uint64_t value(tc->readIntReg(INTREG_X0 + i));
+ DPRINTF(KvmContext, " X%i := 0x%x\n", i, value);
+ setOneReg(kvmXReg(i), value);
+ }
+
+ for (const auto &ri : intRegMap) {
+ const uint64_t value(tc->readIntReg(ri.idx));
+ DPRINTF(KvmContext, " %s := 0x%x\n", ri.name, value);
+ setOneReg(ri.kvm, value);
+ }
+
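+    // KVM exposes the vector registers as 128-bit quantities, so the
+    // gem5 float registers are packed four 32-bit words at a time.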
+ for (int i = 0; i < NUM_QREGS; ++i) {
+ const RegIndex reg_base(i * FP_REGS_PER_VFP_REG);
+ KvmFPReg reg;
+ for (int j = 0; j < FP_REGS_PER_VFP_REG; j++)
+ reg.s[j].i = tc->readFloatRegBits(reg_base + j);
+
+ setOneReg(kvmFPReg(i), reg.data);
+ DPRINTF(KvmContext, " Q%i: %s\n", i, getAndFormatOneReg(kvmFPReg(i)));
+ }
+
+ for (const auto &ri : getSysRegMap()) {
+ const uint64_t value(tc->readMiscReg(ri.idx));
+ DPRINTF(KvmContext, " %s := 0x%x\n", ri.name, value);
+ setOneReg(ri.kvm, value);
+ }
+
+ setOneReg(INT_REG(regs.pc), tc->instAddr());
+ DPRINTF(KvmContext, " PC := 0x%x\n", tc->instAddr());
+}
+
+void
+ArmV8KvmCPU::updateThreadContext()
+{
+ DPRINTF(KvmContext, "In updateThreadContext():\n");
+
+ // Update core misc regs first as they (particularly PSTATE/CPSR)
+ // affect how other registers are mapped.
+ for (const auto &ri : miscRegMap) {
+ const auto value(getOneRegU64(ri.kvm));
+ DPRINTF(KvmContext, " %s := 0x%x\n", ri.name, value);
+ tc->setMiscRegNoEffect(ri.idx, value);
+ }
+
+ for (int i = 0; i < NUM_XREGS; ++i) {
+ const auto value(getOneRegU64(kvmXReg(i)));
+ DPRINTF(KvmContext, " X%i := 0x%x\n", i, value);
+ tc->setIntReg(INTREG_X0 + i, value);
+ }
+
+ for (const auto &ri : intRegMap) {
+ const auto value(getOneRegU64(ri.kvm));
+ DPRINTF(KvmContext, " %s := 0x%x\n", ri.name, value);
+ tc->setIntReg(ri.idx, value);
+ }
+
+ for (int i = 0; i < NUM_QREGS; ++i) {
+ const RegIndex reg_base(i * FP_REGS_PER_VFP_REG);
+ KvmFPReg reg;
+ DPRINTF(KvmContext, " Q%i: %s\n", i, getAndFormatOneReg(kvmFPReg(i)));
+ getOneReg(kvmFPReg(i), reg.data);
+ for (int j = 0; j < FP_REGS_PER_VFP_REG; j++)
+ tc->setFloatRegBits(reg_base + j, reg.s[j].i);
+ }
+
+ for (const auto &ri : getSysRegMap()) {
+ const auto value(getOneRegU64(ri.kvm));
+ DPRINTF(KvmContext, " %s := 0x%x\n", ri.name, value);
+ tc->setMiscRegNoEffect(ri.idx, value);
+ }
+
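+    // Rebuild the gem5 PC state from the raw PC reported by KVM. The
+    // ISA state (AArch64/Thumb) is derived from the CPSR/PSTATE that
+    // was synchronized above.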
+ const CPSR cpsr(tc->readMiscRegNoEffect(MISCREG_CPSR));
+ PCState pc(getOneRegU64(INT_REG(regs.pc)));
+ pc.aarch64(inAArch64(tc));
+ pc.thumb(cpsr.t);
+ pc.nextAArch64(inAArch64(tc));
+ // TODO: This is a massive assumption that will break when
+ // switching to thumb.
+ pc.nextThumb(cpsr.t);
+ DPRINTF(KvmContext, " PC := 0x%x (t: %i, a64: %i)\n",
+ pc.instAddr(), pc.thumb(), pc.aarch64());
+ tc->pcState(pc);
+}
+
+const std::vector<ArmV8KvmCPU::MiscRegInfo> &
+ArmV8KvmCPU::getSysRegMap() const
+{
+ // Try to use the cached map
+ if (!sysRegMap.empty())
+ return sysRegMap;
+
+    for (const auto &reg : getRegList()) {
+ const uint64_t arch(reg & KVM_REG_ARCH_MASK);
+ if (arch != KVM_REG_ARM64)
+ continue;
+
+ const uint64_t type(reg & KVM_REG_ARM_COPROC_MASK);
+ if (type != KVM_REG_ARM64_SYSREG)
+ continue;
+
+ const uint64_t op0(EXTRACT_FIELD(reg, KVM_REG_ARM64_SYSREG_OP0));
+ const uint64_t op1(EXTRACT_FIELD(reg, KVM_REG_ARM64_SYSREG_OP1));
+ const uint64_t crn(EXTRACT_FIELD(reg, KVM_REG_ARM64_SYSREG_CRN));
+ const uint64_t crm(EXTRACT_FIELD(reg, KVM_REG_ARM64_SYSREG_CRM));
+ const uint64_t op2(EXTRACT_FIELD(reg, KVM_REG_ARM64_SYSREG_OP2));
+ const MiscRegIndex idx(decodeAArch64SysReg(op0, op1, crn, crm, op2));
+ const auto &info(miscRegInfo[idx]);
+ const bool writeable(
+ info[MISCREG_USR_NS_WR] || info[MISCREG_USR_S_WR] ||
+ info[MISCREG_PRI_S_WR] || info[MISCREG_PRI_NS_WR] ||
+ info[MISCREG_HYP_WR] ||
+ info[MISCREG_MON_NS0_WR] || info[MISCREG_MON_NS1_WR]);
+ const bool implemented(
+ info[MISCREG_IMPLEMENTED] || info[MISCREG_WARN_NOT_FAIL]);
+
+ // Only add implemented registers that we are going to be able
+ // to write.
+ if (implemented && writeable)
+ sysRegMap.emplace_back(reg, idx, miscRegName[idx]);
+ }
+
+ return sysRegMap;
+}
+
+ArmV8KvmCPU *
+ArmV8KvmCPUParams::create()
+{
+ return new ArmV8KvmCPU(this);
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 ARM Limited
+ * All rights reserved
+ *
+ * The license below extends only to copyright in the software and shall
+ * not be construed as granting a license to any other intellectual
+ * property including but not limited to intellectual property relating
+ * to a hardware implementation of the functionality of the software
+ * licensed hereunder. You may use the software subject to the license
+ * terms below provided that you ensure that this notice is replicated
+ * unmodified and in its entirety in all distributions of the software,
+ * modified or unmodified, in source code or in binary form.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Andreas Sandberg
+ */
+
+#ifndef __ARCH_ARM_KVM_ARMV8_CPU_HH__
+#define __ARCH_ARM_KVM_ARMV8_CPU_HH__
+
+#include <vector>
+
+#include "arch/arm/intregs.hh"
+#include "arch/arm/kvm/base_cpu.hh"
+#include "arch/arm/miscregs.hh"
+
+struct ArmV8KvmCPUParams;
+
+/**
+ * This is an implementation of a KVM-based ARMv8-compatible CPU.
+ *
+ * Known limitations:
+ * <ul>
+ *
+ * <li>The system-register-based generic timer can only be simulated
+ *   by the host kernel. Workaround: use a memory-mapped timer in
+ *   gem5 to simulate the timer instead.
+ *
+ * <li>Simulating devices (e.g., the generic timer) in the host
+ * kernel requires that the host kernel also simulates the
+ * GIC.
+ *
+ * <li>ID registers in the host and in gem5 must match for switching
+ * between simulated CPUs and KVM. This is particularly
+ * important for ID registers describing memory system
+ * capabilities (e.g., ASID size, physical address size).
+ *
+ * <li>Switching between a virtualized CPU and a simulated CPU is
+ * currently not supported if in-kernel device emulation is
+ * used. This could be worked around by adding support for
+ * switching to the gem5 (e.g., the KvmGic) side of the device
+ * models. A simpler workaround is to avoid in-kernel device
+ * models altogether.
+ *
+ * </ul>
+ *
+ */
+class ArmV8KvmCPU : public BaseArmKvmCPU
+{
+ public:
+ ArmV8KvmCPU(ArmV8KvmCPUParams *params);
+ virtual ~ArmV8KvmCPU();
+
+ void dump() M5_ATTR_OVERRIDE;
+
+ protected:
+ void updateKvmState() M5_ATTR_OVERRIDE;
+ void updateThreadContext() M5_ATTR_OVERRIDE;
+
+ protected:
+ /** Mapping between integer registers in gem5 and KVM */
+ struct IntRegInfo {
+ IntRegInfo(uint64_t _kvm, IntRegIndex _idx, const char *_name)
+ : kvm(_kvm), idx(_idx), name(_name) {}
+
+ /** Register index in KVM */
+ uint64_t kvm;
+ /** Register index in gem5 */
+ IntRegIndex idx;
+ /** Name to use in debug dumps */
+ const char *name;
+ };
+
+ /** Mapping between misc registers in gem5 and registers in KVM */
+ struct MiscRegInfo {
+ MiscRegInfo(uint64_t _kvm, MiscRegIndex _idx, const char *_name)
+ : kvm(_kvm), idx(_idx), name(_name) {}
+
+ /** Register index in KVM */
+ uint64_t kvm;
+ /** Register index in gem5 */
+ MiscRegIndex idx;
+ /** Name to use in debug dumps */
+ const char *name;
+ };
+
+ /**
+ * Get a map between system registers in kvm and gem5 registers
+ *
+ * This method returns a mapping between system registers in kvm
+ * and misc regs in gem5. The actual mapping is only created the
+ * first time the method is called and stored in a cache
+ * (ArmV8KvmCPU::sysRegMap).
+ *
+ * @return Vector of kvm<->misc reg mappings.
+ */
+ const std::vector<ArmV8KvmCPU::MiscRegInfo> &getSysRegMap() const;
+
+ /** Mapping between gem5 integer registers and integer registers in kvm */
+ static const std::vector<ArmV8KvmCPU::IntRegInfo> intRegMap;
+    /** Mapping between gem5 misc registers and registers in kvm */
+ static const std::vector<ArmV8KvmCPU::MiscRegInfo> miscRegMap;
+
+ /** Cached mapping between system registers in kvm and misc regs in gem5 */
+ mutable std::vector<ArmV8KvmCPU::MiscRegInfo> sysRegMap;
+};
+
+#endif // __ARCH_ARM_KVM_ARMV8_CPU_HH__
--- /dev/null
+/*
+ * Copyright (c) 2012, 2015 ARM Limited
+ * All rights reserved
+ *
+ * The license below extends only to copyright in the software and shall
+ * not be construed as granting a license to any other intellectual
+ * property including but not limited to intellectual property relating
+ * to a hardware implementation of the functionality of the software
+ * licensed hereunder. You may use the software subject to the license
+ * terms below provided that you ensure that this notice is replicated
+ * unmodified and in its entirety in all distributions of the software,
+ * modified or unmodified, in source code or in binary form.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Andreas Sandberg
+ */
+
+#include "arch/arm/kvm/base_cpu.hh"
+
+#include <linux/kvm.h>
+
+#include <cstring>
+#include <memory>
+
+#include "debug/KvmInt.hh"
+#include "params/BaseArmKvmCPU.hh"
+
+using namespace ArmISA;
+
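+// Construct the irq field used by KVM_IRQ_LINE on ARM. The value
+// encodes the interrupt type (e.g., CPU or SPI), the target vCPU,
+// and the interrupt number.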
+#define INTERRUPT_ID(type, vcpu, irq) ( \
+ ((type) << KVM_ARM_IRQ_TYPE_SHIFT) | \
+ ((vcpu) << KVM_ARM_IRQ_VCPU_SHIFT) | \
+ ((irq) << KVM_ARM_IRQ_NUM_SHIFT))
+
+#define INTERRUPT_VCPU_IRQ(vcpu) \
+ INTERRUPT_ID(KVM_ARM_IRQ_TYPE_CPU, vcpu, KVM_ARM_IRQ_CPU_IRQ)
+
+#define INTERRUPT_VCPU_FIQ(vcpu) \
+ INTERRUPT_ID(KVM_ARM_IRQ_TYPE_CPU, vcpu, KVM_ARM_IRQ_CPU_FIQ)
+
+
+BaseArmKvmCPU::BaseArmKvmCPU(BaseArmKvmCPUParams *params)
+ : BaseKvmCPU(params),
+ irqAsserted(false), fiqAsserted(false)
+{
+}
+
+BaseArmKvmCPU::~BaseArmKvmCPU()
+{
+}
+
+void
+BaseArmKvmCPU::startup()
+{
+ BaseKvmCPU::startup();
+
+ /* TODO: This needs to be moved when we start to support VMs with
+ * multiple threads since kvmArmVCpuInit requires that all CPUs in
+ * the VM have been created.
+ */
+ struct kvm_vcpu_init target_config;
+ memset(&target_config, 0, sizeof(target_config));
+
+ vm.kvmArmPreferredTarget(target_config);
+ kvmArmVCpuInit(target_config);
+}
+
+Tick
+BaseArmKvmCPU::kvmRun(Tick ticks)
+{
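+    // Propagate the simulated IRQ/FIQ line state to KVM before
+    // entering the guest. The kernel is only notified when the line
+    // state has changed since the last call.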
+ bool simFIQ(interrupts->checkRaw(INT_FIQ));
+ bool simIRQ(interrupts->checkRaw(INT_IRQ));
+
+ if (fiqAsserted != simFIQ) {
+ fiqAsserted = simFIQ;
+ DPRINTF(KvmInt, "KVM: Update FIQ state: %i\n", simFIQ);
+ vm.setIRQLine(INTERRUPT_VCPU_FIQ(vcpuID), simFIQ);
+ }
+ if (irqAsserted != simIRQ) {
+ irqAsserted = simIRQ;
+ DPRINTF(KvmInt, "KVM: Update IRQ state: %i\n", simIRQ);
+ vm.setIRQLine(INTERRUPT_VCPU_IRQ(vcpuID), simIRQ);
+ }
+
+ return BaseKvmCPU::kvmRun(ticks);
+}
+
+const BaseArmKvmCPU::RegIndexVector &
+BaseArmKvmCPU::getRegList() const
+{
+ // Do we need to request a list of registers from the kernel?
+ if (_regIndexList.size() == 0) {
+        // Start by probing for the size of the list. We do this by
+        // calling the ioctl with a zero-length register list; the
+        // kernel then writes the number of elements required to hold
+        // the full list back into regs_probe.n.
+ kvm_reg_list regs_probe;
+ regs_probe.n = 0;
+ getRegList(regs_probe);
+
+        // Request the actual register list now that we know how many
+        // registers we need to allocate space for.
+ std::unique_ptr<struct kvm_reg_list> regs;
+ const size_t size(sizeof(struct kvm_reg_list) +
+ regs_probe.n * sizeof(uint64_t));
+ regs.reset((struct kvm_reg_list *)operator new(size));
+ regs->n = regs_probe.n;
+ if (!getRegList(*regs))
+ panic("Failed to determine register list size.\n");
+
+ _regIndexList.assign(regs->reg, regs->reg + regs->n);
+ }
+
+ return _regIndexList;
+}
+
+void
+BaseArmKvmCPU::kvmArmVCpuInit(const struct kvm_vcpu_init &init)
+{
+ if (ioctl(KVM_ARM_VCPU_INIT, (void *)&init) == -1)
+ panic("KVM: Failed to initialize vCPU\n");
+}
+
+bool
+BaseArmKvmCPU::getRegList(struct kvm_reg_list &regs) const
+{
+    if (ioctl(KVM_GET_REG_LIST, (void *)&regs) == -1) {
+ if (errno == E2BIG) {
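+            // The supplied list was too small. The kernel has written
+            // the required number of elements back to regs.n, so the
+            // caller can retry with a larger buffer.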
+ return false;
+ } else {
+ panic("KVM: Failed to get vCPU register list (errno: %i)\n",
+ errno);
+ }
+ } else {
+ return true;
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2012, 2015 ARM Limited
+ * All rights reserved
+ *
+ * The license below extends only to copyright in the software and shall
+ * not be construed as granting a license to any other intellectual
+ * property including but not limited to intellectual property relating
+ * to a hardware implementation of the functionality of the software
+ * licensed hereunder. You may use the software subject to the license
+ * terms below provided that you ensure that this notice is replicated
+ * unmodified and in its entirety in all distributions of the software,
+ * modified or unmodified, in source code or in binary form.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Andreas Sandberg
+ */
+
+#ifndef __ARCH_ARM_KVM_BASE_CPU_HH__
+#define __ARCH_ARM_KVM_BASE_CPU_HH__
+
+#include <memory>
+#include <vector>
+
+#include "cpu/kvm/base.hh"
+
+struct BaseArmKvmCPUParams;
+
+class BaseArmKvmCPU : public BaseKvmCPU
+{
+ public:
+ BaseArmKvmCPU(BaseArmKvmCPUParams *params);
+ virtual ~BaseArmKvmCPU();
+
+ void startup() M5_ATTR_OVERRIDE;
+
+ protected:
+ Tick kvmRun(Tick ticks) M5_ATTR_OVERRIDE;
+
+
+ /** Cached state of the IRQ line */
+ bool irqAsserted;
+ /** Cached state of the FIQ line */
+ bool fiqAsserted;
+
+ protected:
+ typedef std::vector<uint64_t> RegIndexVector;
+
+ /**
+ * Get a list of registers supported by getOneReg() and setOneReg().
+ *
+ * This method returns a list of all registers supported by
+ * kvm. The actual list is only requested the first time this
+ * method is called. Subsequent calls return a cached copy of the
+ * register list.
+ *
+ * @return Vector of register indexes.
+ */
+ const RegIndexVector &getRegList() const;
+
+ /**
+ * Tell the kernel to initialize this CPU
+ *
+     * The kernel needs to know what type of CPU we want to
+     * emulate. The specified CPU type has to be compatible with the
+ * host CPU. In practice, we usually call
+ * KvmVM::kvmArmPreferredTarget() to discover the host CPU.
+ *
+ * @param target CPU type to emulate
+ */
+ void kvmArmVCpuInit(const struct kvm_vcpu_init &init);
+
+ private:
+ std::unique_ptr<struct kvm_reg_list> tryGetRegList(uint64_t nelem) const;
+
+ /**
+ * Get a list of registers supported by getOneReg() and setOneReg().
+ *
+ * @return False if the number of elements allocated in the list
+ * is too small to hold the complete register list (the required
+ * size is written to regs.n in this case). True on success.
+ */
+    bool getRegList(struct kvm_reg_list &regs) const;
+
+ /**
+ * Cached copy of the list of registers supported by KVM
+ */
+ mutable RegIndexVector _regIndexList;
+};
+
+#endif // __ARCH_ARM_KVM_BASE_CPU_HH__
return nextVCPUID++;
}
+#if defined(__aarch64__)
+void
+KvmVM::kvmArmPreferredTarget(struct kvm_vcpu_init &target) const
+{
+ if (ioctl(KVM_ARM_PREFERRED_TARGET, &target) == -1) {
+ panic("KVM: Failed to get ARM preferred CPU target (errno: %i)\n",
+ errno);
+ }
+}
+#endif
+
int
KvmVM::ioctl(int request, long p1) const
{
/** Global KVM interface */
Kvm kvm;
+#if defined(__aarch64__)
+ public: // ARM-specific
+ /**
+ * Ask the kernel for the preferred CPU target to simulate.
+ *
+ * When creating an ARM vCPU in Kvm, we need to initialize it with
+ * a call to BaseArmKvmCPU::kvmArmVCpuInit(). When calling this
+ * function, we need to know what type of CPU the host has. This
+ * call sets up the kvm_vcpu_init structure with the values the
+ * kernel wants.
+ *
+ * @param[out] target Target structure to initialize.
+ */
+ void kvmArmPreferredTarget(struct kvm_vcpu_init &target) const;
+
+#endif
+
protected:
/**
* VM CPU initialization code.