APPLY_IREG(r13, INTREG_R13); \
APPLY_IREG(r14, INTREG_R14); \
APPLY_IREG(r15, INTREG_R15); \
- } while(0)
+ } while (0)
#define FOREACH_SREG() \
do { \
APPLY_SREG(cr8, MISCREG_CR8); \
APPLY_SREG(efer, MISCREG_EFER); \
APPLY_SREG(apic_base, MISCREG_APIC_BASE); \
- } while(0)
+ } while (0)
#define FOREACH_DREG() \
do { \
APPLY_DREG(db[3], MISCREG_DR3); \
APPLY_DREG(dr6, MISCREG_DR6); \
APPLY_DREG(dr7, MISCREG_DR7); \
- } while(0)
+ } while (0)
#define FOREACH_SEGMENT() \
do { \
APPLY_SEGMENT(ss, MISCREG_SS - MISCREG_SEG_SEL_BASE); \
APPLY_SEGMENT(tr, MISCREG_TR - MISCREG_SEG_SEL_BASE); \
APPLY_SEGMENT(ldt, MISCREG_TSL - MISCREG_SEG_SEL_BASE); \
- } while(0)
+ } while (0)
#define FOREACH_DTABLE() \
do { \
APPLY_DTABLE(gdt, MISCREG_TSG - MISCREG_SEG_SEL_BASE); \
APPLY_DTABLE(idt, MISCREG_IDTR - MISCREG_SEG_SEL_BASE); \
- } while(0)
+ } while (0)
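// Illustrative sketch, not part of the patch itself: the FOREACH_*()
// macros above implement the X-macro pattern. A caller defines the
// matching APPLY_*() helper for the copy direction it needs, expands
// the list, and undefines the helper again, e.g. to copy the integer
// registers from gem5's thread context into a struct kvm_regs:
//
//   #define APPLY_IREG(kreg, mreg) regs.kreg = tc->readIntReg(mreg)
//   FOREACH_IREG();
//   #undef APPLY_IREG
//
// The register list is thus written once but drives both the
// gem5-to-KVM and the KVM-to-gem5 copy loops (updateThreadContextRegs()
// below expands the same list in the opposite direction).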
template<typename STRUCT, typename ENTRY>
static STRUCT *newVarStruct(size_t entries)
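// The body is elided in this excerpt; a minimal sketch of what a
// helper with this signature presumably does is to over-allocate the
// struct so its trailing variable-length ENTRY array (as used by
// struct kvm_msrs below) has room for 'entries' elements:
//
//   return (STRUCT *)operator new(
//       sizeof(STRUCT) + entries * sizeof(ENTRY));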
X86KvmCPU::X86KvmCPU(X86KvmCPUParams *params)
: BaseKvmCPU(params),
useXSave(params->useXSave)
{
- Kvm &kvm(vm.kvm);
+ Kvm &kvm(*vm.kvm);
if (!kvm.capSetTSSAddress())
panic("KVM: Missing capability (KVM_CAP_SET_TSS_ADDR)\n");
updateCPUID();
- io_req.setThreadContext(tc->contextId(), 0);
-
// TODO: Do we need to create an identity mapped TSS area? We
// should call kvm.vm.setTSSAddress() here in that case. It should
// only be needed for old versions of the virtualization
// extensions.
}
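// Hypothetical sketch of what the TODO above would amount to: the
// KVM_SET_TSS_ADDR ioctl reserves a three-page identity-mapped region
// below 4 GiB that older Intel VT-x implementations need for the guest
// TSS. The address below is an arbitrary example, not a value from
// the patch:
//
//   vm.setTSSAddress(0xfffbd000);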
void
-X86KvmCPU::dump()
+X86KvmCPU::dump() const
{
dumpIntRegs();
if (useXSave)
void
X86KvmCPU::dumpMSRs() const
{
- const Kvm::MSRIndexVector &supported_msrs(vm.kvm.getSupportedMSRs());
+ const Kvm::MSRIndexVector &supported_msrs(vm.kvm->getSupportedMSRs());
std::unique_ptr<struct kvm_msrs> msrs(
newVarStruct<struct kvm_msrs, struct kvm_msr_entry>(
supported_msrs.size()));
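// Sketch of how such a variable-length structure is typically
// populated before being passed to the MSR accessors (assumed
// continuation; the loop itself is elided from this excerpt):
//
//   msrs->nmsrs = supported_msrs.size();
//   for (size_t i = 0; i < supported_msrs.size(); ++i)
//       msrs->entries[i].index = supported_msrs[i];
//   getMSRs(*msrs);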
FOREACH_IREG();
#undef APPLY_IREG
- regs.rip = tc->instAddr();
+ regs.rip = tc->instAddr() - tc->readMiscReg(MISCREG_CS_BASE);
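// Why the CS base is subtracted above: gem5 tracks a flat, linear PC,
// while KVM's struct kvm_regs holds RIP as an offset from the code
// segment base. The conversion is mirrored in both directions:
//
//   regs.rip = pc - cs_base;    // gem5 -> KVM (above)
//   pc = regs.rip + cs.base;    // KVM -> gem5 (updateThreadContextRegs)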
/* You might think that setting regs.rflags to the contents
* of MISCREG_RFLAGS here would suffice. In that case you're
void
X86KvmCPU::updateThreadContext()
{
+ struct kvm_regs regs;
+ struct kvm_sregs sregs;
+
+ getRegisters(regs);
+ getSpecialRegisters(sregs);
+
DPRINTF(KvmContext, "X86KvmCPU::updateThreadContext():\n");
if (DTRACE(KvmContext))
dump();
- updateThreadContextRegs();
- updateThreadContextSRegs();
- if (useXSave)
- updateThreadContextXSave();
- else
- updateThreadContextFPU();
+ updateThreadContextRegs(regs, sregs);
+ updateThreadContextSRegs(sregs);
+ if (useXSave) {
+ struct kvm_xsave xsave;
+ getXSave(xsave);
+
+ updateThreadContextXSave(xsave);
+ } else {
+ struct kvm_fpu fpu;
+ getFPUState(fpu);
+
+ updateThreadContextFPU(fpu);
+ }
updateThreadContextMSRs();
// The M5 misc reg caches some values from other
}
void
-X86KvmCPU::updateThreadContextRegs()
+X86KvmCPU::updateThreadContextRegs(const struct kvm_regs &regs,
+ const struct kvm_sregs &sregs)
{
- struct kvm_regs regs;
- getRegisters(regs);
-
#define APPLY_IREG(kreg, mreg) tc->setIntReg(mreg, regs.kreg)
FOREACH_IREG();
#undef APPLY_IREG
- tc->pcState(PCState(regs.rip));
+ tc->pcState(PCState(regs.rip + sregs.cs.base));
// Flags are spread out across multiple semi-magic registers so we
// need some special care when updating them.
}
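// The "special care" mentioned above is what gem5's
// X86ISA::getRFlags()/setRFlags() helpers (arch/x86/utility.hh)
// provide: they gather and scatter the architectural RFLAGS value
// across the internal flag registers, roughly:
//
//   setRFlags(tc, regs.rflags);   // KVM -> gem5
//   regs.rflags = getRFlags(tc);  // gem5 -> KVM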
void
-X86KvmCPU::updateThreadContextSRegs()
+X86KvmCPU::updateThreadContextSRegs(const struct kvm_sregs &sregs)
{
- struct kvm_sregs sregs;
- getSpecialRegisters(sregs);
-
assert(getKvmRunState()->apic_base == sregs.apic_base);
assert(getKvmRunState()->cr8 == sregs.cr8);
}
void
-X86KvmCPU::updateThreadContextFPU()
+X86KvmCPU::updateThreadContextFPU(const struct kvm_fpu &fpu)
{
- struct kvm_fpu fpu;
- getFPUState(fpu);
-
updateThreadContextFPUCommon(tc, fpu);
tc->setMiscRegNoEffect(MISCREG_FISEG, 0);
}
void
-X86KvmCPU::updateThreadContextXSave()
+X86KvmCPU::updateThreadContextXSave(const struct kvm_xsave &kxsave)
{
- struct kvm_xsave kxsave;
- FXSave &xsave(*(FXSave *)kxsave.region);
- getXSave(kxsave);
+ const FXSave &xsave(*(const FXSave *)kxsave.region);
updateThreadContextFPUCommon(tc, xsave);
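// Note on the cast above: struct kvm_xsave's region member is a raw
// __u32[1024] blob whose first 512 bytes follow the legacy FXSAVE
// layout, so overlaying the FXSave type on it gives typed, copy-free
// access to the x87/SSE state.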
void
X86KvmCPU::deliverInterrupts()
{
+ Fault fault;
+
syncThreadContext();
- Fault fault(interrupts->getInterrupt(tc));
- interrupts->updateIntrInfo(tc);
+ {
+ // Migrate to the interrupt controller's thread to get the
+ // interrupt. Even though the individual methods are safe to
+ // call across threads, we might still lose interrupts unless
+ // getInterrupt() and updateIntrInfo() are called atomically.
+ EventQueue::ScopedMigration migrate(interrupts[0]->eventQueue());
+ fault = interrupts[0]->getInterrupt(tc);
+ interrupts[0]->updateIntrInfo(tc);
+ }
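// EventQueue::ScopedMigration (src/sim/eventq.hh) is an RAII helper:
// its constructor unlocks the current event queue, locks the target
// queue, and makes it the active one; the destructor reverses the
// move. A minimal usage sketch:
//
//   {
//       EventQueue::ScopedMigration migrate(other_eq);
//       // runs with other_eq locked and current
//   }   // back on the original queue here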
X86Interrupt *x86int(dynamic_cast<X86Interrupt *>(fault.get()));
if (dynamic_cast<NonMaskableInterrupt *>(fault.get())) {
{
struct kvm_run &kvm_run(*getKvmRunState());
- if (interrupts->checkInterruptsRaw()) {
- if (interrupts->hasPendingUnmaskable()) {
+ if (interrupts[0]->checkInterruptsRaw()) {
+ if (interrupts[0]->hasPendingUnmaskable()) {
DPRINTF(KvmInt,
"Delivering unmaskable interrupt.\n");
syncThreadContext();
// the thread context and check if there are /really/
// interrupts that should be delivered now.
syncThreadContext();
- if (interrupts->checkInterrupts(tc)) {
+ if (interrupts[0]->checkInterrupts(tc)) {
DPRINTF(KvmInt,
"M5 has pending interrupts, delivering interrupt.\n");
pAddr = X86ISA::x86IOAddress(port);
}
- io_req.setPhys(pAddr, kvm_run.io.size, Request::UNCACHEABLE,
+ Request io_req(pAddr, kvm_run.io.size, Request::UNCACHEABLE,
dataMasterId());
+ io_req.setThreadContext(tc->contextId(), 0);
const MemCmd cmd(isWrite ? MemCmd::WriteReq : MemCmd::ReadReq);
+ // Temporarily lock and migrate to the event queue of the
+ // VM. This queue is assumed to "own" all devices we need to
+ // access if running in multi-core mode.
+ EventQueue::ScopedMigration migrate(vm.eventQueue());
for (int i = 0; i < count; ++i) {
Packet pkt(&io_req, cmd);
X86KvmCPU::getMsrIntersection() const
{
if (cachedMsrIntersection.empty()) {
- const Kvm::MSRIndexVector &kvm_msrs(vm.kvm.getSupportedMSRs());
+ const Kvm::MSRIndexVector &kvm_msrs(vm.kvm->getSupportedMSRs());
DPRINTF(Kvm, "kvm-x86: Updating MSR intersection\n");
for (auto it = kvm_msrs.cbegin(); it != kvm_msrs.cend(); ++it) {