BaseKvmCPU::wakeup()
{
DPRINTF(Kvm, "wakeup()\n");
+ // This method might have been called from another
+ // context. Migrate to this SimObject's event queue when
+ // delivering the wakeup signal.
+ EventQueue::ScopedMigration migrate(eventQueue());
+
+ // Kick the vCPU to get it to come out of KVM.
+ kick();
if (thread->status() != ThreadContext::Suspended)
return;
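
// Illustrative sketch, not part of this change: EventQueue::ScopedMigration
// is assumed to be a RAII helper along the following lines. It unlocks
// the queue the calling thread is currently bound to, locks the target
// queue, and reverses the swap on destruction. Class and member names
// here are illustrative.

class ScopedMigrationSketch
{
  public:
    ScopedMigrationSketch(EventQueue *new_eq)
        : to(new_eq), from(curEventQueue())
    {
        from->unlock();
        to->lock();
        curEventQueue(to);
    }

    ~ScopedMigrationSketch()
    {
        to->unlock();
        from->lock();
        curEventQueue(from);
    }

  private:
    EventQueue *to;
    EventQueue *from;
};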
// We always execute at least one cycle to prevent
// BaseKvmCPU::tick() from being rescheduled on the same tick
// twice.
ticksExecuted = clockPeriod();
} else {
+ // This method is executed as a result of a tick event. That
+ // means that the event queue will be locked when entering the
+ // method. We temporarily unlock the event queue to allow
+ // other threads to steal control of this thread to inject
+ // interrupts. They will typically lock the queue and then
+ // force an exit from KVM by kicking the vCPU.
+ EventQueue::ScopedRelease release(curEventQueue());
+
if (ticks < runTimer->resolution()) {
DPRINTF(KvmRun, "KVM: Adjusting tick count (%i -> %i)\n",
ticks, runTimer->resolution());
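
// Illustrative sketch, not part of this change: ScopedRelease is assumed
// to be the simpler RAII counterpart described by the comment above; it
// drops the lock on the given queue for the duration of the scope and
// re-acquires it on destruction, letting other threads kick the vCPU.

class ScopedReleaseSketch
{
  public:
    ScopedReleaseSketch(EventQueue *_eq) : eq(_eq) { eq->unlock(); }
    ~ScopedReleaseSketch() { eq->lock(); }

  private:
    EventQueue *eq;
};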
pkt.dataStatic(data);
if (mmio_req.isMmappedIpr()) {
+ // We assume that there is no need to migrate to a different
+ // event queue when doing IPRs. IPRs are currently only used
+ // for m5ops, so this should be a valid assumption.
const Cycles ipr_delay(write ?
TheISA::handleIprWrite(tc, &pkt) :
TheISA::handleIprRead(tc, &pkt));
return clockPeriod() * ipr_delay;
} else {
+ // Temporarily lock and migrate to the event queue of the
+ // VM. This queue is assumed to "own" all devices we need to
+ // access if running in multi-core mode.
+ EventQueue::ScopedMigration migrate(vm.eventQueue());
+
return dataPort.sendAtomic(&pkt);
}
}
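
// Illustrative sketch, not part of this change: any atomic device access
// issued from a vCPU thread can follow the same pattern as the MMIO path
// above. The helper name and signature are hypothetical.

Tick
accessDeviceAtomic(KvmVM &vm, MasterPort &port, PacketPtr pkt)
{
    // Hold the VM's event queue, which owns the devices, for the
    // duration of the access, then migrate back to the caller's queue
    // when `migrate` goes out of scope.
    EventQueue::ScopedMigration migrate(vm.eventQueue());
    return port.sendAtomic(pkt);
}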
void
X86KvmCPU::deliverInterrupts()
{
+ Fault fault;
+
syncThreadContext();
- Fault fault(interrupts->getInterrupt(tc));
- interrupts->updateIntrInfo(tc);
+ {
+ // Migrate to the interrupt controller's thread to get the
+ // interrupt. Even though the individual methods are safe to
+ // call across threads, we might still lose interrupts unless
+ // getInterrupt() and updateIntrInfo() are called atomically.
+ EventQueue::ScopedMigration migrate(interrupts->eventQueue());
+ fault = interrupts->getInterrupt(tc);
+ interrupts->updateIntrInfo(tc);
+ }
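
// Illustrative note, not part of this change: the scoped migration makes
// the two calls atomic with respect to the interrupt controller's
// thread. Without it, an interleaving like the following could lose an
// interrupt:
//
//   vCPU thread                          interrupt controller thread
//   fault = interrupts->getInterrupt(tc);
//                                        <posts a new interrupt>
//   interrupts->updateIntrInfo(tc);      <acks the new, undelivered one>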
X86Interrupt *x86int(dynamic_cast<X86Interrupt *>(fault.get()));
if (dynamic_cast<NonMaskableInterrupt *>(fault.get())) {
dataMasterId());
const MemCmd cmd(isWrite ? MemCmd::WriteReq : MemCmd::ReadReq);
+ // Temporarily lock and migrate to the event queue of the
+ // VM. This queue is assumed to "own" all devices we need to
+ // access if running in multi-core mode.
+ EventQueue::ScopedMigration migrate(vm.eventQueue());
for (int i = 0; i < count; ++i) {
Packet pkt(&io_req, cmd);
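
// Illustrative note, not part of this change: `migrate` above is
// constructed before the loop, so its destructor runs only after all
// `count` accesses have completed; the entire multi-word I/O operation
// therefore executes while the VM queue's lock is held.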