/* Used by some KVM macros */
#define PAGE_SIZE pageSize
// NOTE(review): this file is a unified-diff fragment ('-' = removed,
// '+' = added); the constructor's interior is elided, so only the
// changed hunks are visible. Comments below annotate the diff.
//
// Removed: thread-local flag set by the old timer-overflow signal
// handler; the kick-signal protocol introduced by this patch no
// longer needs it (see onKickSignal below).
-static volatile __thread bool timerOverflowed = false;
-
BaseKvmCPU::BaseKvmCPU(BaseKvmCPUParams *params)
: BaseCPU(params),
vm(*params->kvmVM),
panic("KVM: Failed to determine host page size (%i)\n",
errno);
// Changed: SimpleThread construction now differs between full-system
// and syscall-emulation mode; the SE branch additionally passes
// params->workload[0] — presumably a different SimpleThread
// constructor overload, confirm against SimpleThread's declaration.
- thread = new SimpleThread(this, 0, params->system,
- params->itb, params->dtb, params->isa[0]);
+ if (FullSystem)
+ thread = new SimpleThread(this, 0, params->system, params->itb, params->dtb,
+ params->isa[0]);
+ else
+ thread = new SimpleThread(this, /* thread_num */ 0, params->system,
+ params->workload[0], params->itb,
+ params->dtb, params->isa[0]);
+
thread->setStatus(ThreadContext::Halted);
tc = thread->getTC();
threadContexts.push_back(tc);
// initialize CPU, including PC
if (FullSystem && !switchedOut())
TheISA::initCPU(tc, tc->contextId());
// Removed: mmio_req is no longer a member set up once here; the
// request is instead constructed per access (see the doMMIOAccess
// hunk further down, which creates a local Request).
-
- mmio_req.setThreadContext(tc->contextId(), 0);
}
// Fragment of a 'void' member function whose name line is elided —
// from the body (param fetch, signal handler setup, run timer
// creation) this looks like BaseKvmCPU::startup(); confirm upstream.
void
const BaseKvmCPUParams * const p(
dynamic_cast<const BaseKvmCPUParams *>(params()));
// Added: remember which host thread runs this vCPU so that other
// threads can later deliver the kick signal to it (see kick()).
+ vcpuThread = pthread_self();
+
// Setup signal handlers. This has to be done after the vCPU is
// created since it manipulates the vCPU signal mask.
setupSignalHandler();
// Changed: both timer variants now deliver the unified
// KVM_KICK_SIGNAL instead of the old dedicated KVM_TIMER_SIGNAL.
if (p->usePerfOverflow)
runTimer.reset(new PerfKvmTimer(hwCycles,
- KVM_TIMER_SIGNAL,
+ KVM_KICK_SIGNAL,
p->hostFactor,
p->hostFreq));
else
- runTimer.reset(new PosixKvmTimer(KVM_TIMER_SIGNAL, CLOCK_MONOTONIC,
+ runTimer.reset(new PosixKvmTimer(KVM_KICK_SIGNAL, CLOCK_MONOTONIC,
p->hostFactor,
p->hostFreq));
// wakeup(): return-type line is elided by the diff. The added hunk
// makes wakeup safe to call from a different event queue's thread
// (multi-queue simulation) and forces the vCPU out of KVM.
BaseKvmCPU::wakeup()
{
DPRINTF(Kvm, "wakeup()\n");
+ // This method might have been called from another
+ // context. Migrate to this SimObject's event queue when
+ // delivering the wakeup signal.
+ EventQueue::ScopedMigration migrate(eventQueue());
+
+ // Kick the vCPU to get it to come out of KVM.
+ kick();
// Nothing more to do unless the thread is actually suspended.
if (thread->status() != ThreadContext::Suspended)
return;
}
void
// Interface change: the Cycles delay parameter is dropped and the
// tick event is now always scheduled at the current cycle
// (clockEdge(Cycles(0))). NOTE(review): this changes the signature —
// presumably part of a repo-wide BaseCPU::activateContext API change;
// verify all callers/overrides were updated in the same patch.
-BaseKvmCPU::activateContext(ThreadID thread_num, Cycles delay)
+BaseKvmCPU::activateContext(ThreadID thread_num)
{
- DPRINTF(Kvm, "ActivateContext %d (%d cycles)\n", thread_num, delay);
+ DPRINTF(Kvm, "ActivateContext %d\n", thread_num);
// This CPU model only supports a single hardware thread.
assert(thread_num == 0);
assert(thread);
numCycles += ticksToCycles(thread->lastActivate - thread->lastSuspend);
- schedule(tickEvent, clockEdge(delay));
+ schedule(tickEvent, clockEdge(Cycles(0)));
_status = Running;
}
// Fragment of an elided function (likely suspendContext, given the
// Idle early-return — confirm upstream). The assert is widened to
// admit the new RunningServiceCompletion state introduced elsewhere
// in this patch.
if (_status == Idle)
return;
- assert(_status == Running);
+ assert(_status == Running || _status == RunningServiceCompletion);
// The tick event may not be scheduled if the guest has requested
// the monitor to wait for interrupts. The normal CPU models can
// Hunks from BaseKvmCPU::kvmRun(ticks); the signature line and large
// parts of the body are elided by the diff.
{
Tick ticksExecuted;
DPRINTF(KvmRun, "KVM: Executing for %i ticks\n", ticks);
// Removed: the early-timer bookkeeping flag (see the deleted
// timerOverflowed global at the top of this fragment).
- timerOverflowed = false;
if (ticks == 0) {
// Setting ticks == 0 is a special case which causes an entry
++numVMHalfEntries;
// Changed: raise(KVM_TIMER_SIGNAL) is replaced by the kick()
// helper, which delivers KVM_KICK_SIGNAL to the vCPU thread.
- // This signal is always masked while we are executing in gem5
- // and gets unmasked temporarily as soon as we enter into
+ // Send a KVM_KICK_SIGNAL to the vCPU thread (i.e., this
+ // thread). The KVM control signal is masked while executing
+ // in gem5 and gets unmasked temporarily when entering
// KVM. See setSignalMask() and setupSignalHandler().
- raise(KVM_TIMER_SIGNAL);
+ kick();
- // Enter into KVM. KVM will check for signals after completing
- // pending operations (IO). Since the KVM_TIMER_SIGNAL is
- // pending, this forces an immediate exit into gem5 again. We
+ // Start the vCPU. KVM will check for signals after completing
+ // pending operations (IO). Since the KVM_KICK_SIGNAL is
+ // pending, this forces an immediate exit to gem5 again. We
// don't bother to setup timers since this shouldn't actually
- // execute any code in the guest.
+ // execute any code (other than completing half-executed IO
+ // instructions) in the guest.
ioctlRun();
// We always execute at least one cycle to prevent the
// twice.
ticksExecuted = clockPeriod();
} else {
+ // This method is executed as a result of a tick event. That
+ // means that the event queue will be locked when entering the
+ // method. We temporarily unlock the event queue to allow
+ // other threads to steal control of this thread to inject
+ // interrupts. They will typically lock the queue and then
+ // force an exit from KVM by kicking the vCPU.
+ EventQueue::ScopedRelease release(curEventQueue());
+
if (ticks < runTimer->resolution()) {
DPRINTF(KvmRun, "KVM: Adjusting tick count (%i -> %i)\n",
ticks, runTimer->resolution());
if (!perfControlledByTimer)
hwCycles.stop();
// Changed: only the single unified kick signal needs to be
// discarded now (the separate instruction-count signal is gone).
- // The timer signal may have been delivered after we exited
+ // The control signal may have been delivered after we exited
// from KVM. It will be pending in that case since it is
// masked when we aren't executing in KVM. Discard it to make
// sure we don't deliver it immediately next time we try to
// enter into KVM.
- discardPendingSignal(KVM_TIMER_SIGNAL);
- discardPendingSignal(KVM_INST_SIGNAL);
+ discardPendingSignal(KVM_KICK_SIGNAL);
const uint64_t hostCyclesExecuted(getHostCycles() - baseCycles);
const uint64_t simCyclesExecuted(hostCyclesExecuted * hostFactor);
const uint64_t instsExecuted(hwInstructions.read() - baseInstrs);
ticksExecuted = runTimer->ticksFromHostCycles(hostCyclesExecuted);
// Removed: early-timer warning that depended on timerOverflowed;
// kicks are now an expected exit reason, not an anomaly.
- if (ticksExecuted < ticks &&
- timerOverflowed &&
- _kvmRun->exit_reason == KVM_EXIT_INTR) {
- // TODO: We should probably do something clever here...
- warn("KVM: Early timer event, requested %i ticks but got %i ticks.\n",
- ticks, ticksExecuted);
- }
-
/* Update statistics */
// NOTE(review): stray double semicolon on the next line — harmless
// but worth cleaning up in a follow-up.
numCycles += simCyclesExecuted;;
numInsts += instsExecuted;
// Hunks from BaseKvmCPU::doMMIOAccess(paddr, data, size, write); the
// signature and parts of the body are elided by the diff.
ThreadContext *tc(thread->getTC());
syncThreadContext();
// Changed: mmio_req was a reused member request; it is now a local
// Request built fresh for each MMIO access (thread-safety for the
// multi-queue mode this patch introduces — confirm upstream intent).
- mmio_req.setPhys(paddr, size, Request::UNCACHEABLE, dataMasterId());
+ Request mmio_req(paddr, size, Request::UNCACHEABLE, dataMasterId());
+ mmio_req.setThreadContext(tc->contextId(), 0);
// Some architectures do need to massage physical addresses a bit
// before they are inserted into the memory system. This enables
// APIC accesses on x86 and m5ops where supported through a MMIO
pkt.dataStatic(data);
if (mmio_req.isMmappedIpr()) {
+ // We currently assume that there is no need to migrate to a
+ // different event queue when doing IPRs. Currently, IPRs are
+ // only used for m5ops, so it should be a valid assumption.
const Cycles ipr_delay(write ?
TheISA::handleIprWrite(tc, &pkt) :
TheISA::handleIprRead(tc, &pkt));
// Added: IPR handlers may mutate the gem5 thread context, so flag
// it dirty to force a sync back into KVM state before re-entry.
+ threadContextDirty = true;
return clockPeriod() * ipr_delay;
} else {
+ // Temporarily lock and migrate to the event queue of the
+ // VM. This queue is assumed to "own" all devices we need to
+ // access if running in multi-core mode.
+ EventQueue::ScopedMigration migrate(vm.eventQueue());
+
return dataPort.sendAtomic(&pkt);
}
}
}
// The two old signal handlers (timer overflow, instruction-count
// overflow) collapse into a single empty kick handler: the handler
// body is irrelevant — the signal's only job is to make the KVM_RUN
// ioctl return with EINTR so gem5 regains control.
/**
- * Cycle timer overflow when running in KVM. Forces the KVM syscall to
- * exit with EINTR and allows us to run the event queue.
+ * Dummy handler for KVM kick signals.
*
- * @warn This function might not be called since some kernels don't
+ * @note This function is usually not called since the kernel doesn't
* seem to deliver signals when the signal is only unmasked when
* running in KVM. This doesn't matter though since we are only
* interested in getting KVM to exit, which happens as expected. See
* handling.
*/
static void
-onTimerOverflow(int signo, siginfo_t *si, void *data)
-{
- timerOverflowed = true;
-}
-
-/**
- * Instruction counter overflow when running in KVM. Forces the KVM
- * syscall to exit with EINTR and allows us to handle instruction
- * count events.
- */
-static void
-onInstEvent(int signo, siginfo_t *si, void *data)
+onKickSignal(int signo, siginfo_t *si, void *data)
{
}
// Hunks from BaseKvmCPU::setupSignalHandler() (function header
// elided): installs the kick handler, then arranges the masks so the
// kick signal is blocked in gem5 but deliverable inside KVM_RUN.
struct sigaction sa;
memset(&sa, 0, sizeof(sa));
- sa.sa_sigaction = onTimerOverflow;
+ sa.sa_sigaction = onKickSignal;
sa.sa_flags = SA_SIGINFO | SA_RESTART;
- if (sigaction(KVM_TIMER_SIGNAL, &sa, NULL) == -1)
+ if (sigaction(KVM_KICK_SIGNAL, &sa, NULL) == -1)
panic("KVM: Failed to setup vCPU timer signal handler\n");
// Removed: the separate instruction-count signal handler is gone;
// a single kick signal now serves both purposes.
- memset(&sa, 0, sizeof(sa));
- sa.sa_sigaction = onInstEvent;
- sa.sa_flags = SA_SIGINFO | SA_RESTART;
- if (sigaction(KVM_INST_SIGNAL, &sa, NULL) == -1)
- panic("KVM: Failed to setup vCPU instruction signal handler\n");
-
sigset_t sigset;
if (pthread_sigmask(SIG_BLOCK, NULL, &sigset) == -1)
panic("KVM: Failed get signal mask\n");
// Request KVM to setup the same signal mask as we're currently
- // running with except for the KVM control signals. We'll
- // sometimes need to raise the KVM_TIMER_SIGNAL to cause immediate
- // exits from KVM after servicing IO requests. See kvmRun().
- sigdelset(&sigset, KVM_TIMER_SIGNAL);
- sigdelset(&sigset, KVM_INST_SIGNAL);
+ // running with except for the KVM control signal. We'll sometimes
+ // need to raise the KVM_KICK_SIGNAL to cause immediate exits from
+ // KVM after servicing IO requests. See kvmRun().
+ sigdelset(&sigset, KVM_KICK_SIGNAL);
setSignalMask(&sigset);
// Mask our control signals so they aren't delivered unless we're
// actually executing inside KVM.
- sigaddset(&sigset, KVM_TIMER_SIGNAL);
- sigaddset(&sigset, KVM_INST_SIGNAL);
+ sigaddset(&sigset, KVM_KICK_SIGNAL);
if (pthread_sigmask(SIG_SETMASK, &sigset, NULL) == -1)
panic("KVM: Failed mask the KVM control signals\n");
}
// Tail fragment of an elided function (likely setupInstCounter /
// instruction-stop setup, given activeInstPeriod — confirm upstream):
// the perf instruction counter now raises the unified kick signal on
// overflow instead of the removed KVM_INST_SIGNAL.
hwCycles);
if (period)
- hwInstructions.enableSignals(KVM_INST_SIGNAL);
+ hwInstructions.enableSignals(KVM_KICK_SIGNAL);
activeInstPeriod = period;
}