cpu.itb.walker.port = test_sys.ruby._cpu_ports[i].slave
cpu.dtb.walker.port = test_sys.ruby._cpu_ports[i].slave
- cpu.interrupts.pio = test_sys.ruby._cpu_ports[i].master
- cpu.interrupts.int_master = test_sys.ruby._cpu_ports[i].slave
- cpu.interrupts.int_slave = test_sys.ruby._cpu_ports[i].master
+ cpu.interrupts[0].pio = test_sys.ruby._cpu_ports[i].master
+ cpu.interrupts[0].int_master = test_sys.ruby._cpu_ports[i].slave
+ cpu.interrupts[0].int_slave = test_sys.ruby._cpu_ports[i].master
else:
if options.caches or options.l2cache:
system.cpu[i].icache_port = ruby_port.slave
system.cpu[i].dcache_port = ruby_port.slave
if buildEnv['TARGET_ISA'] == 'x86':
- system.cpu[i].interrupts.pio = ruby_port.master
- system.cpu[i].interrupts.int_master = ruby_port.slave
- system.cpu[i].interrupts.int_slave = ruby_port.master
+ system.cpu[i].interrupts[0].pio = ruby_port.master
+ system.cpu[i].interrupts[0].int_master = ruby_port.slave
+ system.cpu[i].interrupts[0].int_slave = ruby_port.master
system.cpu[i].itb.walker.port = ruby_port.slave
system.cpu[i].dtb.walker.port = ruby_port.slave
else:
0x01: quiesce({{
// Don't sleep if (unmasked) interrupts are pending
Interrupts* interrupts =
- xc->tcBase()->getCpuPtr()->getInterruptController();
+ xc->tcBase()->getCpuPtr()->getInterruptController(0);
if (interrupts->checkInterrupts(xc->tcBase())) {
PseudoInst::quiesceSkip(xc->tcBase());
} else {
Reset::invoke(ThreadContext *tc, const StaticInstPtr &inst)
{
if (FullSystem) {
- tc->getCpuPtr()->clearInterrupts();
+ tc->getCpuPtr()->clearInterrupts(tc->threadId());
tc->clearArchRegs();
}
if (!ArmSystem::highestELIs64(tc)) {
}
if (source == ArmFault::AsynchronousExternalAbort) {
- tc->getCpuPtr()->clearInterrupt(INT_ABT, 0);
+ tc->getCpuPtr()->clearInterrupt(tc->threadId(), INT_ABT, 0);
}
// Get effective fault source encoding
CPSR cpsr = tc->readMiscReg(MISCREG_CPSR);
void
SystemError::invoke(ThreadContext *tc, const StaticInstPtr &inst)
{
- tc->getCpuPtr()->clearInterrupt(INT_ABT, 0);
+ tc->getCpuPtr()->clearInterrupt(tc->threadId(), INT_ABT, 0);
ArmFault::invoke(tc, inst);
}
// SEV execution and let pipeline continue as pcState is still
// valid.
tc->setMiscReg(MISCREG_SEV_MAILBOX, 1);
- tc->getCpuPtr()->clearInterrupt(INT_SEV, 0);
+ tc->getCpuPtr()->clearInterrupt(tc->threadId(), INT_SEV, 0);
}
// Instantiate all the templates to make the linker happy
case MISCREG_DBGDSCRint:
return 0;
case MISCREG_ISR:
- return tc->getCpuPtr()->getInterruptController()->getISR(
+ return tc->getCpuPtr()->getInterruptController(tc->threadId())->getISR(
readMiscRegNoEffect(MISCREG_HCR),
readMiscRegNoEffect(MISCREG_CPSR),
readMiscRegNoEffect(MISCREG_SCR));
case MISCREG_ISR_EL1:
- return tc->getCpuPtr()->getInterruptController()->getISR(
+ return tc->getCpuPtr()->getInterruptController(tc->threadId())->getISR(
readMiscRegNoEffect(MISCREG_HCR_EL2),
readMiscRegNoEffect(MISCREG_CPSR),
readMiscRegNoEffect(MISCREG_SCR_EL3));
"been configured to use a generic timer.\n");
}
- timer.reset(new GenericTimerISA(*generic_timer, tc->cpuId()));
+ timer.reset(new GenericTimerISA(*generic_timer, tc->contextId()));
return *timer.get();
}
if (SevMailbox == 1) {
SevMailbox = 0;
PseudoInst::quiesceSkip(tc);
- } else if (tc->getCpuPtr()->getInterruptController()->checkInterrupts(tc)) {
+ } else if (tc->getCpuPtr()->getInterruptController(
+ tc->threadId())->checkInterrupts(tc)) {
PseudoInst::quiesceSkip(tc);
} else if (cpsr.el == EL0 && !sctlr.ntwe) {
PseudoInst::quiesceSkip(tc);
// WFI doesn't sleep if interrupts are pending (masked or not)
ThreadContext *tc = xc->tcBase();
- if (tc->getCpuPtr()->getInterruptController()->checkWfiWake(hcr, cpsr,
- scr)) {
+ if (tc->getCpuPtr()->getInterruptController(
+ tc->threadId())->checkWfiWake(hcr, cpsr, scr)) {
PseudoInst::quiesceSkip(tc);
} else if (cpsr.el == EL0 && !sctlr.ntwi) {
PseudoInst::quiesceSkip(tc);
} else {
PseudoInst::quiesce(tc);
}
- tc->getCpuPtr()->clearInterrupt(INT_ABT, 0);
+ tc->getCpuPtr()->clearInterrupt(tc->threadId(), INT_ABT, 0);
'''
wfiIop = InstObjParams("wfi", "WfiInst", "PredOp", \
{ "code" : wfiCode, "predicate_test" : predicateTest },
// Wake CPUs with an interrupt if they were sleeping
if (oc->readMiscReg(MISCREG_SEV_MAILBOX) == 0) {
// Post Interrupt and wake cpu if needed
- oc->getCpuPtr()->postInterrupt(INT_SEV, 0);
+ oc->getCpuPtr()->postInterrupt(oc->threadId(), INT_SEV, 0);
}
}
'''
{
tl = val;
if (hpstate.tlz && tl == 0 && !hpstate.hpriv)
- tc->getCpuPtr()->postInterrupt(IT_TRAP_LEVEL_ZERO, 0);
+ tc->getCpuPtr()->postInterrupt(0, IT_TRAP_LEVEL_ZERO, 0);
else
- tc->getCpuPtr()->clearInterrupt(IT_TRAP_LEVEL_ZERO, 0);
+ tc->getCpuPtr()->clearInterrupt(0, IT_TRAP_LEVEL_ZERO, 0);
return;
}
case MISCREG_CWP:
{
SparcISA::Interrupts * interrupts =
dynamic_cast<SparcISA::Interrupts *>(
- tc->getCpuPtr()->getInterruptController());
+ tc->getCpuPtr()->getInterruptController(0));
pkt->set(interrupts->get_vec(IT_INT_VEC));
}
break;
{
SparcISA::Interrupts * interrupts =
dynamic_cast<SparcISA::Interrupts *>(
- tc->getCpuPtr()->getInterruptController());
+ tc->getCpuPtr()->getInterruptController(0));
temp = findMsbSet(interrupts->get_vec(IT_INT_VEC));
- tc->getCpuPtr()->clearInterrupt(IT_INT_VEC, temp);
+ tc->getCpuPtr()->clearInterrupt(0, IT_INT_VEC, temp);
pkt->set(temp);
}
break;
// clear all the interrupts that aren't set in the write
SparcISA::Interrupts * interrupts =
dynamic_cast<SparcISA::Interrupts *>(
- tc->getCpuPtr()->getInterruptController());
+ tc->getCpuPtr()->getInterruptController(0));
while (interrupts->get_vec(IT_INT_VEC) & data) {
msb = findMsbSet(interrupts->get_vec(IT_INT_VEC) & data);
- tc->getCpuPtr()->clearInterrupt(IT_INT_VEC, msb);
+ tc->getCpuPtr()->clearInterrupt(0, IT_INT_VEC, msb);
}
}
break;
case ASI_SWVR_UDB_INTR_W:
tc->getSystemPtr()->threadContexts[bits(data,12,8)]->getCpuPtr()->
- postInterrupt(bits(data, 5, 0), 0);
+ postInterrupt(0, bits(data, 5, 0), 0);
break;
default:
doMmuWriteError:
// If PIL < 14, copy over the tm and sm bits
if (pil < 14 && softint & 0x10000)
- cpu->postInterrupt(IT_SOFT_INT, 16);
+ cpu->postInterrupt(0, IT_SOFT_INT, 16);
else
- cpu->clearInterrupt(IT_SOFT_INT, 16);
+ cpu->clearInterrupt(0, IT_SOFT_INT, 16);
if (pil < 14 && softint & 0x1)
- cpu->postInterrupt(IT_SOFT_INT, 0);
+ cpu->postInterrupt(0, IT_SOFT_INT, 0);
else
- cpu->clearInterrupt(IT_SOFT_INT, 0);
+ cpu->clearInterrupt(0, IT_SOFT_INT, 0);
// Copy over any of the other bits that are set
for (int bit = 15; bit > 0; --bit) {
if (1 << bit & softint && bit > pil)
- cpu->postInterrupt(IT_SOFT_INT, bit);
+ cpu->postInterrupt(0, IT_SOFT_INT, bit);
else
- cpu->clearInterrupt(IT_SOFT_INT, bit);
+ cpu->clearInterrupt(0, IT_SOFT_INT, bit);
}
}
case MISCREG_HINTP:
setMiscRegNoEffect(miscReg, val);
if (hintp)
- cpu->postInterrupt(IT_HINTP, 0);
+ cpu->postInterrupt(0, IT_HINTP, 0);
else
- cpu->clearInterrupt(IT_HINTP, 0);
+ cpu->clearInterrupt(0, IT_HINTP, 0);
break;
case MISCREG_HTBA:
case MISCREG_QUEUE_CPU_MONDO_TAIL:
setMiscRegNoEffect(miscReg, val);
if (cpu_mondo_head != cpu_mondo_tail)
- cpu->postInterrupt(IT_CPU_MONDO, 0);
+ cpu->postInterrupt(0, IT_CPU_MONDO, 0);
else
- cpu->clearInterrupt(IT_CPU_MONDO, 0);
+ cpu->clearInterrupt(0, IT_CPU_MONDO, 0);
break;
case MISCREG_QUEUE_DEV_MONDO_HEAD:
case MISCREG_QUEUE_DEV_MONDO_TAIL:
setMiscRegNoEffect(miscReg, val);
if (dev_mondo_head != dev_mondo_tail)
- cpu->postInterrupt(IT_DEV_MONDO, 0);
+ cpu->postInterrupt(0, IT_DEV_MONDO, 0);
else
- cpu->clearInterrupt(IT_DEV_MONDO, 0);
+ cpu->clearInterrupt(0, IT_DEV_MONDO, 0);
break;
case MISCREG_QUEUE_RES_ERROR_HEAD:
case MISCREG_QUEUE_RES_ERROR_TAIL:
setMiscRegNoEffect(miscReg, val);
if (res_error_head != res_error_tail)
- cpu->postInterrupt(IT_RES_ERROR, 0);
+ cpu->postInterrupt(0, IT_RES_ERROR, 0);
else
- cpu->clearInterrupt(IT_RES_ERROR, 0);
+ cpu->clearInterrupt(0, IT_RES_ERROR, 0);
break;
case MISCREG_QUEUE_NRES_ERROR_HEAD:
case MISCREG_QUEUE_NRES_ERROR_TAIL:
setMiscRegNoEffect(miscReg, newVal);
newVal = hpstate;
if (newVal.tlz && tl == 0 && !newVal.hpriv)
- cpu->postInterrupt(IT_TRAP_LEVEL_ZERO, 0);
+ cpu->postInterrupt(0, IT_TRAP_LEVEL_ZERO, 0);
else
- cpu->clearInterrupt(IT_TRAP_LEVEL_ZERO, 0);
+ cpu->clearInterrupt(0, IT_TRAP_LEVEL_ZERO, 0);
break;
}
case MISCREG_HTSTATE:
tc->setMiscReg(MISCREG_APIC_BASE, lApicBase);
Interrupts * interrupts = dynamic_cast<Interrupts *>(
- tc->getCpuPtr()->getInterruptController());
+ tc->getCpuPtr()->getInterruptController(0));
assert(interrupts);
interrupts->setRegNoEffect(APIC_ID, cpuId << 24);
if buildEnv['TARGET_ISA'] == 'sparc':
dtb = Param.SparcTLB(SparcTLB(), "Data TLB")
itb = Param.SparcTLB(SparcTLB(), "Instruction TLB")
- interrupts = Param.SparcInterrupts(
- NULL, "Interrupt Controller")
+ interrupts = VectorParam.SparcInterrupts(
+ [], "Interrupt Controller")
isa = VectorParam.SparcISA([ isa_class() ], "ISA instance")
elif buildEnv['TARGET_ISA'] == 'alpha':
dtb = Param.AlphaTLB(AlphaDTB(), "Data TLB")
itb = Param.AlphaTLB(AlphaITB(), "Instruction TLB")
- interrupts = Param.AlphaInterrupts(
- NULL, "Interrupt Controller")
+ interrupts = VectorParam.AlphaInterrupts(
+ [], "Interrupt Controller")
isa = VectorParam.AlphaISA([ isa_class() ], "ISA instance")
elif buildEnv['TARGET_ISA'] == 'x86':
dtb = Param.X86TLB(X86TLB(), "Data TLB")
itb = Param.X86TLB(X86TLB(), "Instruction TLB")
- interrupts = Param.X86LocalApic(NULL, "Interrupt Controller")
+ interrupts = VectorParam.X86LocalApic([], "Interrupt Controller")
isa = VectorParam.X86ISA([ isa_class() ], "ISA instance")
elif buildEnv['TARGET_ISA'] == 'mips':
dtb = Param.MipsTLB(MipsTLB(), "Data TLB")
itb = Param.MipsTLB(MipsTLB(), "Instruction TLB")
- interrupts = Param.MipsInterrupts(
- NULL, "Interrupt Controller")
+ interrupts = VectorParam.MipsInterrupts(
+ [], "Interrupt Controller")
isa = VectorParam.MipsISA([ isa_class() ], "ISA instance")
elif buildEnv['TARGET_ISA'] == 'arm':
dtb = Param.ArmTLB(ArmTLB(), "Data TLB")
itb = Param.ArmTLB(ArmTLB(), "Instruction TLB")
istage2_mmu = Param.ArmStage2MMU(ArmStage2IMMU(), "Stage 2 trans")
dstage2_mmu = Param.ArmStage2MMU(ArmStage2DMMU(), "Stage 2 trans")
- interrupts = Param.ArmInterrupts(
- NULL, "Interrupt Controller")
+ interrupts = VectorParam.ArmInterrupts(
+ [], "Interrupt Controller")
isa = VectorParam.ArmISA([ isa_class() ], "ISA instance")
elif buildEnv['TARGET_ISA'] == 'power':
UnifiedTLB = Param.Bool(True, "Is this a Unified TLB?")
dtb = Param.PowerTLB(PowerTLB(), "Data TLB")
itb = Param.PowerTLB(PowerTLB(), "Instruction TLB")
- interrupts = Param.PowerInterrupts(
- NULL, "Interrupt Controller")
+ interrupts = VectorParam.PowerInterrupts(
+ [], "Interrupt Controller")
isa = VectorParam.PowerISA([ isa_class() ], "ISA instance")
else:
print "Don't know what TLB to use for ISA %s" % \
_uncached_slave_ports = []
_uncached_master_ports = []
if buildEnv['TARGET_ISA'] == 'x86':
- _uncached_slave_ports += ["interrupts.pio", "interrupts.int_slave"]
- _uncached_master_ports += ["interrupts.int_master"]
+ _uncached_slave_ports += ["interrupts[0].pio",
+ "interrupts[0].int_slave"]
+ _uncached_master_ports += ["interrupts[0].int_master"]
def createInterruptController(self):
if buildEnv['TARGET_ISA'] == 'sparc':
- self.interrupts = SparcInterrupts()
+ self.interrupts = [SparcInterrupts() for i in xrange(self.numThreads)]
elif buildEnv['TARGET_ISA'] == 'alpha':
- self.interrupts = AlphaInterrupts()
+ self.interrupts = [AlphaInterrupts() for i in xrange(self.numThreads)]
elif buildEnv['TARGET_ISA'] == 'x86':
self.apic_clk_domain = DerivedClockDomain(clk_domain =
Parent.clk_domain,
clk_divider = 16)
- self.interrupts = X86LocalApic(clk_domain = self.apic_clk_domain,
-     pio_addr=0x2000000000000000)
+ self.interrupts = [X86LocalApic(clk_domain = self.apic_clk_domain,
+     pio_addr=0x2000000000000000)
+     for i in xrange(self.numThreads)]
_localApic = self.interrupts
elif buildEnv['TARGET_ISA'] == 'mips':
- self.interrupts = MipsInterrupts()
+ self.interrupts = [MipsInterrupts() for i in xrange(self.numThreads)]
elif buildEnv['TARGET_ISA'] == 'arm':
- self.interrupts = ArmInterrupts()
+ self.interrupts = [ArmInterrupts() for i in xrange(self.numThreads)]
elif buildEnv['TARGET_ISA'] == 'power':
- self.interrupts = PowerInterrupts()
+ self.interrupts = [PowerInterrupts() for i in xrange(self.numThreads)]
else:
print "Don't know what Interrupt Controller to use for ISA %s" % \
buildEnv['TARGET_ISA']
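A minimal usage sketch (not part of the patch; "cpu" is a hypothetical BaseCPU-derived config object with two hardware threads) of what the vector form means for a config script: after createInterruptController(), interrupts is a per-thread list rather than a single controller.

# Hypothetical sketch: one interrupt controller per hardware thread.
cpu.numThreads = 2
cpu.createInterruptController()
assert len(cpu.interrupts) == cpu.numThreads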
// The interrupts should always be present unless this CPU is
// switched in later or in case it is a checker CPU
if (!params()->switched_out && !is_checker) {
- if (interrupts) {
- interrupts->setCPU(this);
+ if (!interrupts.empty()) {
+ for (ThreadID tid = 0; tid < numThreads; tid++) {
+ interrupts[tid]->setCPU(this);
+ }
} else {
fatal("CPU %s has no interrupt controller.\n"
"Ensure createInterruptController() is called.\n", name());
}
interrupts = oldCPU->interrupts;
- interrupts->setCPU(this);
- oldCPU->interrupts = NULL;
+ for (ThreadID tid = 0; tid < numThreads; tid++) {
+ interrupts[tid]->setCPU(this);
+ }
+ oldCPU->interrupts.clear();
if (FullSystem) {
for (ThreadID i = 0; i < size; ++i)
* system. */
SERIALIZE_SCALAR(_pid);
- interrupts->serialize(cp);
-
// Serialize the threads, this is done by the CPU implementation.
for (ThreadID i = 0; i < numThreads; ++i) {
ScopedCheckpointSection sec(cp, csprintf("xc.%i", i));
+ interrupts[i]->serialize(cp);
serializeThread(cp, i);
}
}
if (!_switchedOut) {
UNSERIALIZE_SCALAR(_pid);
- interrupts->unserialize(cp);
// Unserialize the threads, this is done by the CPU implementation.
for (ThreadID i = 0; i < numThreads; ++i) {
ScopedCheckpointSection sec(cp, csprintf("xc.%i", i));
+ interrupts[i]->unserialize(cp);
unserializeThread(cp, i);
}
}
TheISA::MicrocodeRom microcodeRom;
protected:
- TheISA::Interrupts *interrupts;
+ std::vector<TheISA::Interrupts*> interrupts;
public:
TheISA::Interrupts *
- getInterruptController()
+ getInterruptController(ThreadID tid)
{
- return interrupts;
+ if (interrupts.empty())
+ return NULL;
+
+ assert(interrupts.size() > tid);
+ return interrupts[tid];
}
virtual void wakeup() = 0;
void
- postInterrupt(int int_num, int index)
+ postInterrupt(ThreadID tid, int int_num, int index)
{
- interrupts->post(int_num, index);
+ interrupts[tid]->post(int_num, index);
if (FullSystem)
wakeup();
}
void
- clearInterrupt(int int_num, int index)
+ clearInterrupt(ThreadID tid, int int_num, int index)
{
- interrupts->clear(int_num, index);
+ interrupts[tid]->clear(int_num, index);
}
void
- clearInterrupts()
+ clearInterrupts(ThreadID tid)
{
- interrupts->clearAll();
+ interrupts[tid]->clearAll();
}
bool
checkInterrupts(ThreadContext *tc) const
{
- return FullSystem && interrupts->checkInterrupts(tc);
+ return FullSystem && interrupts[tc->threadId()]->checkInterrupts(tc);
}
class ProfileEvent : public Event
params->system = system;
params->cpu_id = cpu_id;
params->profile = profile;
- params->interrupts = NULL;
params->workload = workload;
DummyChecker *cpu = new DummyChecker(params);
DPRINTF(IntrControl, "post %d:%d (cpu %d)\n", int_num, index, cpu_id);
std::vector<ThreadContext *> &tcvec = sys->threadContexts;
BaseCPU *cpu = tcvec[cpu_id]->getCpuPtr();
- cpu->postInterrupt(int_num, index);
+ cpu->postInterrupt(tcvec[cpu_id]->threadId(), int_num, index);
}
void
DPRINTF(IntrControl, "clear %d:%d (cpu %d)\n", int_num, index, cpu_id);
std::vector<ThreadContext *> &tcvec = sys->threadContexts;
BaseCPU *cpu = tcvec[cpu_id]->getCpuPtr();
- cpu->clearInterrupt(int_num, index);
+ cpu->clearInterrupt(tcvec[cpu_id]->threadId(), int_num, index);
}
IntrControl *
// call across threads, we might still lose interrupts unless
// getInterrupt() and updateIntrInfo() are called atomically.
- EventQueue::ScopedMigration migrate(interrupts->eventQueue());
- fault = interrupts->getInterrupt(tc);
- interrupts->updateIntrInfo(tc);
+ EventQueue::ScopedMigration migrate(interrupts[0]->eventQueue());
+ fault = interrupts[0]->getInterrupt(tc);
+ interrupts[0]->updateIntrInfo(tc);
}
X86Interrupt *x86int(dynamic_cast<X86Interrupt *>(fault.get()));
{
struct kvm_run &kvm_run(*getKvmRunState());
- if (interrupts->checkInterruptsRaw()) {
- if (interrupts->hasPendingUnmaskable()) {
+ if (interrupts[0]->checkInterruptsRaw()) {
+ if (interrupts[0]->hasPendingUnmaskable()) {
DPRINTF(KvmInt,
"Delivering unmaskable interrupt.\n");
syncThreadContext();
// the thread context and check if there are /really/
// interrupts that should be delivered now.
syncThreadContext();
- if (interrupts->checkInterrupts(tc)) {
+ if (interrupts[0]->checkInterrupts(tc)) {
DPRINTF(KvmInt,
"M5 has pending interrupts, delivering interrupt.\n");
DPRINTF(MinorInterrupt, "Considering interrupt status from PC: %s\n",
cpu.getContext(thread_id)->pcState());
- Fault interrupt = cpu.getInterruptController()->getInterrupt
+ Fault interrupt = cpu.getInterruptController(thread_id)->getInterrupt
(cpu.getContext(thread_id));
if (interrupt != NoFault) {
/* The interrupt *must* set pcState */
- cpu.getInterruptController()->updateIntrInfo
+ cpu.getInterruptController(thread_id)->updateIntrInfo
(cpu.getContext(thread_id));
interrupt->invoke(cpu.getContext(thread_id));
/* If there was an interrupt signalled, was it acted on now? */
bool took_interrupt = false;
- if (cpu.getInterruptController()) {
+ if (cpu.getInterruptController(0)) {
/* This is here because it seems that after drainResume the
* interrupt controller isn't always set */
interrupted = drainState == NotDraining && isInterrupted(0);
params->system = system;
params->cpu_id = cpu_id;
params->profile = profile;
- params->interrupts = NULL;
params->workload = workload;
O3Checker *cpu = new O3Checker(params);
}
// FullO3CPU always requires an interrupt controller.
- if (!params->switched_out && !interrupts) {
+ if (!params->switched_out && interrupts.empty()) {
fatal("FullO3CPU %s has no interrupt controller.\n"
"Ensure createInterruptController() is called.\n", name());
}
FullO3CPU<Impl>::getInterrupts()
{
// Check if there are any outstanding interrupts
- return this->interrupts->getInterrupt(this->threadContexts[0]);
+ return this->interrupts[0]->getInterrupt(this->threadContexts[0]);
}
template <class Impl>
// @todo: Allow other threads to handle interrupts.
assert(interrupt != NoFault);
- this->interrupts->updateIntrInfo(this->threadContexts[0]);
+ this->interrupts[0]->updateIntrInfo(this->threadContexts[0]);
DPRINTF(O3CPU, "Interrupt %s being handled\n", interrupt->name());
this->trap(interrupt, 0, nullptr);
ThreadContext* tc = thread->getTC();
if (checkInterrupts(tc)) {
- Fault interrupt = interrupts->getInterrupt(tc);
+ Fault interrupt = interrupts[curThread]->getInterrupt(tc);
if (interrupt != NoFault) {
t_info.fetchOffset = 0;
- interrupts->updateIntrInfo(tc);
+ interrupts[curThread]->updateIntrInfo(tc);
interrupt->invoke(tc);
thread->decoder.reset();
}
} else {
for (int i = 0; i < numContexts; i++) {
Interrupts *localApic = sys->getThreadContext(i)->
- getCpuPtr()->getInterruptController();
+ getCpuPtr()->getInterruptController(0);
if ((localApic->readReg(APIC_LOGICAL_DESTINATION) >> 24) &
message.destination) {
apics.push_back(localApic->getInitialApicId());
cpu.itb.walker.port = system.ruby._cpu_ports[i].slave
cpu.dtb.walker.port = system.ruby._cpu_ports[i].slave
- cpu.interrupts.pio = system.ruby._cpu_ports[i].master
- cpu.interrupts.int_master = system.ruby._cpu_ports[i].slave
- cpu.interrupts.int_slave = system.ruby._cpu_ports[i].master
+ cpu.interrupts[0].pio = system.ruby._cpu_ports[i].master
+ cpu.interrupts[0].int_master = system.ruby._cpu_ports[i].slave
+ cpu.interrupts[0].int_slave = system.ruby._cpu_ports[i].master
root = Root(full_system = True, system = system)
m5.ticks.setGlobalFrequency('1THz')
--- /dev/null
+# Upgrade single-threaded checkpoints so they load correctly with SMT support.
+# SMT adds per-thread interrupts, so the interrupt status must move from the
+# CPU into the execution context.
+import re
+
+def upgrader(cpt):
+ for sec in cpt.sections():
+ re_cpu_match = re.match(r'^(.*sys.*\.cpu[^._]*)$', sec)
+ if re_cpu_match is not None:
+ interrupts = cpt.get(sec, 'interrupts')
+ intStatus = cpt.get(sec, 'intStatus')
+
+ cpu_name = re_cpu_match.group(1)
+
+ cpt.set(cpu_name + ".xc.0", 'interrupts', interrupts)
+ cpt.set(cpu_name + ".xc.0", 'intStatus', intStatus)
+
+ cpt.remove_option(sec, 'interrupts')
+ cpt.remove_option(sec, 'intStatus')
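As a rough illustration of the move described in the upgrader's header comment (assuming a hypothetical checkpoint section named system.cpu), the two options are relocated into the thread-0 execution-context subsection:

# Before upgrade (hypothetical checkpoint excerpt):
#   [system.cpu]
#   interrupts=...
#   intStatus=...
#
# After upgrade:
#   [system.cpu.xc.0]
#   interrupts=...
#   intStatus=...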