delete dtb_file;
// Kernel boot requirements to set up r0, r1 and r2 in ARMv7
- for (auto tc: system->threadContexts) {
+ for (auto *tc: system->threads) {
tc->setIntReg(0, 0);
tc->setIntReg(1, params()->machine_type);
tc->setIntReg(2, params()->atags_addr + _loadAddrOffset);
// FPEXC.EN = 0
- for (auto *tc: system->threadContexts) {
+ for (auto *tc: system->threads) {
Reset().invoke(tc);
tc->activate();
}
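The hunks above show the recurring shape of this change: index loops over numContexts()/getThreadContext() become direct indexing into system->threads or a range-for over it. A standalone sketch of the two forms, using stand-in types rather than gem5's real System and ThreadContext (illustrative only, not part of the patch):

#include <vector>

struct StubThreadContext { void activate() {} };

struct StubSystem
{
    // Stand-in for the new System::threads container, which is both
    // indexable and iterable.
    std::vector<StubThreadContext *> threads;
};

void
resetAll(StubSystem *system)
{
    // Old form: walk an index and fetch each context by id.
    for (int i = 0; i < (int)system->threads.size(); i++)
        system->threads[i]->activate();

    // New form used throughout this patch: iterate the container directly.
    for (auto *tc: system->threads)
        tc->activate();
}

int main()
{
    StubThreadContext a, b;
    StubSystem sys;
    sys.threads = {&a, &b};
    resetAll(&sys);
    return 0;
}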
fatal_if(!arm_sys->params()->gic_cpu_addr && is_gic_v2,
"gic_cpu_addr must be set with bootloader");
- for (auto tc: arm_sys->threadContexts) {
+ for (auto *tc: arm_sys->threads) {
if (!arm_sys->highestELIs64())
tc->setIntReg(3, kernelEntry);
if (is_gic_v2)
} else {
// Set the initial PC to be at the start of the kernel code
if (!arm_sys->highestELIs64())
- arm_sys->threadContexts[0]->pcState(kernelObj->entryPoint());
+ arm_sys->threads[0]->pcState(kernelObj->entryPoint());
}
}
// mostly unimplemented, just set NumCPUs field from sim and return
L2CTLR l2ctlr = 0;
// b00:1CPU to b11:4CPUs
- l2ctlr.numCPUs = tc->getSystemPtr()->numContexts() - 1;
+ l2ctlr.numCPUs = tc->getSystemPtr()->threads.size() - 1;
return l2ctlr;
}
case MISCREG_DBGDIDR:
sevCode = '''
SevMailbox = 1;
System *sys = xc->tcBase()->getSystemPtr();
- for (int x = 0; x < sys->numContexts(); x++) {
- ThreadContext *oc = sys->getThreadContext(x);
+ for (int x = 0; x < sys->threads.size(); x++) {
+ ThreadContext *oc = sys->threads[x];
if (oc == xc->tcBase())
continue;
MuxingKvmGic::copyBankedDistRange(BaseGicRegisters* from, BaseGicRegisters* to,
Addr daddr, size_t size)
{
- for (int ctx = 0; ctx < system.numContexts(); ++ctx)
+ for (int ctx = 0; ctx < system.threads.size(); ++ctx)
for (auto a = daddr; a < daddr + size; a += 4)
copyDistRegister(from, to, ctx, a);
}
MuxingKvmGic::clearBankedDistRange(BaseGicRegisters* to,
Addr daddr, size_t size)
{
- for (int ctx = 0; ctx < system.numContexts(); ++ctx)
+ for (int ctx = 0; ctx < system.threads.size(); ++ctx)
for (auto a = daddr; a < daddr + size; a += 4)
to->writeDistributor(ctx, a, 0xFFFFFFFF);
}
// Copy CPU Interface Control Register (CTLR),
// Interrupt Priority Mask Register (PMR), and
// Binary Point Register (BPR)
- for (int ctx = 0; ctx < system.numContexts(); ++ctx) {
+ for (int ctx = 0; ctx < system.threads.size(); ++ctx) {
copyCpuRegister(from, to, ctx, GICC_CTLR);
copyCpuRegister(from, to, ctx, GICC_PMR);
copyCpuRegister(from, to, ctx, GICC_BPR);
// have been shifted by three bits due to its having been emulated by
// a VGIC with only 5 PMR bits in its VMCR register. Presently the
// Linux kernel does not repair this inaccuracy, so we correct it here.
- for (int cpu = 0; cpu < system.numContexts(); ++cpu) {
+ for (int cpu = 0; cpu < system.threads.size(); ++cpu) {
cpuPriority[cpu] <<= 3;
assert((cpuPriority[cpu] & ~0xff) == 0);
}
}
// Kernel boot requirements to set up r0, r1 and r2 in ARMv7
- for (auto tc: system->threadContexts) {
+ for (auto *tc: system->threads) {
tc->setIntReg(0, 0);
tc->setIntReg(1, params()->machine_type);
tc->setIntReg(2, params()->atags_addr + _loadAddrOffset);
std::string task_filename = "tasks.txt";
taskFile = simout.create(name() + "." + task_filename);
- for (const auto tc : system->threadContexts) {
+ for (auto *tc: system->threads) {
uint32_t pid = tc->getCpuPtr()->getPid();
if (pid != BaseCPU::invldPid) {
mapPid(tc, pid);
void
FsLinux::dumpDmesg()
{
- Linux::dumpDmesg(system->getThreadContext(0), std::cout);
+ Linux::dumpDmesg(system->threads[0], std::cout);
}
/**
{
ArmProcess32::initState();
allocateMem(commPage, PageBytes);
- ThreadContext *tc = system->getThreadContext(contextIds[0]);
+ ThreadContext *tc = system->threads[contextIds[0]];
uint8_t swiNeg1[] = {
0xff, 0xff, 0xff, 0xef // swi -1
{
Process::initState();
argsInit<uint32_t>(PageBytes, INTREG_SP);
- for (int i = 0; i < contextIds.size(); i++) {
- ThreadContext * tc = system->getThreadContext(contextIds[i]);
+ for (auto id: contextIds) {
+ ThreadContext *tc = system->threads[id];
CPACR cpacr = tc->readMiscReg(MISCREG_CPACR);
// Enable the floating point coprocessors.
cpacr.cp10 = 0x3;
{
Process::initState();
argsInit<uint64_t>(PageBytes, INTREG_SP0);
- for (int i = 0; i < contextIds.size(); i++) {
- ThreadContext * tc = system->getThreadContext(contextIds[i]);
+ for (auto id: contextIds) {
+ ThreadContext *tc = system->threads[id];
CPSR cpsr = tc->readMiscReg(MISCREG_CPSR);
cpsr.mode = MODE_EL0T;
tc->setMiscReg(MISCREG_CPSR, cpsr);
uint32_t hwcap = 0;
- ThreadContext *tc = system->getThreadContext(contextIds[0]);
+ ThreadContext *tc = system->threads[contextIds[0]];
const AA64PFR0 pf_r0 = tc->readMiscReg(MISCREG_ID_AA64PFR0_EL1);
initVirtMem->writeBlob(argc_base, &guestArgc, intSize);
- ThreadContext *tc = system->getThreadContext(contextIds[0]);
+ ThreadContext *tc = system->threads[contextIds[0]];
//Set the stack pointer register
tc->setIntReg(spIndex, memState->getStackMin());
//A pointer to a function to run when the program exits. We'll set this
void
broadcast(ThreadContext *tc)
{
- System *sys = tc->getSystemPtr();
- for (int x = 0; x < sys->numContexts(); x++) {
- ThreadContext *oc = sys->getThreadContext(x);
+ for (auto *oc: tc->getSystemPtr()->threads)
(*this)(oc);
- }
}
protected:
initVirtMem->write(auxv_array_end, zero);
auxv_array_end += sizeof(zero);
- ThreadContext *tc = system->getThreadContext(contextIds[0]);
+ ThreadContext *tc = system->threads[contextIds[0]];
tc->setIntReg(FirstArgumentReg, argc);
tc->setIntReg(FirstArgumentReg + 1, argv_array_base);
initVirtMem->writeBlob(argc_base, &guestArgc, intSize);
- ThreadContext *tc = system->getThreadContext(contextIds[0]);
+ ThreadContext *tc = system->threads[contextIds[0]];
//Set the stack pointer register
tc->setIntReg(StackPointerReg, stack_min);
{
RiscvISA::FsWorkload::initState();
- for (auto *tc: system->threadContexts) {
+ for (auto *tc: system->threads) {
RiscvISA::Reset().invoke(tc);
tc->activate();
}
warn_if(!bootloader->buildImage().write(system->physProxy),
"Could not load sections to memory.");
- for (auto *tc: system->threadContexts) {
+ for (auto *tc: system->threads) {
RiscvISA::Reset().invoke(tc);
tc->activate();
}
argsInit<uint64_t>(PageBytes);
for (ContextID ctx: contextIds)
- system->getThreadContext(ctx)->setMiscRegNoEffect(MISCREG_PRV, PRV_U);
+ system->threads[ctx]->setMiscRegNoEffect(MISCREG_PRV, PRV_U);
}
void
argsInit<uint32_t>(PageBytes);
for (ContextID ctx: contextIds) {
- system->getThreadContext(ctx)->setMiscRegNoEffect(MISCREG_PRV, PRV_U);
- PCState pc = system->getThreadContext(ctx)->pcState();
+ auto *tc = system->threads[ctx];
+ tc->setMiscRegNoEffect(MISCREG_PRV, PRV_U);
+ PCState pc = tc->pcState();
pc.rv32(true);
- system->getThreadContext(ctx)->pcState(pc);
+ tc->pcState(pc);
}
}
pushOntoStack(aux.val);
}
- ThreadContext *tc = system->getThreadContext(contextIds[0]);
+ ThreadContext *tc = system->threads[contextIds[0]];
tc->setIntReg(StackPointerReg, memState->getStackMin());
tc->pcState(getStartPC());
{
Workload::initState();
- if (system->threadContexts.empty())
+ if (system->threads.empty())
return;
// Other CPUs will get activated by IPIs.
- auto *tc = system->threadContexts[0];
+ auto *tc = system->threads[0];
SparcISA::PowerOnReset().invoke(tc);
tc->activate();
}
{
Process::initState();
- ThreadContext *tc = system->getThreadContext(contextIds[0]);
+ ThreadContext *tc = system->threads[contextIds[0]];
// From the SPARC ABI
// Setup default FP state
{
SparcProcess::initState();
- ThreadContext *tc = system->getThreadContext(contextIds[0]);
+ ThreadContext *tc = system->threads[contextIds[0]];
// The process runs in user mode with 32 bit addresses
PSTATE pstate = 0;
pstate.ie = 1;
{
SparcProcess::initState();
- ThreadContext *tc = system->getThreadContext(contextIds[0]);
+ ThreadContext *tc = system->threads[contextIds[0]];
// The process runs in user mode
PSTATE pstate = 0;
pstate.ie = 1;
fillStart = memState->getStackBase();
spillStart = fillStart + sizeof(MachInst) * numFillInsts;
- ThreadContext *tc = system->getThreadContext(contextIds[0]);
+ ThreadContext *tc = system->threads[contextIds[0]];
// Set up the thread context to start running the process
// assert(NumArgumentRegs >= 2);
// tc->setIntReg(ArgumentReg[0], argc);
}
break;
case ASI_SWVR_UDB_INTR_W:
- tc->getSystemPtr()->threadContexts[bits(data,12,8)]->getCpuPtr()->
- postInterrupt(0, bits(data, 5, 0), 0);
+ tc->getSystemPtr()->threads[bits(data,12,8)]->
+ getCpuPtr()->postInterrupt(0, bits(data, 5, 0), 0);
break;
default:
doMmuWriteError:
temp = readMiscRegNoEffect(miscReg) & (STS::active | STS::speculative);
// Check that the CPU array is fully populated
// (by calling getNumCPUs())
- assert(sys->numContexts() > tc->contextId());
+ assert(sys->threads.size() > tc->contextId());
temp |= tc->contextId() << STS::shft_id;
- for (x = tc->contextId() & ~3; x < sys->threadContexts.size(); x++) {
- switch (sys->threadContexts[x]->status()) {
+ for (x = tc->contextId() & ~3; x < sys->threads.size(); x++) {
+ switch (sys->threads[x]->status()) {
case ThreadContext::Active:
temp |= STS::st_run << (STS::shft_fsm0 -
((x & 0x3) * (STS::shft_fsm0-STS::shft_fsm1)));
{
KernelWorkload::initState();
- for (auto *tc: system->threadContexts) {
+ for (auto *tc: system->threads) {
X86ISA::InitInterrupt(0).invoke(tc);
if (tc->contextId() == 0) {
fatal_if(kernelObj->getArch() == Loader::I386,
"Loading a 32 bit x86 kernel is not supported.");
- ThreadContext *tc = system->threadContexts[0];
+ ThreadContext *tc = system->threads[0];
auto phys_proxy = system->physProxy;
// This is the boot strap processor (BSP). Initialize it to look like
message.level = low.level;
message.trigger = low.trigger;
std::list<int> apics;
- int numContexts = sys->numContexts();
+ int numContexts = sys->threads.size();
switch (low.destShorthand) {
case 0:
if (message.deliveryMode == DeliveryMode::LowestPriority) {
* Pass the location of the real mode data structure to the kernel
* using register %esi. We'll use %rsi which should be equivalent.
*/
- system->threadContexts[0]->setIntReg(INTREG_RSI, realModeData);
+ system->threads[0]->setIntReg(INTREG_RSI, realModeData);
}
} // namespace X86ISA
tss_attr.unusable = 0;
for (int i = 0; i < contextIds.size(); i++) {
- ThreadContext * tc = system->getThreadContext(contextIds[i]);
+ ThreadContext *tc = system->threads[contextIds[i]];
tc->setMiscReg(MISCREG_CS, cs);
tc->setMiscReg(MISCREG_DS, ds);
16 * PageBytes, false);
} else {
for (int i = 0; i < contextIds.size(); i++) {
- ThreadContext * tc = system->getThreadContext(contextIds[i]);
+ ThreadContext * tc = system->threads[contextIds[i]];
SegAttr dataAttr = 0;
dataAttr.dpl = 3;
vsysexitBlob, sizeof(vsysexitBlob));
for (int i = 0; i < contextIds.size(); i++) {
- ThreadContext * tc = system->getThreadContext(contextIds[i]);
+ ThreadContext * tc = system->threads[contextIds[i]];
SegAttr dataAttr = 0;
dataAttr.dpl = 3;
initVirtMem->writeBlob(argc_base, &guestArgc, intSize);
- ThreadContext *tc = system->getThreadContext(contextIds[0]);
+ ThreadContext *tc = system->threads[contextIds[0]];
// Set the stack pointer register
tc->setIntReg(StackPointerReg, stack_min);
#include "arch/isa_traits.hh"
#include "arch/microcode_rom.hh"
#include "base/statistics.hh"
+#include "mem/port_proxy.hh"
#include "sim/clocked_object.hh"
#include "sim/eventq.hh"
#include "sim/full_system.hh"
IntrControl::post(int cpu_id, int int_num, int index)
{
DPRINTF(IntrControl, "post %d:%d (cpu %d)\n", int_num, index, cpu_id);
- ThreadContext *tc = sys->getThreadContext(cpu_id);
+ auto *tc = sys->threads[cpu_id];
tc->getCpuPtr()->postInterrupt(tc->threadId(), int_num, index);
}
IntrControl::clear(int cpu_id, int int_num, int index)
{
DPRINTF(IntrControl, "clear %d:%d (cpu %d)\n", int_num, index, cpu_id);
- ThreadContext *tc = sys->getThreadContext(cpu_id);
+ auto *tc = sys->threads[cpu_id];
tc->getCpuPtr()->clearInterrupt(tc->threadId(), int_num, index);
}
IntrControl::clearAll(int cpu_id)
{
DPRINTF(IntrControl, "Clear all pending interrupts for CPU %d\n", cpu_id);
- ThreadContext *tc = sys->getThreadContext(cpu_id);
+ auto *tc = sys->threads[cpu_id];
tc->getCpuPtr()->clearInterrupts(tc->threadId());
}
IntrControl::havePosted(int cpu_id) const
{
DPRINTF(IntrControl, "Check pending interrupts for CPU %d\n", cpu_id);
- ThreadContext *tc = sys->getThreadContext(cpu_id);
+ auto *tc = sys->threads[cpu_id];
return tc->getCpuPtr()->checkInterrupts(tc);
}
{
assert(system != nullptr);
return dynamic_cast<BaseKvmCPU*>
- (system->getThreadContext(ctx)->getCpuPtr())->getVCpuID();
+ (system->threads[ctx]->getCpuPtr())->getVCpuID();
}
int
// and not in the ThreadContext.
ThreadContext *src_tc;
if (FullSystem)
- src_tc = system->threadContexts[tid];
+ src_tc = system->threads[tid];
else
src_tc = tcBase(tid);
pkt->setLE(1); // SCU already enabled
break;
case Config:
- /* Without making a completely new SCU, we can use the core count field
- * as 4 bits and inform the OS of up to 16 CPUs. Although the core
- * count is technically bits [1:0] only, bits [3:2] are SBZ for future
- * expansion like this.
- */
- if (sys->numContexts() > 4) {
- warn_once("A9SCU with >4 CPUs is unsupported\n");
- if (sys->numContexts() > 15)
- fatal("Too many CPUs (%d) for A9SCU!\n", sys->numContexts());
+ {
+ /* Without making a completely new SCU, we can use the core count
+ * field as 4 bits and inform the OS of up to 16 CPUs. Although
+ * the core count is technically bits [1:0] only, bits [3:2] are
+ * SBZ for future expansion like this.
+ */
+ int threads = sys->threads.size();
+ if (threads > 4) {
+ warn_once("A9SCU with >4 CPUs is unsupported");
+ fatal_if(threads > 15,
+ "Too many CPUs (%d) for A9SCU!", threads);
+ }
+ int smp_bits, core_cnt;
+ smp_bits = (1 << threads) - 1;
+ core_cnt = threads - 1;
+ pkt->setLE(smp_bits << 4 | core_cnt);
}
- int smp_bits, core_cnt;
- smp_bits = (1 << sys->numContexts()) - 1;
- core_cnt = sys->numContexts() - 1;
- pkt->setLE(smp_bits << 4 | core_cnt);
break;
default:
// Only configuration register is implemented
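As a quick check of the value computed above (worked example, not part of the patch): with four threads, smp_bits is 0b1111 and core_cnt is 3, so the Config register reads back as 0xf3.

#include <cassert>

int main()
{
    int threads = 4;                    // a quad-core cluster
    int smp_bits = (1 << threads) - 1;  // 0b1111: all four cores in SMP mode
    int core_cnt = threads - 1;         // the register encodes count - 1
    assert((smp_bits << 4 | core_cnt) == 0xf3);
    return 0;
}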
FVPBasePwrCtrl::init()
{
// All cores are ON by default (PwrStatus.{l0,l1} = 0b1)
- corePwrStatus.resize(sys->numContexts(), 0x60000000);
- for (const auto &tc : sys->threadContexts)
+ corePwrStatus.resize(sys->threads.size(), 0x60000000);
+ for (const auto &tc : sys->threads)
poweredCoresPerCluster[tc->socketId()] += 1;
BasicPioDevice::init();
}
regs.pcoffr = ~0;
} else if (pwrs->l0) {
// Power off all cores in the cluster
- for (const auto &tco : sys->threadContexts) {
+ for (const auto &tco : sys->threads) {
if (tc->socketId() == tco->socketId()) {
PwrStatus *npwrs = getCorePwrStatus(tco);
// Set pending cluster power off
ThreadContext *
FVPBasePwrCtrl::getThreadContextByMPID(uint32_t mpid) const
{
- for (auto &tc : sys->threadContexts) {
+ for (auto &tc : sys->threads) {
if (mpid == ArmISA::getAffinity(&system, tc))
return tc;
}
// Clear pending power-offs to the core
pwrs->pp = 0;
// Clear pending power-offs to the core's cluster
- for (const auto &tco : sys->threadContexts) {
+ for (const auto &tco : sys->threads) {
if (tc->socketId() == tco->socketId()) {
PwrStatus *npwrs = getCorePwrStatus(tco);
npwrs->pc = 0;
timers.resize(cpus);
for (unsigned i = old_cpu_count; i < cpus; ++i) {
- ThreadContext *tc = system.getThreadContext(i);
+ ThreadContext *tc = system.threads[i];
timers[i].reset(
new CoreTimers(*this, system, i,
GenericTimer::setMiscReg(int reg, unsigned cpu, RegVal val)
{
CoreTimers &core(getTimers(cpu));
- ThreadContext *tc = system.getThreadContext(cpu);
+ ThreadContext *tc = system.threads[cpu];
switch (reg) {
case MISCREG_CNTFRQ:
ArmInterruptPin *_irqVirt, ArmInterruptPin *_irqHyp)
: parent(_parent),
cntfrq(parent.params()->cntfrq),
- threadContext(system.getThreadContext(cpu)),
+ threadContext(system.threads[cpu]),
irqPhysS(_irqPhysS),
irqPhysNS(_irqPhysNS),
irqVirt(_irqVirt),
/* The 0x100 is a made-up flag to show that gem5 extensions
* are available,
* write 0x200 to this register to enable it. */
- return (((sys->numRunningContexts() - 1) << 5) |
+ return (((sys->threads.numRunning() - 1) << 5) |
(itLines/INT_BITS_MAX -1) |
(haveGem5Extensions ? 0x100 : 0x0));
case GICD_PIDR0:
assert(pkt->req->hasContextId());
const ContextID ctx = pkt->req->contextId();
- assert(ctx < sys->numRunningContexts());
+ assert(ctx < sys->threads.numRunning());
- DPRINTF(GIC, "gic cpu read register %#x cpu context: %d\n", daddr,
- ctx);
+ DPRINTF(GIC, "gic cpu read register %#x cpu context: %d\n", daddr, ctx);
pkt->setLE<uint32_t>(readCpu(ctx, daddr));
panic_if(!cpuSgiPending[active_int],
"Interrupt %d active but no CPU generated it?\n",
active_int);
- for (int x = 0; x < sys->numRunningContexts(); x++) {
+ for (int x = 0; x < sys->threads.numRunning(); x++) {
// See which CPU generated the interrupt
uint8_t cpugen =
bits(cpuSgiPending[active_int], 7 + 8 * x, 8 * x);
} break;
case 1: {
// interrupt all
- for (int i = 0; i < sys->numContexts(); i++) {
+ for (int i = 0; i < sys->threads.size(); i++) {
DPRINTF(IPI, "Processing CPU %d\n", i);
if (!cpuEnabled(i))
continue;
// interrupt all
uint8_t cpu_list;
cpu_list = 0;
- for (int x = 0; x < sys->numContexts(); x++)
+ for (int x = 0; x < sys->threads.size(); x++)
cpu_list |= cpuEnabled(x) ? 1 << x : 0;
swi.cpu_list = cpu_list;
break;
DPRINTF(IPI, "Generating softIRQ from CPU %d for %#x\n", ctx,
swi.cpu_list);
- for (int i = 0; i < sys->numContexts(); i++) {
+ for (int i = 0; i < sys->threads.size(); i++) {
DPRINTF(IPI, "Processing CPU %d\n", i);
if (!cpuEnabled(i))
continue;
uint64_t
GicV2::genSwiMask(int cpu)
{
- if (cpu > sys->numContexts())
- panic("Invalid CPU ID\n");
+ panic_if(cpu > sys->threads.size(), "Invalid CPU ID.");
return ULL(0x0101010101010101) << cpu;
}
void
GicV2::updateIntState(int hint)
{
- for (int cpu = 0; cpu < sys->numContexts(); cpu++) {
+ for (int cpu = 0; cpu < sys->threads.size(); cpu++) {
if (!cpuEnabled(cpu))
continue;
}
}
- bool mp_sys = sys->numRunningContexts() > 1;
+ bool mp_sys = sys->threads.numRunning() > 1;
// Check other ints
for (int x = 0; x < (itLines/INT_BITS_MAX); x++) {
if (getIntEnabled(cpu, x) & getPendingInt(cpu, x)) {
void
GicV2::updateRunPri()
{
- for (int cpu = 0; cpu < sys->numContexts(); cpu++) {
+ for (int cpu = 0; cpu < sys->threads.size(); cpu++) {
if (!cpuEnabled(cpu))
continue;
uint8_t maxPriority = 0xff;
uint8_t cpuTarget[GLOBAL_INT_LINES];
uint8_t getCpuTarget(ContextID ctx, uint32_t ix) {
- assert(ctx < sys->numRunningContexts());
+ assert(ctx < sys->threads.numRunning());
assert(ix < INT_LINES_MAX);
if (ix < SGI_MAX + PPI_MAX) {
// "GICD_ITARGETSR0 to GICD_ITARGETSR7 are read-only, and each
Gicv3::init()
{
distributor = new Gicv3Distributor(this, params()->it_lines);
- redistributors.resize(sys->numContexts(), nullptr);
- cpuInterfaces.resize(sys->numContexts(), nullptr);
+ int threads = sys->threads.size();
+ redistributors.resize(threads, nullptr);
+ cpuInterfaces.resize(threads, nullptr);
- panic_if(sys->numContexts() > params()->cpu_max,
+ panic_if(threads > params()->cpu_max,
"Exceeding maximum number of PEs supported by GICv3: "
- "using %u while maximum is %u\n", sys->numContexts(),
- params()->cpu_max);
+ "using %u while maximum is %u.", threads, params()->cpu_max);
- for (int i = 0; i < sys->numContexts(); i++) {
+ for (int i = 0; i < threads; i++) {
redistributors[i] = new Gicv3Redistributor(this, i);
cpuInterfaces[i] = new Gicv3CPUInterface(this, i);
}
Gicv3Distributor::ADDR_RANGE_SIZE - 1);
redistSize = redistributors[0]->addrRangeSize;
- redistRange = RangeSize(params()->redist_addr,
- redistSize * sys->numContexts() - 1);
+ redistRange = RangeSize(params()->redist_addr, redistSize * threads - 1);
addrRanges = {distRange, redistRange};
distributor->init();
- for (int i = 0; i < sys->numContexts(); i++) {
+ for (int i = 0; i < threads; i++) {
redistributors[i]->init();
cpuInterfaces[i]->init();
}
Gicv3::postInt(uint32_t cpu, ArmISA::InterruptTypes int_type)
{
platform->intrctrl->post(cpu, int_type, 0);
- ArmSystem::callClearStandByWfi(sys->getThreadContext(cpu));
+ ArmSystem::callClearStandByWfi(sys->threads[cpu]);
}
bool
bool ns = !inSecureState();
- for (int i = 0; i < gic->getSystem()->numContexts(); i++) {
+ for (int i = 0; i < gic->getSystem()->threads.size(); i++) {
Gicv3Redistributor * redistributor_i =
gic->getRedistributor(i);
uint32_t affinity_i = redistributor_i->getAffinity();
void
Gicv3CPUInterface::assertWakeRequest()
{
- ThreadContext *tc = gic->getSystem()->getThreadContext(cpuId);
+ auto *tc = gic->getSystem()->threads[cpuId];
if (ArmSystem::callSetWakeRequest(tc)) {
Reset().invoke(tc);
tc->activate();
void
Gicv3CPUInterface::deassertWakeRequest()
{
- ThreadContext *tc = gic->getSystem()->getThreadContext(cpuId);
+ auto *tc = gic->getSystem()->threads[cpuId];
ArmSystem::callClearWakeRequest(tc);
}
if (affinity_routing.IRM) {
// Interrupts routed to any PE defined as a participating node
- for (int i = 0; i < gic->getSystem()->numContexts(); i++) {
+ for (int i = 0; i < gic->getSystem()->threads.size(); i++) {
Gicv3Redistributor * redistributor_i =
gic->getRedistributor(i);
}
// Update all redistributors
- for (int i = 0; i < gic->getSystem()->numContexts(); i++) {
+ for (int i = 0; i < gic->getSystem()->threads.size(); i++) {
gic->getRedistributor(i)->update();
}
}
* (physical LPIs supported)
*/
uint64_t affinity = getAffinity();
- int last = cpuId == (gic->getSystem()->numContexts() - 1);
+ int last = cpuId == (gic->getSystem()->threads.size() - 1);
return (affinity << 32) | (1 << 24) | (cpuId << 8) |
(1 << 5) | (last << 4) | (1 << 3) | (1 << 0);
}
uint32_t
Gicv3Redistributor::getAffinity() const
{
- ThreadContext * tc = gic->getSystem()->getThreadContext(cpuId);
+ ThreadContext *tc = gic->getSystem()->threads[cpuId];
uint64_t mpidr = getMPIDR(gic->getSystem(), tc);
/*
* Aff3 = MPIDR[39:32]
{
auto p = params();
// Initialize the timer registers for each per cpu timer
- for (int i = 0; i < sys->numContexts(); i++) {
- ThreadContext* tc = sys->getThreadContext(i);
+ for (int i = 0; i < sys->threads.size(); i++) {
+ ThreadContext* tc = sys->threads[i];
std::stringstream oss;
oss << name() << ".timer" << i;
void
CpuLocalTimer::serialize(CheckpointOut &cp) const
{
- for (int i = 0; i < sys->numContexts(); i++)
+ for (int i = 0; i < sys->threads.size(); i++)
localTimer[i]->serializeSection(cp, csprintf("timer%d", i));
}
void
CpuLocalTimer::unserialize(CheckpointIn &cp)
{
- for (int i = 0; i < sys->numContexts(); i++)
+ for (int i = 0; i < sys->threads.size(); i++)
localTimer[i]->unserializeSection(cp, csprintf("timer%d", i));
}
maintIntPosted[x] = false;
vIntPosted[x] = false;
}
- assert(sys->numRunningContexts() <= VGIC_CPU_MAX);
+ assert(sys->threads.numRunning() <= VGIC_CPU_MAX);
}
VGic::~VGic()
}
}
- assert(sys->numRunningContexts() <= VGIC_CPU_MAX);
- for (int i = 0; i < sys->numRunningContexts(); i++) {
+ assert(sys->threads.numRunning() <= VGIC_CPU_MAX);
+ for (int i = 0; i < sys->threads.numRunning(); i++) {
struct vcpuIntData *vid = &vcpuData[i];
// Are any LRs active that weren't before?
if (!vIntPosted[i]) {
void
MaltaCChip::postIntr(uint32_t interrupt)
{
- uint64_t size = sys->threadContexts.size();
+ uint64_t size = sys->threads.size();
assert(size <= Malta::Max_CPUs);
for (int i=0; i < size; i++) {
void
MaltaCChip::clearIntr(uint32_t interrupt)
{
- uint64_t size = sys->threadContexts.size();
+ uint64_t size = sys->threads.size();
assert(size <= Malta::Max_CPUs);
for (int i=0; i < size; i++) {
start();
} else {
// Wake up thread contexts on non-switch nodes.
- for (int i = 0; i < DistIface::master->sys->numContexts(); i++) {
- ThreadContext *tc =
- DistIface::master->sys->getThreadContext(i);
+ for (auto *tc: master->sys->threads) {
if (tc->status() == ThreadContext::Suspended)
tc->activate();
else
// Dist-gem5 will reactivate all thread contexts when everyone has
// reached the sync stop point.
#if THE_ISA != NULL_ISA
- for (int i = 0; i < master->sys->numContexts(); i++) {
- ThreadContext *tc = master->sys->getThreadContext(i);
+ for (auto *tc: master->sys->threads) {
if (tc->status() == ThreadContext::Active)
tc->quiesce();
}
// activation here, since we know exactly when the next sync will
// occur.
#if THE_ISA != NULL_ISA
- for (int i = 0; i < master->sys->numContexts(); i++) {
- ThreadContext *tc = master->sys->getThreadContext(i);
+ for (auto *tc: master->sys->threads) {
if (tc->status() == ThreadContext::Active)
tc->quiesceTick(master->syncEvent->when() + 1);
}
iobManSize = ULL(0x0100000000);
iobJBusAddr = ULL(0x9F00000000);
iobJBusSize = ULL(0x0100000000);
- assert (params()->system->threadContexts.size() <= MaxNiagaraProcs);
+ assert(params()->system->threads.size() <= MaxNiagaraProcs);
pioDelay = p->pio_latency;
Iob::generateIpi(Type type, int cpu_id, int vector)
{
SparcISA::SparcFault<SparcISA::PowerOnReset> *por = new SparcISA::PowerOnReset();
- if (cpu_id >= sys->numContexts())
+ if (cpu_id >= sys->threads.size())
return;
switch (type) {
warn("Sending reset to CPU: %d\n", cpu_id);
if (vector != por->trapType())
panic("Don't know how to set non-POR reset to cpu\n");
- por->invoke(sys->threadContexts[cpu_id]);
- sys->threadContexts[cpu_id]->activate();
+ por->invoke(sys->threads[cpu_id]);
+ sys->threads[cpu_id]->activate();
break;
case 2: // idle -- this means stop executing and don't wake on interrupts
DPRINTF(Iob, "Idling CPU because of I/O write cpu: %d\n", cpu_id);
- sys->threadContexts[cpu_id]->halt();
+ sys->threads[cpu_id]->halt();
break;
case 3: // resume
DPRINTF(Iob, "Resuming CPU because of I/O write cpu: %d\n", cpu_id);
- sys->threadContexts[cpu_id]->activate();
+ sys->threads[cpu_id]->activate();
break;
default:
panic("Invalid type to generate ipi\n");
message.level = entry.polarity;
message.trigger = entry.trigger;
std::list<int> apics;
- int numContexts = sys->numContexts();
+ int numContexts = sys->threads.size();
if (message.destMode == 0) {
if (message.deliveryMode == DeliveryMode::LowestPriority) {
panic("Lowest priority delivery mode from the "
}
} else {
for (int i = 0; i < numContexts; i++) {
- BaseInterrupts *base_int = sys->getThreadContext(i)->
+ BaseInterrupts *base_int = sys->threads[i]->
getCpuPtr()->getInterruptController(0);
auto *localApic = dynamic_cast<Interrupts *>(base_int);
if ((localApic->readReg(APIC_LOGICAL_DESTINATION) >> 24) &
std::string
Linux::cpuOnline(Process *process, ThreadContext *tc)
{
- return csprintf("0-%d\n",
- tc->getSystemPtr()->numContexts() - 1);
+ return csprintf("0-%d\n", tc->getSystemPtr()->threads.size() - 1);
}
std::string
req->contextId() :
InvalidContextID;
if (owner_cid != requester_cid) {
- ThreadContext* ctx = system()->getThreadContext(owner_cid);
+ ThreadContext* ctx = system()->threads[owner_cid];
TheISA::globalClearExclusive(ctx);
}
i = lockedAddrList.erase(i);
} else {
// Add the translation request and try to resolve it later
dpp.setTranslationRequest(translation_req);
- dpp.tc = cache->system->getThreadContext(translation_req->contextId());
+ dpp.tc = cache->system->threads[translation_req->contextId()];
DPRINTF(HWPrefetch, "Prefetch queued with no translation. "
"addr:%#x priority: %3d\n", new_pfi.getAddr(), priority);
addToQueue(pfqMissingTranslation, dpp);
* There is currently no general method across all TLB implementations
* that can flush just part of the address space.
*/
- for (auto tc : _ownerProcess->system->threadContexts) {
+ for (auto *tc: _ownerProcess->system->threads) {
tc->getDTBPtr()->flushAll();
tc->getITBPtr()->flushAll();
}
* There is currently no general method across all TLB implementations
* that can flush just part of the address space.
*/
- for (auto tc : _ownerProcess->system->threadContexts) {
+ for (auto *tc: _ownerProcess->system->threads) {
tc->getDTBPtr()->flushAll();
tc->getITBPtr()->flushAll();
}
* ThreadContexts associated with this process.
*/
for (auto &cid : _ownerProcess->contextIds) {
- ThreadContext *tc =
- _ownerProcess->system->getThreadContext(cid);
+ auto *tc = _ownerProcess->system->threads[cid];
SETranslatingPortProxy
virt_mem(tc, SETranslatingPortProxy::Always);
vma.fillMemPages(vpage_start, _pageBytes, virt_mem);
fatal("Process %s is not associated with any HW contexts!\n", name());
// first thread context for this process... initialize & enable
- ThreadContext *tc = system->getThreadContext(contextIds[0]);
+ ThreadContext *tc = system->threads[contextIds[0]];
// mark this context as active so it will start ticking.
tc->activate();
DPRINTF(PseudoInst, "PseudoInst::wakeCPU(%i)\n", cpuid);
System *sys = tc->getSystemPtr();
- if (sys->numContexts() <= cpuid) {
+ if (sys->threads.size() <= cpuid) {
warn("PseudoInst::wakeCPU(%i), cpuid greater than number of contexts"
- "(%i)\n",cpuid, sys->numContexts());
+ "(%i)\n", cpuid, sys->threads.size());
return;
}
- ThreadContext *other_tc = sys->threadContexts[cpuid];
+ ThreadContext *other_tc = sys->threads[cpuid];
if (other_tc->status() == ThreadContext::Suspended)
other_tc->activate();
}
bool last_thread = true;
Process *parent = nullptr, *tg_lead = nullptr;
- for (int i = 0; last_thread && i < sys->numContexts(); i++) {
+ for (int i = 0; last_thread && i < sys->threads.size(); i++) {
Process *walk;
- if (!(walk = sys->threadContexts[i]->getProcessPtr()))
+ if (!(walk = sys->threads[i]->getProcessPtr()))
continue;
/**
if (walk->pid() == p->tgid())
tg_lead = walk;
- if ((sys->threadContexts[i]->status() != ThreadContext::Halted) &&
- (sys->threadContexts[i]->status() != ThreadContext::Halting) &&
+ auto *tc = sys->threads[i];
+ if ((tc->status() != ThreadContext::Halted) &&
+ (tc->status() != ThreadContext::Halting) &&
(walk != p)) {
/**
* Check if we share thread group with the pointer; this denotes
* all threads in the group.
*/
if (*(p->exitGroup)) {
- sys->threadContexts[i]->halt();
+ tc->halt();
} else {
last_thread = false;
}
*/
int activeContexts = 0;
for (auto &system: sys->systemList)
- activeContexts += system->numRunningContexts();
+ activeContexts += system->threads.numRunning();
if (activeContexts == 0) {
/**
System *sysh = tc->getSystemPtr();
// Retrieves process pointer from active/suspended thread contexts.
- for (int i = 0; i < sysh->numContexts(); i++) {
- if (sysh->threadContexts[i]->status() != ThreadContext::Halted) {
- Process *temp_h = sysh->threadContexts[i]->getProcessPtr();
+ for (auto *tc: sysh->threads) {
+ if (tc->status() != ThreadContext::Halted) {
+ Process *temp_h = tc->getProcessPtr();
Process *walk_ph = (Process*)temp_h;
if (walk_ph && walk_ph->pid() == process->pid())
return -EINVAL;
ThreadContext *ctc;
- if (!(ctc = tc->getSystemPtr()->findFreeContext())) {
+ if (!(ctc = tc->getSystemPtr()->threads.findFree())) {
DPRINTF_SYSCALL(Verbose, "clone: no spare thread context in system "
"[cpu %d, thread %d]", tc->cpuId(), tc->threadId());
return -EAGAIN;
break;
case OS::TGT_RLIMIT_NPROC:
- rlp->rlim_cur = rlp->rlim_max = tc->getSystemPtr()->numContexts();
+ rlp->rlim_cur = rlp->rlim_max = tc->getSystemPtr()->threads.size();
rlp->rlim_cur = htog(rlp->rlim_cur, bo);
rlp->rlim_max = htog(rlp->rlim_max, bo);
break;
System *sys = tc->getSystemPtr();
Process *tgt_proc = nullptr;
- for (int i = 0; i < sys->numContexts(); i++) {
- Process *temp = sys->threadContexts[i]->getProcessPtr();
+ for (auto *tc: sys->threads) {
+ Process *temp = tc->getProcessPtr();
if (temp->pid() == tid) {
tgt_proc = temp;
break;
#include "cpu/base.hh"
#include "cpu/thread_context.hh"
#include "debug/Loader.hh"
+#include "debug/Quiesce.hh"
#include "debug/WorkItems.hh"
#include "mem/abstract_mem.hh"
#include "mem/physical.hh"
vector<System *> System::systemList;
+ContextID
+System::Threads::insert(ThreadContext *tc, ContextID id)
+{
+ if (id == InvalidContextID) {
+ for (id = 0; id < size(); id++) {
+ if (!threads[id].context)
+ break;
+ }
+ }
+
+ if (id >= size())
+ threads.resize(id + 1);
+
+ fatal_if(threads[id].context,
+ "Cannot have two thread contexts with the same id (%d).", id);
+
+ auto &t = thread(id);
+ t.context = tc;
+# if THE_ISA != NULL_ISA
+ int port = getRemoteGDBPort();
+ if (port) {
+ t.gdb = new RemoteGDB(tc->getSystemPtr(), tc, port + id);
+ t.gdb->listen();
+ }
+# endif
+
+ return id;
+}
+
+void
+System::Threads::replace(ThreadContext *tc, ContextID id)
+{
+ auto &t = thread(id);
+ t.context = tc;
+ if (t.gdb)
+ t.gdb->replaceThreadContext(tc);
+}
+
+ThreadContext *
+System::Threads::findFree()
+{
+ for (auto &thread: threads) {
+ if (thread.context->status() == ThreadContext::Halted)
+ return thread.context;
+ }
+ return nullptr;
+}
+
+int
+System::Threads::numRunning() const
+{
+ int count = 0;
+ for (auto &thread: threads) {
+ auto status = thread.context->status();
+ if (status != ThreadContext::Halted &&
+ status != ThreadContext::Halting) {
+ count++;
+ }
+ }
+ return count;
+}
+
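A compressed, standalone sketch of the bookkeeping insert() and findFree() implement above, using stand-in types (the real code also wires a per-thread RemoteGDB stub into each slot); illustrative only, not part of the patch:

#include <cassert>
#include <vector>

enum class Status { Active, Halted };
struct StubTC { Status status = Status::Halted; };

struct StubThreads
{
    struct Thread { StubTC *context = nullptr; };
    std::vector<Thread> threads;

    // Mirrors Threads::insert(): take the first free slot when no explicit
    // id is given, grow the vector as needed, and reject duplicate ids.
    int
    insert(StubTC *tc, int id = -1)
    {
        if (id == -1) {
            for (id = 0; id < (int)threads.size(); id++) {
                if (!threads[id].context)
                    break;
            }
        }
        if (id >= (int)threads.size())
            threads.resize(id + 1);
        assert(!threads[id].context);
        threads[id].context = tc;
        return id;
    }

    // Mirrors Threads::findFree(): return the first halted context.
    StubTC *
    findFree()
    {
        for (auto &t: threads) {
            if (t.context->status == Status::Halted)
                return t.context;
        }
        return nullptr;
    }
};

int main()
{
    StubThreads threads;
    StubTC a, b, c;
    assert(threads.insert(&a) == 0);     // first free slot
    assert(threads.insert(&b) == 1);     // next free slot
    a.status = Status::Active;
    assert(threads.findFree() == &b);    // first Halted context
    assert(threads.insert(&c, 4) == 4);  // an explicit id grows the vector
    return 0;
}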
int System::numSystemsRunning = 0;
System::System(Params *p)
// Now that we're about to start simulation, wait for GDB connections if
// requested.
#if THE_ISA != NULL_ISA
- for (auto *tc: threadContexts) {
- auto *cpu = tc->getCpuPtr();
- auto id = tc->contextId();
- if (remoteGDB.size() <= id)
- continue;
- auto *rgdb = remoteGDB[id];
-
- if (cpu->waitForRemoteGDB()) {
- inform("%s: Waiting for a remote GDB connection on port %d.\n",
- cpu->name(), rgdb->port());
-
- rgdb->connect();
+ for (int i = 0; i < threads.size(); i++) {
+ auto *gdb = threads.thread(i).gdb;
+ auto *cpu = threads[i]->getCpuPtr();
+ if (gdb && cpu->waitForRemoteGDB()) {
+ inform("%s: Waiting for a remote GDB connection on port %d.",
+ cpu->name(), gdb->port());
+ gdb->connect();
}
}
#endif
bool System::breakpoint()
{
- if (remoteGDB.size())
- return remoteGDB[0]->breakpoint();
- return false;
+ if (!threads.size())
+ return false;
+ auto *gdb = threads.thread(0).gdb;
+ if (!gdb)
+ return false;
+ return gdb->breakpoint();
}
ContextID
System::registerThreadContext(ThreadContext *tc, ContextID assigned)
{
- int id = assigned;
- if (id == InvalidContextID) {
- // Find an unused context ID for this thread.
- id = 0;
- while (id < threadContexts.size() && threadContexts[id])
- id++;
- }
-
- if (threadContexts.size() <= id)
- threadContexts.resize(id + 1);
+ ContextID id = threads.insert(tc, assigned);
- fatal_if(threadContexts[id],
- "Cannot have two CPUs with the same id (%d)\n", id);
-
- threadContexts[id] = tc;
for (auto *e: liveEvents)
tc->schedule(e);
-#if THE_ISA != NULL_ISA
- int port = getRemoteGDBPort();
- if (port) {
- RemoteGDB *rgdb = new RemoteGDB(this, tc, port + id);
- rgdb->listen();
-
- if (remoteGDB.size() <= id)
- remoteGDB.resize(id + 1);
-
- remoteGDB[id] = rgdb;
- }
-#endif
-
- activeCpus.push_back(false);
-
return id;
}
-ThreadContext *
-System::findFreeContext()
-{
- for (auto &it : threadContexts) {
- if (ThreadContext::Halted == it->status())
- return it;
- }
- return nullptr;
-}
-
bool
System::schedule(PCEvent *event)
{
bool all = true;
liveEvents.push_back(event);
- for (auto *tc: threadContexts)
+ for (auto *tc: threads)
all = tc->schedule(event) && all;
return all;
}
{
bool all = true;
liveEvents.remove(event);
- for (auto *tc: threadContexts)
+ for (auto *tc: threads)
all = tc->remove(event) && all;
return all;
}
-int
-System::numRunningContexts()
-{
- return std::count_if(
- threadContexts.cbegin(),
- threadContexts.cend(),
- [] (ThreadContext* tc) {
- return ((tc->status() != ThreadContext::Halted) &&
- (tc->status() != ThreadContext::Halting));
- }
- );
-}
-
void
System::replaceThreadContext(ThreadContext *tc, ContextID context_id)
{
- if (context_id >= threadContexts.size()) {
- panic("replaceThreadContext: bad id, %d >= %d\n",
- context_id, threadContexts.size());
- }
+ auto *otc = threads[context_id];
+ threads.replace(tc, context_id);
for (auto *e: liveEvents) {
- threadContexts[context_id]->remove(e);
+ otc->remove(e);
tc->schedule(e);
}
- threadContexts[context_id] = tc;
- if (context_id < remoteGDB.size())
- remoteGDB[context_id]->replaceThreadContext(tc);
}
bool
System::validKvmEnvironment() const
{
#if USE_KVM
- if (threadContexts.empty())
+ if (threads.empty())
return false;
- for (auto tc : threadContexts) {
- if (dynamic_cast<BaseKvmCPU*>(tc->getCpuPtr()) == nullptr) {
+ for (auto *tc: threads) {
+ if (!dynamic_cast<BaseKvmCPU *>(tc->getCpuPtr()))
return false;
- }
}
+
return true;
#else
return false;
#include "base/loader/symtab.hh"
#include "base/statistics.hh"
#include "config/the_isa.hh"
+#include "cpu/base.hh"
#include "cpu/pc_event.hh"
#include "enums/MemoryMode.hh"
#include "mem/mem_master.hh"
public:
+ class Threads
+ {
+ private:
+ struct Thread
+ {
+ ThreadContext *context = nullptr;
+ bool active = false;
+ BaseRemoteGDB *gdb = nullptr;
+ };
+
+ std::vector<Thread> threads;
+
+ Thread &
+ thread(ContextID id)
+ {
+ assert(id < size());
+ return threads[id];
+ }
+
+ const Thread &
+ thread(ContextID id) const
+ {
+ assert(id < size());
+ return threads[id];
+ }
+
+ ContextID insert(ThreadContext *tc, ContextID id=InvalidContextID);
+ void replace(ThreadContext *tc, ContextID id);
+
+ friend class System;
+
+ public:
+ class const_iterator
+ {
+ private:
+ const Threads &threads;
+ int pos;
+
+ friend class Threads;
+
+ const_iterator(const Threads &_threads, int _pos) :
+ threads(_threads), pos(_pos)
+ {}
+
+ public:
+ const_iterator(const const_iterator &) = default;
+ const_iterator &operator = (const const_iterator &) = default;
+
+ using iterator_category = std::forward_iterator_tag;
+ using value_type = ThreadContext *;
+ using difference_type = int;
+ using pointer = const value_type *;
+ using reference = const value_type &;
+
+ const_iterator &
+ operator ++ ()
+ {
+ pos++;
+ return *this;
+ }
+
+ const_iterator
+ operator ++ (int)
+ {
+ return const_iterator(threads, pos++);
+ }
+
+ reference operator * () { return threads.thread(pos).context; }
+ pointer operator -> () { return &threads.thread(pos).context; }
+
+ bool
+ operator == (const const_iterator &other) const
+ {
+ return &threads == &other.threads && pos == other.pos;
+ }
+
+ bool
+ operator != (const const_iterator &other) const
+ {
+ return !(*this == other);
+ }
+ };
+
+ ThreadContext *findFree();
+
+ ThreadContext *
+ operator [](ContextID id) const
+ {
+ return thread(id).context;
+ }
+
+ void markActive(ContextID id) { thread(id).active = true; }
+
+ int size() const { return threads.size(); }
+ bool empty() const { return threads.empty(); }
+ int numRunning() const;
+ int
+ numActive() const
+ {
+ int count = 0;
+ for (auto &thread: threads) {
+ if (thread.active)
+ count++;
+ }
+ return count;
+ }
+
+ void resume(ContextID id, Tick when);
+
+ const_iterator begin() const { return const_iterator(*this, 0); }
+ const_iterator end() const { return const_iterator(*this, size()); }
+ };
+
+ /**
+ * After all objects have been created and all ports are
+ * connected, check that the system port is connected.
+ */
void init() override;
void startup() override;
*/
unsigned int cacheLineSize() const { return _cacheLineSize; }
- std::vector<ThreadContext *> threadContexts;
- ThreadContext *findFreeContext();
-
- ThreadContext *
- getThreadContext(ContextID tid) const
- {
- return threadContexts[tid];
- }
+ Threads threads;
const bool multiThread;
bool schedule(PCEvent *event) override;
bool remove(PCEvent *event) override;
- unsigned numContexts() const { return threadContexts.size(); }
-
- /** Return number of running (non-halted) thread contexts in
- * system. These threads could be Active or Suspended. */
- int numRunningContexts();
-
Addr pagePtr;
uint64_t init_param;
uint64_t workItemsBegin;
uint64_t workItemsEnd;
uint32_t numWorkIds;
- std::vector<bool> activeCpus;
/** This array is a per-system list of all devices capable of issuing a
* memory system request and an associated string for each master id.
int
markWorkItem(int index)
{
- int count = 0;
- assert(index < activeCpus.size());
- activeCpus[index] = true;
- for (std::vector<bool>::iterator i = activeCpus.begin();
- i < activeCpus.end(); i++) {
- if (*i) count++;
- }
- return count;
+ threads.markActive(index);
+ return threads.numActive();
}
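markWorkItem() now delegates to the per-thread active flag rather than the old std::vector<bool> activeCpus; as before, marking the same context repeatedly counts it once. A standalone sketch of that counting behaviour (stand-in types, not part of the patch):

#include <cassert>
#include <vector>

struct StubActiveTracker
{
    // Stand-in for the per-thread "active" flag kept by System::Threads.
    std::vector<bool> active;

    void markActive(int id) { active[id] = true; }

    int
    numActive() const
    {
        int count = 0;
        for (bool a: active) {
            if (a)
                count++;
        }
        return count;
    }
};

int main()
{
    StubActiveTracker threads;
    threads.active.assign(4, false);
    threads.markActive(2);
    threads.markActive(2);             // re-marking the same context...
    assert(threads.numActive() == 1);  // ...does not double count
    threads.markActive(0);
    assert(threads.numActive() == 2);
    return 0;
}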
inline void workItemBegin(uint32_t tid, uint32_t workid)
/// @return Starting address of first page
Addr allocPhysPages(int npages);
- ContextID registerThreadContext(ThreadContext *tc,
- ContextID assigned = InvalidContextID);
+ ContextID registerThreadContext(
+ ThreadContext *tc, ContextID assigned=InvalidContextID);
void replaceThreadContext(ThreadContext *tc, ContextID context_id);
void serialize(CheckpointOut &cp) const override;
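The const_iterator declared in the Threads class is what lets the range-for loops elsewhere in this patch compile, and the threads member replaces the old public threadContexts vector along with numContexts(), numRunningContexts() and findFreeContext(). A short usage sketch mirroring the converted call sites (assumes gem5's usual headers; illustrative only, not part of the patch):

#include "base/logging.hh"
#include "cpu/thread_context.hh"
#include "sim/system.hh"

void
wakeSuspended(System *sys)
{
    // Range-for works because Threads provides begin()/end() returning the
    // const_iterator declared above.
    for (auto *tc: sys->threads) {
        if (tc->status() == ThreadContext::Suspended)
            tc->activate();
    }

    // The helpers stand in for the removed free-standing System methods:
    //   threads.size()       ~ numContexts()
    //   threads.numRunning() ~ numRunningContexts()
    //   threads.findFree()   ~ findFreeContext()
    if (sys->threads.numRunning() == 0)
        warn("no context is running after the wakeup");
}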