// Machine dependent functions
//
void
-AlphaISA::init(void *mem, RegFile *regs)
+AlphaISA::initCPU(RegFile *regs)
{
- ipr_init(mem, regs);
+ initIPRs(regs);
}
void
//
//
void
-AlphaISA::ipr_init(void *mem, RegFile *regs)
+AlphaISA::initIPRs(RegFile *regs)
{
uint64_t *ipr = regs->ipr;
case OSF::GSI_MAX_CPU: {
TypedBufferArg<uint32_t> max_cpu(getArg(xc, 1));
- *max_cpu = process->numCpus;
+ *max_cpu = process->numCpus();
max_cpu.copyOut(xc->mem);
return 1;
}
case OSF::GSI_CPUS_IN_BOX: {
TypedBufferArg<uint32_t> cpus_in_box(getArg(xc, 1));
- *cpus_in_box = process->numCpus;
+ *cpus_in_box = process->numCpus();
cpus_in_box.copyOut(xc->mem);
return 1;
}
TypedBufferArg<OSF::cpu_info> infop(getArg(xc, 1));
infop->current_cpu = 0;
- infop->cpus_in_box = process->numCpus;
+ infop->cpus_in_box = process->numCpus();
infop->cpu_type = 57;
- infop->ncpus = process->numCpus;
- int cpumask = (1 << process->numCpus) - 1;
+ infop->ncpus = process->numCpus();
+ int cpumask = (1 << process->numCpus()) - 1;
infop->cpus_present = infop->cpus_running = cpumask;
infop->cpu_binding = 0;
infop->cpu_ex_binding = 0;
elp->si_hz = clk_hz;
elp->si_phz = clk_hz;
elp->si_boottime = seconds_since_epoch; // seconds since epoch?
- elp->si_max_procs = process->numCpus;
+ elp->si_max_procs = process->numCpus();
elp.copyOut(xc->mem);
return 0;
}
cur_addr += sizeof(OSF::nxm_config_info);
// next comes the per-cpu state vector
Addr slot_state_addr = cur_addr;
- int slot_state_size = process->numCpus * sizeof(OSF::nxm_slot_state_t);
+ int slot_state_size = process->numCpus() * sizeof(OSF::nxm_slot_state_t);
cur_addr += slot_state_size;
// now the per-RAD state struct (we only support one RAD)
cur_addr = 0x14000; // bump up addr for alignment
Addr rad_state_addr = cur_addr;
int rad_state_size =
(sizeof(OSF::nxm_shared)
- + (process->numCpus-1) * sizeof(OSF::nxm_sched_state));
+ + (process->numCpus()-1) * sizeof(OSF::nxm_sched_state));
cur_addr += rad_state_size;
// now initialize a config_info struct and copy it out to user space
TypedBufferArg<OSF::nxm_config_info> config(config_addr);
- config->nxm_nslots_per_rad = process->numCpus;
+ config->nxm_nslots_per_rad = process->numCpus();
config->nxm_nrads = 1; // only one RAD in our system!
config->nxm_slot_state = slot_state_addr;
config->nxm_rad[0] = rad_state_addr;
// initialize the slot_state array and copy it out
TypedBufferArg<OSF::nxm_slot_state_t> slot_state(slot_state_addr,
slot_state_size);
- for (int i = 0; i < process->numCpus; ++i) {
+ for (int i = 0; i < process->numCpus(); ++i) {
// CPU 0 is bound to the calling process; all others are available
slot_state[i] = (i == 0) ? OSF::NXM_SLOT_BOUND : OSF::NXM_SLOT_AVAIL;
}
rad_state->nxm_callback = attrp->nxm_callback;
rad_state->nxm_version = attrp->nxm_version;
rad_state->nxm_uniq_offset = attrp->nxm_uniq_offset;
- for (int i = 0; i < process->numCpus; ++i) {
+ for (int i = 0; i < process->numCpus(); ++i) {
OSF::nxm_sched_state *ssp = &rad_state->nxm_ss[i];
ssp->nxm_u.sigmask = 0;
ssp->nxm_u.sig = 0;
abort();
}
- if (thread_index < 0 | thread_index > process->numCpus) {
+ if (thread_index < 0 || thread_index > process->numCpus()) {
cerr << "nxm_thread_create: bad thread index " << thread_index
<< endl;
abort();
// back out again.
int rad_state_size =
(sizeof(OSF::nxm_shared) +
- (process->numCpus-1) * sizeof(OSF::nxm_sched_state));
+ (process->numCpus()-1) * sizeof(OSF::nxm_sched_state));
TypedBufferArg<OSF::nxm_shared> rad_state(0x14000,
rad_state_size);
rad_state.copyOut(xc->mem);
Addr slot_state_addr = 0x12000 + sizeof(OSF::nxm_config_info);
- int slot_state_size = process->numCpus * sizeof(OSF::nxm_slot_state_t);
+ int slot_state_size = process->numCpus() * sizeof(OSF::nxm_slot_state_t);
TypedBufferArg<OSF::nxm_slot_state_t> slot_state(slot_state_addr,
slot_state_size);
slot_state.copyOut(xc->mem);
// Find a free simulator execution context.
- list<ExecContext *> &ecList = process->execContexts;
- list<ExecContext *>::iterator i = ecList.begin();
- list<ExecContext *>::iterator end = ecList.end();
- for (; i != end; ++i) {
- ExecContext *xc = *i;
+ for (int i = 0; i < process->numCpus(); ++i) {
+ ExecContext *xc = process->execContexts[i];
if (xc->status() == ExecContext::Unallocated) {
// inactive context... grab it
RemoteGDB(System *system, ExecContext *context);
~RemoteGDB();
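+
+ // re-point this debugger session at a new ExecContext (used when one CPU's contexts take over from another's)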
+ void replaceExecContext(ExecContext *xc) { context = xc; }
+
void attach(int fd);
void detach();
bool isattached();
Counter max_insts_all_threads,
Counter max_loads_any_thread,
Counter max_loads_all_threads,
- System *_system, int num, Tick freq)
- : SimObject(_name), number(num), frequency(freq),
+ System *_system, Tick freq)
+ : SimObject(_name), frequency(freq),
number_of_threads(_number_of_threads), system(_system)
#else
BaseCPU::BaseCPU(const string &_name, int _number_of_threads,
max_loads_all_threads, *counter);
}
-
#ifdef FULL_SYSTEM
memset(interrupts, 0, sizeof(interrupts));
intstatus = 0;
#endif
}
+
void
BaseCPU::regStats()
{
- int size = contexts.size();
+ int size = execContexts.size();
if (size > 1) {
for (int i = 0; i < size; ++i) {
stringstream namestr;
ccprintf(namestr, "%s.ctx%d", name(), i);
- contexts[i]->regStats(namestr.str());
+ execContexts[i]->regStats(namestr.str());
}
} else if (size == 1)
- contexts[0]->regStats(name());
+ execContexts[0]->regStats(name());
+}
+
+
+void
+BaseCPU::registerExecContexts()
+{
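+ // register each context with its owning System (full-system) or Process
+ // (syscall emulation) and record the cpu_id it hands back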
+ for (int i = 0; i < execContexts.size(); ++i) {
+ ExecContext *xc = execContexts[i];
+ int cpu_id;
+
+#ifdef FULL_SYSTEM
+ cpu_id = system->registerExecContext(xc);
+#else
+ cpu_id = xc->process->registerExecContext(xc);
+#endif
+
+ xc->cpu_id = cpu_id;
+ }
+}
+
+
+void
+BaseCPU::switchOut()
+{
+ // default: do nothing
}
+void
+BaseCPU::takeOverFrom(BaseCPU *oldCPU)
+{
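+ // copy each context's state over from the old CPU, then re-point the
+ // System/Process context table entries at this CPU's contexts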
+ assert(execContexts.size() == oldCPU->execContexts.size());
+
+ for (int i = 0; i < execContexts.size(); ++i) {
+ ExecContext *newXC = execContexts[i];
+ ExecContext *oldXC = oldCPU->execContexts[i];
+
+ newXC->takeOverFrom(oldXC);
+ assert(newXC->cpu_id == oldXC->cpu_id);
+#ifdef FULL_SYSTEM
+ system->replaceExecContext(newXC->cpu_id, newXC);
+#else
+ assert(newXC->process == oldXC->process);
+ newXC->process->replaceExecContext(newXC->cpu_id, newXC);
+#endif
+ }
+}
+
+
#ifdef FULL_SYSTEM
void
BaseCPU::post_interrupt(int int_num, int index)
#endif // FULL_SYSTEM
+//
+// This declaration is not needed now that SamplingCPU provides a
+// BaseCPUBuilder object.
+//
+#if 0
DEFINE_SIM_OBJECT_CLASS_NAME("BaseCPU", BaseCPU)
+#endif
{
#ifdef FULL_SYSTEM
protected:
- int number;
Tick frequency;
uint8_t interrupts[NumInterruptLevels];
uint64_t intstatus;
#endif
protected:
- std::vector<ExecContext *> contexts;
+ std::vector<ExecContext *> execContexts;
public:
virtual void execCtxStatusChg() {}
BaseCPU(const std::string &_name, int _number_of_threads,
Counter max_insts_any_thread, Counter max_insts_all_threads,
Counter max_loads_any_thread, Counter max_loads_all_threads,
- System *_system,
- int num, Tick freq);
+ System *_system, Tick freq);
#else
BaseCPU(const std::string &_name, int _number_of_threads,
Counter max_insts_any_thread = 0,
virtual void regStats();
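+ /// Register this CPU's execution contexts with the owning System or
+ /// Process and record the cpu_id assigned to each.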
+ virtual void registerExecContexts();
+
+ /// Prepare for another CPU to take over execution. Called by
+ /// takeOverFrom() on its argument.
+ virtual void switchOut();
+
+ /// Take over execution from the given CPU. Used for warm-up and
+ /// sampling.
+ virtual void takeOverFrom(BaseCPU *);
+
/**
* Number of threads we're actually simulating (<= SMT_MAX_THREADS).
* This is a constant for the duration of the simulation.
#ifdef FULL_SYSTEM
ExecContext::ExecContext(BaseCPU *_cpu, int _thread_num, System *_sys,
AlphaItb *_itb, AlphaDtb *_dtb,
- FunctionalMemory *_mem, int _cpu_id)
- : kernelStats(this, _cpu), cpu(_cpu), thread_num(_thread_num), mem(_mem),
- itb(_itb), dtb(_dtb), cpu_id(_cpu_id), system(_sys),
- memCtrl(_sys->memCtrl), physmem(_sys->physmem)
+ FunctionalMemory *_mem)
+ : kernelStats(this, _cpu), cpu(_cpu), thread_num(_thread_num),
+ cpu_id(-1), mem(_mem), itb(_itb), dtb(_dtb), system(_sys),
+ memCtrl(_sys->memCtrl), physmem(_sys->physmem),
+ func_exe_insn(0), storeCondFailures(0)
{
memset(®s, 0, sizeof(RegFile));
- _status = Active;
- func_exe_insn = 0;
- storeCondFailures = 0;
- system->registerExecContext(this);
+ setStatus(ExecContext::Unallocated);
}
#else
ExecContext::ExecContext(BaseCPU *_cpu, int _thread_num,
Process *_process, int _asid)
- : cpu(_cpu), thread_num(_thread_num), process(_process), asid (_asid)
+ : cpu(_cpu), thread_num(_thread_num), cpu_id(-1),
+ process(_process), asid (_asid),
+ func_exe_insn(0), storeCondFailures(0)
{
-
- // Register with process object. Our 'active' will be set by the
- // process iff we're the initial context. Others are reserved for
- // dynamically created threads.
- process->registerExecContext(this);
+ setStatus(ExecContext::Unallocated);
mem = process->getMemory();
-
- func_exe_insn = 0;
- storeCondFailures = 0;
}
ExecContext::ExecContext(BaseCPU *_cpu, int _thread_num,
FunctionalMemory *_mem, int _asid)
- : cpu(_cpu), thread_num(_thread_num), process(NULL), mem(_mem),
- asid(_asid)
+ : cpu(_cpu), thread_num(_thread_num), process(0), mem(_mem), asid(_asid),
+ func_exe_insn(0), storeCondFailures(0)
{
}
#endif
+
+void
+ExecContext::takeOverFrom(ExecContext *oldContext)
+{
+ // some things should already be set up
+ assert(mem == oldContext->mem);
+#ifdef FULL_SYSTEM
+ assert(system == oldContext->system);
+#else
+ assert(process == oldContext->process);
+#endif
+
+ // copy over functional state
+ _status = oldContext->_status;
+#ifdef FULL_SYSTEM
+ kernelStats = oldContext->kernelStats;
+#endif
+ regs = oldContext->regs;
+ cpu_id = oldContext->cpu_id;
+ func_exe_insn = oldContext->func_exe_insn;
+
+ storeCondFailures = 0;
+
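+ // mark the old context as unallocated now that this one has taken over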
+ oldContext->_status = ExecContext::Unallocated;
+}
+
+
void
ExecContext::setStatus(Status new_status)
{
public:
Status status() const { return _status; }
+
+ // Unlike setStatus(), initStatus() has no side effects other than
+ // setting the _status variable.
+ void initStatus(Status init_status) { _status = init_status; }
+
void setStatus(Status new_status);
#ifdef FULL_SYSTEM
// Index of hardware thread context on the CPU that this represents.
int thread_num;
+ // ID of this context w.r.t. the System or Process object to which
+ // it belongs. For full-system mode, this is the system CPU ID.
+ int cpu_id;
+
#ifdef FULL_SYSTEM
FunctionalMemory *mem;
AlphaItb *itb;
AlphaDtb *dtb;
- int cpu_id;
System *system;
// the following two fields are redundant, since we can always
// constructor: initialize context from given process structure
#ifdef FULL_SYSTEM
ExecContext(BaseCPU *_cpu, int _thread_num, System *_system,
- AlphaItb *_itb, AlphaDtb *_dtb, FunctionalMemory *_dem,
- int _cpu_id);
+ AlphaItb *_itb, AlphaDtb *_dtb, FunctionalMemory *_mem);
#else
ExecContext(BaseCPU *_cpu, int _thread_num, Process *_process, int _asid);
ExecContext(BaseCPU *_cpu, int _thread_num, FunctionalMemory *_mem,
#endif
virtual ~ExecContext() {}
+ virtual void takeOverFrom(ExecContext *oldContext);
+
void regStats(const std::string &name);
#ifdef FULL_SYSTEM
return dtb->translate(req, true);
}
-
#else
bool validInstAddr(Addr addr)
{ return process->validInstAddr(addr); }
// and all other stores (WH64?). Unsuccessful Store
// Conditionals would have returned above, and wouldn't fall
// through.
- for (int i = 0; i < system->xcvec.size(); i++){
- cregs = &system->xcvec[i]->regs.miscRegs;
+ for (int i = 0; i < system->execContexts.size(); i++){
+ cregs = &system->execContexts[i]->regs.miscRegs;
if ((cregs->lock_addr & ~0xf) == (req->paddr & ~0xf)) {
cregs->lock_flag = false;
}
FunctionalMemory *mem,
MemInterface *icache_interface,
MemInterface *dcache_interface,
- int cpu_id, Tick freq)
+ Tick freq)
: BaseCPU(_name, /* number_of_threads */ 1,
max_insts_any_thread, max_insts_all_threads,
max_loads_any_thread, max_loads_all_threads,
- _system, cpu_id, freq),
+ _system, freq),
#else
SimpleCPU::SimpleCPU(const string &_name, Process *_process,
Counter max_insts_any_thread,
#endif
tickEvent(this), xc(NULL), cacheCompletionEvent(this)
{
+ _status = Idle;
#ifdef FULL_SYSTEM
- xc = new ExecContext(this, 0, system, itb, dtb, mem, cpu_id);
+ xc = new ExecContext(this, 0, system, itb, dtb, mem);
- _status = Running;
- if (cpu_id != 0) {
+ TheISA::initCPU(&xc->regs);
- xc->setStatus(ExecContext::Unallocated);
+ IntReg *ipr = xc->regs.ipr;
+ ipr[TheISA::IPR_MCSR] = 0x6;
- //Open a GDB debug session on port (7000 + the cpu_id)
- (new GDBListener(new RemoteGDB(system, xc), 7000 + cpu_id))->listen();
-
- AlphaISA::init(system->physmem, &xc->regs);
-
- fault = Reset_Fault;
-
- IntReg *ipr = xc->regs.ipr;
- ipr[TheISA::IPR_MCSR] = 0x6;
-
- AlphaISA::swap_palshadow(&xc->regs, true);
-
- xc->regs.pc =
- ipr[TheISA::IPR_PAL_BASE] + AlphaISA::fault_addr[fault];
- xc->regs.npc = xc->regs.pc + sizeof(MachInst);
-
- _status = Idle;
- }
- else {
- system->init(xc);
-
- // Reset the system
- //
- AlphaISA::init(system->physmem, &xc->regs);
-
- fault = Reset_Fault;
-
- IntReg *ipr = xc->regs.ipr;
- ipr[TheISA::IPR_MCSR] = 0x6;
-
- AlphaISA::swap_palshadow(&xc->regs, true);
-
- xc->regs.pc = ipr[TheISA::IPR_PAL_BASE] + AlphaISA::fault_addr[fault];
- xc->regs.npc = xc->regs.pc + sizeof(MachInst);
-
- _status = Running;
- tickEvent.schedule(0);
- }
+ AlphaISA::swap_palshadow(&xc->regs, true);
+ fault = Reset_Fault;
+ xc->regs.pc = ipr[TheISA::IPR_PAL_BASE] + AlphaISA::fault_addr[fault];
+ xc->regs.npc = xc->regs.pc + sizeof(MachInst);
#else
xc = new ExecContext(this, /* thread_num */ 0, _process, /* asid */ 0);
fault = No_Fault;
- if (xc->status() == ExecContext::Active) {
- _status = Running;
- tickEvent.schedule(0);
- } else
- _status = Idle;
#endif // !FULL_SYSTEM
icacheInterface = icache_interface;
lastIcacheStall = 0;
lastDcacheStall = 0;
- contexts.push_back(xc);
+ execContexts.push_back(xc);
}
SimpleCPU::~SimpleCPU()
{
}
+
+void
+SimpleCPU::registerExecContexts()
+{
+ BaseCPU::registerExecContexts();
+
+ // if any of this CPU's ExecContexts are active, mark the CPU as
+ // running and schedule its tick event.
+ for (int i = 0; i < execContexts.size(); ++i) {
+ ExecContext *xc = execContexts[i];
+ if (xc->status() == ExecContext::Active && _status != Running) {
+ _status = Running;
+ // this should only happen at initialization time
+ assert(curTick == 0);
+ tickEvent.schedule(0);
+ }
+ }
+}
+
+
+void
+SimpleCPU::switchOut()
+{
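+ // mark this CPU as switched out and squash any pending tick event so it
+ // stops executing; the replacement CPU resumes via takeOverFrom()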
+ _status = SwitchedOut;
+ if (tickEvent.scheduled())
+ tickEvent.squash();
+}
+
+
+void
+SimpleCPU::takeOverFrom(BaseCPU *oldCPU)
+{
+ BaseCPU::takeOverFrom(oldCPU);
+
+ assert(!tickEvent.scheduled());
+
+ // if any of this CPU's ExecContexts are active, mark the CPU as
+ // running and schedule its tick event.
+ for (int i = 0; i < execContexts.size(); ++i) {
+ ExecContext *xc = execContexts[i];
+ if (xc->status() == ExecContext::Active && _status != Running) {
+ _status = Running;
+ tickEvent.schedule(curTick + 1);
+ }
+ }
+}
+
+
void
SimpleCPU::regStats()
{
dcacheStallCycles += curTick - lastDcacheStall;
setStatus(Running);
break;
+ case SwitchedOut:
+ // If this CPU has been switched out due to sampling/warm-up,
+ // ignore any further status changes (e.g., due to cache
+ // misses outstanding at the time of the switch).
+ return;
default:
panic("SimpleCPU::processCacheCompletion: bad state");
break;
SimObjectParam<AlphaDtb *> dtb;
SimObjectParam<FunctionalMemory *> mem;
SimObjectParam<System *> system;
- Param<int> cpu_id;
Param<int> mult;
#else
SimObjectParam<Process *> workload;
SimObjectParam<BaseMem *> icache;
SimObjectParam<BaseMem *> dcache;
+ Param<bool> defer_registration;
+
END_DECLARE_SIM_OBJECT_PARAMS(SimpleCPU)
BEGIN_INIT_SIM_OBJECT_PARAMS(SimpleCPU)
INIT_PARAM(dtb, "Data TLB"),
INIT_PARAM(mem, "memory"),
INIT_PARAM(system, "system object"),
- INIT_PARAM_DFLT(cpu_id, "CPU identification number", 0),
INIT_PARAM_DFLT(mult, "system clock multiplier", 1),
#else
INIT_PARAM(workload, "processes to run"),
#endif // FULL_SYSTEM
INIT_PARAM_DFLT(icache, "L1 instruction cache object", NULL),
- INIT_PARAM_DFLT(dcache, "L1 data cache object", NULL)
+ INIT_PARAM_DFLT(dcache, "L1 data cache object", NULL),
+ INIT_PARAM_DFLT(defer_registration, "defer registration with system "
+ "(for sampling)", false)
END_INIT_SIM_OBJECT_PARAMS(SimpleCPU)
CREATE_SIM_OBJECT(SimpleCPU)
{
+ SimpleCPU *cpu;
#ifdef FULL_SYSTEM
if (mult != 1)
panic("processor clock multiplier must be 1\n");
- return new SimpleCPU(getInstanceName(), system,
- max_insts_any_thread, max_insts_all_threads,
- max_loads_any_thread, max_loads_all_threads,
- itb, dtb, mem,
- (icache) ? icache->getInterface() : NULL,
- (dcache) ? dcache->getInterface() : NULL,
- cpu_id, ticksPerSecond * mult);
+ cpu = new SimpleCPU(getInstanceName(), system,
+ max_insts_any_thread, max_insts_all_threads,
+ max_loads_any_thread, max_loads_all_threads,
+ itb, dtb, mem,
+ (icache) ? icache->getInterface() : NULL,
+ (dcache) ? dcache->getInterface() : NULL,
+ ticksPerSecond * mult);
#else
- return new SimpleCPU(getInstanceName(), workload,
- max_insts_any_thread, max_insts_all_threads,
- max_loads_any_thread, max_loads_all_threads,
- icache->getInterface(), dcache->getInterface());
+ cpu = new SimpleCPU(getInstanceName(), workload,
+ max_insts_any_thread, max_insts_all_threads,
+ max_loads_any_thread, max_loads_all_threads,
+ icache->getInterface(), dcache->getInterface());
#endif // FULL_SYSTEM
+
+ if (!defer_registration) {
+ cpu->registerExecContexts();
+ }
+
+ return cpu;
}
REGISTER_SIM_OBJECT("SimpleCPU", SimpleCPU)
Idle,
IcacheMissStall,
IcacheMissComplete,
- DcacheMissStall
+ DcacheMissStall,
+ SwitchedOut
};
private:
Counter max_loads_any_thread, Counter max_loads_all_threads,
AlphaItb *itb, AlphaDtb *dtb, FunctionalMemory *mem,
MemInterface *icache_interface, MemInterface *dcache_interface,
- int cpu_id, Tick freq);
+ Tick freq);
#else
// execution context
ExecContext *xc;
+ void registerExecContexts();
+
+ void switchOut();
+ void takeOverFrom(BaseCPU *oldCPU);
+
#ifdef FULL_SYSTEM
Addr dbg_vtophys(Addr addr);
CacheCompletionEvent cacheCompletionEvent;
Status status() const { return _status; }
+
virtual void execCtxStatusChg() {
if (xc) {
if (xc->status() == ExecContext::Active)
void setStatus(Status new_status) {
Status old_status = status();
+
+ // We should never even get here if the CPU has been switched out.
+ assert(old_status != SwitchedOut);
+
_status = new_status;
switch (status()) {
int cpu = val;
assert(cpu > 0 && "Must not access primary cpu");
- ExecContext *other_xc = req->xc->system->xcvec[cpu];
+ ExecContext *other_xc = req->xc->system->execContexts[cpu];
other_xc->regs.intRegFile[16] = cpu;
other_xc->regs.ipr[TheISA::IPR_PALtemp16] = cpu;
other_xc->regs.intRegFile[0] = cpu;
}
-void
-Tru64System::init(ExecContext *xc)
+int
+Tru64System::registerExecContext(ExecContext *xc)
{
- xc->regs = *initRegs;
+ int xcIndex = System::registerExecContext(xc);
+
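+ // the first context registered is made Active; later ones start out Unallocated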
+ if (xcIndex == 0) {
+ // xc->regs = *initRegs;
+ xc->initStatus(ExecContext::Active);
+ }
+ else {
+ xc->initStatus(ExecContext::Unallocated);
+ }
+
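+ // give each context its own remote GDB session, listening on port 7000 + its index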
+ RemoteGDB *rgdb = new RemoteGDB(this, xc);
+ GDBListener *gdbl = new GDBListener(rgdb, 7000 + xcIndex);
+ gdbl->listen();
+
+ if (remoteGDB.size() <= xcIndex) {
+ remoteGDB.resize(xcIndex+1);
+ }
+
+ remoteGDB[xcIndex] = rgdb;
+
+ return xcIndex;
+}
- remoteGDB = new RemoteGDB(this, xc);
- gdbListen = new GDBListener(remoteGDB, 7000);
- gdbListen->listen();
- // Reset the system
- //
- TheISA::init(physmem, &xc->regs);
+void
+Tru64System::replaceExecContext(int xcIndex, ExecContext *xc)
+{
+ System::replaceExecContext(xcIndex, xc);
+ remoteGDB[xcIndex]->replaceExecContext(xc);
}
bool
Tru64System::breakpoint()
{
- return remoteGDB->trap(ALPHA_KENTRY_IF);
+ return remoteGDB[0]->trap(ALPHA_KENTRY_IF);
}
BEGIN_DECLARE_SIM_OBJECT_PARAMS(Tru64System)
#ifndef __TRU64_SYSTEM_HH__
#define __TRU64_SYSTEM_HH__
+#include <vector>
+
#include "sim/system.hh"
#include "targetarch/isa_traits.hh"
class Tru64System : public System
{
private:
- ExecContext *xc;
-
EcoffObject *kernel;
EcoffObject *console;
Addr kernelEntry;
public:
- RemoteGDB *remoteGDB;
- GDBListener *gdbListen;
+ std::vector<RemoteGDB *> remoteGDB;
+ std::vector<GDBListener *> gdbListen;
public:
Tru64System(const std::string _name,
const std::string &boot_osflags);
~Tru64System();
- void init(ExecContext *xc);
+ int registerExecContext(ExecContext *xc);
+ void replaceExecContext(int xcIndex, ExecContext *xc);
Addr getKernelStart() const { return kernelStart; }
Addr getKernelEnd() const { return kernelEnd; }
fd_map[i] = -1;
}
- numCpus = 0;
-
num_syscalls = 0;
// other parameters will be initialized when the program is loaded
}
-void
-Process::registerExecContext(ExecContext *ec)
+int
+Process::registerExecContext(ExecContext *xc)
{
- if (execContexts.empty()) {
+ // add to list
+ int myIndex = execContexts.size();
+ execContexts.push_back(xc);
+
+ if (myIndex == 0) {
// first exec context for this process... initialize & enable
// copy process's initial regs struct
- ec->regs = *init_regs;
+ xc->regs = *init_regs;
// mark this context as active
- ec->setStatus(ExecContext::Active);
+ xc->initStatus(ExecContext::Active);
}
else {
- ec->setStatus(ExecContext::Unallocated);
+ xc->initStatus(ExecContext::Unallocated);
}
- // add to list
- execContexts.push_back(ec);
-
- // increment available CPU count
- ++numCpus;
+ // return CPU number (index into execContexts[]) to caller
+ return myIndex;
}
+void
+Process::replaceExecContext(int xcIndex, ExecContext *xc)
+{
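+ // swap a new context into an already-registered slot (e.g., when a new CPU takes over)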
+ if (xcIndex >= execContexts.size()) {
+ panic("replaceExecContext: bad xcIndex, %d >= %d\n",
+ xcIndex, execContexts.size());
+ }
+
+ execContexts[xcIndex] = xc;
+}
+
// map simulator fd sim_fd to target fd tgt_fd
void
Process::dup_fd(int sim_fd, int tgt_fd)
//
#ifndef FULL_SYSTEM
-#include <list>
+#include <vector>
#include "targetarch/isa_traits.hh"
#include "sim/sim_object.hh"
bool initialContextLoaded;
// execution contexts associated with this process
- std::list<ExecContext *> execContexts;
- // number of CPUs assigned to this process: should match number of
- // contexts in execContexts list
- unsigned numCpus;
+ std::vector<ExecContext *> execContexts;
+
+ // number of CPUs (exec contexts, really) assigned to this process.
+ unsigned int numCpus() { return execContexts.size(); }
// record of blocked context
struct WaitRec
// override of virtual SimObject method: register statistics
virtual void regStats();
- // register an execution context for this process
- void registerExecContext(ExecContext *);
+ // register an execution context for this process.
+ // returns xc's cpu number (index into execContexts[])
+ int registerExecContext(ExecContext *xc);
+
+
+ void replaceExecContext(int xcIndex, ExecContext *xc);
// map simulator fd sim_fd to target fd tgt_fd
void dup_fd(int sim_fd, int tgt_fd);
}
-void
+int
System::registerExecContext(ExecContext *xc)
{
- if (xc->cpu_id >= 12/*MAX_CPUS*/)
- panic("Too many CPU's\n");
+ int myIndex = execContexts.size();
+ execContexts.push_back(xc);
+ return myIndex;
+}
+
- if (xc->cpu_id >= xcvec.size())
- xcvec.resize(xc->cpu_id + 1);
+void
+System::replaceExecContext(int xcIndex, ExecContext *xc)
+{
+ if (xcIndex >= execContexts.size()) {
+ panic("replaceExecContext: bad xcIndex, %d >= %d\n",
+ xcIndex, execContexts.size());
+ }
- xcvec[xc->cpu_id] = xc;
+ execContexts[xcIndex] = xc;
}
PCEventQueue pcEventQueue;
- std::vector<ExecContext *> xcvec;
- void registerExecContext(ExecContext *xc);
+ std::vector<ExecContext *> execContexts;
+
+ virtual int registerExecContext(ExecContext *xc);
+ virtual void replaceExecContext(int xcIndex, ExecContext *xc);
public:
System(const std::string _name, const int _init_param,
MemoryController *, PhysicalMemory *);
~System();
- virtual void init(ExecContext *xc) = 0;
-
virtual Addr getKernelStart() const = 0;
virtual Addr getKernelEnd() const = 0;
virtual Addr getKernelEntry() const = 0;