for i in range(3, 15):
assignISAInt(i, i)
-def makeLinuxX86System(mem_mode, numCPUs = 1, mdesc = None, Ruby = False):
+def makeLinuxX86System(mem_mode, options, mdesc = None, Ruby = False):
+ numCPUs = options.num_cpus
self = LinuxX86System()
# Build up the x86 system and then specialize it for Linux
# just to avoid corner cases.
assert(self.physmem.range.second.getValue() >= 0x200000)
+ # set work count options
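+ # Copy any work item options given on the command line into the
+ # corresponding System parameters; unset options keep the System defaults.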
+ if options.work_item_id is not None:
+ self.work_item_id = options.work_item_id
+ if options.work_begin_cpu_id_exit is not None:
+ self.work_begin_cpu_id_exit = options.work_begin_cpu_id_exit
+ if options.work_end_exit_count is not None:
+ self.work_end_exit_count = options.work_end_exit_count
+ if options.work_end_checkpoint_count is not None:
+ self.work_end_ckpt_count = options.work_end_checkpoint_count
+ if options.work_begin_exit_count is not None:
+ self.work_begin_exit_count = options.work_begin_exit_count
+ if options.work_begin_checkpoint_count is not None:
+ self.work_begin_ckpt_count = options.work_begin_checkpoint_count
+ if options.work_cpus_checkpoint_count is not None:
+ self.work_cpus_ckpt_count = options.work_cpus_checkpoint_count
+
# Mark the first megabyte of memory as reserved
self.e820_table.entries.append(X86E820Entry(
addr = 0,
parser.add_option("--maxtime", type="float")
parser.add_option("--maxinsts", type="int")
parser.add_option("--prog_intvl", type="int")
+parser.add_option("--work-item-id", action="store", type="int",
+ help="the specific work id for exit & checkpointing")
+parser.add_option("--work-begin-cpu-id-exit", action="store", type="int",
+ help="exit when work starts on the specified cpu")
+parser.add_option("--work-end-exit-count", action="store", type="int",
+ help="exit at specified work end count")
+parser.add_option("--work-begin-exit-count", action="store", type="int",
+ help="exit at specified work begin count")
# Checkpointing options
help="restore from checkpoint <N>")
parser.add_option("--checkpoint-at-end", action="store_true",
help="take a checkpoint at end of run")
+parser.add_option("--work-begin-checkpoint-count", action="store", type="int",
+ help="checkpoint at specified work begin count")
+parser.add_option("--work-end-checkpoint-count", action="store", type="int",
+ help="checkpoint at specified work end count")
+parser.add_option("--work-cpus-checkpoint-count", action="store", type="int",
+ help="checkpoint and exit when active cpu count is reached")
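+# Note: the work item options above only take effect when the workload is
+# annotated with the m5_work_begin/m5_work_end pseudo ops (see
+# src/sim/pseudo_inst.cc).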
# CPU Switching - default switch model goes from a checkpoint
elif buildEnv['TARGET_ISA'] == "sparc":
test_sys = makeSparcSystem(test_mem_mode, bm[0])
elif buildEnv['TARGET_ISA'] == "x86":
- test_sys = makeLinuxX86System(test_mem_mode, np, bm[0])
+ test_sys = makeLinuxX86System(test_mem_mode, options, bm[0])
elif buildEnv['TARGET_ISA'] == "arm":
test_sys = makeLinuxArmSystem(test_mem_mode, bm[0],
bare_metal=options.bare_metal, machine_type=options.machine_type)
system.piobus,
system.dma_devices)
elif buildEnv['TARGET_ISA'] == "x86":
- system = makeLinuxX86System(test_mem_mode, options.num_cpus, bm[0], True)
+ system = makeLinuxX86System(test_mem_mode, options, bm[0], True)
system.ruby = Ruby.create_system(options,
system,
system.piobus)
0x59: m5reserved5({{
warn("M5 reserved opcode 5 ignored.\n");
}}, IsNonSpeculative);
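+ // Work item annotation hooks: the guest passes the work item id in RDI
+ // and the thread id in RSI.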
+ 0x5a: m5_work_begin({{
+ PseudoInst::workbegin(xc->tcBase(), Rdi, Rsi);
+ }}, IsNonSpeculative);
+ 0x5b: m5_work_end({{
+ PseudoInst::workend(xc->tcBase(), Rdi, Rsi);
+ }}, IsNonSpeculative);
default: Inst::UD2();
}
}
.desc("number of cpu cycles simulated")
;
+ numWorkItemsStarted
+ .name(name() + ".numWorkItemsStarted")
+ .desc("number of work items this cpu started")
+ ;
+
+ numWorkItemsCompleted
+ .name(name() + ".numWorkItemsCompleted")
+ .desc("number of work items this cpu completed")
+ ;
+
int size = threadContexts.size();
if (size > 1) {
for (int i = 0; i < size; ++i) {
inline Tick ticks(int numCycles) const { return clock * numCycles; }
inline Tick curCycle() const { return curTick() / clock; }
inline Tick tickToCycles(Tick val) const { return val / clock; }
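+ // Called by the work item pseudo instructions (PseudoInst::workbegin/workend)
+ // to count the annotated work items seen by this CPU.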
+ inline void workItemBegin() { numWorkItemsStarted++; }
+ inline void workItemEnd() { numWorkItemsCompleted++; }
// @todo remove me after debugging with legion done
Tick instCount() { return instCnt; }
public:
// Number of CPU cycles simulated
Stats::Scalar numCycles;
+ Stats::Scalar numWorkItemsStarted;
+ Stats::Scalar numWorkItemsCompleted;
};
#endif // __CPU_BASE_HH__
TraceFlag('Thread')
TraceFlag('Timer')
TraceFlag('VtoPhys')
+TraceFlag('WorkItems')
physmem = Param.PhysicalMemory(Parent.any, "physical memory")
mem_mode = Param.MemoryMode('atomic', "The mode the memory system is in")
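+ # Work item tracking: a work_item_id of -1 matches every work item, and a
+ # count of 0 disables the corresponding exit/checkpoint trigger.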
+ work_item_id = Param.Int(-1, "specific work item id")
+ work_begin_cpu_id_exit = Param.Int(-1,
+ "exit simulation when work begins on the specified cpu id")
+ work_begin_ckpt_count = Param.Counter(0,
+ "create checkpoint when work items begin count value is reached")
+ work_begin_exit_count = Param.Counter(0,
+ "exit simulation when work items begin count value is reached")
+ work_end_ckpt_count = Param.Counter(0,
+ "create checkpoint when work items end count value is reached")
+ work_end_exit_count = Param.Counter(0,
+ "exit simulation when work items end count value is reached")
+ work_cpus_ckpt_count = Param.Counter(0,
+ "create checkpoint when active cpu count value is reached")
+
if buildEnv['FULL_SYSTEM']:
abstract = True
boot_cpu_frequency = Param.Frequency(Self.cpu[0].clock.frequency,
exitSimLoop("switchcpu");
}
+//
+// This function is executed when annotated work items begin. Depending on
+// what the user specified at the command line, the simulation may exit and/or
+// take a checkpoint when a certain work item begins.
+//
+void
+workbegin(ThreadContext *tc, uint64_t workid, uint64_t threadid)
+{
+ tc->getCpuPtr()->workItemBegin();
+ System *sys = tc->getSystemPtr();
+
+ DPRINTF(WorkItems, "Work Begin workid: %d, threadid %d\n", workid,
+ threadid);
+
+ //
+ // If specified, determine if this is the specific work item the user
+ // identified
+ //
+ if (sys->params()->work_item_id == -1 ||
+ sys->params()->work_item_id == workid) {
+
+ uint64_t systemWorkBeginCount = sys->incWorkItemsBegin();
+ int cpuId = tc->getCpuPtr()->cpuId();
+
+ if (sys->params()->work_cpus_ckpt_count != 0 &&
+ sys->markWorkItem(cpuId) >= sys->params()->work_cpus_ckpt_count) {
+ //
+ // If the number of active cpus reaches the checkpoint count, create a checkpoint
+ //
+ Event *event = new SimLoopExitEvent("checkpoint", 0);
+ mainEventQueue.schedule(event, curTick());
+ }
+
+ if (sys->params()->work_begin_ckpt_count != 0 &&
+ systemWorkBeginCount == sys->params()->work_begin_ckpt_count) {
+ //
+ // Note: the string specified as the cause of the exit event must
+ // exactly equal "checkpoint" in order to create a checkpoint
+ //
+ Event *event = new SimLoopExitEvent("checkpoint", 0);
+ mainEventQueue.schedule(event, curTick());
+ }
+
+ if (sys->params()->work_begin_exit_count != 0 &&
+ systemWorkBeginCount == sys->params()->work_begin_exit_count) {
+ //
+ // If a certain number of work items started, exit simulation
+ //
+ Event *event = new SimLoopExitEvent("work started count reached", 0);
+ mainEventQueue.schedule(event, curTick());
+ }
+
+ if (tc->getCpuPtr()->cpuId() == sys->params()->work_begin_cpu_id_exit) {
+ //
+ // If work started on the specified cpu id, exit the simulation
+ //
+ Event *event = new SimLoopExitEvent("work started on specific cpu",
+ 0);
+
+ mainEventQueue.schedule(event, curTick() + 1);
+ }
+ }
+}
+
+//
+// This function is executed when annotated work items end. Depending on
+// what the user specified at the command line, the simulation may exit and/or
+// take a checkpoint when a certain work item ends.
+//
+void
+workend(ThreadContext *tc, uint64_t workid, uint64_t threadid)
+{
+ tc->getCpuPtr()->workItemEnd();
+ System *sys = tc->getSystemPtr();
+
+ DPRINTF(WorkItems, "Work End workid: %d, threadid %d\n", workid, threadid);
+
+ //
+ // If specified, determine if this is the specific work item the user
+ // identified
+ //
+ if (sys->params()->work_item_id == -1 ||
+ sys->params()->work_item_id == workid) {
+
+ uint64_t systemWorkEndCount = sys->incWorkItemsEnd();
+ int cpuId = tc->getCpuPtr()->cpuId();
+
+ if (sys->params()->work_cpus_ckpt_count != 0 &&
+ sys->markWorkItem(cpuId) >= sys->params()->work_cpus_ckpt_count) {
+ //
+ // If the number of active cpus reaches the checkpoint count, create a checkpoint
+ //
+ Event *event = new SimLoopExitEvent("checkpoint", 0);
+ mainEventQueue.schedule(event, curTick());
+ }
+
+ if (sys->params()->work_end_ckpt_count != 0 &&
+ systemWorkEndCount == sys->params()->work_end_ckpt_count) {
+ //
+ // If total work items completed equals checkpoint count, create
+ // checkpoint
+ //
+ Event *event = new SimLoopExitEvent("checkpoint", 0);
+ mainEventQueue.schedule(event, curTick());
+ }
+
+ if (sys->params()->work_end_exit_count != 0 &&
+ systemWorkEndCount == sys->params()->work_end_exit_count) {
+ //
+ // If total work items completed equals exit count, exit simulation
+ //
+ Event *event = new SimLoopExitEvent("work items exit count reached",
+ 0);
+
+ mainEventQueue.schedule(event, curTick());
+ }
+ }
+}
+
} // namespace PseudoInst
void m5checkpoint(ThreadContext *tc, Tick delay, Tick period);
void debugbreak(ThreadContext *tc);
void switchcpu(ThreadContext *tc);
+void workbegin(ThreadContext *tc, uint64_t workid, uint64_t threadid);
+void workend(ThreadContext *tc, uint64_t workid, uint64_t threadid);
} // namespace PseudoInst
pagePtr(0),
nextPID(0),
#endif
- memoryMode(p->mem_mode), _params(p),
+ memoryMode(p->mem_mode),
+ workItemsBegin(0),
+ workItemsEnd(0),
+ _params(p),
totalNumInsts(0),
instEventQueue("system instruction-based event queue")
{
// increment the number of running systems
numSystemsRunning++;
+
+ activeCpus.clear();
}
System::~System()
remoteGDB[id] = rgdb;
}
+ activeCpus.push_back(false);
+
return id;
}
protected:
Enums::MemoryMode memoryMode;
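+ // Work item tracking state: running begin/end counts plus a per-cpu flag
+ // marking which cpus have touched a work item.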
+ uint64_t workItemsBegin;
+ uint64_t workItemsEnd;
+ std::vector<bool> activeCpus;
+
+ public:
+ /**
+ * Called by pseudo_inst to track the number of work items started by this
+ * system.
+ */
+ uint64_t
+ incWorkItemsBegin()
+ {
+ return ++workItemsBegin;
+ }
+
+ /**
+ * Called by pseudo_inst to track the number of work items completed by
+ * this system.
+ */
+ uint64_t
+ incWorkItemsEnd()
+ {
+ return ++workItemsEnd;
+ }
+
+ /**
+ * Called by pseudo_inst to mark the cpus actively executing work items.
+ * Returns the total number of cpus that have executed a work item begin
+ * or end.
+ */
+ int
+ markWorkItem(int index)
+ {
+ int count = 0;
+ assert(index < activeCpus.size());
+ activeCpus[index] = true;
+ for (std::vector<bool>::iterator i = activeCpus.begin();
+ i != activeCpus.end(); ++i) {
+ if (*i) count++;
+ }
+ return count;
+ }
#if FULL_SYSTEM
/**
TWO_BYTE_OP(m5_switchcpu, switchcpu_func)
TWO_BYTE_OP(m5_addsymbol, addsymbol_func)
TWO_BYTE_OP(m5_panic, panic_func)
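+/* Guest-side stubs for the new work item ops; per the x86-64 calling
+   convention the work item id and thread id are already in %rdi and %rsi. */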
+TWO_BYTE_OP(m5_work_begin, work_begin_func)
+TWO_BYTE_OP(m5_work_end, work_end_func)
#define reserved4_func 0x58 // Reserved for user
#define reserved5_func 0x59 // Reserved for user
+#define work_begin_func 0x5a
+#define work_end_func 0x5b
+
// These operations are for critical path annotation
#define annotate_func 0x55
#define an_bsm 0x1
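
For reference, a minimal sketch of how a guest workload might use the new annotation ops, assuming the m5 utility library (util/m5, header m5op.h) is built for x86 and linked into the benchmark; do_work() is a hypothetical application kernel:

    #include <stdint.h>
    #include "m5op.h"                /* declares m5_work_begin()/m5_work_end() */

    static void do_work(void) { /* hypothetical region of interest */ }

    int main(void)
    {
        uint64_t workid = 0;         /* matched against --work-item-id, if given */
        uint64_t threadid = 0;

        m5_work_begin(workid, threadid);   /* counted by workbegin() above */
        do_work();
        m5_work_end(workid, threadid);     /* counted by workend() above */
        return 0;
    }

Exit and checkpoint behavior for this begin/end pair then follows the --work-*-count options added above.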