inorder: treat SE mode syscalls as a trapping instruction
[gem5.git] / src / cpu / inorder / cpu.cc
1 /*
2 * Copyright (c) 2007 MIPS Technologies, Inc.
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 *
28 * Authors: Korey Sewell
29 *
30 */
31
32 #include <algorithm>
33
34 #include "arch/utility.hh"
35 #include "base/bigint.hh"
36 #include "config/full_system.hh"
37 #include "config/the_isa.hh"
38 #include "cpu/inorder/resources/resource_list.hh"
39 #include "cpu/inorder/cpu.hh"
40 #include "cpu/inorder/first_stage.hh"
41 #include "cpu/inorder/inorder_dyn_inst.hh"
42 #include "cpu/inorder/pipeline_traits.hh"
43 #include "cpu/inorder/resource_pool.hh"
44 #include "cpu/inorder/thread_context.hh"
45 #include "cpu/inorder/thread_state.hh"
46 #include "cpu/activity.hh"
47 #include "cpu/base.hh"
48 #include "cpu/exetrace.hh"
49 #include "cpu/simple_thread.hh"
50 #include "cpu/thread_context.hh"
51 #include "debug/Activity.hh"
52 #include "debug/InOrderCPU.hh"
53 #include "debug/RefCount.hh"
54 #include "debug/SkedCache.hh"
55 #include "mem/translating_port.hh"
56 #include "params/InOrderCPU.hh"
57 #include "sim/process.hh"
58 #include "sim/stat_control.hh"
59
60 #if FULL_SYSTEM
61 #include "cpu/quiesce_event.hh"
62 #include "sim/system.hh"
63 #endif
64
65 #if THE_ISA == ALPHA_ISA
66 #include "arch/alpha/osfpal.hh"
67 #endif
68
69 using namespace std;
70 using namespace TheISA;
71 using namespace ThePipeline;
72
73 InOrderCPU::TickEvent::TickEvent(InOrderCPU *c)
74 : Event(CPU_Tick_Pri), cpu(c)
75 { }
76
77
78 void
79 InOrderCPU::TickEvent::process()
80 {
81 cpu->tick();
82 }
83
84
85 const char *
86 InOrderCPU::TickEvent::description()
87 {
88 return "InOrderCPU tick event";
89 }
90
91 InOrderCPU::CPUEvent::CPUEvent(InOrderCPU *_cpu, CPUEventType e_type,
92 Fault fault, ThreadID _tid, DynInstPtr inst,
93 CPUEventPri event_pri)
94 : Event(event_pri), cpu(_cpu)
95 {
96 setEvent(e_type, fault, _tid, inst);
97 }
98
99
// Printable names for CPU events, used by DPRINTF/fatal messages in
// CPUEvent::process().
// NOTE: entry order must match the CPUEventType enum declaration
// (indexed directly by cpuEventType).
std::string InOrderCPU::eventNames[NumCPUEvents] =
{
    "ActivateThread",
    "ActivateNextReadyThread",
    "DeactivateThread",
    "HaltThread",
    "SuspendThread",
    "Trap",
    "Syscall",
    "SquashFromMemStall",
    "UpdatePCs"
};
112
// Dispatch a scheduled CPU event.  Most events are applied twice: once to
// the CPU proper and once to the ResourcePool so pipeline resources stay
// in sync with thread state.  After processing, the event is queued for
// deferred deletion (see cleanUpRemovedEvents()).
void
InOrderCPU::CPUEvent::process()
{
    switch (cpuEventType)
    {
      case ActivateThread:
        cpu->activateThread(tid);
        cpu->resPool->activateThread(tid);
        break;

      case ActivateNextReadyThread:
        cpu->activateNextReadyThread();
        break;

      case DeactivateThread:
        cpu->deactivateThread(tid);
        cpu->resPool->deactivateThread(tid);
        break;

      case HaltThread:
        cpu->haltThread(tid);
        cpu->resPool->deactivateThread(tid);
        break;

      case SuspendThread:
        cpu->suspendThread(tid);
        cpu->resPool->suspendThread(tid);
        break;

      case SquashFromMemStall:
        cpu->squashDueToMemStall(inst->squashingStage, inst->seqNum, tid);
        cpu->resPool->squashDueToMemStall(inst, inst->squashingStage,
                                          inst->seqNum, tid);
        break;

      case Trap:
        DPRINTF(InOrderCPU, "Trapping CPU\n");
        cpu->trap(fault, tid, inst);
        cpu->resPool->trap(fault, tid, inst);
        break;

      case Syscall:
        // SE-mode syscalls are handled like traps: the CPU emulates the
        // syscall while the resource pool performs its trap handling.
        cpu->syscall(inst->syscallNum, tid);
        cpu->resPool->trap(fault, tid, inst);
        break;

      default:
        fatal("Unrecognized Event Type %s", eventNames[cpuEventType]);
    }

    // Defer deletion until the CPU's cleanup pass; deleting here would
    // invalidate the event while the event queue may still reference it.
    cpu->cpuEventRemoveList.push(this);
}
165
166
167
168 const char *
169 InOrderCPU::CPUEvent::description()
170 {
171 return "InOrderCPU event";
172 }
173
174 void
175 InOrderCPU::CPUEvent::scheduleEvent(int delay)
176 {
177 assert(!scheduled() || squashed());
178 cpu->reschedule(this, cpu->nextCycle(curTick() + cpu->ticks(delay)), true);
179 }
180
181 void
182 InOrderCPU::CPUEvent::unscheduleEvent()
183 {
184 if (scheduled())
185 squash();
186 }
187
// Construct the in-order CPU model: binds memory ports from the resource
// pool, creates per-thread state and thread contexts, wires up the
// pipeline stages and their inter-stage queues, initializes per-thread
// register/sequence state, and schedules the first tick event.
InOrderCPU::InOrderCPU(Params *params)
    : BaseCPU(params),
      cpu_id(params->cpu_id),
      coreType("default"),
      _status(Idle),
      tickEvent(this),
      stageWidth(params->stageWidth),
      timeBuffer(2 , 2),
      removeInstsThisCycle(false),
      activityRec(params->name, NumStages, 10, params->activity),
#if FULL_SYSTEM
      system(params->system),
      physmem(system->physmem),
#endif // FULL_SYSTEM
#ifdef DEBUG
      cpuEventNum(0),
      resReqCount(0),
#endif // DEBUG
      switchCount(0),
      deferRegistration(false/*params->deferRegistration*/),
      stageTracing(params->stageTracing),
      instsPerSwitch(0)
{
    ThreadID active_threads;
    cpu_params = params;

    resPool = new ResourcePool(this, params);

    // Resize for Multithreading CPUs
    thread.resize(numThreads);

#if FULL_SYSTEM
    active_threads = 1;
#else
    active_threads = params->workload.size();

    if (active_threads > MaxThreads) {
        // NOTE(review): adjacent literals concatenate without a space:
        // "...'MaxThreads'in your..." — message typo, behavior unaffected.
        panic("Workload Size too large. Increase the 'MaxThreads'"
              "in your InOrder implementation or "
              "edit your workload size.");
    }


    // Choose the threading model: explicit SMT / switch-on-miss only when
    // the workload actually has more than one thread.
    if (active_threads > 1) {
        threadModel = (InOrderCPU::ThreadModel) params->threadModel;

        if (threadModel == SMT) {
            DPRINTF(InOrderCPU, "Setting Thread Model to SMT.\n");
        } else if (threadModel == SwitchOnCacheMiss) {
            DPRINTF(InOrderCPU, "Setting Thread Model to "
                    "Switch On Cache Miss\n");
        }

    } else {
        threadModel = Single;
    }



#endif

    // Bind the fetch & data ports from the resource pool.
    // getPortIdx() returning 0 indicates the named port was not found.
    fetchPortIdx = resPool->getPortIdx(params->fetchMemPort);
    if (fetchPortIdx == 0) {
        fatal("Unable to find port to fetch instructions from.\n");
    }

    dataPortIdx = resPool->getPortIdx(params->dataMemPort);
    if (dataPortIdx == 0) {
        fatal("Unable to find port for data.\n");
    }

    for (ThreadID tid = 0; tid < numThreads; ++tid) {
#if FULL_SYSTEM
        // SMT is not supported in FS mode yet.
        assert(numThreads == 1);
        thread[tid] = new Thread(this, 0);
#else
        if (tid < (ThreadID)params->workload.size()) {
            DPRINTF(InOrderCPU, "Workload[%i] process is %#x\n",
                    tid, params->workload[tid]->prog_fname);
            thread[tid] =
                new Thread(this, tid, params->workload[tid]);
        } else {
            //Allocate Empty thread so M5 can use later
            //when scheduling threads to CPU
            Process* dummy_proc = params->workload[0];
            thread[tid] = new Thread(this, tid, dummy_proc);
        }

        // Eventually set this with parameters...
        asid[tid] = tid;
#endif

        // Setup the TC that will serve as the interface to the threads/CPU.
        InOrderThreadContext *tc = new InOrderThreadContext;
        tc->cpu = this;
        tc->thread = thread[tid];

        // Give the thread the TC.
        thread[tid]->tc = tc;
        thread[tid]->setFuncExeInst(0);
        globalSeqNum[tid] = 1;

        // Add the TC to the CPU's list of TC's.
        this->threadContexts.push_back(tc);
    }

    // Initialize TimeBuffer Stage Queues
    // (one queue between each adjacent pair of stages: NumStages - 1 total)
    for (int stNum=0; stNum < NumStages - 1; stNum++) {
        stageQueue[stNum] = new StageQueue(NumStages, NumStages);
        stageQueue[stNum]->id(stNum);
    }


    // Set Up Pipeline Stages: stage 0 is the special FirstStage, the rest
    // are generic PipelineStages.
    for (int stNum=0; stNum < NumStages; stNum++) {
        if (stNum == 0)
            pipelineStage[stNum] = new FirstStage(params, stNum);
        else
            pipelineStage[stNum] = new PipelineStage(params, stNum);

        pipelineStage[stNum]->setCPU(this);
        pipelineStage[stNum]->setActiveThreads(&activeThreads);
        pipelineStage[stNum]->setTimeBuffer(&timeBuffer);

        // Take Care of 1st/Nth stages: the first stage has no previous
        // queue and the last stage has no next queue.
        if (stNum > 0)
            pipelineStage[stNum]->setPrevStageQueue(stageQueue[stNum - 1]);
        if (stNum < NumStages - 1)
            pipelineStage[stNum]->setNextStageQueue(stageQueue[stNum]);
    }

    // Initialize thread specific variables
    for (ThreadID tid = 0; tid < numThreads; tid++) {
        archRegDepMap[tid].setCPU(this);

        nonSpecInstActive[tid] = false;
        nonSpecSeqNum[tid] = 0;

        squashSeqNum[tid] = MaxAddr;
        lastSquashCycle[tid] = 0;

        // Zero the architectural register files and reset ISA state.
        memset(intRegs[tid], 0, sizeof(intRegs[tid]));
        memset(floatRegs.i[tid], 0, sizeof(floatRegs.i[tid]));
        isa[tid].clear();

        // Define dummy instructions and resource requests to be used.
        dummyInst[tid] = new InOrderDynInst(this,
                                            thread[tid],
                                            0,
                                            tid,
                                            asid[tid]);

        dummyReq[tid] = new ResourceRequest(resPool->getResource(0));
    }

    // Placeholder instructions used where a real DynInst is not available.
    dummyReqInst = new InOrderDynInst(this, NULL, 0, 0, 0);
    dummyReqInst->setSquashed();
    dummyReqInst->resetInstCount();

    dummyBufferInst = new InOrderDynInst(this, NULL, 0, 0, 0);
    dummyBufferInst->setSquashed();
    dummyBufferInst->resetInstCount();

    // The front-end (fetch/decode) schedule is shared by all instructions.
    endOfSkedIt = skedCache.end();
    frontEndSked = createFrontEndSked();

    lastRunningCycle = curTick();

    // Reset CPU to reset state.
#if FULL_SYSTEM
    Fault resetFault = new ResetFault();
    resetFault->invoke(tcBase());
#endif


    // Schedule First Tick Event, CPU will reschedule itself from here on out.
    scheduleTickEvent(0);
}
368
369 InOrderCPU::~InOrderCPU()
370 {
371 delete resPool;
372
373 SkedCacheIt sked_it = skedCache.begin();
374 SkedCacheIt sked_end = skedCache.end();
375
376 while (sked_it != sked_end) {
377 delete (*sked_it).second;
378 sked_it++;
379 }
380 skedCache.clear();
381 }
382
// Definition of the static cache of back-end resource schedules, keyed by
// sked ID; shared across all InOrderCPU instances in the simulation.
m5::hash_map<InOrderCPU::SkedID, ThePipeline::RSkedPtr> InOrderCPU::skedCache;
384
385 RSkedPtr
386 InOrderCPU::createFrontEndSked()
387 {
388 RSkedPtr res_sked = new ResourceSked();
389 int stage_num = 0;
390 StageScheduler F(res_sked, stage_num++);
391 StageScheduler D(res_sked, stage_num++);
392
393 // FETCH
394 F.needs(FetchSeq, FetchSeqUnit::AssignNextPC);
395 F.needs(ICache, FetchUnit::InitiateFetch);
396
397 // DECODE
398 D.needs(ICache, FetchUnit::CompleteFetch);
399 D.needs(Decode, DecodeUnit::DecodeInst);
400 D.needs(BPred, BranchPredictor::PredictBranch);
401 D.needs(FetchSeq, FetchSeqUnit::UpdateTargetPC);
402
403
404 DPRINTF(SkedCache, "Resource Sked created for instruction \"front_end\"\n");
405
406 return res_sked;
407 }
408
// Build (or fetch from the cache) the back-end resource schedule for an
// instruction: which resources it needs in the execute (X), memory (M),
// and writeback (W) stages.  Non-speculative instructions defer all of
// their work to the writeback stage so they execute in order at the head
// of the pipeline.  Returns NULL if the instruction has no static-inst
// object to schedule from.
RSkedPtr
InOrderCPU::createBackEndSked(DynInstPtr inst)
{
    // Reuse a previously built schedule for this instruction type if one
    // is cached.
    RSkedPtr res_sked = lookupSked(inst);
    if (res_sked != NULL) {
        DPRINTF(SkedCache, "Found %s in sked cache.\n",
                inst->instName());
        return res_sked;
    } else {
        res_sked = new ResourceSked();
    }

    int stage_num = ThePipeline::BackEndStartStage;
    StageScheduler X(res_sked, stage_num++);
    StageScheduler M(res_sked, stage_num++);
    StageScheduler W(res_sked, stage_num++);

    if (!inst->staticInst) {
        warn_once("Static Instruction Object Not Set. Can't Create"
                  " Back End Schedule");
        return NULL;
    }

    // EXECUTE: mark destination registers, then read source registers.
    // For stores, only the first source (the address base) is read here;
    // the data sources are read later in the memory stage.
    X.needs(RegManager, UseDefUnit::MarkDestRegs);
    for (int idx=0; idx < inst->numSrcRegs(); idx++) {
        if (!idx || !inst->isStore()) {
            X.needs(RegManager, UseDefUnit::ReadSrcReg, idx);
        }
    }

    //@todo: schedule non-spec insts to operate on this cycle
    // as long as all previous insts are done
    if ( inst->isNonSpeculative() ) {
        // skip execution of non speculative insts until later
    } else if ( inst->isMemRef() ) {
        if ( inst->isLoad() ) {
            X.needs(AGEN, AGENUnit::GenerateAddr);
        }
    } else if (inst->opClass() == IntMultOp || inst->opClass() == IntDivOp) {
        X.needs(MDU, MultDivUnit::StartMultDiv);
    } else {
        X.needs(ExecUnit, ExecutionUnit::ExecuteInst);
    }

    // MEMORY: finish multi-cycle mult/div, and initiate cache accesses
    // (including the second access of a split transfer).
    if (!inst->isNonSpeculative()) {
        if (inst->opClass() == IntMultOp || inst->opClass() == IntDivOp) {
            M.needs(MDU, MultDivUnit::EndMultDiv);
        }

        if ( inst->isLoad() ) {
            M.needs(DCache, CacheUnit::InitiateReadData);
            if (inst->splitInst)
                M.needs(DCache, CacheUnit::InitSecondSplitRead);
        } else if ( inst->isStore() ) {
            // Stores read their data sources (indices >= 1) here, then
            // generate the address and start the write.
            for (int i = 1; i < inst->numSrcRegs(); i++ ) {
                M.needs(RegManager, UseDefUnit::ReadSrcReg, i);
            }
            M.needs(AGEN, AGENUnit::GenerateAddr);
            M.needs(DCache, CacheUnit::InitiateWriteData);
            if (inst->splitInst)
                M.needs(DCache, CacheUnit::InitSecondSplitWrite);
        }
    }

    // WRITEBACK: complete cache accesses for speculative memory ops; for
    // non-speculative instructions, perform the whole operation here.
    if (!inst->isNonSpeculative()) {
        if ( inst->isLoad() ) {
            W.needs(DCache, CacheUnit::CompleteReadData);
            if (inst->splitInst)
                W.needs(DCache, CacheUnit::CompleteSecondSplitRead);
        } else if ( inst->isStore() ) {
            W.needs(DCache, CacheUnit::CompleteWriteData);
            if (inst->splitInst)
                W.needs(DCache, CacheUnit::CompleteSecondSplitWrite);
        }
    } else {
        // Finally, Execute Speculative Data
        if (inst->isMemRef()) {
            if (inst->isLoad()) {
                W.needs(AGEN, AGENUnit::GenerateAddr);
                W.needs(DCache, CacheUnit::InitiateReadData);
                if (inst->splitInst)
                    W.needs(DCache, CacheUnit::InitSecondSplitRead);
                W.needs(DCache, CacheUnit::CompleteReadData);
                if (inst->splitInst)
                    W.needs(DCache, CacheUnit::CompleteSecondSplitRead);
            } else if (inst->isStore()) {
                if ( inst->numSrcRegs() >= 2 ) {
                    W.needs(RegManager, UseDefUnit::ReadSrcReg, 1);
                }
                W.needs(AGEN, AGENUnit::GenerateAddr);
                W.needs(DCache, CacheUnit::InitiateWriteData);
                if (inst->splitInst)
                    W.needs(DCache, CacheUnit::InitSecondSplitWrite);
                W.needs(DCache, CacheUnit::CompleteWriteData);
                if (inst->splitInst)
                    W.needs(DCache, CacheUnit::CompleteSecondSplitWrite);
            }
        } else {
            W.needs(ExecUnit, ExecutionUnit::ExecuteInst);
        }
    }

    // Every instruction graduates, writes back its destinations, and —
    // if it is a control instruction — updates the branch predictor.
    W.needs(Grad, GraduationUnit::GraduateInst);

    for (int idx=0; idx < inst->numDestRegs(); idx++) {
        W.needs(RegManager, UseDefUnit::WriteDestReg, idx);
    }

    if (inst->isControl())
        W.needs(BPred, BranchPredictor::UpdatePredictor);

    // Insert Back Schedule into our cache of
    // resource schedules
    addToSkedCache(inst, res_sked);

    DPRINTF(SkedCache, "Back End Sked Created for instruction: %s (%08p)\n",
            inst->instName(), inst->getMachInst());
    res_sked->print();

    return res_sked;
}
533
// Register all simulator statistics: the resource pool's, each pipeline
// stage's, and the CPU-level counters and derived formulas (CPI/IPC,
// activity, per-thread and SMT variants).
void
InOrderCPU::regStats()
{
    /* Register the Resource Pool's stats here.*/
    resPool->regStats();

    /* Register for each Pipeline Stage */
    for (int stage_num=0; stage_num < ThePipeline::NumStages; stage_num++) {
        pipelineStage[stage_num]->regStats();
    }

    /* Register any of the InOrderCPU's stats here.*/
    instsPerCtxtSwitch
        .name(name() + ".instsPerContextSwitch")
        .desc("Instructions Committed Per Context Switch")
        .prereq(instsPerCtxtSwitch);

    numCtxtSwitches
        .name(name() + ".contextSwitches")
        .desc("Number of context switches");

    // Committed-instruction breakdown by category.
    comLoads
        .name(name() + ".comLoads")
        .desc("Number of Load instructions committed");

    comStores
        .name(name() + ".comStores")
        .desc("Number of Store instructions committed");

    comBranches
        .name(name() + ".comBranches")
        .desc("Number of Branches instructions committed");

    comNops
        .name(name() + ".comNops")
        .desc("Number of Nop instructions committed");

    comNonSpec
        .name(name() + ".comNonSpec")
        .desc("Number of Non-Speculative instructions committed");

    comInts
        .name(name() + ".comInts")
        .desc("Number of Integer instructions committed");

    comFloats
        .name(name() + ".comFloats")
        .desc("Number of Floating Point instructions committed");

    // Idle/active cycle accounting (updated in tick()).
    timesIdled
        .name(name() + ".timesIdled")
        .desc("Number of times that the entire CPU went into an idle state and"
              " unscheduled itself")
        .prereq(timesIdled);

    idleCycles
        .name(name() + ".idleCycles")
        .desc("Number of cycles cpu's stages were not processed");

    runCycles
        .name(name() + ".runCycles")
        .desc("Number of cycles cpu stages are processed.");

    activity
        .name(name() + ".activity")
        .desc("Percentage of cycles cpu is active")
        .precision(6);
    activity = (runCycles / numCycles) * 100;

    threadCycles
        .init(numThreads)
        .name(name() + ".threadCycles")
        .desc("Total Number of Cycles A Thread Was Active in CPU (Per-Thread)");

    smtCycles
        .name(name() + ".smtCycles")
        .desc("Total number of cycles that the CPU was in SMT-mode");

    committedInsts
        .init(numThreads)
        .name(name() + ".committedInsts")
        .desc("Number of Instructions Simulated (Per-Thread)");

    smtCommittedInsts
        .init(numThreads)
        .name(name() + ".smtCommittedInsts")
        .desc("Number of SMT Instructions Simulated (Per-Thread)");

    totalCommittedInsts
        .name(name() + ".committedInsts_total")
        .desc("Number of Instructions Simulated (Total)");

    // Derived CPI/IPC formulas, per-thread, SMT-only, and totals.
    cpi
        .name(name() + ".cpi")
        .desc("CPI: Cycles Per Instruction (Per-Thread)")
        .precision(6);
    cpi = numCycles / committedInsts;

    smtCpi
        .name(name() + ".smt_cpi")
        .desc("CPI: Total SMT-CPI")
        .precision(6);
    smtCpi = smtCycles / smtCommittedInsts;

    totalCpi
        .name(name() + ".cpi_total")
        .desc("CPI: Total CPI of All Threads")
        .precision(6);
    totalCpi = numCycles / totalCommittedInsts;

    ipc
        .name(name() + ".ipc")
        .desc("IPC: Instructions Per Cycle (Per-Thread)")
        .precision(6);
    ipc =  committedInsts / numCycles;

    smtIpc
        .name(name() + ".smt_ipc")
        .desc("IPC: Total SMT-IPC")
        .precision(6);
    smtIpc = smtCommittedInsts / smtCycles;

    totalIpc
        .name(name() + ".ipc_total")
        .desc("IPC: Total IPC of All Threads")
        .precision(6);
    totalIpc =  totalCommittedInsts / numCycles;

    BaseCPU::regStats();
}
664
665
// Advance the CPU by one cycle: tick every pipeline stage, advance the
// time buffers, clean up squashed events/instructions, and reschedule the
// next tick unless the CPU is switched out or idle.
void
InOrderCPU::tick()
{
    DPRINTF(InOrderCPU, "\n\nInOrderCPU: Ticking main, InOrderCPU.\n");

    ++numCycles;

    bool pipes_idle = true;

    //Tick each of the stages
    // (iterated from the last stage down to the first)
    for (int stNum=NumStages - 1; stNum >= 0 ; stNum--) {
        pipelineStage[stNum]->tick();

        pipes_idle = pipes_idle && pipelineStage[stNum]->idle;
    }

    // Account this cycle as idle only if every stage reported idle.
    if (pipes_idle)
        idleCycles++;
    else
        runCycles++;

    // Now advance the time buffers one tick
    timeBuffer.advance();
    for (int sqNum=0; sqNum < NumStages - 1; sqNum++) {
        stageQueue[sqNum]->advance();
    }
    activityRec.advance();

    // Any squashed events, or insts then remove them now
    cleanUpRemovedEvents();
    cleanUpRemovedInsts();

    // Re-schedule CPU for this cycle
    if (!tickEvent.scheduled()) {
        if (_status == SwitchedOut) {
            // increment stat
            lastRunningCycle = curTick();
        } else if (!activityRec.active()) {
            // No activity recorded: go to sleep; wakeCPU() will restart us.
            DPRINTF(InOrderCPU, "sleeping CPU.\n");
            lastRunningCycle = curTick();
            timesIdled++;
        } else {
            //Tick next_tick = curTick() + cycles(1);
            //tickEvent.schedule(next_tick);
            schedule(&tickEvent, nextCycle(curTick() + 1));
            DPRINTF(InOrderCPU, "Scheduled CPU for next tick @ %i.\n",
                    nextCycle(curTick() + 1));
        }
    }

    tickThreadStats();
    updateThreadPriority();
}
719
720
// Post-construction initialization: register thread contexts, initialize
// architectural state (FS mode), and initialize the resource pool.  The
// inSyscall flag is held while registers are being set up so the CPU does
// not squash on those writes.
void
InOrderCPU::init()
{
    if (!deferRegistration) {
        registerThreadContexts();
    }

    // Set inSyscall so that the CPU doesn't squash when initially
    // setting up registers.
    for (ThreadID tid = 0; tid < numThreads; ++tid)
        thread[tid]->inSyscall = true;

#if FULL_SYSTEM
    for (ThreadID tid = 0; tid < numThreads; tid++) {
        ThreadContext *src_tc = threadContexts[tid];
        TheISA::initCPU(src_tc, src_tc->contextId());
    }
#endif

    // Clear inSyscall.
    for (ThreadID tid = 0; tid < numThreads; ++tid)
        thread[tid]->inSyscall = false;

    // Call Initializiation Routine for Resource Pool
    resPool->init();
}
747
748 Port*
749 InOrderCPU::getPort(const std::string &if_name, int idx)
750 {
751 return resPool->getPort(if_name, idx);
752 }
753
754 #if FULL_SYSTEM
755 Fault
756 InOrderCPU::hwrei(ThreadID tid)
757 {
758 panic("hwrei: Unimplemented");
759
760 return NoFault;
761 }
762
763
764 bool
765 InOrderCPU::simPalCheck(int palFunc, ThreadID tid)
766 {
767 panic("simPalCheck: Unimplemented");
768
769 return true;
770 }
771
772
773 Fault
774 InOrderCPU::getInterrupts()
775 {
776 // Check if there are any outstanding interrupts
777 return interrupts->getInterrupt(threadContexts[0]);
778 }
779
780
// Handle a pending interrupt by invoking it as a trap on thread 0's
// context.
void
InOrderCPU::processInterrupts(Fault interrupt)
{
    // Check for interrupts here.  For now can copy the code that
    // exists within isa_fullsys_traits.hh.  Also assume that thread 0
    // is the one that handles the interrupts.
    // @todo: Possibly consolidate the interrupt checking code.
    // @todo: Allow other threads to handle interrupts.

    assert(interrupt != NoFault);
    interrupts->updateIntrInfo(threadContexts[0]);

    DPRINTF(InOrderCPU, "Interrupt %s being handled\n", interrupt->name());

    // Note: Context ID ok here? Impl. of FS mode needs to revisit this
    trap(interrupt, threadContexts[0]->contextId(), dummyBufferInst);
}
798
799
800 void
801 InOrderCPU::updateMemPorts()
802 {
803 // Update all ThreadContext's memory ports (Functional/Virtual
804 // Ports)
805 ThreadID size = thread.size();
806 for (ThreadID i = 0; i < size; ++i)
807 thread[i]->connectMemPorts(thread[i]->getTC());
808 }
809 #endif
810
811 void
812 InOrderCPU::trapContext(Fault fault, ThreadID tid, DynInstPtr inst, int delay)
813 {
814 scheduleCpuEvent(Trap, fault, tid, inst, delay);
815 }
816
817 void
818 InOrderCPU::trap(Fault fault, ThreadID tid, DynInstPtr inst)
819 {
820 fault->invoke(tcBase(tid), inst->staticInst);
821 }
822
823 void
824 InOrderCPU::squashFromMemStall(DynInstPtr inst, ThreadID tid, int delay)
825 {
826 scheduleCpuEvent(SquashFromMemStall, NoFault, tid, inst, delay);
827 }
828
829
830 void
831 InOrderCPU::squashDueToMemStall(int stage_num, InstSeqNum seq_num,
832 ThreadID tid)
833 {
834 DPRINTF(InOrderCPU, "Squashing Pipeline Stages Due to Memory Stall...\n");
835
836 // Squash all instructions in each stage including
837 // instruction that caused the squash (seq_num - 1)
838 // NOTE: The stage bandwidth needs to be cleared so thats why
839 // the stalling instruction is squashed as well. The stalled
840 // instruction is previously placed in another intermediate buffer
841 // while it's stall is being handled.
842 InstSeqNum squash_seq_num = seq_num - 1;
843
844 for (int stNum=stage_num; stNum >= 0 ; stNum--) {
845 pipelineStage[stNum]->squashDueToMemStall(squash_seq_num, tid);
846 }
847 }
848
// Create and schedule a CPU event 'delay' cycles in the future, then
// broadcast the event to the resource pool.
void
InOrderCPU::scheduleCpuEvent(CPUEventType c_event, Fault fault,
                             ThreadID tid, DynInstPtr inst,
                             unsigned delay, CPUEventPri event_pri)
{
    CPUEvent *cpu_event = new CPUEvent(this, c_event, fault, tid, inst,
                                       event_pri);

    Tick sked_tick = nextCycle(curTick() + ticks(delay));
    // NOTE(review): 'delay' is unsigned, so 'delay >= 0' is always true
    // and the else branch (immediate processing) is unreachable dead
    // code.  Presumably 'delay' was once signed with < 0 meaning
    // "process now" — confirm against the header declaration before
    // changing either the type or the test.
    if (delay >= 0) {
        DPRINTF(InOrderCPU, "Scheduling CPU Event (%s) for cycle %i, [tid:%i].\n",
                eventNames[c_event], curTick() + delay, tid);
        schedule(cpu_event, sked_tick);
    } else {
        cpu_event->process();
        cpuEventRemoveList.push(cpu_event);
    }

    // Broadcast event to the Resource Pool
    // Need to reset tid just in case this is a dummy instruction
    inst->setTid(tid);
    resPool->scheduleEvent(c_event, inst, 0, 0, tid);
}
872
873 bool
874 InOrderCPU::isThreadActive(ThreadID tid)
875 {
876 list<ThreadID>::iterator isActive =
877 std::find(activeThreads.begin(), activeThreads.end(), tid);
878
879 return (isActive != activeThreads.end());
880 }
881
882 bool
883 InOrderCPU::isThreadReady(ThreadID tid)
884 {
885 list<ThreadID>::iterator isReady =
886 std::find(readyThreads.begin(), readyThreads.end(), tid);
887
888 return (isReady != readyThreads.end());
889 }
890
891 bool
892 InOrderCPU::isThreadSuspended(ThreadID tid)
893 {
894 list<ThreadID>::iterator isSuspended =
895 std::find(suspendedThreads.begin(), suspendedThreads.end(), tid);
896
897 return (isSuspended != suspendedThreads.end());
898 }
899
900 void
901 InOrderCPU::activateNextReadyThread()
902 {
903 if (readyThreads.size() >= 1) {
904 ThreadID ready_tid = readyThreads.front();
905
906 // Activate in Pipeline
907 activateThread(ready_tid);
908
909 // Activate in Resource Pool
910 resPool->activateThread(ready_tid);
911
912 list<ThreadID>::iterator ready_it =
913 std::find(readyThreads.begin(), readyThreads.end(), ready_tid);
914 readyThreads.erase(ready_it);
915 } else {
916 DPRINTF(InOrderCPU,
917 "Attempting to activate new thread, but No Ready Threads to"
918 "activate.\n");
919 DPRINTF(InOrderCPU,
920 "Unable to switch to next active thread.\n");
921 }
922 }
923
// Move 'tid' onto the active-threads list (removing it from the suspended
// list if needed) and activate it in every pipeline stage.  Under the
// SwitchOnCacheMiss model, if another thread is already running, 'tid' is
// instead queued on the ready list for later activation.
void
InOrderCPU::activateThread(ThreadID tid)
{
    if (isThreadSuspended(tid)) {
        DPRINTF(InOrderCPU,
                "Removing [tid:%i] from suspended threads list.\n", tid);

        list<ThreadID>::iterator susp_it =
            std::find(suspendedThreads.begin(), suspendedThreads.end(),
                      tid);
        suspendedThreads.erase(susp_it);
    }

    if (threadModel == SwitchOnCacheMiss &&
        numActiveThreads() == 1) {
        // One thread at a time in this model: defer activation.
        DPRINTF(InOrderCPU,
                "Ignoring activation of [tid:%i], since [tid:%i] is "
                "already running.\n", tid, activeThreadId());

        DPRINTF(InOrderCPU,"Placing [tid:%i] on ready threads list\n",
                tid);

        readyThreads.push_back(tid);

    } else if (!isThreadActive(tid)) {
        DPRINTF(InOrderCPU,
                "Adding [tid:%i] to active threads list.\n", tid);
        activeThreads.push_back(tid);

        activateThreadInPipeline(tid);

        thread[tid]->lastActivate = curTick();

        tcBase(tid)->setStatus(ThreadContext::Active);

        // Make sure the CPU is ticking again if it had gone to sleep.
        wakeCPU();

        numCtxtSwitches++;
    }
}
964
965 void
966 InOrderCPU::activateThreadInPipeline(ThreadID tid)
967 {
968 for (int stNum=0; stNum < NumStages; stNum++) {
969 pipelineStage[stNum]->activateThread(tid);
970 }
971 }
972
973 void
974 InOrderCPU::deactivateContext(ThreadID tid, int delay)
975 {
976 DPRINTF(InOrderCPU,"[tid:%i]: Deactivating ...\n", tid);
977
978 scheduleCpuEvent(DeactivateThread, NoFault, tid, dummyInst[tid], delay);
979
980 // Be sure to signal that there's some activity so the CPU doesn't
981 // deschedule itself.
982 activityRec.activity();
983
984 _status = Running;
985 }
986
// Remove 'tid' from the active-threads list, clearing any pipeline stalls
// it holds, and mark its context Suspended.  A no-op if the thread is not
// active; postcondition: the thread is not on the active list.
void
InOrderCPU::deactivateThread(ThreadID tid)
{
    DPRINTF(InOrderCPU, "[tid:%i]: Calling deactivate thread.\n", tid);

    if (isThreadActive(tid)) {
        DPRINTF(InOrderCPU,"[tid:%i]: Removing from active threads list\n",
                tid);
        list<ThreadID>::iterator thread_it =
            std::find(activeThreads.begin(), activeThreads.end(), tid);

        // Clear stalls before erasing so stages don't stay blocked on a
        // thread that is no longer active.
        removePipelineStalls(*thread_it);

        activeThreads.erase(thread_it);

        // Ideally, this should be triggered from the
        // suspendContext/Thread functions
        tcBase(tid)->setStatus(ThreadContext::Suspended);
    }

    assert(!isThreadActive(tid));
}
1009
1010 void
1011 InOrderCPU::removePipelineStalls(ThreadID tid)
1012 {
1013 DPRINTF(InOrderCPU,"[tid:%i]: Removing all pipeline stalls\n",
1014 tid);
1015
1016 for (int stNum = 0; stNum < NumStages ; stNum++) {
1017 pipelineStage[stNum]->removeStalls(tid);
1018 }
1019
1020 }
1021
1022 void
1023 InOrderCPU::updateThreadPriority()
1024 {
1025 if (activeThreads.size() > 1)
1026 {
1027 //DEFAULT TO ROUND ROBIN SCHEME
1028 //e.g. Move highest priority to end of thread list
1029 list<ThreadID>::iterator list_begin = activeThreads.begin();
1030 list<ThreadID>::iterator list_end = activeThreads.end();
1031
1032 unsigned high_thread = *list_begin;
1033
1034 activeThreads.erase(list_begin);
1035
1036 activeThreads.push_back(high_thread);
1037 }
1038 }
1039
1040 inline void
1041 InOrderCPU::tickThreadStats()
1042 {
1043 /** Keep track of cycles that each thread is active */
1044 list<ThreadID>::iterator thread_it = activeThreads.begin();
1045 while (thread_it != activeThreads.end()) {
1046 threadCycles[*thread_it]++;
1047 thread_it++;
1048 }
1049
1050 // Keep track of cycles where SMT is active
1051 if (activeThreads.size() > 1) {
1052 smtCycles++;
1053 }
1054 }
1055
1056 void
1057 InOrderCPU::activateContext(ThreadID tid, int delay)
1058 {
1059 DPRINTF(InOrderCPU,"[tid:%i]: Activating ...\n", tid);
1060
1061
1062 scheduleCpuEvent(ActivateThread, NoFault, tid, dummyInst[tid], delay);
1063
1064 // Be sure to signal that there's some activity so the CPU doesn't
1065 // deschedule itself.
1066 activityRec.activity();
1067
1068 _status = Running;
1069 }
1070
1071 void
1072 InOrderCPU::activateNextReadyContext(int delay)
1073 {
1074 DPRINTF(InOrderCPU,"Activating next ready thread\n");
1075
1076 scheduleCpuEvent(ActivateNextReadyThread, NoFault, 0/*tid*/, dummyInst[0],
1077 delay, ActivateNextReadyThread_Pri);
1078
1079 // Be sure to signal that there's some activity so the CPU doesn't
1080 // deschedule itself.
1081 activityRec.activity();
1082
1083 _status = Running;
1084 }
1085
1086 void
1087 InOrderCPU::haltContext(ThreadID tid, int delay)
1088 {
1089 DPRINTF(InOrderCPU, "[tid:%i]: Calling Halt Context...\n", tid);
1090
1091 scheduleCpuEvent(HaltThread, NoFault, tid, dummyInst[tid], delay);
1092
1093 activityRec.activity();
1094 }
1095
// Halt thread 'tid': deactivate it, squash its in-flight instructions,
// place it on the halted list, and mark its context Halted.  Under the
// SwitchOnCacheMiss model, immediately try to activate the next ready
// thread in its place.
void
InOrderCPU::haltThread(ThreadID tid)
{
    DPRINTF(InOrderCPU, "[tid:%i]: Placing on Halted Threads List...\n", tid);
    deactivateThread(tid);
    squashThreadInPipeline(tid);
    haltedThreads.push_back(tid);

    tcBase(tid)->setStatus(ThreadContext::Halted);

    if (threadModel == SwitchOnCacheMiss) {
        activateNextReadyContext();
    }
}
1110
1111 void
1112 InOrderCPU::suspendContext(ThreadID tid, int delay)
1113 {
1114 scheduleCpuEvent(SuspendThread, NoFault, tid, dummyInst[tid], delay);
1115 }
1116
1117 void
1118 InOrderCPU::suspendThread(ThreadID tid)
1119 {
1120 DPRINTF(InOrderCPU, "[tid:%i]: Placing on Suspended Threads List...\n",
1121 tid);
1122 deactivateThread(tid);
1123 suspendedThreads.push_back(tid);
1124 thread[tid]->lastSuspend = curTick();
1125
1126 tcBase(tid)->setStatus(ThreadContext::Suspended);
1127 }
1128
1129 void
1130 InOrderCPU::squashThreadInPipeline(ThreadID tid)
1131 {
1132 //Squash all instructions in each stage
1133 for (int stNum=NumStages - 1; stNum >= 0 ; stNum--) {
1134 pipelineStage[stNum]->squash(0 /*seq_num*/, tid);
1135 }
1136 }
1137
1138 PipelineStage*
1139 InOrderCPU::getPipeStage(int stage_num)
1140 {
1141 return pipelineStage[stage_num];
1142 }
1143
1144
1145 RegIndex
1146 InOrderCPU::flattenRegIdx(RegIndex reg_idx, RegType &reg_type, ThreadID tid)
1147 {
1148 if (reg_idx < FP_Base_DepTag) {
1149 reg_type = IntType;
1150 return isa[tid].flattenIntIndex(reg_idx);
1151 } else if (reg_idx < Ctrl_Base_DepTag) {
1152 reg_type = FloatType;
1153 reg_idx -= FP_Base_DepTag;
1154 return isa[tid].flattenFloatIndex(reg_idx);
1155 } else {
1156 reg_type = MiscType;
1157 return reg_idx - TheISA::Ctrl_Base_DepTag;
1158 }
1159 }
1160
1161 uint64_t
1162 InOrderCPU::readIntReg(RegIndex reg_idx, ThreadID tid)
1163 {
1164 DPRINTF(IntRegs, "[tid:%i]: Reading Int. Reg %i as %x\n",
1165 tid, reg_idx, intRegs[tid][reg_idx]);
1166
1167 return intRegs[tid][reg_idx];
1168 }
1169
1170 FloatReg
1171 InOrderCPU::readFloatReg(RegIndex reg_idx, ThreadID tid)
1172 {
1173 return floatRegs.f[tid][reg_idx];
1174 }
1175
1176 FloatRegBits
1177 InOrderCPU::readFloatRegBits(RegIndex reg_idx, ThreadID tid)
1178 {;
1179 return floatRegs.i[tid][reg_idx];
1180 }
1181
1182 void
1183 InOrderCPU::setIntReg(RegIndex reg_idx, uint64_t val, ThreadID tid)
1184 {
1185 if (reg_idx == TheISA::ZeroReg) {
1186 DPRINTF(IntRegs, "[tid:%i]: Ignoring Setting of ISA-ZeroReg "
1187 "(Int. Reg %i) to %x\n", tid, reg_idx, val);
1188 return;
1189 } else {
1190 DPRINTF(IntRegs, "[tid:%i]: Setting Int. Reg %i to %x\n",
1191 tid, reg_idx, val);
1192
1193 intRegs[tid][reg_idx] = val;
1194 }
1195 }
1196
1197
1198 void
1199 InOrderCPU::setFloatReg(RegIndex reg_idx, FloatReg val, ThreadID tid)
1200 {
1201 floatRegs.f[tid][reg_idx] = val;
1202 }
1203
1204
1205 void
1206 InOrderCPU::setFloatRegBits(RegIndex reg_idx, FloatRegBits val, ThreadID tid)
1207 {
1208 floatRegs.i[tid][reg_idx] = val;
1209 }
1210
1211 uint64_t
1212 InOrderCPU::readRegOtherThread(unsigned reg_idx, ThreadID tid)
1213 {
1214 // If Default value is set, then retrieve target thread
1215 if (tid == InvalidThreadID) {
1216 tid = TheISA::getTargetThread(tcBase(tid));
1217 }
1218
1219 if (reg_idx < FP_Base_DepTag) {
1220 // Integer Register File
1221 return readIntReg(reg_idx, tid);
1222 } else if (reg_idx < Ctrl_Base_DepTag) {
1223 // Float Register File
1224 reg_idx -= FP_Base_DepTag;
1225 return readFloatRegBits(reg_idx, tid);
1226 } else {
1227 reg_idx -= Ctrl_Base_DepTag;
1228 return readMiscReg(reg_idx, tid); // Misc. Register File
1229 }
1230 }
1231 void
1232 InOrderCPU::setRegOtherThread(unsigned reg_idx, const MiscReg &val,
1233 ThreadID tid)
1234 {
1235 // If Default value is set, then retrieve target thread
1236 if (tid == InvalidThreadID) {
1237 tid = TheISA::getTargetThread(tcBase(tid));
1238 }
1239
1240 if (reg_idx < FP_Base_DepTag) { // Integer Register File
1241 setIntReg(reg_idx, val, tid);
1242 } else if (reg_idx < Ctrl_Base_DepTag) { // Float Register File
1243 reg_idx -= FP_Base_DepTag;
1244 setFloatRegBits(reg_idx, val, tid);
1245 } else {
1246 reg_idx -= Ctrl_Base_DepTag;
1247 setMiscReg(reg_idx, val, tid); // Misc. Register File
1248 }
1249 }
1250
1251 MiscReg
1252 InOrderCPU::readMiscRegNoEffect(int misc_reg, ThreadID tid)
1253 {
1254 return isa[tid].readMiscRegNoEffect(misc_reg);
1255 }
1256
1257 MiscReg
1258 InOrderCPU::readMiscReg(int misc_reg, ThreadID tid)
1259 {
1260 return isa[tid].readMiscReg(misc_reg, tcBase(tid));
1261 }
1262
1263 void
1264 InOrderCPU::setMiscRegNoEffect(int misc_reg, const MiscReg &val, ThreadID tid)
1265 {
1266 isa[tid].setMiscRegNoEffect(misc_reg, val);
1267 }
1268
1269 void
1270 InOrderCPU::setMiscReg(int misc_reg, const MiscReg &val, ThreadID tid)
1271 {
1272 isa[tid].setMiscReg(misc_reg, val, tcBase(tid));
1273 }
1274
1275
1276 InOrderCPU::ListIt
1277 InOrderCPU::addInst(DynInstPtr inst)
1278 {
1279 ThreadID tid = inst->readTid();
1280
1281 instList[tid].push_back(inst);
1282
1283 return --(instList[tid].end());
1284 }
1285
1286 InOrderCPU::ListIt
1287 InOrderCPU::findInst(InstSeqNum seq_num, ThreadID tid)
1288 {
1289 ListIt it = instList[tid].begin();
1290 ListIt end = instList[tid].end();
1291
1292 while (it != end) {
1293 if ((*it)->seqNum == seq_num)
1294 return it;
1295 else if ((*it)->seqNum > seq_num)
1296 break;
1297
1298 it++;
1299 }
1300
1301 return instList[tid].end();
1302 }
1303
1304 void
1305 InOrderCPU::updateContextSwitchStats()
1306 {
1307 // Set Average Stat Here, then reset to 0
1308 instsPerCtxtSwitch = instsPerSwitch;
1309 instsPerSwitch = 0;
1310 }
1311
1312
void
InOrderCPU::instDone(DynInstPtr inst, ThreadID tid)
{
    // Graduation bookkeeping for a committed instruction: update the
    // thread's precise PC state, finalize trace output, bump the commit
    // statistics, and finally remove the instruction from the CPU.

    // Set the nextPC to be fetched if this is the last instruction
    // committed
    // ========
    // This contributes to the precise state of the CPU
    // which can be used when restoring a thread to the CPU after any
    // type of context switching activity (fork, exception, etc.)
    TheISA::PCState comm_pc = inst->pcState();
    lastCommittedPC[tid] = comm_pc;
    TheISA::advancePC(comm_pc, inst->staticInst);
    pcState(comm_pc, tid);

    // Track whether the last graduated instruction was a branch, and
    // the PC following it, for precise-state restoration.
    //@todo: may be unnecessary with new-ISA-specific branch handling code
    if (inst->isControl()) {
        thread[tid]->lastGradIsBranch = true;
        thread[tid]->lastBranchPC = inst->pcState();
        TheISA::advancePC(thread[tid]->lastBranchPC, inst->staticInst);
    } else {
        thread[tid]->lastGradIsBranch = false;
    }


    // Finalize Trace Data For Instruction (dump, then release the
    // record since this CPU owns it).
    if (inst->traceData) {
        //inst->traceData->setCycle(curTick());
        inst->traceData->setFetchSeq(inst->seqNum);
        //inst->traceData->setCPSeq(cpu->tcBase(tid)->numInst);
        inst->traceData->dump();
        delete inst->traceData;
        inst->traceData = NULL;
    }

    // Increment active thread's instruction count
    instsPerSwitch++;

    // Increment thread-state's instruction count
    thread[tid]->numInst++;

    // Increment thread-state's instruction stats
    thread[tid]->numInsts++;

    // Count committed insts per thread stats
    committedInsts[tid]++;

    // Count total insts committed stat
    totalCommittedInsts++;

    // Count SMT-committed insts per thread stat
    if (numActiveThreads() > 1) {
        smtCommittedInsts[tid]++;
    }

    // Instruction-Mix Stats: the else-if chain makes the categories
    // mutually exclusive; each instruction is counted once, under the
    // first class it matches.
    if (inst->isLoad()) {
        comLoads++;
    } else if (inst->isStore()) {
        comStores++;
    } else if (inst->isControl()) {
        comBranches++;
    } else if (inst->isNop()) {
        comNops++;
    } else if (inst->isNonSpeculative()) {
        comNonSpec++;
    } else if (inst->isInteger()) {
        comInts++;
    } else if (inst->isFloating()) {
        comFloats++;
    }

    // Check for instruction-count-based events.
    comInstEventQueue[tid]->serviceEvents(thread[tid]->numInst);

    // Finally, remove instruction from CPU
    removeInst(inst);
}
1390
1391 // currently unused function, but substitute repetitive code w/this function
1392 // call
1393 void
1394 InOrderCPU::addToRemoveList(DynInstPtr inst)
1395 {
1396 removeInstsThisCycle = true;
1397 if (!inst->isRemoveList()) {
1398 DPRINTF(InOrderCPU, "Pushing instruction [tid:%i] PC %s "
1399 "[sn:%lli] to remove list\n",
1400 inst->threadNumber, inst->pcState(), inst->seqNum);
1401 inst->setRemoveList();
1402 removeList.push(inst->getInstListIt());
1403 } else {
1404 DPRINTF(InOrderCPU, "Ignoring instruction removal for [tid:%i] PC %s "
1405 "[sn:%lli], already remove list\n",
1406 inst->threadNumber, inst->pcState(), inst->seqNum);
1407 }
1408
1409 }
1410
1411 void
1412 InOrderCPU::removeInst(DynInstPtr inst)
1413 {
1414 DPRINTF(InOrderCPU, "Removing graduated instruction [tid:%i] PC %s "
1415 "[sn:%lli]\n",
1416 inst->threadNumber, inst->pcState(), inst->seqNum);
1417
1418 removeInstsThisCycle = true;
1419
1420 // Remove the instruction.
1421 if (!inst->isRemoveList()) {
1422 DPRINTF(InOrderCPU, "Pushing instruction [tid:%i] PC %s "
1423 "[sn:%lli] to remove list\n",
1424 inst->threadNumber, inst->pcState(), inst->seqNum);
1425 inst->setRemoveList();
1426 removeList.push(inst->getInstListIt());
1427 } else {
1428 DPRINTF(InOrderCPU, "Ignoring instruction removal for [tid:%i] PC %s "
1429 "[sn:%lli], already on remove list\n",
1430 inst->threadNumber, inst->pcState(), inst->seqNum);
1431 }
1432
1433 }
1434
1435 void
1436 InOrderCPU::removeInstsUntil(const InstSeqNum &seq_num, ThreadID tid)
1437 {
1438 //assert(!instList[tid].empty());
1439
1440 removeInstsThisCycle = true;
1441
1442 ListIt inst_iter = instList[tid].end();
1443
1444 inst_iter--;
1445
1446 DPRINTF(InOrderCPU, "Squashing instructions from CPU instruction "
1447 "list that are from [tid:%i] and above [sn:%lli] (end=%lli).\n",
1448 tid, seq_num, (*inst_iter)->seqNum);
1449
1450 while ((*inst_iter)->seqNum > seq_num) {
1451
1452 bool break_loop = (inst_iter == instList[tid].begin());
1453
1454 squashInstIt(inst_iter, tid);
1455
1456 inst_iter--;
1457
1458 if (break_loop)
1459 break;
1460 }
1461 }
1462
1463
1464 inline void
1465 InOrderCPU::squashInstIt(const ListIt inst_it, ThreadID tid)
1466 {
1467 DynInstPtr inst = (*inst_it);
1468 if (inst->threadNumber == tid) {
1469 DPRINTF(InOrderCPU, "Squashing instruction, "
1470 "[tid:%i] [sn:%lli] PC %s\n",
1471 inst->threadNumber,
1472 inst->seqNum,
1473 inst->pcState());
1474
1475 inst->setSquashed();
1476 archRegDepMap[tid].remove(inst);
1477
1478 if (!inst->isRemoveList()) {
1479 DPRINTF(InOrderCPU, "Pushing instruction [tid:%i] PC %s "
1480 "[sn:%lli] to remove list\n",
1481 inst->threadNumber, inst->pcState(),
1482 inst->seqNum);
1483 inst->setRemoveList();
1484 removeList.push(inst_it);
1485 } else {
1486 DPRINTF(InOrderCPU, "Ignoring instruction removal for [tid:%i]"
1487 " PC %s [sn:%lli], already on remove list\n",
1488 inst->threadNumber, inst->pcState(),
1489 inst->seqNum);
1490 }
1491
1492 }
1493
1494 }
1495
1496
1497 void
1498 InOrderCPU::cleanUpRemovedInsts()
1499 {
1500 while (!removeList.empty()) {
1501 DPRINTF(InOrderCPU, "Removing instruction, "
1502 "[tid:%i] [sn:%lli] PC %s\n",
1503 (*removeList.front())->threadNumber,
1504 (*removeList.front())->seqNum,
1505 (*removeList.front())->pcState());
1506
1507 DynInstPtr inst = *removeList.front();
1508 ThreadID tid = inst->threadNumber;
1509
1510 // Remove From Register Dependency Map, If Necessary
1511 // archRegDepMap[tid].remove(inst);
1512
1513 // Clear if Non-Speculative
1514 if (inst->staticInst &&
1515 inst->seqNum == nonSpecSeqNum[tid] &&
1516 nonSpecInstActive[tid] == true) {
1517 nonSpecInstActive[tid] = false;
1518 }
1519
1520 inst->onInstList = false;
1521
1522 instList[tid].erase(removeList.front());
1523
1524 removeList.pop();
1525 }
1526
1527 removeInstsThisCycle = false;
1528 }
1529
1530 void
1531 InOrderCPU::cleanUpRemovedEvents()
1532 {
1533 while (!cpuEventRemoveList.empty()) {
1534 Event *cpu_event = cpuEventRemoveList.front();
1535 cpuEventRemoveList.pop();
1536 delete cpu_event;
1537 }
1538 }
1539
1540
1541 void
1542 InOrderCPU::dumpInsts()
1543 {
1544 int num = 0;
1545
1546 ListIt inst_list_it = instList[0].begin();
1547
1548 cprintf("Dumping Instruction List\n");
1549
1550 while (inst_list_it != instList[0].end()) {
1551 cprintf("Instruction:%i\nPC:%s\n[tid:%i]\n[sn:%lli]\nIssued:%i\n"
1552 "Squashed:%i\n\n",
1553 num, (*inst_list_it)->pcState(),
1554 (*inst_list_it)->threadNumber,
1555 (*inst_list_it)->seqNum, (*inst_list_it)->isIssued(),
1556 (*inst_list_it)->isSquashed());
1557 inst_list_it++;
1558 ++num;
1559 }
1560 }
1561
1562 void
1563 InOrderCPU::wakeCPU()
1564 {
1565 if (/*activityRec.active() || */tickEvent.scheduled()) {
1566 DPRINTF(Activity, "CPU already running.\n");
1567 return;
1568 }
1569
1570 DPRINTF(Activity, "Waking up CPU\n");
1571
1572 Tick extra_cycles = tickToCycles((curTick() - 1) - lastRunningCycle);
1573
1574 idleCycles += extra_cycles;
1575 for (int stage_num = 0; stage_num < NumStages; stage_num++) {
1576 pipelineStage[stage_num]->idleCycles += extra_cycles;
1577 }
1578
1579 numCycles += extra_cycles;
1580
1581 schedule(&tickEvent, nextCycle(curTick()));
1582 }
1583
#if FULL_SYSTEM

// Wake the CPU out of a quiesce: only acts if thread 0 is suspended,
// in which case the CPU is rescheduled and thread 0 is reactivated.
void
InOrderCPU::wakeup()
{
    if (thread[0]->status() != ThreadContext::Suspended)
        return;

    wakeCPU();

    DPRINTF(Quiesce, "Suspended Processor woken\n");
    threadContexts[0]->activate();
}
#endif
1598
#if !FULL_SYSTEM
// Schedule a Syscall CPU event (at Syscall_Pri priority) for the given
// thread/instruction; the actual syscall runs when the event fires.
void
InOrderCPU::syscallContext(Fault fault, ThreadID tid, DynInstPtr inst, int delay)
{
    scheduleCpuEvent(Syscall, fault, tid, inst, delay, Syscall_Pri);
}

// Execute syscall 'callnum' on behalf of thread 'tid' (SE mode only).
void
InOrderCPU::syscall(int64_t callnum, ThreadID tid)
{
    DPRINTF(InOrderCPU, "[tid:%i] Executing syscall().\n\n", tid);

    DPRINTF(Activity,"Activity: syscall() called.\n");

    // Temporarily increase this by one to account for the syscall
    // instruction.
    ++(this->thread[tid]->funcExeInst);

    // Execute the actual syscall.
    this->thread[tid]->syscall(callnum);

    // Decrease funcExeInst by one as the normal commit will handle
    // incrementing it.
    --(this->thread[tid]->funcExeInst);

    // Clear Non-Speculative Block Variable so further non-speculative
    // instructions may proceed.
    nonSpecInstActive[tid] = false;
}
#endif
1628
1629 TheISA::TLB*
1630 InOrderCPU::getITBPtr()
1631 {
1632 CacheUnit *itb_res =
1633 dynamic_cast<CacheUnit*>(resPool->getResource(fetchPortIdx));
1634 return itb_res->tlb();
1635 }
1636
1637
1638 TheISA::TLB*
1639 InOrderCPU::getDTBPtr()
1640 {
1641 CacheUnit *dtb_res =
1642 dynamic_cast<CacheUnit*>(resPool->getResource(dataPortIdx));
1643 return dtb_res->tlb();
1644 }
1645
1646 Fault
1647 InOrderCPU::read(DynInstPtr inst, Addr addr,
1648 uint8_t *data, unsigned size, unsigned flags)
1649 {
1650 //@TODO: Generalize name "CacheUnit" to "MemUnit" just in case
1651 // you want to run w/out caches?
1652 CacheUnit *cache_res =
1653 dynamic_cast<CacheUnit*>(resPool->getResource(dataPortIdx));
1654
1655 return cache_res->read(inst, addr, data, size, flags);
1656 }
1657
1658 Fault
1659 InOrderCPU::write(DynInstPtr inst, uint8_t *data, unsigned size,
1660 Addr addr, unsigned flags, uint64_t *write_res)
1661 {
1662 //@TODO: Generalize name "CacheUnit" to "MemUnit" just in case
1663 // you want to run w/out caches?
1664 CacheUnit *cache_res =
1665 dynamic_cast<CacheUnit*>(resPool->getResource(dataPortIdx));
1666 return cache_res->write(inst, data, size, addr, flags, write_res);
1667 }