CPU: Fix O3 and possible InOrder segfaults in FS.
[gem5.git] / src / cpu / inorder / cpu.cc
1 /*
2 * Copyright (c) 2007 MIPS Technologies, Inc.
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 *
28 * Authors: Korey Sewell
29 *
30 */
31
32 #include <algorithm>
33
34 #include "arch/utility.hh"
35 #include "config/full_system.hh"
36 #include "config/the_isa.hh"
37 #include "cpu/activity.hh"
38 #include "cpu/base.hh"
39 #include "cpu/exetrace.hh"
40 #include "cpu/inorder/cpu.hh"
41 #include "cpu/inorder/first_stage.hh"
42 #include "cpu/inorder/inorder_dyn_inst.hh"
43 #include "cpu/inorder/pipeline_traits.hh"
44 #include "cpu/inorder/resource_pool.hh"
45 #include "cpu/inorder/resources/resource_list.hh"
46 #include "cpu/inorder/thread_context.hh"
47 #include "cpu/inorder/thread_state.hh"
48 #include "cpu/simple_thread.hh"
49 #include "cpu/thread_context.hh"
50 #include "mem/translating_port.hh"
51 #include "params/InOrderCPU.hh"
52 #include "sim/process.hh"
53 #include "sim/stat_control.hh"
54
55 #if FULL_SYSTEM
56 #include "cpu/quiesce_event.hh"
57 #include "sim/system.hh"
58 #endif
59
60 #if THE_ISA == ALPHA_ISA
61 #include "arch/alpha/osfpal.hh"
62 #endif
63
64 using namespace std;
65 using namespace TheISA;
66 using namespace ThePipeline;
67
68 InOrderCPU::TickEvent::TickEvent(InOrderCPU *c)
69 : Event(CPU_Tick_Pri), cpu(c)
70 { }
71
72
73 void
74 InOrderCPU::TickEvent::process()
75 {
76 cpu->tick();
77 }
78
79
80 const char *
81 InOrderCPU::TickEvent::description()
82 {
83 return "InOrderCPU tick event";
84 }
85
86 InOrderCPU::CPUEvent::CPUEvent(InOrderCPU *_cpu, CPUEventType e_type,
87 Fault fault, ThreadID _tid, DynInstPtr inst,
88 unsigned event_pri_offset)
89 : Event(Event::Priority((unsigned int)CPU_Tick_Pri + event_pri_offset)),
90 cpu(_cpu)
91 {
92 setEvent(e_type, fault, _tid, inst);
93 }
94
95
96 std::string InOrderCPU::eventNames[NumCPUEvents] =
97 {
98 "ActivateThread",
99 "ActivateNextReadyThread",
100 "DeactivateThread",
101 "HaltThread",
102 "SuspendThread",
103 "Trap",
104 "InstGraduated",
105 "SquashFromMemStall",
106 "UpdatePCs"
107 };
108
109 void
110 InOrderCPU::CPUEvent::process()
111 {
112 switch (cpuEventType)
113 {
114 case ActivateThread:
115 cpu->activateThread(tid);
116 break;
117
118 case ActivateNextReadyThread:
119 cpu->activateNextReadyThread();
120 break;
121
122 case DeactivateThread:
123 cpu->deactivateThread(tid);
124 break;
125
126 case HaltThread:
127 cpu->haltThread(tid);
128 break;
129
130 case SuspendThread:
131 cpu->suspendThread(tid);
132 break;
133
134 case SquashFromMemStall:
135 cpu->squashDueToMemStall(inst->squashingStage, inst->seqNum, tid);
136 break;
137
138 case Trap:
139 cpu->trapCPU(fault, tid, inst);
140 break;
141
142 default:
143 fatal("Unrecognized Event Type %s", eventNames[cpuEventType]);
144 }
145
146 cpu->cpuEventRemoveList.push(this);
147 }
148
149
150
151 const char *
152 InOrderCPU::CPUEvent::description()
153 {
154 return "InOrderCPU event";
155 }
156
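// Schedule this CPU event 'delay' cycles in the future. A previously
// squashed event is rescheduled; an event already on the queue is left alone.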
157 void
158 InOrderCPU::CPUEvent::scheduleEvent(int delay)
159 {
160 if (squashed())
161 mainEventQueue.reschedule(this, cpu->nextCycle(curTick +
162 cpu->ticks(delay)));
163 else if (!scheduled())
164 mainEventQueue.schedule(this, cpu->nextCycle(curTick +
165 cpu->ticks(delay)));
166 }
167
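// Cancel a pending CPU event by squashing it; the event queue will skip it
// when its scheduled tick arrives.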
168 void
169 InOrderCPU::CPUEvent::unscheduleEvent()
170 {
171 if (scheduled())
172 squash();
173 }
174
175 InOrderCPU::InOrderCPU(Params *params)
176 : BaseCPU(params),
177 cpu_id(params->cpu_id),
178 coreType("default"),
179 _status(Idle),
180 tickEvent(this),
181 timeBuffer(2, 2),
182 removeInstsThisCycle(false),
183 activityRec(params->name, NumStages, 10, params->activity),
184 #if FULL_SYSTEM
185 system(params->system),
186 physmem(system->physmem),
187 #endif // FULL_SYSTEM
188 #ifdef DEBUG
189 cpuEventNum(0),
190 resReqCount(0),
191 #endif // DEBUG
192 switchCount(0),
193 deferRegistration(false/*params->deferRegistration*/),
194 stageTracing(params->stageTracing),
195 instsPerSwitch(0)
196 {
197 ThreadID active_threads;
198 cpu_params = params;
199
200 resPool = new ResourcePool(this, params);
201
202 // Resize for Multithreading CPUs
203 thread.resize(numThreads);
204
205 #if FULL_SYSTEM
206 active_threads = 1;
207 #else
208 active_threads = params->workload.size();
209
210 if (active_threads > MaxThreads) {
211 panic("Workload Size too large. Increase the 'MaxThreads'"
212 "in your InOrder implementation or "
213 "edit your workload size.");
214 }
215
216
217 if (active_threads > 1) {
218 threadModel = (InOrderCPU::ThreadModel) params->threadModel;
219
220 if (threadModel == SMT) {
221 DPRINTF(InOrderCPU, "Setting Thread Model to SMT.\n");
222 } else if (threadModel == SwitchOnCacheMiss) {
223 DPRINTF(InOrderCPU, "Setting Thread Model to "
224 "Switch On Cache Miss\n");
225 }
226
227 } else {
228 threadModel = Single;
229 }
230
231
232
233 #endif
234
235 // Bind the fetch & data ports from the resource pool.
236 fetchPortIdx = resPool->getPortIdx(params->fetchMemPort);
237 if (fetchPortIdx == 0) {
238 fatal("Unable to find port to fetch instructions from.\n");
239 }
240
241 dataPortIdx = resPool->getPortIdx(params->dataMemPort);
242 if (dataPortIdx == 0) {
243 fatal("Unable to find port for data.\n");
244 }
245
246 for (ThreadID tid = 0; tid < numThreads; ++tid) {
247 #if FULL_SYSTEM
248 // SMT is not supported in FS mode yet.
249 assert(numThreads == 1);
250 thread[tid] = new Thread(this, 0);
251 #else
252 if (tid < (ThreadID)params->workload.size()) {
253 DPRINTF(InOrderCPU, "Workload[%i] process is %#x\n",
254 tid, params->workload[tid]->prog_fname);
255 thread[tid] =
256 new Thread(this, tid, params->workload[tid]);
257 } else {
258 // Allocate an empty thread so M5 can use it later
259 // when scheduling threads to the CPU.
260 Process* dummy_proc = params->workload[0];
261 thread[tid] = new Thread(this, tid, dummy_proc);
262 }
263
264 // Eventually set this with parameters...
265 asid[tid] = tid;
266 #endif
267
268 // Setup the TC that will serve as the interface to the threads/CPU.
269 InOrderThreadContext *tc = new InOrderThreadContext;
270 tc->cpu = this;
271 tc->thread = thread[tid];
272
273 // Give the thread the TC.
274 thread[tid]->tc = tc;
275 thread[tid]->setFuncExeInst(0);
276 globalSeqNum[tid] = 1;
277
278 // Add the TC to the CPU's list of TC's.
279 this->threadContexts.push_back(tc);
280 }
281
282 // Initialize TimeBuffer Stage Queues
283 for (int stNum=0; stNum < NumStages - 1; stNum++) {
284 stageQueue[stNum] = new StageQueue(NumStages, NumStages);
285 stageQueue[stNum]->id(stNum);
286 }
287
288
289 // Set Up Pipeline Stages
290 for (int stNum=0; stNum < NumStages; stNum++) {
291 if (stNum == 0)
292 pipelineStage[stNum] = new FirstStage(params, stNum);
293 else
294 pipelineStage[stNum] = new PipelineStage(params, stNum);
295
296 pipelineStage[stNum]->setCPU(this);
297 pipelineStage[stNum]->setActiveThreads(&activeThreads);
298 pipelineStage[stNum]->setTimeBuffer(&timeBuffer);
299
300 // Take Care of 1st/Nth stages
301 if (stNum > 0)
302 pipelineStage[stNum]->setPrevStageQueue(stageQueue[stNum - 1]);
303 if (stNum < NumStages - 1)
304 pipelineStage[stNum]->setNextStageQueue(stageQueue[stNum]);
305 }
306
307 // Initialize thread specific variables
308 for (ThreadID tid = 0; tid < numThreads; tid++) {
309 archRegDepMap[tid].setCPU(this);
310
311 nonSpecInstActive[tid] = false;
312 nonSpecSeqNum[tid] = 0;
313
314 squashSeqNum[tid] = MaxAddr;
315 lastSquashCycle[tid] = 0;
316
317 memset(intRegs[tid], 0, sizeof(intRegs[tid]));
318 memset(floatRegs.i[tid], 0, sizeof(floatRegs.i[tid]));
319 isa[tid].clear();
320
321 isa[tid].expandForMultithreading(numThreads, 1/*numVirtProcs*/);
322
323 // Define dummy instructions and resource requests to be used.
324 dummyInst[tid] = new InOrderDynInst(this,
325 thread[tid],
326 0,
327 tid,
328 asid[tid]);
329
330 dummyReq[tid] = new ResourceRequest(resPool->getResource(0),
331 dummyInst[tid],
332 0,
333 0,
334 0,
335 0);
336 }
337
338 dummyReqInst = new InOrderDynInst(this, NULL, 0, 0, 0);
339 dummyReqInst->setSquashed();
340
341 dummyBufferInst = new InOrderDynInst(this, NULL, 0, 0, 0);
342 dummyBufferInst->setSquashed();
343
344 lastRunningCycle = curTick;
345
346 // Reset CPU to reset state.
347 #if FULL_SYSTEM
348 Fault resetFault = new ResetFault();
349 resetFault->invoke(tcBase());
350 #else
351 reset();
352 #endif
353
354 dummyBufferInst->resetInstCount();
355
356 // Schedule First Tick Event, CPU will reschedule itself from here on out.
357 scheduleTickEvent(0);
358 }
359
360 InOrderCPU::~InOrderCPU()
361 {
362 delete resPool;
363 }
364
365
366 void
367 InOrderCPU::regStats()
368 {
369 /* Register the Resource Pool's stats here.*/
370 resPool->regStats();
371
372 /* Register for each Pipeline Stage */
373 for (int stage_num=0; stage_num < ThePipeline::NumStages; stage_num++) {
374 pipelineStage[stage_num]->regStats();
375 }
376
377 /* Register any of the InOrderCPU's stats here.*/
378 instsPerCtxtSwitch
379 .name(name() + ".instsPerContextSwitch")
380 .desc("Instructions Committed Per Context Switch")
381 .prereq(instsPerCtxtSwitch);
382
383 numCtxtSwitches
384 .name(name() + ".contextSwitches")
385 .desc("Number of context switches");
386
387 comLoads
388 .name(name() + ".comLoads")
389 .desc("Number of Load instructions committed");
390
391 comStores
392 .name(name() + ".comStores")
393 .desc("Number of Store instructions committed");
394
395 comBranches
396 .name(name() + ".comBranches")
397 .desc("Number of Branches instructions committed");
398
399 comNops
400 .name(name() + ".comNops")
401 .desc("Number of Nop instructions committed");
402
403 comNonSpec
404 .name(name() + ".comNonSpec")
405 .desc("Number of Non-Speculative instructions committed");
406
407 comInts
408 .name(name() + ".comInts")
409 .desc("Number of Integer instructions committed");
410
411 comFloats
412 .name(name() + ".comFloats")
413 .desc("Number of Floating Point instructions committed");
414
415 timesIdled
416 .name(name() + ".timesIdled")
417 .desc("Number of times that the entire CPU went into an idle state and"
418 " unscheduled itself")
419 .prereq(timesIdled);
420
421 idleCycles
422 .name(name() + ".idleCycles")
423 .desc("Number of cycles the CPU's stages were not processed");
424
425 runCycles
426 .name(name() + ".runCycles")
427 .desc("Number of cycles the CPU's stages were processed.");
428
429 activity
430 .name(name() + ".activity")
431 .desc("Percentage of cycles the CPU is active")
432 .precision(6);
433 activity = (runCycles / numCycles) * 100;
434
435 threadCycles
436 .init(numThreads)
437 .name(name() + ".threadCycles")
438 .desc("Total Number of Cycles A Thread Was Active in CPU (Per-Thread)");
439
440 smtCycles
441 .name(name() + ".smtCycles")
442 .desc("Total number of cycles that the CPU was in SMT-mode");
443
444 committedInsts
445 .init(numThreads)
446 .name(name() + ".committedInsts")
447 .desc("Number of Instructions Simulated (Per-Thread)");
448
449 smtCommittedInsts
450 .init(numThreads)
451 .name(name() + ".smtCommittedInsts")
452 .desc("Number of SMT Instructions Simulated (Per-Thread)");
453
454 totalCommittedInsts
455 .name(name() + ".committedInsts_total")
456 .desc("Number of Instructions Simulated (Total)");
457
458 cpi
459 .name(name() + ".cpi")
460 .desc("CPI: Cycles Per Instruction (Per-Thread)")
461 .precision(6);
462 cpi = numCycles / committedInsts;
463
464 smtCpi
465 .name(name() + ".smt_cpi")
466 .desc("CPI: Total SMT-CPI")
467 .precision(6);
468 smtCpi = smtCycles / smtCommittedInsts;
469
470 totalCpi
471 .name(name() + ".cpi_total")
472 .desc("CPI: Total CPI of All Threads")
473 .precision(6);
474 totalCpi = numCycles / totalCommittedInsts;
475
476 ipc
477 .name(name() + ".ipc")
478 .desc("IPC: Instructions Per Cycle (Per-Thread)")
479 .precision(6);
480 ipc = committedInsts / numCycles;
481
482 smtIpc
483 .name(name() + ".smt_ipc")
484 .desc("IPC: Total SMT-IPC")
485 .precision(6);
486 smtIpc = smtCommittedInsts / smtCycles;
487
488 totalIpc
489 .name(name() + ".ipc_total")
490 .desc("IPC: Total IPC of All Threads")
491 .precision(6);
492 totalIpc = totalCommittedInsts / numCycles;
493
494 BaseCPU::regStats();
495 }
496
497
498 void
499 InOrderCPU::tick()
500 {
501 DPRINTF(InOrderCPU, "\n\nInOrderCPU: Ticking main, InOrderCPU.\n");
502
503 ++numCycles;
504
505 bool pipes_idle = true;
506
507 //Tick each of the stages
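// Stages are ticked in reverse order, from the last stage down to the first.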
508 for (int stNum=NumStages - 1; stNum >= 0 ; stNum--) {
509 pipelineStage[stNum]->tick();
510
511 pipes_idle = pipes_idle && pipelineStage[stNum]->idle;
512 }
513
514 if (pipes_idle)
515 idleCycles++;
516 else
517 runCycles++;
518
519 // Now advance the time buffers one tick
520 timeBuffer.advance();
521 for (int sqNum=0; sqNum < NumStages - 1; sqNum++) {
522 stageQueue[sqNum]->advance();
523 }
524 activityRec.advance();
525
526 // Any squashed requests, events, or insts then remove them now
527 cleanUpRemovedReqs();
528 cleanUpRemovedEvents();
529 cleanUpRemovedInsts();
530
531 // Re-schedule CPU for this cycle
532 if (!tickEvent.scheduled()) {
533 if (_status == SwitchedOut) {
534 // increment stat
535 lastRunningCycle = curTick;
536 } else if (!activityRec.active()) {
537 DPRINTF(InOrderCPU, "sleeping CPU.\n");
538 lastRunningCycle = curTick;
539 timesIdled++;
540 } else {
541 //Tick next_tick = curTick + cycles(1);
542 //tickEvent.schedule(next_tick);
543 mainEventQueue.schedule(&tickEvent, nextCycle(curTick + 1));
544 DPRINTF(InOrderCPU, "Scheduled CPU for next tick @ %i.\n",
545 nextCycle(curTick + 1));
546 }
547 }
548
549 tickThreadStats();
550 updateThreadPriority();
551 }
552
553
554 void
555 InOrderCPU::init()
556 {
557 if (!deferRegistration) {
558 registerThreadContexts();
559 }
560
561 // Set inSyscall so that the CPU doesn't squash when initially
562 // setting up registers.
563 for (ThreadID tid = 0; tid < numThreads; ++tid)
564 thread[tid]->inSyscall = true;
565
566 #if FULL_SYSTEM
567 for (ThreadID tid = 0; tid < numThreads; tid++) {
568 ThreadContext *src_tc = threadContexts[tid];
569 TheISA::initCPU(src_tc, src_tc->contextId());
570 }
571 #endif
572
573 // Clear inSyscall.
574 for (ThreadID tid = 0; tid < numThreads; ++tid)
575 thread[tid]->inSyscall = false;
576
577 // Call the initialization routine for the resource pool.
578 resPool->init();
579 }
580
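// Reset each thread's architectural ISA state for this core type.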
581 void
582 InOrderCPU::reset()
583 {
584 for (int i = 0; i < numThreads; i++) {
585 isa[i].reset(coreType, numThreads,
586 1/*numVirtProcs*/, dynamic_cast<BaseCPU*>(this));
587 }
588 }
589
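// Memory ports are owned by the resource pool, so delegate the lookup to it.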
590 Port*
591 InOrderCPU::getPort(const std::string &if_name, int idx)
592 {
593 return resPool->getPort(if_name, idx);
594 }
595
596 #if FULL_SYSTEM
597 Fault
598 InOrderCPU::hwrei(ThreadID tid)
599 {
600 panic("hwrei: Unimplemented");
601
602 return NoFault;
603 }
604
605
606 bool
607 InOrderCPU::simPalCheck(int palFunc, ThreadID tid)
608 {
609 panic("simPalCheck: Unimplemented");
610
611 return true;
612 }
613
614
615 Fault
616 InOrderCPU::getInterrupts()
617 {
618 // Check if there are any outstanding interrupts
619 return this->interrupts->getInterrupt(this->threadContexts[0]);
620 }
621
622
623 void
624 InOrderCPU::processInterrupts(Fault interrupt)
625 {
626 // Check for interrupts here. For now can copy the code that
627 // exists within isa_fullsys_traits.hh. Also assume that thread 0
628 // is the one that handles the interrupts.
629 // @todo: Possibly consolidate the interrupt checking code.
630 // @todo: Allow other threads to handle interrupts.
631
632 assert(interrupt != NoFault);
633 this->interrupts->updateIntrInfo(this->threadContexts[0]);
634
635 DPRINTF(InOrderCPU, "Interrupt %s being handled\n", interrupt->name());
636 static StaticInstPtr dummyStatic(TheISA::NoopMachInst, 0);
637 static DynInstPtr dummyDyn = new Impl::DynInst(dummyStatic);
638 this->trap(interrupt, 0, dummyDyn);
639 }
640
641
642 void
643 InOrderCPU::updateMemPorts()
644 {
645 // Update all ThreadContext's memory ports (Functional/Virtual
646 // Ports)
647 ThreadID size = thread.size();
648 for (ThreadID i = 0; i < size; ++i)
649 thread[i]->connectMemPorts(thread[i]->getTC());
650 }
651 #endif
652
653 void
654 InOrderCPU::trap(Fault fault, ThreadID tid, DynInstPtr inst, int delay)
655 {
656 // Squash the pipeline during a trap.
657 scheduleCpuEvent(Trap, fault, tid, inst, delay);
658 }
659
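// Invoke the fault handler on this thread's context once the scheduled
// Trap event fires.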
660 void
661 InOrderCPU::trapCPU(Fault fault, ThreadID tid, DynInstPtr inst)
662 {
663 fault->invoke(tcBase(tid), inst->staticInst);
664 }
665
666 void
667 InOrderCPU::squashFromMemStall(DynInstPtr inst, ThreadID tid, int delay)
668 {
669 scheduleCpuEvent(SquashFromMemStall, NoFault, tid, inst, delay);
670 }
671
672
673 void
674 InOrderCPU::squashDueToMemStall(int stage_num, InstSeqNum seq_num,
675 ThreadID tid)
676 {
677 DPRINTF(InOrderCPU, "Squashing Pipeline Stages Due to Memory Stall...\n");
678
679 // Squash all instructions in each stage, including the
680 // instruction that caused the squash (seq_num - 1).
681 // NOTE: The stage bandwidth needs to be cleared, which is why
682 // the stalling instruction is squashed as well. The stalled
683 // instruction was previously placed in another intermediate buffer
684 // while its stall is being handled.
685 InstSeqNum squash_seq_num = seq_num - 1;
686
687 for (int stNum=stage_num; stNum >= 0 ; stNum--) {
688 pipelineStage[stNum]->squashDueToMemStall(squash_seq_num, tid);
689 }
690 }
691
692 void
693 InOrderCPU::scheduleCpuEvent(CPUEventType c_event, Fault fault,
694 ThreadID tid, DynInstPtr inst,
695 unsigned delay, unsigned event_pri_offset)
696 {
697 CPUEvent *cpu_event = new CPUEvent(this, c_event, fault, tid, inst,
698 event_pri_offset);
699
700 Tick sked_tick = nextCycle(curTick + ticks(delay));
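// Note: 'delay' is unsigned, so this test always succeeds and the event
// is always scheduled rather than processed immediately below.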
701 if (delay >= 0) {
702 DPRINTF(InOrderCPU, "Scheduling CPU Event (%s) for cycle %i, [tid:%i].\n",
703 eventNames[c_event], curTick + delay, tid);
704 mainEventQueue.schedule(cpu_event, sked_tick);
705 } else {
706 cpu_event->process();
707 cpuEventRemoveList.push(cpu_event);
708 }
709
710 // Broadcast event to the Resource Pool
711 // Need to reset tid just in case this is a dummy instruction
712 inst->setTid(tid);
713 resPool->scheduleEvent(c_event, inst, 0, 0, tid);
714 }
715
716 bool
717 InOrderCPU::isThreadActive(ThreadID tid)
718 {
719 list<ThreadID>::iterator isActive =
720 std::find(activeThreads.begin(), activeThreads.end(), tid);
721
722 return (isActive != activeThreads.end());
723 }
724
725 bool
726 InOrderCPU::isThreadReady(ThreadID tid)
727 {
728 list<ThreadID>::iterator isReady =
729 std::find(readyThreads.begin(), readyThreads.end(), tid);
730
731 return (isReady != readyThreads.end());
732 }
733
734 bool
735 InOrderCPU::isThreadSuspended(ThreadID tid)
736 {
737 list<ThreadID>::iterator isSuspended =
738 std::find(suspendedThreads.begin(), suspendedThreads.end(), tid);
739
740 return (isSuspended != suspendedThreads.end());
741 }
742
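// Take the next thread off the ready list, activate it in the pipeline
// and the resource pool, then remove it from the ready list.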
743 void
744 InOrderCPU::activateNextReadyThread()
745 {
746 if (readyThreads.size() >= 1) {
747 ThreadID ready_tid = readyThreads.front();
748
749 // Activate in Pipeline
750 activateThread(ready_tid);
751
752 // Activate in Resource Pool
753 resPool->activateAll(ready_tid);
754
755 list<ThreadID>::iterator ready_it =
756 std::find(readyThreads.begin(), readyThreads.end(), ready_tid);
757 readyThreads.erase(ready_it);
758 } else {
759 DPRINTF(InOrderCPU,
760 "Attempting to activate new thread, but No Ready Threads to"
761 "activate.\n");
762 DPRINTF(InOrderCPU,
763 "Unable to switch to next active thread.\n");
764 }
765 }
766
767 void
768 InOrderCPU::activateThread(ThreadID tid)
769 {
770 if (isThreadSuspended(tid)) {
771 DPRINTF(InOrderCPU,
772 "Removing [tid:%i] from suspended threads list.\n", tid);
773
774 list<ThreadID>::iterator susp_it =
775 std::find(suspendedThreads.begin(), suspendedThreads.end(),
776 tid);
777 suspendedThreads.erase(susp_it);
778 }
779
780 if (threadModel == SwitchOnCacheMiss &&
781 numActiveThreads() == 1) {
782 DPRINTF(InOrderCPU,
783 "Ignoring activation of [tid:%i], since [tid:%i] is "
784 "already running.\n", tid, activeThreadId());
785
786 DPRINTF(InOrderCPU,"Placing [tid:%i] on ready threads list\n",
787 tid);
788
789 readyThreads.push_back(tid);
790
791 } else if (!isThreadActive(tid)) {
792 DPRINTF(InOrderCPU,
793 "Adding [tid:%i] to active threads list.\n", tid);
794 activeThreads.push_back(tid);
795
796 activateThreadInPipeline(tid);
797
798 thread[tid]->lastActivate = curTick;
799
800 tcBase(tid)->setStatus(ThreadContext::Active);
801
802 wakeCPU();
803
804 numCtxtSwitches++;
805 }
806 }
807
808 void
809 InOrderCPU::activateThreadInPipeline(ThreadID tid)
810 {
811 for (int stNum=0; stNum < NumStages; stNum++) {
812 pipelineStage[stNum]->activateThread(tid);
813 }
814 }
815
816 void
817 InOrderCPU::deactivateContext(ThreadID tid, int delay)
818 {
819 DPRINTF(InOrderCPU,"[tid:%i]: Deactivating ...\n", tid);
820
821 scheduleCpuEvent(DeactivateThread, NoFault, tid, dummyInst[tid], delay);
822
823 // Be sure to signal that there's some activity so the CPU doesn't
824 // deschedule itself.
825 activityRec.activity();
826
827 _status = Running;
828 }
829
830 void
831 InOrderCPU::deactivateThread(ThreadID tid)
832 {
833 DPRINTF(InOrderCPU, "[tid:%i]: Calling deactivate thread.\n", tid);
834
835 if (isThreadActive(tid)) {
836 DPRINTF(InOrderCPU,"[tid:%i]: Removing from active threads list\n",
837 tid);
838 list<ThreadID>::iterator thread_it =
839 std::find(activeThreads.begin(), activeThreads.end(), tid);
840
841 removePipelineStalls(*thread_it);
842
843 activeThreads.erase(thread_it);
844
845 // Ideally, this should be triggered from the
846 // suspendContext/Thread functions
847 tcBase(tid)->setStatus(ThreadContext::Suspended);
848 }
849
850 assert(!isThreadActive(tid));
851 }
852
853 void
854 InOrderCPU::removePipelineStalls(ThreadID tid)
855 {
856 DPRINTF(InOrderCPU,"[tid:%i]: Removing all pipeline stalls\n",
857 tid);
858
859 for (int stNum = 0; stNum < NumStages ; stNum++) {
860 pipelineStage[stNum]->removeStalls(tid);
861 }
862
863 }
864
865 void
866 InOrderCPU::updateThreadPriority()
867 {
868 if (activeThreads.size() > 1)
869 {
870 //DEFAULT TO ROUND ROBIN SCHEME
871 //e.g. Move highest priority to end of thread list
872 list<ThreadID>::iterator list_begin = activeThreads.begin();
873 list<ThreadID>::iterator list_end = activeThreads.end();
874
875 unsigned high_thread = *list_begin;
876
877 activeThreads.erase(list_begin);
878
879 activeThreads.push_back(high_thread);
880 }
881 }
882
883 inline void
884 InOrderCPU::tickThreadStats()
885 {
886 /** Keep track of cycles that each thread is active */
887 list<ThreadID>::iterator thread_it = activeThreads.begin();
888 while (thread_it != activeThreads.end()) {
889 threadCycles[*thread_it]++;
890 thread_it++;
891 }
892
893 // Keep track of cycles where SMT is active
894 if (activeThreads.size() > 1) {
895 smtCycles++;
896 }
897 }
898
899 void
900 InOrderCPU::activateContext(ThreadID tid, int delay)
901 {
902 DPRINTF(InOrderCPU,"[tid:%i]: Activating ...\n", tid);
903
904
905 scheduleCpuEvent(ActivateThread, NoFault, tid, dummyInst[tid], delay);
906
907 // Be sure to signal that there's some activity so the CPU doesn't
908 // deschedule itself.
909 activityRec.activity();
910
911 _status = Running;
912 }
913
914 void
915 InOrderCPU::activateNextReadyContext(int delay)
916 {
917 DPRINTF(InOrderCPU,"Activating next ready thread\n");
918
919 // NOTE: Add 5 to the event priority so that we always activate
920 // threads after we've finished deactivating, squashing, etc.,
921 // other threads
922 scheduleCpuEvent(ActivateNextReadyThread, NoFault, 0/*tid*/, dummyInst[0],
923 delay, 5);
924
925 // Be sure to signal that there's some activity so the CPU doesn't
926 // deschedule itself.
927 activityRec.activity();
928
929 _status = Running;
930 }
931
932 void
933 InOrderCPU::haltContext(ThreadID tid, int delay)
934 {
935 DPRINTF(InOrderCPU, "[tid:%i]: Calling Halt Context...\n", tid);
936
937 scheduleCpuEvent(HaltThread, NoFault, tid, dummyInst[tid], delay);
938
939 activityRec.activity();
940 }
941
942 void
943 InOrderCPU::haltThread(ThreadID tid)
944 {
945 DPRINTF(InOrderCPU, "[tid:%i]: Placing on Halted Threads List...\n", tid);
946 deactivateThread(tid);
947 squashThreadInPipeline(tid);
948 haltedThreads.push_back(tid);
949
950 tcBase(tid)->setStatus(ThreadContext::Halted);
951
952 if (threadModel == SwitchOnCacheMiss) {
953 activateNextReadyContext();
954 }
955 }
956
957 void
958 InOrderCPU::suspendContext(ThreadID tid, int delay)
959 {
960 scheduleCpuEvent(SuspendThread, NoFault, tid, dummyInst[tid], delay);
961 }
962
963 void
964 InOrderCPU::suspendThread(ThreadID tid)
965 {
966 DPRINTF(InOrderCPU, "[tid:%i]: Placing on Suspended Threads List...\n",
967 tid);
968 deactivateThread(tid);
969 suspendedThreads.push_back(tid);
970 thread[tid]->lastSuspend = curTick;
971
972 tcBase(tid)->setStatus(ThreadContext::Suspended);
973 }
974
975 void
976 InOrderCPU::squashThreadInPipeline(ThreadID tid)
977 {
978 //Squash all instructions in each stage
979 for (int stNum=NumStages - 1; stNum >= 0 ; stNum--) {
980 pipelineStage[stNum]->squash(0 /*seq_num*/, tid);
981 }
982 }
983
984 PipelineStage*
985 InOrderCPU::getPipeStage(int stage_num)
986 {
987 return pipelineStage[stage_num];
988 }
989
990 uint64_t
991 InOrderCPU::readPC(ThreadID tid)
992 {
993 return PC[tid];
994 }
995
996
997 void
998 InOrderCPU::setPC(Addr new_PC, ThreadID tid)
999 {
1000 PC[tid] = new_PC;
1001 }
1002
1003
1004 uint64_t
1005 InOrderCPU::readNextPC(ThreadID tid)
1006 {
1007 return nextPC[tid];
1008 }
1009
1010
1011 void
1012 InOrderCPU::setNextPC(uint64_t new_NPC, ThreadID tid)
1013 {
1014 nextPC[tid] = new_NPC;
1015 }
1016
1017
1018 uint64_t
1019 InOrderCPU::readNextNPC(ThreadID tid)
1020 {
1021 return nextNPC[tid];
1022 }
1023
1024
1025 void
1026 InOrderCPU::setNextNPC(uint64_t new_NNPC, ThreadID tid)
1027 {
1028 nextNPC[tid] = new_NNPC;
1029 }
1030
1031 uint64_t
1032 InOrderCPU::readIntReg(int reg_idx, ThreadID tid)
1033 {
1034 return intRegs[tid][reg_idx];
1035 }
1036
1037 FloatReg
1038 InOrderCPU::readFloatReg(int reg_idx, ThreadID tid)
1039 {
1040 return floatRegs.f[tid][reg_idx];
1041 }
1042
1043 FloatRegBits
1044 InOrderCPU::readFloatRegBits(int reg_idx, ThreadID tid)
1045 {
1046 return floatRegs.i[tid][reg_idx];
1047 }
1048
1049 void
1050 InOrderCPU::setIntReg(int reg_idx, uint64_t val, ThreadID tid)
1051 {
1052 intRegs[tid][reg_idx] = val;
1053 }
1054
1055
1056 void
1057 InOrderCPU::setFloatReg(int reg_idx, FloatReg val, ThreadID tid)
1058 {
1059 floatRegs.f[tid][reg_idx] = val;
1060 }
1061
1062
1063 void
1064 InOrderCPU::setFloatRegBits(int reg_idx, FloatRegBits val, ThreadID tid)
1065 {
1066 floatRegs.i[tid][reg_idx] = val;
1067 }
1068
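// Register indices are flattened here: integer registers come first,
// floating-point registers are offset by FP_Base_DepTag, and
// miscellaneous/control registers are offset by Ctrl_Base_DepTag.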
1069 uint64_t
1070 InOrderCPU::readRegOtherThread(unsigned reg_idx, ThreadID tid)
1071 {
1072 // If the default (invalid) thread ID is passed, retrieve the target thread
1073 if (tid == InvalidThreadID) {
1074 tid = TheISA::getTargetThread(tcBase(tid));
1075 }
1076
1077 if (reg_idx < FP_Base_DepTag) {
1078 // Integer Register File
1079 return readIntReg(reg_idx, tid);
1080 } else if (reg_idx < Ctrl_Base_DepTag) {
1081 // Float Register File
1082 reg_idx -= FP_Base_DepTag;
1083 return readFloatRegBits(reg_idx, tid);
1084 } else {
1085 reg_idx -= Ctrl_Base_DepTag;
1086 return readMiscReg(reg_idx, tid); // Misc. Register File
1087 }
1088 }
1089 void
1090 InOrderCPU::setRegOtherThread(unsigned reg_idx, const MiscReg &val,
1091 ThreadID tid)
1092 {
1093 // If the default (invalid) thread ID is passed, retrieve the target thread
1094 if (tid == InvalidThreadID) {
1095 tid = TheISA::getTargetThread(tcBase(tid));
1096 }
1097
1098 if (reg_idx < FP_Base_DepTag) { // Integer Register File
1099 setIntReg(reg_idx, val, tid);
1100 } else if (reg_idx < Ctrl_Base_DepTag) { // Float Register File
1101 reg_idx -= FP_Base_DepTag;
1102 setFloatRegBits(reg_idx, val, tid);
1103 } else {
1104 reg_idx -= Ctrl_Base_DepTag;
1105 setMiscReg(reg_idx, val, tid); // Misc. Register File
1106 }
1107 }
1108
1109 MiscReg
1110 InOrderCPU::readMiscRegNoEffect(int misc_reg, ThreadID tid)
1111 {
1112 return isa[tid].readMiscRegNoEffect(misc_reg);
1113 }
1114
1115 MiscReg
1116 InOrderCPU::readMiscReg(int misc_reg, ThreadID tid)
1117 {
1118 return isa[tid].readMiscReg(misc_reg, tcBase(tid));
1119 }
1120
1121 void
1122 InOrderCPU::setMiscRegNoEffect(int misc_reg, const MiscReg &val, ThreadID tid)
1123 {
1124 isa[tid].setMiscRegNoEffect(misc_reg, val);
1125 }
1126
1127 void
1128 InOrderCPU::setMiscReg(int misc_reg, const MiscReg &val, ThreadID tid)
1129 {
1130 isa[tid].setMiscReg(misc_reg, val, tcBase(tid));
1131 }
1132
1133
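// Append the instruction to the per-thread instruction list and return
// an iterator to the newly added entry.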
1134 InOrderCPU::ListIt
1135 InOrderCPU::addInst(DynInstPtr &inst)
1136 {
1137 ThreadID tid = inst->readTid();
1138
1139 instList[tid].push_back(inst);
1140
1141 return --(instList[tid].end());
1142 }
1143
1144 void
1145 InOrderCPU::updateContextSwitchStats()
1146 {
1147 // Set Average Stat Here, then reset to 0
1148 instsPerCtxtSwitch = instsPerSwitch;
1149 instsPerSwitch = 0;
1150 }
1151
1152
1153 void
1154 InOrderCPU::instDone(DynInstPtr inst, ThreadID tid)
1155 {
1156 // Set the CPU's PCs - This contributes to the precise state of the CPU
1157 // which can be used when restoring a thread to the CPU after any
1158 // type of context switching activity (fork, exception, etc.)
1159 setPC(inst->readPC(), tid);
1160 setNextPC(inst->readNextPC(), tid);
1161 setNextNPC(inst->readNextNPC(), tid);
1162
1163 if (inst->isControl()) {
1164 thread[tid]->lastGradIsBranch = true;
1165 thread[tid]->lastBranchPC = inst->readPC();
1166 thread[tid]->lastBranchNextPC = inst->readNextPC();
1167 thread[tid]->lastBranchNextNPC = inst->readNextNPC();
1168 } else {
1169 thread[tid]->lastGradIsBranch = false;
1170 }
1171
1172
1173 // Finalize Trace Data For Instruction
1174 if (inst->traceData) {
1175 //inst->traceData->setCycle(curTick);
1176 inst->traceData->setFetchSeq(inst->seqNum);
1177 //inst->traceData->setCPSeq(cpu->tcBase(tid)->numInst);
1178 inst->traceData->dump();
1179 delete inst->traceData;
1180 inst->traceData = NULL;
1181 }
1182
1183 // Increment active thread's instruction count
1184 instsPerSwitch++;
1185
1186 // Increment thread-state's instruction count
1187 thread[tid]->numInst++;
1188
1189 // Increment thread-state's instruction stats
1190 thread[tid]->numInsts++;
1191
1192 // Count committed insts per thread stats
1193 committedInsts[tid]++;
1194
1195 // Count total insts committed stat
1196 totalCommittedInsts++;
1197
1198 // Count SMT-committed insts per thread stat
1199 if (numActiveThreads() > 1) {
1200 smtCommittedInsts[tid]++;
1201 }
1202
1203 // Instruction-Mix Stats
1204 if (inst->isLoad()) {
1205 comLoads++;
1206 } else if (inst->isStore()) {
1207 comStores++;
1208 } else if (inst->isControl()) {
1209 comBranches++;
1210 } else if (inst->isNop()) {
1211 comNops++;
1212 } else if (inst->isNonSpeculative()) {
1213 comNonSpec++;
1214 } else if (inst->isInteger()) {
1215 comInts++;
1216 } else if (inst->isFloating()) {
1217 comFloats++;
1218 }
1219
1220 // Check for instruction-count-based events.
1221 comInstEventQueue[tid]->serviceEvents(thread[tid]->numInst);
1222
1223 // Broadcast to other resources an instruction
1224 // has been completed
1225 resPool->scheduleEvent((CPUEventType)ResourcePool::InstGraduated, inst,
1226 0, 0, tid);
1227
1228 // Finally, remove instruction from CPU
1229 removeInst(inst);
1230 }
1231
1232 // Currently unused, but intended to replace repetitive code with this
1233 // function call.
1234 void
1235 InOrderCPU::addToRemoveList(DynInstPtr &inst)
1236 {
1237 removeInstsThisCycle = true;
1238 if (!inst->isRemoveList()) {
1239 DPRINTF(InOrderCPU, "Pushing instruction [tid:%i] PC %#x "
1240 "[sn:%lli] to remove list\n",
1241 inst->threadNumber, inst->readPC(), inst->seqNum);
1242 inst->setRemoveList();
1243 removeList.push(inst->getInstListIt());
1244 } else {
1245 DPRINTF(InOrderCPU, "Ignoring instruction removal for [tid:%i] PC %#x "
1246 "[sn:%lli], already remove list\n",
1247 inst->threadNumber, inst->readPC(), inst->seqNum);
1248 }
1249
1250 }
1251
1252 void
1253 InOrderCPU::removeInst(DynInstPtr &inst)
1254 {
1255 DPRINTF(InOrderCPU, "Removing graduated instruction [tid:%i] PC %#x "
1256 "[sn:%lli]\n",
1257 inst->threadNumber, inst->readPC(), inst->seqNum);
1258
1259 removeInstsThisCycle = true;
1260
1261 // Remove the instruction.
1262 if (!inst->isRemoveList()) {
1263 DPRINTF(InOrderCPU, "Pushing instruction [tid:%i] PC %#x "
1264 "[sn:%lli] to remove list\n",
1265 inst->threadNumber, inst->readPC(), inst->seqNum);
1266 inst->setRemoveList();
1267 removeList.push(inst->getInstListIt());
1268 } else {
1269 DPRINTF(InOrderCPU, "Ignoring instruction removal for [tid:%i] PC %#x "
1270 "[sn:%lli], already on remove list\n",
1271 inst->threadNumber, inst->readPC(), inst->seqNum);
1272 }
1273
1274 }
1275
1276 void
1277 InOrderCPU::removeInstsUntil(const InstSeqNum &seq_num, ThreadID tid)
1278 {
1279 //assert(!instList[tid].empty());
1280
1281 removeInstsThisCycle = true;
1282
1283 ListIt inst_iter = instList[tid].end();
1284
1285 inst_iter--;
1286
1287 DPRINTF(InOrderCPU, "Squashing instructions from CPU instruction "
1288 "list that are from [tid:%i] and above [sn:%lli] (end=%lli).\n",
1289 tid, seq_num, (*inst_iter)->seqNum);
1290
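// Walk the list from youngest to oldest, squashing instructions with
// sequence numbers above seq_num; stop at the first older instruction
// or at the head of the list.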
1291 while ((*inst_iter)->seqNum > seq_num) {
1292
1293 bool break_loop = (inst_iter == instList[tid].begin());
1294
1295 squashInstIt(inst_iter, tid);
1296
1297 inst_iter--;
1298
1299 if (break_loop)
1300 break;
1301 }
1302 }
1303
1304
1305 inline void
1306 InOrderCPU::squashInstIt(const ListIt &instIt, ThreadID tid)
1307 {
1308 if ((*instIt)->threadNumber == tid) {
1309 DPRINTF(InOrderCPU, "Squashing instruction, "
1310 "[tid:%i] [sn:%lli] PC %#x\n",
1311 (*instIt)->threadNumber,
1312 (*instIt)->seqNum,
1313 (*instIt)->readPC());
1314
1315 (*instIt)->setSquashed();
1316
1317 if (!(*instIt)->isRemoveList()) {
1318 DPRINTF(InOrderCPU, "Pushing instruction [tid:%i] PC %#x "
1319 "[sn:%lli] to remove list\n",
1320 (*instIt)->threadNumber, (*instIt)->readPC(),
1321 (*instIt)->seqNum);
1322 (*instIt)->setRemoveList();
1323 removeList.push(instIt);
1324 } else {
1325 DPRINTF(InOrderCPU, "Ignoring instruction removal for [tid:%i]"
1326 " PC %#x [sn:%lli], already on remove list\n",
1327 (*instIt)->threadNumber, (*instIt)->readPC(),
1328 (*instIt)->seqNum);
1329 }
1330
1331 }
1332
1333 }
1334
1335
1336 void
1337 InOrderCPU::cleanUpRemovedInsts()
1338 {
1339 while (!removeList.empty()) {
1340 DPRINTF(InOrderCPU, "Removing instruction, "
1341 "[tid:%i] [sn:%lli] PC %#x\n",
1342 (*removeList.front())->threadNumber,
1343 (*removeList.front())->seqNum,
1344 (*removeList.front())->readPC());
1345
1346 DynInstPtr inst = *removeList.front();
1347 ThreadID tid = inst->threadNumber;
1348
1349 // Make Sure Resource Schedule Is Emptied Out
1350 ThePipeline::ResSchedule *inst_sched = &inst->resSched;
1351 while (!inst_sched->empty()) {
1352 ScheduleEntry* sch_entry = inst_sched->top();
1353 inst_sched->pop();
1354 delete sch_entry;
1355 }
1356
1357 // Remove From Register Dependency Map, If Necessary
1358 archRegDepMap[(*removeList.front())->threadNumber].
1359 remove((*removeList.front()));
1360
1361
1362 // Clear if Non-Speculative
1363 if (inst->staticInst &&
1364 inst->seqNum == nonSpecSeqNum[tid] &&
1365 nonSpecInstActive[tid] == true) {
1366 nonSpecInstActive[tid] = false;
1367 }
1368
1369 instList[tid].erase(removeList.front());
1370
1371 removeList.pop();
1372 }
1373
1374 removeInstsThisCycle = false;
1375 }
1376
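// Delete any resource requests that were marked for removal this cycle.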
1377 void
1378 InOrderCPU::cleanUpRemovedReqs()
1379 {
1380 while (!reqRemoveList.empty()) {
1381 ResourceRequest *res_req = reqRemoveList.front();
1382
1383 DPRINTF(RefCount, "[tid:%i] [sn:%lli]: Removing Request "
1384 "[stage_num:%i] [res:%s] [slot:%i] [completed:%i].\n",
1385 res_req->inst->threadNumber,
1386 res_req->inst->seqNum,
1387 res_req->getStageNum(),
1388 res_req->res->name(),
1389 (res_req->isCompleted()) ?
1390 res_req->getComplSlot() : res_req->getSlot(),
1391 res_req->isCompleted());
1392
1393 reqRemoveList.pop();
1394
1395 delete res_req;
1396 }
1397 }
1398
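// Delete any CPU events that were marked for removal this cycle.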
1399 void
1400 InOrderCPU::cleanUpRemovedEvents()
1401 {
1402 while (!cpuEventRemoveList.empty()) {
1403 Event *cpu_event = cpuEventRemoveList.front();
1404 cpuEventRemoveList.pop();
1405 delete cpu_event;
1406 }
1407 }
1408
1409
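// Debugging helper: print every instruction currently on thread 0's
// instruction list.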
1410 void
1411 InOrderCPU::dumpInsts()
1412 {
1413 int num = 0;
1414
1415 ListIt inst_list_it = instList[0].begin();
1416
1417 cprintf("Dumping Instruction List\n");
1418
1419 while (inst_list_it != instList[0].end()) {
1420 cprintf("Instruction:%i\nPC:%#x\n[tid:%i]\n[sn:%lli]\nIssued:%i\n"
1421 "Squashed:%i\n\n",
1422 num, (*inst_list_it)->readPC(), (*inst_list_it)->threadNumber,
1423 (*inst_list_it)->seqNum, (*inst_list_it)->isIssued(),
1424 (*inst_list_it)->isSquashed());
1425 inst_list_it++;
1426 ++num;
1427 }
1428 }
1429
1430 void
1431 InOrderCPU::wakeCPU()
1432 {
1433 if (/*activityRec.active() || */tickEvent.scheduled()) {
1434 DPRINTF(Activity, "CPU already running.\n");
1435 return;
1436 }
1437
1438 DPRINTF(Activity, "Waking up CPU\n");
1439
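// Credit the cycles spent asleep to the CPU and per-stage idle-cycle
// counters and to numCycles so the statistics stay consistent.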
1440 Tick extra_cycles = tickToCycles((curTick - 1) - lastRunningCycle);
1441
1442 idleCycles += extra_cycles;
1443 for (int stage_num = 0; stage_num < NumStages; stage_num++) {
1444 pipelineStage[stage_num]->idleCycles += extra_cycles;
1445 }
1446
1447 numCycles += extra_cycles;
1448
1449 mainEventQueue.schedule(&tickEvent, nextCycle(curTick));
1450 }
1451
1452 #if FULL_SYSTEM
1453
1454 void
1455 InOrderCPU::wakeup()
1456 {
1457 if (this->thread[0]->status() != ThreadContext::Suspended)
1458 return;
1459
1460 this->wakeCPU();
1461
1462 DPRINTF(Quiesce, "Suspended Processor woken\n");
1463 this->threadContexts[0]->activate();
1464 }
1465 #endif
1466
1467 #if !FULL_SYSTEM
1468 void
1469 InOrderCPU::syscall(int64_t callnum, ThreadID tid)
1470 {
1471 DPRINTF(InOrderCPU, "[tid:%i] Executing syscall().\n\n", tid);
1472
1473 DPRINTF(Activity,"Activity: syscall() called.\n");
1474
1475 // Temporarily increase this by one to account for the syscall
1476 // instruction.
1477 ++(this->thread[tid]->funcExeInst);
1478
1479 // Execute the actual syscall.
1480 this->thread[tid]->syscall(callnum);
1481
1482 // Decrease funcExeInst by one as the normal commit will handle
1483 // incrementing it.
1484 --(this->thread[tid]->funcExeInst);
1485
1486 // Clear Non-Speculative Block Variable
1487 nonSpecInstActive[tid] = false;
1488 }
1489 #endif
1490
1491 void
1492 InOrderCPU::prefetch(DynInstPtr inst)
1493 {
1494 Resource *mem_res = resPool->getResource(dataPortIdx);
1495 return mem_res->prefetch(inst);
1496 }
1497
1498 void
1499 InOrderCPU::writeHint(DynInstPtr inst)
1500 {
1501 Resource *mem_res = resPool->getResource(dataPortIdx);
1502 return mem_res->writeHint(inst);
1503 }
1504
1505
1506 TheISA::TLB*
1507 InOrderCPU::getITBPtr()
1508 {
1509 CacheUnit *itb_res =
1510 dynamic_cast<CacheUnit*>(resPool->getResource(fetchPortIdx));
1511 return itb_res->tlb();
1512 }
1513
1514
1515 TheISA::TLB*
1516 InOrderCPU::getDTBPtr()
1517 {
1518 CacheUnit *dtb_res =
1519 dynamic_cast<CacheUnit*>(resPool->getResource(dataPortIdx));
1520 return dtb_res->tlb();
1521 }
1522
1523 Fault
1524 InOrderCPU::read(DynInstPtr inst, Addr addr,
1525 uint8_t *data, unsigned size, unsigned flags)
1526 {
1527 //@TODO: Generalize name "CacheUnit" to "MemUnit" just in case
1528 // you want to run w/out caches?
1529 CacheUnit *cache_res =
1530 dynamic_cast<CacheUnit*>(resPool->getResource(dataPortIdx));
1531
1532 return cache_res->read(inst, addr, data, size, flags);
1533 }
1534
1535 Fault
1536 InOrderCPU::write(DynInstPtr inst, uint8_t *data, unsigned size,
1537 Addr addr, unsigned flags, uint64_t *write_res)
1538 {
1539 //@TODO: Generalize name "CacheUnit" to "MemUnit" just in case
1540 // you want to run w/out caches?
1541 CacheUnit *cache_res =
1542 dynamic_cast<CacheUnit*>(resPool->getResource(dataPortIdx));
1543 return cache_res->write(inst, data, size, addr, flags, write_res);
1544 }