1 /*
2 * Copyright (c) 2007 MIPS Technologies, Inc.
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 *
28 * Authors: Korey Sewell
29 *
30 */
31
32 #include <algorithm>
33
34 #include "arch/utility.hh"
35 #include "config/full_system.hh"
36 #include "config/the_isa.hh"
37 #include "cpu/activity.hh"
38 #include "cpu/base.hh"
39 #include "cpu/exetrace.hh"
40 #include "cpu/inorder/cpu.hh"
41 #include "cpu/inorder/first_stage.hh"
42 #include "cpu/inorder/inorder_dyn_inst.hh"
43 #include "cpu/inorder/pipeline_traits.hh"
44 #include "cpu/inorder/resource_pool.hh"
45 #include "cpu/inorder/resources/resource_list.hh"
46 #include "cpu/inorder/thread_context.hh"
47 #include "cpu/inorder/thread_state.hh"
48 #include "cpu/simple_thread.hh"
49 #include "cpu/thread_context.hh"
50 #include "mem/translating_port.hh"
51 #include "params/InOrderCPU.hh"
52 #include "sim/process.hh"
53 #include "sim/stat_control.hh"
54
55 #if FULL_SYSTEM
56 #include "cpu/quiesce_event.hh"
57 #include "sim/system.hh"
58 #endif
59
60 #if THE_ISA == ALPHA_ISA
61 #include "arch/alpha/osfpal.hh"
62 #endif
63
64 using namespace std;
65 using namespace TheISA;
66 using namespace ThePipeline;
67
68 InOrderCPU::TickEvent::TickEvent(InOrderCPU *c)
69 : Event(CPU_Tick_Pri), cpu(c)
70 { }
71
72
73 void
74 InOrderCPU::TickEvent::process()
75 {
76 cpu->tick();
77 }
78
79
80 const char *
81 InOrderCPU::TickEvent::description()
82 {
83 return "InOrderCPU tick event";
84 }
85
86 InOrderCPU::CPUEvent::CPUEvent(InOrderCPU *_cpu, CPUEventType e_type,
87 Fault fault, ThreadID _tid, DynInstPtr inst,
88 unsigned event_pri_offset)
89 : Event(Event::Priority((unsigned int)CPU_Tick_Pri + event_pri_offset)),
90 cpu(_cpu)
91 {
92 setEvent(e_type, fault, _tid, inst);
93 }
94
95
96 std::string InOrderCPU::eventNames[NumCPUEvents] =
97 {
98 "ActivateThread",
99 "ActivateNextReadyThread",
100 "DeactivateThread",
101 "HaltThread",
102 "SuspendThread",
103 "Trap",
104 "InstGraduated",
105 "SquashFromMemStall",
106 "UpdatePCs"
107 };
108
109 void
110 InOrderCPU::CPUEvent::process()
111 {
112 switch (cpuEventType)
113 {
114 case ActivateThread:
115 cpu->activateThread(tid);
116 break;
117
118 case ActivateNextReadyThread:
119 cpu->activateNextReadyThread();
120 break;
121
122 case DeactivateThread:
123 cpu->deactivateThread(tid);
124 break;
125
126 case HaltThread:
127 cpu->haltThread(tid);
128 break;
129
130 case SuspendThread:
131 cpu->suspendThread(tid);
132 break;
133
134 case SquashFromMemStall:
135 cpu->squashDueToMemStall(inst->squashingStage, inst->seqNum, tid);
136 break;
137
138 case Trap:
139 cpu->trapCPU(fault, tid);
140 break;
141
142 default:
143 fatal("Unrecognized Event Type %s", eventNames[cpuEventType]);
144 }
145
146 cpu->cpuEventRemoveList.push(this);
147 }
148
149
150
151 const char *
152 InOrderCPU::CPUEvent::description()
153 {
154 return "InOrderCPU event";
155 }
156
157 void
158 InOrderCPU::CPUEvent::scheduleEvent(int delay)
159 {
160 if (squashed())
161 mainEventQueue.reschedule(this, cpu->nextCycle(curTick +
162 cpu->ticks(delay)));
163 else if (!scheduled())
164 mainEventQueue.schedule(this, cpu->nextCycle(curTick +
165 cpu->ticks(delay)));
166 }
167
168 void
169 InOrderCPU::CPUEvent::unscheduleEvent()
170 {
171 if (scheduled())
172 squash();
173 }
174
175 InOrderCPU::InOrderCPU(Params *params)
176 : BaseCPU(params),
177 cpu_id(params->cpu_id),
178 coreType("default"),
179 _status(Idle),
180 tickEvent(this),
181       timeBuffer(2, 2),
182 removeInstsThisCycle(false),
183 activityRec(params->name, NumStages, 10, params->activity),
184 #if FULL_SYSTEM
185 system(params->system),
186 physmem(system->physmem),
187 #endif // FULL_SYSTEM
188 #ifdef DEBUG
189 cpuEventNum(0),
190 resReqCount(0),
191 #endif // DEBUG
192 switchCount(0),
193 deferRegistration(false/*params->deferRegistration*/),
194 stageTracing(params->stageTracing),
195 instsPerSwitch(0)
196 {
197 ThreadID active_threads;
198 cpu_params = params;
199
200 resPool = new ResourcePool(this, params);
201
202 // Resize for Multithreading CPUs
203 thread.resize(numThreads);
204
205 #if FULL_SYSTEM
206 active_threads = 1;
207 #else
208 active_threads = params->workload.size();
209
210 if (active_threads > MaxThreads) {
211         panic("Workload size too large. Increase the 'MaxThreads' "
212               "in your InOrder implementation or "
213               "edit your workload size.");
214 }
215
216
217 if (active_threads > 1) {
218 threadModel = (InOrderCPU::ThreadModel) params->threadModel;
219
220 if (threadModel == SMT) {
221 DPRINTF(InOrderCPU, "Setting Thread Model to SMT.\n");
222 } else if (threadModel == SwitchOnCacheMiss) {
223 DPRINTF(InOrderCPU, "Setting Thread Model to "
224 "Switch On Cache Miss\n");
225 }
226
227 } else {
228 threadModel = Single;
229 }
230
231
232
233 #endif
234
235 // Bind the fetch & data ports from the resource pool.
236 fetchPortIdx = resPool->getPortIdx(params->fetchMemPort);
237 if (fetchPortIdx == 0) {
238 fatal("Unable to find port to fetch instructions from.\n");
239 }
240
241 dataPortIdx = resPool->getPortIdx(params->dataMemPort);
242 if (dataPortIdx == 0) {
243 fatal("Unable to find port for data.\n");
244 }
245
246 for (ThreadID tid = 0; tid < numThreads; ++tid) {
247 #if FULL_SYSTEM
248 // SMT is not supported in FS mode yet.
249 assert(numThreads == 1);
250 thread[tid] = new Thread(this, 0);
251 #else
252 if (tid < (ThreadID)params->workload.size()) {
253 DPRINTF(InOrderCPU, "Workload[%i] process is %#x\n",
254 tid, params->workload[tid]->prog_fname);
255 thread[tid] =
256 new Thread(this, tid, params->workload[tid]);
257 } else {
258             // Allocate an empty thread so M5 can use it later
259             // when scheduling threads to the CPU
260 Process* dummy_proc = params->workload[0];
261 thread[tid] = new Thread(this, tid, dummy_proc);
262 }
263
264 // Eventually set this with parameters...
265 asid[tid] = tid;
266 #endif
267
268 // Setup the TC that will serve as the interface to the threads/CPU.
269 InOrderThreadContext *tc = new InOrderThreadContext;
270 tc->cpu = this;
271 tc->thread = thread[tid];
272
273 // Give the thread the TC.
274 thread[tid]->tc = tc;
275 thread[tid]->setFuncExeInst(0);
276 globalSeqNum[tid] = 1;
277
278 // Add the TC to the CPU's list of TC's.
279 this->threadContexts.push_back(tc);
280 }
281
282 // Initialize TimeBuffer Stage Queues
283 for (int stNum=0; stNum < NumStages - 1; stNum++) {
284 stageQueue[stNum] = new StageQueue(NumStages, NumStages);
285 stageQueue[stNum]->id(stNum);
286 }
287
288
289 // Set Up Pipeline Stages
290 for (int stNum=0; stNum < NumStages; stNum++) {
291 if (stNum == 0)
292 pipelineStage[stNum] = new FirstStage(params, stNum);
293 else
294 pipelineStage[stNum] = new PipelineStage(params, stNum);
295
296 pipelineStage[stNum]->setCPU(this);
297 pipelineStage[stNum]->setActiveThreads(&activeThreads);
298 pipelineStage[stNum]->setTimeBuffer(&timeBuffer);
299
300 // Take Care of 1st/Nth stages
301 if (stNum > 0)
302 pipelineStage[stNum]->setPrevStageQueue(stageQueue[stNum - 1]);
303 if (stNum < NumStages - 1)
304 pipelineStage[stNum]->setNextStageQueue(stageQueue[stNum]);
305 }
306
307 // Initialize thread specific variables
308 for (ThreadID tid = 0; tid < numThreads; tid++) {
309 archRegDepMap[tid].setCPU(this);
310
311 nonSpecInstActive[tid] = false;
312 nonSpecSeqNum[tid] = 0;
313
314 squashSeqNum[tid] = MaxAddr;
315 lastSquashCycle[tid] = 0;
316
317 memset(intRegs[tid], 0, sizeof(intRegs[tid]));
318 memset(floatRegs.i[tid], 0, sizeof(floatRegs.i[tid]));
319 isa[tid].clear();
320
321 isa[tid].expandForMultithreading(numThreads, 1/*numVirtProcs*/);
322
323 // Define dummy instructions and resource requests to be used.
324 dummyInst[tid] = new InOrderDynInst(this,
325 thread[tid],
326 0,
327 tid,
328 asid[tid]);
329
330 dummyReq[tid] = new ResourceRequest(resPool->getResource(0),
331 dummyInst[tid],
332 0,
333 0,
334 0,
335 0);
336 }
337
338 dummyReqInst = new InOrderDynInst(this, NULL, 0, 0, 0);
339 dummyReqInst->setSquashed();
340
341 dummyBufferInst = new InOrderDynInst(this, NULL, 0, 0, 0);
342 dummyBufferInst->setSquashed();
343
344 lastRunningCycle = curTick;
345
346 // Reset CPU to reset state.
347 #if FULL_SYSTEM
348 Fault resetFault = new ResetFault();
349 resetFault->invoke(tcBase());
350 #else
351 reset();
352 #endif
353
354 dummyBufferInst->resetInstCount();
355
356 // Schedule First Tick Event, CPU will reschedule itself from here on out.
357 scheduleTickEvent(0);
358 }
359
360 InOrderCPU::~InOrderCPU()
361 {
362 delete resPool;
363 }
364
365
366 void
367 InOrderCPU::regStats()
368 {
369 /* Register the Resource Pool's stats here.*/
370 resPool->regStats();
371
372 /* Register for each Pipeline Stage */
373 for (int stage_num=0; stage_num < ThePipeline::NumStages; stage_num++) {
374 pipelineStage[stage_num]->regStats();
375 }
376
377 /* Register any of the InOrderCPU's stats here.*/
378 instsPerCtxtSwitch
379 .name(name() + ".instsPerContextSwitch")
380 .desc("Instructions Committed Per Context Switch")
381 .prereq(instsPerCtxtSwitch);
382
383 numCtxtSwitches
384 .name(name() + ".contextSwitches")
385 .desc("Number of context switches");
386
387 comLoads
388 .name(name() + ".comLoads")
389 .desc("Number of Load instructions committed");
390
391 comStores
392 .name(name() + ".comStores")
393 .desc("Number of Store instructions committed");
394
395 comBranches
396 .name(name() + ".comBranches")
397         .desc("Number of Branch instructions committed");
398
399 comNops
400 .name(name() + ".comNops")
401 .desc("Number of Nop instructions committed");
402
403 comNonSpec
404 .name(name() + ".comNonSpec")
405 .desc("Number of Non-Speculative instructions committed");
406
407 comInts
408 .name(name() + ".comInts")
409 .desc("Number of Integer instructions committed");
410
411 comFloats
412 .name(name() + ".comFloats")
413 .desc("Number of Floating Point instructions committed");
414
415 timesIdled
416 .name(name() + ".timesIdled")
417 .desc("Number of times that the entire CPU went into an idle state and"
418 " unscheduled itself")
419 .prereq(timesIdled);
420
421 idleCycles
422 .name(name() + ".idleCycles")
423         .desc("Number of cycles the CPU's stages were not processed");
424
425 runCycles
426 .name(name() + ".runCycles")
427         .desc("Number of cycles the CPU's stages were processed");
428
429 activity
430 .name(name() + ".activity")
431 .desc("Percentage of cycles cpu is active")
432 .precision(6);
433 activity = (runCycles / numCycles) * 100;
434
435 threadCycles
436 .init(numThreads)
437 .name(name() + ".threadCycles")
438 .desc("Total Number of Cycles A Thread Was Active in CPU (Per-Thread)");
439
440 smtCycles
441 .name(name() + ".smtCycles")
442 .desc("Total number of cycles that the CPU was in SMT-mode");
443
444 committedInsts
445 .init(numThreads)
446 .name(name() + ".committedInsts")
447 .desc("Number of Instructions Simulated (Per-Thread)");
448
449 smtCommittedInsts
450 .init(numThreads)
451 .name(name() + ".smtCommittedInsts")
452 .desc("Number of SMT Instructions Simulated (Per-Thread)");
453
454 totalCommittedInsts
455 .name(name() + ".committedInsts_total")
456 .desc("Number of Instructions Simulated (Total)");
457
458 cpi
459 .name(name() + ".cpi")
460 .desc("CPI: Cycles Per Instruction (Per-Thread)")
461 .precision(6);
462 cpi = numCycles / committedInsts;
463
464 smtCpi
465 .name(name() + ".smt_cpi")
466 .desc("CPI: Total SMT-CPI")
467 .precision(6);
468 smtCpi = smtCycles / smtCommittedInsts;
469
470 totalCpi
471 .name(name() + ".cpi_total")
472 .desc("CPI: Total CPI of All Threads")
473 .precision(6);
474 totalCpi = numCycles / totalCommittedInsts;
475
476 ipc
477 .name(name() + ".ipc")
478 .desc("IPC: Instructions Per Cycle (Per-Thread)")
479 .precision(6);
480 ipc = committedInsts / numCycles;
481
482 smtIpc
483 .name(name() + ".smt_ipc")
484 .desc("IPC: Total SMT-IPC")
485 .precision(6);
486 smtIpc = smtCommittedInsts / smtCycles;
487
488 totalIpc
489 .name(name() + ".ipc_total")
490 .desc("IPC: Total IPC of All Threads")
491 .precision(6);
492 totalIpc = totalCommittedInsts / numCycles;
493
494 BaseCPU::regStats();
495 }
496
497
498 void
499 InOrderCPU::tick()
500 {
501     DPRINTF(InOrderCPU, "\n\nInOrderCPU: Ticking main InOrderCPU loop.\n");
502
503 ++numCycles;
504
505 bool pipes_idle = true;
506
507 //Tick each of the stages
508 for (int stNum=NumStages - 1; stNum >= 0 ; stNum--) {
509 pipelineStage[stNum]->tick();
510
511 pipes_idle = pipes_idle && pipelineStage[stNum]->idle;
512 }
513
514 if (pipes_idle)
515 idleCycles++;
516 else
517 runCycles++;
518
519 // Now advance the time buffers one tick
520 timeBuffer.advance();
521 for (int sqNum=0; sqNum < NumStages - 1; sqNum++) {
522 stageQueue[sqNum]->advance();
523 }
524 activityRec.advance();
525
526     // Remove any squashed requests, events, or insts now
527 cleanUpRemovedReqs();
528 cleanUpRemovedEvents();
529 cleanUpRemovedInsts();
530
531 // Re-schedule CPU for this cycle
532 if (!tickEvent.scheduled()) {
533 if (_status == SwitchedOut) {
534 // increment stat
535 lastRunningCycle = curTick;
536 } else if (!activityRec.active()) {
537 DPRINTF(InOrderCPU, "sleeping CPU.\n");
538 lastRunningCycle = curTick;
539 timesIdled++;
540 } else {
541 //Tick next_tick = curTick + cycles(1);
542 //tickEvent.schedule(next_tick);
543 mainEventQueue.schedule(&tickEvent, nextCycle(curTick + 1));
544 DPRINTF(InOrderCPU, "Scheduled CPU for next tick @ %i.\n",
545 nextCycle(curTick + 1));
546 }
547 }
548
549 tickThreadStats();
550 updateThreadPriority();
551 }
552
553
554 void
555 InOrderCPU::init()
556 {
557 if (!deferRegistration) {
558 registerThreadContexts();
559 }
560
561 // Set inSyscall so that the CPU doesn't squash when initially
562 // setting up registers.
563 for (ThreadID tid = 0; tid < numThreads; ++tid)
564 thread[tid]->inSyscall = true;
565
566 #if FULL_SYSTEM
567 for (ThreadID tid = 0; tid < numThreads; tid++) {
568 ThreadContext *src_tc = threadContexts[tid];
569 TheISA::initCPU(src_tc, src_tc->contextId());
570 }
571 #endif
572
573 // Clear inSyscall.
574 for (ThreadID tid = 0; tid < numThreads; ++tid)
575 thread[tid]->inSyscall = false;
576
577     // Call Initialization Routine for Resource Pool
578 resPool->init();
579 }
580
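// Reset each thread's ISA state. Used in place of invoking a ResetFault
// when the CPU is not built for full-system mode (see the constructor).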
581 void
582 InOrderCPU::reset()
583 {
584 for (int i = 0; i < numThreads; i++) {
585 isa[i].reset(coreType, numThreads,
586 1/*numVirtProcs*/, dynamic_cast<BaseCPU*>(this));
587 }
588 }
589
590 Port*
591 InOrderCPU::getPort(const std::string &if_name, int idx)
592 {
593 return resPool->getPort(if_name, idx);
594 }
595
596 #if FULL_SYSTEM
597 Fault
598 InOrderCPU::hwrei(ThreadID tid)
599 {
600 panic("hwrei: Unimplemented");
601
602 return NoFault;
603 }
604
605
606 bool
607 InOrderCPU::simPalCheck(int palFunc, ThreadID tid)
608 {
609 panic("simPalCheck: Unimplemented");
610
611 return true;
612 }
613
614
615 Fault
616 InOrderCPU::getInterrupts()
617 {
618 // Check if there are any outstanding interrupts
619 return this->interrupts->getInterrupt(this->threadContexts[0]);
620 }
621
622
623 void
624 InOrderCPU::processInterrupts(Fault interrupt)
625 {
626 // Check for interrupts here. For now can copy the code that
627 // exists within isa_fullsys_traits.hh. Also assume that thread 0
628 // is the one that handles the interrupts.
629 // @todo: Possibly consolidate the interrupt checking code.
630 // @todo: Allow other threads to handle interrupts.
631
632 assert(interrupt != NoFault);
633 this->interrupts->updateIntrInfo(this->threadContexts[0]);
634
635 DPRINTF(InOrderCPU, "Interrupt %s being handled\n", interrupt->name());
636 this->trap(interrupt, 0);
637 }
638
639
640 void
641 InOrderCPU::updateMemPorts()
642 {
643 // Update all ThreadContext's memory ports (Functional/Virtual
644 // Ports)
645 ThreadID size = thread.size();
646 for (ThreadID i = 0; i < size; ++i)
647 thread[i]->connectMemPorts(thread[i]->getTC());
648 }
649 #endif
650
651 void
652 InOrderCPU::trap(Fault fault, ThreadID tid, int delay)
653 {
654     // Squash Pipeline during TRAP
655 scheduleCpuEvent(Trap, fault, tid, dummyInst[tid], delay);
656 }
657
658 void
659 InOrderCPU::trapCPU(Fault fault, ThreadID tid)
660 {
661 fault->invoke(tcBase(tid));
662 }
663
664 void
665 InOrderCPU::squashFromMemStall(DynInstPtr inst, ThreadID tid, int delay)
666 {
667 scheduleCpuEvent(SquashFromMemStall, NoFault, tid, inst, delay);
668 }
669
670
671 void
672 InOrderCPU::squashDueToMemStall(int stage_num, InstSeqNum seq_num,
673 ThreadID tid)
674 {
675 DPRINTF(InOrderCPU, "Squashing Pipeline Stages Due to Memory Stall...\n");
676
677     // Squash all instructions in each stage, including the
678     // instruction that caused the squash (seq_num - 1).
679     // NOTE: The stage bandwidth needs to be cleared, so that's why
680     // the stalling instruction is squashed as well. The stalled
681     // instruction was previously placed in another intermediate buffer
682     // while its stall is being handled.
683 InstSeqNum squash_seq_num = seq_num - 1;
684
685 for (int stNum=stage_num; stNum >= 0 ; stNum--) {
686 pipelineStage[stNum]->squashDueToMemStall(squash_seq_num, tid);
687 }
688 }
689
690 void
691 InOrderCPU::scheduleCpuEvent(CPUEventType c_event, Fault fault,
692 ThreadID tid, DynInstPtr inst,
693 unsigned delay, unsigned event_pri_offset)
694 {
695 CPUEvent *cpu_event = new CPUEvent(this, c_event, fault, tid, inst,
696 event_pri_offset);
697
698 Tick sked_tick = nextCycle(curTick + ticks(delay));
699 if (delay >= 0) {
700 DPRINTF(InOrderCPU, "Scheduling CPU Event (%s) for cycle %i, [tid:%i].\n",
701 eventNames[c_event], curTick + delay, tid);
702 mainEventQueue.schedule(cpu_event, sked_tick);
703 } else {
704 cpu_event->process();
705 cpuEventRemoveList.push(cpu_event);
706 }
707
708 // Broadcast event to the Resource Pool
709 // Need to reset tid just in case this is a dummy instruction
710 inst->setTid(tid);
711 resPool->scheduleEvent(c_event, inst, 0, 0, tid);
712 }
713
714 bool
715 InOrderCPU::isThreadActive(ThreadID tid)
716 {
717 list<ThreadID>::iterator isActive =
718 std::find(activeThreads.begin(), activeThreads.end(), tid);
719
720 return (isActive != activeThreads.end());
721 }
722
723 bool
724 InOrderCPU::isThreadReady(ThreadID tid)
725 {
726 list<ThreadID>::iterator isReady =
727 std::find(readyThreads.begin(), readyThreads.end(), tid);
728
729 return (isReady != readyThreads.end());
730 }
731
732 bool
733 InOrderCPU::isThreadSuspended(ThreadID tid)
734 {
735 list<ThreadID>::iterator isSuspended =
736 std::find(suspendedThreads.begin(), suspendedThreads.end(), tid);
737
738 return (isSuspended != suspendedThreads.end());
739 }
740
741 void
742 InOrderCPU::activateNextReadyThread()
743 {
744 if (readyThreads.size() >= 1) {
745 ThreadID ready_tid = readyThreads.front();
746
747 // Activate in Pipeline
748 activateThread(ready_tid);
749
750 // Activate in Resource Pool
751 resPool->activateAll(ready_tid);
752
753 list<ThreadID>::iterator ready_it =
754 std::find(readyThreads.begin(), readyThreads.end(), ready_tid);
755 readyThreads.erase(ready_it);
756 } else {
757 DPRINTF(InOrderCPU,
758                 "Attempting to activate new thread, but no ready threads to "
759                 "activate.\n");
760 DPRINTF(InOrderCPU,
761 "Unable to switch to next active thread.\n");
762 }
763 }
764
765 void
766 InOrderCPU::activateThread(ThreadID tid)
767 {
768 if (isThreadSuspended(tid)) {
769 DPRINTF(InOrderCPU,
770 "Removing [tid:%i] from suspended threads list.\n", tid);
771
772 list<ThreadID>::iterator susp_it =
773 std::find(suspendedThreads.begin(), suspendedThreads.end(),
774 tid);
775 suspendedThreads.erase(susp_it);
776 }
777
778 if (threadModel == SwitchOnCacheMiss &&
779 numActiveThreads() == 1) {
780 DPRINTF(InOrderCPU,
781 "Ignoring activation of [tid:%i], since [tid:%i] is "
782 "already running.\n", tid, activeThreadId());
783
784 DPRINTF(InOrderCPU,"Placing [tid:%i] on ready threads list\n",
785 tid);
786
787 readyThreads.push_back(tid);
788
789 } else if (!isThreadActive(tid)) {
790 DPRINTF(InOrderCPU,
791 "Adding [tid:%i] to active threads list.\n", tid);
792 activeThreads.push_back(tid);
793
794 activateThreadInPipeline(tid);
795
796 thread[tid]->lastActivate = curTick;
797
798 tcBase(tid)->setStatus(ThreadContext::Active);
799
800 wakeCPU();
801
802 numCtxtSwitches++;
803 }
804 }
805
806 void
807 InOrderCPU::activateThreadInPipeline(ThreadID tid)
808 {
809 for (int stNum=0; stNum < NumStages; stNum++) {
810 pipelineStage[stNum]->activateThread(tid);
811 }
812 }
813
814 void
815 InOrderCPU::deactivateContext(ThreadID tid, int delay)
816 {
817 DPRINTF(InOrderCPU,"[tid:%i]: Deactivating ...\n", tid);
818
819 scheduleCpuEvent(DeactivateThread, NoFault, tid, dummyInst[tid], delay);
820
821 // Be sure to signal that there's some activity so the CPU doesn't
822 // deschedule itself.
823 activityRec.activity();
824
825 _status = Running;
826 }
827
828 void
829 InOrderCPU::deactivateThread(ThreadID tid)
830 {
831 DPRINTF(InOrderCPU, "[tid:%i]: Calling deactivate thread.\n", tid);
832
833 if (isThreadActive(tid)) {
834 DPRINTF(InOrderCPU,"[tid:%i]: Removing from active threads list\n",
835 tid);
836 list<ThreadID>::iterator thread_it =
837 std::find(activeThreads.begin(), activeThreads.end(), tid);
838
839 removePipelineStalls(*thread_it);
840
841 activeThreads.erase(thread_it);
842
843 // Ideally, this should be triggered from the
844 // suspendContext/Thread functions
845 tcBase(tid)->setStatus(ThreadContext::Suspended);
846 }
847
848 assert(!isThreadActive(tid));
849 }
850
851 void
852 InOrderCPU::removePipelineStalls(ThreadID tid)
853 {
854 DPRINTF(InOrderCPU,"[tid:%i]: Removing all pipeline stalls\n",
855 tid);
856
857 for (int stNum = 0; stNum < NumStages ; stNum++) {
858 pipelineStage[stNum]->removeStalls(tid);
859 }
860
861 }
862
863 void
864 InOrderCPU::updateThreadPriority()
865 {
866 if (activeThreads.size() > 1)
867 {
868 //DEFAULT TO ROUND ROBIN SCHEME
869 //e.g. Move highest priority to end of thread list
870 list<ThreadID>::iterator list_begin = activeThreads.begin();
871 list<ThreadID>::iterator list_end = activeThreads.end();
872
873 unsigned high_thread = *list_begin;
874
875 activeThreads.erase(list_begin);
876
877 activeThreads.push_back(high_thread);
878 }
879 }
880
881 inline void
882 InOrderCPU::tickThreadStats()
883 {
884 /** Keep track of cycles that each thread is active */
885 list<ThreadID>::iterator thread_it = activeThreads.begin();
886 while (thread_it != activeThreads.end()) {
887 threadCycles[*thread_it]++;
888 thread_it++;
889 }
890
891 // Keep track of cycles where SMT is active
892 if (activeThreads.size() > 1) {
893 smtCycles++;
894 }
895 }
896
897 void
898 InOrderCPU::activateContext(ThreadID tid, int delay)
899 {
900 DPRINTF(InOrderCPU,"[tid:%i]: Activating ...\n", tid);
901
902
903 scheduleCpuEvent(ActivateThread, NoFault, tid, dummyInst[tid], delay);
904
905 // Be sure to signal that there's some activity so the CPU doesn't
906 // deschedule itself.
907 activityRec.activity();
908
909 _status = Running;
910 }
911
912 void
913 InOrderCPU::activateNextReadyContext(int delay)
914 {
915 DPRINTF(InOrderCPU,"Activating next ready thread\n");
916
917 // NOTE: Add 5 to the event priority so that we always activate
918     // threads after we've finished deactivating, squashing, etc.
919     // other threads.
920 scheduleCpuEvent(ActivateNextReadyThread, NoFault, 0/*tid*/, dummyInst[0],
921 delay, 5);
922
923 // Be sure to signal that there's some activity so the CPU doesn't
924 // deschedule itself.
925 activityRec.activity();
926
927 _status = Running;
928 }
929
930 void
931 InOrderCPU::haltContext(ThreadID tid, int delay)
932 {
933 DPRINTF(InOrderCPU, "[tid:%i]: Calling Halt Context...\n", tid);
934
935 scheduleCpuEvent(HaltThread, NoFault, tid, dummyInst[tid], delay);
936
937 activityRec.activity();
938 }
939
940 void
941 InOrderCPU::haltThread(ThreadID tid)
942 {
943 DPRINTF(InOrderCPU, "[tid:%i]: Placing on Halted Threads List...\n", tid);
944 deactivateThread(tid);
945 squashThreadInPipeline(tid);
946 haltedThreads.push_back(tid);
947
948 tcBase(tid)->setStatus(ThreadContext::Halted);
949
950 if (threadModel == SwitchOnCacheMiss) {
951 activateNextReadyContext();
952 }
953 }
954
955 void
956 InOrderCPU::suspendContext(ThreadID tid, int delay)
957 {
958 scheduleCpuEvent(SuspendThread, NoFault, tid, dummyInst[tid], delay);
959 }
960
961 void
962 InOrderCPU::suspendThread(ThreadID tid)
963 {
964 DPRINTF(InOrderCPU, "[tid:%i]: Placing on Suspended Threads List...\n",
965 tid);
966 deactivateThread(tid);
967 suspendedThreads.push_back(tid);
968 thread[tid]->lastSuspend = curTick;
969
970 tcBase(tid)->setStatus(ThreadContext::Suspended);
971 }
972
973 void
974 InOrderCPU::squashThreadInPipeline(ThreadID tid)
975 {
976 //Squash all instructions in each stage
977 for (int stNum=NumStages - 1; stNum >= 0 ; stNum--) {
978 pipelineStage[stNum]->squash(0 /*seq_num*/, tid);
979 }
980 }
981
982 PipelineStage*
983 InOrderCPU::getPipeStage(int stage_num)
984 {
985 return pipelineStage[stage_num];
986 }
987
988 uint64_t
989 InOrderCPU::readPC(ThreadID tid)
990 {
991 return PC[tid];
992 }
993
994
995 void
996 InOrderCPU::setPC(Addr new_PC, ThreadID tid)
997 {
998 PC[tid] = new_PC;
999 }
1000
1001
1002 uint64_t
1003 InOrderCPU::readNextPC(ThreadID tid)
1004 {
1005 return nextPC[tid];
1006 }
1007
1008
1009 void
1010 InOrderCPU::setNextPC(uint64_t new_NPC, ThreadID tid)
1011 {
1012 nextPC[tid] = new_NPC;
1013 }
1014
1015
1016 uint64_t
1017 InOrderCPU::readNextNPC(ThreadID tid)
1018 {
1019 return nextNPC[tid];
1020 }
1021
1022
1023 void
1024 InOrderCPU::setNextNPC(uint64_t new_NNPC, ThreadID tid)
1025 {
1026 nextNPC[tid] = new_NNPC;
1027 }
1028
1029 uint64_t
1030 InOrderCPU::readIntReg(int reg_idx, ThreadID tid)
1031 {
1032 return intRegs[tid][reg_idx];
1033 }
1034
1035 FloatReg
1036 InOrderCPU::readFloatReg(int reg_idx, ThreadID tid)
1037 {
1038 return floatRegs.f[tid][reg_idx];
1039 }
1040
1041 FloatRegBits
1042 InOrderCPU::readFloatRegBits(int reg_idx, ThreadID tid)
1043 {
1044 return floatRegs.i[tid][reg_idx];
1045 }
1046
1047 void
1048 InOrderCPU::setIntReg(int reg_idx, uint64_t val, ThreadID tid)
1049 {
1050 intRegs[tid][reg_idx] = val;
1051 }
1052
1053
1054 void
1055 InOrderCPU::setFloatReg(int reg_idx, FloatReg val, ThreadID tid)
1056 {
1057 floatRegs.f[tid][reg_idx] = val;
1058 }
1059
1060
1061 void
1062 InOrderCPU::setFloatRegBits(int reg_idx, FloatRegBits val, ThreadID tid)
1063 {
1064 floatRegs.i[tid][reg_idx] = val;
1065 }
1066
1067 uint64_t
1068 InOrderCPU::readRegOtherThread(unsigned reg_idx, ThreadID tid)
1069 {
1070 // If Default value is set, then retrieve target thread
1071 if (tid == InvalidThreadID) {
1072 tid = TheISA::getTargetThread(tcBase(tid));
1073 }
1074
1075 if (reg_idx < FP_Base_DepTag) {
1076 // Integer Register File
1077 return readIntReg(reg_idx, tid);
1078 } else if (reg_idx < Ctrl_Base_DepTag) {
1079 // Float Register File
1080 reg_idx -= FP_Base_DepTag;
1081 return readFloatRegBits(reg_idx, tid);
1082 } else {
1083 reg_idx -= Ctrl_Base_DepTag;
1084 return readMiscReg(reg_idx, tid); // Misc. Register File
1085 }
1086 }
1087 void
1088 InOrderCPU::setRegOtherThread(unsigned reg_idx, const MiscReg &val,
1089 ThreadID tid)
1090 {
1091 // If Default value is set, then retrieve target thread
1092 if (tid == InvalidThreadID) {
1093 tid = TheISA::getTargetThread(tcBase(tid));
1094 }
1095
1096 if (reg_idx < FP_Base_DepTag) { // Integer Register File
1097 setIntReg(reg_idx, val, tid);
1098 } else if (reg_idx < Ctrl_Base_DepTag) { // Float Register File
1099 reg_idx -= FP_Base_DepTag;
1100 setFloatRegBits(reg_idx, val, tid);
1101 } else {
1102 reg_idx -= Ctrl_Base_DepTag;
1103 setMiscReg(reg_idx, val, tid); // Misc. Register File
1104 }
1105 }
1106
1107 MiscReg
1108 InOrderCPU::readMiscRegNoEffect(int misc_reg, ThreadID tid)
1109 {
1110 return isa[tid].readMiscRegNoEffect(misc_reg);
1111 }
1112
1113 MiscReg
1114 InOrderCPU::readMiscReg(int misc_reg, ThreadID tid)
1115 {
1116 return isa[tid].readMiscReg(misc_reg, tcBase(tid));
1117 }
1118
1119 void
1120 InOrderCPU::setMiscRegNoEffect(int misc_reg, const MiscReg &val, ThreadID tid)
1121 {
1122 isa[tid].setMiscRegNoEffect(misc_reg, val);
1123 }
1124
1125 void
1126 InOrderCPU::setMiscReg(int misc_reg, const MiscReg &val, ThreadID tid)
1127 {
1128 isa[tid].setMiscReg(misc_reg, val, tcBase(tid));
1129 }
1130
1131
1132 InOrderCPU::ListIt
1133 InOrderCPU::addInst(DynInstPtr &inst)
1134 {
1135 ThreadID tid = inst->readTid();
1136
1137 instList[tid].push_back(inst);
1138
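// Return an iterator to the instruction just added; callers keep this
// iterator so the instruction can later be located for removal.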
1139 return --(instList[tid].end());
1140 }
1141
1142 void
1143 InOrderCPU::updateContextSwitchStats()
1144 {
1145 // Set Average Stat Here, then reset to 0
1146 instsPerCtxtSwitch = instsPerSwitch;
1147 instsPerSwitch = 0;
1148 }
1149
1150
1151 void
1152 InOrderCPU::instDone(DynInstPtr inst, ThreadID tid)
1153 {
1154 // Set the CPU's PCs - This contributes to the precise state of the CPU
1155     // which can be used when restoring a thread to the CPU after any
1156 // type of context switching activity (fork, exception, etc.)
1157 setPC(inst->readPC(), tid);
1158 setNextPC(inst->readNextPC(), tid);
1159 setNextNPC(inst->readNextNPC(), tid);
1160
1161 if (inst->isControl()) {
1162 thread[tid]->lastGradIsBranch = true;
1163 thread[tid]->lastBranchPC = inst->readPC();
1164 thread[tid]->lastBranchNextPC = inst->readNextPC();
1165 thread[tid]->lastBranchNextNPC = inst->readNextNPC();
1166 } else {
1167 thread[tid]->lastGradIsBranch = false;
1168 }
1169
1170
1171 // Finalize Trace Data For Instruction
1172 if (inst->traceData) {
1173 //inst->traceData->setCycle(curTick);
1174 inst->traceData->setFetchSeq(inst->seqNum);
1175 //inst->traceData->setCPSeq(cpu->tcBase(tid)->numInst);
1176 inst->traceData->dump();
1177 delete inst->traceData;
1178 inst->traceData = NULL;
1179 }
1180
1181 // Increment active thread's instruction count
1182 instsPerSwitch++;
1183
1184 // Increment thread-state's instruction count
1185 thread[tid]->numInst++;
1186
1187 // Increment thread-state's instruction stats
1188 thread[tid]->numInsts++;
1189
1190 // Count committed insts per thread stats
1191 committedInsts[tid]++;
1192
1193 // Count total insts committed stat
1194 totalCommittedInsts++;
1195
1196 // Count SMT-committed insts per thread stat
1197 if (numActiveThreads() > 1) {
1198 smtCommittedInsts[tid]++;
1199 }
1200
1201 // Instruction-Mix Stats
1202 if (inst->isLoad()) {
1203 comLoads++;
1204 } else if (inst->isStore()) {
1205 comStores++;
1206 } else if (inst->isControl()) {
1207 comBranches++;
1208 } else if (inst->isNop()) {
1209 comNops++;
1210 } else if (inst->isNonSpeculative()) {
1211 comNonSpec++;
1212 } else if (inst->isInteger()) {
1213 comInts++;
1214 } else if (inst->isFloating()) {
1215 comFloats++;
1216 }
1217
1218 // Check for instruction-count-based events.
1219 comInstEventQueue[tid]->serviceEvents(thread[tid]->numInst);
1220
1221 // Broadcast to other resources an instruction
1222 // has been completed
1223 resPool->scheduleEvent((CPUEventType)ResourcePool::InstGraduated, inst,
1224 0, 0, tid);
1225
1226 // Finally, remove instruction from CPU
1227 removeInst(inst);
1228 }
1229
1230 // Currently unused function; intended to replace repetitive removal code
1231 // elsewhere with a single call to this function.
1232 void
1233 InOrderCPU::addToRemoveList(DynInstPtr &inst)
1234 {
1235 removeInstsThisCycle = true;
1236 if (!inst->isRemoveList()) {
1237 DPRINTF(InOrderCPU, "Pushing instruction [tid:%i] PC %#x "
1238 "[sn:%lli] to remove list\n",
1239 inst->threadNumber, inst->readPC(), inst->seqNum);
1240 inst->setRemoveList();
1241 removeList.push(inst->getInstListIt());
1242 } else {
1243 DPRINTF(InOrderCPU, "Ignoring instruction removal for [tid:%i] PC %#x "
1244                 "[sn:%lli], already on remove list\n",
1245 inst->threadNumber, inst->readPC(), inst->seqNum);
1246 }
1247
1248 }
1249
1250 void
1251 InOrderCPU::removeInst(DynInstPtr &inst)
1252 {
1253 DPRINTF(InOrderCPU, "Removing graduated instruction [tid:%i] PC %#x "
1254 "[sn:%lli]\n",
1255 inst->threadNumber, inst->readPC(), inst->seqNum);
1256
1257 removeInstsThisCycle = true;
1258
1259 // Remove the instruction.
1260 if (!inst->isRemoveList()) {
1261 DPRINTF(InOrderCPU, "Pushing instruction [tid:%i] PC %#x "
1262 "[sn:%lli] to remove list\n",
1263 inst->threadNumber, inst->readPC(), inst->seqNum);
1264 inst->setRemoveList();
1265 removeList.push(inst->getInstListIt());
1266 } else {
1267 DPRINTF(InOrderCPU, "Ignoring instruction removal for [tid:%i] PC %#x "
1268 "[sn:%lli], already on remove list\n",
1269 inst->threadNumber, inst->readPC(), inst->seqNum);
1270 }
1271
1272 }
1273
1274 void
1275 InOrderCPU::removeInstsUntil(const InstSeqNum &seq_num, ThreadID tid)
1276 {
1277 //assert(!instList[tid].empty());
1278
1279 removeInstsThisCycle = true;
1280
1281 ListIt inst_iter = instList[tid].end();
1282
1283 inst_iter--;
1284
1285 DPRINTF(InOrderCPU, "Squashing instructions from CPU instruction "
1286 "list that are from [tid:%i] and above [sn:%lli] (end=%lli).\n",
1287 tid, seq_num, (*inst_iter)->seqNum);
1288
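    // Walk the list backwards from the youngest instruction, squashing every
    // instruction above seq_num; break_loop stops the walk once the iterator
    // has reached the front of the list.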
1289 while ((*inst_iter)->seqNum > seq_num) {
1290
1291 bool break_loop = (inst_iter == instList[tid].begin());
1292
1293 squashInstIt(inst_iter, tid);
1294
1295 inst_iter--;
1296
1297 if (break_loop)
1298 break;
1299 }
1300 }
1301
1302
1303 inline void
1304 InOrderCPU::squashInstIt(const ListIt &instIt, ThreadID tid)
1305 {
1306 if ((*instIt)->threadNumber == tid) {
1307 DPRINTF(InOrderCPU, "Squashing instruction, "
1308 "[tid:%i] [sn:%lli] PC %#x\n",
1309 (*instIt)->threadNumber,
1310 (*instIt)->seqNum,
1311 (*instIt)->readPC());
1312
1313 (*instIt)->setSquashed();
1314
1315 if (!(*instIt)->isRemoveList()) {
1316 DPRINTF(InOrderCPU, "Pushing instruction [tid:%i] PC %#x "
1317 "[sn:%lli] to remove list\n",
1318 (*instIt)->threadNumber, (*instIt)->readPC(),
1319 (*instIt)->seqNum);
1320 (*instIt)->setRemoveList();
1321 removeList.push(instIt);
1322 } else {
1323 DPRINTF(InOrderCPU, "Ignoring instruction removal for [tid:%i]"
1324 " PC %#x [sn:%lli], already on remove list\n",
1325 (*instIt)->threadNumber, (*instIt)->readPC(),
1326 (*instIt)->seqNum);
1327 }
1328
1329 }
1330
1331 }
1332
1333
1334 void
1335 InOrderCPU::cleanUpRemovedInsts()
1336 {
1337 while (!removeList.empty()) {
1338 DPRINTF(InOrderCPU, "Removing instruction, "
1339 "[tid:%i] [sn:%lli] PC %#x\n",
1340 (*removeList.front())->threadNumber,
1341 (*removeList.front())->seqNum,
1342 (*removeList.front())->readPC());
1343
1344 DynInstPtr inst = *removeList.front();
1345 ThreadID tid = inst->threadNumber;
1346
1347 // Make Sure Resource Schedule Is Emptied Out
1348 ThePipeline::ResSchedule *inst_sched = &inst->resSched;
1349 while (!inst_sched->empty()) {
1350 ScheduleEntry* sch_entry = inst_sched->top();
1351 inst_sched->pop();
1352 delete sch_entry;
1353 }
1354
1355 // Remove From Register Dependency Map, If Necessary
1356 archRegDepMap[(*removeList.front())->threadNumber].
1357 remove((*removeList.front()));
1358
1359
1360 // Clear if Non-Speculative
1361 if (inst->staticInst &&
1362 inst->seqNum == nonSpecSeqNum[tid] &&
1363 nonSpecInstActive[tid] == true) {
1364 nonSpecInstActive[tid] = false;
1365 }
1366
1367 instList[tid].erase(removeList.front());
1368
1369 removeList.pop();
1370 }
1371
1372 removeInstsThisCycle = false;
1373 }
1374
1375 void
1376 InOrderCPU::cleanUpRemovedReqs()
1377 {
1378 while (!reqRemoveList.empty()) {
1379 ResourceRequest *res_req = reqRemoveList.front();
1380
1381 DPRINTF(RefCount, "[tid:%i] [sn:%lli]: Removing Request "
1382 "[stage_num:%i] [res:%s] [slot:%i] [completed:%i].\n",
1383 res_req->inst->threadNumber,
1384 res_req->inst->seqNum,
1385 res_req->getStageNum(),
1386 res_req->res->name(),
1387 (res_req->isCompleted()) ?
1388 res_req->getComplSlot() : res_req->getSlot(),
1389 res_req->isCompleted());
1390
1391 reqRemoveList.pop();
1392
1393 delete res_req;
1394 }
1395 }
1396
1397 void
1398 InOrderCPU::cleanUpRemovedEvents()
1399 {
1400 while (!cpuEventRemoveList.empty()) {
1401 Event *cpu_event = cpuEventRemoveList.front();
1402 cpuEventRemoveList.pop();
1403 delete cpu_event;
1404 }
1405 }
1406
1407
1408 void
1409 InOrderCPU::dumpInsts()
1410 {
1411 int num = 0;
1412
1413 ListIt inst_list_it = instList[0].begin();
1414
1415 cprintf("Dumping Instruction List\n");
1416
1417 while (inst_list_it != instList[0].end()) {
1418 cprintf("Instruction:%i\nPC:%#x\n[tid:%i]\n[sn:%lli]\nIssued:%i\n"
1419 "Squashed:%i\n\n",
1420 num, (*inst_list_it)->readPC(), (*inst_list_it)->threadNumber,
1421 (*inst_list_it)->seqNum, (*inst_list_it)->isIssued(),
1422 (*inst_list_it)->isSquashed());
1423 inst_list_it++;
1424 ++num;
1425 }
1426 }
1427
1428 void
1429 InOrderCPU::wakeCPU()
1430 {
1431 if (/*activityRec.active() || */tickEvent.scheduled()) {
1432 DPRINTF(Activity, "CPU already running.\n");
1433 return;
1434 }
1435
1436 DPRINTF(Activity, "Waking up CPU\n");
1437
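    // Account for the time spent asleep: credit it as idle cycles to the CPU
    // and to each pipeline stage, and advance the total cycle count before
    // rescheduling the tick event.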
1438 Tick extra_cycles = tickToCycles((curTick - 1) - lastRunningCycle);
1439
1440 idleCycles += extra_cycles;
1441 for (int stage_num = 0; stage_num < NumStages; stage_num++) {
1442 pipelineStage[stage_num]->idleCycles += extra_cycles;
1443 }
1444
1445 numCycles += extra_cycles;
1446
1447 mainEventQueue.schedule(&tickEvent, nextCycle(curTick));
1448 }
1449
1450 #if FULL_SYSTEM
1451
1452 void
1453 InOrderCPU::wakeup()
1454 {
1455 if (this->thread[0]->status() != ThreadContext::Suspended)
1456 return;
1457
1458 this->wakeCPU();
1459
1460 DPRINTF(Quiesce, "Suspended Processor woken\n");
1461 this->threadContexts[0]->activate();
1462 }
1463 #endif
1464
1465 #if !FULL_SYSTEM
1466 void
1467 InOrderCPU::syscall(int64_t callnum, ThreadID tid)
1468 {
1469 DPRINTF(InOrderCPU, "[tid:%i] Executing syscall().\n\n", tid);
1470
1471 DPRINTF(Activity,"Activity: syscall() called.\n");
1472
1473 // Temporarily increase this by one to account for the syscall
1474 // instruction.
1475 ++(this->thread[tid]->funcExeInst);
1476
1477 // Execute the actual syscall.
1478 this->thread[tid]->syscall(callnum);
1479
1480 // Decrease funcExeInst by one as the normal commit will handle
1481 // incrementing it.
1482 --(this->thread[tid]->funcExeInst);
1483
1484 // Clear Non-Speculative Block Variable
1485 nonSpecInstActive[tid] = false;
1486 }
1487 #endif
1488
1489 void
1490 InOrderCPU::prefetch(DynInstPtr inst)
1491 {
1492 Resource *mem_res = resPool->getResource(dataPortIdx);
1493 return mem_res->prefetch(inst);
1494 }
1495
1496 void
1497 InOrderCPU::writeHint(DynInstPtr inst)
1498 {
1499 Resource *mem_res = resPool->getResource(dataPortIdx);
1500 return mem_res->writeHint(inst);
1501 }
1502
1503
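// The instruction and data TLBs live inside the CacheUnit resources that
// back the fetch and data memory ports, respectively.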
1504 TheISA::TLB*
1505 InOrderCPU::getITBPtr()
1506 {
1507 CacheUnit *itb_res =
1508 dynamic_cast<CacheUnit*>(resPool->getResource(fetchPortIdx));
1509 return itb_res->tlb();
1510 }
1511
1512
1513 TheISA::TLB*
1514 InOrderCPU::getDTBPtr()
1515 {
1516 CacheUnit *dtb_res =
1517 dynamic_cast<CacheUnit*>(resPool->getResource(dataPortIdx));
1518 return dtb_res->tlb();
1519 }
1520
1521 Fault
1522 InOrderCPU::read(DynInstPtr inst, Addr addr,
1523 uint8_t *data, unsigned size, unsigned flags)
1524 {
1525 //@TODO: Generalize name "CacheUnit" to "MemUnit" just in case
1526 // you want to run w/out caches?
1527 CacheUnit *cache_res =
1528 dynamic_cast<CacheUnit*>(resPool->getResource(dataPortIdx));
1529
1530 return cache_res->read(inst, addr, data, size, flags);
1531 }
1532
1533 Fault
1534 InOrderCPU::write(DynInstPtr inst, uint8_t *data, unsigned size,
1535 Addr addr, unsigned flags, uint64_t *write_res)
1536 {
1537 //@TODO: Generalize name "CacheUnit" to "MemUnit" just in case
1538 // you want to run w/out caches?
1539 CacheUnit *cache_res =
1540 dynamic_cast<CacheUnit*>(resPool->getResource(dataPortIdx));
1541 return cache_res->write(inst, data, size, addr, flags, write_res);
1542 }