inorder: replace schedEvent() code with reschedule().
[gem5.git] / src / cpu / inorder / cpu.cc
1 /*
2 * Copyright (c) 2007 MIPS Technologies, Inc.
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 *
28 * Authors: Korey Sewell
29 *
30 */
31
32 #include <algorithm>
33
34 #include "arch/utility.hh"
35 #include "config/full_system.hh"
36 #include "config/the_isa.hh"
37 #include "cpu/activity.hh"
38 #include "cpu/base.hh"
39 #include "cpu/exetrace.hh"
40 #include "cpu/inorder/cpu.hh"
41 #include "cpu/inorder/first_stage.hh"
42 #include "cpu/inorder/inorder_dyn_inst.hh"
43 #include "cpu/inorder/pipeline_traits.hh"
44 #include "cpu/inorder/resource_pool.hh"
45 #include "cpu/inorder/resources/resource_list.hh"
46 #include "cpu/inorder/thread_context.hh"
47 #include "cpu/inorder/thread_state.hh"
48 #include "cpu/simple_thread.hh"
49 #include "cpu/thread_context.hh"
50 #include "mem/translating_port.hh"
51 #include "params/InOrderCPU.hh"
52 #include "sim/process.hh"
53 #include "sim/stat_control.hh"
54
55 #if FULL_SYSTEM
56 #include "cpu/quiesce_event.hh"
57 #include "sim/system.hh"
58 #endif
59
60 #if THE_ISA == ALPHA_ISA
61 #include "arch/alpha/osfpal.hh"
62 #endif
63
64 using namespace std;
65 using namespace TheISA;
66 using namespace ThePipeline;
67
// Construct the CPU's per-cycle tick event at the standard CPU tick
// priority; 'c' is the CPU that will be ticked when the event fires.
InOrderCPU::TickEvent::TickEvent(InOrderCPU *c)
  : Event(CPU_Tick_Pri), cpu(c)
{ }
71
72
// Event-queue callback: advance the owning CPU by one cycle.
void
InOrderCPU::TickEvent::process()
{
    cpu->tick();
}
78
79
// Human-readable event name used by event-queue debug/trace output.
const char *
InOrderCPU::TickEvent::description()
{
    return "InOrderCPU tick event";
}
85
// Construct a generic CPU event.  The event priority is the normal CPU
// tick priority plus 'event_pri_offset', which lets callers order
// same-cycle CPU events relative to one another (e.g. activate a thread
// only after deactivations/squashes have been processed).
InOrderCPU::CPUEvent::CPUEvent(InOrderCPU *_cpu, CPUEventType e_type,
                               Fault fault, ThreadID _tid, DynInstPtr inst,
                               unsigned event_pri_offset)
    : Event(Event::Priority((unsigned int)CPU_Tick_Pri + event_pri_offset)),
      cpu(_cpu)
{
    setEvent(e_type, fault, _tid, inst);
}
94
95
// Printable names for CPUEventType values, used in DPRINTF/fatal
// messages.  NOTE: the order here must match the CPUEventType enum
// declaration (indexed directly by enum value).
std::string InOrderCPU::eventNames[NumCPUEvents] =
{
    "ActivateThread",
    "ActivateNextReadyThread",
    "DeactivateThread",
    "HaltThread",
    "SuspendThread",
    "Trap",
    "InstGraduated",
    "SquashFromMemStall",
    "UpdatePCs"
};
108
// Dispatch this event to the matching InOrderCPU handler, then queue
// the event object for deallocation at the end of the cycle.
void
InOrderCPU::CPUEvent::process()
{
    switch (cpuEventType)
    {
      case ActivateThread:
        cpu->activateThread(tid);
        break;

      case ActivateNextReadyThread:
        cpu->activateNextReadyThread();
        break;

      case DeactivateThread:
        cpu->deactivateThread(tid);
        break;

      case HaltThread:
        cpu->haltThread(tid);
        break;

      case SuspendThread:
        cpu->suspendThread(tid);
        break;

      case SquashFromMemStall:
        cpu->squashDueToMemStall(inst->squashingStage, inst->seqNum, tid);
        break;

      case Trap:
        cpu->trapCPU(fault, tid, inst);
        break;

      // InstGraduated/UpdatePCs are broadcast to the resource pool
      // rather than processed here, so they fall through to fatal().
      default:
        fatal("Unrecognized Event Type %s", eventNames[cpuEventType]);
    }

    // Events are heap-allocated by scheduleCpuEvent(); defer the delete
    // until cleanUpRemovedEvents() runs at the end of the CPU tick.
    cpu->cpuEventRemoveList.push(this);
}
148
149
150
// Human-readable event name used by event-queue debug/trace output.
const char *
InOrderCPU::CPUEvent::description()
{
    return "InOrderCPU event";
}
156
// (Re)schedule this event 'delay' cycles in the future.  Uses
// reschedule(..., true) so a previously scheduled-but-squashed event is
// simply moved on the event queue instead of asserting.
void
InOrderCPU::CPUEvent::scheduleEvent(int delay)
{
    // Only an unscheduled or squashed event may be (re)scheduled.
    assert(!scheduled() || squashed());
    cpu->reschedule(this, cpu->nextCycle(curTick + cpu->ticks(delay)), true);
}
163
// Cancel a pending event.  The event is squashed (marked dead on the
// queue) rather than deschedule()d, so scheduleEvent() can reuse it.
void
InOrderCPU::CPUEvent::unscheduleEvent()
{
    if (scheduled())
        squash();
}
170
// Construct the in-order CPU model: build the resource pool, allocate
// per-thread state and thread contexts, wire up the pipeline stages and
// their inter-stage queues, zero the register files, and schedule the
// first tick event.
InOrderCPU::InOrderCPU(Params *params)
    : BaseCPU(params),
      cpu_id(params->cpu_id),
      coreType("default"),
      _status(Idle),
      tickEvent(this),
      timeBuffer(2 , 2),
      removeInstsThisCycle(false),
      activityRec(params->name, NumStages, 10, params->activity),
#if FULL_SYSTEM
      system(params->system),
      physmem(system->physmem),
#endif // FULL_SYSTEM
#ifdef DEBUG
      cpuEventNum(0),
      resReqCount(0),
#endif // DEBUG
      switchCount(0),
      deferRegistration(false/*params->deferRegistration*/),
      stageTracing(params->stageTracing),
      instsPerSwitch(0)
{
    ThreadID active_threads;
    cpu_params = params;

    // The resource pool owns all pipeline resources (caches, ALUs,
    // memory ports, ...) used by the stages below.
    resPool = new ResourcePool(this, params);

    // Resize for Multithreading CPUs
    thread.resize(numThreads);

#if FULL_SYSTEM
    active_threads = 1;
#else
    active_threads = params->workload.size();

    if (active_threads > MaxThreads) {
        panic("Workload Size too large. Increase the 'MaxThreads'"
              "in your InOrder implementation or "
              "edit your workload size.");
    }


    // Select the threading model; it only matters with >1 workload.
    if (active_threads > 1) {
        threadModel = (InOrderCPU::ThreadModel) params->threadModel;

        if (threadModel == SMT) {
            DPRINTF(InOrderCPU, "Setting Thread Model to SMT.\n");
        } else if (threadModel == SwitchOnCacheMiss) {
            DPRINTF(InOrderCPU, "Setting Thread Model to "
                    "Switch On Cache Miss\n");
        }

    } else {
        threadModel = Single;
    }



#endif

    // Bind the fetch & data ports from the resource pool.
    // NOTE(review): a 0 return from getPortIdx() is treated as
    // "not found" -- presumably index 0 is reserved; confirm in
    // ResourcePool.
    fetchPortIdx = resPool->getPortIdx(params->fetchMemPort);
    if (fetchPortIdx == 0) {
        fatal("Unable to find port to fetch instructions from.\n");
    }

    dataPortIdx = resPool->getPortIdx(params->dataMemPort);
    if (dataPortIdx == 0) {
        fatal("Unable to find port for data.\n");
    }

    // Per-thread setup: thread state, address-space id, and the
    // ThreadContext that exposes this thread to the rest of M5.
    for (ThreadID tid = 0; tid < numThreads; ++tid) {
#if FULL_SYSTEM
        // SMT is not supported in FS mode yet.
        assert(numThreads == 1);
        thread[tid] = new Thread(this, 0);
#else
        if (tid < (ThreadID)params->workload.size()) {
            DPRINTF(InOrderCPU, "Workload[%i] process is %#x\n",
                    tid, params->workload[tid]->prog_fname);
            thread[tid] =
                new Thread(this, tid, params->workload[tid]);
        } else {
            //Allocate Empty thread so M5 can use later
            //when scheduling threads to CPU
            Process* dummy_proc = params->workload[0];
            thread[tid] = new Thread(this, tid, dummy_proc);
        }

        // Eventually set this with parameters...
        asid[tid] = tid;
#endif

        // Setup the TC that will serve as the interface to the threads/CPU.
        InOrderThreadContext *tc = new InOrderThreadContext;
        tc->cpu = this;
        tc->thread = thread[tid];

        // Give the thread the TC.
        thread[tid]->tc = tc;
        thread[tid]->setFuncExeInst(0);
        globalSeqNum[tid] = 1;

        // Add the TC to the CPU's list of TC's.
        this->threadContexts.push_back(tc);
    }

    // Initialize TimeBuffer Stage Queues
    // (NumStages - 1 queues connect NumStages stages pairwise.)
    for (int stNum=0; stNum < NumStages - 1; stNum++) {
        stageQueue[stNum] = new StageQueue(NumStages, NumStages);
        stageQueue[stNum]->id(stNum);
    }


    // Set Up Pipeline Stages
    for (int stNum=0; stNum < NumStages; stNum++) {
        // Stage 0 (fetch) has its own specialized class.
        if (stNum == 0)
            pipelineStage[stNum] = new FirstStage(params, stNum);
        else
            pipelineStage[stNum] = new PipelineStage(params, stNum);

        pipelineStage[stNum]->setCPU(this);
        pipelineStage[stNum]->setActiveThreads(&activeThreads);
        pipelineStage[stNum]->setTimeBuffer(&timeBuffer);

        // Take Care of 1st/Nth stages
        if (stNum > 0)
            pipelineStage[stNum]->setPrevStageQueue(stageQueue[stNum - 1]);
        if (stNum < NumStages - 1)
            pipelineStage[stNum]->setNextStageQueue(stageQueue[stNum]);
    }

    // Initialize thread specific variables
    for (ThreadID tid = 0; tid < numThreads; tid++) {
        archRegDepMap[tid].setCPU(this);

        nonSpecInstActive[tid] = false;
        nonSpecSeqNum[tid] = 0;

        squashSeqNum[tid] = MaxAddr;
        lastSquashCycle[tid] = 0;

        // Zero the architectural register files.
        memset(intRegs[tid], 0, sizeof(intRegs[tid]));
        memset(floatRegs.i[tid], 0, sizeof(floatRegs.i[tid]));
        isa[tid].clear();

        isa[tid].expandForMultithreading(numThreads, 1/*numVirtProcs*/);

        // Define dummy instructions and resource requests to be used.
        dummyInst[tid] = new InOrderDynInst(this,
                                            thread[tid],
                                            0,
                                            tid,
                                            asid[tid]);

        dummyReq[tid] = new ResourceRequest(resPool->getResource(0),
                                            dummyInst[tid],
                                            0,
                                            0,
                                            0,
                                            0);
    }

    // Squashed placeholder instructions handed out where a real
    // instruction is not available.
    dummyReqInst = new InOrderDynInst(this, NULL, 0, 0, 0);
    dummyReqInst->setSquashed();

    dummyBufferInst = new InOrderDynInst(this, NULL, 0, 0, 0);
    dummyBufferInst->setSquashed();

    lastRunningCycle = curTick;

    // Reset CPU to reset state.
#if FULL_SYSTEM
    Fault resetFault = new ResetFault();
    resetFault->invoke(tcBase());
#else
    reset();
#endif

    dummyBufferInst->resetInstCount();

    // Schedule First Tick Event, CPU will reschedule itself from here on out.
    scheduleTickEvent(0);
}
355
// Destructor.  Only the resource pool is freed explicitly here.
// NOTE(review): threads, thread contexts, pipeline stages, and stage
// queues allocated in the constructor are not deleted -- presumably
// reclaimed at simulator exit; confirm if CPUs can be destroyed
// mid-simulation.
InOrderCPU::~InOrderCPU()
{
    delete resPool;
}
360
361
// Register all statistics for this CPU: the resource pool's stats, each
// pipeline stage's stats, and the CPU-level counters and formulas
// (commit mix, cycle accounting, CPI/IPC) declared below.
void
InOrderCPU::regStats()
{
    /* Register the Resource Pool's stats here.*/
    resPool->regStats();

    /* Register for each Pipeline Stage */
    for (int stage_num=0; stage_num < ThePipeline::NumStages; stage_num++) {
        pipelineStage[stage_num]->regStats();
    }

    /* Register any of the InOrderCPU's stats here.*/
    instsPerCtxtSwitch
        .name(name() + ".instsPerContextSwitch")
        .desc("Instructions Committed Per Context Switch")
        .prereq(instsPerCtxtSwitch);

    numCtxtSwitches
        .name(name() + ".contextSwitches")
        .desc("Number of context switches");

    comLoads
        .name(name() + ".comLoads")
        .desc("Number of Load instructions committed");

    comStores
        .name(name() + ".comStores")
        .desc("Number of Store instructions committed");

    comBranches
        .name(name() + ".comBranches")
        .desc("Number of Branches instructions committed");

    comNops
        .name(name() + ".comNops")
        .desc("Number of Nop instructions committed");

    comNonSpec
        .name(name() + ".comNonSpec")
        .desc("Number of Non-Speculative instructions committed");

    comInts
        .name(name() + ".comInts")
        .desc("Number of Integer instructions committed");

    comFloats
        .name(name() + ".comFloats")
        .desc("Number of Floating Point instructions committed");

    timesIdled
        .name(name() + ".timesIdled")
        .desc("Number of times that the entire CPU went into an idle state and"
              " unscheduled itself")
        .prereq(timesIdled);

    idleCycles
        .name(name() + ".idleCycles")
        .desc("Number of cycles cpu's stages were not processed");

    runCycles
        .name(name() + ".runCycles")
        .desc("Number of cycles cpu stages are processed.");

    // Derived formula: fraction of cycles with any stage activity.
    activity
        .name(name() + ".activity")
        .desc("Percentage of cycles cpu is active")
        .precision(6);
    activity = (runCycles / numCycles) * 100;

    threadCycles
        .init(numThreads)
        .name(name() + ".threadCycles")
        .desc("Total Number of Cycles A Thread Was Active in CPU (Per-Thread)");

    smtCycles
        .name(name() + ".smtCycles")
        .desc("Total number of cycles that the CPU was in SMT-mode");

    committedInsts
        .init(numThreads)
        .name(name() + ".committedInsts")
        .desc("Number of Instructions Simulated (Per-Thread)");

    smtCommittedInsts
        .init(numThreads)
        .name(name() + ".smtCommittedInsts")
        .desc("Number of SMT Instructions Simulated (Per-Thread)");

    totalCommittedInsts
        .name(name() + ".committedInsts_total")
        .desc("Number of Instructions Simulated (Total)");

    // CPI / IPC formulas, per-thread and aggregate.
    cpi
        .name(name() + ".cpi")
        .desc("CPI: Cycles Per Instruction (Per-Thread)")
        .precision(6);
    cpi = numCycles / committedInsts;

    smtCpi
        .name(name() + ".smt_cpi")
        .desc("CPI: Total SMT-CPI")
        .precision(6);
    smtCpi = smtCycles / smtCommittedInsts;

    totalCpi
        .name(name() + ".cpi_total")
        .desc("CPI: Total CPI of All Threads")
        .precision(6);
    totalCpi = numCycles / totalCommittedInsts;

    ipc
        .name(name() + ".ipc")
        .desc("IPC: Instructions Per Cycle (Per-Thread)")
        .precision(6);
    ipc =  committedInsts / numCycles;

    smtIpc
        .name(name() + ".smt_ipc")
        .desc("IPC: Total SMT-IPC")
        .precision(6);
    smtIpc = smtCommittedInsts / smtCycles;

    totalIpc
        .name(name() + ".ipc_total")
        .desc("IPC: Total IPC of All Threads")
        .precision(6);
    totalIpc =  totalCommittedInsts / numCycles;

    BaseCPU::regStats();
}
492
493
// Main per-cycle entry point: tick every pipeline stage (back to
// front), advance the time buffers, clean up squashed state, and
// reschedule the next tick unless the CPU is switched out or idle.
void
InOrderCPU::tick()
{
    DPRINTF(InOrderCPU, "\n\nInOrderCPU: Ticking main, InOrderCPU.\n");

    ++numCycles;

    bool pipes_idle = true;

    //Tick each of the stages
    // (back-to-front so a stage consumes last cycle's output before the
    // previous stage overwrites it)
    for (int stNum=NumStages - 1; stNum >= 0 ; stNum--) {
        pipelineStage[stNum]->tick();

        pipes_idle = pipes_idle && pipelineStage[stNum]->idle;
    }

    if (pipes_idle)
        idleCycles++;
    else
        runCycles++;

    // Now advance the time buffers one tick
    timeBuffer.advance();
    for (int sqNum=0; sqNum < NumStages - 1; sqNum++) {
        stageQueue[sqNum]->advance();
    }
    activityRec.advance();

    // Any squashed requests, events, or insts then remove them now
    cleanUpRemovedReqs();
    cleanUpRemovedEvents();
    cleanUpRemovedInsts();

    // Re-schedule CPU for this cycle
    if (!tickEvent.scheduled()) {
        if (_status == SwitchedOut) {
            // increment stat
            lastRunningCycle = curTick;
        } else if (!activityRec.active()) {
            // No activity anywhere in the pipeline: go to sleep; a
            // wakeCPU() call will restart ticking later.
            DPRINTF(InOrderCPU, "sleeping CPU.\n");
            lastRunningCycle = curTick;
            timesIdled++;
        } else {
            //Tick next_tick = curTick + cycles(1);
            //tickEvent.schedule(next_tick);
            schedule(&tickEvent, nextCycle(curTick + 1));
            DPRINTF(InOrderCPU, "Scheduled CPU for next tick @ %i.\n",
                    nextCycle(curTick + 1));
        }
    }

    tickThreadStats();
    updateThreadPriority();
}
548
549
// Post-construction initialization: register thread contexts, perform
// ISA-level CPU init (FS mode), and initialize the resource pool.  The
// inSyscall flag is held true during register setup so the pipeline
// does not squash on the initial register writes.
void
InOrderCPU::init()
{
    if (!deferRegistration) {
        registerThreadContexts();
    }

    // Set inSyscall so that the CPU doesn't squash when initially
    // setting up registers.
    for (ThreadID tid = 0; tid < numThreads; ++tid)
        thread[tid]->inSyscall = true;

#if FULL_SYSTEM
    for (ThreadID tid = 0; tid < numThreads; tid++) {
        ThreadContext *src_tc = threadContexts[tid];
        TheISA::initCPU(src_tc, src_tc->contextId());
    }
#endif

    // Clear inSyscall.
    for (ThreadID tid = 0; tid < numThreads; ++tid)
        thread[tid]->inSyscall = false;

    // Call Initializiation Routine for Resource Pool
    resPool->init();
}
576
577 void
578 InOrderCPU::reset()
579 {
580 for (int i = 0; i < numThreads; i++) {
581 isa[i].reset(coreType, numThreads,
582 1/*numVirtProcs*/, dynamic_cast<BaseCPU*>(this));
583 }
584 }
585
// Memory ports are owned by resources inside the resource pool, so
// port lookup by name/index is delegated there.
Port*
InOrderCPU::getPort(const std::string &if_name, int idx)
{
    return resPool->getPort(if_name, idx);
}
591
592 #if FULL_SYSTEM
// Alpha PALcode return-from-interrupt.  Not implemented for this model;
// always panics (the return statement is unreachable).
Fault
InOrderCPU::hwrei(ThreadID tid)
{
    panic("hwrei: Unimplemented");

    return NoFault;
}
600
601
// Alpha PAL-call hook.  Not implemented for this model; always panics
// (the return statement is unreachable).
bool
InOrderCPU::simPalCheck(int palFunc, ThreadID tid)
{
    panic("simPalCheck: Unimplemented");

    return true;
}
609
610
// Return any pending interrupt as a Fault.  Interrupts are always
// checked against thread 0's context.
Fault
InOrderCPU::getInterrupts()
{
    // Check if there are any outstanding interrupts
    return this->interrupts->getInterrupt(this->threadContexts[0]);
}
617
618
// Handle a pending interrupt by trapping on thread 0, using a static
// dummy no-op instruction as the trapping instruction.
void
InOrderCPU::processInterrupts(Fault interrupt)
{
    // Check for interrupts here.  For now can copy the code that
    // exists within isa_fullsys_traits.hh.  Also assume that thread 0
    // is the one that handles the interrupts.
    // @todo: Possibly consolidate the interrupt checking code.
    // @todo: Allow other threads to handle interrupts.

    assert(interrupt != NoFault);
    this->interrupts->updateIntrInfo(this->threadContexts[0]);

    DPRINTF(InOrderCPU, "Interrupt %s being handled\n", interrupt->name());
    // Static placeholders: reused across calls, never freed.
    static StaticInstPtr dummyStatic(TheISA::NoopMachInst, 0);
    static DynInstPtr dummyDyn = new Impl::DynInst(dummyStatic);
    this->trap(interrupt, dummyDyn);
}
636
637
638 void
639 InOrderCPU::updateMemPorts()
640 {
641 // Update all ThreadContext's memory ports (Functional/Virtual
642 // Ports)
643 ThreadID size = thread.size();
644 for (ThreadID i = 0; i < size; ++i)
645 thread[i]->connectMemPorts(thread[i]->getTC());
646 }
647 #endif
648
// Schedule a Trap CPU event 'delay' cycles from now for the given
// thread; the event will invoke the fault (and the pipeline squash is
// handled through the event broadcast).
void
InOrderCPU::trap(Fault fault, ThreadID tid, DynInstPtr inst, int delay)
{
    //@ Squash Pipeline during TRAP
    scheduleCpuEvent(Trap, fault, tid, inst, delay);
}
655
// Trap event handler: invoke the fault on the thread's context.
void
InOrderCPU::trapCPU(Fault fault, ThreadID tid, DynInstPtr inst)
{
    fault->invoke(tcBase(tid), inst->staticInst);
}
661
// Schedule a SquashFromMemStall CPU event 'delay' cycles from now; its
// handler (squashDueToMemStall) squashes the pipeline behind the
// stalling instruction.
void
InOrderCPU::squashFromMemStall(DynInstPtr inst, ThreadID tid, int delay)
{
    scheduleCpuEvent(SquashFromMemStall, NoFault, tid, inst, delay);
}
667
668
// Squash the pipeline from 'stage_num' back to the front stage for the
// given thread, including the stalling instruction itself.
void
InOrderCPU::squashDueToMemStall(int stage_num, InstSeqNum seq_num,
                                ThreadID tid)
{
    DPRINTF(InOrderCPU, "Squashing Pipeline Stages Due to Memory Stall...\n");

    // Squash all instructions in each stage including
    // instruction that caused the squash (seq_num - 1)
    // NOTE: The stage bandwidth needs to be cleared so thats why
    // the stalling instruction is squashed as well. The stalled
    // instruction is previously placed in another intermediate buffer
    // while it's stall is being handled.
    InstSeqNum squash_seq_num = seq_num - 1;

    for (int stNum=stage_num; stNum >= 0 ; stNum--) {
        pipelineStage[stNum]->squashDueToMemStall(squash_seq_num, tid);
    }
}
687
688 void
689 InOrderCPU::scheduleCpuEvent(CPUEventType c_event, Fault fault,
690 ThreadID tid, DynInstPtr inst,
691 unsigned delay, unsigned event_pri_offset)
692 {
693 CPUEvent *cpu_event = new CPUEvent(this, c_event, fault, tid, inst,
694 event_pri_offset);
695
696 Tick sked_tick = nextCycle(curTick + ticks(delay));
697 if (delay >= 0) {
698 DPRINTF(InOrderCPU, "Scheduling CPU Event (%s) for cycle %i, [tid:%i].\n",
699 eventNames[c_event], curTick + delay, tid);
700 schedule(cpu_event, sked_tick);
701 } else {
702 cpu_event->process();
703 cpuEventRemoveList.push(cpu_event);
704 }
705
706 // Broadcast event to the Resource Pool
707 // Need to reset tid just in case this is a dummy instruction
708 inst->setTid(tid);
709 resPool->scheduleEvent(c_event, inst, 0, 0, tid);
710 }
711
712 bool
713 InOrderCPU::isThreadActive(ThreadID tid)
714 {
715 list<ThreadID>::iterator isActive =
716 std::find(activeThreads.begin(), activeThreads.end(), tid);
717
718 return (isActive != activeThreads.end());
719 }
720
721 bool
722 InOrderCPU::isThreadReady(ThreadID tid)
723 {
724 list<ThreadID>::iterator isReady =
725 std::find(readyThreads.begin(), readyThreads.end(), tid);
726
727 return (isReady != readyThreads.end());
728 }
729
730 bool
731 InOrderCPU::isThreadSuspended(ThreadID tid)
732 {
733 list<ThreadID>::iterator isSuspended =
734 std::find(suspendedThreads.begin(), suspendedThreads.end(), tid);
735
736 return (isSuspended != suspendedThreads.end());
737 }
738
739 void
740 InOrderCPU::activateNextReadyThread()
741 {
742 if (readyThreads.size() >= 1) {
743 ThreadID ready_tid = readyThreads.front();
744
745 // Activate in Pipeline
746 activateThread(ready_tid);
747
748 // Activate in Resource Pool
749 resPool->activateAll(ready_tid);
750
751 list<ThreadID>::iterator ready_it =
752 std::find(readyThreads.begin(), readyThreads.end(), ready_tid);
753 readyThreads.erase(ready_it);
754 } else {
755 DPRINTF(InOrderCPU,
756 "Attempting to activate new thread, but No Ready Threads to"
757 "activate.\n");
758 DPRINTF(InOrderCPU,
759 "Unable to switch to next active thread.\n");
760 }
761 }
762
// ActivateThread event handler: move a thread out of the suspended
// list and into the active list (or park it on the ready list when the
// switch-on-cache-miss model already has a running thread).
void
InOrderCPU::activateThread(ThreadID tid)
{
    if (isThreadSuspended(tid)) {
        DPRINTF(InOrderCPU,
                "Removing [tid:%i] from suspended threads list.\n", tid);

        list<ThreadID>::iterator susp_it =
            std::find(suspendedThreads.begin(), suspendedThreads.end(),
                      tid);
        suspendedThreads.erase(susp_it);
    }

    // Under SwitchOnCacheMiss only one thread runs at a time; a second
    // activation request is deferred onto the ready list instead.
    if (threadModel == SwitchOnCacheMiss &&
        numActiveThreads() == 1) {
        DPRINTF(InOrderCPU,
                "Ignoring activation of [tid:%i], since [tid:%i] is "
                "already running.\n", tid, activeThreadId());

        DPRINTF(InOrderCPU,"Placing [tid:%i] on ready threads list\n",
                tid);

        readyThreads.push_back(tid);

    } else if (!isThreadActive(tid)) {
        DPRINTF(InOrderCPU,
                "Adding [tid:%i] to active threads list.\n", tid);
        activeThreads.push_back(tid);

        activateThreadInPipeline(tid);

        thread[tid]->lastActivate = curTick;

        tcBase(tid)->setStatus(ThreadContext::Active);

        // Restart ticking in case the CPU had descheduled itself.
        wakeCPU();

        numCtxtSwitches++;
    }
}
803
804 void
805 InOrderCPU::activateThreadInPipeline(ThreadID tid)
806 {
807 for (int stNum=0; stNum < NumStages; stNum++) {
808 pipelineStage[stNum]->activateThread(tid);
809 }
810 }
811
// External (ThreadContext) request to deactivate a thread: schedule a
// DeactivateThread CPU event 'delay' cycles out and keep the CPU
// ticking until the event runs.
void
InOrderCPU::deactivateContext(ThreadID tid, int delay)
{
    DPRINTF(InOrderCPU,"[tid:%i]: Deactivating ...\n", tid);

    scheduleCpuEvent(DeactivateThread, NoFault, tid, dummyInst[tid], delay);

    // Be sure to signal that there's some activity so the CPU doesn't
    // deschedule itself.
    activityRec.activity();

    _status = Running;
}
825
// DeactivateThread event handler: remove the thread from the active
// list, clear any pipeline stalls it holds, and mark its context
// Suspended.
void
InOrderCPU::deactivateThread(ThreadID tid)
{
    DPRINTF(InOrderCPU, "[tid:%i]: Calling deactivate thread.\n", tid);

    if (isThreadActive(tid)) {
        DPRINTF(InOrderCPU,"[tid:%i]: Removing from active threads list\n",
                tid);
        list<ThreadID>::iterator thread_it =
            std::find(activeThreads.begin(), activeThreads.end(), tid);

        // Clear the thread's stalls before dropping it from the list.
        removePipelineStalls(*thread_it);

        activeThreads.erase(thread_it);

        // Ideally, this should be triggered from the
        // suspendContext/Thread functions
        tcBase(tid)->setStatus(ThreadContext::Suspended);
    }

    assert(!isThreadActive(tid));
}
848
849 void
850 InOrderCPU::removePipelineStalls(ThreadID tid)
851 {
852 DPRINTF(InOrderCPU,"[tid:%i]: Removing all pipeline stalls\n",
853 tid);
854
855 for (int stNum = 0; stNum < NumStages ; stNum++) {
856 pipelineStage[stNum]->removeStalls(tid);
857 }
858
859 }
860
861 void
862 InOrderCPU::updateThreadPriority()
863 {
864 if (activeThreads.size() > 1)
865 {
866 //DEFAULT TO ROUND ROBIN SCHEME
867 //e.g. Move highest priority to end of thread list
868 list<ThreadID>::iterator list_begin = activeThreads.begin();
869 list<ThreadID>::iterator list_end = activeThreads.end();
870
871 unsigned high_thread = *list_begin;
872
873 activeThreads.erase(list_begin);
874
875 activeThreads.push_back(high_thread);
876 }
877 }
878
879 inline void
880 InOrderCPU::tickThreadStats()
881 {
882 /** Keep track of cycles that each thread is active */
883 list<ThreadID>::iterator thread_it = activeThreads.begin();
884 while (thread_it != activeThreads.end()) {
885 threadCycles[*thread_it]++;
886 thread_it++;
887 }
888
889 // Keep track of cycles where SMT is active
890 if (activeThreads.size() > 1) {
891 smtCycles++;
892 }
893 }
894
// External (ThreadContext) request to activate a thread: schedule an
// ActivateThread CPU event 'delay' cycles out and keep the CPU ticking
// until the event runs.
void
InOrderCPU::activateContext(ThreadID tid, int delay)
{
    DPRINTF(InOrderCPU,"[tid:%i]: Activating ...\n", tid);


    scheduleCpuEvent(ActivateThread, NoFault, tid, dummyInst[tid], delay);

    // Be sure to signal that there's some activity so the CPU doesn't
    // deschedule itself.
    activityRec.activity();

    _status = Running;
}
909
// Schedule activation of whichever thread is at the head of the ready
// list.  The +5 priority offset orders this event after same-cycle
// deactivations/squashes of other threads.
void
InOrderCPU::activateNextReadyContext(int delay)
{
    DPRINTF(InOrderCPU,"Activating next ready thread\n");

    // NOTE: Add 5 to the event priority so that we always activate
    // threads after we've finished deactivating, squashing,etc.
    // other threads
    scheduleCpuEvent(ActivateNextReadyThread, NoFault, 0/*tid*/, dummyInst[0],
                     delay, 5);

    // Be sure to signal that there's some activity so the CPU doesn't
    // deschedule itself.
    activityRec.activity();

    _status = Running;
}
927
// External (ThreadContext) request to halt a thread: schedule a
// HaltThread CPU event 'delay' cycles out.
void
InOrderCPU::haltContext(ThreadID tid, int delay)
{
    DPRINTF(InOrderCPU, "[tid:%i]: Calling Halt Context...\n", tid);

    scheduleCpuEvent(HaltThread, NoFault, tid, dummyInst[tid], delay);

    // Keep the CPU ticking until the halt event has been processed.
    activityRec.activity();
}
937
// HaltThread event handler: deactivate the thread, squash all of its
// in-flight instructions, move it to the halted list, and -- under the
// switch-on-cache-miss model -- start the next ready thread.
void
InOrderCPU::haltThread(ThreadID tid)
{
    DPRINTF(InOrderCPU, "[tid:%i]: Placing on Halted Threads List...\n", tid);
    deactivateThread(tid);
    squashThreadInPipeline(tid);
    haltedThreads.push_back(tid);

    tcBase(tid)->setStatus(ThreadContext::Halted);

    if (threadModel == SwitchOnCacheMiss) {
        activateNextReadyContext();
    }
}
952
// External (ThreadContext) request to suspend a thread: schedule a
// SuspendThread CPU event 'delay' cycles out.
void
InOrderCPU::suspendContext(ThreadID tid, int delay)
{
    scheduleCpuEvent(SuspendThread, NoFault, tid, dummyInst[tid], delay);
}
958
// SuspendThread event handler: deactivate the thread and move it onto
// the suspended list, recording the suspension time.
void
InOrderCPU::suspendThread(ThreadID tid)
{
    DPRINTF(InOrderCPU, "[tid:%i]: Placing on Suspended Threads List...\n",
            tid);
    deactivateThread(tid);
    suspendedThreads.push_back(tid);
    thread[tid]->lastSuspend = curTick;

    tcBase(tid)->setStatus(ThreadContext::Suspended);
}
970
971 void
972 InOrderCPU::squashThreadInPipeline(ThreadID tid)
973 {
974 //Squash all instructions in each stage
975 for (int stNum=NumStages - 1; stNum >= 0 ; stNum--) {
976 pipelineStage[stNum]->squash(0 /*seq_num*/, tid);
977 }
978 }
979
// Accessor for a pipeline stage by index (0 = fetch/first stage).
PipelineStage*
InOrderCPU::getPipeStage(int stage_num)
{
    return pipelineStage[stage_num];
}
985
// Read an architectural integer register for the given thread.
uint64_t
InOrderCPU::readIntReg(int reg_idx, ThreadID tid)
{
    return intRegs[tid][reg_idx];
}
991
// Read an architectural FP register (floating-point view) for the
// given thread.
FloatReg
InOrderCPU::readFloatReg(int reg_idx, ThreadID tid)
{
    return floatRegs.f[tid][reg_idx];
}
997
998 FloatRegBits
999 InOrderCPU::readFloatRegBits(int reg_idx, ThreadID tid)
1000 {;
1001 return floatRegs.i[tid][reg_idx];
1002 }
1003
// Write an architectural integer register for the given thread.
void
InOrderCPU::setIntReg(int reg_idx, uint64_t val, ThreadID tid)
{
    intRegs[tid][reg_idx] = val;
}
1009
1010
// Write an architectural FP register (floating-point view) for the
// given thread.
void
InOrderCPU::setFloatReg(int reg_idx, FloatReg val, ThreadID tid)
{
    floatRegs.f[tid][reg_idx] = val;
}
1016
1017
// Write an architectural FP register from a raw integer bit pattern.
void
InOrderCPU::setFloatRegBits(int reg_idx, FloatRegBits val, ThreadID tid)
{
    floatRegs.i[tid][reg_idx] = val;
}
1023
// Read a register belonging to (possibly) another thread.  'reg_idx'
// is a unified dependence-tag index: below FP_Base_DepTag it selects
// the integer file, below Ctrl_Base_DepTag the FP file (raw bits),
// otherwise the misc-reg file.
uint64_t
InOrderCPU::readRegOtherThread(unsigned reg_idx, ThreadID tid)
{
    // If Default value is set, then retrieve target thread
    if (tid == InvalidThreadID) {
        // NOTE(review): tcBase() is invoked with InvalidThreadID here
        // before the real target thread is resolved -- confirm that
        // tcBase() tolerates an invalid tid.
        tid = TheISA::getTargetThread(tcBase(tid));
    }

    if (reg_idx < FP_Base_DepTag) {
        // Integer Register File
        return readIntReg(reg_idx, tid);
    } else if (reg_idx < Ctrl_Base_DepTag) {
        // Float Register File
        reg_idx -= FP_Base_DepTag;
        return readFloatRegBits(reg_idx, tid);
    } else {
        reg_idx -= Ctrl_Base_DepTag;
        return readMiscReg(reg_idx, tid);  // Misc. Register File
    }
}
// Write a register belonging to (possibly) another thread, using the
// same unified dependence-tag indexing as readRegOtherThread().
void
InOrderCPU::setRegOtherThread(unsigned reg_idx, const MiscReg &val,
                              ThreadID tid)
{
    // If Default value is set, then retrieve target thread
    if (tid == InvalidThreadID) {
        // NOTE(review): tcBase() is invoked with InvalidThreadID here
        // before the real target thread is resolved -- confirm that
        // tcBase() tolerates an invalid tid.
        tid = TheISA::getTargetThread(tcBase(tid));
    }

    if (reg_idx < FP_Base_DepTag) {            // Integer Register File
        setIntReg(reg_idx, val, tid);
    } else if (reg_idx < Ctrl_Base_DepTag) {   // Float Register File
        reg_idx -= FP_Base_DepTag;
        setFloatRegBits(reg_idx, val, tid);
    } else {
        reg_idx -= Ctrl_Base_DepTag;
        setMiscReg(reg_idx, val, tid);         // Misc. Register File
    }
}
1063
// Read a misc register without triggering any read side effects.
MiscReg
InOrderCPU::readMiscRegNoEffect(int misc_reg, ThreadID tid)
{
    return isa[tid].readMiscRegNoEffect(misc_reg);
}
1069
// Read a misc register, allowing ISA-defined read side effects (the
// thread context is passed through for that purpose).
MiscReg
InOrderCPU::readMiscReg(int misc_reg, ThreadID tid)
{
    return isa[tid].readMiscReg(misc_reg, tcBase(tid));
}
1075
// Write a misc register without triggering any write side effects.
void
InOrderCPU::setMiscRegNoEffect(int misc_reg, const MiscReg &val, ThreadID tid)
{
    isa[tid].setMiscRegNoEffect(misc_reg, val);
}
1081
// Write a misc register, allowing ISA-defined write side effects (the
// thread context is passed through for that purpose).
void
InOrderCPU::setMiscReg(int misc_reg, const MiscReg &val, ThreadID tid)
{
    isa[tid].setMiscReg(misc_reg, val, tcBase(tid));
}
1087
1088
1089 InOrderCPU::ListIt
1090 InOrderCPU::addInst(DynInstPtr &inst)
1091 {
1092 ThreadID tid = inst->readTid();
1093
1094 instList[tid].push_back(inst);
1095
1096 return --(instList[tid].end());
1097 }
1098
// Record the number of instructions committed during the context
// switch that just completed, then restart the running count.
void
InOrderCPU::updateContextSwitchStats()
{
    // Set Average Stat Here, then reset to 0
    instsPerCtxtSwitch = instsPerSwitch;
    instsPerSwitch = 0;
}
1106
1107
// Graduation bookkeeping for a committed instruction: update the CPU's
// precise PC state, finalize trace data, bump commit statistics,
// service instruction-count events, broadcast graduation to the
// resource pool, and finally remove the instruction from the CPU.
void
InOrderCPU::instDone(DynInstPtr inst, ThreadID tid)
{
    // Set the CPU's PCs - This contributes to the precise state of the CPU
    // which can be used when restoring a thread to the CPU after after any
    // type of context switching activity (fork, exception, etc.)
    pcState(inst->pcState(), tid);

    // Remember branch targets so a restored thread resumes correctly.
    if (inst->isControl()) {
        thread[tid]->lastGradIsBranch = true;
        thread[tid]->lastBranchPC = inst->pcState();
        TheISA::advancePC(thread[tid]->lastBranchPC, inst->staticInst);
    } else {
        thread[tid]->lastGradIsBranch = false;
    }


    // Finalize Trace Data For Instruction
    if (inst->traceData) {
        //inst->traceData->setCycle(curTick);
        inst->traceData->setFetchSeq(inst->seqNum);
        //inst->traceData->setCPSeq(cpu->tcBase(tid)->numInst);
        inst->traceData->dump();
        delete inst->traceData;
        inst->traceData = NULL;
    }

    // Increment active thread's instruction count
    instsPerSwitch++;

    // Increment thread-state's instruction count
    thread[tid]->numInst++;

    // Increment thread-state's instruction stats
    thread[tid]->numInsts++;

    // Count committed insts per thread stats
    committedInsts[tid]++;

    // Count total insts committed stat
    totalCommittedInsts++;

    // Count SMT-committed insts per thread stat
    if (numActiveThreads() > 1) {
        smtCommittedInsts[tid]++;
    }

    // Instruction-Mix Stats
    // (first matching category wins; order matters below)
    if (inst->isLoad()) {
        comLoads++;
    } else if (inst->isStore()) {
        comStores++;
    } else if (inst->isControl()) {
        comBranches++;
    } else if (inst->isNop()) {
        comNops++;
    } else if (inst->isNonSpeculative()) {
        comNonSpec++;
    } else if (inst->isInteger()) {
        comInts++;
    } else if (inst->isFloating()) {
        comFloats++;
    }

    // Check for instruction-count-based events.
    comInstEventQueue[tid]->serviceEvents(thread[tid]->numInst);

    // Broadcast to other resources an instruction
    // has been completed
    resPool->scheduleEvent((CPUEventType)ResourcePool::InstGraduated, inst,
                           0, 0, tid);

    // Finally, remove instruction from CPU
    removeInst(inst);
}
1183
1184 // currently unused function, but substitute repetitive code w/this function
1185 // call
1186 void
1187 InOrderCPU::addToRemoveList(DynInstPtr &inst)
1188 {
1189 removeInstsThisCycle = true;
1190 if (!inst->isRemoveList()) {
1191 DPRINTF(InOrderCPU, "Pushing instruction [tid:%i] PC %s "
1192 "[sn:%lli] to remove list\n",
1193 inst->threadNumber, inst->pcState(), inst->seqNum);
1194 inst->setRemoveList();
1195 removeList.push(inst->getInstListIt());
1196 } else {
1197 DPRINTF(InOrderCPU, "Ignoring instruction removal for [tid:%i] PC %s "
1198 "[sn:%lli], already remove list\n",
1199 inst->threadNumber, inst->pcState(), inst->seqNum);
1200 }
1201
1202 }
1203
1204 void
1205 InOrderCPU::removeInst(DynInstPtr &inst)
1206 {
1207 DPRINTF(InOrderCPU, "Removing graduated instruction [tid:%i] PC %s "
1208 "[sn:%lli]\n",
1209 inst->threadNumber, inst->pcState(), inst->seqNum);
1210
1211 removeInstsThisCycle = true;
1212
1213 // Remove the instruction.
1214 if (!inst->isRemoveList()) {
1215 DPRINTF(InOrderCPU, "Pushing instruction [tid:%i] PC %s "
1216 "[sn:%lli] to remove list\n",
1217 inst->threadNumber, inst->pcState(), inst->seqNum);
1218 inst->setRemoveList();
1219 removeList.push(inst->getInstListIt());
1220 } else {
1221 DPRINTF(InOrderCPU, "Ignoring instruction removal for [tid:%i] PC %s "
1222 "[sn:%lli], already on remove list\n",
1223 inst->threadNumber, inst->pcState(), inst->seqNum);
1224 }
1225
1226 }
1227
1228 void
1229 InOrderCPU::removeInstsUntil(const InstSeqNum &seq_num, ThreadID tid)
1230 {
1231 //assert(!instList[tid].empty());
1232
1233 removeInstsThisCycle = true;
1234
1235 ListIt inst_iter = instList[tid].end();
1236
1237 inst_iter--;
1238
1239 DPRINTF(InOrderCPU, "Squashing instructions from CPU instruction "
1240 "list that are from [tid:%i] and above [sn:%lli] (end=%lli).\n",
1241 tid, seq_num, (*inst_iter)->seqNum);
1242
1243 while ((*inst_iter)->seqNum > seq_num) {
1244
1245 bool break_loop = (inst_iter == instList[tid].begin());
1246
1247 squashInstIt(inst_iter, tid);
1248
1249 inst_iter--;
1250
1251 if (break_loop)
1252 break;
1253 }
1254 }
1255
1256
1257 inline void
1258 InOrderCPU::squashInstIt(const ListIt &instIt, ThreadID tid)
1259 {
1260 if ((*instIt)->threadNumber == tid) {
1261 DPRINTF(InOrderCPU, "Squashing instruction, "
1262 "[tid:%i] [sn:%lli] PC %s\n",
1263 (*instIt)->threadNumber,
1264 (*instIt)->seqNum,
1265 (*instIt)->pcState());
1266
1267 (*instIt)->setSquashed();
1268
1269 if (!(*instIt)->isRemoveList()) {
1270 DPRINTF(InOrderCPU, "Pushing instruction [tid:%i] PC %s "
1271 "[sn:%lli] to remove list\n",
1272 (*instIt)->threadNumber, (*instIt)->pcState(),
1273 (*instIt)->seqNum);
1274 (*instIt)->setRemoveList();
1275 removeList.push(instIt);
1276 } else {
1277 DPRINTF(InOrderCPU, "Ignoring instruction removal for [tid:%i]"
1278 " PC %s [sn:%lli], already on remove list\n",
1279 (*instIt)->threadNumber, (*instIt)->pcState(),
1280 (*instIt)->seqNum);
1281 }
1282
1283 }
1284
1285 }
1286
1287
1288 void
1289 InOrderCPU::cleanUpRemovedInsts()
1290 {
1291 while (!removeList.empty()) {
1292 DPRINTF(InOrderCPU, "Removing instruction, "
1293 "[tid:%i] [sn:%lli] PC %s\n",
1294 (*removeList.front())->threadNumber,
1295 (*removeList.front())->seqNum,
1296 (*removeList.front())->pcState());
1297
1298 DynInstPtr inst = *removeList.front();
1299 ThreadID tid = inst->threadNumber;
1300
1301 // Make Sure Resource Schedule Is Emptied Out
1302 ThePipeline::ResSchedule *inst_sched = &inst->resSched;
1303 while (!inst_sched->empty()) {
1304 ScheduleEntry* sch_entry = inst_sched->top();
1305 inst_sched->pop();
1306 delete sch_entry;
1307 }
1308
1309 // Remove From Register Dependency Map, If Necessary
1310 archRegDepMap[(*removeList.front())->threadNumber].
1311 remove((*removeList.front()));
1312
1313
1314 // Clear if Non-Speculative
1315 if (inst->staticInst &&
1316 inst->seqNum == nonSpecSeqNum[tid] &&
1317 nonSpecInstActive[tid] == true) {
1318 nonSpecInstActive[tid] = false;
1319 }
1320
1321 instList[tid].erase(removeList.front());
1322
1323 removeList.pop();
1324 }
1325
1326 removeInstsThisCycle = false;
1327 }
1328
1329 void
1330 InOrderCPU::cleanUpRemovedReqs()
1331 {
1332 while (!reqRemoveList.empty()) {
1333 ResourceRequest *res_req = reqRemoveList.front();
1334
1335 DPRINTF(RefCount, "[tid:%i] [sn:%lli]: Removing Request "
1336 "[stage_num:%i] [res:%s] [slot:%i] [completed:%i].\n",
1337 res_req->inst->threadNumber,
1338 res_req->inst->seqNum,
1339 res_req->getStageNum(),
1340 res_req->res->name(),
1341 (res_req->isCompleted()) ?
1342 res_req->getComplSlot() : res_req->getSlot(),
1343 res_req->isCompleted());
1344
1345 reqRemoveList.pop();
1346
1347 delete res_req;
1348 }
1349 }
1350
1351 void
1352 InOrderCPU::cleanUpRemovedEvents()
1353 {
1354 while (!cpuEventRemoveList.empty()) {
1355 Event *cpu_event = cpuEventRemoveList.front();
1356 cpuEventRemoveList.pop();
1357 delete cpu_event;
1358 }
1359 }
1360
1361
1362 void
1363 InOrderCPU::dumpInsts()
1364 {
1365 int num = 0;
1366
1367 ListIt inst_list_it = instList[0].begin();
1368
1369 cprintf("Dumping Instruction List\n");
1370
1371 while (inst_list_it != instList[0].end()) {
1372 cprintf("Instruction:%i\nPC:%s\n[tid:%i]\n[sn:%lli]\nIssued:%i\n"
1373 "Squashed:%i\n\n",
1374 num, (*inst_list_it)->pcState(),
1375 (*inst_list_it)->threadNumber,
1376 (*inst_list_it)->seqNum, (*inst_list_it)->isIssued(),
1377 (*inst_list_it)->isSquashed());
1378 inst_list_it++;
1379 ++num;
1380 }
1381 }
1382
1383 void
1384 InOrderCPU::wakeCPU()
1385 {
1386 if (/*activityRec.active() || */tickEvent.scheduled()) {
1387 DPRINTF(Activity, "CPU already running.\n");
1388 return;
1389 }
1390
1391 DPRINTF(Activity, "Waking up CPU\n");
1392
1393 Tick extra_cycles = tickToCycles((curTick - 1) - lastRunningCycle);
1394
1395 idleCycles += extra_cycles;
1396 for (int stage_num = 0; stage_num < NumStages; stage_num++) {
1397 pipelineStage[stage_num]->idleCycles += extra_cycles;
1398 }
1399
1400 numCycles += extra_cycles;
1401
1402 schedule(&tickEvent, nextCycle(curTick));
1403 }
1404
#if FULL_SYSTEM

// Full-system wakeup: resume a suspended thread 0 by waking the CPU
// and activating its thread context.  No-op if not suspended.
void
InOrderCPU::wakeup()
{
    if (thread[0]->status() != ThreadContext::Suspended)
        return;

    wakeCPU();

    DPRINTF(Quiesce, "Suspended Processor woken\n");
    threadContexts[0]->activate();
}
#endif
1419
1420 #if !FULL_SYSTEM
1421 void
1422 InOrderCPU::syscall(int64_t callnum, ThreadID tid)
1423 {
1424 DPRINTF(InOrderCPU, "[tid:%i] Executing syscall().\n\n", tid);
1425
1426 DPRINTF(Activity,"Activity: syscall() called.\n");
1427
1428 // Temporarily increase this by one to account for the syscall
1429 // instruction.
1430 ++(this->thread[tid]->funcExeInst);
1431
1432 // Execute the actual syscall.
1433 this->thread[tid]->syscall(callnum);
1434
1435 // Decrease funcExeInst by one as the normal commit will handle
1436 // incrementing it.
1437 --(this->thread[tid]->funcExeInst);
1438
1439 // Clear Non-Speculative Block Variable
1440 nonSpecInstActive[tid] = false;
1441 }
1442 #endif
1443
1444 TheISA::TLB*
1445 InOrderCPU::getITBPtr()
1446 {
1447 CacheUnit *itb_res =
1448 dynamic_cast<CacheUnit*>(resPool->getResource(fetchPortIdx));
1449 return itb_res->tlb();
1450 }
1451
1452
1453 TheISA::TLB*
1454 InOrderCPU::getDTBPtr()
1455 {
1456 CacheUnit *dtb_res =
1457 dynamic_cast<CacheUnit*>(resPool->getResource(dataPortIdx));
1458 return dtb_res->tlb();
1459 }
1460
1461 Fault
1462 InOrderCPU::read(DynInstPtr inst, Addr addr,
1463 uint8_t *data, unsigned size, unsigned flags)
1464 {
1465 //@TODO: Generalize name "CacheUnit" to "MemUnit" just in case
1466 // you want to run w/out caches?
1467 CacheUnit *cache_res =
1468 dynamic_cast<CacheUnit*>(resPool->getResource(dataPortIdx));
1469
1470 return cache_res->read(inst, addr, data, size, flags);
1471 }
1472
1473 Fault
1474 InOrderCPU::write(DynInstPtr inst, uint8_t *data, unsigned size,
1475 Addr addr, unsigned flags, uint64_t *write_res)
1476 {
1477 //@TODO: Generalize name "CacheUnit" to "MemUnit" just in case
1478 // you want to run w/out caches?
1479 CacheUnit *cache_res =
1480 dynamic_cast<CacheUnit*>(resPool->getResource(dataPortIdx));
1481 return cache_res->write(inst, data, size, addr, flags, write_res);
1482 }