inorder: stage width as a python parameter
[gem5.git] / src / cpu / inorder / cpu.cc
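This change pulls the per-stage width from the InOrderCPU Python parameters
(see stageWidth(params->stageWidth) in the constructor below). As a rough,
hypothetical sketch only (the parameter name is taken from that constructor;
the value and import are illustrative), a config script could set it like so:

    # hypothetical gem5 SE-mode config fragment
    from m5.objects import InOrderCPU

    cpu = InOrderCPU()
    cpu.stageWidth = 4  # per-stage width exposed as a Python parameter by this change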
1 /*
2 * Copyright (c) 2007 MIPS Technologies, Inc.
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 *
28 * Authors: Korey Sewell
29 *
30 */
31
32 #include <algorithm>
33
34 #include "arch/utility.hh"
35 #include "config/full_system.hh"
36 #include "config/the_isa.hh"
37 #include "cpu/activity.hh"
38 #include "cpu/base.hh"
39 #include "cpu/exetrace.hh"
40 #include "cpu/inorder/cpu.hh"
41 #include "cpu/inorder/first_stage.hh"
42 #include "cpu/inorder/inorder_dyn_inst.hh"
43 #include "cpu/inorder/pipeline_traits.hh"
44 #include "cpu/inorder/resource_pool.hh"
45 #include "cpu/inorder/resources/resource_list.hh"
46 #include "cpu/inorder/thread_context.hh"
47 #include "cpu/inorder/thread_state.hh"
48 #include "cpu/simple_thread.hh"
49 #include "cpu/thread_context.hh"
50 #include "mem/translating_port.hh"
51 #include "params/InOrderCPU.hh"
52 #include "sim/process.hh"
53 #include "sim/stat_control.hh"
54
55 #if FULL_SYSTEM
56 #include "cpu/quiesce_event.hh"
57 #include "sim/system.hh"
58 #endif
59
60 #if THE_ISA == ALPHA_ISA
61 #include "arch/alpha/osfpal.hh"
62 #endif
63
64 using namespace std;
65 using namespace TheISA;
66 using namespace ThePipeline;
67
68 InOrderCPU::TickEvent::TickEvent(InOrderCPU *c)
69 : Event(CPU_Tick_Pri), cpu(c)
70 { }
71
72
73 void
74 InOrderCPU::TickEvent::process()
75 {
76 cpu->tick();
77 }
78
79
80 const char *
81 InOrderCPU::TickEvent::description()
82 {
83 return "InOrderCPU tick event";
84 }
85
86 InOrderCPU::CPUEvent::CPUEvent(InOrderCPU *_cpu, CPUEventType e_type,
87 Fault fault, ThreadID _tid, DynInstPtr inst,
88 unsigned event_pri_offset)
89 : Event(Event::Priority((unsigned int)CPU_Tick_Pri + event_pri_offset)),
90 cpu(_cpu)
91 {
92 setEvent(e_type, fault, _tid, inst);
93 }
94
95
96 std::string InOrderCPU::eventNames[NumCPUEvents] =
97 {
98 "ActivateThread",
99 "ActivateNextReadyThread",
100 "DeactivateThread",
101 "HaltThread",
102 "SuspendThread",
103 "Trap",
104 "InstGraduated",
105 "SquashFromMemStall",
106 "UpdatePCs"
107 };
108
109 void
110 InOrderCPU::CPUEvent::process()
111 {
112 switch (cpuEventType)
113 {
114 case ActivateThread:
115 cpu->activateThread(tid);
116 break;
117
118 case ActivateNextReadyThread:
119 cpu->activateNextReadyThread();
120 break;
121
122 case DeactivateThread:
123 cpu->deactivateThread(tid);
124 break;
125
126 case HaltThread:
127 cpu->haltThread(tid);
128 break;
129
130 case SuspendThread:
131 cpu->suspendThread(tid);
132 break;
133
134 case SquashFromMemStall:
135 cpu->squashDueToMemStall(inst->squashingStage, inst->seqNum, tid);
136 break;
137
138 case Trap:
139 cpu->trapCPU(fault, tid, inst);
140 break;
141
142 default:
143 fatal("Unrecognized Event Type %s", eventNames[cpuEventType]);
144 }
145
146 cpu->cpuEventRemoveList.push(this);
147 }
148
149
150
151 const char *
152 InOrderCPU::CPUEvent::description()
153 {
154 return "InOrderCPU event";
155 }
156
157 void
158 InOrderCPU::CPUEvent::scheduleEvent(int delay)
159 {
160 assert(!scheduled() || squashed());
161 cpu->reschedule(this, cpu->nextCycle(curTick() + cpu->ticks(delay)), true);
162 }
163
164 void
165 InOrderCPU::CPUEvent::unscheduleEvent()
166 {
167 if (scheduled())
168 squash();
169 }
170
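// Constructor overview: build the resource pool and per-thread state,
// wire up the inter-stage queues and pipeline stages, initialize
// per-thread bookkeeping (dependency maps, register files, dummy
// insts/requests), reset the CPU, and schedule the first tick event.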
171 InOrderCPU::InOrderCPU(Params *params)
172 : BaseCPU(params),
173 cpu_id(params->cpu_id),
174 coreType("default"),
175 _status(Idle),
176 tickEvent(this),
177 stageWidth(params->stageWidth),
178 timeBuffer(2 , 2),
179 removeInstsThisCycle(false),
180 activityRec(params->name, NumStages, 10, params->activity),
181 #if FULL_SYSTEM
182 system(params->system),
183 physmem(system->physmem),
184 #endif // FULL_SYSTEM
185 #ifdef DEBUG
186 cpuEventNum(0),
187 resReqCount(0),
188 #endif // DEBUG
189 switchCount(0),
190 deferRegistration(false/*params->deferRegistration*/),
191 stageTracing(params->stageTracing),
192 instsPerSwitch(0)
193 {
194 ThreadID active_threads;
195 cpu_params = params;
196
197 resPool = new ResourcePool(this, params);
198
199 // Resize for Multithreading CPUs
200 thread.resize(numThreads);
201
202 #if FULL_SYSTEM
203 active_threads = 1;
204 #else
205 active_threads = params->workload.size();
206
207 if (active_threads > MaxThreads) {
208 panic("Workload Size too large. Increase the 'MaxThreads'"
209 "in your InOrder implementation or "
210 "edit your workload size.");
211 }
212
213
214 if (active_threads > 1) {
215 threadModel = (InOrderCPU::ThreadModel) params->threadModel;
216
217 if (threadModel == SMT) {
218 DPRINTF(InOrderCPU, "Setting Thread Model to SMT.\n");
219 } else if (threadModel == SwitchOnCacheMiss) {
220 DPRINTF(InOrderCPU, "Setting Thread Model to "
221 "Switch On Cache Miss\n");
222 }
223
224 } else {
225 threadModel = Single;
226 }
227
228
229
230 #endif
231
232 // Bind the fetch & data ports from the resource pool.
233 fetchPortIdx = resPool->getPortIdx(params->fetchMemPort);
234 if (fetchPortIdx == 0) {
235 fatal("Unable to find port to fetch instructions from.\n");
236 }
237
238 dataPortIdx = resPool->getPortIdx(params->dataMemPort);
239 if (dataPortIdx == 0) {
240 fatal("Unable to find port for data.\n");
241 }
242
243 for (ThreadID tid = 0; tid < numThreads; ++tid) {
244 #if FULL_SYSTEM
245 // SMT is not supported in FS mode yet.
246 assert(numThreads == 1);
247 thread[tid] = new Thread(this, 0);
248 #else
249 if (tid < (ThreadID)params->workload.size()) {
250 DPRINTF(InOrderCPU, "Workload[%i] process is %#x\n",
251 tid, params->workload[tid]->prog_fname);
252 thread[tid] =
253 new Thread(this, tid, params->workload[tid]);
254 } else {
255 // Allocate an empty (dummy) thread so M5 can use it later
256 // when scheduling threads to this CPU.
257 Process* dummy_proc = params->workload[0];
258 thread[tid] = new Thread(this, tid, dummy_proc);
259 }
260
261 // Eventually set this with parameters...
262 asid[tid] = tid;
263 #endif
264
265 // Setup the TC that will serve as the interface to the threads/CPU.
266 InOrderThreadContext *tc = new InOrderThreadContext;
267 tc->cpu = this;
268 tc->thread = thread[tid];
269
270 // Give the thread the TC.
271 thread[tid]->tc = tc;
272 thread[tid]->setFuncExeInst(0);
273 globalSeqNum[tid] = 1;
274
275 // Add the TC to the CPU's list of TC's.
276 this->threadContexts.push_back(tc);
277 }
278
279 // Initialize TimeBuffer Stage Queues
280 for (int stNum=0; stNum < NumStages - 1; stNum++) {
281 stageQueue[stNum] = new StageQueue(NumStages, NumStages);
282 stageQueue[stNum]->id(stNum);
283 }
284
285
286 // Set Up Pipeline Stages
287 for (int stNum=0; stNum < NumStages; stNum++) {
288 if (stNum == 0)
289 pipelineStage[stNum] = new FirstStage(params, stNum);
290 else
291 pipelineStage[stNum] = new PipelineStage(params, stNum);
292
293 pipelineStage[stNum]->setCPU(this);
294 pipelineStage[stNum]->setActiveThreads(&activeThreads);
295 pipelineStage[stNum]->setTimeBuffer(&timeBuffer);
296
297 // Hook up the previous/next stage queues (the first stage has no previous queue, the last has no next)
298 if (stNum > 0)
299 pipelineStage[stNum]->setPrevStageQueue(stageQueue[stNum - 1]);
300 if (stNum < NumStages - 1)
301 pipelineStage[stNum]->setNextStageQueue(stageQueue[stNum]);
302 }
303
304 // Initialize thread specific variables
305 for (ThreadID tid = 0; tid < numThreads; tid++) {
306 archRegDepMap[tid].setCPU(this);
307
308 nonSpecInstActive[tid] = false;
309 nonSpecSeqNum[tid] = 0;
310
311 squashSeqNum[tid] = MaxAddr;
312 lastSquashCycle[tid] = 0;
313
314 memset(intRegs[tid], 0, sizeof(intRegs[tid]));
315 memset(floatRegs.i[tid], 0, sizeof(floatRegs.i[tid]));
316 isa[tid].clear();
317
318 isa[tid].expandForMultithreading(numThreads, 1/*numVirtProcs*/);
319
320 // Define dummy instructions and resource requests to be used.
321 dummyInst[tid] = new InOrderDynInst(this,
322 thread[tid],
323 0,
324 tid,
325 asid[tid]);
326
327 dummyReq[tid] = new ResourceRequest(resPool->getResource(0),
328 dummyInst[tid],
329 0,
330 0,
331 0,
332 0);
333 }
334
335 dummyReqInst = new InOrderDynInst(this, NULL, 0, 0, 0);
336 dummyReqInst->setSquashed();
337
338 dummyBufferInst = new InOrderDynInst(this, NULL, 0, 0, 0);
339 dummyBufferInst->setSquashed();
340
341 lastRunningCycle = curTick();
342
343 // Reset CPU to reset state.
344 #if FULL_SYSTEM
345 Fault resetFault = new ResetFault();
346 resetFault->invoke(tcBase());
347 #else
348 reset();
349 #endif
350
351 dummyBufferInst->resetInstCount();
352
353 // Schedule First Tick Event, CPU will reschedule itself from here on out.
354 scheduleTickEvent(0);
355 }
356
357 InOrderCPU::~InOrderCPU()
358 {
359 delete resPool;
360 }
361
362
363 void
364 InOrderCPU::regStats()
365 {
366 /* Register the Resource Pool's stats here.*/
367 resPool->regStats();
368
369 /* Register for each Pipeline Stage */
370 for (int stage_num=0; stage_num < ThePipeline::NumStages; stage_num++) {
371 pipelineStage[stage_num]->regStats();
372 }
373
374 /* Register any of the InOrderCPU's stats here.*/
375 instsPerCtxtSwitch
376 .name(name() + ".instsPerContextSwitch")
377 .desc("Instructions Committed Per Context Switch")
378 .prereq(instsPerCtxtSwitch);
379
380 numCtxtSwitches
381 .name(name() + ".contextSwitches")
382 .desc("Number of context switches");
383
384 comLoads
385 .name(name() + ".comLoads")
386 .desc("Number of Load instructions committed");
387
388 comStores
389 .name(name() + ".comStores")
390 .desc("Number of Store instructions committed");
391
392 comBranches
393 .name(name() + ".comBranches")
394 .desc("Number of Branches instructions committed");
395
396 comNops
397 .name(name() + ".comNops")
398 .desc("Number of Nop instructions committed");
399
400 comNonSpec
401 .name(name() + ".comNonSpec")
402 .desc("Number of Non-Speculative instructions committed");
403
404 comInts
405 .name(name() + ".comInts")
406 .desc("Number of Integer instructions committed");
407
408 comFloats
409 .name(name() + ".comFloats")
410 .desc("Number of Floating Point instructions committed");
411
412 timesIdled
413 .name(name() + ".timesIdled")
414 .desc("Number of times that the entire CPU went into an idle state and"
415 " unscheduled itself")
416 .prereq(timesIdled);
417
418 idleCycles
419 .name(name() + ".idleCycles")
420 .desc("Number of cycles cpu's stages were not processed");
421
422 runCycles
423 .name(name() + ".runCycles")
424 .desc("Number of cycles cpu stages are processed.");
425
426 activity
427 .name(name() + ".activity")
428 .desc("Percentage of cycles cpu is active")
429 .precision(6);
430 activity = (runCycles / numCycles) * 100;
431
432 threadCycles
433 .init(numThreads)
434 .name(name() + ".threadCycles")
435 .desc("Total Number of Cycles A Thread Was Active in CPU (Per-Thread)");
436
437 smtCycles
438 .name(name() + ".smtCycles")
439 .desc("Total number of cycles that the CPU was in SMT-mode");
440
441 committedInsts
442 .init(numThreads)
443 .name(name() + ".committedInsts")
444 .desc("Number of Instructions Simulated (Per-Thread)");
445
446 smtCommittedInsts
447 .init(numThreads)
448 .name(name() + ".smtCommittedInsts")
449 .desc("Number of SMT Instructions Simulated (Per-Thread)");
450
451 totalCommittedInsts
452 .name(name() + ".committedInsts_total")
453 .desc("Number of Instructions Simulated (Total)");
454
455 cpi
456 .name(name() + ".cpi")
457 .desc("CPI: Cycles Per Instruction (Per-Thread)")
458 .precision(6);
459 cpi = numCycles / committedInsts;
460
461 smtCpi
462 .name(name() + ".smt_cpi")
463 .desc("CPI: Total SMT-CPI")
464 .precision(6);
465 smtCpi = smtCycles / smtCommittedInsts;
466
467 totalCpi
468 .name(name() + ".cpi_total")
469 .desc("CPI: Total CPI of All Threads")
470 .precision(6);
471 totalCpi = numCycles / totalCommittedInsts;
472
473 ipc
474 .name(name() + ".ipc")
475 .desc("IPC: Instructions Per Cycle (Per-Thread)")
476 .precision(6);
477 ipc = committedInsts / numCycles;
478
479 smtIpc
480 .name(name() + ".smt_ipc")
481 .desc("IPC: Total SMT-IPC")
482 .precision(6);
483 smtIpc = smtCommittedInsts / smtCycles;
484
485 totalIpc
486 .name(name() + ".ipc_total")
487 .desc("IPC: Total IPC of All Threads")
488 .precision(6);
489 totalIpc = totalCommittedInsts / numCycles;
490
491 BaseCPU::regStats();
492 }
493
494
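// Per-cycle work: tick every stage from back to front, advance the
// inter-stage time buffers, clean up squashed requests/events/insts,
// and either reschedule the tick event or let the CPU go idle when
// the activity recorder reports no activity.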
495 void
496 InOrderCPU::tick()
497 {
498 DPRINTF(InOrderCPU, "\n\nTicking main loop of InOrderCPU.\n");
499
500 ++numCycles;
501
502 bool pipes_idle = true;
503
504 //Tick each of the stages
505 for (int stNum=NumStages - 1; stNum >= 0 ; stNum--) {
506 pipelineStage[stNum]->tick();
507
508 pipes_idle = pipes_idle && pipelineStage[stNum]->idle;
509 }
510
511 if (pipes_idle)
512 idleCycles++;
513 else
514 runCycles++;
515
516 // Now advance the time buffers one tick
517 timeBuffer.advance();
518 for (int sqNum=0; sqNum < NumStages - 1; sqNum++) {
519 stageQueue[sqNum]->advance();
520 }
521 activityRec.advance();
522
523 // Any squashed requests, events, or insts then remove them now
524 cleanUpRemovedReqs();
525 cleanUpRemovedEvents();
526 cleanUpRemovedInsts();
527
528 // Re-schedule CPU for this cycle
529 if (!tickEvent.scheduled()) {
530 if (_status == SwitchedOut) {
531 // increment stat
532 lastRunningCycle = curTick();
533 } else if (!activityRec.active()) {
534 DPRINTF(InOrderCPU, "sleeping CPU.\n");
535 lastRunningCycle = curTick();
536 timesIdled++;
537 } else {
538 //Tick next_tick = curTick() + cycles(1);
539 //tickEvent.schedule(next_tick);
540 schedule(&tickEvent, nextCycle(curTick() + 1));
541 DPRINTF(InOrderCPU, "Scheduled CPU for next tick @ %i.\n",
542 nextCycle(curTick() + 1));
543 }
544 }
545
546 tickThreadStats();
547 updateThreadPriority();
548 }
549
550
551 void
552 InOrderCPU::init()
553 {
554 if (!deferRegistration) {
555 registerThreadContexts();
556 }
557
558 // Set inSyscall so that the CPU doesn't squash when initially
559 // setting up registers.
560 for (ThreadID tid = 0; tid < numThreads; ++tid)
561 thread[tid]->inSyscall = true;
562
563 #if FULL_SYSTEM
564 for (ThreadID tid = 0; tid < numThreads; tid++) {
565 ThreadContext *src_tc = threadContexts[tid];
566 TheISA::initCPU(src_tc, src_tc->contextId());
567 }
568 #endif
569
570 // Clear inSyscall.
571 for (ThreadID tid = 0; tid < numThreads; ++tid)
572 thread[tid]->inSyscall = false;
573
574 // Call initialization routine for the resource pool.
575 resPool->init();
576 }
577
578 void
579 InOrderCPU::reset()
580 {
581 for (int i = 0; i < numThreads; i++) {
582 isa[i].reset(coreType, numThreads,
583 1/*numVirtProcs*/, dynamic_cast<BaseCPU*>(this));
584 }
585 }
586
587 Port*
588 InOrderCPU::getPort(const std::string &if_name, int idx)
589 {
590 return resPool->getPort(if_name, idx);
591 }
592
593 #if FULL_SYSTEM
594 Fault
595 InOrderCPU::hwrei(ThreadID tid)
596 {
597 panic("hwrei: Unimplemented");
598
599 return NoFault;
600 }
601
602
603 bool
604 InOrderCPU::simPalCheck(int palFunc, ThreadID tid)
605 {
606 panic("simPalCheck: Unimplemented");
607
608 return true;
609 }
610
611
612 Fault
613 InOrderCPU::getInterrupts()
614 {
615 // Check if there are any outstanding interrupts
616 return interrupts->getInterrupt(threadContexts[0]);
617 }
618
619
620 void
621 InOrderCPU::processInterrupts(Fault interrupt)
622 {
623 // Check for interrupts here. For now can copy the code that
624 // exists within isa_fullsys_traits.hh. Also assume that thread 0
625 // is the one that handles the interrupts.
626 // @todo: Possibly consolidate the interrupt checking code.
627 // @todo: Allow other threads to handle interrupts.
628
629 assert(interrupt != NoFault);
630 interrupts->updateIntrInfo(threadContexts[0]);
631
632 DPRINTF(InOrderCPU, "Interrupt %s being handled\n", interrupt->name());
633
634 // Note: Context ID ok here? Impl. of FS mode needs to revisit this
635 trap(interrupt, threadContexts[0]->contextId(), dummyBufferInst);
636 }
637
638
639 void
640 InOrderCPU::updateMemPorts()
641 {
642 // Update all ThreadContext's memory ports (Functional/Virtual
643 // Ports)
644 ThreadID size = thread.size();
645 for (ThreadID i = 0; i < size; ++i)
646 thread[i]->connectMemPorts(thread[i]->getTC());
647 }
648 #endif
649
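// Traps are not handled inline; a Trap CPU event is scheduled and the
// fault is invoked when trapCPU() runs 'delay' cycles later.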
650 void
651 InOrderCPU::trap(Fault fault, ThreadID tid, DynInstPtr inst, int delay)
652 {
653 //@ Squash Pipeline during TRAP
654 scheduleCpuEvent(Trap, fault, tid, inst, delay);
655 }
656
657 void
658 InOrderCPU::trapCPU(Fault fault, ThreadID tid, DynInstPtr inst)
659 {
660 fault->invoke(tcBase(tid), inst->staticInst);
661 }
662
663 void
664 InOrderCPU::squashFromMemStall(DynInstPtr inst, ThreadID tid, int delay)
665 {
666 scheduleCpuEvent(SquashFromMemStall, NoFault, tid, inst, delay);
667 }
668
669
670 void
671 InOrderCPU::squashDueToMemStall(int stage_num, InstSeqNum seq_num,
672 ThreadID tid)
673 {
674 DPRINTF(InOrderCPU, "Squashing Pipeline Stages Due to Memory Stall...\n");
675
676 // Squash all instructions in each stage including
677 // instruction that caused the squash (seq_num - 1)
678 // NOTE: The stage bandwidth needs to be cleared, which is why the
679 // stalling instruction is squashed as well. The stalled
680 // instruction was previously placed in another intermediate buffer
681 // while its stall was being handled.
682 InstSeqNum squash_seq_num = seq_num - 1;
683
684 for (int stNum=stage_num; stNum >= 0 ; stNum--) {
685 pipelineStage[stNum]->squashDueToMemStall(squash_seq_num, tid);
686 }
687 }
688
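// Wrap the event type in a CPUEvent and schedule it 'delay' cycles out,
// then broadcast it to the resource pool. Note that 'delay' is unsigned,
// so the immediate-process branch below is never taken.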
689 void
690 InOrderCPU::scheduleCpuEvent(CPUEventType c_event, Fault fault,
691 ThreadID tid, DynInstPtr inst,
692 unsigned delay, unsigned event_pri_offset)
693 {
694 CPUEvent *cpu_event = new CPUEvent(this, c_event, fault, tid, inst,
695 event_pri_offset);
696
697 Tick sked_tick = nextCycle(curTick() + ticks(delay));
698 if (delay >= 0) {
699 DPRINTF(InOrderCPU, "Scheduling CPU Event (%s) for cycle %i, [tid:%i].\n",
700 eventNames[c_event], curTick() + delay, tid);
701 schedule(cpu_event, sked_tick);
702 } else {
703 cpu_event->process();
704 cpuEventRemoveList.push(cpu_event);
705 }
706
707 // Broadcast event to the Resource Pool
708 // Need to reset tid just in case this is a dummy instruction
709 inst->setTid(tid);
710 resPool->scheduleEvent(c_event, inst, 0, 0, tid);
711 }
712
713 bool
714 InOrderCPU::isThreadActive(ThreadID tid)
715 {
716 list<ThreadID>::iterator isActive =
717 std::find(activeThreads.begin(), activeThreads.end(), tid);
718
719 return (isActive != activeThreads.end());
720 }
721
722 bool
723 InOrderCPU::isThreadReady(ThreadID tid)
724 {
725 list<ThreadID>::iterator isReady =
726 std::find(readyThreads.begin(), readyThreads.end(), tid);
727
728 return (isReady != readyThreads.end());
729 }
730
731 bool
732 InOrderCPU::isThreadSuspended(ThreadID tid)
733 {
734 list<ThreadID>::iterator isSuspended =
735 std::find(suspendedThreads.begin(), suspendedThreads.end(), tid);
736
737 return (isSuspended != suspendedThreads.end());
738 }
739
740 void
741 InOrderCPU::activateNextReadyThread()
742 {
743 if (readyThreads.size() >= 1) {
744 ThreadID ready_tid = readyThreads.front();
745
746 // Activate in Pipeline
747 activateThread(ready_tid);
748
749 // Activate in Resource Pool
750 resPool->activateAll(ready_tid);
751
752 list<ThreadID>::iterator ready_it =
753 std::find(readyThreads.begin(), readyThreads.end(), ready_tid);
754 readyThreads.erase(ready_it);
755 } else {
756 DPRINTF(InOrderCPU,
757 "Attempting to activate new thread, but No Ready Threads to"
758 "activate.\n");
759 DPRINTF(InOrderCPU,
760 "Unable to switch to next active thread.\n");
761 }
762 }
763
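// Move a thread onto the active list: drop it from the suspended list if
// needed; under SwitchOnCacheMiss with another thread already running it
// is only queued on the ready list, otherwise it is activated in every
// pipeline stage and the CPU is woken.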
764 void
765 InOrderCPU::activateThread(ThreadID tid)
766 {
767 if (isThreadSuspended(tid)) {
768 DPRINTF(InOrderCPU,
769 "Removing [tid:%i] from suspended threads list.\n", tid);
770
771 list<ThreadID>::iterator susp_it =
772 std::find(suspendedThreads.begin(), suspendedThreads.end(),
773 tid);
774 suspendedThreads.erase(susp_it);
775 }
776
777 if (threadModel == SwitchOnCacheMiss &&
778 numActiveThreads() == 1) {
779 DPRINTF(InOrderCPU,
780 "Ignoring activation of [tid:%i], since [tid:%i] is "
781 "already running.\n", tid, activeThreadId());
782
783 DPRINTF(InOrderCPU,"Placing [tid:%i] on ready threads list\n",
784 tid);
785
786 readyThreads.push_back(tid);
787
788 } else if (!isThreadActive(tid)) {
789 DPRINTF(InOrderCPU,
790 "Adding [tid:%i] to active threads list.\n", tid);
791 activeThreads.push_back(tid);
792
793 activateThreadInPipeline(tid);
794
795 thread[tid]->lastActivate = curTick();
796
797 tcBase(tid)->setStatus(ThreadContext::Active);
798
799 wakeCPU();
800
801 numCtxtSwitches++;
802 }
803 }
804
805 void
806 InOrderCPU::activateThreadInPipeline(ThreadID tid)
807 {
808 for (int stNum=0; stNum < NumStages; stNum++) {
809 pipelineStage[stNum]->activateThread(tid);
810 }
811 }
812
813 void
814 InOrderCPU::deactivateContext(ThreadID tid, int delay)
815 {
816 DPRINTF(InOrderCPU,"[tid:%i]: Deactivating ...\n", tid);
817
818 scheduleCpuEvent(DeactivateThread, NoFault, tid, dummyInst[tid], delay);
819
820 // Be sure to signal that there's some activity so the CPU doesn't
821 // deschedule itself.
822 activityRec.activity();
823
824 _status = Running;
825 }
826
827 void
828 InOrderCPU::deactivateThread(ThreadID tid)
829 {
830 DPRINTF(InOrderCPU, "[tid:%i]: Calling deactivate thread.\n", tid);
831
832 if (isThreadActive(tid)) {
833 DPRINTF(InOrderCPU,"[tid:%i]: Removing from active threads list\n",
834 tid);
835 list<ThreadID>::iterator thread_it =
836 std::find(activeThreads.begin(), activeThreads.end(), tid);
837
838 removePipelineStalls(*thread_it);
839
840 activeThreads.erase(thread_it);
841
842 // Ideally, this should be triggered from the
843 // suspendContext/Thread functions
844 tcBase(tid)->setStatus(ThreadContext::Suspended);
845 }
846
847 assert(!isThreadActive(tid));
848 }
849
850 void
851 InOrderCPU::removePipelineStalls(ThreadID tid)
852 {
853 DPRINTF(InOrderCPU,"[tid:%i]: Removing all pipeline stalls\n",
854 tid);
855
856 for (int stNum = 0; stNum < NumStages ; stNum++) {
857 pipelineStage[stNum]->removeStalls(tid);
858 }
859
860 }
861
862 void
863 InOrderCPU::updateThreadPriority()
864 {
865 if (activeThreads.size() > 1)
866 {
867 // Default to a round-robin scheme:
868 // e.g., move the highest-priority thread to the end of the thread list
869 list<ThreadID>::iterator list_begin = activeThreads.begin();
870 list<ThreadID>::iterator list_end = activeThreads.end();
871
872 unsigned high_thread = *list_begin;
873
874 activeThreads.erase(list_begin);
875
876 activeThreads.push_back(high_thread);
877 }
878 }
879
880 inline void
881 InOrderCPU::tickThreadStats()
882 {
883 /** Keep track of cycles that each thread is active */
884 list<ThreadID>::iterator thread_it = activeThreads.begin();
885 while (thread_it != activeThreads.end()) {
886 threadCycles[*thread_it]++;
887 thread_it++;
888 }
889
890 // Keep track of cycles where SMT is active
891 if (activeThreads.size() > 1) {
892 smtCycles++;
893 }
894 }
895
896 void
897 InOrderCPU::activateContext(ThreadID tid, int delay)
898 {
899 DPRINTF(InOrderCPU,"[tid:%i]: Activating ...\n", tid);
900
901
902 scheduleCpuEvent(ActivateThread, NoFault, tid, dummyInst[tid], delay);
903
904 // Be sure to signal that there's some activity so the CPU doesn't
905 // deschedule itself.
906 activityRec.activity();
907
908 _status = Running;
909 }
910
911 void
912 InOrderCPU::activateNextReadyContext(int delay)
913 {
914 DPRINTF(InOrderCPU,"Activating next ready thread\n");
915
916 // NOTE: Add 5 to the event priority so that we always activate
917 // threads after we've finished deactivating, squashing,etc.
918 // other threads
919 scheduleCpuEvent(ActivateNextReadyThread, NoFault, 0/*tid*/, dummyInst[0],
920 delay, 5);
921
922 // Be sure to signal that there's some activity so the CPU doesn't
923 // deschedule itself.
924 activityRec.activity();
925
926 _status = Running;
927 }
928
929 void
930 InOrderCPU::haltContext(ThreadID tid, int delay)
931 {
932 DPRINTF(InOrderCPU, "[tid:%i]: Calling Halt Context...\n", tid);
933
934 scheduleCpuEvent(HaltThread, NoFault, tid, dummyInst[tid], delay);
935
936 activityRec.activity();
937 }
938
939 void
940 InOrderCPU::haltThread(ThreadID tid)
941 {
942 DPRINTF(InOrderCPU, "[tid:%i]: Placing on Halted Threads List...\n", tid);
943 deactivateThread(tid);
944 squashThreadInPipeline(tid);
945 haltedThreads.push_back(tid);
946
947 tcBase(tid)->setStatus(ThreadContext::Halted);
948
949 if (threadModel == SwitchOnCacheMiss) {
950 activateNextReadyContext();
951 }
952 }
953
954 void
955 InOrderCPU::suspendContext(ThreadID tid, int delay)
956 {
957 scheduleCpuEvent(SuspendThread, NoFault, tid, dummyInst[tid], delay);
958 }
959
960 void
961 InOrderCPU::suspendThread(ThreadID tid)
962 {
963 DPRINTF(InOrderCPU, "[tid:%i]: Placing on Suspended Threads List...\n",
964 tid);
965 deactivateThread(tid);
966 suspendedThreads.push_back(tid);
967 thread[tid]->lastSuspend = curTick();
968
969 tcBase(tid)->setStatus(ThreadContext::Suspended);
970 }
971
972 void
973 InOrderCPU::squashThreadInPipeline(ThreadID tid)
974 {
975 //Squash all instructions in each stage
976 for (int stNum=NumStages - 1; stNum >= 0 ; stNum--) {
977 pipelineStage[stNum]->squash(0 /*seq_num*/, tid);
978 }
979 }
980
981 PipelineStage*
982 InOrderCPU::getPipeStage(int stage_num)
983 {
984 return pipelineStage[stage_num];
985 }
986
987 uint64_t
988 InOrderCPU::readIntReg(int reg_idx, ThreadID tid)
989 {
990 return intRegs[tid][reg_idx];
991 }
992
993 FloatReg
994 InOrderCPU::readFloatReg(int reg_idx, ThreadID tid)
995 {
996 return floatRegs.f[tid][reg_idx];
997 }
998
999 FloatRegBits
1000 InOrderCPU::readFloatRegBits(int reg_idx, ThreadID tid)
1001 {
1002 return floatRegs.i[tid][reg_idx];
1003 }
1004
1005 void
1006 InOrderCPU::setIntReg(int reg_idx, uint64_t val, ThreadID tid)
1007 {
1008 intRegs[tid][reg_idx] = val;
1009 }
1010
1011
1012 void
1013 InOrderCPU::setFloatReg(int reg_idx, FloatReg val, ThreadID tid)
1014 {
1015 floatRegs.f[tid][reg_idx] = val;
1016 }
1017
1018
1019 void
1020 InOrderCPU::setFloatRegBits(int reg_idx, FloatRegBits val, ThreadID tid)
1021 {
1022 floatRegs.i[tid][reg_idx] = val;
1023 }
1024
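// Cross-thread register access decodes a unified register index by range:
// indices below FP_Base_DepTag are integer regs, indices below
// Ctrl_Base_DepTag are float regs (offset by FP_Base_DepTag), and the
// rest are misc regs (offset by Ctrl_Base_DepTag).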
1025 uint64_t
1026 InOrderCPU::readRegOtherThread(unsigned reg_idx, ThreadID tid)
1027 {
1028 // If the default (invalid) thread ID was passed in, look up the target thread.
1029 if (tid == InvalidThreadID) {
1030 tid = TheISA::getTargetThread(tcBase(tid));
1031 }
1032
1033 if (reg_idx < FP_Base_DepTag) {
1034 // Integer Register File
1035 return readIntReg(reg_idx, tid);
1036 } else if (reg_idx < Ctrl_Base_DepTag) {
1037 // Float Register File
1038 reg_idx -= FP_Base_DepTag;
1039 return readFloatRegBits(reg_idx, tid);
1040 } else {
1041 reg_idx -= Ctrl_Base_DepTag;
1042 return readMiscReg(reg_idx, tid); // Misc. Register File
1043 }
1044 }
1045 void
1046 InOrderCPU::setRegOtherThread(unsigned reg_idx, const MiscReg &val,
1047 ThreadID tid)
1048 {
1049 // If the default (invalid) thread ID was passed in, look up the target thread.
1050 if (tid == InvalidThreadID) {
1051 tid = TheISA::getTargetThread(tcBase(tid));
1052 }
1053
1054 if (reg_idx < FP_Base_DepTag) { // Integer Register File
1055 setIntReg(reg_idx, val, tid);
1056 } else if (reg_idx < Ctrl_Base_DepTag) { // Float Register File
1057 reg_idx -= FP_Base_DepTag;
1058 setFloatRegBits(reg_idx, val, tid);
1059 } else {
1060 reg_idx -= Ctrl_Base_DepTag;
1061 setMiscReg(reg_idx, val, tid); // Misc. Register File
1062 }
1063 }
1064
1065 MiscReg
1066 InOrderCPU::readMiscRegNoEffect(int misc_reg, ThreadID tid)
1067 {
1068 return isa[tid].readMiscRegNoEffect(misc_reg);
1069 }
1070
1071 MiscReg
1072 InOrderCPU::readMiscReg(int misc_reg, ThreadID tid)
1073 {
1074 return isa[tid].readMiscReg(misc_reg, tcBase(tid));
1075 }
1076
1077 void
1078 InOrderCPU::setMiscRegNoEffect(int misc_reg, const MiscReg &val, ThreadID tid)
1079 {
1080 isa[tid].setMiscRegNoEffect(misc_reg, val);
1081 }
1082
1083 void
1084 InOrderCPU::setMiscReg(int misc_reg, const MiscReg &val, ThreadID tid)
1085 {
1086 isa[tid].setMiscReg(misc_reg, val, tcBase(tid));
1087 }
1088
1089
1090 InOrderCPU::ListIt
1091 InOrderCPU::addInst(DynInstPtr &inst)
1092 {
1093 ThreadID tid = inst->readTid();
1094
1095 instList[tid].push_back(inst);
1096
1097 return --(instList[tid].end());
1098 }
1099
1100 void
1101 InOrderCPU::updateContextSwitchStats()
1102 {
1103 // Set Average Stat Here, then reset to 0
1104 instsPerCtxtSwitch = instsPerSwitch;
1105 instsPerSwitch = 0;
1106 }
1107
1108
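// Called when an instruction graduates: record precise PC state, dump any
// trace data, bump per-thread and instruction-mix stats, service
// instruction-count events, notify the resource pool (InstGraduated), and
// finally remove the instruction from the CPU.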
1109 void
1110 InOrderCPU::instDone(DynInstPtr inst, ThreadID tid)
1111 {
1112 // Set the CPU's PCs - This contributes to the precise state of the CPU
1113 // which can be used when restoring a thread to the CPU after any
1114 // type of context switching activity (fork, exception, etc.)
1115 pcState(inst->pcState(), tid);
1116
1117 if (inst->isControl()) {
1118 thread[tid]->lastGradIsBranch = true;
1119 thread[tid]->lastBranchPC = inst->pcState();
1120 TheISA::advancePC(thread[tid]->lastBranchPC, inst->staticInst);
1121 } else {
1122 thread[tid]->lastGradIsBranch = false;
1123 }
1124
1125
1126 // Finalize Trace Data For Instruction
1127 if (inst->traceData) {
1128 //inst->traceData->setCycle(curTick());
1129 inst->traceData->setFetchSeq(inst->seqNum);
1130 //inst->traceData->setCPSeq(cpu->tcBase(tid)->numInst);
1131 inst->traceData->dump();
1132 delete inst->traceData;
1133 inst->traceData = NULL;
1134 }
1135
1136 // Increment active thread's instruction count
1137 instsPerSwitch++;
1138
1139 // Increment thread-state's instruction count
1140 thread[tid]->numInst++;
1141
1142 // Increment thread-state's instruction stats
1143 thread[tid]->numInsts++;
1144
1145 // Count committed insts per thread stats
1146 committedInsts[tid]++;
1147
1148 // Count total insts committed stat
1149 totalCommittedInsts++;
1150
1151 // Count SMT-committed insts per thread stat
1152 if (numActiveThreads() > 1) {
1153 smtCommittedInsts[tid]++;
1154 }
1155
1156 // Instruction-Mix Stats
1157 if (inst->isLoad()) {
1158 comLoads++;
1159 } else if (inst->isStore()) {
1160 comStores++;
1161 } else if (inst->isControl()) {
1162 comBranches++;
1163 } else if (inst->isNop()) {
1164 comNops++;
1165 } else if (inst->isNonSpeculative()) {
1166 comNonSpec++;
1167 } else if (inst->isInteger()) {
1168 comInts++;
1169 } else if (inst->isFloating()) {
1170 comFloats++;
1171 }
1172
1173 // Check for instruction-count-based events.
1174 comInstEventQueue[tid]->serviceEvents(thread[tid]->numInst);
1175
1176 // Broadcast to other resources an instruction
1177 // has been completed
1178 resPool->scheduleEvent((CPUEventType)ResourcePool::InstGraduated, inst,
1179 0, 0, tid);
1180
1181 // Finally, remove instruction from CPU
1182 removeInst(inst);
1183 }
1184
1185 // Currently unused; the repetitive removal code elsewhere could be
1186 // replaced with a call to this function.
1187 void
1188 InOrderCPU::addToRemoveList(DynInstPtr &inst)
1189 {
1190 removeInstsThisCycle = true;
1191 if (!inst->isRemoveList()) {
1192 DPRINTF(InOrderCPU, "Pushing instruction [tid:%i] PC %s "
1193 "[sn:%lli] to remove list\n",
1194 inst->threadNumber, inst->pcState(), inst->seqNum);
1195 inst->setRemoveList();
1196 removeList.push(inst->getInstListIt());
1197 } else {
1198 DPRINTF(InOrderCPU, "Ignoring instruction removal for [tid:%i] PC %s "
1199 "[sn:%lli], already remove list\n",
1200 inst->threadNumber, inst->pcState(), inst->seqNum);
1201 }
1202
1203 }
1204
1205 void
1206 InOrderCPU::removeInst(DynInstPtr &inst)
1207 {
1208 DPRINTF(InOrderCPU, "Removing graduated instruction [tid:%i] PC %s "
1209 "[sn:%lli]\n",
1210 inst->threadNumber, inst->pcState(), inst->seqNum);
1211
1212 removeInstsThisCycle = true;
1213
1214 // Remove the instruction.
1215 if (!inst->isRemoveList()) {
1216 DPRINTF(InOrderCPU, "Pushing instruction [tid:%i] PC %s "
1217 "[sn:%lli] to remove list\n",
1218 inst->threadNumber, inst->pcState(), inst->seqNum);
1219 inst->setRemoveList();
1220 removeList.push(inst->getInstListIt());
1221 } else {
1222 DPRINTF(InOrderCPU, "Ignoring instruction removal for [tid:%i] PC %s "
1223 "[sn:%lli], already on remove list\n",
1224 inst->threadNumber, inst->pcState(), inst->seqNum);
1225 }
1226
1227 }
1228
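// Walk the thread's instruction list from youngest to oldest, squashing
// every instruction with a sequence number greater than seq_num.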
1229 void
1230 InOrderCPU::removeInstsUntil(const InstSeqNum &seq_num, ThreadID tid)
1231 {
1232 //assert(!instList[tid].empty());
1233
1234 removeInstsThisCycle = true;
1235
1236 ListIt inst_iter = instList[tid].end();
1237
1238 inst_iter--;
1239
1240 DPRINTF(InOrderCPU, "Squashing instructions from CPU instruction "
1241 "list that are from [tid:%i] and above [sn:%lli] (end=%lli).\n",
1242 tid, seq_num, (*inst_iter)->seqNum);
1243
1244 while ((*inst_iter)->seqNum > seq_num) {
1245
1246 bool break_loop = (inst_iter == instList[tid].begin());
1247
1248 squashInstIt(inst_iter, tid);
1249
1250 inst_iter--;
1251
1252 if (break_loop)
1253 break;
1254 }
1255 }
1256
1257
1258 inline void
1259 InOrderCPU::squashInstIt(const ListIt &instIt, ThreadID tid)
1260 {
1261 if ((*instIt)->threadNumber == tid) {
1262 DPRINTF(InOrderCPU, "Squashing instruction, "
1263 "[tid:%i] [sn:%lli] PC %s\n",
1264 (*instIt)->threadNumber,
1265 (*instIt)->seqNum,
1266 (*instIt)->pcState());
1267
1268 (*instIt)->setSquashed();
1269
1270 if (!(*instIt)->isRemoveList()) {
1271 DPRINTF(InOrderCPU, "Pushing instruction [tid:%i] PC %s "
1272 "[sn:%lli] to remove list\n",
1273 (*instIt)->threadNumber, (*instIt)->pcState(),
1274 (*instIt)->seqNum);
1275 (*instIt)->setRemoveList();
1276 removeList.push(instIt);
1277 } else {
1278 DPRINTF(InOrderCPU, "Ignoring instruction removal for [tid:%i]"
1279 " PC %s [sn:%lli], already on remove list\n",
1280 (*instIt)->threadNumber, (*instIt)->pcState(),
1281 (*instIt)->seqNum);
1282 }
1283
1284 }
1285
1286 }
1287
1288
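// Drain the remove list: free each instruction's remaining resource
// schedule entries, clear its architectural dependency-map and
// non-speculative bookkeeping, and erase it from the instruction list.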
1289 void
1290 InOrderCPU::cleanUpRemovedInsts()
1291 {
1292 while (!removeList.empty()) {
1293 DPRINTF(InOrderCPU, "Removing instruction, "
1294 "[tid:%i] [sn:%lli] PC %s\n",
1295 (*removeList.front())->threadNumber,
1296 (*removeList.front())->seqNum,
1297 (*removeList.front())->pcState());
1298
1299 DynInstPtr inst = *removeList.front();
1300 ThreadID tid = inst->threadNumber;
1301
1302 // Make Sure Resource Schedule Is Emptied Out
1303 ThePipeline::ResSchedule *inst_sched = &inst->resSched;
1304 while (!inst_sched->empty()) {
1305 ScheduleEntry* sch_entry = inst_sched->top();
1306 inst_sched->pop();
1307 delete sch_entry;
1308 }
1309
1310 // Remove From Register Dependency Map, If Necessary
1311 archRegDepMap[(*removeList.front())->threadNumber].
1312 remove((*removeList.front()));
1313
1314
1315 // Clear if Non-Speculative
1316 if (inst->staticInst &&
1317 inst->seqNum == nonSpecSeqNum[tid] &&
1318 nonSpecInstActive[tid] == true) {
1319 nonSpecInstActive[tid] = false;
1320 }
1321
1322 instList[tid].erase(removeList.front());
1323
1324 removeList.pop();
1325 }
1326
1327 removeInstsThisCycle = false;
1328 }
1329
1330 void
1331 InOrderCPU::cleanUpRemovedReqs()
1332 {
1333 while (!reqRemoveList.empty()) {
1334 ResourceRequest *res_req = reqRemoveList.front();
1335
1336 DPRINTF(RefCount, "[tid:%i] [sn:%lli]: Removing Request "
1337 "[stage_num:%i] [res:%s] [slot:%i] [completed:%i].\n",
1338 res_req->inst->threadNumber,
1339 res_req->inst->seqNum,
1340 res_req->getStageNum(),
1341 res_req->res->name(),
1342 (res_req->isCompleted()) ?
1343 res_req->getComplSlot() : res_req->getSlot(),
1344 res_req->isCompleted());
1345
1346 reqRemoveList.pop();
1347
1348 delete res_req;
1349 }
1350 }
1351
1352 void
1353 InOrderCPU::cleanUpRemovedEvents()
1354 {
1355 while (!cpuEventRemoveList.empty()) {
1356 Event *cpu_event = cpuEventRemoveList.front();
1357 cpuEventRemoveList.pop();
1358 delete cpu_event;
1359 }
1360 }
1361
1362
1363 void
1364 InOrderCPU::dumpInsts()
1365 {
1366 int num = 0;
1367
1368 ListIt inst_list_it = instList[0].begin();
1369
1370 cprintf("Dumping Instruction List\n");
1371
1372 while (inst_list_it != instList[0].end()) {
1373 cprintf("Instruction:%i\nPC:%s\n[tid:%i]\n[sn:%lli]\nIssued:%i\n"
1374 "Squashed:%i\n\n",
1375 num, (*inst_list_it)->pcState(),
1376 (*inst_list_it)->threadNumber,
1377 (*inst_list_it)->seqNum, (*inst_list_it)->isIssued(),
1378 (*inst_list_it)->isSquashed());
1379 inst_list_it++;
1380 ++num;
1381 }
1382 }
1383
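// Reschedule the tick event after an idle period and charge the skipped
// cycles to the CPU-level and per-stage idle-cycle stats.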
1384 void
1385 InOrderCPU::wakeCPU()
1386 {
1387 if (/*activityRec.active() || */tickEvent.scheduled()) {
1388 DPRINTF(Activity, "CPU already running.\n");
1389 return;
1390 }
1391
1392 DPRINTF(Activity, "Waking up CPU\n");
1393
1394 Tick extra_cycles = tickToCycles((curTick() - 1) - lastRunningCycle);
1395
1396 idleCycles += extra_cycles;
1397 for (int stage_num = 0; stage_num < NumStages; stage_num++) {
1398 pipelineStage[stage_num]->idleCycles += extra_cycles;
1399 }
1400
1401 numCycles += extra_cycles;
1402
1403 schedule(&tickEvent, nextCycle(curTick()));
1404 }
1405
1406 #if FULL_SYSTEM
1407
1408 void
1409 InOrderCPU::wakeup()
1410 {
1411 if (thread[0]->status() != ThreadContext::Suspended)
1412 return;
1413
1414 wakeCPU();
1415
1416 DPRINTF(Quiesce, "Suspended Processor woken\n");
1417 threadContexts[0]->activate();
1418 }
1419 #endif
1420
1421 #if !FULL_SYSTEM
1422 void
1423 InOrderCPU::syscall(int64_t callnum, ThreadID tid)
1424 {
1425 DPRINTF(InOrderCPU, "[tid:%i] Executing syscall().\n\n", tid);
1426
1427 DPRINTF(Activity,"Activity: syscall() called.\n");
1428
1429 // Temporarily increase this by one to account for the syscall
1430 // instruction.
1431 ++(this->thread[tid]->funcExeInst);
1432
1433 // Execute the actual syscall.
1434 this->thread[tid]->syscall(callnum);
1435
1436 // Decrease funcExeInst by one as the normal commit will handle
1437 // incrementing it.
1438 --(this->thread[tid]->funcExeInst);
1439
1440 // Clear Non-Speculative Block Variable
1441 nonSpecInstActive[tid] = false;
1442 }
1443 #endif
1444
1445 TheISA::TLB*
1446 InOrderCPU::getITBPtr()
1447 {
1448 CacheUnit *itb_res =
1449 dynamic_cast<CacheUnit*>(resPool->getResource(fetchPortIdx));
1450 return itb_res->tlb();
1451 }
1452
1453
1454 TheISA::TLB*
1455 InOrderCPU::getDTBPtr()
1456 {
1457 CacheUnit *dtb_res =
1458 dynamic_cast<CacheUnit*>(resPool->getResource(dataPortIdx));
1459 return dtb_res->tlb();
1460 }
1461
1462 Fault
1463 InOrderCPU::read(DynInstPtr inst, Addr addr,
1464 uint8_t *data, unsigned size, unsigned flags)
1465 {
1466 //@TODO: Generalize name "CacheUnit" to "MemUnit" just in case
1467 // you want to run w/out caches?
1468 CacheUnit *cache_res =
1469 dynamic_cast<CacheUnit*>(resPool->getResource(dataPortIdx));
1470
1471 return cache_res->read(inst, addr, data, size, flags);
1472 }
1473
1474 Fault
1475 InOrderCPU::write(DynInstPtr inst, uint8_t *data, unsigned size,
1476 Addr addr, unsigned flags, uint64_t *write_res)
1477 {
1478 //@TODO: Generalize name "CacheUnit" to "MemUnit" just in case
1479 // you want to run w/out caches?
1480 CacheUnit *cache_res =
1481 dynamic_cast<CacheUnit*>(resPool->getResource(dataPortIdx));
1482 return cache_res->write(inst, data, size, addr, flags, write_res);
1483 }