inorder: track last branch committed
[gem5.git] src/cpu/inorder/cpu.cc
1 /*
2 * Copyright (c) 2007 MIPS Technologies, Inc.
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 *
28 * Authors: Korey Sewell
29 *
30 */
31
32 #include <algorithm>
33
34 #include "arch/utility.hh"
35 #include "config/full_system.hh"
36 #include "config/the_isa.hh"
37 #include "cpu/activity.hh"
38 #include "cpu/base.hh"
39 #include "cpu/exetrace.hh"
40 #include "cpu/inorder/cpu.hh"
41 #include "cpu/inorder/first_stage.hh"
42 #include "cpu/inorder/inorder_dyn_inst.hh"
43 #include "cpu/inorder/pipeline_traits.hh"
44 #include "cpu/inorder/resource_pool.hh"
45 #include "cpu/inorder/resources/resource_list.hh"
46 #include "cpu/inorder/thread_context.hh"
47 #include "cpu/inorder/thread_state.hh"
48 #include "cpu/simple_thread.hh"
49 #include "cpu/thread_context.hh"
50 #include "mem/translating_port.hh"
51 #include "params/InOrderCPU.hh"
52 #include "sim/process.hh"
53 #include "sim/stat_control.hh"
54
55 #if FULL_SYSTEM
56 #include "cpu/quiesce_event.hh"
57 #include "sim/system.hh"
58 #endif
59
60 #if THE_ISA == ALPHA_ISA
61 #include "arch/alpha/osfpal.hh"
62 #endif
63
64 using namespace std;
65 using namespace TheISA;
66 using namespace ThePipeline;
67
68 InOrderCPU::TickEvent::TickEvent(InOrderCPU *c)
69 : Event(CPU_Tick_Pri), cpu(c)
70 { }
71
72
73 void
74 InOrderCPU::TickEvent::process()
75 {
76 cpu->tick();
77 }
78
79
80 const char *
81 InOrderCPU::TickEvent::description()
82 {
83 return "InOrderCPU tick event";
84 }
85
86 InOrderCPU::CPUEvent::CPUEvent(InOrderCPU *_cpu, CPUEventType e_type,
87 Fault fault, ThreadID _tid, DynInstPtr inst,
88 unsigned event_pri_offset)
89 : Event(Event::Priority((unsigned int)CPU_Tick_Pri + event_pri_offset)),
90 cpu(_cpu)
91 {
92 setEvent(e_type, fault, _tid, inst);
93 }
94
95
96 std::string InOrderCPU::eventNames[NumCPUEvents] =
97 {
98 "ActivateThread",
99 "ActivateNextReadyThread",
100 "DeactivateThread",
101 "DeallocateThread",
102 "SuspendThread",
103 "Trap",
104 "InstGraduated",
105 "SquashFromMemStall",
106 "UpdatePCs"
107 };
108
109 void
110 InOrderCPU::CPUEvent::process()
111 {
112 switch (cpuEventType)
113 {
114 case ActivateThread:
115 cpu->activateThread(tid);
116 break;
117
118 case ActivateNextReadyThread:
119 cpu->activateNextReadyThread();
120 break;
121
122 case DeactivateThread:
123 cpu->deactivateThread(tid);
124 break;
125
126 case DeallocateThread:
127 cpu->deallocateThread(tid);
128 break;
129
130 case SuspendThread:
131 cpu->suspendThread(tid);
132 break;
133
134 case SquashFromMemStall:
135 cpu->squashDueToMemStall(inst->squashingStage, inst->seqNum, tid);
136 break;
137
138 case Trap:
139 cpu->trapCPU(fault, tid);
140 break;
141
142 default:
143 fatal("Unrecognized Event Type %d", cpuEventType);
144
145 }
146
147 cpu->cpuEventRemoveList.push(this);
148 }
149
150
151
152 const char *
153 InOrderCPU::CPUEvent::description()
154 {
155 return "InOrderCPU event";
156 }
157
158 void
159 InOrderCPU::CPUEvent::scheduleEvent(int delay)
160 {
161 if (squashed())
162 mainEventQueue.reschedule(this,curTick + cpu->ticks(delay));
163 else if (!scheduled())
164 mainEventQueue.schedule(this,curTick + cpu->ticks(delay));
165 }
166
167 void
168 InOrderCPU::CPUEvent::unscheduleEvent()
169 {
170 if (scheduled())
171 squash();
172 }
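// Editorial note (not in the original source): unscheduleEvent() only
// squashes a scheduled event rather than removing it from the queue, so a
// later scheduleEvent() call revives the same Event object via reschedule()
// instead of allocating a new one. An illustrative sketch, assuming a
// hypothetical CPUEvent pointer 'ev':
//
//     ev->scheduleEvent(4);    // queued for curTick + cpu->ticks(4)
//     ev->unscheduleEvent();   // squashed, but the object stays queued
//     ev->scheduleEvent(2);    // squashed() is true, so it is rescheduled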
173
174 InOrderCPU::InOrderCPU(Params *params)
175 : BaseCPU(params),
176 cpu_id(params->cpu_id),
177 coreType("default"),
178 _status(Idle),
179 tickEvent(this),
180 timeBuffer(2 , 2),
181 removeInstsThisCycle(false),
182 activityRec(params->name, NumStages, 10, params->activity),
183 #if FULL_SYSTEM
184 system(params->system),
185 physmem(system->physmem),
186 #endif // FULL_SYSTEM
187 #ifdef DEBUG
188 cpuEventNum(0),
189 resReqCount(0),
190 #endif // DEBUG
191 switchCount(0),
192 deferRegistration(false/*params->deferRegistration*/),
193 stageTracing(params->stageTracing)
194 {
195 ThreadID active_threads;
196 cpu_params = params;
197
198 resPool = new ResourcePool(this, params);
199
200 // Resize for Multithreading CPUs
201 thread.resize(numThreads);
202
203 #if FULL_SYSTEM
204 active_threads = 1;
205 #else
206 active_threads = params->workload.size();
207
208 if (active_threads > MaxThreads) {
209         panic("Workload Size too large. Increase the 'MaxThreads' "
210               "in your InOrder implementation or "
211               "edit your workload size.");
212 }
213
214 if (active_threads > 1) {
215 threadModel = (InOrderCPU::ThreadModel) params->threadModel;
216
217 if (threadModel == SMT) {
218 DPRINTF(InOrderCPU, "Setting Thread Model to SMT.\n");
219 } else if (threadModel == SwitchOnCacheMiss) {
220 DPRINTF(InOrderCPU, "Setting Thread Model to "
221 "Switch On Cache Miss\n");
222 }
223
224 } else {
225 threadModel = Single;
226 }
227
228
229
230 #endif
231
232 // Bind the fetch & data ports from the resource pool.
233 fetchPortIdx = resPool->getPortIdx(params->fetchMemPort);
234 if (fetchPortIdx == 0) {
235 fatal("Unable to find port to fetch instructions from.\n");
236 }
237
238 dataPortIdx = resPool->getPortIdx(params->dataMemPort);
239 if (dataPortIdx == 0) {
240 fatal("Unable to find port for data.\n");
241 }
242
243 for (ThreadID tid = 0; tid < numThreads; ++tid) {
244 #if FULL_SYSTEM
245 // SMT is not supported in FS mode yet.
246 assert(numThreads == 1);
247 thread[tid] = new Thread(this, 0);
248 #else
249 if (tid < (ThreadID)params->workload.size()) {
250             DPRINTF(InOrderCPU, "Workload[%i] process is %s\n",
251                     tid, params->workload[tid]->prog_fname);
252 thread[tid] =
253 new Thread(this, tid, params->workload[tid]);
254 } else {
255 //Allocate Empty thread so M5 can use later
256 //when scheduling threads to CPU
257 Process* dummy_proc = params->workload[0];
258 thread[tid] = new Thread(this, tid, dummy_proc);
259 }
260 #endif
261
262 // Setup the TC that will serve as the interface to the threads/CPU.
263 InOrderThreadContext *tc = new InOrderThreadContext;
264 tc->cpu = this;
265 tc->thread = thread[tid];
266
267 // Give the thread the TC.
268 thread[tid]->tc = tc;
269 thread[tid]->setFuncExeInst(0);
270 globalSeqNum[tid] = 1;
271
272 // Add the TC to the CPU's list of TC's.
273 this->threadContexts.push_back(tc);
274 }
275
276 // Initialize TimeBuffer Stage Queues
277 for (int stNum=0; stNum < NumStages - 1; stNum++) {
278 stageQueue[stNum] = new StageQueue(NumStages, NumStages);
279 stageQueue[stNum]->id(stNum);
280 }
281
282
283 // Set Up Pipeline Stages
284 for (int stNum=0; stNum < NumStages; stNum++) {
285 if (stNum == 0)
286 pipelineStage[stNum] = new FirstStage(params, stNum);
287 else
288 pipelineStage[stNum] = new PipelineStage(params, stNum);
289
290 pipelineStage[stNum]->setCPU(this);
291 pipelineStage[stNum]->setActiveThreads(&activeThreads);
292 pipelineStage[stNum]->setTimeBuffer(&timeBuffer);
293
294 // Take Care of 1st/Nth stages
295 if (stNum > 0)
296 pipelineStage[stNum]->setPrevStageQueue(stageQueue[stNum - 1]);
297 if (stNum < NumStages - 1)
298 pipelineStage[stNum]->setNextStageQueue(stageQueue[stNum]);
299 }
300
301 // Initialize thread specific variables
302 for (ThreadID tid = 0; tid < numThreads; tid++) {
303 archRegDepMap[tid].setCPU(this);
304
305 nonSpecInstActive[tid] = false;
306 nonSpecSeqNum[tid] = 0;
307
308 squashSeqNum[tid] = MaxAddr;
309 lastSquashCycle[tid] = 0;
310
311 memset(intRegs[tid], 0, sizeof(intRegs[tid]));
312 memset(floatRegs.i[tid], 0, sizeof(floatRegs.i[tid]));
313 isa[tid].clear();
314
315 isa[tid].expandForMultithreading(numThreads, 1/*numVirtProcs*/);
316 }
317
318 lastRunningCycle = curTick;
319
320 // Define dummy instructions and resource requests to be used.
321 dummyInst = new InOrderDynInst(this, NULL, 0, 0);
322 dummyReq = new ResourceRequest(resPool->getResource(0), NULL, 0, 0, 0, 0);
323
324 // Reset CPU to reset state.
325 #if FULL_SYSTEM
326 Fault resetFault = new ResetFault();
327 resetFault->invoke(tcBase());
328 #else
329 reset();
330 #endif
331
332 // Schedule First Tick Event, CPU will reschedule itself from here on out.
333 scheduleTickEvent(0);
334 }
335
336
337 void
338 InOrderCPU::regStats()
339 {
340 /* Register the Resource Pool's stats here.*/
341 resPool->regStats();
342
343 #ifdef DEBUG
344 maxResReqCount
345 .name(name() + ".maxResReqCount")
346 .desc("Maximum number of live resource requests in CPU")
347 .prereq(maxResReqCount);
348 #endif
349
350 /* Register any of the InOrderCPU's stats here.*/
351 timesIdled
352 .name(name() + ".timesIdled")
353 .desc("Number of times that the entire CPU went into an idle state and"
354 " unscheduled itself")
355 .prereq(timesIdled);
356
357 idleCycles
358 .name(name() + ".idleCycles")
359 .desc("Total number of cycles that the CPU has spent unscheduled due "
360 "to idling")
361 .prereq(idleCycles);
362
363 threadCycles
364 .init(numThreads)
365 .name(name() + ".threadCycles")
366 .desc("Total Number of Cycles A Thread Was Active in CPU (Per-Thread)");
367
368 smtCycles
369 .name(name() + ".smtCycles")
370 .desc("Total number of cycles that the CPU was in SMT-mode");
371
372 committedInsts
373 .init(numThreads)
374 .name(name() + ".committedInsts")
375 .desc("Number of Instructions Simulated (Per-Thread)");
376
377 smtCommittedInsts
378 .init(numThreads)
379 .name(name() + ".smtCommittedInsts")
380 .desc("Number of SMT Instructions Simulated (Per-Thread)");
381
382 totalCommittedInsts
383 .name(name() + ".committedInsts_total")
384 .desc("Number of Instructions Simulated (Total)");
385
386 cpi
387 .name(name() + ".cpi")
388 .desc("CPI: Cycles Per Instruction (Per-Thread)")
389 .precision(6);
390 cpi = threadCycles / committedInsts;
391
392 smtCpi
393 .name(name() + ".smt_cpi")
394 .desc("CPI: Total SMT-CPI")
395 .precision(6);
396 smtCpi = smtCycles / smtCommittedInsts;
397
398 totalCpi
399 .name(name() + ".cpi_total")
400 .desc("CPI: Total CPI of All Threads")
401 .precision(6);
402 totalCpi = numCycles / totalCommittedInsts;
403
404 ipc
405 .name(name() + ".ipc")
406 .desc("IPC: Instructions Per Cycle (Per-Thread)")
407 .precision(6);
408 ipc = committedInsts / threadCycles;
409
410 smtIpc
411 .name(name() + ".smt_ipc")
412 .desc("IPC: Total SMT-IPC")
413 .precision(6);
414 smtIpc = smtCommittedInsts / smtCycles;
415
416 totalIpc
417 .name(name() + ".ipc_total")
418 .desc("IPC: Total IPC of All Threads")
419 .precision(6);
420 totalIpc = totalCommittedInsts / numCycles;
421
422 BaseCPU::regStats();
423 }
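// Editorial note (not in the original source): the formula stats above are
// simple ratios of the counters registered in this function. A worked
// example with illustrative numbers: if a thread was active for 4,000
// cycles (threadCycles) and committed 1,000 instructions (committedInsts),
// then cpi = 4000 / 1000 = 4.0 and ipc = 1000 / 4000 = 0.25; totalCpi and
// totalIpc use numCycles and totalCommittedInsts the same way, aggregated
// over all threads.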
424
425
426 void
427 InOrderCPU::tick()
428 {
429     DPRINTF(InOrderCPU, "\n\nInOrderCPU: Ticking main loop.\n");
430
431 ++numCycles;
432
433 //Tick each of the stages
434 for (int stNum=NumStages - 1; stNum >= 0 ; stNum--) {
435 pipelineStage[stNum]->tick();
436 }
437
438 // Now advance the time buffers one tick
439 timeBuffer.advance();
440 for (int sqNum=0; sqNum < NumStages - 1; sqNum++) {
441 stageQueue[sqNum]->advance();
442 }
443 activityRec.advance();
444
445 // Any squashed requests, events, or insts then remove them now
446 cleanUpRemovedReqs();
447 cleanUpRemovedEvents();
448 cleanUpRemovedInsts();
449
450 // Re-schedule CPU for this cycle
451 if (!tickEvent.scheduled()) {
452 if (_status == SwitchedOut) {
453 // increment stat
454 lastRunningCycle = curTick;
455 } else if (!activityRec.active()) {
456 DPRINTF(InOrderCPU, "sleeping CPU.\n");
457 lastRunningCycle = curTick;
458 timesIdled++;
459 } else {
460 //Tick next_tick = curTick + cycles(1);
461 //tickEvent.schedule(next_tick);
462 mainEventQueue.schedule(&tickEvent, nextCycle(curTick + 1));
463 DPRINTF(InOrderCPU, "Scheduled CPU for next tick @ %i.\n",
464 nextCycle(curTick + 1));
465 }
466 }
467
468 tickThreadStats();
469 updateThreadPriority();
470 }
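// Editorial note (not in the original source): tick() only reschedules
// itself while the ActivityRecorder reports activity. When the pipeline
// goes quiet the CPU deschedules itself (timesIdled++ above) and stays
// asleep until something calls wakeCPU(), which re-queues tickEvent (see
// InOrderCPU::wakeCPU() further down in this file).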
471
472
473 void
474 InOrderCPU::init()
475 {
476 if (!deferRegistration) {
477 registerThreadContexts();
478 }
479
480 // Set inSyscall so that the CPU doesn't squash when initially
481 // setting up registers.
482 for (ThreadID tid = 0; tid < numThreads; ++tid)
483 thread[tid]->inSyscall = true;
484
485 #if FULL_SYSTEM
486 for (ThreadID tid = 0; tid < numThreads; tid++) {
487 ThreadContext *src_tc = threadContexts[tid];
488 TheISA::initCPU(src_tc, src_tc->contextId());
489 }
490 #endif
491
492 // Clear inSyscall.
493 for (ThreadID tid = 0; tid < numThreads; ++tid)
494 thread[tid]->inSyscall = false;
495
496     // Call Initialization Routine for Resource Pool
497 resPool->init();
498 }
499
500 void
501 InOrderCPU::reset()
502 {
503 for (int i = 0; i < numThreads; i++) {
504 isa[i].reset(coreType, numThreads,
505 1/*numVirtProcs*/, dynamic_cast<BaseCPU*>(this));
506 }
507 }
508
509 Port*
510 InOrderCPU::getPort(const std::string &if_name, int idx)
511 {
512 return resPool->getPort(if_name, idx);
513 }
514
515 #if FULL_SYSTEM
516 Fault
517 InOrderCPU::hwrei(ThreadID tid)
518 {
519 panic("hwrei: Unimplemented");
520
521 return NoFault;
522 }
523
524
525 bool
526 InOrderCPU::simPalCheck(int palFunc, ThreadID tid)
527 {
528 panic("simPalCheck: Unimplemented");
529
530 return true;
531 }
532
533
534 Fault
535 InOrderCPU::getInterrupts()
536 {
537 // Check if there are any outstanding interrupts
538 return this->interrupts->getInterrupt(this->threadContexts[0]);
539 }
540
541
542 void
543 InOrderCPU::processInterrupts(Fault interrupt)
544 {
545 // Check for interrupts here. For now can copy the code that
546 // exists within isa_fullsys_traits.hh. Also assume that thread 0
547 // is the one that handles the interrupts.
548 // @todo: Possibly consolidate the interrupt checking code.
549 // @todo: Allow other threads to handle interrupts.
550
551 assert(interrupt != NoFault);
552 this->interrupts->updateIntrInfo(this->threadContexts[0]);
553
554 DPRINTF(InOrderCPU, "Interrupt %s being handled\n", interrupt->name());
555 this->trap(interrupt, 0);
556 }
557
558
559 void
560 InOrderCPU::updateMemPorts()
561 {
562 // Update all ThreadContext's memory ports (Functional/Virtual
563 // Ports)
564 ThreadID size = thread.size();
565 for (ThreadID i = 0; i < size; ++i)
566 thread[i]->connectMemPorts(thread[i]->getTC());
567 }
568 #endif
569
570 void
571 InOrderCPU::trap(Fault fault, ThreadID tid, int delay)
572 {
573 //@ Squash Pipeline during TRAP
574 scheduleCpuEvent(Trap, fault, tid, dummyInst, delay);
575 }
576
577 void
578 InOrderCPU::trapCPU(Fault fault, ThreadID tid)
579 {
580 fault->invoke(tcBase(tid));
581 }
582
583 void
584 InOrderCPU::squashFromMemStall(DynInstPtr inst, ThreadID tid, int delay)
585 {
586 scheduleCpuEvent(SquashFromMemStall, NoFault, tid, inst, delay);
587 }
588
589
590 void
591 InOrderCPU::squashDueToMemStall(int stage_num, InstSeqNum seq_num, ThreadID tid)
592 {
593 DPRINTF(InOrderCPU, "Squashing Pipeline Stages Due to Memory Stall...\n");
594
595     // Squash all instructions in each stage, including the
596     // instruction that caused the squash (hence seq_num - 1).
597     // NOTE: The stage bandwidth needs to be cleared, which is why
598     // the stalling instruction is squashed as well. The stalled
599     // instruction has already been placed in another intermediate buffer
600     // while its stall is being handled.
601 InstSeqNum squash_seq_num = seq_num - 1;
602
603 for (int stNum=stage_num; stNum >= 0 ; stNum--) {
604 pipelineStage[stNum]->squashDueToMemStall(squash_seq_num, tid);
605 }
606 }
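// Editorial note (not in the original source): a worked example, with
// illustrative sequence numbers, of the seq_num - 1 above. If the stalling
// memory instruction has seq_num 42, squash_seq_num becomes 41, so each
// stage from stage_num down to 0 squashes every instruction younger than
// [sn:41] -- which includes the stalling instruction itself, freeing its
// slot of stage bandwidth.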
607
608 void
609 InOrderCPU::scheduleCpuEvent(CPUEventType c_event, Fault fault,
610 ThreadID tid, DynInstPtr inst,
611 unsigned delay, unsigned event_pri_offset)
612 {
613 CPUEvent *cpu_event = new CPUEvent(this, c_event, fault, tid, inst,
614 event_pri_offset);
615
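    // Editorial note (not in the original source): 'delay' is declared
    // unsigned, so the test below is always true and the immediate-process
    // branch in the else clause is unreachable as written.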
616 if (delay >= 0) {
617 DPRINTF(InOrderCPU, "Scheduling CPU Event (%s) for cycle %i, [tid:%i].\n",
618 eventNames[c_event], curTick + delay, tid);
619 mainEventQueue.schedule(cpu_event,curTick + delay);
620 } else {
621 cpu_event->process();
622 cpuEventRemoveList.push(cpu_event);
623 }
624
625 // Broadcast event to the Resource Pool
626 // Need to reset tid just in case this is a dummy instruction
627 inst->setTid(tid);
628 resPool->scheduleEvent(c_event, inst, 0, 0, tid);
629 }
630
631 bool
632 InOrderCPU::isThreadActive(ThreadID tid)
633 {
634 list<ThreadID>::iterator isActive =
635 std::find(activeThreads.begin(), activeThreads.end(), tid);
636
637 return (isActive != activeThreads.end());
638 }
639
640 bool
641 InOrderCPU::isThreadReady(ThreadID tid)
642 {
643 list<ThreadID>::iterator isReady =
644 std::find(readyThreads.begin(), readyThreads.end(), tid);
645
646 return (isReady != readyThreads.end());
647 }
648
649 bool
650 InOrderCPU::isThreadSuspended(ThreadID tid)
651 {
652 list<ThreadID>::iterator isSuspended =
653 std::find(suspendedThreads.begin(), suspendedThreads.end(), tid);
654
655 return (isSuspended != suspendedThreads.end());
656 }
657
658 void
659 InOrderCPU::activateNextReadyThread()
660 {
661 if (readyThreads.size() >= 1) {
662 ThreadID ready_tid = readyThreads.front();
663
664 // Activate in Pipeline
665 activateThread(ready_tid);
666
667 // Activate in Resource Pool
668 resPool->activateAll(ready_tid);
669
670 list<ThreadID>::iterator ready_it =
671 std::find(readyThreads.begin(), readyThreads.end(), ready_tid);
672 readyThreads.erase(ready_it);
673 } else {
674 DPRINTF(InOrderCPU,
675             "Attempting to activate new thread, but no ready threads to "
676             "activate.\n");
677 DPRINTF(InOrderCPU,
678 "Unable to switch to next active thread.\n");
679 }
680 }
681
682 void
683 InOrderCPU::activateThread(ThreadID tid)
684 {
685 if (isThreadSuspended(tid)) {
686 DPRINTF(InOrderCPU,
687 "Removing [tid:%i] from suspended threads list.\n", tid);
688
689 list<ThreadID>::iterator susp_it =
690 std::find(suspendedThreads.begin(), suspendedThreads.end(),
691 tid);
692 suspendedThreads.erase(susp_it);
693 }
694
695 if (threadModel == SwitchOnCacheMiss &&
696 numActiveThreads() == 1) {
697 DPRINTF(InOrderCPU,
698 "Ignoring activation of [tid:%i], since [tid:%i] is "
699 "already running.\n", tid, activeThreadId());
700
701 DPRINTF(InOrderCPU,"Placing [tid:%i] on ready threads list\n",
702 tid);
703
704 readyThreads.push_back(tid);
705
706 } else if (!isThreadActive(tid)) {
707 DPRINTF(InOrderCPU,
708 "Adding [tid:%i] to active threads list.\n", tid);
709 activeThreads.push_back(tid);
710
711 activateThreadInPipeline(tid);
712
713 thread[tid]->lastActivate = curTick;
714
715 wakeCPU();
716 }
717 }
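// Editorial note (not in the original source): under the SwitchOnCacheMiss
// thread model only one thread runs at a time, so an activation request for
// a second thread is parked on readyThreads above; activateNextReadyThread()
// later pops it from that list once the running thread has been switched out
// (e.g. after a memory-stall-induced squash).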
718
719 void
720 InOrderCPU::activateThreadInPipeline(ThreadID tid)
721 {
722 for (int stNum=0; stNum < NumStages; stNum++) {
723 pipelineStage[stNum]->activateThread(tid);
724 }
725 }
726
727 void
728 InOrderCPU::deactivateContext(ThreadID tid, int delay)
729 {
730 DPRINTF(InOrderCPU,"[tid:%i]: Deactivating ...\n", tid);
731
732 scheduleCpuEvent(DeactivateThread, NoFault, tid, dummyInst, delay);
733
734 // Be sure to signal that there's some activity so the CPU doesn't
735 // deschedule itself.
736 activityRec.activity();
737
738 _status = Running;
739 }
740
741 void
742 InOrderCPU::deactivateThread(ThreadID tid)
743 {
744 DPRINTF(InOrderCPU, "[tid:%i]: Calling deactivate thread.\n", tid);
745
746 if (isThreadActive(tid)) {
747 DPRINTF(InOrderCPU,"[tid:%i]: Removing from active threads list\n",
748 tid);
749 list<ThreadID>::iterator thread_it =
750 std::find(activeThreads.begin(), activeThreads.end(), tid);
751
752 removePipelineStalls(*thread_it);
753
754 //@TODO: change stage status' to Idle?
755
756 activeThreads.erase(thread_it);
757 }
758
759 assert(!isThreadActive(tid));
760 }
761
762 void
763 InOrderCPU::deallocateContext(ThreadID tid, int delay)
764 {
765 DPRINTF(InOrderCPU,"[tid:%i]: Deallocating ...\n", tid);
766
767 scheduleCpuEvent(DeallocateThread, NoFault, tid, dummyInst, delay);
768
769 // Be sure to signal that there's some activity so the CPU doesn't
770 // deschedule itself.
771 activityRec.activity();
772
773 _status = Running;
774 }
775
776 void
777 InOrderCPU::deallocateThread(ThreadID tid)
778 {
779 DPRINTF(InOrderCPU, "[tid:%i]: Calling deallocate thread.\n", tid);
780
781 if (isThreadActive(tid)) {
782 DPRINTF(InOrderCPU,"[tid:%i]: Removing from active threads list\n",
783 tid);
784 list<ThreadID>::iterator thread_it =
785 std::find(activeThreads.begin(), activeThreads.end(), tid);
786
787 removePipelineStalls(*thread_it);
788
789 activeThreads.erase(thread_it);
790 }
791
792 // TODO: "Un"Load/Unmap register file state
793
794 }
795
796 void
797 InOrderCPU::removePipelineStalls(ThreadID tid)
798 {
799 DPRINTF(InOrderCPU,"[tid:%i]: Removing all pipeline stalls\n",
800 tid);
801
802 for (int stNum = 0; stNum < NumStages ; stNum++) {
803 pipelineStage[stNum]->removeStalls(tid);
804 }
805
806 }
807
808 void
809 InOrderCPU::updateThreadPriority()
810 {
811 if (activeThreads.size() > 1)
812 {
813 //DEFAULT TO ROUND ROBIN SCHEME
814 //e.g. Move highest priority to end of thread list
815 list<ThreadID>::iterator list_begin = activeThreads.begin();
816 list<ThreadID>::iterator list_end = activeThreads.end();
817
818 unsigned high_thread = *list_begin;
819
820 activeThreads.erase(list_begin);
821
822 activeThreads.push_back(high_thread);
823 }
824 }
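// Editorial note (not in the original source): a worked example of the
// round-robin rotation above, with illustrative thread IDs. If
// activeThreads is {0, 1, 2} when updateThreadPriority() runs, thread 0 is
// moved to the back and the list becomes {1, 2, 0}; the next call yields
// {2, 0, 1}, and so on, so each active thread periodically reaches the
// front of the list.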
825
826 inline void
827 InOrderCPU::tickThreadStats()
828 {
829 /** Keep track of cycles that each thread is active */
830 list<ThreadID>::iterator thread_it = activeThreads.begin();
831 while (thread_it != activeThreads.end()) {
832 threadCycles[*thread_it]++;
833 thread_it++;
834 }
835
836 // Keep track of cycles where SMT is active
837 if (activeThreads.size() > 1) {
838 smtCycles++;
839 }
840 }
841
842 void
843 InOrderCPU::activateContext(ThreadID tid, int delay)
844 {
845 DPRINTF(InOrderCPU,"[tid:%i]: Activating ...\n", tid);
846
847 scheduleCpuEvent(ActivateThread, NoFault, tid, dummyInst, delay);
848
849 // Be sure to signal that there's some activity so the CPU doesn't
850 // deschedule itself.
851 activityRec.activity();
852
853 _status = Running;
854 }
855
856 void
857 InOrderCPU::activateNextReadyContext(int delay)
858 {
859 DPRINTF(InOrderCPU,"Activating next ready thread\n");
860
861     // NOTE: Add 5 to the event priority so that we always activate
862     // threads after we've finished deactivating, squashing, etc.
863     // other threads.
864 scheduleCpuEvent(ActivateNextReadyThread, NoFault, 0/*tid*/, dummyInst,
865 delay, 5);
866
867 // Be sure to signal that there's some activity so the CPU doesn't
868 // deschedule itself.
869 activityRec.activity();
870
871 _status = Running;
872 }
873
874 void
875 InOrderCPU::haltContext(ThreadID tid, int delay)
876 {
877 suspendContext(tid, delay);
878 }
879
880 void
881 InOrderCPU::suspendContext(ThreadID tid, int delay)
882 {
883 scheduleCpuEvent(SuspendThread, NoFault, tid, dummyInst, delay);
884 //_status = Idle;
885 }
886
887 void
888 InOrderCPU::suspendThread(ThreadID tid)
889 {
890 DPRINTF(InOrderCPU, "[tid: %i]: Placing on Suspended Threads List...\n", tid);
891 deactivateThread(tid);
892 suspendedThreads.push_back(tid);
893 thread[tid]->lastSuspend = curTick;
894 }
895
896 void
897 InOrderCPU::squashThreadInPipeline(ThreadID tid)
898 {
899 //Squash all instructions in each stage
900 for (int stNum=NumStages - 1; stNum >= 0 ; stNum--) {
901 pipelineStage[stNum]->squash(0 /*seq_num*/, tid);
902 }
903 }
904
905 PipelineStage*
906 InOrderCPU::getPipeStage(int stage_num)
907 {
908 return pipelineStage[stage_num];
909 }
910
911 uint64_t
912 InOrderCPU::readPC(ThreadID tid)
913 {
914 return PC[tid];
915 }
916
917
918 void
919 InOrderCPU::setPC(Addr new_PC, ThreadID tid)
920 {
921 PC[tid] = new_PC;
922 }
923
924
925 uint64_t
926 InOrderCPU::readNextPC(ThreadID tid)
927 {
928 return nextPC[tid];
929 }
930
931
932 void
933 InOrderCPU::setNextPC(uint64_t new_NPC, ThreadID tid)
934 {
935 nextPC[tid] = new_NPC;
936 }
937
938
939 uint64_t
940 InOrderCPU::readNextNPC(ThreadID tid)
941 {
942 return nextNPC[tid];
943 }
944
945
946 void
947 InOrderCPU::setNextNPC(uint64_t new_NNPC, ThreadID tid)
948 {
949 nextNPC[tid] = new_NNPC;
950 }
951
952 uint64_t
953 InOrderCPU::readIntReg(int reg_idx, ThreadID tid)
954 {
955 return intRegs[tid][reg_idx];
956 }
957
958 FloatReg
959 InOrderCPU::readFloatReg(int reg_idx, ThreadID tid)
960 {
961 return floatRegs.f[tid][reg_idx];
962 }
963
964 FloatRegBits
965 InOrderCPU::readFloatRegBits(int reg_idx, ThreadID tid)
966 {
967 return floatRegs.i[tid][reg_idx];
968 }
969
970 void
971 InOrderCPU::setIntReg(int reg_idx, uint64_t val, ThreadID tid)
972 {
973 intRegs[tid][reg_idx] = val;
974 }
975
976
977 void
978 InOrderCPU::setFloatReg(int reg_idx, FloatReg val, ThreadID tid)
979 {
980 floatRegs.f[tid][reg_idx] = val;
981 }
982
983
984 void
985 InOrderCPU::setFloatRegBits(int reg_idx, FloatRegBits val, ThreadID tid)
986 {
987 floatRegs.i[tid][reg_idx] = val;
988 }
989
990 uint64_t
991 InOrderCPU::readRegOtherThread(unsigned reg_idx, ThreadID tid)
992 {
993 // If Default value is set, then retrieve target thread
994 if (tid == InvalidThreadID) {
995 tid = TheISA::getTargetThread(tcBase(tid));
996 }
997
998 if (reg_idx < FP_Base_DepTag) {
999 // Integer Register File
1000 return readIntReg(reg_idx, tid);
1001 } else if (reg_idx < Ctrl_Base_DepTag) {
1002 // Float Register File
1003 reg_idx -= FP_Base_DepTag;
1004 return readFloatRegBits(reg_idx, tid);
1005 } else {
1006 reg_idx -= Ctrl_Base_DepTag;
1007 return readMiscReg(reg_idx, tid); // Misc. Register File
1008 }
1009 }
1010 void
1011 InOrderCPU::setRegOtherThread(unsigned reg_idx, const MiscReg &val,
1012 ThreadID tid)
1013 {
1014 // If Default value is set, then retrieve target thread
1015 if (tid == InvalidThreadID) {
1016 tid = TheISA::getTargetThread(tcBase(tid));
1017 }
1018
1019 if (reg_idx < FP_Base_DepTag) { // Integer Register File
1020 setIntReg(reg_idx, val, tid);
1021 } else if (reg_idx < Ctrl_Base_DepTag) { // Float Register File
1022 reg_idx -= FP_Base_DepTag;
1023 setFloatRegBits(reg_idx, val, tid);
1024 } else {
1025 reg_idx -= Ctrl_Base_DepTag;
1026 setMiscReg(reg_idx, val, tid); // Misc. Register File
1027 }
1028 }
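// Editorial note (not in the original source): readRegOtherThread() and
// setRegOtherThread() take a single flattened register index and decode it
// with the ISA's dependence tags. Illustrative sketch (actual tag values
// are ISA-dependent): an index below FP_Base_DepTag selects an integer
// register, an index in [FP_Base_DepTag, Ctrl_Base_DepTag) selects a float
// register after subtracting FP_Base_DepTag, and anything at or above
// Ctrl_Base_DepTag maps to the miscellaneous register file.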
1029
1030 MiscReg
1031 InOrderCPU::readMiscRegNoEffect(int misc_reg, ThreadID tid)
1032 {
1033 return isa[tid].readMiscRegNoEffect(misc_reg);
1034 }
1035
1036 MiscReg
1037 InOrderCPU::readMiscReg(int misc_reg, ThreadID tid)
1038 {
1039 return isa[tid].readMiscReg(misc_reg, tcBase(tid));
1040 }
1041
1042 void
1043 InOrderCPU::setMiscRegNoEffect(int misc_reg, const MiscReg &val, ThreadID tid)
1044 {
1045 isa[tid].setMiscRegNoEffect(misc_reg, val);
1046 }
1047
1048 void
1049 InOrderCPU::setMiscReg(int misc_reg, const MiscReg &val, ThreadID tid)
1050 {
1051 isa[tid].setMiscReg(misc_reg, val, tcBase(tid));
1052 }
1053
1054
1055 InOrderCPU::ListIt
1056 InOrderCPU::addInst(DynInstPtr &inst)
1057 {
1058 ThreadID tid = inst->readTid();
1059
1060 instList[tid].push_back(inst);
1061
1062 return --(instList[tid].end());
1063 }
1064
1065 void
1066 InOrderCPU::instDone(DynInstPtr inst, ThreadID tid)
1067 {
1068     // Set the CPU's PCs - This contributes to the precise state of the CPU
1069     // which can be used when restoring a thread to the CPU after any
1070     // type of context switching activity (fork, exception, etc.)
1071 setPC(inst->readPC(), tid);
1072 setNextPC(inst->readNextPC(), tid);
1073 setNextNPC(inst->readNextNPC(), tid);
1074
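    // Editorial note (not in the original source): the block below is the
    // "track last branch committed" change named in the commit subject. For
    // control instructions it records the branch's PC/NextPC/NextNPC in the
    // thread state (lastBranchPC et al.) and sets lastGradIsBranch, so the
    // PC state of the most recently graduated branch can be recovered later;
    // for non-control instructions the flag is simply cleared.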
1075 if (inst->isControl()) {
1076 thread[tid]->lastGradIsBranch = true;
1077 thread[tid]->lastBranchPC = inst->readPC();
1078 thread[tid]->lastBranchNextPC = inst->readNextPC();
1079 thread[tid]->lastBranchNextNPC = inst->readNextNPC();
1080 } else {
1081 thread[tid]->lastGradIsBranch = false;
1082 }
1083
1084
1085 // Finalize Trace Data For Instruction
1086 if (inst->traceData) {
1087 //inst->traceData->setCycle(curTick);
1088 inst->traceData->setFetchSeq(inst->seqNum);
1089 //inst->traceData->setCPSeq(cpu->tcBase(tid)->numInst);
1090 inst->traceData->dump();
1091 delete inst->traceData;
1092 inst->traceData = NULL;
1093 }
1094
1095 // Increment thread-state's instruction count
1096 thread[tid]->numInst++;
1097
1098 // Increment thread-state's instruction stats
1099 thread[tid]->numInsts++;
1100
1101 // Count committed insts per thread stats
1102 committedInsts[tid]++;
1103
1104 // Count total insts committed stat
1105 totalCommittedInsts++;
1106
1107 // Count SMT-committed insts per thread stat
1108 if (numActiveThreads() > 1) {
1109 smtCommittedInsts[tid]++;
1110 }
1111
1112 // Check for instruction-count-based events.
1113 comInstEventQueue[tid]->serviceEvents(thread[tid]->numInst);
1114
1115 // Broadcast to other resources an instruction
1116 // has been completed
1117 resPool->scheduleEvent((CPUEventType)ResourcePool::InstGraduated, inst,
1118 0, 0, tid);
1119
1120 // Finally, remove instruction from CPU
1121 removeInst(inst);
1122 }
1123
1124 void
1125 InOrderCPU::addToRemoveList(DynInstPtr &inst)
1126 {
1127 removeInstsThisCycle = true;
1128
1129 removeList.push(inst->getInstListIt());
1130 }
1131
1132 void
1133 InOrderCPU::removeInst(DynInstPtr &inst)
1134 {
1135 DPRINTF(InOrderCPU, "Removing graduated instruction [tid:%i] PC %#x "
1136 "[sn:%lli]\n",
1137 inst->threadNumber, inst->readPC(), inst->seqNum);
1138
1139 removeInstsThisCycle = true;
1140
1141 // Remove the instruction.
1142 removeList.push(inst->getInstListIt());
1143 }
1144
1145 void
1146 InOrderCPU::removeInstsUntil(const InstSeqNum &seq_num, ThreadID tid)
1147 {
1148 //assert(!instList[tid].empty());
1149
1150 removeInstsThisCycle = true;
1151
1152 ListIt inst_iter = instList[tid].end();
1153
1154 inst_iter--;
1155
1156 DPRINTF(InOrderCPU, "Deleting instructions from CPU instruction "
1157 "list that are from [tid:%i] and above [sn:%lli] (end=%lli).\n",
1158 tid, seq_num, (*inst_iter)->seqNum);
1159
1160 while ((*inst_iter)->seqNum > seq_num) {
1161
1162 bool break_loop = (inst_iter == instList[tid].begin());
1163
1164 squashInstIt(inst_iter, tid);
1165
1166 inst_iter--;
1167
1168 if (break_loop)
1169 break;
1170 }
1171 }
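// Editorial note (not in the original source): a worked example of
// removeInstsUntil() with illustrative sequence numbers. If instList[tid]
// holds [sn:10, sn:11, sn:12, sn:13] and seq_num is 11, the loop walks
// backwards from sn:13, squashing 13 and 12, and stops once it reaches
// sn:11, leaving [sn:10, sn:11] intact; the squashed entries are pushed
// onto removeList and reclaimed in cleanUpRemovedInsts().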
1172
1173
1174 inline void
1175 InOrderCPU::squashInstIt(const ListIt &instIt, ThreadID tid)
1176 {
1177 if ((*instIt)->threadNumber == tid) {
1178 DPRINTF(InOrderCPU, "Squashing instruction, "
1179 "[tid:%i] [sn:%lli] PC %#x\n",
1180 (*instIt)->threadNumber,
1181 (*instIt)->seqNum,
1182 (*instIt)->readPC());
1183
1184 (*instIt)->setSquashed();
1185
1186 removeList.push(instIt);
1187 }
1188 }
1189
1190
1191 void
1192 InOrderCPU::cleanUpRemovedInsts()
1193 {
1194 while (!removeList.empty()) {
1195 DPRINTF(InOrderCPU, "Removing instruction, "
1196 "[tid:%i] [sn:%lli] PC %#x\n",
1197 (*removeList.front())->threadNumber,
1198 (*removeList.front())->seqNum,
1199 (*removeList.front())->readPC());
1200
1201 DynInstPtr inst = *removeList.front();
1202 ThreadID tid = inst->threadNumber;
1203
1204 // Make Sure Resource Schedule Is Emptied Out
1205 ThePipeline::ResSchedule *inst_sched = &inst->resSched;
1206 while (!inst_sched->empty()) {
1207 ThePipeline::ScheduleEntry* sch_entry = inst_sched->top();
1208 inst_sched->pop();
1209 delete sch_entry;
1210 }
1211
1212 // Remove From Register Dependency Map, If Necessary
1213 archRegDepMap[(*removeList.front())->threadNumber].
1214 remove((*removeList.front()));
1215
1216
1217 // Clear if Non-Speculative
1218 if (inst->staticInst &&
1219 inst->seqNum == nonSpecSeqNum[tid] &&
1220 nonSpecInstActive[tid] == true) {
1221 nonSpecInstActive[tid] = false;
1222 }
1223
1224 instList[tid].erase(removeList.front());
1225
1226 removeList.pop();
1227
1228 DPRINTF(RefCount, "pop from remove list: [sn:%i]: Refcount = %i.\n",
1229 inst->seqNum,
1230 0/*inst->curCount()*/);
1231
1232 }
1233
1234 removeInstsThisCycle = false;
1235 }
1236
1237 void
1238 InOrderCPU::cleanUpRemovedReqs()
1239 {
1240 while (!reqRemoveList.empty()) {
1241 ResourceRequest *res_req = reqRemoveList.front();
1242
1243 DPRINTF(RefCount, "[tid:%i]: Removing Request, "
1244 "[sn:%lli] [slot:%i] [stage_num:%i] [res:%s] [refcount:%i].\n",
1245 res_req->inst->threadNumber,
1246 res_req->inst->seqNum,
1247 res_req->getSlot(),
1248 res_req->getStageNum(),
1249 res_req->res->name(),
1250 0/*res_req->inst->curCount()*/);
1251
1252         reqRemoveList.pop();
1253
1254         DPRINTF(RefCount, "after remove request: [sn:%i]: Refcount = %i.\n",
1255                 res_req->inst->seqNum,
1256                 0/*res_req->inst->curCount()*/);
1257
1258         delete res_req;
1259 }
1260 }
1261
1262 void
1263 InOrderCPU::cleanUpRemovedEvents()
1264 {
1265 while (!cpuEventRemoveList.empty()) {
1266 Event *cpu_event = cpuEventRemoveList.front();
1267 cpuEventRemoveList.pop();
1268 delete cpu_event;
1269 }
1270 }
1271
1272
1273 void
1274 InOrderCPU::dumpInsts()
1275 {
1276 int num = 0;
1277
1278 ListIt inst_list_it = instList[0].begin();
1279
1280 cprintf("Dumping Instruction List\n");
1281
1282 while (inst_list_it != instList[0].end()) {
1283 cprintf("Instruction:%i\nPC:%#x\n[tid:%i]\n[sn:%lli]\nIssued:%i\n"
1284 "Squashed:%i\n\n",
1285 num, (*inst_list_it)->readPC(), (*inst_list_it)->threadNumber,
1286 (*inst_list_it)->seqNum, (*inst_list_it)->isIssued(),
1287 (*inst_list_it)->isSquashed());
1288 inst_list_it++;
1289 ++num;
1290 }
1291 }
1292
1293 void
1294 InOrderCPU::wakeCPU()
1295 {
1296 if (/*activityRec.active() || */tickEvent.scheduled()) {
1297 DPRINTF(Activity, "CPU already running.\n");
1298 return;
1299 }
1300
1301 DPRINTF(Activity, "Waking up CPU\n");
1302
1303 //@todo: figure out how to count idleCycles correctly
1304 //idleCycles += (curTick - 1) - lastRunningCycle;
1305
1306 mainEventQueue.schedule(&tickEvent, curTick);
1307 }
1308
1309 #if FULL_SYSTEM
1310
1311 void
1312 InOrderCPU::wakeup()
1313 {
1314 if (this->thread[0]->status() != ThreadContext::Suspended)
1315 return;
1316
1317 this->wakeCPU();
1318
1319 DPRINTF(Quiesce, "Suspended Processor woken\n");
1320 this->threadContexts[0]->activate();
1321 }
1322 #endif
1323
1324 #if !FULL_SYSTEM
1325 void
1326 InOrderCPU::syscall(int64_t callnum, ThreadID tid)
1327 {
1328 DPRINTF(InOrderCPU, "[tid:%i] Executing syscall().\n\n", tid);
1329
1330 DPRINTF(Activity,"Activity: syscall() called.\n");
1331
1332 // Temporarily increase this by one to account for the syscall
1333 // instruction.
1334 ++(this->thread[tid]->funcExeInst);
1335
1336 // Execute the actual syscall.
1337 this->thread[tid]->syscall(callnum);
1338
1339 // Decrease funcExeInst by one as the normal commit will handle
1340 // incrementing it.
1341 --(this->thread[tid]->funcExeInst);
1342
1343 // Clear Non-Speculative Block Variable
1344 nonSpecInstActive[tid] = false;
1345 }
1346 #endif
1347
1348 void
1349 InOrderCPU::prefetch(DynInstPtr inst)
1350 {
1351 Resource *mem_res = resPool->getResource(dataPortIdx);
1352 return mem_res->prefetch(inst);
1353 }
1354
1355 void
1356 InOrderCPU::writeHint(DynInstPtr inst)
1357 {
1358 Resource *mem_res = resPool->getResource(dataPortIdx);
1359 return mem_res->writeHint(inst);
1360 }
1361
1362
1363 TheISA::TLB*
1364 InOrderCPU::getITBPtr()
1365 {
1366 CacheUnit *itb_res =
1367 dynamic_cast<CacheUnit*>(resPool->getResource(fetchPortIdx));
1368 return itb_res->tlb();
1369 }
1370
1371
1372 TheISA::TLB*
1373 InOrderCPU::getDTBPtr()
1374 {
1375 CacheUnit *dtb_res =
1376 dynamic_cast<CacheUnit*>(resPool->getResource(dataPortIdx));
1377 return dtb_res->tlb();
1378 }
1379
1380 template <class T>
1381 Fault
1382 InOrderCPU::read(DynInstPtr inst, Addr addr, T &data, unsigned flags)
1383 {
1384 //@TODO: Generalize name "CacheUnit" to "MemUnit" just in case
1385 // you want to run w/out caches?
1386 CacheUnit *cache_res =
1387 dynamic_cast<CacheUnit*>(resPool->getResource(dataPortIdx));
1388
1389 return cache_res->read(inst, addr, data, flags);
1390 }
1391
1392 #ifndef DOXYGEN_SHOULD_SKIP_THIS
1393
1394 template
1395 Fault
1396 InOrderCPU::read(DynInstPtr inst, Addr addr, Twin32_t &data, unsigned flags);
1397
1398 template
1399 Fault
1400 InOrderCPU::read(DynInstPtr inst, Addr addr, Twin64_t &data, unsigned flags);
1401
1402 template
1403 Fault
1404 InOrderCPU::read(DynInstPtr inst, Addr addr, uint64_t &data, unsigned flags);
1405
1406 template
1407 Fault
1408 InOrderCPU::read(DynInstPtr inst, Addr addr, uint32_t &data, unsigned flags);
1409
1410 template
1411 Fault
1412 InOrderCPU::read(DynInstPtr inst, Addr addr, uint16_t &data, unsigned flags);
1413
1414 template
1415 Fault
1416 InOrderCPU::read(DynInstPtr inst, Addr addr, uint8_t &data, unsigned flags);
1417
1418 #endif //DOXYGEN_SHOULD_SKIP_THIS
1419
1420 template<>
1421 Fault
1422 InOrderCPU::read(DynInstPtr inst, Addr addr, double &data, unsigned flags)
1423 {
1424 return read(inst, addr, *(uint64_t*)&data, flags);
1425 }
1426
1427 template<>
1428 Fault
1429 InOrderCPU::read(DynInstPtr inst, Addr addr, float &data, unsigned flags)
1430 {
1431 return read(inst, addr, *(uint32_t*)&data, flags);
1432 }
1433
1434
1435 template<>
1436 Fault
1437 InOrderCPU::read(DynInstPtr inst, Addr addr, int32_t &data, unsigned flags)
1438 {
1439 return read(inst, addr, (uint32_t&)data, flags);
1440 }
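// Editorial note (not in the original source): the three specializations
// above forward floating-point and signed reads to the unsigned read of the
// same width by reinterpreting the destination's storage, so only the raw
// bit pattern is transferred; the analogous write() specializations at the
// end of this file do the same in the other direction.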
1441
1442 template <class T>
1443 Fault
1444 InOrderCPU::write(DynInstPtr inst, T data, Addr addr, unsigned flags,
1445 uint64_t *write_res)
1446 {
1447 //@TODO: Generalize name "CacheUnit" to "MemUnit" just in case
1448 // you want to run w/out caches?
1449 CacheUnit *cache_res =
1450 dynamic_cast<CacheUnit*>(resPool->getResource(dataPortIdx));
1451 return cache_res->write(inst, data, addr, flags, write_res);
1452 }
1453
1454 #ifndef DOXYGEN_SHOULD_SKIP_THIS
1455
1456 template
1457 Fault
1458 InOrderCPU::write(DynInstPtr inst, Twin32_t data, Addr addr,
1459 unsigned flags, uint64_t *res);
1460
1461 template
1462 Fault
1463 InOrderCPU::write(DynInstPtr inst, Twin64_t data, Addr addr,
1464 unsigned flags, uint64_t *res);
1465
1466 template
1467 Fault
1468 InOrderCPU::write(DynInstPtr inst, uint64_t data, Addr addr,
1469 unsigned flags, uint64_t *res);
1470
1471 template
1472 Fault
1473 InOrderCPU::write(DynInstPtr inst, uint32_t data, Addr addr,
1474 unsigned flags, uint64_t *res);
1475
1476 template
1477 Fault
1478 InOrderCPU::write(DynInstPtr inst, uint16_t data, Addr addr,
1479 unsigned flags, uint64_t *res);
1480
1481 template
1482 Fault
1483 InOrderCPU::write(DynInstPtr inst, uint8_t data, Addr addr,
1484 unsigned flags, uint64_t *res);
1485
1486 #endif //DOXYGEN_SHOULD_SKIP_THIS
1487
1488 template<>
1489 Fault
1490 InOrderCPU::write(DynInstPtr inst, double data, Addr addr, unsigned flags,
1491 uint64_t *res)
1492 {
1493 return write(inst, *(uint64_t*)&data, addr, flags, res);
1494 }
1495
1496 template<>
1497 Fault
1498 InOrderCPU::write(DynInstPtr inst, float data, Addr addr, unsigned flags,
1499 uint64_t *res)
1500 {
1501 return write(inst, *(uint32_t*)&data, addr, flags, res);
1502 }
1503
1504
1505 template<>
1506 Fault
1507 InOrderCPU::write(DynInstPtr inst, int32_t data, Addr addr, unsigned flags,
1508 uint64_t *res)
1509 {
1510 return write(inst, (uint32_t)data, addr, flags, res);
1511 }