CPU: Get rid of the now unnecessary getInst/setInst family of functions.
[gem5.git] src/cpu/ozone/lw_back_end_impl.hh
1 /*
2 * Copyright (c) 2006 The Regents of The University of Michigan
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 *
28 * Authors: Kevin Lim
29 */
30
31 #include "config/the_isa.hh"
32 #include "config/use_checker.hh"
33 #include "cpu/ozone/lw_back_end.hh"
34 #include "cpu/op_class.hh"
35
36 #if USE_CHECKER
37 #include "cpu/checker/cpu.hh"
38 #endif
39
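// Schedule a trap squash 'latency' ticks in the future.  The TrapEvent sets
// trapSquash when it fires, and tick() performs the actual squash on the
// following back-end cycle; the thread is marked as having a trap pending.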
40 template <class Impl>
41 void
42 LWBackEnd<Impl>::generateTrapEvent(Tick latency)
43 {
44 DPRINTF(BE, "Generating trap event\n");
45
46 TrapEvent *trap = new TrapEvent(this);
47
48 trap->schedule(curTick + cpu->ticks(latency));
49
50 thread->trapPending = true;
51 }
52
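// Wake the register dependents (or, if memory_deps is set, the memory
// dependents) of a completed instruction.  Any dependent that becomes fully
// ready and is already in the ROB is moved off the waiting list and onto the
// execute list.  Returns the number of dependents woken.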
53 template <class Impl>
54 int
55 LWBackEnd<Impl>::wakeDependents(DynInstPtr &inst, bool memory_deps)
56 {
57 assert(!inst->isSquashed());
58 std::vector<DynInstPtr> &dependents = memory_deps ? inst->getMemDeps() :
59 inst->getDependents();
60 int num_outputs = dependents.size();
61
62 DPRINTF(BE, "Waking instruction [sn:%lli] dependents in IQ\n", inst->seqNum);
63
64 for (int i = 0; i < num_outputs; i++) {
65 DynInstPtr dep_inst = dependents[i];
66 if (!memory_deps) {
67 dep_inst->markSrcRegReady();
68 } else {
69 if (!dep_inst->isSquashed())
70 dep_inst->markMemInstReady(inst.get());
71 }
72
73 DPRINTF(BE, "Marking source reg ready [sn:%lli] in IQ\n", dep_inst->seqNum);
74
75 if (dep_inst->readyToIssue() && dep_inst->isInROB() &&
76 !dep_inst->isNonSpeculative() && !dep_inst->isStoreConditional() &&
77 dep_inst->memDepReady() && !dep_inst->isMemBarrier() &&
78 !dep_inst->isWriteBarrier()) {
79 DPRINTF(BE, "Adding instruction to exeList [sn:%lli]\n",
80 dep_inst->seqNum);
81 exeList.push(dep_inst);
82 if (dep_inst->iqItValid) {
83 DPRINTF(BE, "Removing instruction from waiting list\n");
84 waitingList.erase(dep_inst->iqIt);
85 waitingInsts--;
86 dep_inst->iqItValid = false;
87 assert(waitingInsts >= 0);
88 }
89 if (dep_inst->isMemRef()) {
90 removeWaitingMemOp(dep_inst);
91 DPRINTF(BE, "Issued a waiting mem op [sn:%lli]\n",
92 dep_inst->seqNum);
93 }
94 }
95 }
96 return num_outputs;
97 }
98
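// Queue a memory instruction to be replayed; replayMemInst() later moves the
// entire replay list back onto the execute list.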
99 template <class Impl>
100 void
101 LWBackEnd<Impl>::rescheduleMemInst(DynInstPtr &inst)
102 {
103 replayList.push_front(inst);
104 }
105
106 template <class Impl>
107 LWBackEnd<Impl>::TrapEvent::TrapEvent(LWBackEnd<Impl> *_be)
108 : Event(&mainEventQueue, CPU_Tick_Pri), be(_be)
109 {
110 this->setFlags(Event::AutoDelete);
111 }
112
113 template <class Impl>
114 void
115 LWBackEnd<Impl>::TrapEvent::process()
116 {
117 be->trapSquash = true;
118 }
119
120 template <class Impl>
121 const char *
122 LWBackEnd<Impl>::TrapEvent::description() const
123 {
124 return "Trap";
125 }
126
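// Move every instruction on the replay list back onto the execute list.  The
// instruction passed in is expected to be somewhere on that list.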
127 template <class Impl>
128 void
129 LWBackEnd<Impl>::replayMemInst(DynInstPtr &inst)
130 {
131 bool found_inst = false;
132 while (!replayList.empty()) {
133 exeList.push(replayList.front());
134 if (replayList.front() == inst) {
135 found_inst = true;
136 }
137 replayList.pop_front();
138 }
139 assert(found_inst);
140 }
141
142 template <class Impl>
143 LWBackEnd<Impl>::LWBackEnd(Params *params)
144 : d2i(5, 5), i2e(5, 5), e2c(5, 5), numInstsToWB(params->backEndLatency, 0),
145 trapSquash(false), tcSquash(false),
146 latency(params->backEndLatency),
147 width(params->backEndWidth), lsqLimits(params->lsqLimits),
148 exactFullStall(true)
149 {
150 numROBEntries = params->numROBEntries;
151 numInsts = 0;
152 maxOutstandingMemOps = params->maxOutstandingMemOps;
153 numWaitingMemOps = 0;
154 waitingInsts = 0;
155 switchedOut = false;
156 switchPending = false;
157
158 LSQ.setBE(this);
159
160 // Setup IQ and LSQ with their parameters here.
161 instsToDispatch = d2i.getWire(-1);
162
163 instsToExecute = i2e.getWire(-1);
164
165 dispatchWidth = params->dispatchWidth ? params->dispatchWidth : width;
166 issueWidth = params->issueWidth ? params->issueWidth : width;
167 wbWidth = params->wbWidth ? params->wbWidth : width;
168 commitWidth = params->commitWidth ? params->commitWidth : width;
169
170 LSQ.init(params, params->LQEntries, params->SQEntries, 0);
171
172 dispatchStatus = Running;
173 commitStatus = Running;
174 }
175
176 template <class Impl>
177 std::string
178 LWBackEnd<Impl>::name() const
179 {
180 return cpu->name() + ".backend";
181 }
182
183 template <class Impl>
184 void
185 LWBackEnd<Impl>::regStats()
186 {
187 using namespace Stats;
188 LSQ.regStats();
189
190 robCapEvents
191 .init(cpu->numThreads)
192 .name(name() + ".ROB:cap_events")
193 .desc("number of cycles where ROB cap was active")
194 .flags(total)
195 ;
196
197 robCapInstCount
198 .init(cpu->numThreads)
199 .name(name() + ".ROB:cap_inst")
200 .desc("number of instructions held up by ROB cap")
201 .flags(total)
202 ;
203
204 iqCapEvents
205 .init(cpu->numThreads)
206 .name(name() + ".IQ:cap_events")
207 .desc("number of cycles where IQ cap was active")
208 .flags(total)
209 ;
210
211 iqCapInstCount
212 .init(cpu->numThreads)
213 .name(name() + ".IQ:cap_inst")
214 .desc("number of instructions held up by IQ cap")
215 .flags(total)
216 ;
217
218 exeInst
219 .init(cpu->numThreads)
220 .name(name() + ".ISSUE:count")
221 .desc("number of insts issued")
222 .flags(total)
223 ;
224
225 exeSwp
226 .init(cpu->numThreads)
227 .name(name() + ".ISSUE:swp")
228 .desc("number of swp insts issued")
229 .flags(total)
230 ;
231
232 exeNop
233 .init(cpu->numThreads)
234 .name(name() + ".ISSUE:nop")
235 .desc("number of nop insts issued")
236 .flags(total)
237 ;
238
239 exeRefs
240 .init(cpu->numThreads)
241 .name(name() + ".ISSUE:refs")
242 .desc("number of memory reference insts issued")
243 .flags(total)
244 ;
245
246 exeLoads
247 .init(cpu->numThreads)
248 .name(name() + ".ISSUE:loads")
249 .desc("number of load insts issued")
250 .flags(total)
251 ;
252
253 exeBranches
254 .init(cpu->numThreads)
255 .name(name() + ".ISSUE:branches")
256 .desc("Number of branches issued")
257 .flags(total)
258 ;
259
260 issuedOps
261 .init(cpu->numThreads)
262 .name(name() + ".ISSUE:op_count")
263 .desc("number of insts issued")
264 .flags(total)
265 ;
266
267 /*
268 for (int i=0; i<Num_OpClasses; ++i) {
269 stringstream subname;
270 subname << opClassStrings[i] << "_delay";
271 issue_delay_dist.subname(i, subname.str());
272 }
273 */
274 //
275 // Other stats
276 //
277 lsqForwLoads
278 .init(cpu->numThreads)
279 .name(name() + ".LSQ:forw_loads")
280 .desc("number of loads forwarded via LSQ")
281 .flags(total)
282 ;
283
284 invAddrLoads
285 .init(cpu->numThreads)
286 .name(name() + ".ISSUE:addr_loads")
287 .desc("number of invalid-address loads")
288 .flags(total)
289 ;
290
291 invAddrSwpfs
292 .init(cpu->numThreads)
293 .name(name() + ".ISSUE:addr_swpfs")
294 .desc("number of invalid-address SW prefetches")
295 .flags(total)
296 ;
297
298 lsqBlockedLoads
299 .init(cpu->numThreads)
300 .name(name() + ".LSQ:blocked_loads")
301 .desc("number of ready loads not issued due to memory disambiguation")
302 .flags(total)
303 ;
304
305 lsqInversion
306 .name(name() + ".ISSUE:lsq_invert")
307 .desc("Number of times LSQ instruction issued early")
308 ;
309
310 nIssuedDist
311 .init(issueWidth + 1)
312 .name(name() + ".ISSUE:issued_per_cycle")
313 .desc("Number of insts issued each cycle")
314 .flags(total | pdf | dist)
315 ;
316 /*
317 issueDelayDist
318 .init(Num_OpClasses,0,99,2)
319 .name(name() + ".ISSUE:")
320 .desc("cycles from operands ready to issue")
321 .flags(pdf | cdf)
322 ;
323
324 queueResDist
325 .init(Num_OpClasses, 0, 99, 2)
326 .name(name() + ".IQ:residence:")
327 .desc("cycles from dispatch to issue")
328 .flags(total | pdf | cdf )
329 ;
330 for (int i = 0; i < Num_OpClasses; ++i) {
331 queueResDist.subname(i, opClassStrings[i]);
332 }
333 */
334 writebackCount
335 .init(cpu->numThreads)
336 .name(name() + ".WB:count")
337 .desc("cumulative count of insts written-back")
338 .flags(total)
339 ;
340
341 producerInst
342 .init(cpu->numThreads)
343 .name(name() + ".WB:producers")
344 .desc("num instructions producing a value")
345 .flags(total)
346 ;
347
348 consumerInst
349 .init(cpu->numThreads)
350 .name(name() + ".WB:consumers")
351 .desc("num instructions consuming a value")
352 .flags(total)
353 ;
354
355 wbPenalized
356 .init(cpu->numThreads)
357 .name(name() + ".WB:penalized")
358 .desc("number of instrctions required to write to 'other' IQ")
359 .flags(total)
360 ;
361
362
363 wbPenalizedRate
364 .name(name() + ".WB:penalized_rate")
365 .desc ("fraction of instructions written-back that wrote to 'other' IQ")
366 .flags(total)
367 ;
368
369 wbPenalizedRate = wbPenalized / writebackCount;
370
371 wbFanout
372 .name(name() + ".WB:fanout")
373 .desc("average fanout of values written-back")
374 .flags(total)
375 ;
376
377 wbFanout = producerInst / consumerInst;
378
379 wbRate
380 .name(name() + ".WB:rate")
381 .desc("insts written-back per cycle")
382 .flags(total)
383 ;
384 wbRate = writebackCount / cpu->numCycles;
385
386 statComInst
387 .init(cpu->numThreads)
388 .name(name() + ".COM:count")
389 .desc("Number of instructions committed")
390 .flags(total)
391 ;
392
393 statComSwp
394 .init(cpu->numThreads)
395 .name(name() + ".COM:swp_count")
396 .desc("Number of s/w prefetches committed")
397 .flags(total)
398 ;
399
400 statComRefs
401 .init(cpu->numThreads)
402 .name(name() + ".COM:refs")
403 .desc("Number of memory references committed")
404 .flags(total)
405 ;
406
407 statComLoads
408 .init(cpu->numThreads)
409 .name(name() + ".COM:loads")
410 .desc("Number of loads committed")
411 .flags(total)
412 ;
413
414 statComMembars
415 .init(cpu->numThreads)
416 .name(name() + ".COM:membars")
417 .desc("Number of memory barriers committed")
418 .flags(total)
419 ;
420
421 statComBranches
422 .init(cpu->numThreads)
423 .name(name() + ".COM:branches")
424 .desc("Number of branches committed")
425 .flags(total)
426 ;
427 nCommittedDist
428 .init(0, commitWidth, 1)
429 .name(name() + ".COM:committed_per_cycle")
430 .desc("Number of insts committed each cycle")
431 .flags(pdf)
432 ;
433
434 //
435 // Commit-Eligible instructions...
436 //
437 // -> The number of instructions eligible to commit in those
438 // cycles where we reached our commit BW limit (less the number
439 // actually committed)
440 //
441 // -> The average value is computed over ALL CYCLES... not just
442 // the BW limited cycles
443 //
444 // -> The standard deviation is computed only over cycles where
445 // we reached the BW limit
446 //
447 commitEligible
448 .init(cpu->numThreads)
449 .name(name() + ".COM:bw_limited")
450 .desc("number of insts not committed due to BW limits")
451 .flags(total)
452 ;
453
454 commitEligibleSamples
455 .name(name() + ".COM:bw_lim_events")
456 .desc("number cycles where commit BW limit reached")
457 ;
458
459 squashedInsts
460 .init(cpu->numThreads)
461 .name(name() + ".COM:squashed_insts")
462 .desc("Number of instructions removed from inst list")
463 ;
464
465 ROBSquashedInsts
466 .init(cpu->numThreads)
467 .name(name() + ".COM:rob_squashed_insts")
468 .desc("Number of instructions removed from inst list when they reached the head of the ROB")
469 ;
470
471 ROBFcount
472 .name(name() + ".ROB:full_count")
473 .desc("number of cycles where ROB was full")
474 ;
475
476 ROBCount
477 .init(cpu->numThreads)
478 .name(name() + ".ROB:occupancy")
479 .desc(name() + ".ROB occupancy (cumulative)")
480 .flags(total)
481 ;
482
483 ROBFullRate
484 .name(name() + ".ROB:full_rate")
485 .desc("ROB full per cycle")
486 ;
487 ROBFullRate = ROBFcount / cpu->numCycles;
488
489 ROBOccRate
490 .name(name() + ".ROB:occ_rate")
491 .desc("ROB occupancy rate")
492 .flags(total)
493 ;
494 ROBOccRate = ROBCount / cpu->numCycles;
495 /*
496 ROBOccDist
497 .init(cpu->numThreads, 0, numROBEntries, 2)
498 .name(name() + ".ROB:occ_dist")
499 .desc("ROB Occupancy per cycle")
500 .flags(total | cdf)
501 ;
502 */
503 }
504
505 template <class Impl>
506 void
507 LWBackEnd<Impl>::setCPU(OzoneCPU *cpu_ptr)
508 {
509 cpu = cpu_ptr;
510 LSQ.setCPU(cpu_ptr);
511 checker = cpu->checker;
512 }
513
514 template <class Impl>
515 void
516 LWBackEnd<Impl>::setCommBuffer(TimeBuffer<CommStruct> *_comm)
517 {
518 comm = _comm;
519 toIEW = comm->getWire(0);
520 fromCommit = comm->getWire(-1);
521 }
522
523 #if FULL_SYSTEM
524 template <class Impl>
525 void
526 LWBackEnd<Impl>::checkInterrupts()
527 {
528 if (cpu->checkInterrupts(tc) && !trapSquash && !tcSquash) {
529 frontEnd->interruptPending = true;
530 if (robEmpty() && !LSQ.hasStoresToWB()) {
531 // Will need to squash all instructions currently in flight and have
532 // the interrupt handler restart at the last non-committed inst.
533 // Most of that can be handled through the trap() function. The
534 // processInterrupts() function really just checks for interrupts
535 // and then calls trap() if there is an interrupt present.
536
537 // Not sure which thread should be the one to interrupt. For now
538 // always do thread 0.
539 assert(!thread->inSyscall);
540 thread->inSyscall = true;
541
542 // CPU will handle implementation of the interrupt.
543 cpu->processInterrupts();
544
545 // Now squash or record that I need to squash this cycle.
546 commitStatus = TrapPending;
547
548 // Exit state update mode to avoid accidental updating.
549 thread->inSyscall = false;
550
551 // Generate trap squash event.
552 generateTrapEvent();
553
554 DPRINTF(BE, "Interrupt detected.\n");
555 } else {
556 DPRINTF(BE, "Interrupt must wait for ROB to drain.\n");
557 }
558 }
559 }
560 #endif
561
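// Invoke the fault handler on the thread context and schedule a trap squash.
// Commit is held in the TrapPending state until the squash takes effect.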
562 template <class Impl>
563 void
564 LWBackEnd<Impl>::handleFault(Fault &fault, Tick latency)
565 {
566 DPRINTF(BE, "Handling fault!\n");
567
568 assert(!thread->inSyscall);
569
570 thread->inSyscall = true;
571
572 // Consider holding onto the trap and waiting until the trap event
573 // happens for this to be executed.
574 fault->invoke(thread->getTC());
575
576 // Exit state update mode to avoid accidental updating.
577 thread->inSyscall = false;
578
579 commitStatus = TrapPending;
580
581 // Generate trap squash event.
582 generateTrapEvent(latency);
583 }
584
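// Main per-cycle function of the back end: update structures with completed
// information, mark writeback instructions ready to commit, handle any
// pending trap/TC squash, then dispatch, execute, commit, and write back
// stores.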
585 template <class Impl>
586 void
587 LWBackEnd<Impl>::tick()
588 {
589 DPRINTF(BE, "Ticking back end\n");
590
591 // Read in any done instruction information and update the IQ or LSQ.
592 updateStructures();
593
594 if (switchPending && robEmpty() && !LSQ.hasStoresToWB()) {
595 cpu->signalSwitched();
596 return;
597 }
598
599 readyInstsForCommit();
600
601 numInstsToWB.advance();
602
603 ROBCount[0] += numInsts;
604
605 wbCycle = 0;
606
607 #if FULL_SYSTEM
608 checkInterrupts();
609 #endif
610
611 if (trapSquash) {
612 assert(!tcSquash);
613 squashFromTrap();
614 } else if (tcSquash) {
615 squashFromTC();
616 }
617
618 if (dispatchStatus != Blocked) {
619 dispatchInsts();
620 } else {
621 checkDispatchStatus();
622 }
623
624 if (commitStatus != TrapPending) {
625 executeInsts();
626
627 commitInsts();
628 }
629
630 LSQ.writebackStores();
631
632 DPRINTF(BE, "Waiting insts: %i, mem ops: %i, ROB entries in use: %i, "
633 "LSQ loads: %i, LSQ stores: %i\n",
634 waitingInsts, numWaitingMemOps, numInsts,
635 LSQ.numLoads(), LSQ.numStores());
636
637 #ifdef DEBUG
638 assert(numInsts == instList.size());
639 assert(waitingInsts == waitingList.size());
640 assert(numWaitingMemOps == waitingMemOps.size());
641 assert(!switchedOut);
642 #endif
643 }
644
645 template <class Impl>
646 void
647 LWBackEnd<Impl>::updateStructures()
648 {
649 if (fromCommit->doneSeqNum) {
650 LSQ.commitLoads(fromCommit->doneSeqNum);
651 LSQ.commitStores(fromCommit->doneSeqNum);
652 }
653
654 if (fromCommit->nonSpecSeqNum) {
655 if (fromCommit->uncached) {
656 // LSQ.executeLoad(fromCommit->lqIdx);
657 } else {
658 // IQ.scheduleNonSpec(
659 // fromCommit->nonSpecSeqNum);
660 }
661 }
662 }
663
664 template <class Impl>
665 void
666 LWBackEnd<Impl>::addToLSQ(DynInstPtr &inst)
667 {
668 // Do anything LSQ specific here?
669 LSQ.insert(inst);
670 }
671
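// Pull instructions from the front end while the ROB, LSQ, and outstanding
// memory-op limits allow.  Each dispatched instruction updates the rename
// table; memory barriers are tracked via memBarrier, memory references are
// inserted into the LSQ, and instructions go either straight onto the execute
// list (if ready) or onto the waiting list.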
672 template <class Impl>
673 void
674 LWBackEnd<Impl>::dispatchInsts()
675 {
676 DPRINTF(BE, "Trying to dispatch instructions.\n");
677
678 while (numInsts < numROBEntries &&
679 numWaitingMemOps < maxOutstandingMemOps) {
680 // Get instruction from front of time buffer
681 if (lsqLimits && LSQ.isFull()) {
682 break;
683 }
684
685 DynInstPtr inst = frontEnd->getInst();
686 if (!inst) {
687 break;
688 } else if (inst->isSquashed()) {
689 continue;
690 }
691
692 ++numInsts;
693 instList.push_front(inst);
694
695 inst->setInROB();
696
697 DPRINTF(BE, "Dispatching instruction [sn:%lli] PC:%#x\n",
698 inst->seqNum, inst->readPC());
699
700 for (int i = 0; i < inst->numDestRegs(); ++i)
701 renameTable[inst->destRegIdx(i)] = inst;
702
703 if (inst->isMemBarrier() || inst->isWriteBarrier()) {
704 if (memBarrier) {
705 DPRINTF(BE, "Instruction [sn:%lli] is waiting on "
706 "barrier [sn:%lli].\n",
707 inst->seqNum, memBarrier->seqNum);
708 memBarrier->addMemDependent(inst);
709 inst->addSrcMemInst(memBarrier);
710 }
711 memBarrier = inst;
712 inst->setCanCommit();
713 } else if (inst->readyToIssue() &&
714 !inst->isNonSpeculative() &&
715 !inst->isStoreConditional()) {
716 if (inst->isMemRef()) {
717
718 LSQ.insert(inst);
719 if (memBarrier) {
720 DPRINTF(BE, "Instruction [sn:%lli] is waiting on "
721 "barrier [sn:%lli].\n",
722 inst->seqNum, memBarrier->seqNum);
723 memBarrier->addMemDependent(inst);
724 inst->addSrcMemInst(memBarrier);
725 addWaitingMemOp(inst);
726
727 waitingList.push_front(inst);
728 inst->iqIt = waitingList.begin();
729 inst->iqItValid = true;
730 waitingInsts++;
731 } else {
732 DPRINTF(BE, "Instruction [sn:%lli] ready, addding to "
733 "exeList.\n",
734 inst->seqNum);
735 exeList.push(inst);
736 }
737 } else if (inst->isNop()) {
738 DPRINTF(BE, "Nop encountered [sn:%lli], skipping exeList.\n",
739 inst->seqNum);
740 inst->setIssued();
741 inst->setExecuted();
742 inst->setCanCommit();
743 numInstsToWB[0]++;
744 } else {
745 DPRINTF(BE, "Instruction [sn:%lli] ready, adding to "
746 "exeList.\n",
747 inst->seqNum);
748 exeList.push(inst);
749 }
750 } else {
751 if (inst->isNonSpeculative() || inst->isStoreConditional()) {
752 inst->setCanCommit();
753 DPRINTF(BE, "Adding non speculative instruction\n");
754 }
755
756 if (inst->isMemRef()) {
757 addWaitingMemOp(inst);
758 LSQ.insert(inst);
759 if (memBarrier) {
760 memBarrier->addMemDependent(inst);
761 inst->addSrcMemInst(memBarrier);
762
763 DPRINTF(BE, "Instruction [sn:%lli] is waiting on "
764 "barrier [sn:%lli].\n",
765 inst->seqNum, memBarrier->seqNum);
766 }
767 }
768
769 DPRINTF(BE, "Instruction [sn:%lli] not ready, adding to "
770 "waitingList.\n",
771 inst->seqNum);
772 waitingList.push_front(inst);
773 inst->iqIt = waitingList.begin();
774 inst->iqItValid = true;
775 waitingInsts++;
776 }
777 }
778
779 // Check if IQ or LSQ is full. If so we'll need to break and stop
780 // removing instructions. Also update the number of insts to remove
781 // from the queue. Check here if we don't care about exact stall
782 // conditions.
783 /*
784 bool stall = false;
785 if (IQ.isFull()) {
786 DPRINTF(BE, "IQ is full!\n");
787 stall = true;
788 } else if (LSQ.isFull()) {
789 DPRINTF(BE, "LSQ is full!\n");
790 stall = true;
791 } else if (isFull()) {
792 DPRINTF(BE, "ROB is full!\n");
793 stall = true;
794 ROB_fcount++;
795 }
796 if (stall) {
797 d2i.advance();
798 dispatchStall();
799 return;
800 }
801 */
802 }
803
804 template <class Impl>
805 void
806 LWBackEnd<Impl>::dispatchStall()
807 {
808 dispatchStatus = Blocked;
809 if (!cpu->decoupledFrontEnd) {
810 // Tell front end to stall here through a timebuffer, or just tell
811 // it directly.
812 }
813 }
814
815 template <class Impl>
816 void
817 LWBackEnd<Impl>::checkDispatchStatus()
818 {
819 DPRINTF(BE, "Checking dispatch status\n");
820 assert(dispatchStatus == Blocked);
821 if (!LSQ.isFull() && !isFull()) {
822 DPRINTF(BE, "Dispatch no longer blocked\n");
823 dispatchStatus = Running;
824 dispatchInsts();
825 }
826 }
827
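// Execute up to issueWidth instructions from the execute list.  Memory
// references go through the LSQ; everything else executes directly.  Branch
// mispredictions and memory-order violations detected here trigger a squash.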
828 template <class Impl>
829 void
830 LWBackEnd<Impl>::executeInsts()
831 {
832 DPRINTF(BE, "Trying to execute instructions\n");
833
834 int num_executed = 0;
835 while (!exeList.empty() && num_executed < issueWidth) {
836 DynInstPtr inst = exeList.top();
837
838 DPRINTF(BE, "Executing inst [sn:%lli] PC: %#x\n",
839 inst->seqNum, inst->readPC());
840
841 // Check if the instruction is squashed; if so then skip it
842 // and don't count it towards the FU usage.
843 if (inst->isSquashed()) {
844 DPRINTF(BE, "Execute: Instruction was squashed.\n");
845
846 // Not sure how to handle this plus the method of sending # of
847 // instructions to use. Probably will just have to count it
848 // towards the bandwidth usage, but not the FU usage.
849 ++num_executed;
850
851 // Consider this instruction executed so that commit can go
852 // ahead and retire the instruction.
853 inst->setExecuted();
854
855 // Not sure if I should set this here or just let commit try to
856 // commit any squashed instructions. I like the latter a bit more.
857 inst->setCanCommit();
858
859 // ++iewExecSquashedInsts;
860 exeList.pop();
861
862 continue;
863 }
864
865 Fault fault = NoFault;
866
867 // Execute instruction.
868 // Note that if the instruction faults, it will be handled
869 // at the commit stage.
870 if (inst->isMemRef() &&
871 (!inst->isDataPrefetch() && !inst->isInstPrefetch())) {
872 DPRINTF(BE, "Execute: Initiating access for memory "
873 "reference.\n");
874
875 if (inst->isLoad()) {
876 LSQ.executeLoad(inst);
877 } else if (inst->isStore()) {
878 Fault fault = LSQ.executeStore(inst);
879
880 if (!inst->isStoreConditional() && fault == NoFault) {
881 inst->setExecuted();
882
883 instToCommit(inst);
884 } else if (fault != NoFault) {
885 // If the instruction faulted, then we need to send it along to commit
886 // without the instruction completing.
887 // Send this instruction to commit, also make sure iew stage
888 // realizes there is activity.
889 inst->setExecuted();
890
891 instToCommit(inst);
892 }
893 } else {
894 panic("Unknown mem type!");
895 }
896 } else {
897 inst->execute();
898
899 inst->setExecuted();
900
901 instToCommit(inst);
902 }
903
904 updateExeInstStats(inst);
905
906 ++funcExeInst;
907 ++num_executed;
908
909 exeList.pop();
910
911 if (inst->mispredicted()) {
912 squashDueToBranch(inst);
913 break;
914 } else if (LSQ.violation()) {
915 // Get the DynInst that caused the violation. Note that this
916 // clears the violation signal.
917 DynInstPtr violator;
918 violator = LSQ.getMemDepViolator();
919
920 DPRINTF(BE, "LDSTQ detected a violation. Violator PC: "
921 "%#x, inst PC: %#x. Addr is: %#x.\n",
922 violator->readPC(), inst->readPC(), inst->physEffAddr);
923
924 // Squash.
925 squashDueToMemViolation(inst);
926 }
927 }
928
929 issuedOps[0] += num_executed;
930 nIssuedDist[num_executed]++;
931 }
932
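// Forward an executed (or squashed) instruction to the writeback/commit
// stage.  Executed, non-squashed instructions wake their dependents here.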
933 template<class Impl>
934 void
935 LWBackEnd<Impl>::instToCommit(DynInstPtr &inst)
936 {
937 DPRINTF(BE, "Sending instructions to commit [sn:%lli] PC %#x.\n",
938 inst->seqNum, inst->readPC());
939
940 if (!inst->isSquashed()) {
941 if (inst->isExecuted()) {
942 inst->setResultReady();
943 int dependents = wakeDependents(inst);
944 if (dependents) {
945 producerInst[0]++;
946 consumerInst[0] += dependents;
947 }
948 }
949 }
950
951 writeback.push_back(inst);
952
953 numInstsToWB[0]++;
954
955 writebackCount[0]++;
956 }
957
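// Mark instructions from the writeback queue as able to commit.  The count
// taken each cycle comes from the numInstsToWB time buffer, which delays it
// by the configured back-end latency.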
958 template <class Impl>
959 void
960 LWBackEnd<Impl>::readyInstsForCommit()
961 {
962 for (int i = numInstsToWB[-latency];
963 !writeback.empty() && i;
964 --i)
965 {
966 DynInstPtr inst = writeback.front();
967 writeback.pop_front();
968 if (!inst->isSquashed()) {
969 DPRINTF(BE, "Writing back instruction [sn:%lli] PC %#x.\n",
970 inst->seqNum, inst->readPC());
971
972 inst->setCanCommit();
973 }
974 }
975 }
976
977 #if 0
978 template <class Impl>
979 void
980 LWBackEnd<Impl>::writebackInsts()
981 {
982 int wb_width = wbWidth;
983 // Using this method I'm not quite sure how to prevent an
984 // instruction from waking its own dependents multiple times,
985 // without the guarantee that commit always has enough bandwidth
986 // to accept all instructions being written back. This guarantee
987 // might not be too unrealistic.
988 InstListIt wb_inst_it = writeback.begin();
989 InstListIt wb_end_it = writeback.end();
990 int inst_num = 0;
991 int consumer_insts = 0;
992
993 for (; inst_num < wb_width &&
994 wb_inst_it != wb_end_it; inst_num++) {
995 DynInstPtr inst = (*wb_inst_it);
996
997 // Some instructions will be sent to commit without having
998 // executed because they need commit to handle them.
999 // E.g. Uncached loads have not actually executed when they
1000 // are first sent to commit. Instead commit must tell the LSQ
1001 // when it's ready to execute the uncached load.
1002 if (!inst->isSquashed()) {
1003 DPRINTF(BE, "Writing back instruction [sn:%lli] PC %#x.\n",
1004 inst->seqNum, inst->readPC());
1005
1006 inst->setCanCommit();
1007 inst->setResultReady();
1008
1009 if (inst->isExecuted()) {
1010 int dependents = wakeDependents(inst);
1011 if (dependents) {
1012 producer_inst[0]++;
1013 consumer_insts+= dependents;
1014 }
1015 }
1016 }
1017
1018 writeback.erase(wb_inst_it++);
1019 }
1020 LSQ.writebackStores();
1021 consumer_inst[0]+= consumer_insts;
1022 writeback_count[0]+= inst_num;
1023 }
1024 #endif
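// Try to commit the instruction at the head of the ROB (the back of
// instList).  Unexecuted non-speculative, store-conditional, barrier, and
// uncached-load instructions are sent back to execute first; faulting
// instructions trap once stores have drained.  Returns true if an
// instruction was committed this call.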
1025 template <class Impl>
1026 bool
1027 LWBackEnd<Impl>::commitInst(int inst_num)
1028 {
1029 // Read instruction from the head of the ROB
1030 DynInstPtr inst = instList.back();
1031
1032 // Make sure instruction is valid
1033 assert(inst);
1034
1035 if (!inst->readyToCommit())
1036 return false;
1037
1038 DPRINTF(BE, "Trying to commit instruction [sn:%lli] PC:%#x\n",
1039 inst->seqNum, inst->readPC());
1040
1041 thread->setPC(inst->readPC());
1042 thread->setNextPC(inst->readNextPC());
1043 inst->setAtCommit();
1044
1045 // If the instruction is not executed yet, then it is a non-speculative
1046 // or store inst. Signal backwards that it should be executed.
1047 if (!inst->isExecuted()) {
1048 if (inst->isNonSpeculative() ||
1049 (inst->isStoreConditional() && inst->getFault() == NoFault) ||
1050 inst->isMemBarrier() ||
1051 inst->isWriteBarrier()) {
1052 #if !FULL_SYSTEM
1053 // Hack to make sure syscalls aren't executed until all stores
1054 // write back their data. This direct communication shouldn't
1055 // be used for anything other than this.
1056 if (inst_num > 0 || LSQ.hasStoresToWB())
1057 #else
1058 if ((inst->isMemBarrier() || inst->isWriteBarrier() ||
1059 inst->isQuiesce()) &&
1060 LSQ.hasStoresToWB())
1061 #endif
1062 {
1063 DPRINTF(BE, "Waiting for all stores to writeback.\n");
1064 return false;
1065 }
1066
1067 DPRINTF(BE, "Encountered a store or non-speculative "
1068 "instruction at the head of the ROB, PC %#x.\n",
1069 inst->readPC());
1070
1071 if (inst->isMemBarrier() || inst->isWriteBarrier()) {
1072 DPRINTF(BE, "Waking dependents on barrier [sn:%lli]\n",
1073 inst->seqNum);
1074 assert(memBarrier);
1075 wakeDependents(inst, true);
1076 if (memBarrier == inst)
1077 memBarrier = NULL;
1078 inst->clearMemDependents();
1079 }
1080
1081 // Send back the non-speculative instruction's sequence number.
1082 if (inst->iqItValid) {
1083 DPRINTF(BE, "Removing instruction from waiting list\n");
1084 waitingList.erase(inst->iqIt);
1085 inst->iqItValid = false;
1086 waitingInsts--;
1087 assert(waitingInsts >= 0);
1088 if (inst->isStore())
1089 removeWaitingMemOp(inst);
1090 }
1091
1092 exeList.push(inst);
1093
1094 // Change the instruction so it won't try to commit again until
1095 // it is executed.
1096 inst->clearCanCommit();
1097
1098 // ++commitNonSpecStalls;
1099
1100 return false;
1101 } else if (inst->isLoad()) {
1102 DPRINTF(BE, "[sn:%lli]: Uncached load, PC %#x.\n",
1103 inst->seqNum, inst->readPC());
1104
1105 // Send back the non-speculative instruction's sequence
1106 // number. Maybe just tell the lsq to re-execute the load.
1107
1108 // Send back the non-speculative instruction's sequence number.
1109 if (inst->iqItValid) {
1110 DPRINTF(BE, "Removing instruction from waiting list\n");
1111 waitingList.erase(inst->iqIt);
1112 inst->iqItValid = false;
1113 waitingInsts--;
1114 assert(waitingInsts >= 0);
1115 removeWaitingMemOp(inst);
1116 }
1117 replayMemInst(inst);
1118
1119 inst->clearCanCommit();
1120
1121 return false;
1122 } else {
1123 panic("Trying to commit un-executed instruction "
1124 "of unknown type!\n");
1125 }
1126 }
1127
1128 // Not handled for now.
1129 assert(!inst->isThreadSync());
1130 assert(inst->memDepReady());
1131 // Stores will mark themselves as totally completed as they need
1132 // to wait to write back to memory. @todo: Hack...attempt to fix
1133 // having the checker be forced to wait until a store completes in
1134 // order to check all of the instructions. If the store at the
1135 // head of the check list misses, but a later store hits, then
1136 // loads in the checker may see the younger store values instead
1137 // of the store they should see. Either the checker needs its own
1138 // memory (annoying to update), its own store buffer (how to tell
1139 // which value is correct?), or something else...
1140 if (!inst->isStore()) {
1141 inst->setCompleted();
1142 }
1143 // Check if the instruction caused a fault. If so, trap.
1144 Fault inst_fault = inst->getFault();
1145
1146 // Use checker prior to updating anything due to traps or PC
1147 // based events.
1148 #if USE_CHECKER
1149 if (checker) {
1150 checker->verify(inst);
1151 }
1152 #endif
1153
1154 if (inst_fault != NoFault) {
1155 DPRINTF(BE, "Inst [sn:%lli] PC %#x has a fault\n",
1156 inst->seqNum, inst->readPC());
1157
1158 // Instruction is completed as it has a fault.
1159 inst->setCompleted();
1160
1161 if (LSQ.hasStoresToWB()) {
1162 DPRINTF(BE, "Stores still in flight, will wait until drained.\n");
1163 return false;
1164 } else if (inst_num != 0) {
1165 DPRINTF(BE, "Will wait until instruction is head of commit group.\n");
1166 return false;
1167 }
1168 #if USE_CHECKER
1169 else if (checker && inst->isStore()) {
1170 checker->verify(inst);
1171 }
1172 #endif
1173
1174 handleFault(inst_fault);
1175 return false;
1176 }
1177
1178 int freed_regs = 0;
1179
1180 for (int i = 0; i < inst->numDestRegs(); ++i) {
1181 DPRINTF(BE, "Commit rename map setting reg %i to [sn:%lli]\n",
1182 (int)inst->destRegIdx(i), inst->seqNum);
1183 thread->renameTable[inst->destRegIdx(i)] = inst;
1184 ++freed_regs;
1185 }
1186
1187 #if FULL_SYSTEM
1188 if (thread->profile) {
1189 // bool usermode =
1190 // (xc->readMiscRegNoEffect(AlphaISA::IPR_DTB_CM) & 0x18) != 0;
1191 // thread->profilePC = usermode ? 1 : inst->readPC();
1192 thread->profilePC = inst->readPC();
1193 ProfileNode *node = thread->profile->consume(thread->getTC(),
1194 inst->staticInst);
1195
1196 if (node)
1197 thread->profileNode = node;
1198 }
1199 #endif
1200
1201 if (inst->traceData) {
1202 inst->traceData->setFetchSeq(inst->seqNum);
1203 inst->traceData->setCPSeq(thread->numInst);
1204 inst->traceData->finalize();
1205 inst->traceData = NULL;
1206 }
1207
1208 if (inst->isCopy())
1209 panic("Should not commit any copy instructions!");
1210
1211 inst->clearDependents();
1212
1213 frontEnd->addFreeRegs(freed_regs);
1214
1215 instList.pop_back();
1216
1217 --numInsts;
1218 ++thread->funcExeInst;
1219 // Maybe move this to where the fault is handled; if the fault is
1220 // handled, don't try to set this myself as the fault will set it.
1221 // If not, then I set thread->PC = thread->nextPC and
1222 // thread->nextPC = thread->nextPC + 4.
1223 thread->setPC(thread->readNextPC());
1224 thread->setNextPC(thread->readNextPC() + sizeof(TheISA::MachInst));
1225 updateComInstStats(inst);
1226
1227 // Write the done sequence number here.
1228 toIEW->doneSeqNum = inst->seqNum;
1229 lastCommitCycle = curTick;
1230
1231 #if FULL_SYSTEM
1232 int count = 0;
1233 Addr oldpc;
1234 do {
1235 if (count == 0)
1236 assert(!thread->inSyscall && !thread->trapPending);
1237 oldpc = thread->readPC();
1238 cpu->system->pcEventQueue.service(
1239 thread->getTC());
1240 count++;
1241 } while (oldpc != thread->readPC());
1242 if (count > 1) {
1243 DPRINTF(BE, "PC skip function event, stopping commit\n");
1244 tcSquash = true;
1245 return false;
1246 }
1247 #endif
1248 return true;
1249 }
1250
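// Commit up to commitWidth instructions per cycle, dropping squashed
// instructions from the head of the ROB as they are encountered.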
1251 template <class Impl>
1252 void
1253 LWBackEnd<Impl>::commitInsts()
1254 {
1255 // Not sure whether this should be a loop or not.
1256 int inst_num = 0;
1257 while (!instList.empty() && inst_num < commitWidth) {
1258 if (instList.back()->isSquashed()) {
1259 instList.back()->clearDependents();
1260 ROBSquashedInsts[instList.back()->threadNumber]++;
1261 instList.pop_back();
1262 --numInsts;
1263 continue;
1264 }
1265
1266 if (!commitInst(inst_num++)) {
1267 DPRINTF(BE, "Can't commit, Instruction [sn:%lli] PC "
1268 "%#x is head of ROB and not ready\n",
1269 instList.back()->seqNum, instList.back()->readPC());
1270 --inst_num;
1271 break;
1272 }
1273 }
1274 nCommittedDist.sample(inst_num);
1275 }
1276
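// Squash all instructions younger than the given sequence number: remove them
// from the waiting, instruction, and replay lists, restore the rename table
// from each instruction's previous producers, roll memBarrier back to any
// prior barrier, and return the freed registers to the front end.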
1277 template <class Impl>
1278 void
1279 LWBackEnd<Impl>::squash(const InstSeqNum &sn)
1280 {
1281 LSQ.squash(sn);
1282
1283 int freed_regs = 0;
1284 InstListIt insts_end_it = waitingList.end();
1285 InstListIt insts_it = waitingList.begin();
1286
1287 while (insts_it != insts_end_it && (*insts_it)->seqNum > sn)
1288 {
1289 if ((*insts_it)->isSquashed()) {
1290 ++insts_it;
1291 continue;
1292 }
1293 DPRINTF(BE, "Squashing instruction on waitingList PC %#x, [sn:%lli].\n",
1294 (*insts_it)->readPC(),
1295 (*insts_it)->seqNum);
1296
1297 if ((*insts_it)->isMemRef()) {
1298 DPRINTF(BE, "Squashing a waiting mem op [sn:%lli]\n",
1299 (*insts_it)->seqNum);
1300 removeWaitingMemOp((*insts_it));
1301 }
1302
1303 waitingList.erase(insts_it++);
1304 waitingInsts--;
1305 }
1306 assert(waitingInsts >= 0);
1307
1308 insts_it = instList.begin();
1309
1310 while (!instList.empty() && (*insts_it)->seqNum > sn)
1311 {
1312 if ((*insts_it)->isSquashed()) {
1313 panic("Instruction should not be already squashed and on list!");
1314 ++insts_it;
1315 continue;
1316 }
1317 DPRINTF(BE, "Squashing instruction on inst list PC %#x, [sn:%lli].\n",
1318 (*insts_it)->readPC(),
1319 (*insts_it)->seqNum);
1320
1321 // Mark the instruction as squashed, and ready to commit so that
1322 // it can drain out of the pipeline.
1323 (*insts_it)->setSquashed();
1324
1325 (*insts_it)->setCanCommit();
1326
1327 (*insts_it)->clearInROB();
1328
1329 for (int i = 0; i < (*insts_it)->numDestRegs(); ++i) {
1330 DynInstPtr prev_dest = (*insts_it)->getPrevDestInst(i);
1331 DPRINTF(BE, "Commit rename map setting reg %i to [sn:%lli]\n",
1332 (int)(*insts_it)->destRegIdx(i), prev_dest->seqNum);
1333 renameTable[(*insts_it)->destRegIdx(i)] = prev_dest;
1334 ++freed_regs;
1335 }
1336
1337 (*insts_it)->clearDependents();
1338
1339 squashedInsts[(*insts_it)->threadNumber]++;
1340
1341 instList.erase(insts_it++);
1342 --numInsts;
1343 }
1344
1345 while (memBarrier && memBarrier->seqNum > sn) {
1346 DPRINTF(BE, "[sn:%lli] Memory barrier squashed (or previously "
1347 "squashed)\n", memBarrier->seqNum);
1348 memBarrier->clearMemDependents();
1349 if (memBarrier->memDepReady()) {
1350 DPRINTF(BE, "No previous barrier\n");
1351 memBarrier = NULL;
1352 } else {
1353 std::list<DynInstPtr> &srcs = memBarrier->getMemSrcs();
1354 memBarrier = srcs.front();
1355 srcs.pop_front();
1356 assert(srcs.empty());
1357 DPRINTF(BE, "Previous barrier: [sn:%lli]\n",
1358 memBarrier->seqNum);
1359 }
1360 }
1361
1362 insts_it = replayList.begin();
1363 insts_end_it = replayList.end();
1364 while (!replayList.empty() && insts_it != insts_end_it) {
1365 if ((*insts_it)->seqNum < sn) {
1366 ++insts_it;
1367 continue;
1368 }
1369 assert((*insts_it)->isSquashed());
1370
1371 replayList.erase(insts_it++);
1372 }
1373
1374 frontEnd->addFreeRegs(freed_regs);
1375 }
1376
1377 template <class Impl>
1378 void
1379 LWBackEnd<Impl>::squashFromTC()
1380 {
1381 InstSeqNum squashed_inst = robEmpty() ? 0 : instList.back()->seqNum - 1;
1382 squash(squashed_inst);
1383 frontEnd->squash(squashed_inst, thread->readPC(),
1384 false, false);
1385 frontEnd->interruptPending = false;
1386
1387 thread->trapPending = false;
1388 thread->inSyscall = false;
1389 tcSquash = false;
1390 commitStatus = Running;
1391 }
1392
1393 template <class Impl>
1394 void
1395 LWBackEnd<Impl>::squashFromTrap()
1396 {
1397 InstSeqNum squashed_inst = robEmpty() ? 0 : instList.back()->seqNum - 1;
1398 squash(squashed_inst);
1399 frontEnd->squash(squashed_inst, thread->readPC(),
1400 false, false);
1401 frontEnd->interruptPending = false;
1402
1403 thread->trapPending = false;
1404 thread->inSyscall = false;
1405 trapSquash = false;
1406 commitStatus = Running;
1407 }
1408
1409 template <class Impl>
1410 void
1411 LWBackEnd<Impl>::squashDueToBranch(DynInstPtr &inst)
1412 {
1413 // Update the branch predictor state I guess
1414 DPRINTF(BE, "Squashing due to branch [sn:%lli], will restart at PC %#x\n",
1415 inst->seqNum, inst->readNextPC());
1416 squash(inst->seqNum);
1417 frontEnd->squash(inst->seqNum, inst->readNextPC(),
1418 true, inst->mispredicted());
1419 }
1420
1421 template <class Impl>
1422 void
1423 LWBackEnd<Impl>::squashDueToMemViolation(DynInstPtr &inst)
1424 {
1425 // Squash instructions younger than the violating access and restart
// the front end at its next PC.
1426 DPRINTF(BE, "Squashing due to violation [sn:%lli], will restart at PC %#x\n",
1427 inst->seqNum, inst->readNextPC());
1428 squash(inst->seqNum);
1429 frontEnd->squash(inst->seqNum, inst->readNextPC(),
1430 false, inst->mispredicted());
1431 }
1432
1433 template <class Impl>
1434 void
1435 LWBackEnd<Impl>::squashDueToMemBlocked(DynInstPtr &inst)
1436 {
1437 DPRINTF(IEW, "Memory blocked, squashing load and younger insts, "
1438 "PC: %#x [sn:%i].\n", inst->readPC(), inst->seqNum);
1439
1440 squash(inst->seqNum - 1);
1441 frontEnd->squash(inst->seqNum - 1, inst->readPC());
1442 }
1443
1444 template <class Impl>
1445 void
1446 LWBackEnd<Impl>::switchOut()
1447 {
1448 switchPending = true;
1449 }
1450
1451 template <class Impl>
1452 void
1453 LWBackEnd<Impl>::doSwitchOut()
1454 {
1455 switchedOut = true;
1456 switchPending = false;
1457 // Need to get rid of all committed, non-speculative state and write it
1458 // to memory/TC. In this case this is stores that have committed and not
1459 // yet written back.
1460 assert(robEmpty());
1461 assert(!LSQ.hasStoresToWB());
1462 writeback.clear();
1463 for (int i = 0; i < numInstsToWB.getSize() + 1; ++i)
1464 numInstsToWB.advance();
1465
1466 // squash(0);
1467 assert(waitingList.empty());
1468 assert(instList.empty());
1469 assert(replayList.empty());
1470 assert(writeback.empty());
1471 LSQ.switchOut();
1472 }
1473
1474 template <class Impl>
1475 void
1476 LWBackEnd<Impl>::takeOverFrom(ThreadContext *old_tc)
1477 {
1478 assert(!squashPending);
1479 squashSeqNum = 0;
1480 squashNextPC = 0;
1481 tcSquash = false;
1482 trapSquash = false;
1483
1484 numInsts = 0;
1485 numWaitingMemOps = 0;
1486 waitingMemOps.clear();
1487 waitingInsts = 0;
1488 switchedOut = false;
1489 dispatchStatus = Running;
1490 commitStatus = Running;
1491 LSQ.takeOverFrom(old_tc);
1492 }
1493
1494 template <class Impl>
1495 void
1496 LWBackEnd<Impl>::updateExeInstStats(DynInstPtr &inst)
1497 {
1498 ThreadID tid = inst->threadNumber;
1499
1500 //
1501 // Pick off the software prefetches
1502 //
1503 #ifdef TARGET_ALPHA
1504 if (inst->isDataPrefetch())
1505 exeSwp[tid]++;
1506 else
1507 exeInst[tid]++;
1508 #else
1509 exeInst[tid]++;
1510 #endif
1511
1512 //
1513 // Control operations
1514 //
1515 if (inst->isControl())
1516 exeBranches[tid]++;
1517
1518 //
1519 // Memory operations
1520 //
1521 if (inst->isMemRef()) {
1522 exeRefs[tid]++;
1523
1524 if (inst->isLoad())
1525 exeLoads[tid]++;
1526 }
1527 }
1528
1529 template <class Impl>
1530 void
1531 LWBackEnd<Impl>::updateComInstStats(DynInstPtr &inst)
1532 {
1533 ThreadID tid = inst->threadNumber;
1534
1535 // keep an instruction count
1536 thread->numInst++;
1537 thread->numInsts++;
1538
1539 cpu->numInst++;
1540 //
1541 // Pick off the software prefetches
1542 //
1543 #ifdef TARGET_ALPHA
1544 if (inst->isDataPrefetch()) {
1545 statComSwp[tid]++;
1546 } else {
1547 statComInst[tid]++;
1548 }
1549 #else
1550 statComInst[tid]++;
1551 #endif
1552
1553 //
1554 // Control Instructions
1555 //
1556 if (inst->isControl())
1557 statComBranches[tid]++;
1558
1559 //
1560 // Memory references
1561 //
1562 if (inst->isMemRef()) {
1563 statComRefs[tid]++;
1564
1565 if (inst->isLoad()) {
1566 statComLoads[tid]++;
1567 }
1568 }
1569
1570 if (inst->isMemBarrier()) {
1571 statComMembars[tid]++;
1572 }
1573 }
1574
1575 template <class Impl>
1576 void
1577 LWBackEnd<Impl>::dumpInsts()
1578 {
1579 int num = 0;
1580 int valid_num = 0;
1581
1582 InstListIt inst_list_it = --(instList.end());
1583
1584 cprintf("ExeList size: %i\n", exeList.size());
1585
1586 cprintf("Inst list size: %i\n", instList.size());
1587
1588 while (inst_list_it != instList.end())
1589 {
1590 cprintf("Instruction:%i\n",
1591 num);
1592 if (!(*inst_list_it)->isSquashed()) {
1593 if (!(*inst_list_it)->isIssued()) {
1594 ++valid_num;
1595 cprintf("Count:%i\n", valid_num);
1596 } else if ((*inst_list_it)->isMemRef() &&
1597 !(*inst_list_it)->memOpDone) {
1598 // Loads that have not been marked as executed still count
1599 // towards the total instructions.
1600 ++valid_num;
1601 cprintf("Count:%i\n", valid_num);
1602 }
1603 }
1604
1605 cprintf("PC:%#x\n[sn:%lli]\n[tid:%i]\n"
1606 "Issued:%i\nSquashed:%i\n",
1607 (*inst_list_it)->readPC(),
1608 (*inst_list_it)->seqNum,
1609 (*inst_list_it)->threadNumber,
1610 (*inst_list_it)->isIssued(),
1611 (*inst_list_it)->isSquashed());
1612
1613 if ((*inst_list_it)->isMemRef()) {
1614 cprintf("MemOpDone:%i\n", (*inst_list_it)->memOpDone);
1615 }
1616
1617 cprintf("\n");
1618
1619 inst_list_it--;
1620 ++num;
1621 }
1622
1623 inst_list_it = --(writeback.end());
1624
1625 cprintf("Writeback list size: %i\n", writeback.size());
1626
1627 while (inst_list_it != writeback.end())
1628 {
1629 cprintf("Instruction:%i\n",
1630 num);
1631 if (!(*inst_list_it)->isSquashed()) {
1632 if (!(*inst_list_it)->isIssued()) {
1633 ++valid_num;
1634 cprintf("Count:%i\n", valid_num);
1635 } else if ((*inst_list_it)->isMemRef() &&
1636 !(*inst_list_it)->memOpDone) {
1637 // Loads that have not been marked as executed still count
1638 // towards the total instructions.
1639 ++valid_num;
1640 cprintf("Count:%i\n", valid_num);
1641 }
1642 }
1643
1644 cprintf("PC:%#x\n[sn:%lli]\n[tid:%i]\n"
1645 "Issued:%i\nSquashed:%i\n",
1646 (*inst_list_it)->readPC(),
1647 (*inst_list_it)->seqNum,
1648 (*inst_list_it)->threadNumber,
1649 (*inst_list_it)->isIssued(),
1650 (*inst_list_it)->isSquashed());
1651
1652 if ((*inst_list_it)->isMemRef()) {
1653 cprintf("MemOpDone:%i\n", (*inst_list_it)->memOpDone);
1654 }
1655
1656 cprintf("\n");
1657
1658 inst_list_it--;
1659 ++num;
1660 }
1661
1662 cprintf("Waiting list size: %i\n", waitingList.size());
1663
1664 inst_list_it = --(waitingList.end());
1665
1666 while (inst_list_it != waitingList.end())
1667 {
1668 cprintf("Instruction:%i\n",
1669 num);
1670 if (!(*inst_list_it)->isSquashed()) {
1671 if (!(*inst_list_it)->isIssued()) {
1672 ++valid_num;
1673 cprintf("Count:%i\n", valid_num);
1674 } else if ((*inst_list_it)->isMemRef() &&
1675 !(*inst_list_it)->memOpDone) {
1676 // Loads that have not been marked as executed still count
1677 // towards the total instructions.
1678 ++valid_num;
1679 cprintf("Count:%i\n", valid_num);
1680 }
1681 }
1682
1683 cprintf("PC:%#x\n[sn:%lli]\n[tid:%i]\n"
1684 "Issued:%i\nSquashed:%i\n",
1685 (*inst_list_it)->readPC(),
1686 (*inst_list_it)->seqNum,
1687 (*inst_list_it)->threadNumber,
1688 (*inst_list_it)->isIssued(),
1689 (*inst_list_it)->isSquashed());
1690
1691 if ((*inst_list_it)->isMemRef()) {
1692 cprintf("MemOpDone:%i\n", (*inst_list_it)->memOpDone);
1693 }
1694
1695 cprintf("\n");
1696
1697 inst_list_it--;
1698 ++num;
1699 }
1700
1701 cprintf("waitingMemOps list size: %i\n", waitingMemOps.size());
1702
1703 MemIt waiting_it = waitingMemOps.begin();
1704
1705 while (waiting_it != waitingMemOps.end())
1706 {
1707 cprintf("[sn:%lli] ", (*waiting_it));
1708 waiting_it++;
1709 ++num;
1710 }
1711 cprintf("\n");
1712 }