[gem5.git] / src / cpu / o3 / inst_queue_impl.hh
1 /*
2 * Copyright (c) 2004-2006 The Regents of The University of Michigan
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 *
28 * Authors: Kevin Lim
29 * Korey Sewell
30 */
31
32 #include <limits>
33 #include <vector>
34
35 #include "sim/root.hh"
36
37 #include "cpu/o3/fu_pool.hh"
38 #include "cpu/o3/inst_queue.hh"
39
40 template <class Impl>
41 InstructionQueue<Impl>::FUCompletion::FUCompletion(DynInstPtr &_inst,
42 int fu_idx,
43 InstructionQueue<Impl> *iq_ptr)
44 : Event(&mainEventQueue, Stat_Event_Pri),
45 inst(_inst), fuIdx(fu_idx), iqPtr(iq_ptr), freeFU(false)
46 {
47 this->setFlags(Event::AutoDelete);
48 }
49
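// On expiry, hand the instruction back to the IQ for execution
// bookkeeping; fuIdx is forwarded only if setFreeFU() was called, so a
// non-pipelined unit is released when its operation completes.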
50 template <class Impl>
51 void
52 InstructionQueue<Impl>::FUCompletion::process()
53 {
54 iqPtr->processFUCompletion(inst, freeFU ? fuIdx : -1);
55 inst = NULL;
56 }
57
58
59 template <class Impl>
60 const char *
61 InstructionQueue<Impl>::FUCompletion::description()
62 {
63 return "Functional unit completion event";
64 }
65
66 template <class Impl>
67 InstructionQueue<Impl>::InstructionQueue(Params *params)
68 : fuPool(params->fuPool),
69 numEntries(params->numIQEntries),
70 totalWidth(params->issueWidth),
71 numPhysIntRegs(params->numPhysIntRegs),
72 numPhysFloatRegs(params->numPhysFloatRegs),
73 commitToIEWDelay(params->commitToIEWDelay)
74 {
75 assert(fuPool);
76
77 switchedOut = false;
78
79 numThreads = params->numberOfThreads;
80
81 // Set the number of physical registers as the number of int + float
82 numPhysRegs = numPhysIntRegs + numPhysFloatRegs;
83
84 DPRINTF(IQ, "There are %i physical registers.\n", numPhysRegs);
85
86 //Create an entry for each physical register within the
87 //dependency graph.
88 dependGraph.resize(numPhysRegs);
89
90 // Resize the register scoreboard.
91 regScoreboard.resize(numPhysRegs);
92
93 //Initialize Mem Dependence Units
94 for (int i = 0; i < numThreads; i++) {
95 memDepUnit[i].init(params,i);
96 memDepUnit[i].setIQ(this);
97 }
98
99 resetState();
100
101 std::string policy = params->smtIQPolicy;
102
103 //Convert string to lowercase
104 std::transform(policy.begin(), policy.end(), policy.begin(),
105 (int(*)(int)) tolower);
106
107 //Figure out resource sharing policy
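// For example (illustrative numbers, not defaults): with 64 IQ entries
// and two threads, "partitioned" fixes each thread at 32 entries,
// "threshold" with smtIQThreshold=75 lets either thread occupy up to
// 48 entries of the shared 64, and "dynamic" places no per-thread limit.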
108 if (policy == "dynamic") {
109 iqPolicy = Dynamic;
110
111 //Set Max Entries to Total IQ Capacity
112 for (int i = 0; i < numThreads; i++) {
113 maxEntries[i] = numEntries;
114 }
115
116 } else if (policy == "partitioned") {
117 iqPolicy = Partitioned;
118
119 //@todo: Handle the case where numEntries doesn't divide evenly among threads.
120 int part_amt = numEntries / numThreads;
121
122 //Divide the IQ up evenly
123 for (int i = 0; i < numThreads; i++) {
124 maxEntries[i] = part_amt;
125 }
126
127 DPRINTF(IQ, "IQ sharing policy set to Partitioned: "
128 "%i entries per thread.\n", part_amt);
129
130 } else if (policy == "threshold") {
131 iqPolicy = Threshold;
132
133 double threshold = (double)params->smtIQThreshold / 100;
134
135 int thresholdIQ = (int)((double)threshold * numEntries);
136
137 //Divide up by threshold amount
138 for (int i = 0; i < numThreads; i++) {
139 maxEntries[i] = thresholdIQ;
140 }
141
142 DPRINTF(IQ, "IQ sharing policy set to Threshold: "
143 "%i entries per thread.\n", thresholdIQ);
144 } else {
145 assert(0 && "Invalid IQ Sharing Policy. Options are: {Dynamic, "
146 "Partitioned, Threshold}");
147 }
148 }
149
150 template <class Impl>
151 InstructionQueue<Impl>::~InstructionQueue()
152 {
153 dependGraph.reset();
154 #ifdef DEBUG
155 cprintf("Nodes traversed: %i, removed: %i\n",
156 dependGraph.nodesTraversed, dependGraph.nodesRemoved);
157 #endif
158 }
159
160 template <class Impl>
161 std::string
162 InstructionQueue<Impl>::name() const
163 {
164 return cpu->name() + ".iq";
165 }
166
167 template <class Impl>
168 void
169 InstructionQueue<Impl>::regStats()
170 {
171 using namespace Stats;
172 iqInstsAdded
173 .name(name() + ".iqInstsAdded")
174 .desc("Number of instructions added to the IQ (excludes non-spec)")
175 .prereq(iqInstsAdded);
176
177 iqNonSpecInstsAdded
178 .name(name() + ".iqNonSpecInstsAdded")
179 .desc("Number of non-speculative instructions added to the IQ")
180 .prereq(iqNonSpecInstsAdded);
181
182 iqInstsIssued
183 .name(name() + ".iqInstsIssued")
184 .desc("Number of instructions issued")
185 .prereq(iqInstsIssued);
186
187 iqIntInstsIssued
188 .name(name() + ".iqIntInstsIssued")
189 .desc("Number of integer instructions issued")
190 .prereq(iqIntInstsIssued);
191
192 iqFloatInstsIssued
193 .name(name() + ".iqFloatInstsIssued")
194 .desc("Number of float instructions issued")
195 .prereq(iqFloatInstsIssued);
196
197 iqBranchInstsIssued
198 .name(name() + ".iqBranchInstsIssued")
199 .desc("Number of branch instructions issued")
200 .prereq(iqBranchInstsIssued);
201
202 iqMemInstsIssued
203 .name(name() + ".iqMemInstsIssued")
204 .desc("Number of memory instructions issued")
205 .prereq(iqMemInstsIssued);
206
207 iqMiscInstsIssued
208 .name(name() + ".iqMiscInstsIssued")
209 .desc("Number of miscellaneous instructions issued")
210 .prereq(iqMiscInstsIssued);
211
212 iqSquashedInstsIssued
213 .name(name() + ".iqSquashedInstsIssued")
214 .desc("Number of squashed instructions issued")
215 .prereq(iqSquashedInstsIssued);
216
217 iqSquashedInstsExamined
218 .name(name() + ".iqSquashedInstsExamined")
219 .desc("Number of squashed instructions iterated over during squash;"
220 " mainly for profiling")
221 .prereq(iqSquashedInstsExamined);
222
223 iqSquashedOperandsExamined
224 .name(name() + ".iqSquashedOperandsExamined")
225 .desc("Number of squashed operands that are examined and possibly "
226 "removed from graph")
227 .prereq(iqSquashedOperandsExamined);
228
229 iqSquashedNonSpecRemoved
230 .name(name() + ".iqSquashedNonSpecRemoved")
231 .desc("Number of squashed non-spec instructions that were removed")
232 .prereq(iqSquashedNonSpecRemoved);
233
234 queueResDist
235 .init(Num_OpClasses, 0, 99, 2)
236 .name(name() + ".IQ:residence:")
237 .desc("cycles from dispatch to issue")
238 .flags(total | pdf | cdf )
239 ;
240 for (int i = 0; i < Num_OpClasses; ++i) {
241 queueResDist.subname(i, opClassStrings[i]);
242 }
243 numIssuedDist
244 .init(0,totalWidth,1)
245 .name(name() + ".ISSUE:issued_per_cycle")
246 .desc("Number of insts issued each cycle")
247 .flags(pdf)
248 ;
249 /*
250 dist_unissued
251 .init(Num_OpClasses+2)
252 .name(name() + ".ISSUE:unissued_cause")
253 .desc("Reason ready instruction not issued")
254 .flags(pdf | dist)
255 ;
256 for (int i=0; i < (Num_OpClasses + 2); ++i) {
257 dist_unissued.subname(i, unissued_names[i]);
258 }
259 */
260 statIssuedInstType
261 .init(numThreads,Num_OpClasses)
262 .name(name() + ".ISSUE:FU_type")
263 .desc("Type of FU issued")
264 .flags(total | pdf | dist)
265 ;
266 statIssuedInstType.ysubnames(opClassStrings);
267
268 //
269 // How long did instructions for a particular FU type wait prior to issue
270 //
271
272 issueDelayDist
273 .init(Num_OpClasses,0,99,2)
274 .name(name() + ".ISSUE:")
275 .desc("cycles from operands ready to issue")
276 .flags(pdf | cdf)
277 ;
278
279 for (int i=0; i<Num_OpClasses; ++i) {
280 std::stringstream subname;
281 subname << opClassStrings[i] << "_delay";
282 issueDelayDist.subname(i, subname.str());
283 }
284
285 issueRate
286 .name(name() + ".ISSUE:rate")
287 .desc("Inst issue rate")
288 .flags(total)
289 ;
290 issueRate = iqInstsIssued / cpu->numCycles;
291
292 statFuBusy
293 .init(Num_OpClasses)
294 .name(name() + ".ISSUE:fu_full")
295 .desc("attempts to use FU when none available")
296 .flags(pdf | dist)
297 ;
298 for (int i=0; i < Num_OpClasses; ++i) {
299 statFuBusy.subname(i, opClassStrings[i]);
300 }
301
302 fuBusy
303 .init(numThreads)
304 .name(name() + ".ISSUE:fu_busy_cnt")
305 .desc("FU busy when requested")
306 .flags(total)
307 ;
308
309 fuBusyRate
310 .name(name() + ".ISSUE:fu_busy_rate")
311 .desc("FU busy rate (busy events/executed inst)")
312 .flags(total)
313 ;
314 fuBusyRate = fuBusy / iqInstsIssued;
315
316 for (int i = 0; i < numThreads; i++) {
317 // Tell mem dependence unit to reg stats as well.
318 memDepUnit[i].regStats();
319 }
320 }
321
322 template <class Impl>
323 void
324 InstructionQueue<Impl>::resetState()
325 {
326 //Initialize thread IQ counts
327 for (int i = 0; i < numThreads; i++) {
328 count[i] = 0;
329 instList[i].clear();
330 }
331
332 // Initialize the number of free IQ entries.
333 freeEntries = numEntries;
334
335 // Note that in actuality, the registers corresponding to the logical
336 // registers start off as ready. However this doesn't matter for the
337 // IQ as the instruction should have been correctly told if those
338 // registers are ready in rename. Thus it can all be initialized as
339 // unready.
340 for (int i = 0; i < numPhysRegs; ++i) {
341 regScoreboard[i] = false;
342 }
343
344 for (int i = 0; i < numThreads; ++i) {
345 squashedSeqNum[i] = 0;
346 }
347
348 for (int i = 0; i < Num_OpClasses; ++i) {
349 while (!readyInsts[i].empty())
350 readyInsts[i].pop();
351 queueOnList[i] = false;
352 readyIt[i] = listOrder.end();
353 }
354 nonSpecInsts.clear();
355 listOrder.clear();
356 }
357
358 template <class Impl>
359 void
360 InstructionQueue<Impl>::setActiveThreads(std::list<unsigned> *at_ptr)
361 {
362 DPRINTF(IQ, "Setting active threads list pointer.\n");
363 activeThreads = at_ptr;
364 }
365
366 template <class Impl>
367 void
368 InstructionQueue<Impl>::setIssueToExecuteQueue(TimeBuffer<IssueStruct> *i2e_ptr)
369 {
370 DPRINTF(IQ, "Set the issue to execute queue.\n");
371 issueToExecuteQueue = i2e_ptr;
372 }
373
374 template <class Impl>
375 void
376 InstructionQueue<Impl>::setTimeBuffer(TimeBuffer<TimeStruct> *tb_ptr)
377 {
378 DPRINTF(IQ, "Set the time buffer.\n");
379 timeBuffer = tb_ptr;
380
381 fromCommit = timeBuffer->getWire(-commitToIEWDelay);
382 }
383
384 template <class Impl>
385 void
386 InstructionQueue<Impl>::switchOut()
387 {
388 resetState();
389 dependGraph.reset();
390 switchedOut = true;
391 for (int i = 0; i < numThreads; ++i) {
392 memDepUnit[i].switchOut();
393 }
394 }
395
396 template <class Impl>
397 void
398 InstructionQueue<Impl>::takeOverFrom()
399 {
400 switchedOut = false;
401 }
402
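// Used when partitioning IQ resources among SMT threads: returns each
// thread's share under the Partitioned policy, or 0 if no fixed
// per-thread limit applies.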
403 template <class Impl>
404 int
405 InstructionQueue<Impl>::entryAmount(int num_threads)
406 {
407 if (iqPolicy == Partitioned) {
408 return numEntries / num_threads;
409 } else {
410 return 0;
411 }
412 }
413
414
415 template <class Impl>
416 void
417 InstructionQueue<Impl>::resetEntries()
418 {
419 if (iqPolicy != Dynamic || numThreads > 1) {
420 int active_threads = (*activeThreads).size();
421
422 std::list<unsigned>::iterator threads = (*activeThreads).begin();
423 std::list<unsigned>::iterator list_end = (*activeThreads).end();
424
425 while (threads != list_end) {
426 if (iqPolicy == Partitioned) {
427 maxEntries[*threads++] = numEntries / active_threads;
428 } else if (iqPolicy == Threshold && active_threads == 1) {
429 maxEntries[*threads++] = numEntries;
430 }
431 }
432 }
433 }
434
435 template <class Impl>
436 unsigned
437 InstructionQueue<Impl>::numFreeEntries()
438 {
439 return freeEntries;
440 }
441
442 template <class Impl>
443 unsigned
444 InstructionQueue<Impl>::numFreeEntries(unsigned tid)
445 {
446 return maxEntries[tid] - count[tid];
447 }
448
449 // Might want to do something more complex if it knows how many instructions
450 // will be issued this cycle.
451 template <class Impl>
452 bool
453 InstructionQueue<Impl>::isFull()
454 {
455 if (freeEntries == 0) {
456 return true;
457 } else {
458 return false;
459 }
460 }
461
462 template <class Impl>
463 bool
464 InstructionQueue<Impl>::isFull(unsigned tid)
465 {
466 if (numFreeEntries(tid) == 0) {
467 return true;
468 } else {
469 return false;
470 }
471 }
472
473 template <class Impl>
474 bool
475 InstructionQueue<Impl>::hasReadyInsts()
476 {
477 if (!listOrder.empty()) {
478 return true;
479 }
480
481 for (int i = 0; i < Num_OpClasses; ++i) {
482 if (!readyInsts[i].empty()) {
483 return true;
484 }
485 }
486
487 return false;
488 }
489
490 template <class Impl>
491 void
492 InstructionQueue<Impl>::insert(DynInstPtr &new_inst)
493 {
494 // Make sure the instruction is valid
495 assert(new_inst);
496
497 DPRINTF(IQ, "Adding instruction [sn:%lli] PC %#x to the IQ.\n",
498 new_inst->seqNum, new_inst->readPC());
499
500 assert(freeEntries != 0);
501
502 instList[new_inst->threadNumber].push_back(new_inst);
503
504 --freeEntries;
505
506 new_inst->setInIQ();
507
508 // Look through its source registers (physical regs), and mark any
509 // dependencies.
510 addToDependents(new_inst);
511
512 // Have this instruction set itself as the producer of its destination
513 // register(s).
514 addToProducers(new_inst);
515
516 if (new_inst->isMemRef()) {
517 memDepUnit[new_inst->threadNumber].insert(new_inst);
518 } else {
519 addIfReady(new_inst);
520 }
521
522 ++iqInstsAdded;
523
524 count[new_inst->threadNumber]++;
525
526 assert(freeEntries == (numEntries - countInsts()));
527 }
528
529 template <class Impl>
530 void
531 InstructionQueue<Impl>::insertNonSpec(DynInstPtr &new_inst)
532 {
533 // @todo: Clean up this code; can do it by setting inst as unable
534 // to issue, then calling normal insert on the inst.
535
536 assert(new_inst);
537
538 nonSpecInsts[new_inst->seqNum] = new_inst;
539
540 DPRINTF(IQ, "Adding non-speculative instruction [sn:%lli] PC %#x "
541 "to the IQ.\n",
542 new_inst->seqNum, new_inst->readPC());
543
544 assert(freeEntries != 0);
545
546 instList[new_inst->threadNumber].push_back(new_inst);
547
548 --freeEntries;
549
550 new_inst->setInIQ();
551
552 // Have this instruction set itself as the producer of its destination
553 // register(s).
554 addToProducers(new_inst);
555
556 // If it's a memory instruction, add it to the memory dependency
557 // unit.
558 if (new_inst->isMemRef()) {
559 memDepUnit[new_inst->threadNumber].insertNonSpec(new_inst);
560 }
561
562 ++iqNonSpecInstsAdded;
563
564 count[new_inst->threadNumber]++;
565
566 assert(freeEntries == (numEntries - countInsts()));
567 }
568
569 template <class Impl>
570 void
571 InstructionQueue<Impl>::insertBarrier(DynInstPtr &barr_inst)
572 {
573 memDepUnit[barr_inst->threadNumber].insertBarrier(barr_inst);
574
575 insertNonSpec(barr_inst);
576 }
577
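// Pop the oldest instruction that scheduleReadyInsts() or a completing
// FUCompletion event has queued up for the execute stage.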
578 template <class Impl>
579 typename Impl::DynInstPtr
580 InstructionQueue<Impl>::getInstToExecute()
581 {
582 assert(!instsToExecute.empty());
583 DynInstPtr inst = instsToExecute.front();
584 instsToExecute.pop_front();
585 return inst;
586 }
587
588 template <class Impl>
589 void
590 InstructionQueue<Impl>::addToOrderList(OpClass op_class)
591 {
592 assert(!readyInsts[op_class].empty());
593
594 ListOrderEntry queue_entry;
595
596 queue_entry.queueType = op_class;
597
598 queue_entry.oldestInst = readyInsts[op_class].top()->seqNum;
599
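// Walk listOrder (kept sorted by oldest ready seqNum) and insert this
// op class just before the first entry that is younger, so the issue
// loop always considers op classes oldest-first.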
600 ListOrderIt list_it = listOrder.begin();
601 ListOrderIt list_end_it = listOrder.end();
602
603 while (list_it != list_end_it) {
604 if ((*list_it).oldestInst > queue_entry.oldestInst) {
605 break;
606 }
607
608 list_it++;
609 }
610
611 readyIt[op_class] = listOrder.insert(list_it, queue_entry);
612 queueOnList[op_class] = true;
613 }
614
615 template <class Impl>
616 void
617 InstructionQueue<Impl>::moveToYoungerInst(ListOrderIt list_order_it)
618 {
619 // Get iterator of next item on the list
620 // Delete the original iterator
621 // Determine if the next item is either the end of the list or younger
622 // than the new instruction. If so, then add in a new iterator right here.
623 // If not, then move along.
624 ListOrderEntry queue_entry;
625 OpClass op_class = (*list_order_it).queueType;
626 ListOrderIt next_it = list_order_it;
627
628 ++next_it;
629
630 queue_entry.queueType = op_class;
631 queue_entry.oldestInst = readyInsts[op_class].top()->seqNum;
632
633 while (next_it != listOrder.end() &&
634 (*next_it).oldestInst < queue_entry.oldestInst) {
635 ++next_it;
636 }
637
638 readyIt[op_class] = listOrder.insert(next_it, queue_entry);
639 }
640
641 template <class Impl>
642 void
643 InstructionQueue<Impl>::processFUCompletion(DynInstPtr &inst, int fu_idx)
644 {
645 // The CPU could have been sleeping until this op completed (*extremely*
646 // long latency op). Wake it if it was. This may be overkill.
647 if (isSwitchedOut()) {
648 return;
649 }
650
651 iewStage->wakeCPU();
652
653 if (fu_idx > -1)
654 fuPool->freeUnitNextCycle(fu_idx);
655
656 // @todo: Ensure that these FU Completions happen at the beginning
657 // of a cycle, otherwise they could add too many instructions to
658 // the queue.
659 issueToExecuteQueue->access(0)->size++;
660 instsToExecute.push_back(inst);
661 }
662
663 // @todo: Figure out a better way to remove the squashed items from the
664 // lists. Checking the top item of each list to see if it's squashed
665 // wastes time and forces jumps.
666 template <class Impl>
667 void
668 InstructionQueue<Impl>::scheduleReadyInsts()
669 {
670 DPRINTF(IQ, "Attempting to schedule ready instructions from "
671 "the IQ.\n");
672
673 IssueStruct *i2e_info = issueToExecuteQueue->access(0);
674
675 // Have iterator to head of the list
676 // While I haven't exceeded bandwidth or reached the end of the list,
677 // Try to get a FU that can do what this op needs.
678 // If successful, change the oldestInst to the new top of the list, put
679 // the queue in the proper place in the list.
680 // Increment the iterator.
681 // This will avoid trying to schedule a certain op class if there are no
682 // FUs that handle it.
683 ListOrderIt order_it = listOrder.begin();
684 ListOrderIt order_end_it = listOrder.end();
685 int total_issued = 0;
686
687 while (total_issued < totalWidth &&
688 iewStage->canIssue() &&
689 order_it != order_end_it) {
690 OpClass op_class = (*order_it).queueType;
691
692 assert(!readyInsts[op_class].empty());
693
694 DynInstPtr issuing_inst = readyInsts[op_class].top();
695
696 assert(issuing_inst->seqNum == (*order_it).oldestInst);
697
698 if (issuing_inst->isSquashed()) {
699 readyInsts[op_class].pop();
700
701 if (!readyInsts[op_class].empty()) {
702 moveToYoungerInst(order_it);
703 } else {
704 readyIt[op_class] = listOrder.end();
705 queueOnList[op_class] = false;
706 }
707
708 listOrder.erase(order_it++);
709
710 ++iqSquashedInstsIssued;
711
712 continue;
713 }
714
715 int idx = -2;
716 int op_latency = 1;
717 int tid = issuing_inst->threadNumber;
718
719 if (op_class != No_OpClass) {
720 idx = fuPool->getUnit(op_class);
721
722 if (idx > -1) {
723 op_latency = fuPool->getOpLatency(op_class);
724 }
725 }
726
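// FU lookup convention: idx == -2 means no FU is required
// (No_OpClass), idx == -1 means an FU was needed but none was free
// this cycle, and idx >= 0 names the FU that was claimed.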
727 // If the instruction doesn't require a FU, or we got a valid FU,
728 // then schedule it for execution.
729 if (idx == -2 || idx != -1) {
730 if (op_latency == 1) {
731 i2e_info->size++;
732 instsToExecute.push_back(issuing_inst);
733
734 // Add the FU onto the list of FU's to be freed next
735 // cycle if we used one.
736 if (idx >= 0)
737 fuPool->freeUnitNextCycle(idx);
738 } else {
739 int issue_latency = fuPool->getIssueLatency(op_class);
740 // Generate completion event for the FU
741 FUCompletion *execution = new FUCompletion(issuing_inst,
742 idx, this);
743
744 execution->schedule(curTick + cpu->cycles(issue_latency - 1));
745
746 // @todo: Enforce that issue_latency == 1 or op_latency
747 if (issue_latency > 1) {
748 // If FU isn't pipelined, then it must be freed
749 // upon the execution completing.
750 execution->setFreeFU();
751 } else {
752 // Add the FU onto the list of FU's to be freed next cycle.
753 fuPool->freeUnitNextCycle(idx);
754 }
755 }
756
757 DPRINTF(IQ, "Thread %i: Issuing instruction PC %#x "
758 "[sn:%lli]\n",
759 tid, issuing_inst->readPC(),
760 issuing_inst->seqNum);
761
762 readyInsts[op_class].pop();
763
764 if (!readyInsts[op_class].empty()) {
765 moveToYoungerInst(order_it);
766 } else {
767 readyIt[op_class] = listOrder.end();
768 queueOnList[op_class] = false;
769 }
770
771 issuing_inst->setIssued();
772 ++total_issued;
773
774 if (!issuing_inst->isMemRef()) {
775 // Memory instructions can not be freed from the IQ until they
776 // complete.
777 ++freeEntries;
778 count[tid]--;
779 issuing_inst->clearInIQ();
780 } else {
781 memDepUnit[tid].issue(issuing_inst);
782 }
783
784 listOrder.erase(order_it++);
785 statIssuedInstType[tid][op_class]++;
786 iewStage->incrWb(issuing_inst->seqNum);
787 } else {
788 statFuBusy[op_class]++;
789 fuBusy[tid]++;
790 ++order_it;
791 }
792 }
793
794 numIssuedDist.sample(total_issued);
795 iqInstsIssued += total_issued;
796
797 // If we issued any instructions, tell the CPU we had activity.
798 if (total_issued) {
799 cpu->activityThisCycle();
800 } else {
801 DPRINTF(IQ, "Not able to schedule any instructions.\n");
802 }
803 }
804
805 template <class Impl>
806 void
807 InstructionQueue<Impl>::scheduleNonSpec(const InstSeqNum &inst)
808 {
809 DPRINTF(IQ, "Marking nonspeculative instruction [sn:%lli] as ready "
810 "to execute.\n", inst);
811
812 NonSpecMapIt inst_it = nonSpecInsts.find(inst);
813
814 assert(inst_it != nonSpecInsts.end());
815
816 unsigned tid = (*inst_it).second->threadNumber;
817
818 (*inst_it).second->setCanIssue();
819
820 if (!(*inst_it).second->isMemRef()) {
821 addIfReady((*inst_it).second);
822 } else {
823 memDepUnit[tid].nonSpecInstReady((*inst_it).second);
824 }
825
826 (*inst_it).second = NULL;
827
828 nonSpecInsts.erase(inst_it);
829 }
830
831 template <class Impl>
832 void
833 InstructionQueue<Impl>::commit(const InstSeqNum &inst, unsigned tid)
834 {
835 DPRINTF(IQ, "[tid:%i]: Committing instructions older than [sn:%i]\n",
836 tid,inst);
837
838 ListIt iq_it = instList[tid].begin();
839
840 while (iq_it != instList[tid].end() &&
841 (*iq_it)->seqNum <= inst) {
842 ++iq_it;
843 instList[tid].pop_front();
844 }
845
846 assert(freeEntries == (numEntries - countInsts()));
847 }
848
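// Walk the dependency chain rooted at each destination register of the
// completed instruction, marking that source operand ready in every
// waiting instruction; returns the number of dependents woken.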
849 template <class Impl>
850 int
851 InstructionQueue<Impl>::wakeDependents(DynInstPtr &completed_inst)
852 {
853 int dependents = 0;
854
855 DPRINTF(IQ, "Waking dependents of completed instruction.\n");
856
857 assert(!completed_inst->isSquashed());
858
859 // Tell the memory dependence unit to wake any dependents on this
860 // instruction if it is a memory instruction. Also complete the memory
861 // instruction at this point since we know it executed without issues.
862 // @todo: Might want to rename "completeMemInst" to something that
863 // indicates that it won't need to be replayed, and call this
864 // earlier. Might not be a big deal.
865 if (completed_inst->isMemRef()) {
866 memDepUnit[completed_inst->threadNumber].wakeDependents(completed_inst);
867 completeMemInst(completed_inst);
868 } else if (completed_inst->isMemBarrier() ||
869 completed_inst->isWriteBarrier()) {
870 memDepUnit[completed_inst->threadNumber].completeBarrier(completed_inst);
871 }
872
873 for (int dest_reg_idx = 0;
874 dest_reg_idx < completed_inst->numDestRegs();
875 dest_reg_idx++)
876 {
877 PhysRegIndex dest_reg =
878 completed_inst->renamedDestRegIdx(dest_reg_idx);
879
880 // Special case of uniq or control registers. They are not
881 // handled by the IQ and thus have no dependency graph entry.
882 // @todo Figure out a cleaner way to handle this.
883 if (dest_reg >= numPhysRegs) {
884 continue;
885 }
886
887 DPRINTF(IQ, "Waking any dependents on register %i.\n",
888 (int) dest_reg);
889
890 //Go through the dependency chain, marking the registers as
891 //ready within the waiting instructions.
892 DynInstPtr dep_inst = dependGraph.pop(dest_reg);
893
894 while (dep_inst) {
895 DPRINTF(IQ, "Waking up a dependent instruction, PC%#x.\n",
896 dep_inst->readPC());
897
898 // Might want to give more information to the instruction
899 // so that it knows which of its source registers is
900 // ready. However that would mean that the dependency
901 // graph entries would need to hold the src_reg_idx.
902 dep_inst->markSrcRegReady();
903
904 addIfReady(dep_inst);
905
906 dep_inst = dependGraph.pop(dest_reg);
907
908 ++dependents;
909 }
910
911 // Reset the head node now that all of its dependents have
912 // been woken up.
913 assert(dependGraph.empty(dest_reg));
914 dependGraph.clearInst(dest_reg);
915
916 // Mark the scoreboard as having that register ready.
917 regScoreboard[dest_reg] = true;
918 }
919 return dependents;
920 }
921
922 template <class Impl>
923 void
924 InstructionQueue<Impl>::addReadyMemInst(DynInstPtr &ready_inst)
925 {
926 OpClass op_class = ready_inst->opClass();
927
928 readyInsts[op_class].push(ready_inst);
929
930 // Will need to reorder the list if either a queue is not on the list,
931 // or it has an older instruction than last time.
932 if (!queueOnList[op_class]) {
933 addToOrderList(op_class);
934 } else if (readyInsts[op_class].top()->seqNum <
935 (*readyIt[op_class]).oldestInst) {
936 listOrder.erase(readyIt[op_class]);
937 addToOrderList(op_class);
938 }
939
940 DPRINTF(IQ, "Instruction is ready to issue, putting it onto "
941 "the ready list, PC %#x opclass:%i [sn:%lli].\n",
942 ready_inst->readPC(), op_class, ready_inst->seqNum);
943 }
944
945 template <class Impl>
946 void
947 InstructionQueue<Impl>::rescheduleMemInst(DynInstPtr &resched_inst)
948 {
949 memDepUnit[resched_inst->threadNumber].reschedule(resched_inst);
950 }
951
952 template <class Impl>
953 void
954 InstructionQueue<Impl>::replayMemInst(DynInstPtr &replay_inst)
955 {
956 memDepUnit[replay_inst->threadNumber].replay(replay_inst);
957 }
958
959 template <class Impl>
960 void
961 InstructionQueue<Impl>::completeMemInst(DynInstPtr &completed_inst)
962 {
963 int tid = completed_inst->threadNumber;
964
965 DPRINTF(IQ, "Completing mem instruction PC:%#x [sn:%lli]\n",
966 completed_inst->readPC(), completed_inst->seqNum);
967
968 ++freeEntries;
969
970 completed_inst->memOpDone = true;
971
972 memDepUnit[tid].completed(completed_inst);
973
974 count[tid]--;
975 }
976
977 template <class Impl>
978 void
979 InstructionQueue<Impl>::violation(DynInstPtr &store,
980 DynInstPtr &faulting_load)
981 {
982 memDepUnit[store->threadNumber].violation(store, faulting_load);
983 }
984
985 template <class Impl>
986 void
987 InstructionQueue<Impl>::squash(unsigned tid)
988 {
989 DPRINTF(IQ, "[tid:%i]: Starting to squash instructions in "
990 "the IQ.\n", tid);
991
992 // Read instruction sequence number of last instruction out of the
993 // time buffer.
994 #if ISA_HAS_DELAY_SLOT
995 squashedSeqNum[tid] = fromCommit->commitInfo[tid].bdelayDoneSeqNum;
996 #else
997 squashedSeqNum[tid] = fromCommit->commitInfo[tid].doneSeqNum;
998 #endif
999
1000 // Call doSquash if there are insts in the IQ
1001 if (count[tid] > 0) {
1002 doSquash(tid);
1003 }
1004
1005 // Also tell the memory dependence unit to squash.
1006 memDepUnit[tid].squash(squashedSeqNum[tid], tid);
1007 }
1008
1009 template <class Impl>
1010 void
1011 InstructionQueue<Impl>::doSquash(unsigned tid)
1012 {
1013 // Start at the tail.
1014 ListIt squash_it = instList[tid].end();
1015 --squash_it;
1016
1017 DPRINTF(IQ, "[tid:%i]: Squashing until sequence number %i!\n",
1018 tid, squashedSeqNum[tid]);
1019
1020 // Squash any instructions younger than the squashed sequence number
1021 // given.
1022 while (squash_it != instList[tid].end() &&
1023 (*squash_it)->seqNum > squashedSeqNum[tid]) {
1024
1025 DynInstPtr squashed_inst = (*squash_it);
1026
1027 // Only handle the instruction if it actually is in the IQ and
1028 // hasn't already been squashed in the IQ.
1029 if (squashed_inst->threadNumber != tid ||
1030 squashed_inst->isSquashedInIQ()) {
1031 --squash_it;
1032 continue;
1033 }
1034
1035 if (!squashed_inst->isIssued() ||
1036 (squashed_inst->isMemRef() &&
1037 !squashed_inst->memOpDone)) {
1038
1039 // Remove the instruction from the dependency list.
1040 if (!squashed_inst->isNonSpeculative() &&
1041 !squashed_inst->isStoreConditional() &&
1042 !squashed_inst->isMemBarrier() &&
1043 !squashed_inst->isWriteBarrier()) {
1044
1045 for (int src_reg_idx = 0;
1046 src_reg_idx < squashed_inst->numSrcRegs();
1047 src_reg_idx++)
1048 {
1049 PhysRegIndex src_reg =
1050 squashed_inst->renamedSrcRegIdx(src_reg_idx);
1051
1052 // Only remove it from the dependency graph if it
1053 // was placed there in the first place.
1054
1055 // Instead of doing a linked list traversal, we
1056 // can just remove these squashed instructions
1057 // either at issue time, or when the register is
1058 // overwritten. The only downside to this is it
1059 // leaves more room for error.
1060
1061 if (!squashed_inst->isReadySrcRegIdx(src_reg_idx) &&
1062 src_reg < numPhysRegs) {
1063 dependGraph.remove(src_reg, squashed_inst);
1064 }
1065
1066
1067 ++iqSquashedOperandsExamined;
1068 }
1069 } else {
1070 NonSpecMapIt ns_inst_it =
1071 nonSpecInsts.find(squashed_inst->seqNum);
1072 assert(ns_inst_it != nonSpecInsts.end());
1073
1074 (*ns_inst_it).second = NULL;
1075
1076 nonSpecInsts.erase(ns_inst_it);
1077
1078 ++iqSquashedNonSpecRemoved;
1079 }
1080
1081 // Might want to also clear out the head of the dependency graph.
1082
1083 // Mark it as squashed within the IQ.
1084 squashed_inst->setSquashedInIQ();
1085
1086 // @todo: Remove this hack where several statuses are set so the
1087 // inst will flow through the rest of the pipeline.
1088 squashed_inst->setIssued();
1089 squashed_inst->setCanCommit();
1090 squashed_inst->clearInIQ();
1091
1092 //Update Thread IQ Count
1093 count[squashed_inst->threadNumber]--;
1094
1095 ++freeEntries;
1096
1097 DPRINTF(IQ, "[tid:%i]: Instruction [sn:%lli] PC %#x "
1098 "squashed.\n",
1099 tid, squashed_inst->seqNum, squashed_inst->readPC());
1100 }
1101
1102 instList[tid].erase(squash_it--);
1103 ++iqSquashedInstsExamined;
1104 }
1105 }
1106
1107 template <class Impl>
1108 bool
1109 InstructionQueue<Impl>::addToDependents(DynInstPtr &new_inst)
1110 {
1111 // Loop through the instruction's source registers, adding
1112 // them to the dependency list if they are not ready.
1113 int8_t total_src_regs = new_inst->numSrcRegs();
1114 bool return_val = false;
1115
1116 for (int src_reg_idx = 0;
1117 src_reg_idx < total_src_regs;
1118 src_reg_idx++)
1119 {
1120 // Only add it to the dependency graph if it's not ready.
1121 if (!new_inst->isReadySrcRegIdx(src_reg_idx)) {
1122 PhysRegIndex src_reg = new_inst->renamedSrcRegIdx(src_reg_idx);
1123
1124 // Check the IQ's scoreboard to make sure the register
1125 // hasn't become ready while the instruction was in flight
1126 // between stages. Only if it really isn't ready should
1127 // it be added to the dependency graph.
1128 if (src_reg >= numPhysRegs) {
1129 continue;
1130 } else if (regScoreboard[src_reg] == false) {
1131 DPRINTF(IQ, "Instruction PC %#x has src reg %i that "
1132 "is being added to the dependency chain.\n",
1133 new_inst->readPC(), src_reg);
1134
1135 dependGraph.insert(src_reg, new_inst);
1136
1137 // Change the return value to indicate that something
1138 // was added to the dependency graph.
1139 return_val = true;
1140 } else {
1141 DPRINTF(IQ, "Instruction PC %#x has src reg %i that "
1142 "became ready before it reached the IQ.\n",
1143 new_inst->readPC(), src_reg);
1144 // Mark a register ready within the instruction.
1145 new_inst->markSrcRegReady(src_reg_idx);
1146 }
1147 }
1148 }
1149
1150 return return_val;
1151 }
1152
1153 template <class Impl>
1154 void
1155 InstructionQueue<Impl>::addToProducers(DynInstPtr &new_inst)
1156 {
1157 // Nothing really needs to be marked when an instruction becomes
1158 // the producer of a register's value, but for convenience a ptr
1159 // to the producing instruction will be placed in the head node of
1160 // the dependency links.
1161 int8_t total_dest_regs = new_inst->numDestRegs();
1162
1163 for (int dest_reg_idx = 0;
1164 dest_reg_idx < total_dest_regs;
1165 dest_reg_idx++)
1166 {
1167 PhysRegIndex dest_reg = new_inst->renamedDestRegIdx(dest_reg_idx);
1168
1169 // Instructions that use the misc regs will have a reg number
1170 // higher than the normal physical registers. In this case these
1171 // registers are not renamed, and there is no need to track
1172 // dependencies as these instructions must be executed at commit.
1173 if (dest_reg >= numPhysRegs) {
1174 continue;
1175 }
1176
1177 if (!dependGraph.empty(dest_reg)) {
1178 dependGraph.dump();
1179 panic("Dependency graph %i not empty!", dest_reg);
1180 }
1181
1182 dependGraph.setInst(dest_reg, new_inst);
1183
1184 // Mark the scoreboard to say it's not yet ready.
1185 regScoreboard[dest_reg] = false;
1186 }
1187 }
1188
1189 template <class Impl>
1190 void
1191 InstructionQueue<Impl>::addIfReady(DynInstPtr &inst)
1192 {
1193 // If the instruction now has all of its source registers
1194 // available, then add it to the list of ready instructions.
1195 if (inst->readyToIssue()) {
1196
1197 //Add the instruction to the proper ready list.
1198 if (inst->isMemRef()) {
1199
1200 DPRINTF(IQ, "Checking if memory instruction can issue.\n");
1201
1202 // Message to the mem dependence unit that this instruction has
1203 // its registers ready.
1204 memDepUnit[inst->threadNumber].regsReady(inst);
1205
1206 return;
1207 }
1208
1209 OpClass op_class = inst->opClass();
1210
1211 DPRINTF(IQ, "Instruction is ready to issue, putting it onto "
1212 "the ready list, PC %#x opclass:%i [sn:%lli].\n",
1213 inst->readPC(), op_class, inst->seqNum);
1214
1215 readyInsts[op_class].push(inst);
1216
1217 // Will need to reorder the list if either a queue is not on the list,
1218 // or it has an older instruction than last time.
1219 if (!queueOnList[op_class]) {
1220 addToOrderList(op_class);
1221 } else if (readyInsts[op_class].top()->seqNum <
1222 (*readyIt[op_class]).oldestInst) {
1223 listOrder.erase(readyIt[op_class]);
1224 addToOrderList(op_class);
1225 }
1226 }
1227 }
1228
1229 template <class Impl>
1230 int
1231 InstructionQueue<Impl>::countInsts()
1232 {
1233 #if 0
1234 //ksewell: This works, but it could use a cleaner rewrite with a
1235 //more intuitive way of counting; right now it's just brute
1236 //force.
1237 // Change the #if if you want to use this method.
1238 int total_insts = 0;
1239
1240 for (int i = 0; i < numThreads; ++i) {
1241 ListIt count_it = instList[i].begin();
1242
1243 while (count_it != instList[i].end()) {
1244 if (!(*count_it)->isSquashed() && !(*count_it)->isSquashedInIQ()) {
1245 if (!(*count_it)->isIssued()) {
1246 ++total_insts;
1247 } else if ((*count_it)->isMemRef() &&
1248 !(*count_it)->memOpDone) {
1249 // Loads that have not been marked as executed still count
1250 // towards the total instructions.
1251 ++total_insts;
1252 }
1253 }
1254
1255 ++count_it;
1256 }
1257 }
1258
1259 return total_insts;
1260 #else
1261 return numEntries - freeEntries;
1262 #endif
1263 }
1264
1265 template <class Impl>
1266 void
1267 InstructionQueue<Impl>::dumpLists()
1268 {
1269 for (int i = 0; i < Num_OpClasses; ++i) {
1270 cprintf("Ready list %i size: %i\n", i, readyInsts[i].size());
1271
1272 cprintf("\n");
1273 }
1274
1275 cprintf("Non speculative list size: %i\n", nonSpecInsts.size());
1276
1277 NonSpecMapIt non_spec_it = nonSpecInsts.begin();
1278 NonSpecMapIt non_spec_end_it = nonSpecInsts.end();
1279
1280 cprintf("Non speculative list: ");
1281
1282 while (non_spec_it != non_spec_end_it) {
1283 cprintf("%#x [sn:%lli]", (*non_spec_it).second->readPC(),
1284 (*non_spec_it).second->seqNum);
1285 ++non_spec_it;
1286 }
1287
1288 cprintf("\n");
1289
1290 ListOrderIt list_order_it = listOrder.begin();
1291 ListOrderIt list_order_end_it = listOrder.end();
1292 int i = 1;
1293
1294 cprintf("List order: ");
1295
1296 while (list_order_it != list_order_end_it) {
1297 cprintf("%i OpClass:%i [sn:%lli] ", i, (*list_order_it).queueType,
1298 (*list_order_it).oldestInst);
1299
1300 ++list_order_it;
1301 ++i;
1302 }
1303
1304 cprintf("\n");
1305 }
1306
1307
1308 template <class Impl>
1309 void
1310 InstructionQueue<Impl>::dumpInsts()
1311 {
1312 for (int i = 0; i < numThreads; ++i) {
1313 int num = 0;
1314 int valid_num = 0;
1315 ListIt inst_list_it = instList[i].begin();
1316
1317 while (inst_list_it != instList[i].end())
1318 {
1319 cprintf("Instruction:%i\n",
1320 num);
1321 if (!(*inst_list_it)->isSquashed()) {
1322 if (!(*inst_list_it)->isIssued()) {
1323 ++valid_num;
1324 cprintf("Count:%i\n", valid_num);
1325 } else if ((*inst_list_it)->isMemRef() &&
1326 !(*inst_list_it)->memOpDone) {
1327 // Loads that have not been marked as executed
1328 // still count towards the total instructions.
1329 ++valid_num;
1330 cprintf("Count:%i\n", valid_num);
1331 }
1332 }
1333
1334 cprintf("PC:%#x\n[sn:%lli]\n[tid:%i]\n"
1335 "Issued:%i\nSquashed:%i\n",
1336 (*inst_list_it)->readPC(),
1337 (*inst_list_it)->seqNum,
1338 (*inst_list_it)->threadNumber,
1339 (*inst_list_it)->isIssued(),
1340 (*inst_list_it)->isSquashed());
1341
1342 if ((*inst_list_it)->isMemRef()) {
1343 cprintf("MemOpDone:%i\n", (*inst_list_it)->memOpDone);
1344 }
1345
1346 cprintf("\n");
1347
1348 inst_list_it++;
1349 ++num;
1350 }
1351 }
1352
1353 cprintf("Insts to Execute list:\n");
1354
1355 int num = 0;
1356 int valid_num = 0;
1357 ListIt inst_list_it = instsToExecute.begin();
1358
1359 while (inst_list_it != instsToExecute.end())
1360 {
1361 cprintf("Instruction:%i\n",
1362 num);
1363 if (!(*inst_list_it)->isSquashed()) {
1364 if (!(*inst_list_it)->isIssued()) {
1365 ++valid_num;
1366 cprintf("Count:%i\n", valid_num);
1367 } else if ((*inst_list_it)->isMemRef() &&
1368 !(*inst_list_it)->memOpDone) {
1369 // Loads that have not been marked as executed
1370 // still count towards the total instructions.
1371 ++valid_num;
1372 cprintf("Count:%i\n", valid_num);
1373 }
1374 }
1375
1376 cprintf("PC:%#x\n[sn:%lli]\n[tid:%i]\n"
1377 "Issued:%i\nSquashed:%i\n",
1378 (*inst_list_it)->readPC(),
1379 (*inst_list_it)->seqNum,
1380 (*inst_list_it)->threadNumber,
1381 (*inst_list_it)->isIssued(),
1382 (*inst_list_it)->isSquashed());
1383
1384 if ((*inst_list_it)->isMemRef()) {
1385 cprintf("MemOpDone:%i\n", (*inst_list_it)->memOpDone);
1386 }
1387
1388 cprintf("\n");
1389
1390 inst_list_it++;
1391 ++num;
1392 }
1393 }