1 /*
2 * Copyright (c) 2004-2006 The Regents of The University of Michigan
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 *
28 * Authors: Kevin Lim
29 * Korey Sewell
30 */
31
32 #include <limits>
33 #include <vector>
34
35 #include "sim/root.hh"
36
37 #include "cpu/o3/fu_pool.hh"
38 #include "cpu/o3/inst_queue.hh"
39
40 template <class Impl>
41 InstructionQueue<Impl>::FUCompletion::FUCompletion(DynInstPtr &_inst,
42 int fu_idx,
43 InstructionQueue<Impl> *iq_ptr)
44 : Event(&mainEventQueue, Stat_Event_Pri),
45 inst(_inst), fuIdx(fu_idx), iqPtr(iq_ptr), freeFU(false)
46 {
47 this->setFlags(Event::AutoDelete);
48 }
49
50 template <class Impl>
51 void
52 InstructionQueue<Impl>::FUCompletion::process()
53 {
54 iqPtr->processFUCompletion(inst, freeFU ? fuIdx : -1);
55 inst = NULL;
56 }
57
58
59 template <class Impl>
60 const char *
61 InstructionQueue<Impl>::FUCompletion::description()
62 {
63 return "Functional unit completion event";
64 }
65
66 template <class Impl>
67 InstructionQueue<Impl>::InstructionQueue(Params *params)
68 : fuPool(params->fuPool),
69 numEntries(params->numIQEntries),
70 totalWidth(params->issueWidth),
71 numPhysIntRegs(params->numPhysIntRegs),
72 numPhysFloatRegs(params->numPhysFloatRegs),
73 commitToIEWDelay(params->commitToIEWDelay)
74 {
75 assert(fuPool);
76
77 switchedOut = false;
78
79 numThreads = params->numberOfThreads;
80
81 // Set the number of physical registers as the number of int + float
82 numPhysRegs = numPhysIntRegs + numPhysFloatRegs;
83
84 DPRINTF(IQ, "There are %i physical registers.\n", numPhysRegs);
85
86 //Create an entry for each physical register within the
87 //dependency graph.
88 dependGraph.resize(numPhysRegs);
89
90 // Resize the register scoreboard.
91 regScoreboard.resize(numPhysRegs);
92
93 //Initialize Mem Dependence Units
94 for (int i = 0; i < numThreads; i++) {
95 memDepUnit[i].init(params,i);
96 memDepUnit[i].setIQ(this);
97 }
98
99 resetState();
100
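// The SMT IQ sharing policy below determines how the numEntries IQ slots
// are divided among threads. As an illustrative example (numbers are
// hypothetical): with 64 entries and 2 threads, "dynamic" lets each thread
// use up to all 64 entries, "partitioned" caps each thread at 32, and
// "threshold" with smtIQThreshold = 50 also caps each thread at 32, but
// without reserving the remaining entries for the other thread.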
101 std::string policy = params->smtIQPolicy;
102
103 //Convert string to lowercase
104 std::transform(policy.begin(), policy.end(), policy.begin(),
105 (int(*)(int)) tolower);
106
107 //Figure out resource sharing policy
108 if (policy == "dynamic") {
109 iqPolicy = Dynamic;
110
111 //Set max entries to the total IQ capacity
112 for (int i = 0; i < numThreads; i++) {
113 maxEntries[i] = numEntries;
114 }
115
116 } else if (policy == "partitioned") {
117 iqPolicy = Partitioned;
118
119 // @todo: Make this work when numEntries doesn't divide evenly among threads.
120 int part_amt = numEntries / numThreads;
121
122 //Divide the IQ up evenly among threads
123 for (int i = 0; i < numThreads; i++) {
124 maxEntries[i] = part_amt;
125 }
126
127 DPRINTF(IQ, "IQ sharing policy set to Partitioned: "
128 "%i entries per thread.\n", part_amt);
129
130 } else if (policy == "threshold") {
131 iqPolicy = Threshold;
132
133 double threshold = (double)params->smtIQThreshold / 100;
134
135 int thresholdIQ = (int)((double)threshold * numEntries);
136
137 //Divide up by threshold amount
138 for (int i = 0; i < numThreads; i++) {
139 maxEntries[i] = thresholdIQ;
140 }
141
142 DPRINTF(IQ, "IQ sharing policy set to Threshold: "
143 "%i entries per thread.\n", thresholdIQ);
144 } else {
145 assert(0 && "Invalid IQ sharing policy. Options are: {Dynamic, "
146 "Partitioned, Threshold}");
147 }
148 }
149
150 template <class Impl>
151 InstructionQueue<Impl>::~InstructionQueue()
152 {
153 dependGraph.reset();
154 #ifdef DEBUG
155 cprintf("Nodes traversed: %i, removed: %i\n",
156 dependGraph.nodesTraversed, dependGraph.nodesRemoved);
157 #endif
158 }
159
160 template <class Impl>
161 std::string
162 InstructionQueue<Impl>::name() const
163 {
164 return cpu->name() + ".iq";
165 }
166
167 template <class Impl>
168 void
169 InstructionQueue<Impl>::regStats()
170 {
171 using namespace Stats;
172 iqInstsAdded
173 .name(name() + ".iqInstsAdded")
174 .desc("Number of instructions added to the IQ (excludes non-spec)")
175 .prereq(iqInstsAdded);
176
177 iqNonSpecInstsAdded
178 .name(name() + ".iqNonSpecInstsAdded")
179 .desc("Number of non-speculative instructions added to the IQ")
180 .prereq(iqNonSpecInstsAdded);
181
182 iqInstsIssued
183 .name(name() + ".iqInstsIssued")
184 .desc("Number of instructions issued")
185 .prereq(iqInstsIssued);
186
187 iqIntInstsIssued
188 .name(name() + ".iqIntInstsIssued")
189 .desc("Number of integer instructions issued")
190 .prereq(iqIntInstsIssued);
191
192 iqFloatInstsIssued
193 .name(name() + ".iqFloatInstsIssued")
194 .desc("Number of float instructions issued")
195 .prereq(iqFloatInstsIssued);
196
197 iqBranchInstsIssued
198 .name(name() + ".iqBranchInstsIssued")
199 .desc("Number of branch instructions issued")
200 .prereq(iqBranchInstsIssued);
201
202 iqMemInstsIssued
203 .name(name() + ".iqMemInstsIssued")
204 .desc("Number of memory instructions issued")
205 .prereq(iqMemInstsIssued);
206
207 iqMiscInstsIssued
208 .name(name() + ".iqMiscInstsIssued")
209 .desc("Number of miscellaneous instructions issued")
210 .prereq(iqMiscInstsIssued);
211
212 iqSquashedInstsIssued
213 .name(name() + ".iqSquashedInstsIssued")
214 .desc("Number of squashed instructions issued")
215 .prereq(iqSquashedInstsIssued);
216
217 iqSquashedInstsExamined
218 .name(name() + ".iqSquashedInstsExamined")
219 .desc("Number of squashed instructions iterated over during squash;"
220 " mainly for profiling")
221 .prereq(iqSquashedInstsExamined);
222
223 iqSquashedOperandsExamined
224 .name(name() + ".iqSquashedOperandsExamined")
225 .desc("Number of squashed operands that are examined and possibly "
226 "removed from graph")
227 .prereq(iqSquashedOperandsExamined);
228
229 iqSquashedNonSpecRemoved
230 .name(name() + ".iqSquashedNonSpecRemoved")
231 .desc("Number of squashed non-spec instructions that were removed")
232 .prereq(iqSquashedNonSpecRemoved);
233 /*
234 queueResDist
235 .init(Num_OpClasses, 0, 99, 2)
236 .name(name() + ".IQ:residence:")
237 .desc("cycles from dispatch to issue")
238 .flags(total | pdf | cdf )
239 ;
240 for (int i = 0; i < Num_OpClasses; ++i) {
241 queueResDist.subname(i, opClassStrings[i]);
242 }
243 */
244 numIssuedDist
245 .init(0,totalWidth,1)
246 .name(name() + ".ISSUE:issued_per_cycle")
247 .desc("Number of insts issued each cycle")
248 .flags(pdf)
249 ;
250 /*
251 dist_unissued
252 .init(Num_OpClasses+2)
253 .name(name() + ".ISSUE:unissued_cause")
254 .desc("Reason ready instruction not issued")
255 .flags(pdf | dist)
256 ;
257 for (int i=0; i < (Num_OpClasses + 2); ++i) {
258 dist_unissued.subname(i, unissued_names[i]);
259 }
260 */
261 statIssuedInstType
262 .init(numThreads,Num_OpClasses)
263 .name(name() + ".ISSUE:FU_type")
264 .desc("Type of FU issued")
265 .flags(total | pdf | dist)
266 ;
267 statIssuedInstType.ysubnames(opClassStrings);
268
269 //
270 // How long did instructions for a particular FU type wait prior to issue
271 //
272 /*
273 issueDelayDist
274 .init(Num_OpClasses,0,99,2)
275 .name(name() + ".ISSUE:")
276 .desc("cycles from operands ready to issue")
277 .flags(pdf | cdf)
278 ;
279
280 for (int i=0; i<Num_OpClasses; ++i) {
281 std::stringstream subname;
282 subname << opClassStrings[i] << "_delay";
283 issueDelayDist.subname(i, subname.str());
284 }
285 */
286 issueRate
287 .name(name() + ".ISSUE:rate")
288 .desc("Inst issue rate")
289 .flags(total)
290 ;
291 issueRate = iqInstsIssued / cpu->numCycles;
292
293 statFuBusy
294 .init(Num_OpClasses)
295 .name(name() + ".ISSUE:fu_full")
296 .desc("attempts to use FU when none available")
297 .flags(pdf | dist)
298 ;
299 for (int i=0; i < Num_OpClasses; ++i) {
300 statFuBusy.subname(i, opClassStrings[i]);
301 }
302
303 fuBusy
304 .init(numThreads)
305 .name(name() + ".ISSUE:fu_busy_cnt")
306 .desc("FU busy when requested")
307 .flags(total)
308 ;
309
310 fuBusyRate
311 .name(name() + ".ISSUE:fu_busy_rate")
312 .desc("FU busy rate (busy events/executed inst)")
313 .flags(total)
314 ;
315 fuBusyRate = fuBusy / iqInstsIssued;
316
317 for (int i = 0; i < numThreads; i++) {
318 // Tell mem dependence unit to reg stats as well.
319 memDepUnit[i].regStats();
320 }
321 }
322
323 template <class Impl>
324 void
325 InstructionQueue<Impl>::resetState()
326 {
327 //Initialize thread IQ counts
328 for (int i = 0; i < numThreads; i++) {
329 count[i] = 0;
330 instList[i].clear();
331 }
332
333 // Initialize the number of free IQ entries.
334 freeEntries = numEntries;
335
336 // Note that, in reality, the physical registers corresponding to the
337 // logical registers start off ready. This doesn't matter for the IQ,
338 // though: rename has already told each instruction which of its source
339 // registers are ready, so the scoreboard can safely be initialized to
340 // all-unready.
341 for (int i = 0; i < numPhysRegs; ++i) {
342 regScoreboard[i] = false;
343 }
344
345 for (int i = 0; i < numThreads; ++i) {
346 squashedSeqNum[i] = 0;
347 }
348
349 for (int i = 0; i < Num_OpClasses; ++i) {
350 while (!readyInsts[i].empty())
351 readyInsts[i].pop();
352 queueOnList[i] = false;
353 readyIt[i] = listOrder.end();
354 }
355 nonSpecInsts.clear();
356 listOrder.clear();
357 }
358
359 template <class Impl>
360 void
361 InstructionQueue<Impl>::setActiveThreads(std::list<unsigned> *at_ptr)
362 {
363 DPRINTF(IQ, "Setting active threads list pointer.\n");
364 activeThreads = at_ptr;
365 }
366
367 template <class Impl>
368 void
369 InstructionQueue<Impl>::setIssueToExecuteQueue(TimeBuffer<IssueStruct> *i2e_ptr)
370 {
371 DPRINTF(IQ, "Set the issue to execute queue.\n");
372 issueToExecuteQueue = i2e_ptr;
373 }
374
375 template <class Impl>
376 void
377 InstructionQueue<Impl>::setTimeBuffer(TimeBuffer<TimeStruct> *tb_ptr)
378 {
379 DPRINTF(IQ, "Set the time buffer.\n");
380 timeBuffer = tb_ptr;
381
382 fromCommit = timeBuffer->getWire(-commitToIEWDelay);
383 }
384
385 template <class Impl>
386 void
387 InstructionQueue<Impl>::switchOut()
388 {
389 /*
390 if (!instList[0].empty() || (numEntries != freeEntries) ||
391 !readyInsts[0].empty() || !nonSpecInsts.empty() || !listOrder.empty()) {
392 dumpInsts();
393 // assert(0);
394 }
395 */
396 resetState();
397 dependGraph.reset();
398 instsToExecute.clear();
399 switchedOut = true;
400 for (int i = 0; i < numThreads; ++i) {
401 memDepUnit[i].switchOut();
402 }
403 }
404
405 template <class Impl>
406 void
407 InstructionQueue<Impl>::takeOverFrom()
408 {
409 switchedOut = false;
410 }
411
412 template <class Impl>
413 int
414 InstructionQueue<Impl>::entryAmount(int num_threads)
415 {
416 if (iqPolicy == Partitioned) {
417 return numEntries / num_threads;
418 } else {
419 return 0;
420 }
421 }
422
423
424 template <class Impl>
425 void
426 InstructionQueue<Impl>::resetEntries()
427 {
428 if (iqPolicy != Dynamic || numThreads > 1) {
429 int active_threads = (*activeThreads).size();
430
431 std::list<unsigned>::iterator threads = (*activeThreads).begin();
432 std::list<unsigned>::iterator list_end = (*activeThreads).end();
433
434 while (threads != list_end) {
435 if (iqPolicy == Partitioned) {
436 maxEntries[*threads++] = numEntries / active_threads;
437 } else if (iqPolicy == Threshold && active_threads == 1) {
438 maxEntries[*threads++] = numEntries;
439 }
440 }
441 }
442 }
443
444 template <class Impl>
445 unsigned
446 InstructionQueue<Impl>::numFreeEntries()
447 {
448 return freeEntries;
449 }
450
451 template <class Impl>
452 unsigned
453 InstructionQueue<Impl>::numFreeEntries(unsigned tid)
454 {
455 return maxEntries[tid] - count[tid];
456 }
457
458 // Might want to do something more complex if it knows how many instructions
459 // will be issued this cycle.
460 template <class Impl>
461 bool
462 InstructionQueue<Impl>::isFull()
463 {
464 if (freeEntries == 0) {
465 return(true);
466 } else {
467 return(false);
468 }
469 }
470
471 template <class Impl>
472 bool
473 InstructionQueue<Impl>::isFull(unsigned tid)
474 {
475 if (numFreeEntries(tid) == 0) {
476 return(true);
477 } else {
478 return(false);
479 }
480 }
481
482 template <class Impl>
483 bool
484 InstructionQueue<Impl>::hasReadyInsts()
485 {
486 if (!listOrder.empty()) {
487 return true;
488 }
489
490 for (int i = 0; i < Num_OpClasses; ++i) {
491 if (!readyInsts[i].empty()) {
492 return true;
493 }
494 }
495
496 return false;
497 }
498
499 template <class Impl>
500 void
501 InstructionQueue<Impl>::insert(DynInstPtr &new_inst)
502 {
503 // Make sure the instruction is valid
504 assert(new_inst);
505
506 DPRINTF(IQ, "Adding instruction [sn:%lli] PC %#x to the IQ.\n",
507 new_inst->seqNum, new_inst->readPC());
508
509 assert(freeEntries != 0);
510
511 instList[new_inst->threadNumber].push_back(new_inst);
512
513 --freeEntries;
514
515 new_inst->setInIQ();
516
517 // Look through its source registers (physical regs), and mark any
518 // dependencies.
519 addToDependents(new_inst);
520
521 // Have this instruction set itself as the producer of its destination
522 // register(s).
523 addToProducers(new_inst);
524
525 if (new_inst->isMemRef()) {
526 memDepUnit[new_inst->threadNumber].insert(new_inst);
527 } else {
528 addIfReady(new_inst);
529 }
530
531 ++iqInstsAdded;
532
533 count[new_inst->threadNumber]++;
534
535 assert(freeEntries == (numEntries - countInsts()));
536 }
537
538 template <class Impl>
539 void
540 InstructionQueue<Impl>::insertNonSpec(DynInstPtr &new_inst)
541 {
542 // @todo: Clean up this code; can do it by setting inst as unable
543 // to issue, then calling normal insert on the inst.
544
545 assert(new_inst);
546
547 nonSpecInsts[new_inst->seqNum] = new_inst;
548
549 DPRINTF(IQ, "Adding non-speculative instruction [sn:%lli] PC %#x "
550 "to the IQ.\n",
551 new_inst->seqNum, new_inst->readPC());
552
553 assert(freeEntries != 0);
554
555 instList[new_inst->threadNumber].push_back(new_inst);
556
557 --freeEntries;
558
559 new_inst->setInIQ();
560
561 // Have this instruction set itself as the producer of its destination
562 // register(s).
563 addToProducers(new_inst);
564
565 // If it's a memory instruction, add it to the memory dependency
566 // unit.
567 if (new_inst->isMemRef()) {
568 memDepUnit[new_inst->threadNumber].insertNonSpec(new_inst);
569 }
570
571 ++iqNonSpecInstsAdded;
572
573 count[new_inst->threadNumber]++;
574
575 assert(freeEntries == (numEntries - countInsts()));
576 }
577
578 template <class Impl>
579 void
580 InstructionQueue<Impl>::insertBarrier(DynInstPtr &barr_inst)
581 {
582 memDepUnit[barr_inst->threadNumber].insertBarrier(barr_inst);
583
584 insertNonSpec(barr_inst);
585 }
586
587 template <class Impl>
588 typename Impl::DynInstPtr
589 InstructionQueue<Impl>::getInstToExecute()
590 {
591 assert(!instsToExecute.empty());
592 DynInstPtr inst = instsToExecute.front();
593 instsToExecute.pop_front();
594 return inst;
595 }
596
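// addToOrderList() maintains listOrder: at most one entry per op class that
// currently has ready instructions, kept sorted by the sequence number of
// the oldest ready instruction in that class, so that scheduleReadyInsts()
// can visit the ready op classes in oldest-first order.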
597 template <class Impl>
598 void
599 InstructionQueue<Impl>::addToOrderList(OpClass op_class)
600 {
601 assert(!readyInsts[op_class].empty());
602
603 ListOrderEntry queue_entry;
604
605 queue_entry.queueType = op_class;
606
607 queue_entry.oldestInst = readyInsts[op_class].top()->seqNum;
608
609 ListOrderIt list_it = listOrder.begin();
610 ListOrderIt list_end_it = listOrder.end();
611
612 while (list_it != list_end_it) {
613 if ((*list_it).oldestInst > queue_entry.oldestInst) {
614 break;
615 }
616
617 list_it++;
618 }
619
620 readyIt[op_class] = listOrder.insert(list_it, queue_entry);
621 queueOnList[op_class] = true;
622 }
623
624 template <class Impl>
625 void
626 InstructionQueue<Impl>::moveToYoungerInst(ListOrderIt list_order_it)
627 {
628 // Get iterator of next item on the list
629 // Delete the original iterator
630 // Determine if the next item is either the end of the list or younger
631 // than the new instruction. If so, then add in a new iterator right here.
632 // If not, then move along.
633 ListOrderEntry queue_entry;
634 OpClass op_class = (*list_order_it).queueType;
635 ListOrderIt next_it = list_order_it;
636
637 ++next_it;
638
639 queue_entry.queueType = op_class;
640 queue_entry.oldestInst = readyInsts[op_class].top()->seqNum;
641
642 while (next_it != listOrder.end() &&
643 (*next_it).oldestInst < queue_entry.oldestInst) {
644 ++next_it;
645 }
646
647 readyIt[op_class] = listOrder.insert(next_it, queue_entry);
648 }
649
650 template <class Impl>
651 void
652 InstructionQueue<Impl>::processFUCompletion(DynInstPtr &inst, int fu_idx)
653 {
654 DPRINTF(IQ, "Processing FU completion [sn:%lli]\n", inst->seqNum);
655 // The CPU could have been sleeping until this op completed (*extremely*
656 // long latency op). Wake it if it was. This may be overkill.
657 if (isSwitchedOut()) {
658 DPRINTF(IQ, "FU completion not processed, IQ is switched out [sn:%lli]\n",
659 inst->seqNum);
660 return;
661 }
662
663 iewStage->wakeCPU();
664
665 if (fu_idx > -1)
666 fuPool->freeUnitNextCycle(fu_idx);
667
668 // @todo: Ensure that these FU Completions happen at the beginning
669 // of a cycle, otherwise they could add too many instructions to
670 // the queue.
671 issueToExecuteQueue->access(0)->size++;
672 instsToExecute.push_back(inst);
673 }
674
675 // @todo: Figure out a better way to remove the squashed items from the
676 // lists. Checking the top item of each list to see if it's squashed
677 // wastes time and forces jumps.
678 template <class Impl>
679 void
680 InstructionQueue<Impl>::scheduleReadyInsts()
681 {
682 DPRINTF(IQ, "Attempting to schedule ready instructions from "
683 "the IQ.\n");
684
685 IssueStruct *i2e_info = issueToExecuteQueue->access(0);
686
687 // Start with an iterator to the head of the list order.
688 // While issue bandwidth remains and the end of the list has not been
689 // reached, try to get an FU that can execute this op class.
690 // If successful, change oldestInst to the new top of that ready queue,
691 // put the queue back in its proper place in the list, and
692 // increment the iterator.
693 // This avoids repeatedly trying to schedule an op class for which
694 // no FU is available.
695 ListOrderIt order_it = listOrder.begin();
696 ListOrderIt order_end_it = listOrder.end();
697 int total_issued = 0;
698
699 while (total_issued < totalWidth &&
700 iewStage->canIssue() &&
701 order_it != order_end_it) {
702 OpClass op_class = (*order_it).queueType;
703
704 assert(!readyInsts[op_class].empty());
705
706 DynInstPtr issuing_inst = readyInsts[op_class].top();
707
708 assert(issuing_inst->seqNum == (*order_it).oldestInst);
709
710 if (issuing_inst->isSquashed()) {
711 readyInsts[op_class].pop();
712
713 if (!readyInsts[op_class].empty()) {
714 moveToYoungerInst(order_it);
715 } else {
716 readyIt[op_class] = listOrder.end();
717 queueOnList[op_class] = false;
718 }
719
720 listOrder.erase(order_it++);
721
722 ++iqSquashedInstsIssued;
723
724 continue;
725 }
726
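// FU lookup result encoding (as used below): idx == -2 means this op class
// needs no functional unit, idx == -1 means no FU of this class is
// currently free, and idx >= 0 is the index of the FU allocated from fuPool.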
727 int idx = -2;
728 int op_latency = 1;
729 int tid = issuing_inst->threadNumber;
730
731 if (op_class != No_OpClass) {
732 idx = fuPool->getUnit(op_class);
733
734 if (idx > -1) {
735 op_latency = fuPool->getOpLatency(op_class);
736 }
737 }
738
739 // If we have an instruction that doesn't require a FU, or a
740 // valid FU, then schedule for execution.
741 if (idx == -2 || idx != -1) {
742 if (op_latency == 1) {
743 i2e_info->size++;
744 instsToExecute.push_back(issuing_inst);
745
746 // Add the FU onto the list of FU's to be freed next
747 // cycle if we used one.
748 if (idx >= 0)
749 fuPool->freeUnitNextCycle(idx);
750 } else {
751 int issue_latency = fuPool->getIssueLatency(op_class);
752 // Generate completion event for the FU
753 FUCompletion *execution = new FUCompletion(issuing_inst,
754 idx, this);
755
756 execution->schedule(curTick + cpu->cycles(issue_latency - 1));
757
758 // @todo: Enforce that issue_latency == 1 or op_latency
759 if (issue_latency > 1) {
760 // If FU isn't pipelined, then it must be freed
761 // upon the execution completing.
762 execution->setFreeFU();
763 } else {
764 // Add the FU onto the list of FU's to be freed next cycle.
765 fuPool->freeUnitNextCycle(idx);
766 }
767 }
768
769 DPRINTF(IQ, "Thread %i: Issuing instruction PC %#x "
770 "[sn:%lli]\n",
771 tid, issuing_inst->readPC(),
772 issuing_inst->seqNum);
773
774 readyInsts[op_class].pop();
775
776 if (!readyInsts[op_class].empty()) {
777 moveToYoungerInst(order_it);
778 } else {
779 readyIt[op_class] = listOrder.end();
780 queueOnList[op_class] = false;
781 }
782
783 issuing_inst->setIssued();
784 ++total_issued;
785
786 if (!issuing_inst->isMemRef()) {
787 // Memory instructions can not be freed from the IQ until they
788 // complete.
789 ++freeEntries;
790 count[tid]--;
791 issuing_inst->clearInIQ();
792 } else {
793 memDepUnit[tid].issue(issuing_inst);
794 }
795
796 listOrder.erase(order_it++);
797 statIssuedInstType[tid][op_class]++;
798 iewStage->incrWb(issuing_inst->seqNum);
799 } else {
800 statFuBusy[op_class]++;
801 fuBusy[tid]++;
802 ++order_it;
803 }
804 }
805
806 numIssuedDist.sample(total_issued);
807 iqInstsIssued += total_issued;
808
809 // If we issued any instructions, tell the CPU we had activity.
810 if (total_issued) {
811 cpu->activityThisCycle();
812 } else {
813 DPRINTF(IQ, "Not able to schedule any instructions.\n");
814 }
815 }
816
817 template <class Impl>
818 void
819 InstructionQueue<Impl>::scheduleNonSpec(const InstSeqNum &inst)
820 {
821 DPRINTF(IQ, "Marking nonspeculative instruction [sn:%lli] as ready "
822 "to execute.\n", inst);
823
824 NonSpecMapIt inst_it = nonSpecInsts.find(inst);
825
826 assert(inst_it != nonSpecInsts.end());
827
828 unsigned tid = (*inst_it).second->threadNumber;
829
830 (*inst_it).second->setCanIssue();
831
832 if (!(*inst_it).second->isMemRef()) {
833 addIfReady((*inst_it).second);
834 } else {
835 memDepUnit[tid].nonSpecInstReady((*inst_it).second);
836 }
837
838 (*inst_it).second = NULL;
839
840 nonSpecInsts.erase(inst_it);
841 }
842
843 template <class Impl>
844 void
845 InstructionQueue<Impl>::commit(const InstSeqNum &inst, unsigned tid)
846 {
847 DPRINTF(IQ, "[tid:%i]: Committing instructions up to and including "
848 "[sn:%i]\n", tid, inst);
849
850 ListIt iq_it = instList[tid].begin();
851
852 while (iq_it != instList[tid].end() &&
853 (*iq_it)->seqNum <= inst) {
854 ++iq_it;
855 instList[tid].pop_front();
856 }
857
858 assert(freeEntries == (numEntries - countInsts()));
859 }
860
861 template <class Impl>
862 int
863 InstructionQueue<Impl>::wakeDependents(DynInstPtr &completed_inst)
864 {
865 int dependents = 0;
866
867 DPRINTF(IQ, "Waking dependents of completed instruction.\n");
868
869 assert(!completed_inst->isSquashed());
870
871 // Tell the memory dependence unit to wake any dependents on this
872 // instruction if it is a memory instruction. Also complete the memory
873 // instruction at this point since we know it executed without issues.
874 // @todo: Might want to rename "completeMemInst" to something that
875 // indicates that it won't need to be replayed, and call this
876 // earlier. Might not be a big deal.
877 if (completed_inst->isMemRef()) {
878 memDepUnit[completed_inst->threadNumber].wakeDependents(completed_inst);
879 completeMemInst(completed_inst);
880 } else if (completed_inst->isMemBarrier() ||
881 completed_inst->isWriteBarrier()) {
882 memDepUnit[completed_inst->threadNumber].completeBarrier(completed_inst);
883 }
884
885 for (int dest_reg_idx = 0;
886 dest_reg_idx < completed_inst->numDestRegs();
887 dest_reg_idx++)
888 {
889 PhysRegIndex dest_reg =
890 completed_inst->renamedDestRegIdx(dest_reg_idx);
891
892 // Special case of uniq or control registers. They are not
893 // handled by the IQ and thus have no dependency graph entry.
894 // @todo Figure out a cleaner way to handle this.
895 if (dest_reg >= numPhysRegs) {
896 continue;
897 }
898
899 DPRINTF(IQ, "Waking any dependents on register %i.\n",
900 (int) dest_reg);
901
902 //Go through the dependency chain, marking the registers as
903 //ready within the waiting instructions.
904 DynInstPtr dep_inst = dependGraph.pop(dest_reg);
905
906 while (dep_inst) {
907 DPRINTF(IQ, "Waking up a dependent instruction, PC %#x.\n",
908 dep_inst->readPC());
909
910 // Might want to give more information to the instruction
911 // so that it knows which of its source registers is
912 // ready. However that would mean that the dependency
913 // graph entries would need to hold the src_reg_idx.
914 dep_inst->markSrcRegReady();
915
916 addIfReady(dep_inst);
917
918 dep_inst = dependGraph.pop(dest_reg);
919
920 ++dependents;
921 }
922
923 // Reset the head node now that all of its dependents have
924 // been woken up.
925 assert(dependGraph.empty(dest_reg));
926 dependGraph.clearInst(dest_reg);
927
928 // Mark the scoreboard as having that register ready.
929 regScoreboard[dest_reg] = true;
930 }
931 return dependents;
932 }
933
934 template <class Impl>
935 void
936 InstructionQueue<Impl>::addReadyMemInst(DynInstPtr &ready_inst)
937 {
938 OpClass op_class = ready_inst->opClass();
939
940 readyInsts[op_class].push(ready_inst);
941
942 // Will need to reorder the list if either a queue is not on the list,
943 // or it has an older instruction than last time.
944 if (!queueOnList[op_class]) {
945 addToOrderList(op_class);
946 } else if (readyInsts[op_class].top()->seqNum <
947 (*readyIt[op_class]).oldestInst) {
948 listOrder.erase(readyIt[op_class]);
949 addToOrderList(op_class);
950 }
951
952 DPRINTF(IQ, "Instruction is ready to issue, putting it onto "
953 "the ready list, PC %#x opclass:%i [sn:%lli].\n",
954 ready_inst->readPC(), op_class, ready_inst->seqNum);
955 }
956
957 template <class Impl>
958 void
959 InstructionQueue<Impl>::rescheduleMemInst(DynInstPtr &resched_inst)
960 {
961 memDepUnit[resched_inst->threadNumber].reschedule(resched_inst);
962 }
963
964 template <class Impl>
965 void
966 InstructionQueue<Impl>::replayMemInst(DynInstPtr &replay_inst)
967 {
968 memDepUnit[replay_inst->threadNumber].replay(replay_inst);
969 }
970
971 template <class Impl>
972 void
973 InstructionQueue<Impl>::completeMemInst(DynInstPtr &completed_inst)
974 {
975 int tid = completed_inst->threadNumber;
976
977 DPRINTF(IQ, "Completing mem instruction PC:%#x [sn:%lli]\n",
978 completed_inst->readPC(), completed_inst->seqNum);
979
980 ++freeEntries;
981
982 completed_inst->memOpDone = true;
983
984 memDepUnit[tid].completed(completed_inst);
985
986 count[tid]--;
987 }
988
989 template <class Impl>
990 void
991 InstructionQueue<Impl>::violation(DynInstPtr &store,
992 DynInstPtr &faulting_load)
993 {
994 memDepUnit[store->threadNumber].violation(store, faulting_load);
995 }
996
997 template <class Impl>
998 void
999 InstructionQueue<Impl>::squash(unsigned tid)
1000 {
1001 DPRINTF(IQ, "[tid:%i]: Starting to squash instructions in "
1002 "the IQ.\n", tid);
1003
1004 // Read instruction sequence number of last instruction out of the
1005 // time buffer.
1006 #if ISA_HAS_DELAY_SLOT
1007 squashedSeqNum[tid] = fromCommit->commitInfo[tid].bdelayDoneSeqNum;
1008 #else
1009 squashedSeqNum[tid] = fromCommit->commitInfo[tid].doneSeqNum;
1010 #endif
1011
1012 // Call doSquash if there are insts in the IQ
1013 if (count[tid] > 0) {
1014 doSquash(tid);
1015 }
1016
1017 // Also tell the memory dependence unit to squash.
1018 memDepUnit[tid].squash(squashedSeqNum[tid], tid);
1019 }
1020
1021 template <class Impl>
1022 void
1023 InstructionQueue<Impl>::doSquash(unsigned tid)
1024 {
1025 // Start at the tail.
1026 ListIt squash_it = instList[tid].end();
1027 --squash_it;
1028
1029 DPRINTF(IQ, "[tid:%i]: Squashing until sequence number %i!\n",
1030 tid, squashedSeqNum[tid]);
1031
1032 // Squash any instructions younger than the squashed sequence number
1033 // given.
1034 while (squash_it != instList[tid].end() &&
1035 (*squash_it)->seqNum > squashedSeqNum[tid]) {
1036
1037 DynInstPtr squashed_inst = (*squash_it);
1038
1039 // Only handle the instruction if it actually is in the IQ and
1040 // hasn't already been squashed in the IQ.
1041 if (squashed_inst->threadNumber != tid ||
1042 squashed_inst->isSquashedInIQ()) {
1043 --squash_it;
1044 continue;
1045 }
1046
1047 if (!squashed_inst->isIssued() ||
1048 (squashed_inst->isMemRef() &&
1049 !squashed_inst->memOpDone)) {
1050
1051 DPRINTF(IQ, "[tid:%i]: Instruction [sn:%lli] PC %#x "
1052 "squashed.\n",
1053 tid, squashed_inst->seqNum, squashed_inst->readPC());
1054
1055 // Remove the instruction from the dependency list.
1056 if (!squashed_inst->isNonSpeculative() &&
1057 !squashed_inst->isStoreConditional() &&
1058 !squashed_inst->isMemBarrier() &&
1059 !squashed_inst->isWriteBarrier()) {
1060
1061 for (int src_reg_idx = 0;
1062 src_reg_idx < squashed_inst->numSrcRegs();
1063 src_reg_idx++)
1064 {
1065 PhysRegIndex src_reg =
1066 squashed_inst->renamedSrcRegIdx(src_reg_idx);
1067
1068 // Only remove it from the dependency graph if it
1069 // was placed there in the first place.
1070
1071 // Instead of doing a linked list traversal, we
1072 // can just remove these squashed instructions
1073 // either at issue time, or when the register is
1074 // overwritten. The only downside to this is it
1075 // leaves more room for error.
1076
1077 if (!squashed_inst->isReadySrcRegIdx(src_reg_idx) &&
1078 src_reg < numPhysRegs) {
1079 dependGraph.remove(src_reg, squashed_inst);
1080 }
1081
1082
1083 ++iqSquashedOperandsExamined;
1084 }
1085 } else if (!squashed_inst->isStoreConditional() || !squashed_inst->isCompleted()) {
1086 NonSpecMapIt ns_inst_it =
1087 nonSpecInsts.find(squashed_inst->seqNum);
1088 assert(ns_inst_it != nonSpecInsts.end());
1089
1090 (*ns_inst_it).second = NULL;
1091
1092 nonSpecInsts.erase(ns_inst_it);
1093
1094 ++iqSquashedNonSpecRemoved;
1095 }
1096
1097 // Might want to also clear out the head of the dependency graph.
1098
1099 // Mark it as squashed within the IQ.
1100 squashed_inst->setSquashedInIQ();
1101
1102 // @todo: Remove this hack where several statuses are set so the
1103 // inst will flow through the rest of the pipeline.
1104 squashed_inst->setIssued();
1105 squashed_inst->setCanCommit();
1106 squashed_inst->clearInIQ();
1107
1108 //Update Thread IQ Count
1109 count[squashed_inst->threadNumber]--;
1110
1111 ++freeEntries;
1112 }
1113
1114 instList[tid].erase(squash_it--);
1115 ++iqSquashedInstsExamined;
1116 }
1117 }
1118
1119 template <class Impl>
1120 bool
1121 InstructionQueue<Impl>::addToDependents(DynInstPtr &new_inst)
1122 {
1123 // Loop through the instruction's source registers, adding
1124 // them to the dependency list if they are not ready.
1125 int8_t total_src_regs = new_inst->numSrcRegs();
1126 bool return_val = false;
1127
1128 for (int src_reg_idx = 0;
1129 src_reg_idx < total_src_regs;
1130 src_reg_idx++)
1131 {
1132 // Only add it to the dependency graph if it's not ready.
1133 if (!new_inst->isReadySrcRegIdx(src_reg_idx)) {
1134 PhysRegIndex src_reg = new_inst->renamedSrcRegIdx(src_reg_idx);
1135
1136 // Check the IQ's scoreboard to make sure the register
1137 // hasn't become ready while the instruction was in flight
1138 // between stages. Only if it really isn't ready should
1139 // it be added to the dependency graph.
1140 if (src_reg >= numPhysRegs) {
1141 continue;
1142 } else if (regScoreboard[src_reg] == false) {
1143 DPRINTF(IQ, "Instruction PC %#x has src reg %i that "
1144 "is being added to the dependency chain.\n",
1145 new_inst->readPC(), src_reg);
1146
1147 dependGraph.insert(src_reg, new_inst);
1148
1149 // Change the return value to indicate that something
1150 // was added to the dependency graph.
1151 return_val = true;
1152 } else {
1153 DPRINTF(IQ, "Instruction PC %#x has src reg %i that "
1154 "became ready before it reached the IQ.\n",
1155 new_inst->readPC(), src_reg);
1156 // Mark a register ready within the instruction.
1157 new_inst->markSrcRegReady(src_reg_idx);
1158 }
1159 }
1160 }
1161
1162 return return_val;
1163 }
1164
1165 template <class Impl>
1166 void
1167 InstructionQueue<Impl>::addToProducers(DynInstPtr &new_inst)
1168 {
1169 // Nothing really needs to be marked when an instruction becomes
1170 // the producer of a register's value, but for convenience a ptr
1171 // to the producing instruction will be placed in the head node of
1172 // the dependency links.
1173 int8_t total_dest_regs = new_inst->numDestRegs();
1174
1175 for (int dest_reg_idx = 0;
1176 dest_reg_idx < total_dest_regs;
1177 dest_reg_idx++)
1178 {
1179 PhysRegIndex dest_reg = new_inst->renamedDestRegIdx(dest_reg_idx);
1180
1181 // Instructions that use the misc regs will have a reg number
1182 // higher than the normal physical registers. In this case these
1183 // registers are not renamed, and there is no need to track
1184 // dependencies as these instructions must be executed at commit.
1185 if (dest_reg >= numPhysRegs) {
1186 continue;
1187 }
1188
1189 if (!dependGraph.empty(dest_reg)) {
1190 dependGraph.dump();
1191 panic("Dependency graph entry %i not empty!", dest_reg);
1192 }
1193
1194 dependGraph.setInst(dest_reg, new_inst);
1195
1196 // Mark the scoreboard to say it's not yet ready.
1197 regScoreboard[dest_reg] = false;
1198 }
1199 }
1200
1201 template <class Impl>
1202 void
1203 InstructionQueue<Impl>::addIfReady(DynInstPtr &inst)
1204 {
1205 // If the instruction now has all of its source registers
1206 // available, then add it to the list of ready instructions.
1207 if (inst->readyToIssue()) {
1208
1209 //Add the instruction to the proper ready list.
1210 if (inst->isMemRef()) {
1211
1212 DPRINTF(IQ, "Checking if memory instruction can issue.\n");
1213
1214 // Message to the mem dependence unit that this instruction has
1215 // its registers ready.
1216 memDepUnit[inst->threadNumber].regsReady(inst);
1217
1218 return;
1219 }
1220
1221 OpClass op_class = inst->opClass();
1222
1223 DPRINTF(IQ, "Instruction is ready to issue, putting it onto "
1224 "the ready list, PC %#x opclass:%i [sn:%lli].\n",
1225 inst->readPC(), op_class, inst->seqNum);
1226
1227 readyInsts[op_class].push(inst);
1228
1229 // Will need to reorder the list if either a queue is not on the list,
1230 // or it has an older instruction than last time.
1231 if (!queueOnList[op_class]) {
1232 addToOrderList(op_class);
1233 } else if (readyInsts[op_class].top()->seqNum <
1234 (*readyIt[op_class]).oldestInst) {
1235 listOrder.erase(readyIt[op_class]);
1236 addToOrderList(op_class);
1237 }
1238 }
1239 }
1240
1241 template <class Impl>
1242 int
1243 InstructionQueue<Impl>::countInsts()
1244 {
1245 #if 0
1246 //ksewell: This works, but it could definitely use a cleaner rewrite
1247 //with a more intuitive way of counting. Right now it's
1248 //just brute force ....
1249 // Change the #if if you want to use this method.
1250 int total_insts = 0;
1251
1252 for (int i = 0; i < numThreads; ++i) {
1253 ListIt count_it = instList[i].begin();
1254
1255 while (count_it != instList[i].end()) {
1256 if (!(*count_it)->isSquashed() && !(*count_it)->isSquashedInIQ()) {
1257 if (!(*count_it)->isIssued()) {
1258 ++total_insts;
1259 } else if ((*count_it)->isMemRef() &&
1260 !(*count_it)->memOpDone) {
1261 // Loads that have not been marked as executed still count
1262 // towards the total instructions.
1263 ++total_insts;
1264 }
1265 }
1266
1267 ++count_it;
1268 }
1269 }
1270
1271 return total_insts;
1272 #else
1273 return numEntries - freeEntries;
1274 #endif
1275 }
1276
1277 template <class Impl>
1278 void
1279 InstructionQueue<Impl>::dumpLists()
1280 {
1281 for (int i = 0; i < Num_OpClasses; ++i) {
1282 cprintf("Ready list %i size: %i\n", i, readyInsts[i].size());
1283
1284 cprintf("\n");
1285 }
1286
1287 cprintf("Non speculative list size: %i\n", nonSpecInsts.size());
1288
1289 NonSpecMapIt non_spec_it = nonSpecInsts.begin();
1290 NonSpecMapIt non_spec_end_it = nonSpecInsts.end();
1291
1292 cprintf("Non speculative list: ");
1293
1294 while (non_spec_it != non_spec_end_it) {
1295 cprintf("%#x [sn:%lli]", (*non_spec_it).second->readPC(),
1296 (*non_spec_it).second->seqNum);
1297 ++non_spec_it;
1298 }
1299
1300 cprintf("\n");
1301
1302 ListOrderIt list_order_it = listOrder.begin();
1303 ListOrderIt list_order_end_it = listOrder.end();
1304 int i = 1;
1305
1306 cprintf("List order: ");
1307
1308 while (list_order_it != list_order_end_it) {
1309 cprintf("%i OpClass:%i [sn:%lli] ", i, (*list_order_it).queueType,
1310 (*list_order_it).oldestInst);
1311
1312 ++list_order_it;
1313 ++i;
1314 }
1315
1316 cprintf("\n");
1317 }
1318
1319
1320 template <class Impl>
1321 void
1322 InstructionQueue<Impl>::dumpInsts()
1323 {
1324 for (int i = 0; i < numThreads; ++i) {
1325 int num = 0;
1326 int valid_num = 0;
1327 ListIt inst_list_it = instList[i].begin();
1328
1329 while (inst_list_it != instList[i].end())
1330 {
1331 cprintf("Instruction:%i\n",
1332 num);
1333 if (!(*inst_list_it)->isSquashed()) {
1334 if (!(*inst_list_it)->isIssued()) {
1335 ++valid_num;
1336 cprintf("Count:%i\n", valid_num);
1337 } else if ((*inst_list_it)->isMemRef() &&
1338 !(*inst_list_it)->memOpDone) {
1339 // Loads that have not been marked as executed
1340 // still count towards the total instructions.
1341 ++valid_num;
1342 cprintf("Count:%i\n", valid_num);
1343 }
1344 }
1345
1346 cprintf("PC:%#x\n[sn:%lli]\n[tid:%i]\n"
1347 "Issued:%i\nSquashed:%i\n",
1348 (*inst_list_it)->readPC(),
1349 (*inst_list_it)->seqNum,
1350 (*inst_list_it)->threadNumber,
1351 (*inst_list_it)->isIssued(),
1352 (*inst_list_it)->isSquashed());
1353
1354 if ((*inst_list_it)->isMemRef()) {
1355 cprintf("MemOpDone:%i\n", (*inst_list_it)->memOpDone);
1356 }
1357
1358 cprintf("\n");
1359
1360 inst_list_it++;
1361 ++num;
1362 }
1363 }
1364
1365 cprintf("Insts to Execute list:\n");
1366
1367 int num = 0;
1368 int valid_num = 0;
1369 ListIt inst_list_it = instsToExecute.begin();
1370
1371 while (inst_list_it != instsToExecute.end())
1372 {
1373 cprintf("Instruction:%i\n",
1374 num);
1375 if (!(*inst_list_it)->isSquashed()) {
1376 if (!(*inst_list_it)->isIssued()) {
1377 ++valid_num;
1378 cprintf("Count:%i\n", valid_num);
1379 } else if ((*inst_list_it)->isMemRef() &&
1380 !(*inst_list_it)->memOpDone) {
1381 // Loads that have not been marked as executed
1382 // still count towards the total instructions.
1383 ++valid_num;
1384 cprintf("Count:%i\n", valid_num);
1385 }
1386 }
1387
1388 cprintf("PC:%#x\n[sn:%lli]\n[tid:%i]\n"
1389 "Issued:%i\nSquashed:%i\n",
1390 (*inst_list_it)->readPC(),
1391 (*inst_list_it)->seqNum,
1392 (*inst_list_it)->threadNumber,
1393 (*inst_list_it)->isIssued(),
1394 (*inst_list_it)->isSquashed());
1395
1396 if ((*inst_list_it)->isMemRef()) {
1397 cprintf("MemOpDone:%i\n", (*inst_list_it)->memOpDone);
1398 }
1399
1400 cprintf("\n");
1401
1402 inst_list_it++;
1403 ++num;
1404 }
1405 }