[gem5.git] / src / cpu / o3 / inst_queue_impl.hh
1 /*
2 * Copyright (c) 2004-2006 The Regents of The University of Michigan
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 *
28 * Authors: Kevin Lim
29 * Korey Sewell
30 */
31
32 #include <limits>
33 #include <vector>
34
35 #include "sim/root.hh"
36
37 #include "cpu/o3/fu_pool.hh"
38 #include "cpu/o3/inst_queue.hh"
39
40 using namespace std;
41
42 template <class Impl>
43 InstructionQueue<Impl>::FUCompletion::FUCompletion(DynInstPtr &_inst,
44 int fu_idx,
45 InstructionQueue<Impl> *iq_ptr)
46 : Event(&mainEventQueue, Stat_Event_Pri),
47 inst(_inst), fuIdx(fu_idx), iqPtr(iq_ptr), freeFU(false)
48 {
49 this->setFlags(Event::AutoDelete);
50 }
51
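// Pass the FU index to the IQ only when this completion is responsible for
// freeing the unit (setFreeFU() is called for non-pipelined FUs); otherwise
// pass -1 so a unit that was already freed at issue time is not freed twice.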
52 template <class Impl>
53 void
54 InstructionQueue<Impl>::FUCompletion::process()
55 {
56 iqPtr->processFUCompletion(inst, freeFU ? fuIdx : -1);
57 inst = NULL;
58 }
59
60
61 template <class Impl>
62 const char *
63 InstructionQueue<Impl>::FUCompletion::description()
64 {
65 return "Functional unit completion event";
66 }
67
68 template <class Impl>
69 InstructionQueue<Impl>::InstructionQueue(Params *params)
70 : fuPool(params->fuPool),
71 numEntries(params->numIQEntries),
72 totalWidth(params->issueWidth),
73 numPhysIntRegs(params->numPhysIntRegs),
74 numPhysFloatRegs(params->numPhysFloatRegs),
75 commitToIEWDelay(params->commitToIEWDelay)
76 {
77 assert(fuPool);
78
79 switchedOut = false;
80
81 numThreads = params->numberOfThreads;
82
83 // Set the number of physical registers as the number of int + float
84 numPhysRegs = numPhysIntRegs + numPhysFloatRegs;
85
86 DPRINTF(IQ, "There are %i physical registers.\n", numPhysRegs);
87
88 //Create an entry for each physical register within the
89 //dependency graph.
90 dependGraph.resize(numPhysRegs);
91
92 // Resize the register scoreboard.
93 regScoreboard.resize(numPhysRegs);
94
95 //Initialize Mem Dependence Units
96 for (int i = 0; i < numThreads; i++) {
97 memDepUnit[i].init(params,i);
98 memDepUnit[i].setIQ(this);
99 }
100
101 resetState();
102
103 string policy = params->smtIQPolicy;
104
105 //Convert string to lowercase
106 std::transform(policy.begin(), policy.end(), policy.begin(),
107 (int(*)(int)) tolower);
108
109 //Figure out resource sharing policy
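    // Dynamic: all threads compete for every IQ entry.  Partitioned: the
    // entries are split evenly among threads.  Threshold: each thread is
    // capped at a fixed percentage (smtIQThreshold) of the total entries.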
110 if (policy == "dynamic") {
111 iqPolicy = Dynamic;
112
113         //Set Max Entries to Total IQ Capacity
114 for (int i = 0; i < numThreads; i++) {
115 maxEntries[i] = numEntries;
116 }
117
118 } else if (policy == "partitioned") {
119 iqPolicy = Partitioned;
120
121         //@todo: Make this work when part_amt doesn't divide numEntries evenly.
122 int part_amt = numEntries / numThreads;
123
124         //Divide the IQ up evenly
125 for (int i = 0; i < numThreads; i++) {
126 maxEntries[i] = part_amt;
127 }
128
129         DPRINTF(IQ, "IQ sharing policy set to Partitioned: "
130                 "%i entries per thread.\n", part_amt);
131
132 } else if (policy == "threshold") {
133 iqPolicy = Threshold;
134
135 double threshold = (double)params->smtIQThreshold / 100;
136
137 int thresholdIQ = (int)((double)threshold * numEntries);
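        // Example: with numEntries = 64 and smtIQThreshold = 50, threshold
        // is 0.5 and every thread is capped at 32 IQ entries.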
138
139 //Divide up by threshold amount
140 for (int i = 0; i < numThreads; i++) {
141 maxEntries[i] = thresholdIQ;
142 }
143
144         DPRINTF(IQ, "IQ sharing policy set to Threshold: "
145                 "%i entries per thread.\n", thresholdIQ);
146 } else {
147         assert(0 && "Invalid IQ sharing policy. Options are: Dynamic, "
148                "Partitioned, Threshold");
149 }
150 }
151
152 template <class Impl>
153 InstructionQueue<Impl>::~InstructionQueue()
154 {
155 dependGraph.reset();
156 #ifdef DEBUG
157 cprintf("Nodes traversed: %i, removed: %i\n",
158 dependGraph.nodesTraversed, dependGraph.nodesRemoved);
159 #endif
160 }
161
162 template <class Impl>
163 std::string
164 InstructionQueue<Impl>::name() const
165 {
166 return cpu->name() + ".iq";
167 }
168
169 template <class Impl>
170 void
171 InstructionQueue<Impl>::regStats()
172 {
173 using namespace Stats;
174 iqInstsAdded
175 .name(name() + ".iqInstsAdded")
176 .desc("Number of instructions added to the IQ (excludes non-spec)")
177 .prereq(iqInstsAdded);
178
179 iqNonSpecInstsAdded
180 .name(name() + ".iqNonSpecInstsAdded")
181 .desc("Number of non-speculative instructions added to the IQ")
182 .prereq(iqNonSpecInstsAdded);
183
184 iqInstsIssued
185 .name(name() + ".iqInstsIssued")
186 .desc("Number of instructions issued")
187 .prereq(iqInstsIssued);
188
189 iqIntInstsIssued
190 .name(name() + ".iqIntInstsIssued")
191 .desc("Number of integer instructions issued")
192 .prereq(iqIntInstsIssued);
193
194 iqFloatInstsIssued
195 .name(name() + ".iqFloatInstsIssued")
196 .desc("Number of float instructions issued")
197 .prereq(iqFloatInstsIssued);
198
199 iqBranchInstsIssued
200 .name(name() + ".iqBranchInstsIssued")
201 .desc("Number of branch instructions issued")
202 .prereq(iqBranchInstsIssued);
203
204 iqMemInstsIssued
205 .name(name() + ".iqMemInstsIssued")
206 .desc("Number of memory instructions issued")
207 .prereq(iqMemInstsIssued);
208
209 iqMiscInstsIssued
210 .name(name() + ".iqMiscInstsIssued")
211 .desc("Number of miscellaneous instructions issued")
212 .prereq(iqMiscInstsIssued);
213
214 iqSquashedInstsIssued
215 .name(name() + ".iqSquashedInstsIssued")
216 .desc("Number of squashed instructions issued")
217 .prereq(iqSquashedInstsIssued);
218
219 iqSquashedInstsExamined
220 .name(name() + ".iqSquashedInstsExamined")
221 .desc("Number of squashed instructions iterated over during squash;"
222 " mainly for profiling")
223 .prereq(iqSquashedInstsExamined);
224
225 iqSquashedOperandsExamined
226 .name(name() + ".iqSquashedOperandsExamined")
227 .desc("Number of squashed operands that are examined and possibly "
228 "removed from graph")
229 .prereq(iqSquashedOperandsExamined);
230
231 iqSquashedNonSpecRemoved
232 .name(name() + ".iqSquashedNonSpecRemoved")
233 .desc("Number of squashed non-spec instructions that were removed")
234 .prereq(iqSquashedNonSpecRemoved);
235
236 queueResDist
237 .init(Num_OpClasses, 0, 99, 2)
238 .name(name() + ".IQ:residence:")
239 .desc("cycles from dispatch to issue")
240 .flags(total | pdf | cdf )
241 ;
242 for (int i = 0; i < Num_OpClasses; ++i) {
243 queueResDist.subname(i, opClassStrings[i]);
244 }
245 numIssuedDist
246 .init(0,totalWidth,1)
247 .name(name() + ".ISSUE:issued_per_cycle")
248 .desc("Number of insts issued each cycle")
249 .flags(pdf)
250 ;
251 /*
252 dist_unissued
253 .init(Num_OpClasses+2)
254 .name(name() + ".ISSUE:unissued_cause")
255 .desc("Reason ready instruction not issued")
256 .flags(pdf | dist)
257 ;
258 for (int i=0; i < (Num_OpClasses + 2); ++i) {
259 dist_unissued.subname(i, unissued_names[i]);
260 }
261 */
262 statIssuedInstType
263 .init(numThreads,Num_OpClasses)
264 .name(name() + ".ISSUE:FU_type")
265 .desc("Type of FU issued")
266 .flags(total | pdf | dist)
267 ;
268 statIssuedInstType.ysubnames(opClassStrings);
269
270 //
271 // How long did instructions for a particular FU type wait prior to issue
272 //
273
274 issueDelayDist
275 .init(Num_OpClasses,0,99,2)
276 .name(name() + ".ISSUE:")
277 .desc("cycles from operands ready to issue")
278 .flags(pdf | cdf)
279 ;
280
281 for (int i=0; i<Num_OpClasses; ++i) {
282 stringstream subname;
283 subname << opClassStrings[i] << "_delay";
284 issueDelayDist.subname(i, subname.str());
285 }
286
287 issueRate
288 .name(name() + ".ISSUE:rate")
289 .desc("Inst issue rate")
290 .flags(total)
291 ;
292 issueRate = iqInstsIssued / cpu->numCycles;
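    // This assignment defines a derived (formula) stat; the ratio is
    // evaluated when statistics are dumped, not recomputed every cycle.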
293
294 statFuBusy
295 .init(Num_OpClasses)
296 .name(name() + ".ISSUE:fu_full")
297 .desc("attempts to use FU when none available")
298 .flags(pdf | dist)
299 ;
300 for (int i=0; i < Num_OpClasses; ++i) {
301 statFuBusy.subname(i, opClassStrings[i]);
302 }
303
304 fuBusy
305 .init(numThreads)
306 .name(name() + ".ISSUE:fu_busy_cnt")
307 .desc("FU busy when requested")
308 .flags(total)
309 ;
310
311 fuBusyRate
312 .name(name() + ".ISSUE:fu_busy_rate")
313 .desc("FU busy rate (busy events/executed inst)")
314 .flags(total)
315 ;
316 fuBusyRate = fuBusy / iqInstsIssued;
317
318     for (int i = 0; i < numThreads; i++) {
319 // Tell mem dependence unit to reg stats as well.
320 memDepUnit[i].regStats();
321 }
322 }
323
324 template <class Impl>
325 void
326 InstructionQueue<Impl>::resetState()
327 {
328 //Initialize thread IQ counts
329     for (int i = 0; i < numThreads; i++) {
330 count[i] = 0;
331 instList[i].clear();
332 }
333
334 // Initialize the number of free IQ entries.
335 freeEntries = numEntries;
336
337 // Note that in actuality, the registers corresponding to the logical
338 // registers start off as ready. However this doesn't matter for the
339 // IQ as the instruction should have been correctly told if those
340 // registers are ready in rename. Thus it can all be initialized as
341 // unready.
342 for (int i = 0; i < numPhysRegs; ++i) {
343 regScoreboard[i] = false;
344 }
345
346 for (int i = 0; i < numThreads; ++i) {
347 squashedSeqNum[i] = 0;
348 }
349
350 for (int i = 0; i < Num_OpClasses; ++i) {
351 while (!readyInsts[i].empty())
352 readyInsts[i].pop();
353 queueOnList[i] = false;
354 readyIt[i] = listOrder.end();
355 }
356 nonSpecInsts.clear();
357 listOrder.clear();
358 }
359
360 template <class Impl>
361 void
362 InstructionQueue<Impl>::setActiveThreads(list<unsigned> *at_ptr)
363 {
364 DPRINTF(IQ, "Setting active threads list pointer.\n");
365 activeThreads = at_ptr;
366 }
367
368 template <class Impl>
369 void
370 InstructionQueue<Impl>::setIssueToExecuteQueue(TimeBuffer<IssueStruct> *i2e_ptr)
371 {
372 DPRINTF(IQ, "Set the issue to execute queue.\n");
373 issueToExecuteQueue = i2e_ptr;
374 }
375
376 template <class Impl>
377 void
378 InstructionQueue<Impl>::setTimeBuffer(TimeBuffer<TimeStruct> *tb_ptr)
379 {
380 DPRINTF(IQ, "Set the time buffer.\n");
381 timeBuffer = tb_ptr;
382
383 fromCommit = timeBuffer->getWire(-commitToIEWDelay);
384 }
385
386 template <class Impl>
387 void
388 InstructionQueue<Impl>::switchOut()
389 {
390 resetState();
391 dependGraph.reset();
392 switchedOut = true;
393 for (int i = 0; i < numThreads; ++i) {
394 memDepUnit[i].switchOut();
395 }
396 }
397
398 template <class Impl>
399 void
400 InstructionQueue<Impl>::takeOverFrom()
401 {
402 switchedOut = false;
403 }
404
405 template <class Impl>
406 int
407 InstructionQueue<Impl>::entryAmount(int num_threads)
408 {
409 if (iqPolicy == Partitioned) {
410 return numEntries / num_threads;
411 } else {
412 return 0;
413 }
414 }
415
416
417 template <class Impl>
418 void
419 InstructionQueue<Impl>::resetEntries()
420 {
421 if (iqPolicy != Dynamic || numThreads > 1) {
422 int active_threads = (*activeThreads).size();
423
424 list<unsigned>::iterator threads = (*activeThreads).begin();
425 list<unsigned>::iterator list_end = (*activeThreads).end();
426
427 while (threads != list_end) {
428 if (iqPolicy == Partitioned) {
429 maxEntries[*threads++] = numEntries / active_threads;
430             } else if (iqPolicy == Threshold && active_threads == 1) {
431 maxEntries[*threads++] = numEntries;
432 }
433 }
434 }
435 }
436
437 template <class Impl>
438 unsigned
439 InstructionQueue<Impl>::numFreeEntries()
440 {
441 return freeEntries;
442 }
443
444 template <class Impl>
445 unsigned
446 InstructionQueue<Impl>::numFreeEntries(unsigned tid)
447 {
448 return maxEntries[tid] - count[tid];
449 }
450
451 // Might want to do something more complex if it knows how many instructions
452 // will be issued this cycle.
453 template <class Impl>
454 bool
455 InstructionQueue<Impl>::isFull()
456 {
457 if (freeEntries == 0) {
458 return(true);
459 } else {
460 return(false);
461 }
462 }
463
464 template <class Impl>
465 bool
466 InstructionQueue<Impl>::isFull(unsigned tid)
467 {
468 if (numFreeEntries(tid) == 0) {
469 return(true);
470 } else {
471 return(false);
472 }
473 }
474
475 template <class Impl>
476 bool
477 InstructionQueue<Impl>::hasReadyInsts()
478 {
479 if (!listOrder.empty()) {
480 return true;
481 }
482
483 for (int i = 0; i < Num_OpClasses; ++i) {
484 if (!readyInsts[i].empty()) {
485 return true;
486 }
487 }
488
489 return false;
490 }
491
492 template <class Impl>
493 void
494 InstructionQueue<Impl>::insert(DynInstPtr &new_inst)
495 {
496 // Make sure the instruction is valid
497 assert(new_inst);
498
499 DPRINTF(IQ, "Adding instruction [sn:%lli] PC %#x to the IQ.\n",
500 new_inst->seqNum, new_inst->readPC());
501
502 assert(freeEntries != 0);
503
504 instList[new_inst->threadNumber].push_back(new_inst);
505
506 --freeEntries;
507
508 new_inst->setInIQ();
509
510 // Look through its source registers (physical regs), and mark any
511 // dependencies.
512 addToDependents(new_inst);
513
514 // Have this instruction set itself as the producer of its destination
515 // register(s).
516 addToProducers(new_inst);
517
518 if (new_inst->isMemRef()) {
519 memDepUnit[new_inst->threadNumber].insert(new_inst);
520 } else {
521 addIfReady(new_inst);
522 }
523
524 ++iqInstsAdded;
525
526 count[new_inst->threadNumber]++;
527
528 assert(freeEntries == (numEntries - countInsts()));
529 }
530
531 template <class Impl>
532 void
533 InstructionQueue<Impl>::insertNonSpec(DynInstPtr &new_inst)
534 {
535 // @todo: Clean up this code; can do it by setting inst as unable
536 // to issue, then calling normal insert on the inst.
537
538 assert(new_inst);
539
540 nonSpecInsts[new_inst->seqNum] = new_inst;
541
542 DPRINTF(IQ, "Adding non-speculative instruction [sn:%lli] PC %#x "
543 "to the IQ.\n",
544 new_inst->seqNum, new_inst->readPC());
545
546 assert(freeEntries != 0);
547
548 instList[new_inst->threadNumber].push_back(new_inst);
549
550 --freeEntries;
551
552 new_inst->setInIQ();
553
554 // Have this instruction set itself as the producer of its destination
555 // register(s).
556 addToProducers(new_inst);
557
558 // If it's a memory instruction, add it to the memory dependency
559 // unit.
560 if (new_inst->isMemRef()) {
561 memDepUnit[new_inst->threadNumber].insertNonSpec(new_inst);
562 }
563
564 ++iqNonSpecInstsAdded;
565
566 count[new_inst->threadNumber]++;
567
568 assert(freeEntries == (numEntries - countInsts()));
569 }
570
571 template <class Impl>
572 void
573 InstructionQueue<Impl>::insertBarrier(DynInstPtr &barr_inst)
574 {
575 memDepUnit[barr_inst->threadNumber].insertBarrier(barr_inst);
576
577 insertNonSpec(barr_inst);
578 }
579
580 template <class Impl>
581 typename Impl::DynInstPtr
582 InstructionQueue<Impl>::getInstToExecute()
583 {
584 assert(!instsToExecute.empty());
585 DynInstPtr inst = instsToExecute.front();
586 instsToExecute.pop_front();
587 return inst;
588 }
589
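// listOrder holds at most one entry per op class that currently has ready
// instructions, kept sorted by the sequence number of the oldest ready
// instruction in that class.  scheduleReadyInsts() walks this list so that
// issue proceeds oldest-first across op classes.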
590 template <class Impl>
591 void
592 InstructionQueue<Impl>::addToOrderList(OpClass op_class)
593 {
594 assert(!readyInsts[op_class].empty());
595
596 ListOrderEntry queue_entry;
597
598 queue_entry.queueType = op_class;
599
600 queue_entry.oldestInst = readyInsts[op_class].top()->seqNum;
601
602 ListOrderIt list_it = listOrder.begin();
603 ListOrderIt list_end_it = listOrder.end();
604
605 while (list_it != list_end_it) {
606 if ((*list_it).oldestInst > queue_entry.oldestInst) {
607 break;
608 }
609
610 list_it++;
611 }
612
613 readyIt[op_class] = listOrder.insert(list_it, queue_entry);
614 queueOnList[op_class] = true;
615 }
616
617 template <class Impl>
618 void
619 InstructionQueue<Impl>::moveToYoungerInst(ListOrderIt list_order_it)
620 {
621     // Get an iterator to the next entry in the list, then walk forward
622     // until reaching either the end of the list or an entry that is
623     // younger than this op class's new oldest instruction, and insert
624     // the updated entry at that point.  The caller erases the original
625     // entry for this op class.
626 ListOrderEntry queue_entry;
627 OpClass op_class = (*list_order_it).queueType;
628 ListOrderIt next_it = list_order_it;
629
630 ++next_it;
631
632 queue_entry.queueType = op_class;
633 queue_entry.oldestInst = readyInsts[op_class].top()->seqNum;
634
635 while (next_it != listOrder.end() &&
636 (*next_it).oldestInst < queue_entry.oldestInst) {
637 ++next_it;
638 }
639
640 readyIt[op_class] = listOrder.insert(next_it, queue_entry);
641 }
642
643 template <class Impl>
644 void
645 InstructionQueue<Impl>::processFUCompletion(DynInstPtr &inst, int fu_idx)
646 {
647 // The CPU could have been sleeping until this op completed (*extremely*
648 // long latency op). Wake it if it was. This may be overkill.
649 if (isSwitchedOut()) {
650 return;
651 }
652
653 iewStage->wakeCPU();
654
655 if (fu_idx > -1)
656 fuPool->freeUnitNextCycle(fu_idx);
657
658 // @todo: Ensure that these FU Completions happen at the beginning
659 // of a cycle, otherwise they could add too many instructions to
660 // the queue.
661 issueToExecuteQueue->access(0)->size++;
662 instsToExecute.push_back(inst);
663 }
664
665 // @todo: Figure out a better way to remove the squashed items from the
666 // lists. Checking the top item of each list to see if it's squashed
667 // wastes time and forces jumps.
668 template <class Impl>
669 void
670 InstructionQueue<Impl>::scheduleReadyInsts()
671 {
672 DPRINTF(IQ, "Attempting to schedule ready instructions from "
673 "the IQ.\n");
674
675 IssueStruct *i2e_info = issueToExecuteQueue->access(0);
676
677     // Walk the list of ready op classes, starting at the head.  While
678     // issue bandwidth remains and the end of the list has not been
679     // reached, try to get an FU that can execute the op class at the
680     // head.  If an instruction issues, update that class's oldestInst
681     // to the new top of its ready queue and re-insert the class at the
682     // proper place in the list; otherwise just advance the iterator.
683     // This avoids repeatedly trying to schedule an op class for which
684     // no FU is currently free.
685 ListOrderIt order_it = listOrder.begin();
686 ListOrderIt order_end_it = listOrder.end();
687 int total_issued = 0;
688
689 while (total_issued < totalWidth &&
690 iewStage->canIssue() &&
691 order_it != order_end_it) {
692 OpClass op_class = (*order_it).queueType;
693
694 assert(!readyInsts[op_class].empty());
695
696 DynInstPtr issuing_inst = readyInsts[op_class].top();
697
698 assert(issuing_inst->seqNum == (*order_it).oldestInst);
699
700 if (issuing_inst->isSquashed()) {
701 readyInsts[op_class].pop();
702
703 if (!readyInsts[op_class].empty()) {
704 moveToYoungerInst(order_it);
705 } else {
706 readyIt[op_class] = listOrder.end();
707 queueOnList[op_class] = false;
708 }
709
710 listOrder.erase(order_it++);
711
712 ++iqSquashedInstsIssued;
713
714 continue;
715 }
716
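        // FU index convention: -2 means the op needs no FU at all, -1 means
        // no FU of this class is free this cycle, and >= 0 is the index of
        // the unit obtained from the pool.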
717 int idx = -2;
718 int op_latency = 1;
719 int tid = issuing_inst->threadNumber;
720
721 if (op_class != No_OpClass) {
722 idx = fuPool->getUnit(op_class);
723
724 if (idx > -1) {
725 op_latency = fuPool->getOpLatency(op_class);
726 }
727 }
728
729 // If we have an instruction that doesn't require a FU, or a
730 // valid FU, then schedule for execution.
731 if (idx == -2 || idx != -1) {
732 if (op_latency == 1) {
733 i2e_info->size++;
734 instsToExecute.push_back(issuing_inst);
735
736 // Add the FU onto the list of FU's to be freed next
737 // cycle if we used one.
738 if (idx >= 0)
739 fuPool->freeUnitNextCycle(idx);
740 } else {
741 int issue_latency = fuPool->getIssueLatency(op_class);
742 // Generate completion event for the FU
743 FUCompletion *execution = new FUCompletion(issuing_inst,
744 idx, this);
745
746 execution->schedule(curTick + cpu->cycles(issue_latency - 1));
747
748 // @todo: Enforce that issue_latency == 1 or op_latency
749 if (issue_latency > 1) {
750 // If FU isn't pipelined, then it must be freed
751 // upon the execution completing.
752 execution->setFreeFU();
753 } else {
754 // Add the FU onto the list of FU's to be freed next cycle.
755 fuPool->freeUnitNextCycle(idx);
756 }
757 }
758
759 DPRINTF(IQ, "Thread %i: Issuing instruction PC %#x "
760 "[sn:%lli]\n",
761 tid, issuing_inst->readPC(),
762 issuing_inst->seqNum);
763
764 readyInsts[op_class].pop();
765
766 if (!readyInsts[op_class].empty()) {
767 moveToYoungerInst(order_it);
768 } else {
769 readyIt[op_class] = listOrder.end();
770 queueOnList[op_class] = false;
771 }
772
773 issuing_inst->setIssued();
774 ++total_issued;
775
776 if (!issuing_inst->isMemRef()) {
777 // Memory instructions can not be freed from the IQ until they
778 // complete.
779 ++freeEntries;
780 count[tid]--;
781 issuing_inst->clearInIQ();
782 } else {
783 memDepUnit[tid].issue(issuing_inst);
784 }
785
786 listOrder.erase(order_it++);
787 statIssuedInstType[tid][op_class]++;
788 iewStage->incrWb(issuing_inst->seqNum);
789 } else {
790 statFuBusy[op_class]++;
791 fuBusy[tid]++;
792 ++order_it;
793 }
794 }
795
796 numIssuedDist.sample(total_issued);
797     iqInstsIssued += total_issued;
798
799 // If we issued any instructions, tell the CPU we had activity.
800 if (total_issued) {
801 cpu->activityThisCycle();
802 } else {
803 DPRINTF(IQ, "Not able to schedule any instructions.\n");
804 }
805 }
806
807 template <class Impl>
808 void
809 InstructionQueue<Impl>::scheduleNonSpec(const InstSeqNum &inst)
810 {
811 DPRINTF(IQ, "Marking nonspeculative instruction [sn:%lli] as ready "
812 "to execute.\n", inst);
813
814 NonSpecMapIt inst_it = nonSpecInsts.find(inst);
815
816 assert(inst_it != nonSpecInsts.end());
817
818 unsigned tid = (*inst_it).second->threadNumber;
819
820 (*inst_it).second->setCanIssue();
821
822 if (!(*inst_it).second->isMemRef()) {
823 addIfReady((*inst_it).second);
824 } else {
825 memDepUnit[tid].nonSpecInstReady((*inst_it).second);
826 }
827
828 (*inst_it).second = NULL;
829
830 nonSpecInsts.erase(inst_it);
831 }
832
833 template <class Impl>
834 void
835 InstructionQueue<Impl>::commit(const InstSeqNum &inst, unsigned tid)
836 {
837 DPRINTF(IQ, "[tid:%i]: Committing instructions older than [sn:%i]\n",
838 tid,inst);
839
840 ListIt iq_it = instList[tid].begin();
841
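    // Committed instructions are always at the front of the per-thread
    // list; the iterator is advanced before pop_front() so it is never
    // invalidated.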
842 while (iq_it != instList[tid].end() &&
843 (*iq_it)->seqNum <= inst) {
844 ++iq_it;
845 instList[tid].pop_front();
846 }
847
848 assert(freeEntries == (numEntries - countInsts()));
849 }
850
851 template <class Impl>
852 int
853 InstructionQueue<Impl>::wakeDependents(DynInstPtr &completed_inst)
854 {
855 int dependents = 0;
856
857 DPRINTF(IQ, "Waking dependents of completed instruction.\n");
858
859 assert(!completed_inst->isSquashed());
860
861 // Tell the memory dependence unit to wake any dependents on this
862 // instruction if it is a memory instruction. Also complete the memory
863 // instruction at this point since we know it executed without issues.
864 // @todo: Might want to rename "completeMemInst" to something that
865 // indicates that it won't need to be replayed, and call this
866 // earlier. Might not be a big deal.
867 if (completed_inst->isMemRef()) {
868 memDepUnit[completed_inst->threadNumber].wakeDependents(completed_inst);
869 completeMemInst(completed_inst);
870 } else if (completed_inst->isMemBarrier() ||
871 completed_inst->isWriteBarrier()) {
872 memDepUnit[completed_inst->threadNumber].completeBarrier(completed_inst);
873 }
874
875 for (int dest_reg_idx = 0;
876 dest_reg_idx < completed_inst->numDestRegs();
877 dest_reg_idx++)
878 {
879 PhysRegIndex dest_reg =
880 completed_inst->renamedDestRegIdx(dest_reg_idx);
881
882         // Special case for misc registers (e.g., uniq or control regs):
883         // they are not handled by the IQ and have no dependency graph entry.
884         // @todo Figure out a cleaner way to handle this.
885 if (dest_reg >= numPhysRegs) {
886 continue;
887 }
888
889 DPRINTF(IQ, "Waking any dependents on register %i.\n",
890 (int) dest_reg);
891
892 //Go through the dependency chain, marking the registers as
893 //ready within the waiting instructions.
894 DynInstPtr dep_inst = dependGraph.pop(dest_reg);
895
896 while (dep_inst) {
897 DPRINTF(IQ, "Waking up a dependent instruction, PC%#x.\n",
898 dep_inst->readPC());
899
900 // Might want to give more information to the instruction
901 // so that it knows which of its source registers is
902 // ready. However that would mean that the dependency
903 // graph entries would need to hold the src_reg_idx.
904 dep_inst->markSrcRegReady();
905
906 addIfReady(dep_inst);
907
908 dep_inst = dependGraph.pop(dest_reg);
909
910 ++dependents;
911 }
912
913 // Reset the head node now that all of its dependents have
914 // been woken up.
915 assert(dependGraph.empty(dest_reg));
916 dependGraph.clearInst(dest_reg);
917
918 // Mark the scoreboard as having that register ready.
919 regScoreboard[dest_reg] = true;
920 }
921 return dependents;
922 }
923
924 template <class Impl>
925 void
926 InstructionQueue<Impl>::addReadyMemInst(DynInstPtr &ready_inst)
927 {
928 OpClass op_class = ready_inst->opClass();
929
930 readyInsts[op_class].push(ready_inst);
931
932 // Will need to reorder the list if either a queue is not on the list,
933 // or it has an older instruction than last time.
934 if (!queueOnList[op_class]) {
935 addToOrderList(op_class);
936 } else if (readyInsts[op_class].top()->seqNum <
937 (*readyIt[op_class]).oldestInst) {
938 listOrder.erase(readyIt[op_class]);
939 addToOrderList(op_class);
940 }
941
942 DPRINTF(IQ, "Instruction is ready to issue, putting it onto "
943 "the ready list, PC %#x opclass:%i [sn:%lli].\n",
944 ready_inst->readPC(), op_class, ready_inst->seqNum);
945 }
946
947 template <class Impl>
948 void
949 InstructionQueue<Impl>::rescheduleMemInst(DynInstPtr &resched_inst)
950 {
951 memDepUnit[resched_inst->threadNumber].reschedule(resched_inst);
952 }
953
954 template <class Impl>
955 void
956 InstructionQueue<Impl>::replayMemInst(DynInstPtr &replay_inst)
957 {
958 memDepUnit[replay_inst->threadNumber].replay(replay_inst);
959 }
960
961 template <class Impl>
962 void
963 InstructionQueue<Impl>::completeMemInst(DynInstPtr &completed_inst)
964 {
965 int tid = completed_inst->threadNumber;
966
967 DPRINTF(IQ, "Completing mem instruction PC:%#x [sn:%lli]\n",
968 completed_inst->readPC(), completed_inst->seqNum);
969
970 ++freeEntries;
971
972 completed_inst->memOpDone = true;
973
974 memDepUnit[tid].completed(completed_inst);
975
976 count[tid]--;
977 }
978
979 template <class Impl>
980 void
981 InstructionQueue<Impl>::violation(DynInstPtr &store,
982 DynInstPtr &faulting_load)
983 {
984 memDepUnit[store->threadNumber].violation(store, faulting_load);
985 }
986
987 template <class Impl>
988 void
989 InstructionQueue<Impl>::squash(unsigned tid)
990 {
991 DPRINTF(IQ, "[tid:%i]: Starting to squash instructions in "
992 "the IQ.\n", tid);
993
994 // Read instruction sequence number of last instruction out of the
995 // time buffer.
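    // Non-Alpha ISAs with branch delay slots communicate the squash point
    // via bdelayDoneSeqNum so that the instruction in the delay slot is
    // not squashed along with the rest of the wrong path.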
996 #if THE_ISA == ALPHA_ISA
997 squashedSeqNum[tid] = fromCommit->commitInfo[tid].doneSeqNum;
998 #else
999 squashedSeqNum[tid] = fromCommit->commitInfo[tid].bdelayDoneSeqNum;
1000 #endif
1001
1002 // Call doSquash if there are insts in the IQ
1003 if (count[tid] > 0) {
1004 doSquash(tid);
1005 }
1006
1007 // Also tell the memory dependence unit to squash.
1008 memDepUnit[tid].squash(squashedSeqNum[tid], tid);
1009 }
1010
1011 template <class Impl>
1012 void
1013 InstructionQueue<Impl>::doSquash(unsigned tid)
1014 {
1015 // Start at the tail.
1016 ListIt squash_it = instList[tid].end();
1017 --squash_it;
1018
1019 DPRINTF(IQ, "[tid:%i]: Squashing until sequence number %i!\n",
1020 tid, squashedSeqNum[tid]);
1021
1022 // Squash any instructions younger than the squashed sequence number
1023 // given.
1024 while (squash_it != instList[tid].end() &&
1025 (*squash_it)->seqNum > squashedSeqNum[tid]) {
1026
1027 DynInstPtr squashed_inst = (*squash_it);
1028
1029 // Only handle the instruction if it actually is in the IQ and
1030 // hasn't already been squashed in the IQ.
1031 if (squashed_inst->threadNumber != tid ||
1032 squashed_inst->isSquashedInIQ()) {
1033 --squash_it;
1034 continue;
1035 }
1036
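        // Only instructions that still occupy an IQ entry (not yet issued,
        // or memory ops whose access has not completed) have dependency
        // graph or non-spec map state that must be undone here.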
1037 if (!squashed_inst->isIssued() ||
1038 (squashed_inst->isMemRef() &&
1039 !squashed_inst->memOpDone)) {
1040
1041 // Remove the instruction from the dependency list.
1042 if (!squashed_inst->isNonSpeculative() &&
1043 !squashed_inst->isStoreConditional() &&
1044 !squashed_inst->isMemBarrier() &&
1045 !squashed_inst->isWriteBarrier()) {
1046
1047 for (int src_reg_idx = 0;
1048 src_reg_idx < squashed_inst->numSrcRegs();
1049 src_reg_idx++)
1050 {
1051 PhysRegIndex src_reg =
1052 squashed_inst->renamedSrcRegIdx(src_reg_idx);
1053
1054 // Only remove it from the dependency graph if it
1055 // was placed there in the first place.
1056
1057 // Instead of doing a linked list traversal, we
1058 // can just remove these squashed instructions
1059 // either at issue time, or when the register is
1060 // overwritten. The only downside to this is it
1061 // leaves more room for error.
1062
1063 if (!squashed_inst->isReadySrcRegIdx(src_reg_idx) &&
1064 src_reg < numPhysRegs) {
1065 dependGraph.remove(src_reg, squashed_inst);
1066 }
1067
1068
1069 ++iqSquashedOperandsExamined;
1070 }
1071 } else {
1072 NonSpecMapIt ns_inst_it =
1073 nonSpecInsts.find(squashed_inst->seqNum);
1074 assert(ns_inst_it != nonSpecInsts.end());
1075
1076 (*ns_inst_it).second = NULL;
1077
1078 nonSpecInsts.erase(ns_inst_it);
1079
1080 ++iqSquashedNonSpecRemoved;
1081 }
1082
1083 // Might want to also clear out the head of the dependency graph.
1084
1085 // Mark it as squashed within the IQ.
1086 squashed_inst->setSquashedInIQ();
1087
1088 // @todo: Remove this hack where several statuses are set so the
1089 // inst will flow through the rest of the pipeline.
1090 squashed_inst->setIssued();
1091 squashed_inst->setCanCommit();
1092 squashed_inst->clearInIQ();
1093
1094 //Update Thread IQ Count
1095 count[squashed_inst->threadNumber]--;
1096
1097 ++freeEntries;
1098
1099 DPRINTF(IQ, "[tid:%i]: Instruction [sn:%lli] PC %#x "
1100 "squashed.\n",
1101 tid, squashed_inst->seqNum, squashed_inst->readPC());
1102 }
1103
1104 instList[tid].erase(squash_it--);
1105 ++iqSquashedInstsExamined;
1106 }
1107 }
1108
1109 template <class Impl>
1110 bool
1111 InstructionQueue<Impl>::addToDependents(DynInstPtr &new_inst)
1112 {
1113 // Loop through the instruction's source registers, adding
1114 // them to the dependency list if they are not ready.
1115 int8_t total_src_regs = new_inst->numSrcRegs();
1116 bool return_val = false;
1117
1118 for (int src_reg_idx = 0;
1119 src_reg_idx < total_src_regs;
1120 src_reg_idx++)
1121 {
1122 // Only add it to the dependency graph if it's not ready.
1123 if (!new_inst->isReadySrcRegIdx(src_reg_idx)) {
1124 PhysRegIndex src_reg = new_inst->renamedSrcRegIdx(src_reg_idx);
1125
1126 // Check the IQ's scoreboard to make sure the register
1127 // hasn't become ready while the instruction was in flight
1128 // between stages. Only if it really isn't ready should
1129 // it be added to the dependency graph.
1130 if (src_reg >= numPhysRegs) {
1131 continue;
1132 } else if (regScoreboard[src_reg] == false) {
1133 DPRINTF(IQ, "Instruction PC %#x has src reg %i that "
1134 "is being added to the dependency chain.\n",
1135 new_inst->readPC(), src_reg);
1136
1137 dependGraph.insert(src_reg, new_inst);
1138
1139 // Change the return value to indicate that something
1140 // was added to the dependency graph.
1141 return_val = true;
1142 } else {
1143 DPRINTF(IQ, "Instruction PC %#x has src reg %i that "
1144 "became ready before it reached the IQ.\n",
1145 new_inst->readPC(), src_reg);
1146 // Mark a register ready within the instruction.
1147 new_inst->markSrcRegReady(src_reg_idx);
1148 }
1149 }
1150 }
1151
1152 return return_val;
1153 }
1154
1155 template <class Impl>
1156 void
1157 InstructionQueue<Impl>::addToProducers(DynInstPtr &new_inst)
1158 {
1159 // Nothing really needs to be marked when an instruction becomes
1160 // the producer of a register's value, but for convenience a ptr
1161 // to the producing instruction will be placed in the head node of
1162 // the dependency links.
1163 int8_t total_dest_regs = new_inst->numDestRegs();
1164
1165 for (int dest_reg_idx = 0;
1166 dest_reg_idx < total_dest_regs;
1167 dest_reg_idx++)
1168 {
1169 PhysRegIndex dest_reg = new_inst->renamedDestRegIdx(dest_reg_idx);
1170
1171 // Instructions that use the misc regs will have a reg number
1172 // higher than the normal physical registers. In this case these
1173 // registers are not renamed, and there is no need to track
1174 // dependencies as these instructions must be executed at commit.
1175 if (dest_reg >= numPhysRegs) {
1176 continue;
1177 }
1178
1179 if (!dependGraph.empty(dest_reg)) {
1180 dependGraph.dump();
1181 panic("Dependency graph %i not empty!", dest_reg);
1182 }
1183
1184 dependGraph.setInst(dest_reg, new_inst);
1185
1186 // Mark the scoreboard to say it's not yet ready.
1187 regScoreboard[dest_reg] = false;
1188 }
1189 }
1190
1191 template <class Impl>
1192 void
1193 InstructionQueue<Impl>::addIfReady(DynInstPtr &inst)
1194 {
1195 // If the instruction now has all of its source registers
1196 // available, then add it to the list of ready instructions.
1197 if (inst->readyToIssue()) {
1198
1199 //Add the instruction to the proper ready list.
1200 if (inst->isMemRef()) {
1201
1202 DPRINTF(IQ, "Checking if memory instruction can issue.\n");
1203
1204 // Message to the mem dependence unit that this instruction has
1205 // its registers ready.
1206 memDepUnit[inst->threadNumber].regsReady(inst);
1207
1208 return;
1209 }
1210
1211 OpClass op_class = inst->opClass();
1212
1213 DPRINTF(IQ, "Instruction is ready to issue, putting it onto "
1214 "the ready list, PC %#x opclass:%i [sn:%lli].\n",
1215 inst->readPC(), op_class, inst->seqNum);
1216
1217 readyInsts[op_class].push(inst);
1218
1219 // Will need to reorder the list if either a queue is not on the list,
1220 // or it has an older instruction than last time.
1221 if (!queueOnList[op_class]) {
1222 addToOrderList(op_class);
1223 } else if (readyInsts[op_class].top()->seqNum <
1224 (*readyIt[op_class]).oldestInst) {
1225 listOrder.erase(readyIt[op_class]);
1226 addToOrderList(op_class);
1227 }
1228 }
1229 }
1230
1231 template <class Impl>
1232 int
1233 InstructionQueue<Impl>::countInsts()
1234 {
1235 #if 0
1236     //ksewell: This works, but could use a cleaner implementation with
1237     //a more intuitive way of counting.  Right now it's just brute
1238     //force.
1239     // Change the #if 0 to #if 1 if you want to use this method.
1240 int total_insts = 0;
1241
1242 for (int i = 0; i < numThreads; ++i) {
1243 ListIt count_it = instList[i].begin();
1244
1245 while (count_it != instList[i].end()) {
1246 if (!(*count_it)->isSquashed() && !(*count_it)->isSquashedInIQ()) {
1247 if (!(*count_it)->isIssued()) {
1248 ++total_insts;
1249 } else if ((*count_it)->isMemRef() &&
1250 !(*count_it)->memOpDone) {
1251 // Loads that have not been marked as executed still count
1252 // towards the total instructions.
1253 ++total_insts;
1254 }
1255 }
1256
1257 ++count_it;
1258 }
1259 }
1260
1261 return total_insts;
1262 #else
1263 return numEntries - freeEntries;
1264 #endif
1265 }
1266
1267 template <class Impl>
1268 void
1269 InstructionQueue<Impl>::dumpLists()
1270 {
1271 for (int i = 0; i < Num_OpClasses; ++i) {
1272 cprintf("Ready list %i size: %i\n", i, readyInsts[i].size());
1273
1274 cprintf("\n");
1275 }
1276
1277 cprintf("Non speculative list size: %i\n", nonSpecInsts.size());
1278
1279 NonSpecMapIt non_spec_it = nonSpecInsts.begin();
1280 NonSpecMapIt non_spec_end_it = nonSpecInsts.end();
1281
1282 cprintf("Non speculative list: ");
1283
1284 while (non_spec_it != non_spec_end_it) {
1285 cprintf("%#x [sn:%lli]", (*non_spec_it).second->readPC(),
1286 (*non_spec_it).second->seqNum);
1287 ++non_spec_it;
1288 }
1289
1290 cprintf("\n");
1291
1292 ListOrderIt list_order_it = listOrder.begin();
1293 ListOrderIt list_order_end_it = listOrder.end();
1294 int i = 1;
1295
1296 cprintf("List order: ");
1297
1298 while (list_order_it != list_order_end_it) {
1299 cprintf("%i OpClass:%i [sn:%lli] ", i, (*list_order_it).queueType,
1300 (*list_order_it).oldestInst);
1301
1302 ++list_order_it;
1303 ++i;
1304 }
1305
1306 cprintf("\n");
1307 }
1308
1309
1310 template <class Impl>
1311 void
1312 InstructionQueue<Impl>::dumpInsts()
1313 {
1314 for (int i = 0; i < numThreads; ++i) {
1315 int num = 0;
1316 int valid_num = 0;
1317 ListIt inst_list_it = instList[i].begin();
1318
1319 while (inst_list_it != instList[i].end())
1320 {
1321 cprintf("Instruction:%i\n",
1322 num);
1323 if (!(*inst_list_it)->isSquashed()) {
1324 if (!(*inst_list_it)->isIssued()) {
1325 ++valid_num;
1326 cprintf("Count:%i\n", valid_num);
1327 } else if ((*inst_list_it)->isMemRef() &&
1328 !(*inst_list_it)->memOpDone) {
1329 // Loads that have not been marked as executed
1330 // still count towards the total instructions.
1331 ++valid_num;
1332 cprintf("Count:%i\n", valid_num);
1333 }
1334 }
1335
1336 cprintf("PC:%#x\n[sn:%lli]\n[tid:%i]\n"
1337 "Issued:%i\nSquashed:%i\n",
1338 (*inst_list_it)->readPC(),
1339 (*inst_list_it)->seqNum,
1340 (*inst_list_it)->threadNumber,
1341 (*inst_list_it)->isIssued(),
1342 (*inst_list_it)->isSquashed());
1343
1344 if ((*inst_list_it)->isMemRef()) {
1345 cprintf("MemOpDone:%i\n", (*inst_list_it)->memOpDone);
1346 }
1347
1348 cprintf("\n");
1349
1350 inst_list_it++;
1351 ++num;
1352 }
1353 }
1354
1355 cprintf("Insts to Execute list:\n");
1356
1357 int num = 0;
1358 int valid_num = 0;
1359 ListIt inst_list_it = instsToExecute.begin();
1360
1361 while (inst_list_it != instsToExecute.end())
1362 {
1363 cprintf("Instruction:%i\n",
1364 num);
1365 if (!(*inst_list_it)->isSquashed()) {
1366 if (!(*inst_list_it)->isIssued()) {
1367 ++valid_num;
1368 cprintf("Count:%i\n", valid_num);
1369 } else if ((*inst_list_it)->isMemRef() &&
1370 !(*inst_list_it)->memOpDone) {
1371 // Loads that have not been marked as executed
1372 // still count towards the total instructions.
1373 ++valid_num;
1374 cprintf("Count:%i\n", valid_num);
1375 }
1376 }
1377
1378 cprintf("PC:%#x\n[sn:%lli]\n[tid:%i]\n"
1379 "Issued:%i\nSquashed:%i\n",
1380 (*inst_list_it)->readPC(),
1381 (*inst_list_it)->seqNum,
1382 (*inst_list_it)->threadNumber,
1383 (*inst_list_it)->isIssued(),
1384 (*inst_list_it)->isSquashed());
1385
1386 if ((*inst_list_it)->isMemRef()) {
1387 cprintf("MemOpDone:%i\n", (*inst_list_it)->memOpDone);
1388 }
1389
1390 cprintf("\n");
1391
1392 inst_list_it++;
1393 ++num;
1394 }
1395 }