src/cpu/o3/inst_queue_impl.hh
/*
 * Copyright (c) 2004-2006 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Kevin Lim
 *          Korey Sewell
 */

#include <limits>
#include <vector>

#include "cpu/o3/fu_pool.hh"
#include "cpu/o3/inst_queue.hh"
#include "enums/OpClass.hh"
#include "sim/core.hh"

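// FUCompletion is scheduled by scheduleReadyInsts() for operations whose
// latency is greater than one cycle; when it fires it hands the instruction
// back to the IQ and optionally frees the functional unit that executed it.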
template <class Impl>
InstructionQueue<Impl>::FUCompletion::FUCompletion(DynInstPtr &_inst,
                                                   int fu_idx,
                                                   InstructionQueue<Impl> *iq_ptr)
    : Event(&mainEventQueue, Stat_Event_Pri),
      inst(_inst), fuIdx(fu_idx), iqPtr(iq_ptr), freeFU(false)
{
    this->setFlags(Event::AutoDelete);
}

template <class Impl>
void
InstructionQueue<Impl>::FUCompletion::process()
{
    iqPtr->processFUCompletion(inst, freeFU ? fuIdx : -1);
    inst = NULL;
}


template <class Impl>
const char *
InstructionQueue<Impl>::FUCompletion::description()
{
    return "Functional unit completion event";
}

template <class Impl>
InstructionQueue<Impl>::InstructionQueue(O3CPU *cpu_ptr, IEW *iew_ptr,
                                         Params *params)
    : cpu(cpu_ptr),
      iewStage(iew_ptr),
      fuPool(params->fuPool),
      numEntries(params->numIQEntries),
      totalWidth(params->issueWidth),
      numPhysIntRegs(params->numPhysIntRegs),
      numPhysFloatRegs(params->numPhysFloatRegs),
      commitToIEWDelay(params->commitToIEWDelay)
{
    assert(fuPool);

    switchedOut = false;

    numThreads = params->numberOfThreads;

    // Set the number of physical registers as the number of int + float
    numPhysRegs = numPhysIntRegs + numPhysFloatRegs;

    // Create an entry for each physical register within the
    // dependency graph.
    dependGraph.resize(numPhysRegs);

    // Resize the register scoreboard.
    regScoreboard.resize(numPhysRegs);

    // Initialize Mem Dependence Units
    for (int i = 0; i < numThreads; i++) {
        memDepUnit[i].init(params, i);
        memDepUnit[i].setIQ(this);
    }

    resetState();

    std::string policy = params->smtIQPolicy;

    // Convert string to lowercase
    std::transform(policy.begin(), policy.end(), policy.begin(),
                   (int(*)(int)) tolower);

    // Figure out the resource sharing policy.
    if (policy == "dynamic") {
        iqPolicy = Dynamic;

        // Set max entries to the total IQ capacity.
        for (int i = 0; i < numThreads; i++) {
            maxEntries[i] = numEntries;
        }

    } else if (policy == "partitioned") {
        iqPolicy = Partitioned;

        // @todo: Make this work when part_amt doesn't divide evenly.
        int part_amt = numEntries / numThreads;

        // Divide the IQ up evenly among the threads.
        for (int i = 0; i < numThreads; i++) {
            maxEntries[i] = part_amt;
        }

        DPRINTF(IQ, "IQ sharing policy set to Partitioned: "
                "%i entries per thread.\n", part_amt);
    } else if (policy == "threshold") {
        iqPolicy = Threshold;

        double threshold = (double)params->smtIQThreshold / 100;

        int thresholdIQ = (int)((double)threshold * numEntries);

        // Divide up by the threshold amount.
        for (int i = 0; i < numThreads; i++) {
            maxEntries[i] = thresholdIQ;
        }

        DPRINTF(IQ, "IQ sharing policy set to Threshold: "
                "%i entries per thread.\n", thresholdIQ);
    } else {
        assert(0 && "Invalid IQ sharing policy. Options are: "
               "{Dynamic, Partitioned, Threshold}");
    }
}

template <class Impl>
InstructionQueue<Impl>::~InstructionQueue()
{
    dependGraph.reset();
#ifdef DEBUG
    cprintf("Nodes traversed: %i, removed: %i\n",
            dependGraph.nodesTraversed, dependGraph.nodesRemoved);
#endif
}

template <class Impl>
std::string
InstructionQueue<Impl>::name() const
{
    return cpu->name() + ".iq";
}

template <class Impl>
void
InstructionQueue<Impl>::regStats()
{
    using namespace Stats;
    iqInstsAdded
        .name(name() + ".iqInstsAdded")
        .desc("Number of instructions added to the IQ (excludes non-spec)")
        .prereq(iqInstsAdded);

    iqNonSpecInstsAdded
        .name(name() + ".iqNonSpecInstsAdded")
        .desc("Number of non-speculative instructions added to the IQ")
        .prereq(iqNonSpecInstsAdded);

    iqInstsIssued
        .name(name() + ".iqInstsIssued")
        .desc("Number of instructions issued")
        .prereq(iqInstsIssued);

    iqIntInstsIssued
        .name(name() + ".iqIntInstsIssued")
        .desc("Number of integer instructions issued")
        .prereq(iqIntInstsIssued);

    iqFloatInstsIssued
        .name(name() + ".iqFloatInstsIssued")
        .desc("Number of float instructions issued")
        .prereq(iqFloatInstsIssued);

    iqBranchInstsIssued
        .name(name() + ".iqBranchInstsIssued")
        .desc("Number of branch instructions issued")
        .prereq(iqBranchInstsIssued);

    iqMemInstsIssued
        .name(name() + ".iqMemInstsIssued")
        .desc("Number of memory instructions issued")
        .prereq(iqMemInstsIssued);

    iqMiscInstsIssued
        .name(name() + ".iqMiscInstsIssued")
        .desc("Number of miscellaneous instructions issued")
        .prereq(iqMiscInstsIssued);

    iqSquashedInstsIssued
        .name(name() + ".iqSquashedInstsIssued")
        .desc("Number of squashed instructions issued")
        .prereq(iqSquashedInstsIssued);

    iqSquashedInstsExamined
        .name(name() + ".iqSquashedInstsExamined")
        .desc("Number of squashed instructions iterated over during squash;"
              " mainly for profiling")
        .prereq(iqSquashedInstsExamined);

    iqSquashedOperandsExamined
        .name(name() + ".iqSquashedOperandsExamined")
        .desc("Number of squashed operands that are examined and possibly "
              "removed from graph")
        .prereq(iqSquashedOperandsExamined);

    iqSquashedNonSpecRemoved
        .name(name() + ".iqSquashedNonSpecRemoved")
        .desc("Number of squashed non-spec instructions that were removed")
        .prereq(iqSquashedNonSpecRemoved);
/*
    queueResDist
        .init(Num_OpClasses, 0, 99, 2)
        .name(name() + ".IQ:residence:")
        .desc("cycles from dispatch to issue")
        .flags(total | pdf | cdf )
        ;
    for (int i = 0; i < Num_OpClasses; ++i) {
        queueResDist.subname(i, opClassStrings[i]);
    }
*/
    numIssuedDist
        .init(0, totalWidth, 1)
        .name(name() + ".ISSUE:issued_per_cycle")
        .desc("Number of insts issued each cycle")
        .flags(pdf)
        ;
/*
    dist_unissued
        .init(Num_OpClasses+2)
        .name(name() + ".ISSUE:unissued_cause")
        .desc("Reason ready instruction not issued")
        .flags(pdf | dist)
        ;
    for (int i=0; i < (Num_OpClasses + 2); ++i) {
        dist_unissued.subname(i, unissued_names[i]);
    }
*/
    statIssuedInstType
        .init(numThreads, Enums::Num_OpClass)
        .name(name() + ".ISSUE:FU_type")
        .desc("Type of FU issued")
        .flags(total | pdf | dist)
        ;
    statIssuedInstType.ysubnames(Enums::OpClassStrings);

    //
    // How long did instructions for a particular FU type wait prior to issue
    //
/*
    issueDelayDist
        .init(Num_OpClasses, 0, 99, 2)
        .name(name() + ".ISSUE:")
        .desc("cycles from operands ready to issue")
        .flags(pdf | cdf)
        ;

    for (int i=0; i<Num_OpClasses; ++i) {
        std::stringstream subname;
        subname << opClassStrings[i] << "_delay";
        issueDelayDist.subname(i, subname.str());
    }
*/
    issueRate
        .name(name() + ".ISSUE:rate")
        .desc("Inst issue rate")
        .flags(total)
        ;
    issueRate = iqInstsIssued / cpu->numCycles;

    statFuBusy
        .init(Num_OpClasses)
        .name(name() + ".ISSUE:fu_full")
        .desc("attempts to use FU when none available")
        .flags(pdf | dist)
        ;
    for (int i = 0; i < Num_OpClasses; ++i) {
        statFuBusy.subname(i, Enums::OpClassStrings[i]);
    }

    fuBusy
        .init(numThreads)
        .name(name() + ".ISSUE:fu_busy_cnt")
        .desc("FU busy when requested")
        .flags(total)
        ;

    fuBusyRate
        .name(name() + ".ISSUE:fu_busy_rate")
        .desc("FU busy rate (busy events/executed inst)")
        .flags(total)
        ;
    fuBusyRate = fuBusy / iqInstsIssued;

    for (int i = 0; i < numThreads; i++) {
        // Tell mem dependence unit to reg stats as well.
        memDepUnit[i].regStats();
    }
}

template <class Impl>
void
InstructionQueue<Impl>::resetState()
{
    // Initialize thread IQ counts
    for (int i = 0; i < numThreads; i++) {
        count[i] = 0;
        instList[i].clear();
    }

    // Initialize the number of free IQ entries.
    freeEntries = numEntries;

    // Note that in actuality, the registers corresponding to the logical
    // registers start off as ready. However this doesn't matter for the
    // IQ as the instruction should have been correctly told if those
    // registers are ready in rename. Thus it can all be initialized as
    // unready.
    for (int i = 0; i < numPhysRegs; ++i) {
        regScoreboard[i] = false;
    }

    for (int i = 0; i < numThreads; ++i) {
        squashedSeqNum[i] = 0;
    }

    for (int i = 0; i < Num_OpClasses; ++i) {
        while (!readyInsts[i].empty())
            readyInsts[i].pop();
        queueOnList[i] = false;
        readyIt[i] = listOrder.end();
    }
    nonSpecInsts.clear();
    listOrder.clear();
}

template <class Impl>
void
InstructionQueue<Impl>::setActiveThreads(std::list<unsigned> *at_ptr)
{
    activeThreads = at_ptr;
}

template <class Impl>
void
InstructionQueue<Impl>::setIssueToExecuteQueue(TimeBuffer<IssueStruct> *i2e_ptr)
{
    issueToExecuteQueue = i2e_ptr;
}

template <class Impl>
void
InstructionQueue<Impl>::setTimeBuffer(TimeBuffer<TimeStruct> *tb_ptr)
{
    timeBuffer = tb_ptr;

    fromCommit = timeBuffer->getWire(-commitToIEWDelay);
}

template <class Impl>
void
InstructionQueue<Impl>::switchOut()
{
/*
    if (!instList[0].empty() || (numEntries != freeEntries) ||
        !readyInsts[0].empty() || !nonSpecInsts.empty() || !listOrder.empty()) {
        dumpInsts();
//        assert(0);
    }
*/
    resetState();
    dependGraph.reset();
    instsToExecute.clear();
    switchedOut = true;
    for (int i = 0; i < numThreads; ++i) {
        memDepUnit[i].switchOut();
    }
}

template <class Impl>
void
InstructionQueue<Impl>::takeOverFrom()
{
    switchedOut = false;
}

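// Returns the number of entries allotted to each thread under the
// Partitioned sharing policy, or 0 for the other policies.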
template <class Impl>
int
InstructionQueue<Impl>::entryAmount(int num_threads)
{
    if (iqPolicy == Partitioned) {
        return numEntries / num_threads;
    } else {
        return 0;
    }
}


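// Recomputes each active thread's maximum entry count when the set of active
// threads changes: Partitioned re-divides the IQ evenly, while Threshold
// gives a lone active thread the whole IQ.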
template <class Impl>
void
InstructionQueue<Impl>::resetEntries()
{
    if (iqPolicy != Dynamic || numThreads > 1) {
        int active_threads = activeThreads->size();

        std::list<unsigned>::iterator threads = activeThreads->begin();
        std::list<unsigned>::iterator end = activeThreads->end();

        while (threads != end) {
            unsigned tid = *threads++;

            if (iqPolicy == Partitioned) {
                maxEntries[tid] = numEntries / active_threads;
            } else if (iqPolicy == Threshold && active_threads == 1) {
                maxEntries[tid] = numEntries;
            }
        }
    }
}

template <class Impl>
unsigned
InstructionQueue<Impl>::numFreeEntries()
{
    return freeEntries;
}

template <class Impl>
unsigned
InstructionQueue<Impl>::numFreeEntries(unsigned tid)
{
    return maxEntries[tid] - count[tid];
}

// Might want to do something more complex if it knows how many instructions
// will be issued this cycle.
template <class Impl>
bool
InstructionQueue<Impl>::isFull()
{
    if (freeEntries == 0) {
        return(true);
    } else {
        return(false);
    }
}

template <class Impl>
bool
InstructionQueue<Impl>::isFull(unsigned tid)
{
    if (numFreeEntries(tid) == 0) {
        return(true);
    } else {
        return(false);
    }
}

template <class Impl>
bool
InstructionQueue<Impl>::hasReadyInsts()
{
    if (!listOrder.empty()) {
        return true;
    }

    for (int i = 0; i < Num_OpClasses; ++i) {
        if (!readyInsts[i].empty()) {
            return true;
        }
    }

    return false;
}

template <class Impl>
void
InstructionQueue<Impl>::insert(DynInstPtr &new_inst)
{
    // Make sure the instruction is valid
    assert(new_inst);

    DPRINTF(IQ, "Adding instruction [sn:%lli] PC %#x to the IQ.\n",
            new_inst->seqNum, new_inst->readPC());

    assert(freeEntries != 0);

    instList[new_inst->threadNumber].push_back(new_inst);

    --freeEntries;

    new_inst->setInIQ();

    // Look through its source registers (physical regs), and mark any
    // dependencies.
    addToDependents(new_inst);

    // Have this instruction set itself as the producer of its destination
    // register(s).
    addToProducers(new_inst);

    if (new_inst->isMemRef()) {
        memDepUnit[new_inst->threadNumber].insert(new_inst);
    } else {
        addIfReady(new_inst);
    }

    ++iqInstsAdded;

    count[new_inst->threadNumber]++;

    assert(freeEntries == (numEntries - countInsts()));
}

template <class Impl>
void
InstructionQueue<Impl>::insertNonSpec(DynInstPtr &new_inst)
{
    // @todo: Clean up this code; can do it by setting inst as unable
    // to issue, then calling normal insert on the inst.

    assert(new_inst);

    nonSpecInsts[new_inst->seqNum] = new_inst;

    DPRINTF(IQ, "Adding non-speculative instruction [sn:%lli] PC %#x "
            "to the IQ.\n",
            new_inst->seqNum, new_inst->readPC());

    assert(freeEntries != 0);

    instList[new_inst->threadNumber].push_back(new_inst);

    --freeEntries;

    new_inst->setInIQ();

    // Have this instruction set itself as the producer of its destination
    // register(s).
    addToProducers(new_inst);

    // If it's a memory instruction, add it to the memory dependency
    // unit.
    if (new_inst->isMemRef()) {
        memDepUnit[new_inst->threadNumber].insertNonSpec(new_inst);
    }

    ++iqNonSpecInstsAdded;

    count[new_inst->threadNumber]++;

    assert(freeEntries == (numEntries - countInsts()));
}

template <class Impl>
void
InstructionQueue<Impl>::insertBarrier(DynInstPtr &barr_inst)
{
    memDepUnit[barr_inst->threadNumber].insertBarrier(barr_inst);

    insertNonSpec(barr_inst);
}

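// Returns the next instruction scheduled for execution, popping it from the
// front of the instsToExecute list.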
template <class Impl>
typename Impl::DynInstPtr
InstructionQueue<Impl>::getInstToExecute()
{
    assert(!instsToExecute.empty());
    DynInstPtr inst = instsToExecute.front();
    instsToExecute.pop_front();
    return inst;
}

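// Adds an entry for the given op class to listOrder, which keeps the ready
// queues sorted by the sequence number of their oldest ready instruction.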
template <class Impl>
void
InstructionQueue<Impl>::addToOrderList(OpClass op_class)
{
    assert(!readyInsts[op_class].empty());

    ListOrderEntry queue_entry;

    queue_entry.queueType = op_class;

    queue_entry.oldestInst = readyInsts[op_class].top()->seqNum;

    ListOrderIt list_it = listOrder.begin();
    ListOrderIt list_end_it = listOrder.end();

    while (list_it != list_end_it) {
        if ((*list_it).oldestInst > queue_entry.oldestInst) {
            break;
        }

        list_it++;
    }

    readyIt[op_class] = listOrder.insert(list_it, queue_entry);
    queueOnList[op_class] = true;
}

template <class Impl>
void
InstructionQueue<Impl>::moveToYoungerInst(ListOrderIt list_order_it)
{
    // Get iterator of next item on the list
    // Delete the original iterator
    // Determine if the next item is either the end of the list or younger
    // than the new instruction. If so, then add in a new iterator right here.
    // If not, then move along.
    ListOrderEntry queue_entry;
    OpClass op_class = (*list_order_it).queueType;
    ListOrderIt next_it = list_order_it;

    ++next_it;

    queue_entry.queueType = op_class;
    queue_entry.oldestInst = readyInsts[op_class].top()->seqNum;

    while (next_it != listOrder.end() &&
           (*next_it).oldestInst < queue_entry.oldestInst) {
        ++next_it;
    }

    readyIt[op_class] = listOrder.insert(next_it, queue_entry);
}

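// Called from an FUCompletion event when a multi-cycle operation finishes:
// wakes the CPU if needed, frees the functional unit if one was held, and
// hands the instruction to the execute stage.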
template <class Impl>
void
InstructionQueue<Impl>::processFUCompletion(DynInstPtr &inst, int fu_idx)
{
    DPRINTF(IQ, "Processing FU completion [sn:%lli]\n", inst->seqNum);
    // The CPU could have been sleeping until this op completed (*extremely*
    // long latency op). Wake it if it was. This may be overkill.
    if (isSwitchedOut()) {
        DPRINTF(IQ, "FU completion not processed, IQ is switched out [sn:%lli]\n",
                inst->seqNum);
        return;
    }

    iewStage->wakeCPU();

    if (fu_idx > -1)
        fuPool->freeUnitNextCycle(fu_idx);

    // @todo: Ensure that these FU Completions happen at the beginning
    // of a cycle, otherwise they could add too many instructions to
    // the queue.
    issueToExecuteQueue->access(0)->size++;
    instsToExecute.push_back(inst);
}

// @todo: Figure out a better way to remove the squashed items from the
// lists. Checking the top item of each list to see if it's squashed
// wastes time and forces jumps.
template <class Impl>
void
InstructionQueue<Impl>::scheduleReadyInsts()
{
    DPRINTF(IQ, "Attempting to schedule ready instructions from "
            "the IQ.\n");

    IssueStruct *i2e_info = issueToExecuteQueue->access(0);

    // Have iterator to head of the list
    // While I haven't exceeded bandwidth or reached the end of the list,
    // Try to get a FU that can do what this op needs.
    // If successful, change the oldestInst to the new top of the list, put
    // the queue in the proper place in the list.
    // Increment the iterator.
    // This will avoid trying to schedule a certain op class if there are no
    // FUs that handle it.
    ListOrderIt order_it = listOrder.begin();
    ListOrderIt order_end_it = listOrder.end();
    int total_issued = 0;

    while (total_issued < totalWidth &&
           iewStage->canIssue() &&
           order_it != order_end_it) {
        OpClass op_class = (*order_it).queueType;

        assert(!readyInsts[op_class].empty());

        DynInstPtr issuing_inst = readyInsts[op_class].top();

        assert(issuing_inst->seqNum == (*order_it).oldestInst);

        if (issuing_inst->isSquashed()) {
            readyInsts[op_class].pop();

            if (!readyInsts[op_class].empty()) {
                moveToYoungerInst(order_it);
            } else {
                readyIt[op_class] = listOrder.end();
                queueOnList[op_class] = false;
            }

            listOrder.erase(order_it++);

            ++iqSquashedInstsIssued;

            continue;
        }

        int idx = -2;
        int op_latency = 1;
        int tid = issuing_inst->threadNumber;

        if (op_class != No_OpClass) {
            idx = fuPool->getUnit(op_class);

            if (idx > -1) {
                op_latency = fuPool->getOpLatency(op_class);
            }
        }

        // If we have an instruction that doesn't require a FU, or a
        // valid FU, then schedule for execution.
        if (idx == -2 || idx != -1) {
            if (op_latency == 1) {
                i2e_info->size++;
                instsToExecute.push_back(issuing_inst);

                // Add the FU onto the list of FU's to be freed next
                // cycle if we used one.
                if (idx >= 0)
                    fuPool->freeUnitNextCycle(idx);
            } else {
                int issue_latency = fuPool->getIssueLatency(op_class);
                // Generate completion event for the FU
                FUCompletion *execution = new FUCompletion(issuing_inst,
                                                           idx, this);

                execution->schedule(curTick + cpu->cycles(issue_latency - 1));

                // @todo: Enforce that issue_latency == 1 or op_latency
                if (issue_latency > 1) {
                    // If FU isn't pipelined, then it must be freed
                    // upon the execution completing.
                    execution->setFreeFU();
                } else {
                    // Add the FU onto the list of FU's to be freed next cycle.
                    fuPool->freeUnitNextCycle(idx);
                }
            }

            DPRINTF(IQ, "Thread %i: Issuing instruction PC %#x "
                    "[sn:%lli]\n",
                    tid, issuing_inst->readPC(),
                    issuing_inst->seqNum);

            readyInsts[op_class].pop();

            if (!readyInsts[op_class].empty()) {
                moveToYoungerInst(order_it);
            } else {
                readyIt[op_class] = listOrder.end();
                queueOnList[op_class] = false;
            }

            issuing_inst->setIssued();
            ++total_issued;

            if (!issuing_inst->isMemRef()) {
                // Memory instructions can not be freed from the IQ until they
                // complete.
                ++freeEntries;
                count[tid]--;
                issuing_inst->clearInIQ();
            } else {
                memDepUnit[tid].issue(issuing_inst);
            }

            listOrder.erase(order_it++);
            statIssuedInstType[tid][op_class]++;
            iewStage->incrWb(issuing_inst->seqNum);
        } else {
            statFuBusy[op_class]++;
            fuBusy[tid]++;
            ++order_it;
        }
    }

    numIssuedDist.sample(total_issued);
    iqInstsIssued += total_issued;

    // If we issued any instructions, tell the CPU we had activity.
    if (total_issued) {
        cpu->activityThisCycle();
    } else {
        DPRINTF(IQ, "Not able to schedule any instructions.\n");
    }
}

template <class Impl>
void
InstructionQueue<Impl>::scheduleNonSpec(const InstSeqNum &inst)
{
    DPRINTF(IQ, "Marking nonspeculative instruction [sn:%lli] as ready "
            "to execute.\n", inst);

    NonSpecMapIt inst_it = nonSpecInsts.find(inst);

    assert(inst_it != nonSpecInsts.end());

    unsigned tid = (*inst_it).second->threadNumber;

    (*inst_it).second->setAtCommit();

    (*inst_it).second->setCanIssue();

    if (!(*inst_it).second->isMemRef()) {
        addIfReady((*inst_it).second);
    } else {
        memDepUnit[tid].nonSpecInstReady((*inst_it).second);
    }

    (*inst_it).second = NULL;

    nonSpecInsts.erase(inst_it);
}

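// Removes instructions up to and including the given sequence number from
// the thread's instruction list; they have committed and no longer need to
// be tracked by the IQ.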
template <class Impl>
void
InstructionQueue<Impl>::commit(const InstSeqNum &inst, unsigned tid)
{
    DPRINTF(IQ, "[tid:%i]: Committing instructions older than [sn:%i]\n",
            tid, inst);

    ListIt iq_it = instList[tid].begin();

    while (iq_it != instList[tid].end() &&
           (*iq_it)->seqNum <= inst) {
        ++iq_it;
        instList[tid].pop_front();
    }

    assert(freeEntries == (numEntries - countInsts()));
}

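// Notifies the memory dependence unit and the register dependency graph that
// this instruction has completed, marking dependent instructions' source
// registers ready and returning how many dependents were woken.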
template <class Impl>
int
InstructionQueue<Impl>::wakeDependents(DynInstPtr &completed_inst)
{
    int dependents = 0;

    DPRINTF(IQ, "Waking dependents of completed instruction.\n");

    assert(!completed_inst->isSquashed());

    // Tell the memory dependence unit to wake any dependents on this
    // instruction if it is a memory instruction. Also complete the memory
    // instruction at this point since we know it executed without issues.
    // @todo: Might want to rename "completeMemInst" to something that
    // indicates that it won't need to be replayed, and call this
    // earlier. Might not be a big deal.
    if (completed_inst->isMemRef()) {
        memDepUnit[completed_inst->threadNumber].wakeDependents(completed_inst);
        completeMemInst(completed_inst);
    } else if (completed_inst->isMemBarrier() ||
               completed_inst->isWriteBarrier()) {
        memDepUnit[completed_inst->threadNumber].completeBarrier(completed_inst);
    }

    for (int dest_reg_idx = 0;
         dest_reg_idx < completed_inst->numDestRegs();
         dest_reg_idx++)
    {
        PhysRegIndex dest_reg =
            completed_inst->renamedDestRegIdx(dest_reg_idx);

        // Special case of uniq or control registers. They are not
        // handled by the IQ and thus have no dependency graph entry.
        // @todo Figure out a cleaner way to handle this.
        if (dest_reg >= numPhysRegs) {
            continue;
        }

        DPRINTF(IQ, "Waking any dependents on register %i.\n",
                (int) dest_reg);

        // Go through the dependency chain, marking the registers as
        // ready within the waiting instructions.
        DynInstPtr dep_inst = dependGraph.pop(dest_reg);

        while (dep_inst) {
            DPRINTF(IQ, "Waking up a dependent instruction, PC%#x.\n",
                    dep_inst->readPC());

            // Might want to give more information to the instruction
            // so that it knows which of its source registers is
            // ready. However that would mean that the dependency
            // graph entries would need to hold the src_reg_idx.
            dep_inst->markSrcRegReady();

            addIfReady(dep_inst);

            dep_inst = dependGraph.pop(dest_reg);

            ++dependents;
        }

        // Reset the head node now that all of its dependents have
        // been woken up.
        assert(dependGraph.empty(dest_reg));
        dependGraph.clearInst(dest_reg);

        // Mark the scoreboard as having that register ready.
        regScoreboard[dest_reg] = true;
    }
    return dependents;
}

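// Places a ready memory instruction onto the ready list for its op class,
// updating listOrder if that class's oldest ready instruction changed.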
template <class Impl>
void
InstructionQueue<Impl>::addReadyMemInst(DynInstPtr &ready_inst)
{
    OpClass op_class = ready_inst->opClass();

    readyInsts[op_class].push(ready_inst);

    // Will need to reorder the list if either a queue is not on the list,
    // or it has an older instruction than last time.
    if (!queueOnList[op_class]) {
        addToOrderList(op_class);
    } else if (readyInsts[op_class].top()->seqNum <
               (*readyIt[op_class]).oldestInst) {
        listOrder.erase(readyIt[op_class]);
        addToOrderList(op_class);
    }

    DPRINTF(IQ, "Instruction is ready to issue, putting it onto "
            "the ready list, PC %#x opclass:%i [sn:%lli].\n",
            ready_inst->readPC(), op_class, ready_inst->seqNum);
}

template <class Impl>
void
InstructionQueue<Impl>::rescheduleMemInst(DynInstPtr &resched_inst)
{
    DPRINTF(IQ, "Rescheduling mem inst [sn:%lli]\n", resched_inst->seqNum);
    resched_inst->clearCanIssue();
    memDepUnit[resched_inst->threadNumber].reschedule(resched_inst);
}

template <class Impl>
void
InstructionQueue<Impl>::replayMemInst(DynInstPtr &replay_inst)
{
    memDepUnit[replay_inst->threadNumber].replay(replay_inst);
}

template <class Impl>
void
InstructionQueue<Impl>::completeMemInst(DynInstPtr &completed_inst)
{
    int tid = completed_inst->threadNumber;

    DPRINTF(IQ, "Completing mem instruction PC:%#x [sn:%lli]\n",
            completed_inst->readPC(), completed_inst->seqNum);

    ++freeEntries;

    completed_inst->memOpDone = true;

    memDepUnit[tid].completed(completed_inst);
    count[tid]--;
}

template <class Impl>
void
InstructionQueue<Impl>::violation(DynInstPtr &store,
                                  DynInstPtr &faulting_load)
{
    memDepUnit[store->threadNumber].violation(store, faulting_load);
}

template <class Impl>
void
InstructionQueue<Impl>::squash(unsigned tid)
{
    DPRINTF(IQ, "[tid:%i]: Starting to squash instructions in "
            "the IQ.\n", tid);

    // Read instruction sequence number of last instruction out of the
    // time buffer.
    squashedSeqNum[tid] = fromCommit->commitInfo[tid].doneSeqNum;

    // Call doSquash if there are insts in the IQ
    if (count[tid] > 0) {
        doSquash(tid);
    }

    // Also tell the memory dependence unit to squash.
    memDepUnit[tid].squash(squashedSeqNum[tid], tid);
}

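// Walks the thread's instruction list from youngest to oldest, squashing and
// removing every instruction younger than the squashed sequence number.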
template <class Impl>
void
InstructionQueue<Impl>::doSquash(unsigned tid)
{
    // Start at the tail.
    ListIt squash_it = instList[tid].end();
    --squash_it;

    DPRINTF(IQ, "[tid:%i]: Squashing until sequence number %i!\n",
            tid, squashedSeqNum[tid]);

    // Squash any instructions younger than the squashed sequence number
    // given.
    while (squash_it != instList[tid].end() &&
           (*squash_it)->seqNum > squashedSeqNum[tid]) {

        DynInstPtr squashed_inst = (*squash_it);

        // Only handle the instruction if it actually is in the IQ and
        // hasn't already been squashed in the IQ.
        if (squashed_inst->threadNumber != tid ||
            squashed_inst->isSquashedInIQ()) {
            --squash_it;
            continue;
        }

        if (!squashed_inst->isIssued() ||
            (squashed_inst->isMemRef() &&
             !squashed_inst->memOpDone)) {

            DPRINTF(IQ, "[tid:%i]: Instruction [sn:%lli] PC %#x "
                    "squashed.\n",
                    tid, squashed_inst->seqNum, squashed_inst->readPC());

            // Remove the instruction from the dependency list.
            if (!squashed_inst->isNonSpeculative() &&
                !squashed_inst->isStoreConditional() &&
                !squashed_inst->isMemBarrier() &&
                !squashed_inst->isWriteBarrier()) {

                for (int src_reg_idx = 0;
                     src_reg_idx < squashed_inst->numSrcRegs();
                     src_reg_idx++)
                {
                    PhysRegIndex src_reg =
                        squashed_inst->renamedSrcRegIdx(src_reg_idx);

                    // Only remove it from the dependency graph if it
                    // was placed there in the first place.

                    // Instead of doing a linked list traversal, we
                    // can just remove these squashed instructions
                    // either at issue time, or when the register is
                    // overwritten. The only downside to this is it
                    // leaves more room for error.

                    if (!squashed_inst->isReadySrcRegIdx(src_reg_idx) &&
                        src_reg < numPhysRegs) {
                        dependGraph.remove(src_reg, squashed_inst);
                    }

                    ++iqSquashedOperandsExamined;
                }
            } else if (!squashed_inst->isStoreConditional() ||
                       !squashed_inst->isCompleted()) {
                NonSpecMapIt ns_inst_it =
                    nonSpecInsts.find(squashed_inst->seqNum);
                if (ns_inst_it == nonSpecInsts.end()) {
                    // A non-speculative instruction missing from the map
                    // should only happen if it faulted.
                    assert(squashed_inst->getFault() != NoFault);
                } else {

                    (*ns_inst_it).second = NULL;

                    nonSpecInsts.erase(ns_inst_it);

                    ++iqSquashedNonSpecRemoved;
                }
            }

            // Might want to also clear out the head of the dependency graph.

            // Mark it as squashed within the IQ.
            squashed_inst->setSquashedInIQ();

            // @todo: Remove this hack where several statuses are set so the
            // inst will flow through the rest of the pipeline.
            squashed_inst->setIssued();
            squashed_inst->setCanCommit();
            squashed_inst->clearInIQ();

            // Update Thread IQ Count
            count[squashed_inst->threadNumber]--;

            ++freeEntries;
        }

        instList[tid].erase(squash_it--);
        ++iqSquashedInstsExamined;
    }
}

template <class Impl>
bool
InstructionQueue<Impl>::addToDependents(DynInstPtr &new_inst)
{
    // Loop through the instruction's source registers, adding
    // them to the dependency list if they are not ready.
    int8_t total_src_regs = new_inst->numSrcRegs();
    bool return_val = false;

    for (int src_reg_idx = 0;
         src_reg_idx < total_src_regs;
         src_reg_idx++)
    {
        // Only add it to the dependency graph if it's not ready.
        if (!new_inst->isReadySrcRegIdx(src_reg_idx)) {
            PhysRegIndex src_reg = new_inst->renamedSrcRegIdx(src_reg_idx);

            // Check the IQ's scoreboard to make sure the register
            // hasn't become ready while the instruction was in flight
            // between stages. Only if it really isn't ready should
            // it be added to the dependency graph.
            if (src_reg >= numPhysRegs) {
                continue;
            } else if (regScoreboard[src_reg] == false) {
                DPRINTF(IQ, "Instruction PC %#x has src reg %i that "
                        "is being added to the dependency chain.\n",
                        new_inst->readPC(), src_reg);

                dependGraph.insert(src_reg, new_inst);

                // Change the return value to indicate that something
                // was added to the dependency graph.
                return_val = true;
            } else {
                DPRINTF(IQ, "Instruction PC %#x has src reg %i that "
                        "became ready before it reached the IQ.\n",
                        new_inst->readPC(), src_reg);
                // Mark a register ready within the instruction.
                new_inst->markSrcRegReady(src_reg_idx);
            }
        }
    }

    return return_val;
}

template <class Impl>
void
InstructionQueue<Impl>::addToProducers(DynInstPtr &new_inst)
{
    // Nothing really needs to be marked when an instruction becomes
    // the producer of a register's value, but for convenience a ptr
    // to the producing instruction will be placed in the head node of
    // the dependency links.
    int8_t total_dest_regs = new_inst->numDestRegs();

    for (int dest_reg_idx = 0;
         dest_reg_idx < total_dest_regs;
         dest_reg_idx++)
    {
        PhysRegIndex dest_reg = new_inst->renamedDestRegIdx(dest_reg_idx);

        // Instructions that use the misc regs will have a reg number
        // higher than the normal physical registers. In this case these
        // registers are not renamed, and there is no need to track
        // dependencies as these instructions must be executed at commit.
        if (dest_reg >= numPhysRegs) {
            continue;
        }

        if (!dependGraph.empty(dest_reg)) {
            dependGraph.dump();
            panic("Dependency graph %i not empty!", dest_reg);
        }

        dependGraph.setInst(dest_reg, new_inst);

        // Mark the scoreboard to say it's not yet ready.
        regScoreboard[dest_reg] = false;
    }
}

template <class Impl>
void
InstructionQueue<Impl>::addIfReady(DynInstPtr &inst)
{
    // If the instruction now has all of its source registers
    // available, then add it to the list of ready instructions.
    if (inst->readyToIssue()) {

        // Add the instruction to the proper ready list.
        if (inst->isMemRef()) {

            DPRINTF(IQ, "Checking if memory instruction can issue.\n");

            // Message to the mem dependence unit that this instruction has
            // its registers ready.
            memDepUnit[inst->threadNumber].regsReady(inst);

            return;
        }

        OpClass op_class = inst->opClass();

        DPRINTF(IQ, "Instruction is ready to issue, putting it onto "
                "the ready list, PC %#x opclass:%i [sn:%lli].\n",
                inst->readPC(), op_class, inst->seqNum);

        readyInsts[op_class].push(inst);

        // Will need to reorder the list if either a queue is not on the list,
        // or it has an older instruction than last time.
        if (!queueOnList[op_class]) {
            addToOrderList(op_class);
        } else if (readyInsts[op_class].top()->seqNum <
                   (*readyIt[op_class]).oldestInst) {
            listOrder.erase(readyIt[op_class]);
            addToOrderList(op_class);
        }
    }
}

template <class Impl>
int
InstructionQueue<Impl>::countInsts()
{
#if 0
    // ksewell: This works but definitely could use a cleaner write
    // with a more intuitive way of counting. Right now it's
    // just brute force ....
    // Change the #if if you want to use this method.
    int total_insts = 0;

    for (int i = 0; i < numThreads; ++i) {
        ListIt count_it = instList[i].begin();

        while (count_it != instList[i].end()) {
            if (!(*count_it)->isSquashed() && !(*count_it)->isSquashedInIQ()) {
                if (!(*count_it)->isIssued()) {
                    ++total_insts;
                } else if ((*count_it)->isMemRef() &&
                           !(*count_it)->memOpDone) {
                    // Loads that have not been marked as executed still count
                    // towards the total instructions.
                    ++total_insts;
                }
            }

            ++count_it;
        }
    }

    return total_insts;
#else
    return numEntries - freeEntries;
#endif
}

template <class Impl>
void
InstructionQueue<Impl>::dumpLists()
{
    for (int i = 0; i < Num_OpClasses; ++i) {
        cprintf("Ready list %i size: %i\n", i, readyInsts[i].size());

        cprintf("\n");
    }

    cprintf("Non speculative list size: %i\n", nonSpecInsts.size());

    NonSpecMapIt non_spec_it = nonSpecInsts.begin();
    NonSpecMapIt non_spec_end_it = nonSpecInsts.end();

    cprintf("Non speculative list: ");

    while (non_spec_it != non_spec_end_it) {
        cprintf("%#x [sn:%lli]", (*non_spec_it).second->readPC(),
                (*non_spec_it).second->seqNum);
        ++non_spec_it;
    }

    cprintf("\n");

    ListOrderIt list_order_it = listOrder.begin();
    ListOrderIt list_order_end_it = listOrder.end();
    int i = 1;

    cprintf("List order: ");

    while (list_order_it != list_order_end_it) {
        cprintf("%i OpClass:%i [sn:%lli] ", i, (*list_order_it).queueType,
                (*list_order_it).oldestInst);

        ++list_order_it;
        ++i;
    }

    cprintf("\n");
}


template <class Impl>
void
InstructionQueue<Impl>::dumpInsts()
{
    for (int i = 0; i < numThreads; ++i) {
        int num = 0;
        int valid_num = 0;
        ListIt inst_list_it = instList[i].begin();

        while (inst_list_it != instList[i].end())
        {
            cprintf("Instruction:%i\n",
                    num);
            if (!(*inst_list_it)->isSquashed()) {
                if (!(*inst_list_it)->isIssued()) {
                    ++valid_num;
                    cprintf("Count:%i\n", valid_num);
                } else if ((*inst_list_it)->isMemRef() &&
                           !(*inst_list_it)->memOpDone) {
                    // Loads that have not been marked as executed
                    // still count towards the total instructions.
                    ++valid_num;
                    cprintf("Count:%i\n", valid_num);
                }
            }

            cprintf("PC:%#x\n[sn:%lli]\n[tid:%i]\n"
                    "Issued:%i\nSquashed:%i\n",
                    (*inst_list_it)->readPC(),
                    (*inst_list_it)->seqNum,
                    (*inst_list_it)->threadNumber,
                    (*inst_list_it)->isIssued(),
                    (*inst_list_it)->isSquashed());

            if ((*inst_list_it)->isMemRef()) {
                cprintf("MemOpDone:%i\n", (*inst_list_it)->memOpDone);
            }

            cprintf("\n");

            inst_list_it++;
            ++num;
        }
    }

    cprintf("Insts to Execute list:\n");

    int num = 0;
    int valid_num = 0;
    ListIt inst_list_it = instsToExecute.begin();

    while (inst_list_it != instsToExecute.end())
    {
        cprintf("Instruction:%i\n",
                num);
        if (!(*inst_list_it)->isSquashed()) {
            if (!(*inst_list_it)->isIssued()) {
                ++valid_num;
                cprintf("Count:%i\n", valid_num);
            } else if ((*inst_list_it)->isMemRef() &&
                       !(*inst_list_it)->memOpDone) {
                // Loads that have not been marked as executed
                // still count towards the total instructions.
                ++valid_num;
                cprintf("Count:%i\n", valid_num);
            }
        }

        cprintf("PC:%#x\n[sn:%lli]\n[tid:%i]\n"
                "Issued:%i\nSquashed:%i\n",
                (*inst_list_it)->readPC(),
                (*inst_list_it)->seqNum,
                (*inst_list_it)->threadNumber,
                (*inst_list_it)->isIssued(),
                (*inst_list_it)->isSquashed());

        if ((*inst_list_it)->isMemRef()) {
            cprintf("MemOpDone:%i\n", (*inst_list_it)->memOpDone);
        }

        cprintf("\n");

        inst_list_it++;
        ++num;
    }
1408 }