Updates for OzoneCPU.
[gem5.git] / cpu / ozone / inst_queue_impl.hh
1 /*
2 * Copyright (c) 2004-2005 The Regents of The University of Michigan
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
 29 // Todo:
 30 // The current ordering allows an instruction to be scheduled the same cycle
 31 // it is added. That could be faked by processing in reverse order, or by
 32 // putting newly added instructions into a separate ready queue that
 33 // scheduleReadyInsts() later merges onto the normal ready queue. That only
 34 // gives a one-cycle delay, however; adding an explicit delay parameter is
 35 // probably more flexible than just running the loop backwards.
36
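// A minimal sketch of the delay-parameter idea above, kept inside #if 0 like
// the other disabled code in this file. The names pendingReady,
// addToSchedDelay, stageReadyInst() and drainPendingReady() are hypothetical
// and introduced purely for illustration.
#if 0
// Staging buffer of (earliest schedulable tick, instruction) pairs, pushed
// in arrival order so the front entry is always the oldest.
std::list<std::pair<Tick, DynInstPtr> > pendingReady;

// Instead of pushing straight onto readyInsts, record when the instruction
// first becomes eligible to be scheduled.
void
stageReadyInst(DynInstPtr &inst)
{
    pendingReady.push_back(std::make_pair(curTick + addToSchedDelay, inst));
}

// Called at the top of scheduleReadyInsts(): promote anything whose delay
// has elapsed onto the normal ready queue.
void
drainPendingReady()
{
    while (!pendingReady.empty() && pendingReady.front().first <= curTick) {
        readyInsts.push(pendingReady.front().second);
        pendingReady.pop_front();
    }
}
#endif
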
37 #include <vector>
38
39 #include "sim/root.hh"
40
41 #include "cpu/ozone/inst_queue.hh"
42 #if 0
43 template <class Impl>
44 InstQueue<Impl>::FUCompletion::FUCompletion(DynInstPtr &_inst,
45 int fu_idx,
46 InstQueue<Impl> *iq_ptr)
47 : Event(&mainEventQueue, Stat_Event_Pri),
48 inst(_inst), fuIdx(fu_idx), iqPtr(iq_ptr)
49 {
50 this->setFlags(Event::AutoDelete);
51 }
52
53 template <class Impl>
54 void
55 InstQueue<Impl>::FUCompletion::process()
56 {
57 iqPtr->processFUCompletion(inst, fuIdx);
58 }
59
60
61 template <class Impl>
62 const char *
63 InstQueue<Impl>::FUCompletion::description()
64 {
65 return "Functional unit completion event";
66 }
67 #endif
68 template <class Impl>
69 InstQueue<Impl>::InstQueue(Params *params)
70 : dcacheInterface(params->dcacheInterface),
71 // fuPool(params->fuPool),
72 numEntries(params->numIQEntries),
73 totalWidth(params->issueWidth),
74 // numPhysIntRegs(params->numPhysIntRegs),
75 // numPhysFloatRegs(params->numPhysFloatRegs),
76 commitToIEWDelay(params->commitToIEWDelay)
77 {
78 // assert(fuPool);
79
80 // numThreads = params->numberOfThreads;
81 numThreads = 1;
82
83 //Initialize thread IQ counts
 84     for (int i = 0; i < numThreads; i++) {
85 count[i] = 0;
86 }
87
88 // Initialize the number of free IQ entries.
89 freeEntries = numEntries;
90
91 // Set the number of physical registers as the number of int + float
92 // numPhysRegs = numPhysIntRegs + numPhysFloatRegs;
93
94 // DPRINTF(IQ, "There are %i physical registers.\n", numPhysRegs);
95
96 //Create an entry for each physical register within the
97 //dependency graph.
98 // dependGraph = new DependencyEntry[numPhysRegs];
99
100 // Resize the register scoreboard.
101 // regScoreboard.resize(numPhysRegs);
102 /*
103 //Initialize Mem Dependence Units
104 for (int i = 0; i < numThreads; i++) {
105 memDepUnit[i].init(params,i);
106 memDepUnit[i].setIQ(this);
107 }
108
109 // Initialize all the head pointers to point to NULL, and all the
110 // entries as unready.
111 // Note that in actuality, the registers corresponding to the logical
112 // registers start off as ready. However this doesn't matter for the
113 // IQ as the instruction should have been correctly told if those
114 // registers are ready in rename. Thus it can all be initialized as
115 // unready.
116 for (int i = 0; i < numPhysRegs; ++i) {
117 dependGraph[i].next = NULL;
118 dependGraph[i].inst = NULL;
119 regScoreboard[i] = false;
120 }
121 */
122 for (int i = 0; i < numThreads; ++i) {
123 squashedSeqNum[i] = 0;
124 }
125 /*
126 for (int i = 0; i < Num_OpClasses; ++i) {
127 queueOnList[i] = false;
128 readyIt[i] = listOrder.end();
129 }
130
131 string policy = params->smtIQPolicy;
132
133 //Convert string to lowercase
134 std::transform(policy.begin(), policy.end(), policy.begin(),
135 (int(*)(int)) tolower);
136
137 //Figure out resource sharing policy
138 if (policy == "dynamic") {
139 iqPolicy = Dynamic;
140
141 //Set Max Entries to Total ROB Capacity
142 for (int i = 0; i < numThreads; i++) {
143 maxEntries[i] = numEntries;
144 }
145
146 } else if (policy == "partitioned") {
147 iqPolicy = Partitioned;
148
149         // @todo: Make this work if part_amt doesn't divide evenly.
150 int part_amt = numEntries / numThreads;
151
152 //Divide ROB up evenly
153 for (int i = 0; i < numThreads; i++) {
154 maxEntries[i] = part_amt;
155 }
156
157 DPRINTF(Fetch, "IQ sharing policy set to Partitioned:"
158 "%i entries per thread.\n",part_amt);
159
160 } else if (policy == "threshold") {
161 iqPolicy = Threshold;
162
163 double threshold = (double)params->smtIQThreshold / 100;
164
165 int thresholdIQ = (int)((double)threshold * numEntries);
166
167 //Divide up by threshold amount
168 for (int i = 0; i < numThreads; i++) {
169 maxEntries[i] = thresholdIQ;
170 }
171
172 DPRINTF(Fetch, "IQ sharing policy set to Threshold:"
173 "%i entries per thread.\n",thresholdIQ);
174 } else {
175         assert(0 && "Invalid IQ Sharing Policy. Options are: {Dynamic, "
176                "Partitioned, Threshold}");
177 }
178 */
179 }
180
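// A minimal sketch of the per-thread limits that the disabled SMT policy code
// above would compute. The helper below and its example numbers are
// hypothetical; they simply restate the dynamic/partitioned/threshold
// arithmetic.
#if 0
unsigned
iqLimitPerThread(const std::string &policy, unsigned num_entries,
                 unsigned num_threads, unsigned threshold_pct)
{
    if (policy == "dynamic")
        return num_entries;                 // every thread may use the full IQ
    else if (policy == "partitioned")
        return num_entries / num_threads;   // e.g. 64 entries / 2 threads = 32
    else                                    // "threshold"
        return (unsigned)((threshold_pct / 100.0) * num_entries); // 25% of 64 = 16
}
#endif
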
181 template <class Impl>
182 InstQueue<Impl>::~InstQueue()
183 {
184 // Clear the dependency graph
185 /*
186 DependencyEntry *curr;
187 DependencyEntry *prev;
188
189 for (int i = 0; i < numPhysRegs; ++i) {
190 curr = dependGraph[i].next;
191
192 while (curr) {
193 DependencyEntry::mem_alloc_counter--;
194
195 prev = curr;
196 curr = prev->next;
197 prev->inst = NULL;
198
199 delete prev;
200 }
201
202 if (dependGraph[i].inst) {
203 dependGraph[i].inst = NULL;
204 }
205
206 dependGraph[i].next = NULL;
207 }
208
209 assert(DependencyEntry::mem_alloc_counter == 0);
210
211 delete [] dependGraph;
212 */
213 }
214
215 template <class Impl>
216 std::string
217 InstQueue<Impl>::name() const
218 {
219 return cpu->name() + ".iq";
220 }
221
222 template <class Impl>
223 void
224 InstQueue<Impl>::regStats()
225 {
226 iqInstsAdded
227 .name(name() + ".iqInstsAdded")
228 .desc("Number of instructions added to the IQ (excludes non-spec)")
229 .prereq(iqInstsAdded);
230
231 iqNonSpecInstsAdded
232 .name(name() + ".iqNonSpecInstsAdded")
233 .desc("Number of non-speculative instructions added to the IQ")
234 .prereq(iqNonSpecInstsAdded);
235
236 // iqIntInstsAdded;
237
238 iqIntInstsIssued
239 .name(name() + ".iqIntInstsIssued")
240 .desc("Number of integer instructions issued")
241 .prereq(iqIntInstsIssued);
242
243 // iqFloatInstsAdded;
244
245 iqFloatInstsIssued
246 .name(name() + ".iqFloatInstsIssued")
247 .desc("Number of float instructions issued")
248 .prereq(iqFloatInstsIssued);
249
250 // iqBranchInstsAdded;
251
252 iqBranchInstsIssued
253 .name(name() + ".iqBranchInstsIssued")
254 .desc("Number of branch instructions issued")
255 .prereq(iqBranchInstsIssued);
256
257 // iqMemInstsAdded;
258
259 iqMemInstsIssued
260 .name(name() + ".iqMemInstsIssued")
261 .desc("Number of memory instructions issued")
262 .prereq(iqMemInstsIssued);
263
264 // iqMiscInstsAdded;
265
266 iqMiscInstsIssued
267 .name(name() + ".iqMiscInstsIssued")
268 .desc("Number of miscellaneous instructions issued")
269 .prereq(iqMiscInstsIssued);
270
271 iqSquashedInstsIssued
272 .name(name() + ".iqSquashedInstsIssued")
273 .desc("Number of squashed instructions issued")
274 .prereq(iqSquashedInstsIssued);
275
276 iqSquashedInstsExamined
277 .name(name() + ".iqSquashedInstsExamined")
278 .desc("Number of squashed instructions iterated over during squash;"
279 " mainly for profiling")
280 .prereq(iqSquashedInstsExamined);
281
282 iqSquashedOperandsExamined
283 .name(name() + ".iqSquashedOperandsExamined")
284 .desc("Number of squashed operands that are examined and possibly "
285 "removed from graph")
286 .prereq(iqSquashedOperandsExamined);
287
288 iqSquashedNonSpecRemoved
289 .name(name() + ".iqSquashedNonSpecRemoved")
290 .desc("Number of squashed non-spec instructions that were removed")
291 .prereq(iqSquashedNonSpecRemoved);
292 /*
293 for ( int i=0; i < numThreads; i++) {
294 // Tell mem dependence unit to reg stats as well.
295 memDepUnit[i].regStats();
296 }
297 */
298 }
299 /*
300 template <class Impl>
301 void
302 InstQueue<Impl>::setActiveThreads(list<unsigned> *at_ptr)
303 {
304 DPRINTF(IQ, "Setting active threads list pointer.\n");
305 activeThreads = at_ptr;
306 }
307 */
308 template <class Impl>
309 void
310 InstQueue<Impl>::setIssueToExecuteQueue(TimeBuffer<IssueStruct> *i2e_ptr)
311 {
312 DPRINTF(IQ, "Set the issue to execute queue.\n");
313 issueToExecuteQueue = i2e_ptr;
314 }
315 /*
316 template <class Impl>
317 void
318 InstQueue<Impl>::setTimeBuffer(TimeBuffer<TimeStruct> *tb_ptr)
319 {
320 DPRINTF(IQ, "Set the time buffer.\n");
321 timeBuffer = tb_ptr;
322
323 fromCommit = timeBuffer->getWire(-commitToIEWDelay);
324 }
325
326 template <class Impl>
327 int
328 InstQueue<Impl>::entryAmount(int num_threads)
329 {
330 if (iqPolicy == Partitioned) {
331 return numEntries / num_threads;
332 } else {
333 return 0;
334 }
335 }
336
337
338 template <class Impl>
339 void
340 InstQueue<Impl>::resetEntries()
341 {
342 if (iqPolicy != Dynamic || numThreads > 1) {
343 int active_threads = (*activeThreads).size();
344
345 list<unsigned>::iterator threads = (*activeThreads).begin();
346 list<unsigned>::iterator list_end = (*activeThreads).end();
347
348 while (threads != list_end) {
349 if (iqPolicy == Partitioned) {
350 maxEntries[*threads++] = numEntries / active_threads;
351 } else if(iqPolicy == Threshold && active_threads == 1) {
352 maxEntries[*threads++] = numEntries;
353 }
354 }
355 }
356 }
357 */
358 template <class Impl>
359 unsigned
360 InstQueue<Impl>::numFreeEntries()
361 {
362 return freeEntries;
363 }
364
365 template <class Impl>
366 unsigned
367 InstQueue<Impl>::numFreeEntries(unsigned tid)
368 {
369 return maxEntries[tid] - count[tid];
370 }
371
372 // Might want to do something more complex if it knows how many instructions
373 // will be issued this cycle.
374 template <class Impl>
375 bool
376 InstQueue<Impl>::isFull()
377 {
378 if (freeEntries == 0) {
379 return(true);
380 } else {
381 return(false);
382 }
383 }
384
385 template <class Impl>
386 bool
387 InstQueue<Impl>::isFull(unsigned tid)
388 {
389 if (numFreeEntries(tid) == 0) {
390 return(true);
391 } else {
392 return(false);
393 }
394 }
395
396 template <class Impl>
397 bool
398 InstQueue<Impl>::hasReadyInsts()
399 {
400 /*
401 if (!listOrder.empty()) {
402 return true;
403 }
404
405 for (int i = 0; i < Num_OpClasses; ++i) {
406 if (!readyInsts[i].empty()) {
407 return true;
408 }
409 }
410
411 return false;
412 */
413     return !readyInsts.empty();
414 }
415
416 template <class Impl>
417 void
418 InstQueue<Impl>::insert(DynInstPtr &new_inst)
419 {
420 // Make sure the instruction is valid
421 assert(new_inst);
422
423 DPRINTF(IQ, "Adding instruction PC %#x to the IQ.\n",
424 new_inst->readPC());
425
426 // Check if there are any free entries. Panic if there are none.
427 // Might want to have this return a fault in the future instead of
428 // panicing.
429 assert(freeEntries != 0);
430
431 instList[new_inst->threadNumber].push_back(new_inst);
432
433 // Decrease the number of free entries.
434 --freeEntries;
435
436 //Mark Instruction as in IQ
437 // new_inst->setInIQ();
438 /*
439 // Look through its source registers (physical regs), and mark any
440 // dependencies.
441 addToDependents(new_inst);
442
443 // Have this instruction set itself as the producer of its destination
444 // register(s).
445 createDependency(new_inst);
446 */
447 // If it's a memory instruction, add it to the memory dependency
448 // unit.
449 // if (new_inst->isMemRef()) {
450 // memDepUnit[new_inst->threadNumber].insert(new_inst);
451 // } else {
452 // If the instruction is ready then add it to the ready list.
453 addIfReady(new_inst);
454 // }
455
456 ++iqInstsAdded;
457
458
459 //Update Thread IQ Count
460 count[new_inst->threadNumber]++;
461
462 assert(freeEntries == (numEntries - countInsts()));
463 }
464
465 template <class Impl>
466 void
467 InstQueue<Impl>::insertNonSpec(DynInstPtr &new_inst)
468 {
469 nonSpecInsts[new_inst->seqNum] = new_inst;
470
471 // @todo: Clean up this code; can do it by setting inst as unable
472 // to issue, then calling normal insert on the inst.
473
474 // Make sure the instruction is valid
475 assert(new_inst);
476
477     DPRINTF(IQ, "Adding non-speculative instruction PC %#x to the IQ.\n",
478 new_inst->readPC());
479
480 // Check if there are any free entries. Panic if there are none.
481 // Might want to have this return a fault in the future instead of
482 // panicing.
483 assert(freeEntries != 0);
484
485 instList[new_inst->threadNumber].push_back(new_inst);
486
487 // Decrease the number of free entries.
488 --freeEntries;
489
490 //Mark Instruction as in IQ
491 // new_inst->setInIQ();
492 /*
493 // Have this instruction set itself as the producer of its destination
494 // register(s).
495 createDependency(new_inst);
496
497 // If it's a memory instruction, add it to the memory dependency
498 // unit.
499 if (new_inst->isMemRef()) {
500 memDepUnit[new_inst->threadNumber].insertNonSpec(new_inst);
501 }
502 */
503 ++iqNonSpecInstsAdded;
504
505 //Update Thread IQ Count
506 count[new_inst->threadNumber]++;
507
508 assert(freeEntries == (numEntries - countInsts()));
509 }
510 /*
511 template <class Impl>
512 void
513 InstQueue<Impl>::advanceTail(DynInstPtr &inst)
514 {
515 // Have this instruction set itself as the producer of its destination
516 // register(s).
517 createDependency(inst);
518 }
519
520 template <class Impl>
521 void
522 InstQueue<Impl>::addToOrderList(OpClass op_class)
523 {
524 assert(!readyInsts[op_class].empty());
525
526 ListOrderEntry queue_entry;
527
528 queue_entry.queueType = op_class;
529
530 queue_entry.oldestInst = readyInsts[op_class].top()->seqNum;
531
532 ListOrderIt list_it = listOrder.begin();
533 ListOrderIt list_end_it = listOrder.end();
534
535 while (list_it != list_end_it) {
536 if ((*list_it).oldestInst > queue_entry.oldestInst) {
537 break;
538 }
539
540 list_it++;
541 }
542
543 readyIt[op_class] = listOrder.insert(list_it, queue_entry);
544 queueOnList[op_class] = true;
545 }
546
547 template <class Impl>
548 void
549 InstQueue<Impl>::moveToYoungerInst(ListOrderIt list_order_it)
550 {
551 // Get iterator of next item on the list
552 // Delete the original iterator
553 // Determine if the next item is either the end of the list or younger
554 // than the new instruction. If so, then add in a new iterator right here.
555 // If not, then move along.
556 ListOrderEntry queue_entry;
557 OpClass op_class = (*list_order_it).queueType;
558 ListOrderIt next_it = list_order_it;
559
560 ++next_it;
561
562 queue_entry.queueType = op_class;
563 queue_entry.oldestInst = readyInsts[op_class].top()->seqNum;
564
565 while (next_it != listOrder.end() &&
566 (*next_it).oldestInst < queue_entry.oldestInst) {
567 ++next_it;
568 }
569
570 readyIt[op_class] = listOrder.insert(next_it, queue_entry);
571 }
572
573 template <class Impl>
574 void
575 InstQueue<Impl>::processFUCompletion(DynInstPtr &inst, int fu_idx)
576 {
577 // The CPU could have been sleeping until this op completed (*extremely*
578 // long latency op). Wake it if it was. This may be overkill.
579 iewStage->wakeCPU();
580
581 fuPool->freeUnit(fu_idx);
582
583 int &size = issueToExecuteQueue->access(0)->size;
584
585 issueToExecuteQueue->access(0)->insts[size++] = inst;
586 }
587 */
588 // @todo: Figure out a better way to remove the squashed items from the
589 // lists. Checking the top item of each list to see if it's squashed
590 // wastes time and forces jumps.
591 template <class Impl>
592 void
593 InstQueue<Impl>::scheduleReadyInsts()
594 {
595 DPRINTF(IQ, "Attempting to schedule ready instructions from "
596 "the IQ.\n");
597
598 // IssueStruct *i2e_info = issueToExecuteQueue->access(0);
599 /*
600 // Will need to reorder the list if either a queue is not on the list,
601 // or it has an older instruction than last time.
602 for (int i = 0; i < Num_OpClasses; ++i) {
603 if (!readyInsts[i].empty()) {
604 if (!queueOnList[i]) {
605 addToOrderList(OpClass(i));
606 } else if (readyInsts[i].top()->seqNum <
607 (*readyIt[i]).oldestInst) {
608 listOrder.erase(readyIt[i]);
609 addToOrderList(OpClass(i));
610 }
611 }
612 }
613
614 // Have iterator to head of the list
615 // While I haven't exceeded bandwidth or reached the end of the list,
616 // Try to get a FU that can do what this op needs.
617 // If successful, change the oldestInst to the new top of the list, put
618 // the queue in the proper place in the list.
619 // Increment the iterator.
620 // This will avoid trying to schedule a certain op class if there are no
621 // FUs that handle it.
622 ListOrderIt order_it = listOrder.begin();
623 ListOrderIt order_end_it = listOrder.end();
624 int total_issued = 0;
625 int exec_queue_slot = i2e_info->size;
626
627 while (exec_queue_slot < totalWidth && order_it != order_end_it) {
628 OpClass op_class = (*order_it).queueType;
629
630 assert(!readyInsts[op_class].empty());
631
632 DynInstPtr issuing_inst = readyInsts[op_class].top();
633
634 assert(issuing_inst->seqNum == (*order_it).oldestInst);
635
636 if (issuing_inst->isSquashed()) {
637 readyInsts[op_class].pop();
638
639 if (!readyInsts[op_class].empty()) {
640 moveToYoungerInst(order_it);
641 } else {
642 readyIt[op_class] = listOrder.end();
643 queueOnList[op_class] = false;
644 }
645
646 listOrder.erase(order_it++);
647
648 ++iqSquashedInstsIssued;
649
650 continue;
651 }
652
653 int idx = fuPool->getUnit(op_class);
654
655 if (idx != -1) {
656 int op_latency = fuPool->getOpLatency(op_class);
657
658 if (op_latency == 1) {
659 i2e_info->insts[exec_queue_slot++] = issuing_inst;
660 i2e_info->size++;
661
662 // Add the FU onto the list of FU's to be freed next cycle.
663 fuPool->freeUnit(idx);
664 } else {
665 int issue_latency = fuPool->getIssueLatency(op_class);
666
667 if (issue_latency > 1) {
668 // Generate completion event for the FU
669 FUCompletion *execution = new FUCompletion(issuing_inst,
670 idx, this);
671
672 execution->schedule(curTick + issue_latency - 1);
673 } else {
674 i2e_info->insts[exec_queue_slot++] = issuing_inst;
675 i2e_info->size++;
676
677 // Add the FU onto the list of FU's to be freed next cycle.
678 fuPool->freeUnit(idx);
679 }
680 }
681
682 DPRINTF(IQ, "Thread %i: Issuing instruction PC %#x "
683 "[sn:%lli]\n",
684 issuing_inst->threadNumber, issuing_inst->readPC(),
685 issuing_inst->seqNum);
686
687 readyInsts[op_class].pop();
688
689 if (!readyInsts[op_class].empty()) {
690 moveToYoungerInst(order_it);
691 } else {
692 readyIt[op_class] = listOrder.end();
693 queueOnList[op_class] = false;
694 }
695
696 issuing_inst->setIssued();
697 ++total_issued;
698
699 if (!issuing_inst->isMemRef()) {
700 // Memory instructions can not be freed from the IQ until they
701 // complete.
702 ++freeEntries;
703 count[issuing_inst->threadNumber]--;
704 issuing_inst->removeInIQ();
705 } else {
706 memDepUnit[issuing_inst->threadNumber].issue(issuing_inst);
707 }
708
709 listOrder.erase(order_it++);
710 } else {
711 ++order_it;
712 }
713 }
714
715 if (total_issued) {
716 cpu->activityThisCycle();
717 } else {
718 DPRINTF(IQ, "Not able to schedule any instructions.\n");
719 }
720 */
721 }
722
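// The body above is disabled along with the FU pool. Below is a minimal,
// hypothetical sketch of an FU-less scheduling pass for this simplified IQ,
// assuming readyInsts pops oldest-first; it reuses only names that already
// appear in this file and is not the actual OzoneCPU implementation.
#if 0
template <class Impl>
void
InstQueue<Impl>::scheduleReadyInsts()
{
    IssueStruct *i2e_info = issueToExecuteQueue->access(0);

    unsigned total_issued = 0;

    while (!readyInsts.empty() && total_issued < totalWidth) {
        DynInstPtr issuing_inst = readyInsts.top();

        readyInsts.pop();

        // Squashed instructions are dropped instead of being sent to execute.
        if (issuing_inst->isSquashed()) {
            ++iqSquashedInstsIssued;
            continue;
        }

        // Hand the instruction to the execute stage this cycle.
        i2e_info->insts[i2e_info->size++] = issuing_inst;

        issuing_inst->setIssued();
        ++total_issued;

        // Non-memory instructions free their IQ entry at issue; memory
        // instructions hold theirs until completeMemInst() is called.
        if (!issuing_inst->isMemRef()) {
            ++freeEntries;
            count[issuing_inst->threadNumber]--;
        }
    }
}
#endif
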
723 template <class Impl>
724 void
725 InstQueue<Impl>::scheduleNonSpec(const InstSeqNum &inst)
726 {
727 DPRINTF(IQ, "Marking nonspeculative instruction with sequence "
728 "number %i as ready to execute.\n", inst);
729
730 NonSpecMapIt inst_it = nonSpecInsts.find(inst);
731
732 assert(inst_it != nonSpecInsts.end());
733
734 // unsigned tid = (*inst_it).second->threadNumber;
735
736 // Mark this instruction as ready to issue.
737 (*inst_it).second->setCanIssue();
738
739 // Now schedule the instruction.
740 // if (!(*inst_it).second->isMemRef()) {
741 addIfReady((*inst_it).second);
742 // } else {
743 // memDepUnit[tid].nonSpecInstReady((*inst_it).second);
744 // }
745
746 nonSpecInsts.erase(inst_it);
747 }
748
749 template <class Impl>
750 void
751 InstQueue<Impl>::commit(const InstSeqNum &inst, unsigned tid)
752 {
753     // @todo: Does this need to go through each thread?
754 DPRINTF(IQ, "[tid:%i]: Committing instructions older than [sn:%i]\n",
755 tid,inst);
756
757 ListIt iq_it = instList[tid].begin();
758
759 while (iq_it != instList[tid].end() &&
760 (*iq_it)->seqNum <= inst) {
761 ++iq_it;
762 instList[tid].pop_front();
763 }
764
765 assert(freeEntries == (numEntries - countInsts()));
766 }
767
768 template <class Impl>
769 void
770 InstQueue<Impl>::wakeDependents(DynInstPtr &completed_inst)
771 {
772 DPRINTF(IQ, "Waking dependents of completed instruction.\n");
773 // Look at the physical destination register of the DynInst
774 // and look it up on the dependency graph. Then mark as ready
775 // any instructions within the instruction queue.
776 /*
777 DependencyEntry *curr;
778 DependencyEntry *prev;
779 */
780 // Tell the memory dependence unit to wake any dependents on this
781 // instruction if it is a memory instruction. Also complete the memory
782 // instruction at this point since we know it executed fine.
783 // @todo: Might want to rename "completeMemInst" to
784 // something that indicates that it won't need to be replayed, and call
785 // this earlier. Might not be a big deal.
786 if (completed_inst->isMemRef()) {
787 // memDepUnit[completed_inst->threadNumber].wakeDependents(completed_inst);
788 completeMemInst(completed_inst);
789 }
790 completed_inst->wakeDependents();
791 /*
792 for (int dest_reg_idx = 0;
793 dest_reg_idx < completed_inst->numDestRegs();
794 dest_reg_idx++)
795 {
796 PhysRegIndex dest_reg =
797 completed_inst->renamedDestRegIdx(dest_reg_idx);
798
799 // Special case of uniq or control registers. They are not
800 // handled by the IQ and thus have no dependency graph entry.
801 // @todo Figure out a cleaner way to handle this.
802 if (dest_reg >= numPhysRegs) {
803 continue;
804 }
805
806 DPRINTF(IQ, "Waking any dependents on register %i.\n",
807 (int) dest_reg);
808
809 //Maybe abstract this part into a function.
810 //Go through the dependency chain, marking the registers as ready
811 //within the waiting instructions.
812
813 curr = dependGraph[dest_reg].next;
814
815 while (curr) {
816 DPRINTF(IQ, "Waking up a dependent instruction, PC%#x.\n",
817 curr->inst->readPC());
818
819 // Might want to give more information to the instruction
820 // so that it knows which of its source registers is ready.
821 // However that would mean that the dependency graph entries
822 // would need to hold the src_reg_idx.
823 curr->inst->markSrcRegReady();
824
825 addIfReady(curr->inst);
826
827 DependencyEntry::mem_alloc_counter--;
828
829 prev = curr;
830 curr = prev->next;
831 prev->inst = NULL;
832
833 delete prev;
834 }
835
836 // Reset the head node now that all of its dependents have been woken
837 // up.
838 dependGraph[dest_reg].next = NULL;
839 dependGraph[dest_reg].inst = NULL;
840
841 // Mark the scoreboard as having that register ready.
842 regScoreboard[dest_reg] = true;
843 }
844 */
845 }
846
847 template <class Impl>
848 void
849 InstQueue<Impl>::addReadyMemInst(DynInstPtr &ready_inst)
850 {
851 OpClass op_class = ready_inst->opClass();
852
853 readyInsts.push(ready_inst);
854
855 DPRINTF(IQ, "Instruction is ready to issue, putting it onto "
856 "the ready list, PC %#x opclass:%i [sn:%lli].\n",
857 ready_inst->readPC(), op_class, ready_inst->seqNum);
858 }
859 /*
860 template <class Impl>
861 void
862 InstQueue<Impl>::rescheduleMemInst(DynInstPtr &resched_inst)
863 {
864 memDepUnit[resched_inst->threadNumber].reschedule(resched_inst);
865 }
866
867 template <class Impl>
868 void
869 InstQueue<Impl>::replayMemInst(DynInstPtr &replay_inst)
870 {
871 memDepUnit[replay_inst->threadNumber].replay(replay_inst);
872 }
873 */
874 template <class Impl>
875 void
876 InstQueue<Impl>::completeMemInst(DynInstPtr &completed_inst)
877 {
878 int tid = completed_inst->threadNumber;
879
880 DPRINTF(IQ, "Completing mem instruction PC:%#x [sn:%lli]\n",
881 completed_inst->readPC(), completed_inst->seqNum);
882
883 ++freeEntries;
884
885 // completed_inst->memOpDone = true;
886
887 // memDepUnit[tid].completed(completed_inst);
888
889 count[tid]--;
890 }
891 /*
892 template <class Impl>
893 void
894 InstQueue<Impl>::violation(DynInstPtr &store,
895 DynInstPtr &faulting_load)
896 {
897 memDepUnit[store->threadNumber].violation(store, faulting_load);
898 }
899 */
900 template <class Impl>
901 void
902 InstQueue<Impl>::squash(unsigned tid)
903 {
904 DPRINTF(IQ, "[tid:%i]: Starting to squash instructions in "
905 "the IQ.\n", tid);
906
907 // Read instruction sequence number of last instruction out of the
908 // time buffer.
909 // squashedSeqNum[tid] = fromCommit->commitInfo[tid].doneSeqNum;
910
911     // Call doSquash if there are insts in the IQ; only then is it safe
912     // to point the squash iterator at the tail (decrementing end() on an
913     // empty instList is undefined).
914     if (count[tid] > 0) {
915         squashIt[tid] = instList[tid].end();
916         --squashIt[tid];
917         doSquash(tid);
918     }
919
920 // Also tell the memory dependence unit to squash.
921 // memDepUnit[tid].squash(squashedSeqNum[tid], tid);
922 }
923
924 template <class Impl>
925 void
926 InstQueue<Impl>::doSquash(unsigned tid)
927 {
928 // Make sure the squashed sequence number is valid.
929 assert(squashedSeqNum[tid] != 0);
930
931 DPRINTF(IQ, "[tid:%i]: Squashing until sequence number %i!\n",
932 tid, squashedSeqNum[tid]);
933
934 // Squash any instructions younger than the squashed sequence number
935 // given.
936 while (squashIt[tid] != instList[tid].end() &&
937 (*squashIt[tid])->seqNum > squashedSeqNum[tid]) {
938
939 DynInstPtr squashed_inst = (*squashIt[tid]);
940
941 // Only handle the instruction if it actually is in the IQ and
942 // hasn't already been squashed in the IQ.
943 if (squashed_inst->threadNumber != tid ||
944 squashed_inst->isSquashedInIQ()) {
945 --squashIt[tid];
946 continue;
947 }
948
949 if (!squashed_inst->isIssued() ||
950 (squashed_inst->isMemRef()/* &&
951 !squashed_inst->memOpDone*/)) {
952
953 // Remove the instruction from the dependency list.
954 if (!squashed_inst->isNonSpeculative()) {
955 /*
956 for (int src_reg_idx = 0;
957 src_reg_idx < squashed_inst->numSrcRegs();
958 src_reg_idx++)
959 {
960 PhysRegIndex src_reg =
961 squashed_inst->renamedSrcRegIdx(src_reg_idx);
962
963 // Only remove it from the dependency graph if it was
964 // placed there in the first place.
965 // HACK: This assumes that instructions woken up from the
966 // dependency chain aren't informed that a specific src
967 // register has become ready. This may not always be true
968 // in the future.
969 // Instead of doing a linked list traversal, we can just
970 // remove these squashed instructions either at issue time,
971 // or when the register is overwritten. The only downside
972 // to this is it leaves more room for error.
973
974 if (!squashed_inst->isReadySrcRegIdx(src_reg_idx) &&
975 src_reg < numPhysRegs) {
976 dependGraph[src_reg].remove(squashed_inst);
977 }
978
979
980 ++iqSquashedOperandsExamined;
981 }
982 */
983 // Might want to remove producers as well.
984 } else {
985                 // Simply erase the mapping; erase() releases the
986                 // DynInstPtr, so there is no need to NULL it out first.
987                 nonSpecInsts.erase(squashed_inst->seqNum);
988
989 ++iqSquashedNonSpecRemoved;
990 }
991
992 // Might want to also clear out the head of the dependency graph.
993
994 // Mark it as squashed within the IQ.
995 squashed_inst->setSquashedInIQ();
996
997 // @todo: Remove this hack where several statuses are set so the
998 // inst will flow through the rest of the pipeline.
999 squashed_inst->setIssued();
1000 squashed_inst->setCanCommit();
1001 // squashed_inst->removeInIQ();
1002
1003 //Update Thread IQ Count
1004 count[squashed_inst->threadNumber]--;
1005
1006 ++freeEntries;
1007
1008 if (numThreads > 1) {
1009 DPRINTF(IQ, "[tid:%i]: Instruction PC %#x squashed.\n",
1010 tid, squashed_inst->readPC());
1011 } else {
1012 DPRINTF(IQ, "Instruction PC %#x squashed.\n",
1013 squashed_inst->readPC());
1014 }
1015 }
1016
1017 --squashIt[tid];
1018 ++iqSquashedInstsExamined;
1019 }
1020 }
1021 /*
1022 template <class Impl>
1023 void
1024 InstQueue<Impl>::DependencyEntry::insert(DynInstPtr &new_inst)
1025 {
1026 //Add this new, dependent instruction at the head of the dependency
1027 //chain.
1028
1029 // First create the entry that will be added to the head of the
1030 // dependency chain.
1031 DependencyEntry *new_entry = new DependencyEntry;
1032 new_entry->next = this->next;
1033 new_entry->inst = new_inst;
1034
1035 // Then actually add it to the chain.
1036 this->next = new_entry;
1037
1038 ++mem_alloc_counter;
1039 }
1040
1041 template <class Impl>
1042 void
1043 InstQueue<Impl>::DependencyEntry::remove(DynInstPtr &inst_to_remove)
1044 {
1045 DependencyEntry *prev = this;
1046 DependencyEntry *curr = this->next;
1047
1048 // Make sure curr isn't NULL. Because this instruction is being
1049 // removed from a dependency list, it must have been placed there at
1050 // an earlier time. The dependency chain should not be empty,
1051 // unless the instruction dependent upon it is already ready.
1052 if (curr == NULL) {
1053 return;
1054 }
1055
1056 // Find the instruction to remove within the dependency linked list.
1057 while (curr->inst != inst_to_remove) {
1058 prev = curr;
1059 curr = curr->next;
1060
1061 assert(curr != NULL);
1062 }
1063
1064 // Now remove this instruction from the list.
1065 prev->next = curr->next;
1066
1067 --mem_alloc_counter;
1068
1069 // Could push this off to the destructor of DependencyEntry
1070 curr->inst = NULL;
1071
1072 delete curr;
1073 }
1074
1075 template <class Impl>
1076 bool
1077 InstQueue<Impl>::addToDependents(DynInstPtr &new_inst)
1078 {
1079 // Loop through the instruction's source registers, adding
1080 // them to the dependency list if they are not ready.
1081 int8_t total_src_regs = new_inst->numSrcRegs();
1082 bool return_val = false;
1083
1084 for (int src_reg_idx = 0;
1085 src_reg_idx < total_src_regs;
1086 src_reg_idx++)
1087 {
1088 // Only add it to the dependency graph if it's not ready.
1089 if (!new_inst->isReadySrcRegIdx(src_reg_idx)) {
1090 PhysRegIndex src_reg = new_inst->renamedSrcRegIdx(src_reg_idx);
1091
1092 // Check the IQ's scoreboard to make sure the register
1093 // hasn't become ready while the instruction was in flight
1094 // between stages. Only if it really isn't ready should
1095 // it be added to the dependency graph.
1096 if (src_reg >= numPhysRegs) {
1097 continue;
1098 } else if (regScoreboard[src_reg] == false) {
1099 DPRINTF(IQ, "Instruction PC %#x has src reg %i that "
1100 "is being added to the dependency chain.\n",
1101 new_inst->readPC(), src_reg);
1102
1103 dependGraph[src_reg].insert(new_inst);
1104
1105 // Change the return value to indicate that something
1106 // was added to the dependency graph.
1107 return_val = true;
1108 } else {
1109 DPRINTF(IQ, "Instruction PC %#x has src reg %i that "
1110 "became ready before it reached the IQ.\n",
1111 new_inst->readPC(), src_reg);
1112 // Mark a register ready within the instruction.
1113 new_inst->markSrcRegReady();
1114 }
1115 }
1116 }
1117
1118 return return_val;
1119 }
1120
1121 template <class Impl>
1122 void
1123 InstQueue<Impl>::createDependency(DynInstPtr &new_inst)
1124 {
1125 //Actually nothing really needs to be marked when an
1126 //instruction becomes the producer of a register's value,
1127 //but for convenience a ptr to the producing instruction will
1128 //be placed in the head node of the dependency links.
1129 int8_t total_dest_regs = new_inst->numDestRegs();
1130
1131 for (int dest_reg_idx = 0;
1132 dest_reg_idx < total_dest_regs;
1133 dest_reg_idx++)
1134 {
1135 PhysRegIndex dest_reg = new_inst->renamedDestRegIdx(dest_reg_idx);
1136
1137 // Instructions that use the misc regs will have a reg number
1138 // higher than the normal physical registers. In this case these
1139 // registers are not renamed, and there is no need to track
1140 // dependencies as these instructions must be executed at commit.
1141 if (dest_reg >= numPhysRegs) {
1142 continue;
1143 }
1144
1145 if (dependGraph[dest_reg].next) {
1146 dumpDependGraph();
1147 panic("Dependency graph %i not empty!", dest_reg);
1148 }
1149
1150 dependGraph[dest_reg].inst = new_inst;
1151
1152 // Mark the scoreboard to say it's not yet ready.
1153 regScoreboard[dest_reg] = false;
1154 }
1155 }
1156 */
1157 template <class Impl>
1158 void
1159 InstQueue<Impl>::addIfReady(DynInstPtr &inst)
1160 {
1161 //If the instruction now has all of its source registers
1162 // available, then add it to the list of ready instructions.
1163 if (inst->readyToIssue()) {
1164
1165 //Add the instruction to the proper ready list.
1166 if (inst->isMemRef()) {
1167
1168 DPRINTF(IQ, "Checking if memory instruction can issue.\n");
1169
1170 // Message to the mem dependence unit that this instruction has
1171 // its registers ready.
1172
1173 // memDepUnit[inst->threadNumber].regsReady(inst);
1174
1175 return;
1176 }
1177
1178 OpClass op_class = inst->opClass();
1179
1180 DPRINTF(IQ, "Instruction is ready to issue, putting it onto "
1181 "the ready list, PC %#x opclass:%i [sn:%lli].\n",
1182 inst->readPC(), op_class, inst->seqNum);
1183
1184 readyInsts.push(inst);
1185 }
1186 }
1187
1188 template <class Impl>
1189 int
1190 InstQueue<Impl>::countInsts()
1191 {
1192     // ksewell: This works, but could definitely use a cleaner rewrite
1193     // with a more intuitive way of counting. Right now it's just
1194     // brute force.
1195
1196 #if 0
1197 int total_insts = 0;
1198
1199 for (int i = 0; i < numThreads; ++i) {
1200 ListIt count_it = instList[i].begin();
1201
1202 while (count_it != instList[i].end()) {
1203 if (!(*count_it)->isSquashed() && !(*count_it)->isSquashedInIQ()) {
1204 if (!(*count_it)->isIssued()) {
1205 ++total_insts;
1206 } else if ((*count_it)->isMemRef() &&
1207 !(*count_it)->memOpDone) {
1208 // Loads that have not been marked as executed still count
1209 // towards the total instructions.
1210 ++total_insts;
1211 }
1212 }
1213
1214 ++count_it;
1215 }
1216 }
1217
1218 return total_insts;
1219 #else
1220 return numEntries - freeEntries;
1221 #endif
1222 }
1223 /*
1224 template <class Impl>
1225 void
1226 InstQueue<Impl>::dumpDependGraph()
1227 {
1228 DependencyEntry *curr;
1229
1230 for (int i = 0; i < numPhysRegs; ++i)
1231 {
1232 curr = &dependGraph[i];
1233
1234 if (curr->inst) {
1235 cprintf("dependGraph[%i]: producer: %#x [sn:%lli] consumer: ",
1236 i, curr->inst->readPC(), curr->inst->seqNum);
1237 } else {
1238 cprintf("dependGraph[%i]: No producer. consumer: ", i);
1239 }
1240
1241 while (curr->next != NULL) {
1242 curr = curr->next;
1243
1244 cprintf("%#x [sn:%lli] ",
1245 curr->inst->readPC(), curr->inst->seqNum);
1246 }
1247
1248 cprintf("\n");
1249 }
1250 }
1251 */
1252 template <class Impl>
1253 void
1254 InstQueue<Impl>::dumpLists()
1255 {
1256     // readyInsts is a single queue in this IQ rather than one list per
1257     // op class, so report its size once instead of once per op class.
1258     cprintf("Ready list size: %i\n", readyInsts.size());
1259 
1260     cprintf("\n");
1261
1262 cprintf("Non speculative list size: %i\n", nonSpecInsts.size());
1263
1264 NonSpecMapIt non_spec_it = nonSpecInsts.begin();
1265 NonSpecMapIt non_spec_end_it = nonSpecInsts.end();
1266
1267 cprintf("Non speculative list: ");
1268
1269 while (non_spec_it != non_spec_end_it) {
1270         cprintf("%#x [sn:%lli] ", (*non_spec_it).second->readPC(),
1271 (*non_spec_it).second->seqNum);
1272 ++non_spec_it;
1273 }
1274
1275 cprintf("\n");
1276 /*
1277 ListOrderIt list_order_it = listOrder.begin();
1278 ListOrderIt list_order_end_it = listOrder.end();
1279 int i = 1;
1280
1281 cprintf("List order: ");
1282
1283 while (list_order_it != list_order_end_it) {
1284 cprintf("%i OpClass:%i [sn:%lli] ", i, (*list_order_it).queueType,
1285 (*list_order_it).oldestInst);
1286
1287 ++list_order_it;
1288 ++i;
1289 }
1290 */
1291 cprintf("\n");
1292 }
1293
1294
1295 template <class Impl>
1296 void
1297 InstQueue<Impl>::dumpInsts()
1298 {
1299 for (int i = 0; i < numThreads; ++i) {
1300 // int num = 0;
1301 // int valid_num = 0;
1302 /*
1303 ListIt inst_list_it = instList[i].begin();
1304
1305 while (inst_list_it != instList[i].end())
1306 {
1307 cprintf("Instruction:%i\n",
1308 num);
1309 if (!(*inst_list_it)->isSquashed()) {
1310 if (!(*inst_list_it)->isIssued()) {
1311 ++valid_num;
1312 cprintf("Count:%i\n", valid_num);
1313 } else if ((*inst_list_it)->isMemRef() &&
1314 !(*inst_list_it)->memOpDone) {
1315 // Loads that have not been marked as executed still count
1316 // towards the total instructions.
1317 ++valid_num;
1318 cprintf("Count:%i\n", valid_num);
1319 }
1320 }
1321
1322 cprintf("PC:%#x\n[sn:%lli]\n[tid:%i]\n"
1323 "Issued:%i\nSquashed:%i\n",
1324 (*inst_list_it)->readPC(),
1325 (*inst_list_it)->seqNum,
1326 (*inst_list_it)->threadNumber,
1327 (*inst_list_it)->isIssued(),
1328 (*inst_list_it)->isSquashed());
1329
1330 if ((*inst_list_it)->isMemRef()) {
1331 cprintf("MemOpDone:%i\n", (*inst_list_it)->memOpDone);
1332 }
1333
1334 cprintf("\n");
1335
1336 inst_list_it++;
1337 ++num;
1338 }
1339 */
1340 }
1341 }