/*
 * Copyright (c) 2006 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Kevin Lim
 */

#include "arch/faults.hh"
#include "arch/isa_traits.hh"
#include "base/statistics.hh"
#include "cpu/thread_context.hh"
#include "cpu/exetrace.hh"
#include "cpu/ozone/front_end.hh"
#include "mem/mem_interface.hh"

using namespace TheISA;

template <class Impl>
FrontEnd<Impl>::FrontEnd(Params *params)
    : branchPred(params),
      icacheInterface(params->icacheInterface),
      instBufferSize(0),
      maxInstBufferSize(params->maxInstBufferSize),
      width(params->frontEndWidth),
      freeRegs(params->numPhysicalRegs),
      numPhysRegs(params->numPhysicalRegs),
      serializeNext(false),
      interruptPending(false)
{
    switchedOut = false;

    status = Idle;

    memReq = NULL;
    // Size of cache block.
    cacheBlkSize = icacheInterface ? icacheInterface->getBlockSize() : 64;

    assert(isPowerOf2(cacheBlkSize));

    // Create mask to get rid of offset bits.
    cacheBlkMask = (cacheBlkSize - 1);
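    // For example, with the default 64-byte block this mask is 0x3f:
    // (PC & cacheBlkMask) gives the byte offset of PC within the block,
    // and (PC & ~cacheBlkMask) gives the block-aligned address.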

    // Create space to store a cache line.
    cacheData = new uint8_t[cacheBlkSize];

    fetchCacheLineNextCycle = true;

    cacheBlkValid = false;

#if !FULL_SYSTEM
//    pTable = params->pTable;
#endif
    fetchFault = NoFault;
}

template <class Impl>
std::string
FrontEnd<Impl>::name() const
{
    return cpu->name() + ".frontend";
}

template <class Impl>
void
FrontEnd<Impl>::setCommBuffer(TimeBuffer<CommStruct> *_comm)
{
    comm = _comm;
    // @todo: Hardcoded for now. Allow this to be set by a latency.
    fromCommit = comm->getWire(-1);
}
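
// A rough sketch of how the owning CPU might wire up and drive the front end
// (illustrative only; the actual call sites live in the Ozone CPU model):
//
//     frontEnd->setCommBuffer(&commBuffer);  // feedback path from commit
//     frontEnd->setTC(threadContext);
//     frontEnd->regStats();
//
//     // Each cycle the CPU ticks the front end, and the back end then
//     // drains renamed instructions out of the instruction buffer:
//     frontEnd->tick();
//     DynInstPtr inst = frontEnd->getInst();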

template <class Impl>
void
FrontEnd<Impl>::setTC(ThreadContext *tc_ptr)
{
    tc = tc_ptr;
}

template <class Impl>
void
FrontEnd<Impl>::regStats()
{
    icacheStallCycles
        .name(name() + ".icacheStallCycles")
        .desc("Number of cycles fetch is stalled on an Icache miss")
        .prereq(icacheStallCycles);

    fetchedInsts
        .name(name() + ".fetchedInsts")
        .desc("Number of instructions fetch has processed")
        .prereq(fetchedInsts);

    fetchedBranches
        .name(name() + ".fetchedBranches")
        .desc("Number of fetched branches")
        .prereq(fetchedBranches);

    predictedBranches
        .name(name() + ".predictedBranches")
        .desc("Number of branches that fetch has predicted taken")
        .prereq(predictedBranches);

    fetchCycles
        .name(name() + ".fetchCycles")
        .desc("Number of cycles fetch has run and was not squashing or"
              " blocked")
        .prereq(fetchCycles);

    fetchIdleCycles
        .name(name() + ".fetchIdleCycles")
        .desc("Number of cycles fetch was idle")
        .prereq(fetchIdleCycles);

    fetchSquashCycles
        .name(name() + ".fetchSquashCycles")
        .desc("Number of cycles fetch has spent squashing")
        .prereq(fetchSquashCycles);

    fetchBlockedCycles
        .name(name() + ".fetchBlockedCycles")
        .desc("Number of cycles fetch has spent blocked")
        .prereq(fetchBlockedCycles);

    fetchedCacheLines
        .name(name() + ".fetchedCacheLines")
        .desc("Number of cache lines fetched")
        .prereq(fetchedCacheLines);

    fetchIcacheSquashes
        .name(name() + ".fetchIcacheSquashes")
        .desc("Number of outstanding Icache misses that were squashed")
        .prereq(fetchIcacheSquashes);

    fetchNisnDist
        .init(/* base value */ 0,
              /* last value */ width,
              /* bucket size */ 1)
        .name(name() + ".rateDist")
        .desc("Number of instructions fetched each cycle (Total)")
        .flags(Stats::pdf);

    idleRate
        .name(name() + ".idleRate")
        .desc("Percent of cycles fetch was idle")
        .prereq(idleRate);
    idleRate = fetchIdleCycles * 100 / cpu->numCycles;

    branchRate
        .name(name() + ".branchRate")
        .desc("Number of branch fetches per cycle")
        .flags(Stats::total);
    branchRate = fetchedBranches / cpu->numCycles;

    fetchRate
        .name(name() + ".rate")
        .desc("Number of inst fetches per cycle")
        .flags(Stats::total);
    fetchRate = fetchedInsts / cpu->numCycles;

    IFQCount
        .name(name() + ".IFQ:count")
        .desc("cumulative IFQ occupancy")
        ;

    IFQFcount
        .name(name() + ".IFQ:fullCount")
        .desc("cumulative IFQ full count")
        .flags(Stats::total)
        ;

    IFQOccupancy
        .name(name() + ".IFQ:occupancy")
        .desc("avg IFQ occupancy (inst's)")
        ;
    IFQOccupancy = IFQCount / cpu->numCycles;

    IFQLatency
        .name(name() + ".IFQ:latency")
        .desc("avg IFQ occupant latency (cycle's)")
        .flags(Stats::total)
        ;

    IFQFullRate
        .name(name() + ".IFQ:fullRate")
        .desc("fraction of time (cycles) IFQ was full")
        .flags(Stats::total)
        ;
    IFQFullRate = IFQFcount * Stats::constant(100) / cpu->numCycles;

    dispatchCountStat
        .name(name() + ".DIS:count")
        .desc("cumulative count of dispatched insts")
        .flags(Stats::total)
        ;

    dispatchedSerializing
        .name(name() + ".DIS:serializingInsts")
        .desc("count of serializing insts dispatched")
        .flags(Stats::total)
        ;

    dispatchedTempSerializing
        .name(name() + ".DIS:tempSerializingInsts")
        .desc("count of temporary serializing insts dispatched")
        .flags(Stats::total)
        ;

    dispatchSerializeStallCycles
        .name(name() + ".DIS:serializeStallCycles")
        .desc("count of cycles dispatch stalled for serializing inst")
        .flags(Stats::total)
        ;

    dispatchRate
        .name(name() + ".DIS:rate")
        .desc("dispatched insts per cycle")
        .flags(Stats::total)
        ;
    dispatchRate = dispatchCountStat / cpu->numCycles;

    regIntFull
        .name(name() + ".REG:int:full")
        .desc("number of cycles where there were no INT registers")
        ;

    regFpFull
        .name(name() + ".REG:fp:full")
        .desc("number of cycles where there were no FP registers")
        ;
    IFQLatency = IFQOccupancy / dispatchRate;

    branchPred.regStats();
}

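// Per-cycle operation of the front end: update the branch predictor with the
// committed sequence number, record IFQ occupancy, handle Icache miss
// completion and any blocked/quiesce states, and otherwise fetch, process,
// and rename up to 'width' instructions into the instruction buffer.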
template <class Impl>
void
FrontEnd<Impl>::tick()
{
    if (switchedOut)
        return;

    // @todo: Maybe I want to just have direct communication...
    if (fromCommit->doneSeqNum) {
        branchPred.update(fromCommit->doneSeqNum, 0);
    }

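    // Accumulate IFQ occupancy; these counters feed the IFQ:occupancy and
    // IFQ:fullRate formulas registered in regStats().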
    IFQCount += instBufferSize;
    IFQFcount += instBufferSize == maxInstBufferSize;

    // Fetch cache line
    if (status == IcacheMissComplete) {
        cacheBlkValid = true;

        status = Running;
        if (barrierInst)
            status = SerializeBlocked;
        if (freeRegs <= 0)
            status = RenameBlocked;
        checkBE();
    } else if (status == IcacheMissStall) {
        DPRINTF(FE, "Still in Icache miss stall.\n");
        icacheStallCycles++;
        return;
    }

    if (status == RenameBlocked || status == SerializeBlocked ||
        status == TrapPending || status == BEBlocked) {
        // Will cause a one cycle bubble between changing state and
        // restarting.
        DPRINTF(FE, "In blocked status.\n");

        fetchBlockedCycles++;

        if (status == SerializeBlocked) {
            dispatchSerializeStallCycles++;
        }
        updateStatus();
        return;
    } else if (status == QuiescePending) {
        DPRINTF(FE, "Waiting for quiesce to execute or get squashed.\n");
        return;
    } else if (status != IcacheMissComplete) {
        if (fetchCacheLineNextCycle) {
            Fault fault = fetchCacheLine();
            if (fault != NoFault) {
                handleFault(fault);
                fetchFault = fault;
                return;
            }
            fetchCacheLineNextCycle = false;
        }
        // If miss, stall until it returns.
        if (status == IcacheMissStall) {
            // Tell CPU to not tick me for now.
            return;
        }
    }

    fetchCycles++;

    int num_inst = 0;

    // Otherwise loop and process instructions.
    // One way to hack infinite width is to set width and maxInstBufferSize
    // both really high. Inelegant, but probably will work.
    while (num_inst < width &&
           instBufferSize < maxInstBufferSize) {
        // Get instruction from cache line.
        DynInstPtr inst = getInstFromCacheline();

        if (!inst) {
            // PC is no longer in the cache line, end fetch.
            // Might want to check this at the end of the cycle so that
            // there's no cycle lost to checking for a new cache line.
            DPRINTF(FE, "Need to get new cache line\n");
            fetchCacheLineNextCycle = true;
            break;
        }

        processInst(inst);

        if (status == SerializeBlocked) {
            break;
        }

        // Possibly push into a time buffer that estimates the front end
        // latency
        instBuffer.push_back(inst);
        ++instBufferSize;
        ++num_inst;

#if FULL_SYSTEM
        if (inst->isQuiesce()) {
            warn("%lli: Quiesce instruction encountered, halting fetch!", curTick);
            status = QuiescePending;
            break;
        }
#endif

        if (inst->predTaken()) {
            // Start over with tick?
            break;
        } else if (freeRegs <= 0) {
            DPRINTF(FE, "Ran out of free registers to rename to!\n");
            status = RenameBlocked;
            break;
        } else if (serializeNext) {
            break;
        }
    }

    fetchNisnDist.sample(num_inst);
    checkBE();

    DPRINTF(FE, "Num insts processed: %i, Inst Buffer size: %i, Free "
            "Regs %i\n", num_inst, instBufferSize, freeRegs);
}

template <class Impl>
Fault
FrontEnd<Impl>::fetchCacheLine()
{
    // Read a cache line, based on the current PC.
#if FULL_SYSTEM
    // Flag to say whether or not address is physical addr.
    unsigned flags = cpu->inPalMode(PC) ? PHYSICAL : 0;
#else
    unsigned flags = 0;
#endif // FULL_SYSTEM
    Fault fault = NoFault;

    if (interruptPending && flags == 0) {
        return fault;
    }

    // Align the fetch PC so it's at the start of a cache block.
    Addr fetch_PC = icacheBlockAlignPC(PC);
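    // icacheBlockAlignPC() presumably masks off the block offset bits; e.g.,
    // with a 64-byte block a PC of 0x1234 gives a fetch_PC of 0x1200.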

    DPRINTF(FE, "Fetching cache line starting at %#x.\n", fetch_PC);

    // Set up the memReq to do a read of the first instruction's address.
    // Set the appropriate read size and flags as well.
    memReq = new MemReq();

    memReq->asid = 0;
    memReq->thread_num = 0;
    memReq->data = new uint8_t[64];
    memReq->tc = tc;
    memReq->cmd = Read;
    memReq->reset(fetch_PC, cacheBlkSize, flags);

    // Translate the instruction request.
    fault = cpu->translateInstReq(memReq);

    // Now do the timing access to see whether or not the instruction
    // exists within the cache.
    if (icacheInterface && fault == NoFault) {
#if FULL_SYSTEM
        if (cpu->system->memctrl->badaddr(memReq->paddr) ||
            memReq->flags & UNCACHEABLE) {
            DPRINTF(FE, "Fetch: Bad address %#x (hopefully on a "
                    "misspeculating path!",
                    memReq->paddr);
            return TheISA::genMachineCheckFault();
        }
#endif

        memReq->completionEvent = NULL;

        memReq->time = curTick;
        fault = cpu->mem->read(memReq, cacheData);

        MemAccessResult res = icacheInterface->access(memReq);

        // If the cache missed then schedule an event to wake
        // up this stage once the cache miss completes.
        if (icacheInterface->doEvents() && res != MA_HIT) {
            memReq->completionEvent = new ICacheCompletionEvent(memReq, this);

            status = IcacheMissStall;

            cacheBlkValid = false;

            DPRINTF(FE, "Cache miss.\n");
        } else {
            DPRINTF(FE, "Cache hit.\n");

            cacheBlkValid = true;

//            memcpy(cacheData, memReq->data, memReq->size);
        }
    }

    // Note that this will set the cache block PC a bit earlier than it should
    // be set.
    cacheBlkPC = fetch_PC;

    ++fetchedCacheLines;

    DPRINTF(FE, "Done fetching cache line.\n");

    return fault;
}

template <class Impl>
void
FrontEnd<Impl>::processInst(DynInstPtr &inst)
{
    if (processBarriers(inst)) {
        return;
    }

    Addr inst_PC = inst->readPC();

    if (!inst->isControl()) {
        inst->setPredTarg(inst->readNextPC());
    } else {
        fetchedBranches++;
        if (branchPred.predict(inst, inst_PC, inst->threadNumber)) {
            predictedBranches++;
        }
    }

    Addr next_PC = inst->readPredTarg();

    DPRINTF(FE, "[sn:%lli] Predicted and processed inst PC %#x, next PC "
            "%#x\n", inst->seqNum, inst_PC, next_PC);

//    inst->setNextPC(next_PC);

    // Not sure where I should set this
    PC = next_PC;

    renameInst(inst);
}

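// Handle serializing (barrier) instructions. Serialize-before instructions
// (and IPR accesses) block the front end until the back end drains, while
// serialize-after instructions (and store conditionals) mark the *next*
// instruction as serialize-before via the serializeNext flag.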
template <class Impl>
bool
FrontEnd<Impl>::processBarriers(DynInstPtr &inst)
{
    if (serializeNext) {
        inst->setSerializeBefore();
        serializeNext = false;
    } else if (!inst->isSerializing() &&
               !inst->isIprAccess() &&
               !inst->isStoreConditional()) {
        return false;
    }

    if ((inst->isIprAccess() || inst->isSerializeBefore()) &&
        !inst->isSerializeHandled()) {
        DPRINTF(FE, "Serialize before instruction encountered.\n");

        if (!inst->isTempSerializeBefore()) {
            dispatchedSerializing++;
            inst->setSerializeHandled();
        } else {
            dispatchedTempSerializing++;
        }

        // Change status over to SerializeBlocked so that other stages know
        // what this is blocked on.
        status = SerializeBlocked;

        barrierInst = inst;
        return true;
    } else if ((inst->isStoreConditional() || inst->isSerializeAfter())
               && !inst->isSerializeHandled()) {
        DPRINTF(FE, "Serialize after instruction encountered.\n");

        inst->setSerializeHandled();

        dispatchedSerializing++;

        serializeNext = true;
        return false;
    }
    return false;
}

template <class Impl>
void
FrontEnd<Impl>::handleFault(Fault &fault)
{
    DPRINTF(FE, "Fault at fetch, telling commit\n");

    // We're blocked on the back end until it handles this fault.
    status = TrapPending;

    // Get a sequence number.
    InstSeqNum inst_seq = getAndIncrementInstSeq();
    // We will use a nop in order to carry the fault.
    ExtMachInst ext_inst = TheISA::NoopMachInst;

    // Create a new DynInst from the dummy nop.
    DynInstPtr instruction = new DynInst(ext_inst, PC,
                                         PC+sizeof(MachInst),
                                         inst_seq, cpu);
    instruction->setPredTarg(instruction->readNextPC());
//    instruction->setThread(tid);

//    instruction->setASID(tid);

    instruction->setState(thread);

    instruction->traceData = NULL;

    instruction->fault = fault;
    instruction->setCanIssue();
    instBuffer.push_back(instruction);
    ++instBufferSize;
}

template <class Impl>
void
FrontEnd<Impl>::squash(const InstSeqNum &squash_num, const Addr &next_PC,
                       const bool is_branch, const bool branch_taken)
{
    DPRINTF(FE, "Squashing from [sn:%lli], setting PC to %#x\n",
            squash_num, next_PC);

    if (fetchFault != NoFault)
        fetchFault = NoFault;

    while (!instBuffer.empty() &&
           instBuffer.back()->seqNum > squash_num) {
        DynInstPtr inst = instBuffer.back();

        DPRINTF(FE, "Squashing instruction [sn:%lli] PC %#x\n",
                inst->seqNum, inst->readPC());

        inst->clearDependents();

        instBuffer.pop_back();
        --instBufferSize;

        freeRegs += inst->numDestRegs();
    }

    // Copy over rename table from the back end.
    renameTable.copyFrom(backEnd->renameTable);

    PC = next_PC;

    // Update BP with proper information.
    if (is_branch) {
        branchPred.squash(squash_num, next_PC, branch_taken, 0);
    } else {
        branchPred.squash(squash_num, 0);
    }

    // Clear the icache miss if it's outstanding.
    if (status == IcacheMissStall && icacheInterface) {
        DPRINTF(FE, "Squashing outstanding Icache miss.\n");
        memReq = NULL;
    }

    if (status == SerializeBlocked) {
        assert(barrierInst->seqNum > squash_num);
        barrierInst = NULL;
    }

    // Unless this squash originated from the front end, we're probably
    // in running mode now.
    // Actually might want to make this latency dependent.
    status = Running;
    fetchCacheLineNextCycle = true;
}

template <class Impl>
typename Impl::DynInstPtr
FrontEnd<Impl>::getInst()
{
    if (instBufferSize == 0) {
        return NULL;
    }

    DynInstPtr inst = instBuffer.front();

    instBuffer.pop_front();

    --instBufferSize;

    dispatchCountStat++;

    return inst;
}

template <class Impl>
void
FrontEnd<Impl>::processCacheCompletion(MemReqPtr &req)
{
    DPRINTF(FE, "Processing cache completion\n");

    // Do something here.
    if (status != IcacheMissStall ||
        req != memReq ||
        switchedOut) {
        DPRINTF(FE, "Previous fetch was squashed.\n");
        fetchIcacheSquashes++;
        return;
    }

    status = IcacheMissComplete;

/*    if (checkStall(tid)) {
        fetchStatus[tid] = Blocked;
    } else {
        fetchStatus[tid] = IcacheMissComplete;
    }
*/
//    memcpy(cacheData, memReq->data, memReq->size);

    // Reset the completion event to NULL.
//    memReq->completionEvent = NULL;
    memReq = NULL;
}

template <class Impl>
void
FrontEnd<Impl>::addFreeRegs(int num_freed)
{
    if (status == RenameBlocked && freeRegs + num_freed > 0) {
        status = Running;
    }

    DPRINTF(FE, "Adding %i freed registers\n", num_freed);

    freeRegs += num_freed;

//    assert(freeRegs <= numPhysRegs);
    if (freeRegs > numPhysRegs)
        freeRegs = numPhysRegs;
}

template <class Impl>
bool
FrontEnd<Impl>::updateStatus()
{
    bool serialize_block = !backEnd->robEmpty() || instBufferSize;
    bool be_block = cpu->decoupledFrontEnd ? false : backEnd->isBlocked();
    bool ret_val = false;

    if (status == SerializeBlocked && !serialize_block) {
        status = SerializeComplete;
        ret_val = true;
    }

    if (status == BEBlocked && !be_block) {
        if (barrierInst) {
            status = SerializeBlocked;
        } else {
            status = Running;
        }
        ret_val = true;
    }
    return ret_val;
}

template <class Impl>
void
FrontEnd<Impl>::checkBE()
{
    bool be_block = cpu->decoupledFrontEnd ? false : backEnd->isBlocked();
    if (be_block) {
        if (status == Running || status == Idle) {
            status = BEBlocked;
        }
    }
}

template <class Impl>
typename Impl::DynInstPtr
FrontEnd<Impl>::getInstFromCacheline()
{
    if (status == SerializeComplete) {
        DynInstPtr inst = barrierInst;
        status = Running;
        barrierInst = NULL;
        inst->clearSerializeBefore();
        return inst;
    }

    InstSeqNum inst_seq;
    MachInst inst;
    // @todo: Fix this magic number used here to handle word offset (and
    // getting rid of PAL bit)
    unsigned offset = (PC & cacheBlkMask) & ~3;
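    // Masking with ~3 word-aligns the offset into cacheData, dropping the
    // low two bits (which, per the todo above, also carry the PAL bit);
    // e.g., a block offset of 0x1d yields an index of 0x1c.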

    // PC of inst is not in this cache block
    if (PC >= (cacheBlkPC + cacheBlkSize) || PC < cacheBlkPC || !cacheBlkValid) {
        return NULL;
    }

    //////////////////////////
    // Fetch one instruction
    //////////////////////////

    // Get a sequence number.
    inst_seq = getAndIncrementInstSeq();

    // Make sure this is a valid index.
    assert(offset <= cacheBlkSize - sizeof(MachInst));

    // Get the instruction from the array of the cache line.
    inst = htog(*reinterpret_cast<MachInst *>(&cacheData[offset]));

    ExtMachInst decode_inst = TheISA::makeExtMI(inst, PC);

    // Create a new DynInst from the instruction fetched.
    DynInstPtr instruction = new DynInst(decode_inst, PC, PC+sizeof(MachInst),
                                         inst_seq, cpu);

    instruction->setState(thread);

    DPRINTF(FE, "Instruction [sn:%lli] created, with PC %#x\n%s\n",
            inst_seq, instruction->readPC(),
            instruction->staticInst->disassemble(PC));

    instruction->traceData =
        Trace::getInstRecord(curTick, tc, cpu,
                             instruction->staticInst,
                             instruction->readPC(), 0);

    // Increment stat of fetched instructions.
    ++fetchedInsts;

    return instruction;
}

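// Rename an instruction directly onto producing instructions: each source
// register is looked up in the rename table to find its most recent producer
// (marking the source ready if that result is already available, otherwise
// adding this instruction to the producer's dependent list), and each
// destination register claims a rename table entry, consuming one free
// physical register.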
template <class Impl>
void
FrontEnd<Impl>::renameInst(DynInstPtr &inst)
{
    DynInstPtr src_inst = NULL;
    int num_src_regs = inst->numSrcRegs();
    if (num_src_regs == 0) {
        inst->setCanIssue();
    } else {
        for (int i = 0; i < num_src_regs; ++i) {
            src_inst = renameTable[inst->srcRegIdx(i)];

            inst->setSrcInst(src_inst, i);

            DPRINTF(FE, "[sn:%lli]: Src reg %i is inst [sn:%lli]\n",
                    inst->seqNum, (int)inst->srcRegIdx(i), src_inst->seqNum);

            if (src_inst->isResultReady()) {
                DPRINTF(FE, "Reg ready.\n");
                inst->markSrcRegReady(i);
            } else {
                DPRINTF(FE, "Adding to dependent list.\n");
                src_inst->addDependent(inst);
            }
        }
    }

    for (int i = 0; i < inst->numDestRegs(); ++i) {
        RegIndex idx = inst->destRegIdx(i);

        DPRINTF(FE, "Dest reg %i is now inst [sn:%lli], was previously "
                "[sn:%lli]\n",
                (int)inst->destRegIdx(i), inst->seqNum,
                renameTable[idx]->seqNum);

        inst->setPrevDestInst(renameTable[idx], i);

        renameTable[idx] = inst;
        --freeRegs;
    }
}

template <class Impl>
void
FrontEnd<Impl>::wakeFromQuiesce()
{
    DPRINTF(FE, "Waking up from quiesce\n");
    // Hopefully this is safe
    status = Running;
}

template <class Impl>
void
FrontEnd<Impl>::switchOut()
{
    switchedOut = true;
    cpu->signalSwitched();
}

template <class Impl>
void
FrontEnd<Impl>::doSwitchOut()
{
    memReq = NULL;
    squash(0, 0);
    instBuffer.clear();
    instBufferSize = 0;
    status = Idle;
}

template <class Impl>
void
FrontEnd<Impl>::takeOverFrom(ThreadContext *old_tc)
{
    assert(freeRegs == numPhysRegs);
    fetchCacheLineNextCycle = true;

    cacheBlkValid = false;

#if !FULL_SYSTEM
//    pTable = params->pTable;
#endif
    fetchFault = NoFault;
    serializeNext = false;
    barrierInst = NULL;
    status = Running;
    switchedOut = false;
    interruptPending = false;
}

template <class Impl>
void
FrontEnd<Impl>::dumpInsts()
{
    cprintf("instBuffer size: %i\n", instBuffer.size());

    InstBuffIt buff_it = instBuffer.begin();

    for (int num = 0; buff_it != instBuffer.end(); num++) {
        cprintf("Instruction:%i\nPC:%#x\n[tid:%i]\n[sn:%lli]\nIssued:%i\n"
                "Squashed:%i\n\n",
                num, (*buff_it)->readPC(), (*buff_it)->threadNumber,
                (*buff_it)->seqNum, (*buff_it)->isIssued(),
                (*buff_it)->isSquashed());
        buff_it++;
    }
}

template <class Impl>
FrontEnd<Impl>::ICacheCompletionEvent::ICacheCompletionEvent(MemReqPtr &_req, FrontEnd *fe)
    : Event(&mainEventQueue, Delayed_Writeback_Pri), req(_req), frontEnd(fe)
{
    this->setFlags(Event::AutoDelete);
}

template <class Impl>
void
FrontEnd<Impl>::ICacheCompletionEvent::process()
{
    frontEnd->processCacheCompletion(req);
}

template <class Impl>
const char *
FrontEnd<Impl>::ICacheCompletionEvent::description()
{
    return "ICache completion event";
}