/*
 * Copyright (c) 2006 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Kevin Lim
 */

#include "arch/faults.hh"
#include "arch/isa_traits.hh"
#include "base/statistics.hh"
#include "cpu/thread_context.hh"
#include "cpu/exetrace.hh"
#include "cpu/ozone/front_end.hh"
#include "mem/mem_interface.hh"
#include "sim/byte_swap.hh"

using namespace TheISA;

template <class Impl>
FrontEnd<Impl>::FrontEnd(Params *params)
    : branchPred(params),
      icacheInterface(params->icacheInterface),
      instBufferSize(0),
      maxInstBufferSize(params->maxInstBufferSize),
      width(params->frontEndWidth),
      freeRegs(params->numPhysicalRegs),
      numPhysRegs(params->numPhysicalRegs),
      serializeNext(false),
      interruptPending(false)
{
    switchedOut = false;

    status = Idle;

    memReq = NULL;
    // Size of cache block.
    cacheBlkSize = icacheInterface ? icacheInterface->getBlockSize() : 64;

    assert(isPowerOf2(cacheBlkSize));

    // Create mask to get rid of offset bits.
    cacheBlkMask = (cacheBlkSize - 1);

    // Create space to store a cache line.
    cacheData = new uint8_t[cacheBlkSize];

    fetchCacheLineNextCycle = true;

    cacheBlkValid = false;

#if !FULL_SYSTEM
//    pTable = params->pTable;
#endif
    fetchFault = NoFault;
}

template <class Impl>
std::string
FrontEnd<Impl>::name() const
{
    return cpu->name() + ".frontend";
}

template <class Impl>
void
FrontEnd<Impl>::setCommBuffer(TimeBuffer<CommStruct> *_comm)
{
    comm = _comm;
    // @todo: Hardcoded for now. Allow this to be set by a latency.
    fromCommit = comm->getWire(-1);
}

template <class Impl>
void
FrontEnd<Impl>::setTC(ThreadContext *tc_ptr)
{
    tc = tc_ptr;
}

template <class Impl>
void
FrontEnd<Impl>::regStats()
{
    icacheStallCycles
        .name(name() + ".icacheStallCycles")
        .desc("Number of cycles fetch is stalled on an Icache miss")
        .prereq(icacheStallCycles);

    fetchedInsts
        .name(name() + ".fetchedInsts")
        .desc("Number of instructions fetch has processed")
        .prereq(fetchedInsts);

    fetchedBranches
        .name(name() + ".fetchedBranches")
        .desc("Number of fetched branches")
        .prereq(fetchedBranches);

    predictedBranches
        .name(name() + ".predictedBranches")
        .desc("Number of branches that fetch has predicted taken")
        .prereq(predictedBranches);

    fetchCycles
        .name(name() + ".fetchCycles")
        .desc("Number of cycles fetch has run and was not squashing or"
              " blocked")
        .prereq(fetchCycles);

    fetchIdleCycles
        .name(name() + ".fetchIdleCycles")
        .desc("Number of cycles fetch was idle")
        .prereq(fetchIdleCycles);

    fetchSquashCycles
        .name(name() + ".fetchSquashCycles")
        .desc("Number of cycles fetch has spent squashing")
        .prereq(fetchSquashCycles);

    fetchBlockedCycles
        .name(name() + ".fetchBlockedCycles")
        .desc("Number of cycles fetch has spent blocked")
        .prereq(fetchBlockedCycles);

    fetchedCacheLines
        .name(name() + ".fetchedCacheLines")
        .desc("Number of cache lines fetched")
        .prereq(fetchedCacheLines);

    fetchIcacheSquashes
        .name(name() + ".fetchIcacheSquashes")
        .desc("Number of outstanding Icache misses that were squashed")
        .prereq(fetchIcacheSquashes);

    fetchNisnDist
        .init(/* base value */ 0,
              /* last value */ width,
              /* bucket size */ 1)
        .name(name() + ".rateDist")
        .desc("Number of instructions fetched each cycle (Total)")
        .flags(Stats::pdf);

    idleRate
        .name(name() + ".idleRate")
        .desc("Percent of cycles fetch was idle")
        .prereq(idleRate);
    idleRate = fetchIdleCycles * 100 / cpu->numCycles;

    branchRate
        .name(name() + ".branchRate")
        .desc("Number of branch fetches per cycle")
        .flags(Stats::total);
    branchRate = fetchedBranches / cpu->numCycles;

    fetchRate
        .name(name() + ".rate")
        .desc("Number of inst fetches per cycle")
        .flags(Stats::total);
    fetchRate = fetchedInsts / cpu->numCycles;

    IFQCount
        .name(name() + ".IFQ:count")
        .desc("cumulative IFQ occupancy")
        ;

    IFQFcount
        .name(name() + ".IFQ:fullCount")
        .desc("cumulative IFQ full count")
        .flags(Stats::total)
        ;

    IFQOccupancy
        .name(name() + ".IFQ:occupancy")
        .desc("avg IFQ occupancy (insts)")
        ;
    IFQOccupancy = IFQCount / cpu->numCycles;

    IFQLatency
        .name(name() + ".IFQ:latency")
        .desc("avg IFQ occupant latency (cycles)")
        .flags(Stats::total)
        ;

    IFQFullRate
        .name(name() + ".IFQ:fullRate")
        .desc("fraction of time (cycles) IFQ was full")
        .flags(Stats::total)
        ;
    IFQFullRate = IFQFcount * Stats::constant(100) / cpu->numCycles;

    dispatchCountStat
        .name(name() + ".DIS:count")
        .desc("cumulative count of dispatched insts")
        .flags(Stats::total)
        ;

    dispatchedSerializing
        .name(name() + ".DIS:serializingInsts")
        .desc("count of serializing insts dispatched")
        .flags(Stats::total)
        ;

    dispatchedTempSerializing
        .name(name() + ".DIS:tempSerializingInsts")
        .desc("count of temporary serializing insts dispatched")
        .flags(Stats::total)
        ;

    dispatchSerializeStallCycles
        .name(name() + ".DIS:serializeStallCycles")
        .desc("count of cycles dispatch stalled for serializing inst")
        .flags(Stats::total)
        ;

    dispatchRate
        .name(name() + ".DIS:rate")
        .desc("dispatched insts per cycle")
        .flags(Stats::total)
        ;
    dispatchRate = dispatchCountStat / cpu->numCycles;

    regIntFull
        .name(name() + ".REG:int:full")
        .desc("number of cycles where there were no INT registers")
        ;

    regFpFull
        .name(name() + ".REG:fp:full")
        .desc("number of cycles where there were no FP registers")
        ;
    IFQLatency = IFQOccupancy / dispatchRate;

    branchPred.regStats();
}

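// Main per-cycle function for the front end: drains branch predictor
// updates from commit, accounts IFQ occupancy stats, handles cache-miss
// completion and blocked states, and then fetches and renames up to
// 'width' instructions per cycle into the instruction buffer.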
template <class Impl>
void
FrontEnd<Impl>::tick()
{
    if (switchedOut)
        return;

    // @todo: Maybe I want to just have direct communication...
    if (fromCommit->doneSeqNum) {
        branchPred.update(fromCommit->doneSeqNum, 0);
    }

    IFQCount += instBufferSize;
    IFQFcount += instBufferSize == maxInstBufferSize;

    // Fetch cache line
    if (status == IcacheMissComplete) {
        cacheBlkValid = true;

        status = Running;
        if (barrierInst)
            status = SerializeBlocked;
        if (freeRegs <= 0)
            status = RenameBlocked;
        checkBE();
    } else if (status == IcacheMissStall) {
        DPRINTF(FE, "Still in Icache miss stall.\n");
        icacheStallCycles++;
        return;
    }

    if (status == RenameBlocked || status == SerializeBlocked ||
        status == TrapPending || status == BEBlocked) {
        // Will cause a one cycle bubble between changing state and
        // restarting.
        DPRINTF(FE, "In blocked status.\n");

        fetchBlockedCycles++;

        if (status == SerializeBlocked) {
            dispatchSerializeStallCycles++;
        }
        updateStatus();
        return;
    } else if (status == QuiescePending) {
        DPRINTF(FE, "Waiting for quiesce to execute or get squashed.\n");
        return;
    } else if (status != IcacheMissComplete) {
        if (fetchCacheLineNextCycle) {
            Fault fault = fetchCacheLine();
            if (fault != NoFault) {
                handleFault(fault);
                fetchFault = fault;
                return;
            }
            fetchCacheLineNextCycle = false;
        }
        // If miss, stall until it returns.
        if (status == IcacheMissStall) {
            // Tell CPU to not tick me for now.
            return;
        }
    }

    fetchCycles++;

    int num_inst = 0;

    // Otherwise loop and process instructions.
    // One way to hack infinite width is to set width and maxInstBufferSize
    // both really high. Inelegant, but probably will work.
    while (num_inst < width &&
           instBufferSize < maxInstBufferSize) {
        // Get instruction from cache line.
        DynInstPtr inst = getInstFromCacheline();

        if (!inst) {
            // PC is no longer in the cache line, end fetch.
            // Might want to check this at the end of the cycle so that
            // there's no cycle lost to checking for a new cache line.
            DPRINTF(FE, "Need to get new cache line\n");
            fetchCacheLineNextCycle = true;
            break;
        }

        processInst(inst);

        if (status == SerializeBlocked) {
            break;
        }

        // Possibly push into a time buffer that estimates the front end
        // latency
        instBuffer.push_back(inst);
        ++instBufferSize;
        ++num_inst;

#if FULL_SYSTEM
        if (inst->isQuiesce()) {
            warn("%lli: Quiesce instruction encountered, halting fetch!",
                 curTick);
            status = QuiescePending;
            break;
        }
#endif

        if (inst->predTaken()) {
            // Start over with tick?
            break;
        } else if (freeRegs <= 0) {
            DPRINTF(FE, "Ran out of free registers to rename to!\n");
            status = RenameBlocked;
            break;
        } else if (serializeNext) {
            break;
        }
    }

    fetchNisnDist.sample(num_inst);
    checkBE();

    DPRINTF(FE, "Num insts processed: %i, Inst Buffer size: %i, Free "
            "Regs %i\n", num_inst, instBufferSize, freeRegs);
}

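// Fetches a full cache block, aligned to the block containing the current
// PC, into cacheData. Returns any fault from translation or the bad-address
// check; on an Icache miss the stage enters IcacheMissStall and waits for
// the ICacheCompletionEvent to fire.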
template <class Impl>
Fault
FrontEnd<Impl>::fetchCacheLine()
{
    // Read a cache line, based on the current PC.
#if FULL_SYSTEM
    // Flag to say whether or not address is physical addr.
    unsigned flags = cpu->inPalMode(PC) ? PHYSICAL : 0;
#else
    unsigned flags = 0;
#endif // FULL_SYSTEM
    Fault fault = NoFault;

    if (interruptPending && flags == 0) {
        return fault;
    }

    // Align the fetch PC so it's at the start of a cache block.
    Addr fetch_PC = icacheBlockAlignPC(PC);

    DPRINTF(FE, "Fetching cache line starting at %#x.\n", fetch_PC);

    // Set up the memReq to do a read of the first instruction's address.
    // Set the appropriate read size and flags as well.
    memReq = new MemReq();

    memReq->asid = 0;
    memReq->thread_num = 0;
    memReq->data = new uint8_t[64];
    memReq->tc = tc;
    memReq->cmd = Read;
    memReq->reset(fetch_PC, cacheBlkSize, flags);

    // Translate the instruction request.
    fault = cpu->translateInstReq(memReq);

    // Now do the timing access to see whether or not the instruction
    // exists within the cache.
    if (icacheInterface && fault == NoFault) {
#if FULL_SYSTEM
        if (cpu->system->memctrl->badaddr(memReq->paddr) ||
            memReq->flags & UNCACHEABLE) {
            DPRINTF(FE, "Fetch: Bad address %#x (hopefully on a "
                    "misspeculating path!)\n",
                    memReq->paddr);
            return TheISA::genMachineCheckFault();
        }
#endif

        memReq->completionEvent = NULL;

        memReq->time = curTick;
        fault = cpu->mem->read(memReq, cacheData);

        MemAccessResult res = icacheInterface->access(memReq);

        // If the cache missed then schedule an event to wake
        // up this stage once the cache miss completes.
        if (icacheInterface->doEvents() && res != MA_HIT) {
            memReq->completionEvent = new ICacheCompletionEvent(memReq, this);

            status = IcacheMissStall;

            cacheBlkValid = false;

            DPRINTF(FE, "Cache miss.\n");
        } else {
            DPRINTF(FE, "Cache hit.\n");

            cacheBlkValid = true;

//            memcpy(cacheData, memReq->data, memReq->size);
        }
    }

    // Note that this will set the cache block PC a bit earlier than it should
    // be set.
    cacheBlkPC = fetch_PC;

    ++fetchedCacheLines;

    DPRINTF(FE, "Done fetching cache line.\n");

    return fault;
}

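// Runs a single fetched instruction through branch prediction (or just sets
// the fall-through target for non-control instructions), advances the front
// end's PC to the predicted target, and renames the instruction.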
template <class Impl>
void
FrontEnd<Impl>::processInst(DynInstPtr &inst)
{
    if (processBarriers(inst)) {
        return;
    }

    Addr inst_PC = inst->readPC();

    if (!inst->isControl()) {
        inst->setPredTarg(inst->readNextPC());
    } else {
        fetchedBranches++;
        if (branchPred.predict(inst, inst_PC, inst->threadNumber)) {
            predictedBranches++;
        }
    }

    Addr next_PC = inst->readPredTarg();

    DPRINTF(FE, "[sn:%lli] Predicted and processed inst PC %#x, next PC "
            "%#x\n", inst->seqNum, inst_PC, next_PC);

//    inst->setNextPC(next_PC);

    // Not sure where I should set this
    PC = next_PC;

    renameInst(inst);
}

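// Handles serializing ("barrier") instructions. Returns true if the
// instruction must stall the front end (the serialize-before case), in which
// case it is held as barrierInst and the stage enters SerializeBlocked;
// serialize-after instructions instead mark the next instruction as
// serialized and let fetch continue.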
template <class Impl>
bool
FrontEnd<Impl>::processBarriers(DynInstPtr &inst)
{
    if (serializeNext) {
        inst->setSerializeBefore();
        serializeNext = false;
    } else if (!inst->isSerializing() &&
               !inst->isIprAccess() &&
               !inst->isStoreConditional()) {
        return false;
    }

    if ((inst->isIprAccess() || inst->isSerializeBefore()) &&
        !inst->isSerializeHandled()) {
        DPRINTF(FE, "Serialize before instruction encountered.\n");

        if (!inst->isTempSerializeBefore()) {
            dispatchedSerializing++;
            inst->setSerializeHandled();
        } else {
            dispatchedTempSerializing++;
        }

        // Change status over to SerializeBlocked so that other stages know
        // what this is blocked on.
        status = SerializeBlocked;

        barrierInst = inst;
        return true;
    } else if ((inst->isStoreConditional() || inst->isSerializeAfter())
               && !inst->isSerializeHandled()) {
        DPRINTF(FE, "Serialize after instruction encountered.\n");

        inst->setSerializeHandled();

        dispatchedSerializing++;

        serializeNext = true;
        return false;
    }
    return false;
}

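// A fetch fault is carried to the back end on a dummy no-op DynInst so that
// it can be handled in program order; the front end blocks in TrapPending
// until the back end deals with it.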
template <class Impl>
void
FrontEnd<Impl>::handleFault(Fault &fault)
{
    DPRINTF(FE, "Fault at fetch, telling commit\n");

    // We're blocked on the back end until it handles this fault.
    status = TrapPending;

    // Get a sequence number.
    InstSeqNum inst_seq = getAndIncrementInstSeq();
    // We will use a nop in order to carry the fault.
    ExtMachInst ext_inst = TheISA::NoopMachInst;

    // Create a new DynInst from the dummy nop.
    DynInstPtr instruction = new DynInst(ext_inst, PC,
                                         PC + sizeof(MachInst),
                                         inst_seq, cpu);
    instruction->setPredTarg(instruction->readNextPC());
//    instruction->setThread(tid);

//    instruction->setASID(tid);

    instruction->setState(thread);

    instruction->traceData = NULL;

    instruction->fault = fault;
    instruction->setCanIssue();
    instBuffer.push_back(instruction);
    ++instBufferSize;
}

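// Squashes all buffered instructions younger than squash_num, reclaiming
// their destination registers, restores the rename table from the back end,
// redirects the PC, and notifies the branch predictor.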
template <class Impl>
void
FrontEnd<Impl>::squash(const InstSeqNum &squash_num, const Addr &next_PC,
                       const bool is_branch, const bool branch_taken)
{
    DPRINTF(FE, "Squashing from [sn:%lli], setting PC to %#x\n",
            squash_num, next_PC);

    if (fetchFault != NoFault)
        fetchFault = NoFault;

    while (!instBuffer.empty() &&
           instBuffer.back()->seqNum > squash_num) {
        DynInstPtr inst = instBuffer.back();

        DPRINTF(FE, "Squashing instruction [sn:%lli] PC %#x\n",
                inst->seqNum, inst->readPC());

        inst->clearDependents();

        instBuffer.pop_back();
        --instBufferSize;

        freeRegs += inst->numDestRegs();
    }

    // Copy over rename table from the back end.
    renameTable.copyFrom(backEnd->renameTable);

    PC = next_PC;

    // Update BP with proper information.
    if (is_branch) {
        branchPred.squash(squash_num, next_PC, branch_taken, 0);
    } else {
        branchPred.squash(squash_num, 0);
    }

    // Clear the icache miss if it's outstanding.
    if (status == IcacheMissStall && icacheInterface) {
        DPRINTF(FE, "Squashing outstanding Icache miss.\n");
        memReq = NULL;
    }

    if (status == SerializeBlocked) {
        assert(barrierInst->seqNum > squash_num);
        barrierInst = NULL;
    }

    // Unless this squash originated from the front end, we're probably
    // in running mode now.
    // Actually might want to make this latency dependent.
    status = Running;
    fetchCacheLineNextCycle = true;
}

template <class Impl>
typename Impl::DynInstPtr
FrontEnd<Impl>::getInst()
{
    if (instBufferSize == 0) {
        return NULL;
    }

    DynInstPtr inst = instBuffer.front();

    instBuffer.pop_front();

    --instBufferSize;

    dispatchCountStat++;

    return inst;
}

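// Callback from the Icache completion event. Completions for requests that
// have since been squashed, or that arrive after a switch-out, are ignored;
// otherwise the miss is marked complete so tick() can resume fetching.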
template <class Impl>
void
FrontEnd<Impl>::processCacheCompletion(MemReqPtr &req)
{
    DPRINTF(FE, "Processing cache completion\n");

    // Do something here.
    if (status != IcacheMissStall ||
        req != memReq ||
        switchedOut) {
        DPRINTF(FE, "Previous fetch was squashed.\n");
        fetchIcacheSquashes++;
        return;
    }

    status = IcacheMissComplete;

/*    if (checkStall(tid)) {
        fetchStatus[tid] = Blocked;
    } else {
        fetchStatus[tid] = IcacheMissComplete;
    }
*/
//    memcpy(cacheData, memReq->data, memReq->size);

    // Reset the completion event to NULL.
//    memReq->completionEvent = NULL;
    memReq = NULL;
}

template <class Impl>
void
FrontEnd<Impl>::addFreeRegs(int num_freed)
{
    if (status == RenameBlocked && freeRegs + num_freed > 0) {
        status = Running;
    }

    DPRINTF(FE, "Adding %i freed registers\n", num_freed);

    freeRegs += num_freed;

//    assert(freeRegs <= numPhysRegs);
    if (freeRegs > numPhysRegs)
        freeRegs = numPhysRegs;
}

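// Re-evaluates blocked states: a serialize block clears once the back end's
// ROB is empty and the instruction buffer has drained, and a back-end block
// clears once the back end unblocks. Returns true if the status changed.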
template <class Impl>
bool
FrontEnd<Impl>::updateStatus()
{
    bool serialize_block = !backEnd->robEmpty() || instBufferSize;
    bool be_block = cpu->decoupledFrontEnd ? false : backEnd->isBlocked();
    bool ret_val = false;

    if (status == SerializeBlocked && !serialize_block) {
        status = SerializeComplete;
        ret_val = true;
    }

    if (status == BEBlocked && !be_block) {
        if (barrierInst) {
            status = SerializeBlocked;
        } else {
            status = Running;
        }
        ret_val = true;
    }
    return ret_val;
}

template <class Impl>
void
FrontEnd<Impl>::checkBE()
{
    bool be_block = cpu->decoupledFrontEnd ? false : backEnd->isBlocked();
    if (be_block) {
        if (status == Running || status == Idle) {
            status = BEBlocked;
        }
    }
}

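// Extracts the instruction at the current PC from the buffered cache block
// and wraps it in a DynInst. Returns NULL when the PC has moved past the
// buffered block (or the block is invalid), signalling tick() to fetch a new
// line. A completed serialize barrier instruction is returned first if one
// is pending.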
template <class Impl>
typename Impl::DynInstPtr
FrontEnd<Impl>::getInstFromCacheline()
{
    if (status == SerializeComplete) {
        DynInstPtr inst = barrierInst;
        status = Running;
        barrierInst = NULL;
        inst->clearSerializeBefore();
        return inst;
    }

    InstSeqNum inst_seq;
    MachInst inst;
    // @todo: Fix this magic number used here to handle word offset (and
    // getting rid of PAL bit)
    unsigned offset = (PC & cacheBlkMask) & ~3;

    // PC of inst is not in this cache block
    if (PC >= (cacheBlkPC + cacheBlkSize) || PC < cacheBlkPC ||
        !cacheBlkValid) {
        return NULL;
    }

    //////////////////////////
    // Fetch one instruction
    //////////////////////////

    // Get a sequence number.
    inst_seq = getAndIncrementInstSeq();

    // Make sure this is a valid index.
    assert(offset <= cacheBlkSize - sizeof(MachInst));

    // Get the instruction from the array of the cache line.
    inst = htog(*reinterpret_cast<MachInst *>(&cacheData[offset]));

    ExtMachInst decode_inst = TheISA::makeExtMI(inst, PC);

    // Create a new DynInst from the instruction fetched.
    DynInstPtr instruction = new DynInst(decode_inst, PC, PC + sizeof(MachInst),
                                         inst_seq, cpu);

    instruction->setState(thread);

    DPRINTF(FE, "Instruction [sn:%lli] created, with PC %#x\n%s\n",
            inst_seq, instruction->readPC(),
            instruction->staticInst->disassemble(PC));

    instruction->traceData =
        Trace::getInstRecord(curTick, tc, cpu,
                             instruction->staticInst,
                             instruction->readPC(), 0);

    // Increment stat of fetched instructions.
    ++fetchedInsts;

    return instruction;
}

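// Performs rename in terms of producing instructions rather than physical
// register indices: each source register is mapped to the most recent
// instruction writing it (marking the operand ready if that result is
// already available), and each destination register takes a free register
// and updates the rename table.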
template <class Impl>
void
FrontEnd<Impl>::renameInst(DynInstPtr &inst)
{
    DynInstPtr src_inst = NULL;
    int num_src_regs = inst->numSrcRegs();
    if (num_src_regs == 0) {
        inst->setCanIssue();
    } else {
        for (int i = 0; i < num_src_regs; ++i) {
            src_inst = renameTable[inst->srcRegIdx(i)];

            inst->setSrcInst(src_inst, i);

            DPRINTF(FE, "[sn:%lli]: Src reg %i is inst [sn:%lli]\n",
                    inst->seqNum, (int)inst->srcRegIdx(i), src_inst->seqNum);

            if (src_inst->isResultReady()) {
                DPRINTF(FE, "Reg ready.\n");
                inst->markSrcRegReady(i);
            } else {
                DPRINTF(FE, "Adding to dependent list.\n");
                src_inst->addDependent(inst);
            }
        }
    }

    for (int i = 0; i < inst->numDestRegs(); ++i) {
        RegIndex idx = inst->destRegIdx(i);

        DPRINTF(FE, "Dest reg %i is now inst [sn:%lli], was previously "
                "[sn:%lli]\n",
                (int)inst->destRegIdx(i), inst->seqNum,
                renameTable[idx]->seqNum);

        inst->setPrevDestInst(renameTable[idx], i);

        renameTable[idx] = inst;
        --freeRegs;
    }
}

template <class Impl>
void
FrontEnd<Impl>::wakeFromQuiesce()
{
    DPRINTF(FE, "Waking up from quiesce\n");
    // Hopefully this is safe
    status = Running;
}

template <class Impl>
void
FrontEnd<Impl>::switchOut()
{
    switchedOut = true;
    cpu->signalSwitched();
}

template <class Impl>
void
FrontEnd<Impl>::doSwitchOut()
{
    memReq = NULL;
    squash(0, 0);
    instBuffer.clear();
    instBufferSize = 0;
    status = Idle;
}

template <class Impl>
void
FrontEnd<Impl>::takeOverFrom(ThreadContext *old_tc)
{
    assert(freeRegs == numPhysRegs);
    fetchCacheLineNextCycle = true;

    cacheBlkValid = false;

#if !FULL_SYSTEM
//    pTable = params->pTable;
#endif
    fetchFault = NoFault;
    serializeNext = false;
    barrierInst = NULL;
    status = Running;
    switchedOut = false;
    interruptPending = false;
}

template <class Impl>
void
FrontEnd<Impl>::dumpInsts()
{
    cprintf("instBuffer size: %i\n", instBuffer.size());

    InstBuffIt buff_it = instBuffer.begin();

    for (int num = 0; buff_it != instBuffer.end(); num++) {
        cprintf("Instruction:%i\nPC:%#x\n[tid:%i]\n[sn:%lli]\nIssued:%i\n"
                "Squashed:%i\n\n",
                num, (*buff_it)->readPC(), (*buff_it)->threadNumber,
                (*buff_it)->seqNum, (*buff_it)->isIssued(),
                (*buff_it)->isSquashed());
        buff_it++;
    }
}

template <class Impl>
FrontEnd<Impl>::ICacheCompletionEvent::ICacheCompletionEvent(MemReqPtr &_req,
                                                             FrontEnd *fe)
    : Event(&mainEventQueue, Delayed_Writeback_Pri), req(_req), frontEnd(fe)
{
    this->setFlags(Event::AutoDelete);
}

template <class Impl>
void
FrontEnd<Impl>::ICacheCompletionEvent::process()
{
    frontEnd->processCacheCompletion(req);
}

template <class Impl>
const char *
FrontEnd<Impl>::ICacheCompletionEvent::description()
{
    return "ICache completion event";
}