cpu, fastmodel: Remove the old getDTBPtr/getITBPtr virtual methods
[gem5.git] src/cpu/o3/fetch_impl.hh
/*
 * Copyright (c) 2010-2014 ARM Limited
 * Copyright (c) 2012-2013 AMD
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2004-2006 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef __CPU_O3_FETCH_IMPL_HH__
#define __CPU_O3_FETCH_IMPL_HH__

#include <algorithm>
#include <cstring>
#include <list>
#include <map>
#include <queue>

#include "arch/generic/tlb.hh"
#include "arch/utility.hh"
#include "base/random.hh"
#include "base/types.hh"
#include "config/the_isa.hh"
#include "cpu/base.hh"
#include "cpu/o3/cpu.hh"
#include "cpu/o3/fetch.hh"
#include "cpu/exetrace.hh"
#include "debug/Activity.hh"
#include "debug/Drain.hh"
#include "debug/Fetch.hh"
#include "debug/O3CPU.hh"
#include "debug/O3PipeView.hh"
#include "mem/packet.hh"
#include "params/DerivO3CPU.hh"
#include "sim/byteswap.hh"
#include "sim/core.hh"
#include "sim/eventq.hh"
#include "sim/full_system.hh"
#include "sim/system.hh"
#include "cpu/o3/isa_specific.hh"

using namespace std;

template<class Impl>
DefaultFetch<Impl>::DefaultFetch(O3CPU *_cpu, const DerivO3CPUParams &params)
    : fetchPolicy(params.smtFetchPolicy),
      cpu(_cpu),
      branchPred(nullptr),
      decodeToFetchDelay(params.decodeToFetchDelay),
      renameToFetchDelay(params.renameToFetchDelay),
      iewToFetchDelay(params.iewToFetchDelay),
      commitToFetchDelay(params.commitToFetchDelay),
      fetchWidth(params.fetchWidth),
      decodeWidth(params.decodeWidth),
      retryPkt(NULL),
      retryTid(InvalidThreadID),
      cacheBlkSize(cpu->cacheLineSize()),
      fetchBufferSize(params.fetchBufferSize),
      fetchBufferMask(fetchBufferSize - 1),
      fetchQueueSize(params.fetchQueueSize),
      numThreads(params.numThreads),
      numFetchingThreads(params.smtNumFetchingThreads),
      icachePort(this, _cpu),
      finishTranslationEvent(this), fetchStats(_cpu, this)
{
    if (numThreads > Impl::MaxThreads)
        fatal("numThreads (%d) is larger than compiled limit (%d),\n"
              "\tincrease MaxThreads in src/cpu/o3/impl.hh\n",
              numThreads, static_cast<int>(Impl::MaxThreads));
    if (fetchWidth > Impl::MaxWidth)
        fatal("fetchWidth (%d) is larger than compiled limit (%d),\n"
              "\tincrease MaxWidth in src/cpu/o3/impl.hh\n",
              fetchWidth, static_cast<int>(Impl::MaxWidth));
    if (fetchBufferSize > cacheBlkSize)
        fatal("fetch buffer size (%u bytes) is greater than the cache "
              "block size (%u bytes)\n", fetchBufferSize, cacheBlkSize);
    if (cacheBlkSize % fetchBufferSize)
        fatal("cache block (%u bytes) is not a multiple of the "
              "fetch buffer (%u bytes)\n", cacheBlkSize, fetchBufferSize);

    // Get the size of an instruction.
    instSize = sizeof(TheISA::MachInst);

    for (int i = 0; i < Impl::MaxThreads; i++) {
        fetchStatus[i] = Idle;
        decoder[i] = nullptr;
        pc[i] = 0;
        fetchOffset[i] = 0;
        macroop[i] = nullptr;
        delayedCommit[i] = false;
        memReq[i] = nullptr;
        stalls[i] = {false, false};
        fetchBuffer[i] = NULL;
        fetchBufferPC[i] = 0;
        fetchBufferValid[i] = false;
        lastIcacheStall[i] = 0;
        issuePipelinedIfetch[i] = false;
    }

    branchPred = params.branchPred;

    for (ThreadID tid = 0; tid < numThreads; tid++) {
        decoder[tid] = new TheISA::Decoder(
                dynamic_cast<TheISA::ISA *>(params.isa[tid]));
        // Create space to buffer the cache line data,
        // which may not hold the entire cache line.
        fetchBuffer[tid] = new uint8_t[fetchBufferSize];
    }
}

template <class Impl>
std::string
DefaultFetch<Impl>::name() const
{
    return cpu->name() + ".fetch";
}

template <class Impl>
void
DefaultFetch<Impl>::regProbePoints()
{
    ppFetch = new ProbePointArg<DynInstPtr>(cpu->getProbeManager(), "Fetch");
    ppFetchRequestSent = new ProbePointArg<RequestPtr>(cpu->getProbeManager(),
                                                       "FetchRequest");
}

template <class Impl>
DefaultFetch<Impl>::
FetchStatGroup::FetchStatGroup(O3CPU *cpu, DefaultFetch *fetch)
    : Stats::Group(cpu, "fetch"),
    ADD_STAT(icacheStallCycles,
        "Number of cycles fetch is stalled on an Icache miss"),
    ADD_STAT(insts, "Number of instructions fetch has processed"),
    ADD_STAT(branches, "Number of branches that fetch encountered"),
    ADD_STAT(predictedBranches,
        "Number of branches that fetch has predicted taken"),
    ADD_STAT(cycles,
        "Number of cycles fetch has run and was not squashing or blocked"),
    ADD_STAT(squashCycles, "Number of cycles fetch has spent squashing"),
    ADD_STAT(tlbCycles,
        "Number of cycles fetch has spent waiting for tlb"),
    ADD_STAT(idleCycles, "Number of cycles fetch was idle"),
    ADD_STAT(blockedCycles, "Number of cycles fetch has spent blocked"),
    ADD_STAT(miscStallCycles,
        "Number of cycles fetch has spent waiting on interrupts, "
        "or bad addresses, or out of MSHRs"),
    ADD_STAT(pendingDrainCycles,
        "Number of cycles fetch has spent waiting on pipes to drain"),
    ADD_STAT(noActiveThreadStallCycles,
        "Number of stall cycles due to no active thread to fetch from"),
    ADD_STAT(pendingTrapStallCycles,
        "Number of stall cycles due to pending traps"),
    ADD_STAT(pendingQuiesceStallCycles,
        "Number of stall cycles due to pending quiesce instructions"),
    ADD_STAT(icacheWaitRetryStallCycles,
        "Number of stall cycles due to full MSHR"),
    ADD_STAT(cacheLines, "Number of cache lines fetched"),
    ADD_STAT(icacheSquashes,
        "Number of outstanding Icache misses that were squashed"),
    ADD_STAT(tlbSquashes,
        "Number of outstanding ITLB misses that were squashed"),
    ADD_STAT(nisnDist,
        "Number of instructions fetched each cycle (Total)"),
    ADD_STAT(idleRate, "Percent of cycles fetch was idle",
        idleCycles * 100 / cpu->numCycles),
    ADD_STAT(branchRate, "Number of branch fetches per cycle",
        branches / cpu->numCycles),
    ADD_STAT(rate, "Number of inst fetches per cycle",
        insts / cpu->numCycles)
{
    icacheStallCycles
        .prereq(icacheStallCycles);
    insts
        .prereq(insts);
    branches
        .prereq(branches);
    predictedBranches
        .prereq(predictedBranches);
    cycles
        .prereq(cycles);
    squashCycles
        .prereq(squashCycles);
    tlbCycles
        .prereq(tlbCycles);
    idleCycles
        .prereq(idleCycles);
    blockedCycles
        .prereq(blockedCycles);
    cacheLines
        .prereq(cacheLines);
    miscStallCycles
        .prereq(miscStallCycles);
    pendingDrainCycles
        .prereq(pendingDrainCycles);
    noActiveThreadStallCycles
        .prereq(noActiveThreadStallCycles);
    pendingTrapStallCycles
        .prereq(pendingTrapStallCycles);
    pendingQuiesceStallCycles
        .prereq(pendingQuiesceStallCycles);
    icacheWaitRetryStallCycles
        .prereq(icacheWaitRetryStallCycles);
    icacheSquashes
        .prereq(icacheSquashes);
    tlbSquashes
        .prereq(tlbSquashes);
    nisnDist
        .init(/* base value */ 0,
              /* last value */ fetch->fetchWidth,
              /* bucket size */ 1)
        .flags(Stats::pdf);
    idleRate
        .prereq(idleRate);
    branchRate
        .flags(Stats::total);
    rate
        .flags(Stats::total);
}

template<class Impl>
void
DefaultFetch<Impl>::setTimeBuffer(TimeBuffer<TimeStruct> *time_buffer)
{
    timeBuffer = time_buffer;

    // Create wires to get information from proper places in time buffer.
    fromDecode = timeBuffer->getWire(-decodeToFetchDelay);
    fromRename = timeBuffer->getWire(-renameToFetchDelay);
    fromIEW = timeBuffer->getWire(-iewToFetchDelay);
    fromCommit = timeBuffer->getWire(-commitToFetchDelay);
}

template<class Impl>
void
DefaultFetch<Impl>::setActiveThreads(std::list<ThreadID> *at_ptr)
{
    activeThreads = at_ptr;
}

template<class Impl>
void
DefaultFetch<Impl>::setFetchQueue(TimeBuffer<FetchStruct> *ftb_ptr)
{
    // Create wire to write information to proper place in fetch time buf.
    toDecode = ftb_ptr->getWire(0);
}

template<class Impl>
void
DefaultFetch<Impl>::startupStage()
{
    assert(priorityList.empty());
    resetStage();

    // Fetch needs to start fetching instructions at the very beginning,
    // so it must start up in active state.
    switchToActive();
}

template<class Impl>
void
DefaultFetch<Impl>::clearStates(ThreadID tid)
{
    fetchStatus[tid] = Running;
    pc[tid] = cpu->pcState(tid);
    fetchOffset[tid] = 0;
    macroop[tid] = NULL;
    delayedCommit[tid] = false;
    memReq[tid] = NULL;
    stalls[tid].decode = false;
    stalls[tid].drain = false;
    fetchBufferPC[tid] = 0;
    fetchBufferValid[tid] = false;
    fetchQueue[tid].clear();

    // TODO not sure what to do with priorityList for now
    // priorityList.push_back(tid);
}

template<class Impl>
void
DefaultFetch<Impl>::resetStage()
{
    numInst = 0;
    interruptPending = false;
    cacheBlocked = false;

    priorityList.clear();

    // Setup PC and nextPC with initial state.
    for (ThreadID tid = 0; tid < numThreads; ++tid) {
        fetchStatus[tid] = Running;
        pc[tid] = cpu->pcState(tid);
        fetchOffset[tid] = 0;
        macroop[tid] = NULL;

        delayedCommit[tid] = false;
        memReq[tid] = NULL;

        stalls[tid].decode = false;
        stalls[tid].drain = false;

        fetchBufferPC[tid] = 0;
        fetchBufferValid[tid] = false;

        fetchQueue[tid].clear();

        priorityList.push_back(tid);
    }

    wroteToTimeBuffer = false;
    _status = Inactive;
}

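// Handle an icache response: copy the returned bytes into the thread's
// fetch buffer, wake the CPU, and mark the access complete. Responses
// that no longer match the outstanding request (e.g. after a squash)
// are counted and dropped.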
template<class Impl>
void
DefaultFetch<Impl>::processCacheCompletion(PacketPtr pkt)
{
    ThreadID tid = cpu->contextToThread(pkt->req->contextId());

    DPRINTF(Fetch, "[tid:%i] Waking up from cache miss.\n", tid);
    assert(!cpu->switchedOut());

    // Only change the status if it's still waiting on the icache access
    // to return.
    if (fetchStatus[tid] != IcacheWaitResponse ||
        pkt->req != memReq[tid]) {
        ++fetchStats.icacheSquashes;
        delete pkt;
        return;
    }

    memcpy(fetchBuffer[tid], pkt->getConstPtr<uint8_t>(), fetchBufferSize);
    fetchBufferValid[tid] = true;

    // Wake up the CPU (if it went to sleep and was waiting on
    // this completion event).
    cpu->wakeCPU();

    DPRINTF(Activity, "[tid:%i] Activating fetch due to cache completion\n",
            tid);

    switchToActive();

    // Only switch to IcacheAccessComplete if we're not stalled as well.
    if (checkStall(tid)) {
        fetchStatus[tid] = Blocked;
    } else {
        fetchStatus[tid] = IcacheAccessComplete;
    }

    pkt->req->setAccessLatency();
    cpu->ppInstAccessComplete->notify(pkt);
    // Reset the mem req to NULL.
    delete pkt;
    memReq[tid] = NULL;
}

template <class Impl>
void
DefaultFetch<Impl>::drainResume()
{
    for (ThreadID i = 0; i < numThreads; ++i) {
        stalls[i].decode = false;
        stalls[i].drain = false;
    }
}

template <class Impl>
void
DefaultFetch<Impl>::drainSanityCheck() const
{
    assert(isDrained());
    assert(retryPkt == NULL);
    assert(retryTid == InvalidThreadID);
    assert(!cacheBlocked);
    assert(!interruptPending);

    for (ThreadID i = 0; i < numThreads; ++i) {
        assert(!memReq[i]);
        assert(fetchStatus[i] == Idle || stalls[i].drain);
    }

    branchPred->drainSanityCheck();
}

template <class Impl>
bool
DefaultFetch<Impl>::isDrained() const
{
    /* Make sure that threads are either idle or that the commit stage
     * has signaled that draining has completed by setting the drain
     * stall flag. This effectively forces the pipeline to be disabled
     * until the whole system is drained (simulation may continue to
     * drain other components).
     */
    for (ThreadID i = 0; i < numThreads; ++i) {
        // Verify fetch queues are drained
        if (!fetchQueue[i].empty())
            return false;

        // Return false if not idle or drain stalled
        if (fetchStatus[i] != Idle) {
            if (fetchStatus[i] == Blocked && stalls[i].drain)
                continue;
            else
                return false;
        }
    }

    /* The pipeline might start up again in the middle of the drain
     * cycle if the finish translation event is scheduled, so make
     * sure that's not the case.
     */
    return !finishTranslationEvent.scheduled();
}

template <class Impl>
void
DefaultFetch<Impl>::takeOverFrom()
{
    assert(cpu->getInstPort().isConnected());
    resetStage();
}

template <class Impl>
void
DefaultFetch<Impl>::drainStall(ThreadID tid)
{
    assert(cpu->isDraining());
    assert(!stalls[tid].drain);
    DPRINTF(Drain, "%i: Thread drained.\n", tid);
    stalls[tid].drain = true;
}

template <class Impl>
void
DefaultFetch<Impl>::wakeFromQuiesce()
{
    DPRINTF(Fetch, "Waking up from quiesce\n");
    // Hopefully this is safe
    // @todo: Allow other threads to wake from quiesce.
    fetchStatus[0] = Running;
}

template <class Impl>
inline void
DefaultFetch<Impl>::switchToActive()
{
    if (_status == Inactive) {
        DPRINTF(Activity, "Activating stage.\n");

        cpu->activateStage(O3CPU::FetchIdx);

        _status = Active;
    }
}

template <class Impl>
inline void
DefaultFetch<Impl>::switchToInactive()
{
    if (_status == Active) {
        DPRINTF(Activity, "Deactivating stage.\n");

        cpu->deactivateStage(O3CPU::FetchIdx);

        _status = Inactive;
    }
}

template <class Impl>
void
DefaultFetch<Impl>::deactivateThread(ThreadID tid)
{
    // Update priority list
    auto thread_it = std::find(priorityList.begin(), priorityList.end(), tid);
    if (thread_it != priorityList.end()) {
        priorityList.erase(thread_it);
    }
}

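// Advance nextPC past the given instruction, consulting the branch
// predictor for control instructions. Returns true if the branch is
// predicted taken, which ends the current fetch block.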
template <class Impl>
bool
DefaultFetch<Impl>::lookupAndUpdateNextPC(
        const DynInstPtr &inst, TheISA::PCState &nextPC)
{
    // Do branch prediction check here.
    // A bit of a misnomer...nextPC is actually the current PC until
    // this function updates it.
    bool predict_taken;

    if (!inst->isControl()) {
        TheISA::advancePC(nextPC, inst->staticInst);
        inst->setPredTarg(nextPC);
        inst->setPredTaken(false);
        return false;
    }

    ThreadID tid = inst->threadNumber;
    predict_taken = branchPred->predict(inst->staticInst, inst->seqNum,
                                        nextPC, tid);

    if (predict_taken) {
        DPRINTF(Fetch, "[tid:%i] [sn:%llu] Branch at PC %#x "
                "predicted to be taken to %s\n",
                tid, inst->seqNum, inst->pcState().instAddr(), nextPC);
    } else {
        DPRINTF(Fetch, "[tid:%i] [sn:%llu] Branch at PC %#x "
                "predicted to be not taken\n",
                tid, inst->seqNum, inst->pcState().instAddr());
    }

    DPRINTF(Fetch, "[tid:%i] [sn:%llu] Branch at PC %#x "
            "predicted to go to %s\n",
            tid, inst->seqNum, inst->pcState().instAddr(), nextPC);
    inst->setPredTarg(nextPC);
    inst->setPredTaken(predict_taken);

    ++fetchStats.branches;

    if (predict_taken) {
        ++fetchStats.predictedBranches;
    }

    return predict_taken;
}

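// Start a timing translation for the fetch-buffer segment containing
// vaddr (e.g. with fetchBufferSize = 64, vaddr 0x1234 falls in the
// segment at 0x1200). Returns false if the access cannot be initiated;
// the icache request itself is sent later, from finishTranslation().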
template <class Impl>
bool
DefaultFetch<Impl>::fetchCacheLine(Addr vaddr, ThreadID tid, Addr pc)
{
    Fault fault = NoFault;

    assert(!cpu->switchedOut());

    // @todo: not sure if these should block translation.
    //AlphaDep
    if (cacheBlocked) {
        DPRINTF(Fetch, "[tid:%i] Can't fetch cache line, cache blocked\n",
                tid);
        return false;
    } else if (checkInterrupt(pc) && !delayedCommit[tid]) {
        // Hold off fetch from getting new instructions when:
        // Cache is blocked, or
        // while an interrupt is pending and we're not in PAL mode, or
        // fetch is switched out.
        DPRINTF(Fetch, "[tid:%i] Can't fetch cache line, interrupt pending\n",
                tid);
        return false;
    }

    // Align the fetch address to the start of a fetch buffer segment.
    Addr fetchBufferBlockPC = fetchBufferAlignPC(vaddr);

    DPRINTF(Fetch, "[tid:%i] Fetching cache line %#x for addr %#x\n",
            tid, fetchBufferBlockPC, vaddr);

    // Setup the memReq to do a read of the first instruction's address.
    // Set the appropriate read size and flags as well.
    // Build request here.
    RequestPtr mem_req = std::make_shared<Request>(
        fetchBufferBlockPC, fetchBufferSize,
        Request::INST_FETCH, cpu->instRequestorId(), pc,
        cpu->thread[tid]->contextId());

    mem_req->taskId(cpu->taskId());

    memReq[tid] = mem_req;

    // Initiate translation of the icache block
    fetchStatus[tid] = ItlbWait;
    FetchTranslation *trans = new FetchTranslation(this);
    cpu->mmu->translateTiming(mem_req, cpu->thread[tid]->getTC(),
                              trans, BaseTLB::Execute);
    return true;
}

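// Callback for when the ITLB finishes translating a fetch request. On
// success the icache read is sent (or queued for retry if the cache is
// blocked); on a fault a fault-carrying nop is handed to decode so that
// commit can eventually take the trap.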
template <class Impl>
void
DefaultFetch<Impl>::finishTranslation(const Fault &fault,
                                      const RequestPtr &mem_req)
{
    ThreadID tid = cpu->contextToThread(mem_req->contextId());
    Addr fetchBufferBlockPC = mem_req->getVaddr();

    assert(!cpu->switchedOut());

    // Wake up CPU if it was idle
    cpu->wakeCPU();

    if (fetchStatus[tid] != ItlbWait || mem_req != memReq[tid] ||
        mem_req->getVaddr() != memReq[tid]->getVaddr()) {
        DPRINTF(Fetch, "[tid:%i] Ignoring itlb completed after squash\n",
                tid);
        ++fetchStats.tlbSquashes;
        return;
    }

    // If translation was successful, attempt to read the icache block.
    if (fault == NoFault) {
        // Check that we're not going off into random memory
        // If we have, just wait around for commit to squash something and put
        // us on the right track
        if (!cpu->system->isMemAddr(mem_req->getPaddr())) {
            warn("Address %#x is outside of physical memory, stopping fetch\n",
                 mem_req->getPaddr());
            fetchStatus[tid] = NoGoodAddr;
            memReq[tid] = NULL;
            return;
        }

        // Build packet here.
        PacketPtr data_pkt = new Packet(mem_req, MemCmd::ReadReq);
        data_pkt->dataDynamic(new uint8_t[fetchBufferSize]);

        fetchBufferPC[tid] = fetchBufferBlockPC;
        fetchBufferValid[tid] = false;
        DPRINTF(Fetch, "Fetch: Doing instruction read.\n");

        fetchStats.cacheLines++;

        // Access the cache.
        if (!icachePort.sendTimingReq(data_pkt)) {
            assert(retryPkt == NULL);
            assert(retryTid == InvalidThreadID);
            DPRINTF(Fetch, "[tid:%i] Out of MSHRs!\n", tid);

            fetchStatus[tid] = IcacheWaitRetry;
            retryPkt = data_pkt;
            retryTid = tid;
            cacheBlocked = true;
        } else {
            DPRINTF(Fetch, "[tid:%i] Doing Icache access.\n", tid);
            DPRINTF(Activity, "[tid:%i] Activity: Waiting on I-cache "
                    "response.\n", tid);
            lastIcacheStall[tid] = curTick();
            fetchStatus[tid] = IcacheWaitResponse;
            // Notify Fetch Request probe when a packet containing a fetch
            // request is successfully sent
            ppFetchRequestSent->notify(mem_req);
        }
    } else {
        // Don't send an instruction to decode if we can't handle it.
        if (!(numInst < fetchWidth) ||
            !(fetchQueue[tid].size() < fetchQueueSize)) {
            assert(!finishTranslationEvent.scheduled());
            finishTranslationEvent.setFault(fault);
            finishTranslationEvent.setReq(mem_req);
            cpu->schedule(finishTranslationEvent,
                          cpu->clockEdge(Cycles(1)));
            return;
        }
        DPRINTF(Fetch,
                "[tid:%i] Got back req with addr %#x but expected %#x\n",
                tid, mem_req->getVaddr(), memReq[tid]->getVaddr());
        // Translation faulted, icache request won't be sent.
        memReq[tid] = NULL;

        // Send the fault to commit. This thread will not do anything
        // until commit handles the fault. The only other way it can
        // wake up is if a squash comes along and changes the PC.
        TheISA::PCState fetchPC = pc[tid];

        DPRINTF(Fetch, "[tid:%i] Translation faulted, building noop.\n", tid);
        // We will use a nop in order to carry the fault.
        DynInstPtr instruction = buildInst(tid, StaticInst::nopStaticInstPtr,
                                           NULL, fetchPC, fetchPC, false);
        instruction->setNotAnInst();

        instruction->setPredTarg(fetchPC);
        instruction->fault = fault;
        wroteToTimeBuffer = true;

        DPRINTF(Activity, "Activity this cycle.\n");
        cpu->activityThisCycle();

        fetchStatus[tid] = TrapPending;

        DPRINTF(Fetch, "[tid:%i] Blocked, need to handle the trap.\n", tid);
        DPRINTF(Fetch, "[tid:%i] fault (%s) detected @ PC %s.\n",
                tid, fault->name(), pc[tid]);
    }
    _status = updateFetchStatus();
}

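// Common squash logic shared by squashes from decode and commit: reset
// the PC and decoder state, drop any outstanding ITLB or icache request
// for this thread, and empty its fetch queue.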
template <class Impl>
inline void
DefaultFetch<Impl>::doSquash(const TheISA::PCState &newPC,
                             const DynInstPtr squashInst, ThreadID tid)
{
    DPRINTF(Fetch, "[tid:%i] Squashing, setting PC to: %s.\n",
            tid, newPC);

    pc[tid] = newPC;
    fetchOffset[tid] = 0;
    if (squashInst && squashInst->pcState().instAddr() == newPC.instAddr())
        macroop[tid] = squashInst->macroop;
    else
        macroop[tid] = NULL;
    decoder[tid]->reset();

    // Clear the icache miss if it's outstanding.
    if (fetchStatus[tid] == IcacheWaitResponse) {
        DPRINTF(Fetch, "[tid:%i] Squashing outstanding Icache miss.\n",
                tid);
        memReq[tid] = NULL;
    } else if (fetchStatus[tid] == ItlbWait) {
        DPRINTF(Fetch, "[tid:%i] Squashing outstanding ITLB miss.\n",
                tid);
        memReq[tid] = NULL;
    }

    // Get rid of the retrying packet if it was from this thread.
    if (retryTid == tid) {
        assert(cacheBlocked);
        if (retryPkt) {
            delete retryPkt;
        }
        retryPkt = NULL;
        retryTid = InvalidThreadID;
    }

    fetchStatus[tid] = Squashing;

    // Empty fetch queue
    fetchQueue[tid].clear();

    // microops are being squashed, it is not known whether the
    // youngest non-squashed microop was marked delayed commit
    // or not. Setting the flag to true ensures that the
    // interrupts are not handled when they cannot be, though
    // some opportunities to handle interrupts may be missed.
    delayedCommit[tid] = true;

    ++fetchStats.squashCycles;
}

template<class Impl>
void
DefaultFetch<Impl>::squashFromDecode(const TheISA::PCState &newPC,
                                     const DynInstPtr squashInst,
                                     const InstSeqNum seq_num, ThreadID tid)
{
    DPRINTF(Fetch, "[tid:%i] Squashing from decode.\n", tid);

    doSquash(newPC, squashInst, tid);

    // Tell the CPU to remove any instructions that are in flight between
    // fetch and decode.
    cpu->removeInstsUntil(seq_num, tid);
}

template<class Impl>
bool
DefaultFetch<Impl>::checkStall(ThreadID tid) const
{
    bool ret_val = false;

    if (stalls[tid].drain) {
        assert(cpu->isDraining());
        DPRINTF(Fetch, "[tid:%i] Drain stall detected.\n", tid);
        ret_val = true;
    }

    return ret_val;
}

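// Recompute the stage-level status: Active if any active thread is
// running, squashing, or has a completed icache access, else Inactive.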
template<class Impl>
typename DefaultFetch<Impl>::FetchStatus
DefaultFetch<Impl>::updateFetchStatus()
{
    //Check Running
    list<ThreadID>::iterator threads = activeThreads->begin();
    list<ThreadID>::iterator end = activeThreads->end();

    while (threads != end) {
        ThreadID tid = *threads++;

        if (fetchStatus[tid] == Running ||
            fetchStatus[tid] == Squashing ||
            fetchStatus[tid] == IcacheAccessComplete) {

            if (_status == Inactive) {
                DPRINTF(Activity, "[tid:%i] Activating stage.\n", tid);

                if (fetchStatus[tid] == IcacheAccessComplete) {
                    DPRINTF(Activity, "[tid:%i] Activating fetch due to "
                            "cache completion\n", tid);
                }

                cpu->activateStage(O3CPU::FetchIdx);
            }

            return Active;
        }
    }

    // Stage is switching from active to inactive, notify CPU of it.
    if (_status == Active) {
        DPRINTF(Activity, "Deactivating stage.\n");

        cpu->deactivateStage(O3CPU::FetchIdx);
    }

    return Inactive;
}

template <class Impl>
void
DefaultFetch<Impl>::squash(const TheISA::PCState &newPC,
                           const InstSeqNum seq_num, DynInstPtr squashInst,
                           ThreadID tid)
{
    DPRINTF(Fetch, "[tid:%i] Squash from commit.\n", tid);

    doSquash(newPC, squashInst, tid);

    // Tell the CPU to remove any instructions that are not in the ROB.
    cpu->removeInstsNotInROB(tid);
}

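// Per-cycle work for the stage: update each thread's status from the
// incoming time-buffer signals, fetch for the active threads, issue
// pipelined icache accesses, and forward up to decodeWidth queued
// instructions to decode.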
template <class Impl>
void
DefaultFetch<Impl>::tick()
{
    list<ThreadID>::iterator threads = activeThreads->begin();
    list<ThreadID>::iterator end = activeThreads->end();
    bool status_change = false;

    wroteToTimeBuffer = false;

    for (ThreadID i = 0; i < numThreads; ++i) {
        issuePipelinedIfetch[i] = false;
    }

    while (threads != end) {
        ThreadID tid = *threads++;

        // Check the signals for each thread to determine the proper status
        // for each thread.
        bool updated_status = checkSignalsAndUpdate(tid);
        status_change = status_change || updated_status;
    }

    DPRINTF(Fetch, "Running stage.\n");

    if (FullSystem) {
        if (fromCommit->commitInfo[0].interruptPending) {
            interruptPending = true;
        }

        if (fromCommit->commitInfo[0].clearInterrupt) {
            interruptPending = false;
        }
    }

    for (threadFetched = 0; threadFetched < numFetchingThreads;
         threadFetched++) {
        // Fetch each of the actively fetching threads.
        fetch(status_change);
    }

    // Record number of instructions fetched this cycle for distribution.
    fetchStats.nisnDist.sample(numInst);

    if (status_change) {
        // Change the fetch stage status if there was a status change.
        _status = updateFetchStatus();
    }

    // Issue the next I-cache request if possible.
    for (ThreadID i = 0; i < numThreads; ++i) {
        if (issuePipelinedIfetch[i]) {
            pipelineIcacheAccesses(i);
        }
    }

    // Send instructions enqueued into the fetch queue to decode.
    // Limit rate by fetchWidth. Stall if decode is stalled.
    unsigned insts_to_decode = 0;
    unsigned available_insts = 0;

    for (auto tid : *activeThreads) {
        if (!stalls[tid].decode) {
            available_insts += fetchQueue[tid].size();
        }
    }

    // Pick a random thread to start trying to grab instructions from
    auto tid_itr = activeThreads->begin();
    std::advance(tid_itr,
                 random_mt.random<uint8_t>(0, activeThreads->size() - 1));

    while (available_insts != 0 && insts_to_decode < decodeWidth) {
        ThreadID tid = *tid_itr;
        if (!stalls[tid].decode && !fetchQueue[tid].empty()) {
            const auto& inst = fetchQueue[tid].front();
            toDecode->insts[toDecode->size++] = inst;
            DPRINTF(Fetch, "[tid:%i] [sn:%llu] Sending instruction to decode "
                    "from fetch queue. Fetch queue size: %i.\n",
                    tid, inst->seqNum, fetchQueue[tid].size());

            wroteToTimeBuffer = true;
            fetchQueue[tid].pop_front();
            insts_to_decode++;
            available_insts--;
        }

        tid_itr++;
        // Wrap around if at end of active threads list
        if (tid_itr == activeThreads->end())
            tid_itr = activeThreads->begin();
    }

    // If there was activity this cycle, inform the CPU of it.
    if (wroteToTimeBuffer) {
        DPRINTF(Activity, "Activity this cycle.\n");
        cpu->activityThisCycle();
    }

    // Reset the number of the instruction we've fetched.
    numInst = 0;
}

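// Examine the time-buffer signals from decode and commit for one thread
// and update its fetch status (squash, block, or resume). Returns true
// if the thread's status changed.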
template <class Impl>
bool
DefaultFetch<Impl>::checkSignalsAndUpdate(ThreadID tid)
{
    // Update the per thread stall statuses.
    if (fromDecode->decodeBlock[tid]) {
        stalls[tid].decode = true;
    }

    if (fromDecode->decodeUnblock[tid]) {
        assert(stalls[tid].decode);
        assert(!fromDecode->decodeBlock[tid]);
        stalls[tid].decode = false;
    }

    // Check squash signals from commit.
    if (fromCommit->commitInfo[tid].squash) {

        DPRINTF(Fetch, "[tid:%i] Squashing instructions due to squash "
                "from commit.\n", tid);
        // In any case, squash.
        squash(fromCommit->commitInfo[tid].pc,
               fromCommit->commitInfo[tid].doneSeqNum,
               fromCommit->commitInfo[tid].squashInst, tid);

        // If it was a branch mispredict on a control instruction, update the
        // branch predictor with that instruction, otherwise just kill the
        // invalid state we generated after the squashing sequence number.
        if (fromCommit->commitInfo[tid].mispredictInst &&
            fromCommit->commitInfo[tid].mispredictInst->isControl()) {
            branchPred->squash(fromCommit->commitInfo[tid].doneSeqNum,
                               fromCommit->commitInfo[tid].pc,
                               fromCommit->commitInfo[tid].branchTaken,
                               tid);
        } else {
            branchPred->squash(fromCommit->commitInfo[tid].doneSeqNum,
                               tid);
        }

        return true;
    } else if (fromCommit->commitInfo[tid].doneSeqNum) {
        // Update the branch predictor if it wasn't a squashed instruction
        // that was broadcasted.
        branchPred->update(fromCommit->commitInfo[tid].doneSeqNum, tid);
    }

    // Check squash signals from decode.
    if (fromDecode->decodeInfo[tid].squash) {
        DPRINTF(Fetch, "[tid:%i] Squashing instructions due to squash "
                "from decode.\n", tid);

        // Update the branch predictor.
        if (fromDecode->decodeInfo[tid].branchMispredict) {
            branchPred->squash(fromDecode->decodeInfo[tid].doneSeqNum,
                               fromDecode->decodeInfo[tid].nextPC,
                               fromDecode->decodeInfo[tid].branchTaken,
                               tid);
        } else {
            branchPred->squash(fromDecode->decodeInfo[tid].doneSeqNum,
                               tid);
        }

        if (fetchStatus[tid] != Squashing) {

            DPRINTF(Fetch, "Squashing from decode with PC = %s\n",
                    fromDecode->decodeInfo[tid].nextPC);
            // Squash unless we're already squashing
            squashFromDecode(fromDecode->decodeInfo[tid].nextPC,
                             fromDecode->decodeInfo[tid].squashInst,
                             fromDecode->decodeInfo[tid].doneSeqNum,
                             tid);

            return true;
        }
    }

    if (checkStall(tid) &&
        fetchStatus[tid] != IcacheWaitResponse &&
        fetchStatus[tid] != IcacheWaitRetry &&
        fetchStatus[tid] != ItlbWait &&
        fetchStatus[tid] != QuiescePending) {
        DPRINTF(Fetch, "[tid:%i] Setting to blocked\n", tid);

        fetchStatus[tid] = Blocked;

        return true;
    }

    if (fetchStatus[tid] == Blocked ||
        fetchStatus[tid] == Squashing) {
        // Switch status to running if fetch isn't being told to block or
        // squash this cycle.
        DPRINTF(Fetch, "[tid:%i] Done squashing, switching to running.\n",
                tid);

        fetchStatus[tid] = Running;

        return true;
    }

    // If we've reached this point, we have not gotten any signals that
    // cause fetch to change its status. Fetch remains the same as before.
    return false;
}

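// Wrap a decoded StaticInst in a DynInst, assign it a sequence number,
// register it with the CPU, and append it to the thread's fetch queue.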
template<class Impl>
typename Impl::DynInstPtr
DefaultFetch<Impl>::buildInst(ThreadID tid, StaticInstPtr staticInst,
                              StaticInstPtr curMacroop,
                              TheISA::PCState thisPC,
                              TheISA::PCState nextPC, bool trace)
{
    // Get a sequence number.
    InstSeqNum seq = cpu->getAndIncrementInstSeq();

    // Create a new DynInst from the instruction fetched.
    DynInstPtr instruction =
        new DynInst(staticInst, curMacroop, thisPC, nextPC, seq, cpu);
    instruction->setTid(tid);

    instruction->setThreadState(cpu->thread[tid]);

    DPRINTF(Fetch, "[tid:%i] Instruction PC %#x (%d) created "
            "[sn:%lli].\n", tid, thisPC.instAddr(),
            thisPC.microPC(), seq);

    DPRINTF(Fetch, "[tid:%i] Instruction is: %s\n", tid,
            instruction->staticInst->
            disassemble(thisPC.instAddr()));

#if TRACING_ON
    if (trace) {
        instruction->traceData =
            cpu->getTracer()->getInstRecord(curTick(), cpu->tcBase(tid),
                    instruction->staticInst, thisPC, curMacroop);
    }
#else
    instruction->traceData = NULL;
#endif

    // Add instruction to the CPU's list of instructions.
    instruction->setInstListIt(cpu->addInst(instruction));

    // Write the instruction to the first slot in the queue
    // that heads to decode.
    assert(numInst < fetchWidth);
    fetchQueue[tid].push_back(instruction);
    assert(fetchQueue[tid].size() <= fetchQueueSize);
    DPRINTF(Fetch, "[tid:%i] Fetch queue entry created (%i/%i).\n",
            tid, fetchQueue[tid].size(), fetchQueueSize);
    //toDecode->insts[toDecode->size++] = instruction;

    // Keep track of if we can take an interrupt at this boundary
    delayedCommit[tid] = instruction->isDelayedCommit();

    return instruction;
}

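// The main fetch loop: pick a thread, make sure its fetch buffer is
// valid (starting a translation/cache access if not), then decode and
// enqueue instructions until the fetch width, the fetch queue limit, or
// a predicted-taken branch ends the block.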
template<class Impl>
void
DefaultFetch<Impl>::fetch(bool &status_change)
{
    //////////////////////////////////////////
    // Start actual fetch
    //////////////////////////////////////////
    ThreadID tid = getFetchingThread();

    assert(!cpu->switchedOut());

    if (tid == InvalidThreadID) {
        // Breaks looping condition in tick()
        threadFetched = numFetchingThreads;

        if (numThreads == 1) {  // @todo Per-thread stats
            profileStall(0);
        }

        return;
    }

    DPRINTF(Fetch, "Attempting to fetch from [tid:%i]\n", tid);

    // The current PC.
    TheISA::PCState thisPC = pc[tid];

    Addr pcOffset = fetchOffset[tid];
    Addr fetchAddr = (thisPC.instAddr() + pcOffset) & BaseCPU::PCMask;

    bool inRom = isRomMicroPC(thisPC.microPC());

    // If returning from the delay of a cache miss, then update the status
    // to running, otherwise do the cache access. Possibly move this up
    // to tick() function.
    if (fetchStatus[tid] == IcacheAccessComplete) {
        DPRINTF(Fetch, "[tid:%i] Icache miss is complete.\n", tid);

        fetchStatus[tid] = Running;
        status_change = true;
    } else if (fetchStatus[tid] == Running) {
        // Align the fetch PC so it's at the start of a fetch buffer segment.
        Addr fetchBufferBlockPC = fetchBufferAlignPC(fetchAddr);

        // If buffer is no longer valid or fetchAddr has moved to point
        // to the next cache block, AND we have no remaining ucode
        // from a macro-op, then start fetch from icache.
        if (!(fetchBufferValid[tid] && fetchBufferBlockPC == fetchBufferPC[tid])
            && !inRom && !macroop[tid]) {
            DPRINTF(Fetch, "[tid:%i] Attempting to translate and read "
                    "instruction, starting at PC %s.\n", tid, thisPC);

            fetchCacheLine(fetchAddr, tid, thisPC.instAddr());

            if (fetchStatus[tid] == IcacheWaitResponse)
                ++fetchStats.icacheStallCycles;
            else if (fetchStatus[tid] == ItlbWait)
                ++fetchStats.tlbCycles;
            else
                ++fetchStats.miscStallCycles;
            return;
        } else if ((checkInterrupt(thisPC.instAddr()) &&
                    !delayedCommit[tid])) {
            // Stall CPU if an interrupt is posted and we're not issuing
            // a delayed-commit micro-op currently (delayed-commit
            // instructions are not interruptible by interrupts, only faults)
            ++fetchStats.miscStallCycles;
            DPRINTF(Fetch, "[tid:%i] Fetch is stalled!\n", tid);
            return;
        }
    } else {
        if (fetchStatus[tid] == Idle) {
            ++fetchStats.idleCycles;
            DPRINTF(Fetch, "[tid:%i] Fetch is idle!\n", tid);
        }

        // Status is Idle, so fetch should do nothing.
        return;
    }

    ++fetchStats.cycles;

    TheISA::PCState nextPC = thisPC;

    StaticInstPtr staticInst = NULL;
    StaticInstPtr curMacroop = macroop[tid];

    // If the read of the first instruction was successful, then grab the
    // instructions from the rest of the cache line and put them into the
    // queue heading to decode.

    DPRINTF(Fetch, "[tid:%i] Adding instructions to queue to "
            "decode.\n", tid);

    // Need to keep track of whether or not a predicted branch
    // ended this fetch block.
    bool predictedBranch = false;

    // Need to halt fetch if quiesce instruction detected
    bool quiesce = false;

    TheISA::MachInst *cacheInsts =
        reinterpret_cast<TheISA::MachInst *>(fetchBuffer[tid]);

    const unsigned numInsts = fetchBufferSize / instSize;
    unsigned blkOffset = (fetchAddr - fetchBufferPC[tid]) / instSize;

    // Loop through instruction memory from the cache.
    // Keep issuing while fetchWidth is available and branch is not
    // predicted taken
    while (numInst < fetchWidth && fetchQueue[tid].size() < fetchQueueSize
           && !predictedBranch && !quiesce) {
        // We need to process more memory if we aren't going to get a
        // StaticInst from the rom, the current macroop, or what's already
        // in the decoder.
        bool needMem = !inRom && !curMacroop &&
            !decoder[tid]->instReady();
        fetchAddr = (thisPC.instAddr() + pcOffset) & BaseCPU::PCMask;
        Addr fetchBufferBlockPC = fetchBufferAlignPC(fetchAddr);

        if (needMem) {
            // If buffer is no longer valid or fetchAddr has moved to point
            // to the next cache block then start fetch from icache.
            if (!fetchBufferValid[tid] ||
                fetchBufferBlockPC != fetchBufferPC[tid])
                break;

            if (blkOffset >= numInsts) {
                // We need to process more memory, but we've run out of the
                // current block.
                break;
            }

            decoder[tid]->moreBytes(thisPC, fetchAddr, cacheInsts[blkOffset]);

            if (decoder[tid]->needMoreBytes()) {
                blkOffset++;
                fetchAddr += instSize;
                pcOffset += instSize;
            }
        }

        // Extract as many instructions and/or microops as we can from
        // the memory we've processed so far.
        do {
            if (!(curMacroop || inRom)) {
                if (decoder[tid]->instReady()) {
                    staticInst = decoder[tid]->decode(thisPC);

                    // Increment stat of fetched instructions.
                    ++fetchStats.insts;

                    if (staticInst->isMacroop()) {
                        curMacroop = staticInst;
                    } else {
                        pcOffset = 0;
                    }
                } else {
                    // We need more bytes for this instruction so blkOffset
                    // and pcOffset will be updated
                    break;
                }
            }
            // Whether we're moving to a new macroop because we're at the
            // end of the current one, or the branch predictor incorrectly
            // thinks we are...
            bool newMacro = false;
            if (curMacroop || inRom) {
                if (inRom) {
                    staticInst = decoder[tid]->fetchRomMicroop(
                            thisPC.microPC(), curMacroop);
                } else {
                    staticInst = curMacroop->fetchMicroop(thisPC.microPC());
                }
                newMacro |= staticInst->isLastMicroop();
            }

            DynInstPtr instruction =
                buildInst(tid, staticInst, curMacroop,
                          thisPC, nextPC, true);

            ppFetch->notify(instruction);
            numInst++;

#if TRACING_ON
            if (DTRACE(O3PipeView)) {
                instruction->fetchTick = curTick();
            }
#endif

            nextPC = thisPC;

            // If we're branching after this instruction, quit fetching
            // from the same block.
            predictedBranch |= thisPC.branching();
            predictedBranch |=
                lookupAndUpdateNextPC(instruction, nextPC);
            if (predictedBranch) {
                DPRINTF(Fetch, "Branch detected with PC = %s\n", thisPC);
            }

            newMacro |= thisPC.instAddr() != nextPC.instAddr();

            // Move to the next instruction, unless we have a branch.
            thisPC = nextPC;
            inRom = isRomMicroPC(thisPC.microPC());

            if (newMacro) {
                fetchAddr = thisPC.instAddr() & BaseCPU::PCMask;
                blkOffset = (fetchAddr - fetchBufferPC[tid]) / instSize;
                pcOffset = 0;
                curMacroop = NULL;
            }

            if (instruction->isQuiesce()) {
                DPRINTF(Fetch,
                        "Quiesce instruction encountered, halting fetch!\n");
                fetchStatus[tid] = QuiescePending;
                status_change = true;
                quiesce = true;
                break;
            }
        } while ((curMacroop || decoder[tid]->instReady()) &&
                 numInst < fetchWidth &&
                 fetchQueue[tid].size() < fetchQueueSize);

        // Re-evaluate whether the next instruction to fetch is in the
        // micro-op ROM or not.
        inRom = isRomMicroPC(thisPC.microPC());
    }

    if (predictedBranch) {
        DPRINTF(Fetch, "[tid:%i] Done fetching, predicted branch "
                "instruction encountered.\n", tid);
    } else if (numInst >= fetchWidth) {
        DPRINTF(Fetch, "[tid:%i] Done fetching, reached fetch bandwidth "
                "for this cycle.\n", tid);
    } else if (blkOffset >= fetchBufferSize) {
        DPRINTF(Fetch, "[tid:%i] Done fetching, reached the end of the "
                "fetch buffer.\n", tid);
    }

    macroop[tid] = curMacroop;
    fetchOffset[tid] = pcOffset;

    if (numInst > 0) {
        wroteToTimeBuffer = true;
    }

    pc[tid] = thisPC;

    // pipeline a fetch if we're crossing a fetch buffer boundary and not in
    // a state that would preclude fetching
    fetchAddr = (thisPC.instAddr() + pcOffset) & BaseCPU::PCMask;
    Addr fetchBufferBlockPC = fetchBufferAlignPC(fetchAddr);
    issuePipelinedIfetch[tid] = fetchBufferBlockPC != fetchBufferPC[tid] &&
        fetchStatus[tid] != IcacheWaitResponse &&
        fetchStatus[tid] != ItlbWait &&
        fetchStatus[tid] != IcacheWaitRetry &&
        fetchStatus[tid] != QuiescePending &&
        !curMacroop;
}

template<class Impl>
void
DefaultFetch<Impl>::recvReqRetry()
{
    if (retryPkt != NULL) {
        assert(cacheBlocked);
        assert(retryTid != InvalidThreadID);
        assert(fetchStatus[retryTid] == IcacheWaitRetry);

        if (icachePort.sendTimingReq(retryPkt)) {
            fetchStatus[retryTid] = IcacheWaitResponse;
            // Notify Fetch Request probe when a retryPkt is successfully
            // sent. Note that notify must be called before retryPkt is set
            // to NULL.
            ppFetchRequestSent->notify(retryPkt->req);
            retryPkt = NULL;
            retryTid = InvalidThreadID;
            cacheBlocked = false;
        }
    } else {
        assert(retryTid == InvalidThreadID);
        // Access has been squashed since it was sent out. Just clear
        // the cache being blocked.
        cacheBlocked = false;
    }
}

///////////////////////////////////////
//                                   //
//  SMT FETCH POLICY MAINTAINED HERE //
//                                   //
///////////////////////////////////////
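// Pick the thread to fetch from this cycle according to the configured
// SMT fetch policy; returns InvalidThreadID if no thread is ready.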
template<class Impl>
ThreadID
DefaultFetch<Impl>::getFetchingThread()
{
    if (numThreads > 1) {
        switch (fetchPolicy) {
          case SMTFetchPolicy::RoundRobin:
            return roundRobin();
          case SMTFetchPolicy::IQCount:
            return iqCount();
          case SMTFetchPolicy::LSQCount:
            return lsqCount();
          case SMTFetchPolicy::Branch:
            return branchCount();
          default:
            return InvalidThreadID;
        }
    } else {
        list<ThreadID>::iterator thread = activeThreads->begin();
        if (thread == activeThreads->end()) {
            return InvalidThreadID;
        }

        ThreadID tid = *thread;

        if (fetchStatus[tid] == Running ||
            fetchStatus[tid] == IcacheAccessComplete ||
            fetchStatus[tid] == Idle) {
            return tid;
        } else {
            return InvalidThreadID;
        }
    }
}

template<class Impl>
ThreadID
DefaultFetch<Impl>::roundRobin()
{
    list<ThreadID>::iterator pri_iter = priorityList.begin();
    list<ThreadID>::iterator end = priorityList.end();

    ThreadID high_pri;

    while (pri_iter != end) {
        high_pri = *pri_iter;

        assert(high_pri <= numThreads);

        if (fetchStatus[high_pri] == Running ||
            fetchStatus[high_pri] == IcacheAccessComplete ||
            fetchStatus[high_pri] == Idle) {

            priorityList.erase(pri_iter);
            priorityList.push_back(high_pri);

            return high_pri;
        }

        pri_iter++;
    }

    return InvalidThreadID;
}

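// IQCount policy: prioritize the active thread with the fewest
// instructions waiting in the instruction queue.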
template<class Impl>
ThreadID
DefaultFetch<Impl>::iqCount()
{
    //sorted from lowest->highest
    std::priority_queue<unsigned, vector<unsigned>,
                        std::greater<unsigned> > PQ;
    std::map<unsigned, ThreadID> threadMap;

    list<ThreadID>::iterator threads = activeThreads->begin();
    list<ThreadID>::iterator end = activeThreads->end();

    while (threads != end) {
        ThreadID tid = *threads++;
        unsigned iqCount = fromIEW->iewInfo[tid].iqCount;

        //we can potentially get tid collisions if two threads
        //have the same iqCount, but this should be rare.
        PQ.push(iqCount);
        threadMap[iqCount] = tid;
    }

    while (!PQ.empty()) {
        ThreadID high_pri = threadMap[PQ.top()];

        if (fetchStatus[high_pri] == Running ||
            fetchStatus[high_pri] == IcacheAccessComplete ||
            fetchStatus[high_pri] == Idle)
            return high_pri;
        else
            PQ.pop();
    }

    return InvalidThreadID;
}

template<class Impl>
ThreadID
DefaultFetch<Impl>::lsqCount()
{
    //sorted from lowest->highest
    std::priority_queue<unsigned, vector<unsigned>,
                        std::greater<unsigned> > PQ;
    std::map<unsigned, ThreadID> threadMap;

    list<ThreadID>::iterator threads = activeThreads->begin();
    list<ThreadID>::iterator end = activeThreads->end();

    while (threads != end) {
        ThreadID tid = *threads++;
        unsigned ldstqCount = fromIEW->iewInfo[tid].ldstqCount;

        //we can potentially get tid collisions if two threads
        //have the same ldstqCount, but this should be rare.
        PQ.push(ldstqCount);
        threadMap[ldstqCount] = tid;
    }

    while (!PQ.empty()) {
        ThreadID high_pri = threadMap[PQ.top()];

        if (fetchStatus[high_pri] == Running ||
            fetchStatus[high_pri] == IcacheAccessComplete ||
            fetchStatus[high_pri] == Idle)
            return high_pri;
        else
            PQ.pop();
    }

    return InvalidThreadID;
}

template<class Impl>
ThreadID
DefaultFetch<Impl>::branchCount()
{
    panic("Branch Count Fetch policy unimplemented\n");
    return InvalidThreadID;
}

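// If the thread is about to cross into the next fetch buffer segment,
// start translating and fetching that segment now so the data is ready
// when fetch reaches it.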
template<class Impl>
void
DefaultFetch<Impl>::pipelineIcacheAccesses(ThreadID tid)
{
    if (!issuePipelinedIfetch[tid]) {
        return;
    }

    // The next PC to access.
    TheISA::PCState thisPC = pc[tid];

    if (isRomMicroPC(thisPC.microPC())) {
        return;
    }

    Addr pcOffset = fetchOffset[tid];
    Addr fetchAddr = (thisPC.instAddr() + pcOffset) & BaseCPU::PCMask;

    // Align the fetch PC so it's at the start of a fetch buffer segment.
    Addr fetchBufferBlockPC = fetchBufferAlignPC(fetchAddr);

    // Unless buffer already got the block, fetch it from icache.
    if (!(fetchBufferValid[tid] && fetchBufferBlockPC == fetchBufferPC[tid])) {
        DPRINTF(Fetch, "[tid:%i] Issuing a pipelined I-cache access, "
                "starting at PC %s.\n", tid, thisPC);

        fetchCacheLine(fetchAddr, tid, thisPC.instAddr());
    }
}

template<class Impl>
void
DefaultFetch<Impl>::profileStall(ThreadID tid)
{
    DPRINTF(Fetch, "There are no more threads available to fetch from.\n");

    // @todo Per-thread stats

    if (stalls[tid].drain) {
        ++fetchStats.pendingDrainCycles;
        DPRINTF(Fetch, "Fetch is waiting for a drain!\n");
    } else if (activeThreads->empty()) {
        ++fetchStats.noActiveThreadStallCycles;
        DPRINTF(Fetch, "Fetch has no active thread!\n");
    } else if (fetchStatus[tid] == Blocked) {
        ++fetchStats.blockedCycles;
        DPRINTF(Fetch, "[tid:%i] Fetch is blocked!\n", tid);
    } else if (fetchStatus[tid] == Squashing) {
        ++fetchStats.squashCycles;
        DPRINTF(Fetch, "[tid:%i] Fetch is squashing!\n", tid);
    } else if (fetchStatus[tid] == IcacheWaitResponse) {
        ++fetchStats.icacheStallCycles;
        DPRINTF(Fetch, "[tid:%i] Fetch is waiting for a cache response!\n",
                tid);
    } else if (fetchStatus[tid] == ItlbWait) {
        ++fetchStats.tlbCycles;
        DPRINTF(Fetch, "[tid:%i] Fetch is waiting for an ITLB walk to "
                "finish!\n", tid);
    } else if (fetchStatus[tid] == TrapPending) {
        ++fetchStats.pendingTrapStallCycles;
        DPRINTF(Fetch, "[tid:%i] Fetch is waiting for a pending trap!\n",
                tid);
    } else if (fetchStatus[tid] == QuiescePending) {
        ++fetchStats.pendingQuiesceStallCycles;
        DPRINTF(Fetch, "[tid:%i] Fetch is waiting for a pending quiesce "
                "instruction!\n", tid);
    } else if (fetchStatus[tid] == IcacheWaitRetry) {
        ++fetchStats.icacheWaitRetryStallCycles;
        DPRINTF(Fetch, "[tid:%i] Fetch is waiting for an I-cache retry!\n",
                tid);
    } else if (fetchStatus[tid] == NoGoodAddr) {
        DPRINTF(Fetch, "[tid:%i] Fetch predicted non-executable address\n",
                tid);
    } else {
        DPRINTF(Fetch, "[tid:%i] Unexpected fetch stall reason "
                "(Status: %i)\n",
                tid, fetchStatus[tid]);
    }
}

template<class Impl>
bool
DefaultFetch<Impl>::IcachePort::recvTimingResp(PacketPtr pkt)
{
    DPRINTF(O3CPU, "Fetch unit received timing\n");
    // We shouldn't ever get a cacheable block in Modified state
    assert(pkt->req->isUncacheable() ||
           !(pkt->cacheResponding() && !pkt->hasSharers()));
    fetch->processCacheCompletion(pkt);

    return true;
}

template<class Impl>
void
DefaultFetch<Impl>::IcachePort::recvReqRetry()
{
    fetch->recvReqRetry();
}

#endif //__CPU_O3_FETCH_IMPL_HH__