src/cpu/minor/fetch1.cc
/*
 * Copyright (c) 2013-2014 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "cpu/minor/fetch1.hh"

#include <cstring>
#include <iomanip>
#include <sstream>

#include "base/cast.hh"
#include "cpu/minor/pipeline.hh"
#include "debug/Drain.hh"
#include "debug/Fetch.hh"
#include "debug/MinorTrace.hh"

namespace Minor
{

Fetch1::Fetch1(const std::string &name_,
    MinorCPU &cpu_,
    MinorCPUParams &params,
    Latch<BranchData>::Output inp_,
    Latch<ForwardLineData>::Input out_,
    Latch<BranchData>::Output prediction_,
    std::vector<InputBuffer<ForwardLineData>> &next_stage_input_buffer) :
    Named(name_),
    cpu(cpu_),
    inp(inp_),
    out(out_),
    prediction(prediction_),
    nextStageReserve(next_stage_input_buffer),
    icachePort(name_ + ".icache_port", *this, cpu_),
    lineSnap(params.fetch1LineSnapWidth),
    maxLineWidth(params.fetch1LineWidth),
    fetchLimit(params.fetch1FetchLimit),
    fetchInfo(params.numThreads),
    threadPriority(0),
    requests(name_ + ".requests", "lines", params.fetch1FetchLimit),
    transfers(name_ + ".transfers", "lines", params.fetch1FetchLimit),
    icacheState(IcacheRunning),
    lineSeqNum(InstId::firstLineSeqNum),
    numFetchesInMemorySystem(0),
    numFetchesInITLB(0)
{
    if (lineSnap == 0) {
        lineSnap = cpu.cacheLineSize();
        DPRINTF(Fetch, "lineSnap set to cache line size of: %d\n",
            lineSnap);
    }

    if (maxLineWidth == 0) {
        maxLineWidth = cpu.cacheLineSize();
        DPRINTF(Fetch, "maxLineWidth set to cache line size of: %d\n",
            maxLineWidth);
    }

    /* These assertions should be copied to the Python config. as well */
    if ((lineSnap % sizeof(TheISA::MachInst)) != 0) {
        fatal("%s: fetch1LineSnapWidth must be a multiple "
            "of sizeof(TheISA::MachInst) (%d)\n", name_,
            sizeof(TheISA::MachInst));
    }

    if (!(maxLineWidth >= lineSnap &&
        (maxLineWidth % sizeof(TheISA::MachInst)) == 0))
    {
        fatal("%s: fetch1LineWidth must be a multiple of"
            " sizeof(TheISA::MachInst)"
            " (%d), and >= fetch1LineSnapWidth (%d)\n",
            name_, sizeof(TheISA::MachInst), lineSnap);
    }

    if (fetchLimit < 1) {
        fatal("%s: fetch1FetchLimit must be >= 1 (%d)\n", name_,
            fetchLimit);
    }
}
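
/* Illustrative note (added, not from the original source): with a 4-byte
 * TheISA::MachInst, a configuration such as fetch1LineSnapWidth = 16 and
 * fetch1LineWidth = 64 passes the checks above (both are multiples of 4
 * and 64 >= 16), while leaving either parameter at 0 makes it default to
 * the CPU's cache line size, as handled at the top of the constructor. */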

inline ThreadID
Fetch1::getScheduledThread()
{
    /* Select thread via policy. */
    std::vector<ThreadID> priority_list;

    switch (cpu.threadPolicy) {
      case Enums::SingleThreaded:
        priority_list.push_back(0);
        break;
      case Enums::RoundRobin:
        priority_list = cpu.roundRobinPriority(threadPriority);
        break;
      case Enums::Random:
        priority_list = cpu.randomPriority();
        break;
      default:
        panic("Unknown fetch policy");
    }

    for (auto tid : priority_list) {
        if (cpu.getContext(tid)->status() == ThreadContext::Active &&
            !fetchInfo[tid].blocked &&
            fetchInfo[tid].state == FetchRunning) {
            threadPriority = tid;
            return tid;
        }
    }

    return InvalidThreadID;
}

void
Fetch1::fetchLine(ThreadID tid)
{
    /* Reference the currently used thread state. */
    Fetch1ThreadInfo &thread = fetchInfo[tid];

    /* If line_offset != 0, a request is pushed for the remainder of the
     * line. */
    /* Use a lower, sizeof(MachInst) aligned address for the fetch */
    Addr aligned_pc = thread.pc.instAddr() & ~((Addr) lineSnap - 1);
    unsigned int line_offset = aligned_pc % lineSnap;
    unsigned int request_size = maxLineWidth - line_offset;
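
    /* Worked example (added for clarity; values are hypothetical): with
     * lineSnap = 64 and thread.pc.instAddr() = 0x1234, aligned_pc =
     * 0x1234 & ~0x3f = 0x1200.  Because aligned_pc is already
     * lineSnap-aligned (lineSnap is expected to be a power of two),
     * line_offset evaluates to 0 here and request_size is simply
     * maxLineWidth. */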

    /* Fill in the line's id */
    InstId request_id(tid,
        thread.streamSeqNum, thread.predictionSeqNum,
        lineSeqNum);

    FetchRequestPtr request = new FetchRequest(*this, request_id, thread.pc);

    DPRINTF(Fetch, "Inserting fetch into the fetch queue "
        "%s addr: 0x%x pc: %s line_offset: %d request_size: %d\n",
        request_id, aligned_pc, thread.pc, line_offset, request_size);

    request->request->setContext(cpu.threads[tid]->getTC()->contextId());
    request->request->setVirt(
        aligned_pc, request_size, Request::INST_FETCH, cpu.instMasterId(),
        /* I've no idea why we need the PC, but give it */
        thread.pc.instAddr());

    DPRINTF(Fetch, "Submitting ITLB request\n");
    numFetchesInITLB++;

    request->state = FetchRequest::InTranslation;

    /* Reserve space in the queues upstream of requests for results */
    transfers.reserve();
    requests.push(request);

    /* Submit the translation request. The response will come
     * through finish/markDelayed on this request as it bears
     * the Translation interface */
    cpu.threads[request->id.threadId]->itb->translateTiming(
        request->request,
        cpu.getContext(request->id.threadId),
        request, BaseTLB::Execute);

    lineSeqNum++;

    /* Step the PC for the next line onto the line aligned next address.
     * Note that as instructions can span lines, this PC is only a
     * reliable 'new' PC if the next line has a new stream sequence number. */
    thread.pc.set(aligned_pc + request_size);
}

std::ostream &
operator <<(std::ostream &os, Fetch1::IcacheState state)
{
    switch (state) {
      case Fetch1::IcacheRunning:
        os << "IcacheRunning";
        break;
      case Fetch1::IcacheNeedsRetry:
        os << "IcacheNeedsRetry";
        break;
      default:
        os << "IcacheState-" << static_cast<int>(state);
        break;
    }
    return os;
}

void
Fetch1::FetchRequest::makePacket()
{
    /* Make the necessary packet for a memory transaction */
    packet = new Packet(request, MemCmd::ReadReq);
    packet->allocate();

    /* This FetchRequest becomes SenderState to allow the response to be
     * identified */
    packet->pushSenderState(this);
}
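
/* Note added for clarity: pushing this request as the packet's SenderState
 * is what allows Fetch1::recvTimingResp() below to recover the owning
 * FetchRequest via popSenderState() when the memory system returns the
 * response packet. */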

void
Fetch1::FetchRequest::finish(const Fault &fault_, const RequestPtr &request_,
    ThreadContext *tc, BaseTLB::Mode mode)
{
    fault = fault_;

    state = Translated;
    fetch.handleTLBResponse(this);

    /* Let's try and wake up the processor for the next cycle */
    fetch.cpu.wakeupOnEvent(Pipeline::Fetch1StageId);
}

void
Fetch1::handleTLBResponse(FetchRequestPtr response)
{
    numFetchesInITLB--;

    if (response->fault != NoFault) {
        DPRINTF(Fetch, "Fault in address ITLB translation: %s, "
            "paddr: 0x%x, vaddr: 0x%x\n",
            response->fault->name(),
            (response->request->hasPaddr() ?
                response->request->getPaddr() : 0),
            response->request->getVaddr());

        if (DTRACE(MinorTrace))
            minorTraceResponseLine(name(), response);
    } else {
        DPRINTF(Fetch, "Got ITLB response\n");
    }

    response->state = FetchRequest::Translated;

    tryToSendToTransfers(response);
}

Fetch1::FetchRequest::~FetchRequest()
{
    if (packet)
        delete packet;
}

void
Fetch1::tryToSendToTransfers(FetchRequestPtr request)
{
    if (!requests.empty() && requests.front() != request) {
        DPRINTF(Fetch, "Fetch not at front of requests queue, can't"
            " issue to memory\n");
        return;
    }

    if (request->state == FetchRequest::InTranslation) {
        DPRINTF(Fetch, "Fetch still in translation, not issuing to"
            " memory\n");
        return;
    }

    if (request->isDiscardable() || request->fault != NoFault) {
        /* Discarded and faulting requests carry on through transfers
         * as Complete/packet == NULL */

        request->state = FetchRequest::Complete;
        moveFromRequestsToTransfers(request);

        /* Wake up the pipeline next cycle as there will be no event
         * for this queue->queue transfer */
        cpu.wakeupOnEvent(Pipeline::Fetch1StageId);
    } else if (request->state == FetchRequest::Translated) {
        if (!request->packet)
            request->makePacket();

        /* Ensure that the packet won't delete the request */
        assert(request->packet->needsResponse());

        if (tryToSend(request))
            moveFromRequestsToTransfers(request);
    } else {
        DPRINTF(Fetch, "Not advancing line fetch\n");
    }
}
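
/* Summary note (added): a FetchRequest progresses through the states
 * InTranslation -> Translated -> RequestIssuing -> Complete as it passes
 * from the ITLB to the icache port.  Faulting or discardable requests skip
 * the memory system and are marked Complete here with packet == NULL, so
 * later handling must not assume a data payload. */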

void
Fetch1::moveFromRequestsToTransfers(FetchRequestPtr request)
{
    assert(!requests.empty() && requests.front() == request);

    requests.pop();
    transfers.push(request);
}

bool
Fetch1::tryToSend(FetchRequestPtr request)
{
    bool ret = false;

    if (icachePort.sendTimingReq(request->packet)) {
        /* Invalidate the fetch_requests packet so we don't
         * accidentally fail to deallocate it (or use it!)
         * later by overwriting it */
        request->packet = NULL;
        request->state = FetchRequest::RequestIssuing;
        numFetchesInMemorySystem++;

        ret = true;

        DPRINTF(Fetch, "Issued fetch request to memory: %s\n",
            request->id);
    } else {
        /* Needs to be resent, wait for that */
        icacheState = IcacheNeedsRetry;

        DPRINTF(Fetch, "Line fetch needs to retry: %s\n",
            request->id);
    }

    return ret;
}

void
Fetch1::stepQueues()
{
    IcacheState old_icache_state = icacheState;

    switch (icacheState) {
      case IcacheRunning:
        /* Move ITLB results on to the memory system */
        if (!requests.empty()) {
            tryToSendToTransfers(requests.front());
        }
        break;
      case IcacheNeedsRetry:
        break;
    }

    if (icacheState != old_icache_state) {
        DPRINTF(Fetch, "Step in state %s moving to state %s\n",
            old_icache_state, icacheState);
    }
}

void
Fetch1::popAndDiscard(FetchQueue &queue)
{
    if (!queue.empty()) {
        delete queue.front();
        queue.pop();
    }
}

unsigned int
Fetch1::numInFlightFetches()
{
    return requests.occupiedSpace() +
        transfers.occupiedSpace();
}

/** Print the appropriate MinorLine line for a fetch response */
void
Fetch1::minorTraceResponseLine(const std::string &name,
    Fetch1::FetchRequestPtr response) const
{
    const RequestPtr &request M5_VAR_USED = response->request;

    if (response->packet && response->packet->isError()) {
        MINORLINE(this, "id=F;%s vaddr=0x%x fault=\"error packet\"\n",
            response->id, request->getVaddr());
    } else if (response->fault != NoFault) {
        MINORLINE(this, "id=F;%s vaddr=0x%x fault=\"%s\"\n",
            response->id, request->getVaddr(), response->fault->name());
    } else {
        MINORLINE(this, "id=%s size=%d vaddr=0x%x paddr=0x%x\n",
            response->id, request->getSize(),
            request->getVaddr(), request->getPaddr());
    }
}

bool
Fetch1::recvTimingResp(PacketPtr response)
{
    DPRINTF(Fetch, "recvTimingResp %d\n", numFetchesInMemorySystem);

    /* Only push the response if we didn't change stream? No, all responses
     * should hit the responses queue. It's the job of 'step' to throw them
     * away. */
    FetchRequestPtr fetch_request = safe_cast<FetchRequestPtr>
        (response->popSenderState());

    /* Fixup packet in fetch_request as this may have changed */
    assert(!fetch_request->packet);
    fetch_request->packet = response;

    numFetchesInMemorySystem--;
    fetch_request->state = FetchRequest::Complete;

    if (DTRACE(MinorTrace))
        minorTraceResponseLine(name(), fetch_request);

    if (response->isError()) {
        DPRINTF(Fetch, "Received error response packet: %s\n",
            fetch_request->id);
    }

    /* We go to idle even if there are more things to do on the queues as
     * it's the job of step to actually step us on to the next transaction */

    /* Let's try and wake up the processor for the next cycle to move on
     * queues */
    cpu.wakeupOnEvent(Pipeline::Fetch1StageId);

    /* Never busy */
    return true;
}

void
Fetch1::recvReqRetry()
{
    DPRINTF(Fetch, "recvRetry\n");
    assert(icacheState == IcacheNeedsRetry);
    assert(!requests.empty());

    FetchRequestPtr retryRequest = requests.front();

    icacheState = IcacheRunning;

    if (tryToSend(retryRequest))
        moveFromRequestsToTransfers(retryRequest);
}

std::ostream &
operator <<(std::ostream &os, Fetch1::FetchState state)
{
    switch (state) {
      case Fetch1::FetchHalted:
        os << "FetchHalted";
        break;
      case Fetch1::FetchWaitingForPC:
        os << "FetchWaitingForPC";
        break;
      case Fetch1::FetchRunning:
        os << "FetchRunning";
        break;
      default:
        os << "FetchState-" << static_cast<int>(state);
        break;
    }
    return os;
}

void
Fetch1::changeStream(const BranchData &branch)
{
    Fetch1ThreadInfo &thread = fetchInfo[branch.threadId];

    updateExpectedSeqNums(branch);

    /* Start fetching again if we were stopped */
    switch (branch.reason) {
      case BranchData::SuspendThread:
        {
            if (thread.wakeupGuard) {
                DPRINTF(Fetch, "Not suspending fetch due to guard: %s\n",
                    branch);
            } else {
                DPRINTF(Fetch, "Suspending fetch: %s\n", branch);
                thread.state = FetchWaitingForPC;
            }
        }
        break;
      case BranchData::HaltFetch:
        DPRINTF(Fetch, "Halting fetch\n");
        thread.state = FetchHalted;
        break;
      default:
        DPRINTF(Fetch, "Changing stream on branch: %s\n", branch);
        thread.state = FetchRunning;
        break;
    }
    thread.pc = branch.target;
}

void
Fetch1::updateExpectedSeqNums(const BranchData &branch)
{
    Fetch1ThreadInfo &thread = fetchInfo[branch.threadId];

    DPRINTF(Fetch, "Updating streamSeqNum from: %d to %d,"
        " predictionSeqNum from: %d to %d\n",
        thread.streamSeqNum, branch.newStreamSeqNum,
        thread.predictionSeqNum, branch.newPredictionSeqNum);

    /* Change the stream */
    thread.streamSeqNum = branch.newStreamSeqNum;
    /* Update the prediction. Note that it's possible for this to
     * actually set the prediction to an *older* value if new
     * predictions have been discarded by execute */
    thread.predictionSeqNum = branch.newPredictionSeqNum;
}
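
/* Note added for clarity: once these sequence numbers change, any line
 * still tagged with the old streamSeqNum/predictionSeqNum is recognised as
 * stale by FetchRequest::isDiscardable() below and is dropped in evaluate()
 * rather than being forwarded to Fetch2. */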

void
Fetch1::processResponse(Fetch1::FetchRequestPtr response,
    ForwardLineData &line)
{
    Fetch1ThreadInfo &thread = fetchInfo[response->id.threadId];
    PacketPtr packet = response->packet;

    /* Pass the prefetch abort (if any) on to Fetch2 in a ForwardLineData
     * structure */
    line.setFault(response->fault);
    /* Make sequence numbers valid in return */
    line.id = response->id;
    /* Set PC to virtual address */
    line.pc = response->pc;
    /* Set the lineBase, which is a sizeof(MachInst) aligned address <=
     * pc.instAddr() */
    line.lineBaseAddr = response->request->getVaddr();

    if (response->fault != NoFault) {
        /* Stop fetching if there was a fault */
        /* Should probably try to flush the queues as well, but we
         * can't be sure that this fault will actually reach Execute, and we
         * can't (currently) selectively remove this stream from the queues */
        DPRINTF(Fetch, "Stopping line fetch because of fault: %s\n",
            response->fault->name());
        thread.state = Fetch1::FetchWaitingForPC;
    } else {
        line.adoptPacketData(packet);
        /* Null the response's packet to prevent the response from trying to
         * deallocate the packet */
        response->packet = NULL;
    }
}

void
Fetch1::evaluate()
{
    const BranchData &execute_branch = *inp.outputWire;
    const BranchData &fetch2_branch = *prediction.outputWire;
    ForwardLineData &line_out = *out.inputWire;

    assert(line_out.isBubble());

    for (ThreadID tid = 0; tid < cpu.numThreads; tid++)
        fetchInfo[tid].blocked = !nextStageReserve[tid].canReserve();

    /** Are both branches from later stages valid and for the same thread? */
    if (execute_branch.threadId != InvalidThreadID &&
        execute_branch.threadId == fetch2_branch.threadId) {

        Fetch1ThreadInfo &thread = fetchInfo[execute_branch.threadId];

        /* Are we changing stream? Look to the Execute branches first, then
         * to predicted changes of stream from Fetch2 */
        if (execute_branch.isStreamChange()) {
            if (thread.state == FetchHalted) {
                DPRINTF(Fetch, "Halted, ignoring branch: %s\n",
                    execute_branch);
            } else {
                changeStream(execute_branch);
            }

            if (!fetch2_branch.isBubble()) {
                DPRINTF(Fetch, "Ignoring simultaneous prediction: %s\n",
                    fetch2_branch);
            }

            /* The streamSeqNum tagging in request/response ->req should
             * handle discarding those requests when we get to them. */
        } else if (thread.state != FetchHalted &&
            fetch2_branch.isStreamChange()) {
            /* Handle branch predictions by changing the instruction source
             * if we're still processing the same stream (as set by
             * streamSeqNum) as the one of the prediction. */
            if (fetch2_branch.newStreamSeqNum != thread.streamSeqNum) {
                DPRINTF(Fetch, "Not changing stream on prediction: %s,"
                    " streamSeqNum mismatch\n",
                    fetch2_branch);
            } else {
                changeStream(fetch2_branch);
            }
        }
    } else {
        /* Fetch2 and Execute branches are for different threads */
        if (execute_branch.threadId != InvalidThreadID &&
            execute_branch.isStreamChange()) {

            if (fetchInfo[execute_branch.threadId].state == FetchHalted) {
                DPRINTF(Fetch, "Halted, ignoring branch: %s\n",
                    execute_branch);
            } else {
                changeStream(execute_branch);
            }
        }

        if (fetch2_branch.threadId != InvalidThreadID &&
            fetch2_branch.isStreamChange()) {

            if (fetchInfo[fetch2_branch.threadId].state == FetchHalted) {
                DPRINTF(Fetch, "Halted, ignoring branch: %s\n",
                    fetch2_branch);
            } else if (fetch2_branch.newStreamSeqNum !=
                fetchInfo[fetch2_branch.threadId].streamSeqNum) {
                DPRINTF(Fetch, "Not changing stream on prediction: %s,"
                    " streamSeqNum mismatch\n", fetch2_branch);
            } else {
                changeStream(fetch2_branch);
            }
        }
    }

    if (numInFlightFetches() < fetchLimit) {
        ThreadID fetch_tid = getScheduledThread();

        if (fetch_tid != InvalidThreadID) {
            DPRINTF(Fetch, "Fetching from thread %d\n", fetch_tid);

            /* Generate fetch to selected thread */
            fetchLine(fetch_tid);
            /* Take up a slot in the fetch queue */
            nextStageReserve[fetch_tid].reserve();
        } else {
            DPRINTF(Fetch, "No active threads available to fetch from\n");
        }
    }

    /* Halting shouldn't prevent fetches in flight from being processed */
    /* Step fetches through the icachePort queues and memory system */
    stepQueues();

    /* As we've thrown away early lines, if there is a line, it must
     * be from the right stream */
    if (!transfers.empty() &&
        transfers.front()->isComplete())
    {
        Fetch1::FetchRequestPtr response = transfers.front();

        if (response->isDiscardable()) {
            nextStageReserve[response->id.threadId].freeReservation();

            DPRINTF(Fetch, "Discarding translated fetch as it's for"
                " an old stream\n");

            /* Wake up next cycle just in case there was some other
             * action to do */
            cpu.wakeupOnEvent(Pipeline::Fetch1StageId);
        } else {
            DPRINTF(Fetch, "Processing fetched line: %s\n",
                response->id);

            processResponse(response, line_out);
        }

        popAndDiscard(transfers);
    }

    /* If we generated output, mark the stage as being active
     * to encourage that output on to the next stage */
    if (!line_out.isBubble())
        cpu.activityRecorder->activity();

    /* Fetch1 has no inputBuffer so the only activity we can have is to
     * generate a line output (tested just above) or to initiate a memory
     * fetch which will signal activity when it returns/needs stepping
     * between queues */

    /* This looks hackish. And it is, but there doesn't seem to be a better
     * way to do this. The signal from commit to suspend fetch takes 1
     * clock cycle to propagate to fetch. However, a legitimate wakeup
     * may occur between cycles from the memory system. Thus wakeup guard
     * prevents us from suspending in that case. */

    for (auto& thread : fetchInfo) {
        thread.wakeupGuard = false;
    }
}
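
/* Summary note (added): each cycle, evaluate() (1) applies stream changes
 * from Execute and predictions from Fetch2, (2) issues at most one new line
 * fetch to a schedulable thread while under fetchLimit, (3) steps requests
 * from the ITLB towards the icache via stepQueues(), and (4) forwards or
 * discards the completed transfer at the head of 'transfers'. */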

void
Fetch1::wakeupFetch(ThreadID tid)
{
    ThreadContext *thread_ctx = cpu.getContext(tid);
    Fetch1ThreadInfo &thread = fetchInfo[tid];
    thread.pc = thread_ctx->pcState();
    thread.state = FetchRunning;
    thread.wakeupGuard = true;
    DPRINTF(Fetch, "[tid:%d]: Changing stream wakeup %s\n",
        tid, thread_ctx->pcState());

    cpu.wakeupOnEvent(Pipeline::Fetch1StageId);
}

bool
Fetch1::isDrained()
{
    bool drained = numInFlightFetches() == 0 && (*out.inputWire).isBubble();
    for (ThreadID tid = 0; tid < cpu.numThreads; tid++) {
        Fetch1ThreadInfo &thread = fetchInfo[tid];
        DPRINTF(Drain, "isDrained[tid:%d]: %s %s%s\n",
            tid,
            thread.state == FetchHalted,
            (numInFlightFetches() == 0 ? "" : "inFlightFetches "),
            ((*out.inputWire).isBubble() ? "" : "outputtingLine"));

        drained = drained && (thread.state != FetchRunning);
    }

    return drained;
}

void
Fetch1::FetchRequest::reportData(std::ostream &os) const
{
    os << id;
}

bool Fetch1::FetchRequest::isDiscardable() const
{
    Fetch1ThreadInfo &thread = fetch.fetchInfo[id.threadId];

    /* Can't discard lines in TLB/memory */
    return state != InTranslation && state != RequestIssuing &&
        (id.streamSeqNum != thread.streamSeqNum ||
            id.predictionSeqNum != thread.predictionSeqNum);
}
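
/* Illustrative scenario (added, hypothetical sequence numbers): a line
 * issued under streamSeqNum 3 is not discardable while it is still
 * InTranslation or RequestIssuing, even if Execute has already moved the
 * thread on to streamSeqNum 4; once it leaves the TLB/memory system, its
 * stale sequence numbers make isDiscardable() true and evaluate() drops
 * it, freeing the Fetch2 reservation. */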

void
Fetch1::minorTrace() const
{
    /* TODO: make minorTrace() thread-aware; it currently only reports
     * thread 0 */
    const Fetch1ThreadInfo &thread = fetchInfo[0];

    std::ostringstream data;

    if (thread.blocked)
        data << 'B';
    else
        (*out.inputWire).reportData(data);

    MINORTRACE("state=%s icacheState=%s in_tlb_mem=%s/%s"
        " streamSeqNum=%d lines=%s\n", thread.state, icacheState,
        numFetchesInITLB, numFetchesInMemorySystem,
        thread.streamSeqNum, data.str());
    requests.minorTrace();
    transfers.minorTrace();
}

}