cpu,mem: Add support for partial loads/stores and wide mem. accesses
src/cpu/minor/fetch1.cc
/*
 * Copyright (c) 2013-2014 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Andrew Bardsley
 */

#include "cpu/minor/fetch1.hh"

#include <cstring>
#include <iomanip>
#include <sstream>

#include "base/cast.hh"
#include "cpu/minor/pipeline.hh"
#include "debug/Drain.hh"
#include "debug/Fetch.hh"
#include "debug/MinorTrace.hh"

namespace Minor
{

Fetch1::Fetch1(const std::string &name_,
    MinorCPU &cpu_,
    MinorCPUParams &params,
    Latch<BranchData>::Output inp_,
    Latch<ForwardLineData>::Input out_,
    Latch<BranchData>::Output prediction_,
    std::vector<InputBuffer<ForwardLineData>> &next_stage_input_buffer) :
    Named(name_),
    cpu(cpu_),
    inp(inp_),
    out(out_),
    prediction(prediction_),
    nextStageReserve(next_stage_input_buffer),
    icachePort(name_ + ".icache_port", *this, cpu_),
    lineSnap(params.fetch1LineSnapWidth),
    maxLineWidth(params.fetch1LineWidth),
    fetchLimit(params.fetch1FetchLimit),
    fetchInfo(params.numThreads),
    threadPriority(0),
    requests(name_ + ".requests", "lines", params.fetch1FetchLimit),
    transfers(name_ + ".transfers", "lines", params.fetch1FetchLimit),
    icacheState(IcacheRunning),
    lineSeqNum(InstId::firstLineSeqNum),
    numFetchesInMemorySystem(0),
    numFetchesInITLB(0)
{
    if (lineSnap == 0) {
        lineSnap = cpu.cacheLineSize();
        DPRINTF(Fetch, "lineSnap set to cache line size of: %d\n",
            lineSnap);
    }

    if (maxLineWidth == 0) {
        maxLineWidth = cpu.cacheLineSize();
        DPRINTF(Fetch, "maxLineWidth set to cache line size of: %d\n",
            maxLineWidth);
    }

    /* These assertions should be copied to the Python config. as well */
    if ((lineSnap % sizeof(TheISA::MachInst)) != 0) {
        fatal("%s: fetch1LineSnapWidth must be a multiple "
            "of sizeof(TheISA::MachInst) (%d)\n", name_,
            sizeof(TheISA::MachInst));
    }

    if (!(maxLineWidth >= lineSnap &&
        (maxLineWidth % sizeof(TheISA::MachInst)) == 0))
    {
        fatal("%s: fetch1LineWidth must be a multiple of"
            " sizeof(TheISA::MachInst)"
            " (%d), and >= fetch1LineSnapWidth (%d)\n",
            name_, sizeof(TheISA::MachInst), lineSnap);
    }

    if (fetchLimit < 1) {
        fatal("%s: fetch1FetchLimit must be >= 1 (%d)\n", name_,
            fetchLimit);
    }
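
    /* Illustrative values (assumed, not from any particular config): with
     * a 64-byte cache line and a 4-byte TheISA::MachInst, leaving both
     * width parameters at 0 resolves lineSnap = maxLineWidth = 64, which
     * satisfies the width checks above. */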
}

inline ThreadID
Fetch1::getScheduledThread()
{
    /* Select thread via policy. */
    std::vector<ThreadID> priority_list;

    switch (cpu.threadPolicy) {
      case Enums::SingleThreaded:
        priority_list.push_back(0);
        break;
      case Enums::RoundRobin:
        priority_list = cpu.roundRobinPriority(threadPriority);
        break;
      case Enums::Random:
        priority_list = cpu.randomPriority();
        break;
      default:
        panic("Unknown fetch policy");
    }
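
    /* For illustration (assuming MinorCPU's round-robin helper rotates to
     * start just after the last scheduled thread): with 4 threads and
     * threadPriority == 2, the priority list would be {3, 0, 1, 2}. */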

    for (auto tid : priority_list) {
        if (cpu.getContext(tid)->status() == ThreadContext::Active &&
            !fetchInfo[tid].blocked &&
            fetchInfo[tid].state == FetchRunning) {
            threadPriority = tid;
            return tid;
        }
    }

    return InvalidThreadID;
}

void
Fetch1::fetchLine(ThreadID tid)
{
    /* Reference the currently used thread state. */
    Fetch1ThreadInfo &thread = fetchInfo[tid];

    /* If line_offset != 0, a request is pushed for the remainder of the
     * line. */
    /* Use a lower, sizeof(MachInst) aligned address for the fetch */
    Addr aligned_pc = thread.pc.instAddr() & ~((Addr) lineSnap - 1);
    unsigned int line_offset = aligned_pc % lineSnap;
    unsigned int request_size = maxLineWidth - line_offset;
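
    /* Worked example (illustrative values): with lineSnap = 64 and
     * pc.instAddr() = 0x1234, aligned_pc = 0x1200.  Assuming lineSnap is a
     * power of two (the cache-line-size default is), aligned_pc % lineSnap
     * is 0, so line_offset is 0 and request_size == maxLineWidth. */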

    /* Fill in the line's id */
    InstId request_id(tid,
        thread.streamSeqNum, thread.predictionSeqNum,
        lineSeqNum);

    FetchRequestPtr request = new FetchRequest(*this, request_id, thread.pc);

    DPRINTF(Fetch, "Inserting fetch into the fetch queue "
        "%s addr: 0x%x pc: %s line_offset: %d request_size: %d\n",
        request_id, aligned_pc, thread.pc, line_offset, request_size);

    request->request->setContext(cpu.threads[tid]->getTC()->contextId());
    request->request->setVirt(0 /* asid */,
        aligned_pc, request_size, Request::INST_FETCH, cpu.instMasterId(),
        /* I've no idea why we need the PC, but give it */
        thread.pc.instAddr());

    DPRINTF(Fetch, "Submitting ITLB request\n");
    numFetchesInITLB++;

    request->state = FetchRequest::InTranslation;

    /* Reserve space in the queues upstream of requests for results */
    transfers.reserve();
    requests.push(request);

    /* Submit the translation request.  The response will come through
     * finish/markDelayed on this request as it bears the Translation
     * interface */
    cpu.threads[request->id.threadId]->itb->translateTiming(
        request->request,
        cpu.getContext(request->id.threadId),
        request, BaseTLB::Execute);

    lineSeqNum++;

    /* Step the PC for the next line onto the line aligned next address.
     * Note that as instructions can span lines, this PC is only a
     * reliable 'new' PC if the next line has a new stream sequence
     * number. */
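    /* E.g. (continuing the illustrative values above): aligned_pc 0x1200
     * and request_size 64 step the fetch PC on to 0x1240. */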
#if THE_ISA == ALPHA_ISA
    /* Restore the low bits of the PC used as address space flags */
    Addr pc_low_bits = thread.pc.instAddr() &
        ((Addr) (1 << sizeof(TheISA::MachInst)) - 1);

    thread.pc.set(aligned_pc + request_size + pc_low_bits);
#else
    thread.pc.set(aligned_pc + request_size);
#endif
}

std::ostream &
operator <<(std::ostream &os, Fetch1::IcacheState state)
{
    switch (state) {
      case Fetch1::IcacheRunning:
        os << "IcacheRunning";
        break;
      case Fetch1::IcacheNeedsRetry:
        os << "IcacheNeedsRetry";
        break;
      default:
        os << "IcacheState-" << static_cast<int>(state);
        break;
    }
    return os;
}

void
Fetch1::FetchRequest::makePacket()
{
    /* Make the necessary packet for a memory transaction */
    packet = new Packet(request, MemCmd::ReadReq);
    packet->allocate();

    /* This FetchRequest becomes SenderState to allow the response to be
     * identified */
    packet->pushSenderState(this);
}

void
Fetch1::FetchRequest::finish(const Fault &fault_, const RequestPtr &request_,
    ThreadContext *tc, BaseTLB::Mode mode)
{
    fault = fault_;

    state = Translated;
    fetch.handleTLBResponse(this);

    /* Let's try and wake up the processor for the next cycle */
    fetch.cpu.wakeupOnEvent(Pipeline::Fetch1StageId);
}

void
Fetch1::handleTLBResponse(FetchRequestPtr response)
{
    numFetchesInITLB--;

    if (response->fault != NoFault) {
        DPRINTF(Fetch, "Fault in address ITLB translation: %s, "
            "paddr: 0x%x, vaddr: 0x%x\n",
            response->fault->name(),
            (response->request->hasPaddr() ?
                response->request->getPaddr() : 0),
            response->request->getVaddr());

        if (DTRACE(MinorTrace))
            minorTraceResponseLine(name(), response);
    } else {
        DPRINTF(Fetch, "Got ITLB response\n");
    }

    response->state = FetchRequest::Translated;

    tryToSendToTransfers(response);
}

Fetch1::FetchRequest::~FetchRequest()
{
    if (packet)
        delete packet;
}

void
Fetch1::tryToSendToTransfers(FetchRequestPtr request)
{
    if (!requests.empty() && requests.front() != request) {
        DPRINTF(Fetch, "Fetch not at front of requests queue, can't"
            " issue to memory\n");
        return;
    }

    if (request->state == FetchRequest::InTranslation) {
        DPRINTF(Fetch, "Fetch still in translation, not issuing to"
            " memory\n");
        return;
    }

    if (request->isDiscardable() || request->fault != NoFault) {
        /* Discarded and faulting requests carry on through transfers
         * as Complete/packet == NULL */

        request->state = FetchRequest::Complete;
        moveFromRequestsToTransfers(request);

        /* Wake up the pipeline next cycle as there will be no event
         * for this queue->queue transfer */
        cpu.wakeupOnEvent(Pipeline::Fetch1StageId);
    } else if (request->state == FetchRequest::Translated) {
        if (!request->packet)
            request->makePacket();

        /* Ensure that the packet won't delete the request */
        assert(request->packet->needsResponse());

        if (tryToSend(request))
            moveFromRequestsToTransfers(request);
    } else {
        DPRINTF(Fetch, "Not advancing line fetch\n");
    }
}

void
Fetch1::moveFromRequestsToTransfers(FetchRequestPtr request)
{
    assert(!requests.empty() && requests.front() == request);

    requests.pop();
    transfers.push(request);
}

bool
Fetch1::tryToSend(FetchRequestPtr request)
{
    bool ret = false;

    if (icachePort.sendTimingReq(request->packet)) {
        /* Invalidate the fetch_requests packet so we don't
         * accidentally fail to deallocate it (or use it!)
         * later by overwriting it */
        request->packet = NULL;
        request->state = FetchRequest::RequestIssuing;
        numFetchesInMemorySystem++;

        ret = true;

        DPRINTF(Fetch, "Issued fetch request to memory: %s\n",
            request->id);
    } else {
        /* Needs to be resent, wait for that */
        icacheState = IcacheNeedsRetry;

        DPRINTF(Fetch, "Line fetch needs to retry: %s\n",
            request->id);
    }

    return ret;
}

void
Fetch1::stepQueues()
{
    IcacheState old_icache_state = icacheState;

    switch (icacheState) {
      case IcacheRunning:
        /* Move ITLB results on to the memory system */
        if (!requests.empty()) {
            tryToSendToTransfers(requests.front());
        }
        break;
      case IcacheNeedsRetry:
        break;
    }

    if (icacheState != old_icache_state) {
        DPRINTF(Fetch, "Step in state %s moving to state %s\n",
            old_icache_state, icacheState);
    }
}

void
Fetch1::popAndDiscard(FetchQueue &queue)
{
    if (!queue.empty()) {
        delete queue.front();
        queue.pop();
    }
}

unsigned int
Fetch1::numInFlightFetches()
{
    return requests.occupiedSpace() +
        transfers.occupiedSpace();
}

/** Print the appropriate MinorLine line for a fetch response */
void
Fetch1::minorTraceResponseLine(const std::string &name,
    Fetch1::FetchRequestPtr response) const
{
    const RequestPtr &request M5_VAR_USED = response->request;

    if (response->packet && response->packet->isError()) {
        MINORLINE(this, "id=F;%s vaddr=0x%x fault=\"error packet\"\n",
            response->id, request->getVaddr());
    } else if (response->fault != NoFault) {
        MINORLINE(this, "id=F;%s vaddr=0x%x fault=\"%s\"\n",
            response->id, request->getVaddr(), response->fault->name());
    } else {
        MINORLINE(this, "id=%s size=%d vaddr=0x%x paddr=0x%x\n",
            response->id, request->getSize(),
            request->getVaddr(), request->getPaddr());
    }
}

bool
Fetch1::recvTimingResp(PacketPtr response)
{
    DPRINTF(Fetch, "recvTimingResp %d\n", numFetchesInMemorySystem);

    /* Only push the response if we didn't change stream?  No, all responses
     * should hit the responses queue.  It's the job of 'step' to throw them
     * away. */
    FetchRequestPtr fetch_request = safe_cast<FetchRequestPtr>
        (response->popSenderState());
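
    /* This recovers the FetchRequest that makePacket() pushed as the
     * packet's SenderState, matching the response back to its originating
     * request without any separate lookup structure. */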

    /* Fixup packet in fetch_request as this may have changed */
    assert(!fetch_request->packet);
    fetch_request->packet = response;

    numFetchesInMemorySystem--;
    fetch_request->state = FetchRequest::Complete;

    if (DTRACE(MinorTrace))
        minorTraceResponseLine(name(), fetch_request);

    if (response->isError()) {
        DPRINTF(Fetch, "Received error response packet: %s\n",
            fetch_request->id);
    }

    /* We go to idle even if there are more things to do on the queues as
     * it's the job of step to actually step us on to the next transaction */

    /* Let's try and wake up the processor for the next cycle to move on
     * queues */
    cpu.wakeupOnEvent(Pipeline::Fetch1StageId);

    /* Never busy */
    return true;
}

void
Fetch1::recvReqRetry()
{
    DPRINTF(Fetch, "recvRetry\n");
    assert(icacheState == IcacheNeedsRetry);
    assert(!requests.empty());

    FetchRequestPtr retryRequest = requests.front();

    icacheState = IcacheRunning;

    if (tryToSend(retryRequest))
        moveFromRequestsToTransfers(retryRequest);
}

std::ostream &
operator <<(std::ostream &os, Fetch1::FetchState state)
{
    switch (state) {
      case Fetch1::FetchHalted:
        os << "FetchHalted";
        break;
      case Fetch1::FetchWaitingForPC:
        os << "FetchWaitingForPC";
        break;
      case Fetch1::FetchRunning:
        os << "FetchRunning";
        break;
      default:
        os << "FetchState-" << static_cast<int>(state);
        break;
    }
    return os;
}

void
Fetch1::changeStream(const BranchData &branch)
{
    Fetch1ThreadInfo &thread = fetchInfo[branch.threadId];

    updateExpectedSeqNums(branch);

    /* Start fetching again if we were stopped */
    switch (branch.reason) {
      case BranchData::SuspendThread:
        {
            if (thread.wakeupGuard) {
                DPRINTF(Fetch, "Not suspending fetch due to guard: %s\n",
                    branch);
            } else {
                DPRINTF(Fetch, "Suspending fetch: %s\n", branch);
                thread.state = FetchWaitingForPC;
            }
        }
        break;
      case BranchData::HaltFetch:
        DPRINTF(Fetch, "Halting fetch\n");
        thread.state = FetchHalted;
        break;
      default:
        DPRINTF(Fetch, "Changing stream on branch: %s\n", branch);
        thread.state = FetchRunning;
        break;
    }
    thread.pc = branch.target;
}

void
Fetch1::updateExpectedSeqNums(const BranchData &branch)
{
    Fetch1ThreadInfo &thread = fetchInfo[branch.threadId];

    DPRINTF(Fetch, "Updating streamSeqNum from: %d to %d,"
        " predictionSeqNum from: %d to %d\n",
        thread.streamSeqNum, branch.newStreamSeqNum,
        thread.predictionSeqNum, branch.newPredictionSeqNum);

    /* Change the stream */
    thread.streamSeqNum = branch.newStreamSeqNum;
    /* Update the prediction.  Note that it's possible for this to
     * actually set the prediction to an *older* value if new
     * predictions have been discarded by execute */
    thread.predictionSeqNum = branch.newPredictionSeqNum;
}

void
Fetch1::processResponse(Fetch1::FetchRequestPtr response,
    ForwardLineData &line)
{
    Fetch1ThreadInfo &thread = fetchInfo[response->id.threadId];
    PacketPtr packet = response->packet;

    /* Pass the prefetch abort (if any) on to Fetch2 in a ForwardLineData
     * structure */
    line.setFault(response->fault);
    /* Make sequence numbers valid in return */
    line.id = response->id;
    /* Set PC to virtual address */
    line.pc = response->pc;
    /* Set the lineBase, which is a sizeof(MachInst) aligned address <=
     * pc.instAddr() */
    line.lineBaseAddr = response->request->getVaddr();

    if (response->fault != NoFault) {
        /* Stop fetching if there was a fault */
        /* Should probably try to flush the queues as well, but we
         * can't be sure that this fault will actually reach Execute, and we
         * can't (currently) selectively remove this stream from the queues */
        DPRINTF(Fetch, "Stopping line fetch because of fault: %s\n",
            response->fault->name());
        thread.state = Fetch1::FetchWaitingForPC;
    } else {
        line.adoptPacketData(packet);
        /* Null the response's packet to prevent the response from trying to
         * deallocate the packet */
        response->packet = NULL;
    }
}

void
Fetch1::evaluate()
{
    const BranchData &execute_branch = *inp.outputWire;
    const BranchData &fetch2_branch = *prediction.outputWire;
    ForwardLineData &line_out = *out.inputWire;

    assert(line_out.isBubble());

    for (ThreadID tid = 0; tid < cpu.numThreads; tid++)
        fetchInfo[tid].blocked = !nextStageReserve[tid].canReserve();

    /** Are both branches from later stages valid and for the same thread? */
    if (execute_branch.threadId != InvalidThreadID &&
        execute_branch.threadId == fetch2_branch.threadId) {

        Fetch1ThreadInfo &thread = fetchInfo[execute_branch.threadId];

        /* Are we changing stream?  Look to the Execute branches first, then
         * to predicted changes of stream from Fetch2 */
        if (execute_branch.isStreamChange()) {
            if (thread.state == FetchHalted) {
                DPRINTF(Fetch, "Halted, ignoring branch: %s\n",
                    execute_branch);
            } else {
                changeStream(execute_branch);
            }

            if (!fetch2_branch.isBubble()) {
                DPRINTF(Fetch, "Ignoring simultaneous prediction: %s\n",
                    fetch2_branch);
            }

            /* The streamSeqNum tagging in request/response ->req should
             * handle discarding those requests when we get to them. */
        } else if (thread.state != FetchHalted &&
            fetch2_branch.isStreamChange())
        {
            /* Handle branch predictions by changing the instruction source
             * if we're still processing the same stream (as set by
             * streamSeqNum) as the one of the prediction. */
            if (fetch2_branch.newStreamSeqNum != thread.streamSeqNum) {
                DPRINTF(Fetch, "Not changing stream on prediction: %s,"
                    " streamSeqNum mismatch\n",
                    fetch2_branch);
            } else {
                changeStream(fetch2_branch);
            }
        }
    } else {
        /* Fetch2 and Execute branches are for different threads */
        if (execute_branch.threadId != InvalidThreadID &&
            execute_branch.isStreamChange()) {

            if (fetchInfo[execute_branch.threadId].state == FetchHalted) {
                DPRINTF(Fetch, "Halted, ignoring branch: %s\n",
                    execute_branch);
            } else {
                changeStream(execute_branch);
            }
        }

        if (fetch2_branch.threadId != InvalidThreadID &&
            fetch2_branch.isStreamChange()) {

            if (fetchInfo[fetch2_branch.threadId].state == FetchHalted) {
                DPRINTF(Fetch, "Halted, ignoring branch: %s\n",
                    fetch2_branch);
            } else if (fetch2_branch.newStreamSeqNum !=
                fetchInfo[fetch2_branch.threadId].streamSeqNum)
            {
                DPRINTF(Fetch, "Not changing stream on prediction: %s,"
                    " streamSeqNum mismatch\n", fetch2_branch);
            } else {
                changeStream(fetch2_branch);
            }
        }
    }

    if (numInFlightFetches() < fetchLimit) {
        ThreadID fetch_tid = getScheduledThread();

        if (fetch_tid != InvalidThreadID) {
            DPRINTF(Fetch, "Fetching from thread %d\n", fetch_tid);

            /* Generate fetch to selected thread */
            fetchLine(fetch_tid);
            /* Take up a slot in the fetch queue */
            nextStageReserve[fetch_tid].reserve();
        } else {
            DPRINTF(Fetch, "No active threads available to fetch from\n");
        }
    }


    /* Halting shouldn't prevent fetches in flight from being processed */
    /* Step fetches through the icachePort queues and memory system */
    stepQueues();

    /* As we've thrown away early lines, if there is a line, it must be
     * from the right stream */
    if (!transfers.empty() &&
        transfers.front()->isComplete())
    {
        Fetch1::FetchRequestPtr response = transfers.front();

        if (response->isDiscardable()) {
            nextStageReserve[response->id.threadId].freeReservation();

            DPRINTF(Fetch, "Discarding translated fetch as it's for"
                " an old stream\n");

            /* Wake up next cycle just in case there was some other
             * action to do */
            cpu.wakeupOnEvent(Pipeline::Fetch1StageId);
        } else {
            DPRINTF(Fetch, "Processing fetched line: %s\n",
                response->id);

            processResponse(response, line_out);
        }

        popAndDiscard(transfers);
    }

    /* If we generated output, mark the stage as being active to encourage
     * that output on to the next stage */
    if (!line_out.isBubble())
        cpu.activityRecorder->activity();

    /* Fetch1 has no inputBuffer so the only activity we can have is to
     * generate a line output (tested just above) or to initiate a memory
     * fetch which will signal activity when it returns/needs stepping
     * between queues */


    /* This looks hackish.  And it is, but there doesn't seem to be a better
     * way to do this.  The signal from commit to suspend fetch takes 1
     * clock cycle to propagate to fetch.  However, a legitimate wakeup
     * may occur between cycles from the memory system.  Thus wakeup guard
     * prevents us from suspending in that case. */
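    /* Illustrative timeline (cycle numbers assumed): Execute signals
     * SuspendThread in cycle N; the branch reaches changeStream() in cycle
     * N+1.  If the memory system called wakeupFetch() in between,
     * wakeupGuard is set and changeStream() skips the suspend. */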

    for (auto& thread : fetchInfo) {
        thread.wakeupGuard = false;
    }
}

void
Fetch1::wakeupFetch(ThreadID tid)
{
    ThreadContext *thread_ctx = cpu.getContext(tid);
    Fetch1ThreadInfo &thread = fetchInfo[tid];
    thread.pc = thread_ctx->pcState();
    thread.state = FetchRunning;
    thread.wakeupGuard = true;
    DPRINTF(Fetch, "[tid:%d]: Changing stream wakeup %s\n",
        tid, thread_ctx->pcState());

    cpu.wakeupOnEvent(Pipeline::Fetch1StageId);
}

bool
Fetch1::isDrained()
{
    bool drained = numInFlightFetches() == 0 && (*out.inputWire).isBubble();
    for (ThreadID tid = 0; tid < cpu.numThreads; tid++) {
        Fetch1ThreadInfo &thread = fetchInfo[tid];
        DPRINTF(Drain, "isDrained[tid:%d]: %s %s%s\n",
            tid,
            thread.state == FetchHalted,
            (numInFlightFetches() == 0 ? "" : "inFlightFetches "),
            ((*out.inputWire).isBubble() ? "" : "outputtingLine"));

        drained = drained && (thread.state != FetchRunning);
    }

    return drained;
}

void
Fetch1::FetchRequest::reportData(std::ostream &os) const
{
    os << id;
}

bool
Fetch1::FetchRequest::isDiscardable() const
{
    Fetch1ThreadInfo &thread = fetch.fetchInfo[id.threadId];

    /* Can't discard lines in TLB/memory */
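    /* Example (hypothetical sequence numbers): a line fetched under
     * streamSeqNum 5 becomes discardable once a mispredict advances
     * thread.streamSeqNum to 6, but only after the request has left the
     * TLB and memory system. */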
    return state != InTranslation && state != RequestIssuing &&
        (id.streamSeqNum != thread.streamSeqNum ||
        id.predictionSeqNum != thread.predictionSeqNum);
}

void
Fetch1::minorTrace() const
{
    // TODO: Un-bork minorTrace for THREADS
    // bork bork bork
    const Fetch1ThreadInfo &thread = fetchInfo[0];

    std::ostringstream data;

    if (thread.blocked)
        data << 'B';
    else
        (*out.inputWire).reportData(data);

    MINORTRACE("state=%s icacheState=%s in_tlb_mem=%s/%s"
        " streamSeqNum=%d lines=%s\n", thread.state, icacheState,
        numFetchesInITLB, numFetchesInMemorySystem,
        thread.streamSeqNum, data.str());
    requests.minorTrace();
    transfers.minorTrace();
}

}