Revert power patch sets with unexpected interactions
[gem5.git] / src / cpu / minor / fetch1.cc
1 /*
2 * Copyright (c) 2013-2014 ARM Limited
3 * All rights reserved
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
13 *
14 * Redistribution and use in source and binary forms, with or without
15 * modification, are permitted provided that the following conditions are
16 * met: redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer;
18 * redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution;
21 * neither the name of the copyright holders nor the names of its
22 * contributors may be used to endorse or promote products derived from
23 * this software without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
26 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
27 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
28 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
29 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
30 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
31 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
32 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
33 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
34 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
35 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 *
37 * Authors: Andrew Bardsley
38 */
39
40 #include <cstring>
41 #include <iomanip>
42 #include <sstream>
43
44 #include "base/cast.hh"
45 #include "cpu/minor/fetch1.hh"
46 #include "cpu/minor/pipeline.hh"
47 #include "debug/Drain.hh"
48 #include "debug/Fetch.hh"
49 #include "debug/MinorTrace.hh"
50
51 namespace Minor
52 {
53
/** Construct the Fetch1 stage: wire up the latches to/from neighbouring
 *  stages, create the instruction cache port, and validate (or default
 *  from the cache line size) the line-fetch geometry parameters. */
Fetch1::Fetch1(const std::string &name_,
    MinorCPU &cpu_,
    MinorCPUParams &params,
    Latch<BranchData>::Output inp_,
    Latch<ForwardLineData>::Input out_,
    Latch<BranchData>::Output prediction_,
    Reservable &next_stage_input_buffer) :
    Named(name_),
    cpu(cpu_),
    inp(inp_),
    out(out_),
    prediction(prediction_),
    nextStageReserve(next_stage_input_buffer),
    icachePort(name_ + ".icache_port", *this, cpu_),
    lineSnap(params.fetch1LineSnapWidth),
    maxLineWidth(params.fetch1LineWidth),
    fetchLimit(params.fetch1FetchLimit),
    state(FetchWaitingForPC),
    pc(0),
    streamSeqNum(InstId::firstStreamSeqNum),
    predictionSeqNum(InstId::firstPredictionSeqNum),
    blocked(false),
    requests(name_ + ".requests", "lines", params.fetch1FetchLimit),
    transfers(name_ + ".transfers", "lines", params.fetch1FetchLimit),
    icacheState(IcacheRunning),
    lineSeqNum(InstId::firstLineSeqNum),
    numFetchesInMemorySystem(0),
    numFetchesInITLB(0)
{
    /* A configured snap width of 0 means "use the cache line size" */
    if (lineSnap == 0) {
        lineSnap = cpu.cacheLineSize();
        DPRINTF(Fetch, "lineSnap set to cache line size of: %d\n",
            lineSnap);
    }

    /* Likewise a 0 maximum line width defaults to the cache line size */
    if (maxLineWidth == 0) {
        maxLineWidth = cpu.cacheLineSize();
        DPRINTF(Fetch, "maxLineWidth set to cache line size of: %d\n",
            maxLineWidth);
    }

    /* These assertions should be copied to the Python config. as well */

    /* Snap width must hold a whole number of machine instructions */
    if ((lineSnap % sizeof(TheISA::MachInst)) != 0) {
        fatal("%s: fetch1LineSnapWidth must be a multiple "
            "of sizeof(TheISA::MachInst) (%d)\n", name_,
            sizeof(TheISA::MachInst));
    }

    /* Line width must be instruction-aligned and at least one snap wide */
    if (!(maxLineWidth >= lineSnap &&
        (maxLineWidth % sizeof(TheISA::MachInst)) == 0))
    {
        fatal("%s: fetch1LineWidth must be a multiple of"
            " sizeof(TheISA::MachInst)"
            " (%d), and >= fetch1LineSnapWidth (%d)\n",
            name_, sizeof(TheISA::MachInst), lineSnap);
    }

    /* The queues are sized by fetchLimit, so it must be at least 1 */
    if (fetchLimit < 1) {
        fatal("%s: fetch1FetchLimit must be >= 1 (%d)\n", name_,
            fetchLimit);
    }
}
116
/** Issue one line-fetch: build a FetchRequest for the line containing the
 *  current PC, start its ITLB translation, and advance the PC to the start
 *  of the next line. */
void
Fetch1::fetchLine()
{
    /* If line_offset != 0, a request is pushed for the remainder of the
     * line. */
    /* Use a lower, sizeof(MachInst) aligned address for the fetch */
    Addr aligned_pc = pc.instAddr() & ~((Addr) lineSnap - 1);
    /* NOTE(review): aligned_pc is already lineSnap-aligned, so for a
     * power-of-two lineSnap this offset is always 0 and request_size is
     * always maxLineWidth — possibly pc.instAddr() was intended here;
     * confirm before changing */
    unsigned int line_offset = aligned_pc % lineSnap;
    unsigned int request_size = maxLineWidth - line_offset;

    /* Fill in the line's id: stream/prediction sequence numbers tag the
     * request so stale lines can be discarded later */
    InstId request_id(0 /* thread */,
        streamSeqNum, predictionSeqNum,
        lineSeqNum);

    FetchRequestPtr request = new FetchRequest(*this, request_id, pc);

    DPRINTF(Fetch, "Inserting fetch into the fetch queue "
        "%s addr: 0x%x pc: %s line_offset: %d request_size: %d\n",
        request_id, aligned_pc, pc, line_offset, request_size);

    request->request.setThreadContext(cpu.threads[0]->getTC()->contextId(),
        /* thread id */ 0);
    request->request.setVirt(0 /* asid */,
        aligned_pc, request_size, Request::INST_FETCH, cpu.instMasterId(),
        /* I've no idea why we need the PC, but give it */
        pc.instAddr());

    DPRINTF(Fetch, "Submitting ITLB request\n");
    numFetchesInITLB++;

    request->state = FetchRequest::InTranslation;

    /* Reserve space in the queues upstream of requests for results */
    transfers.reserve();
    requests.push(request);

    /* Submit the translation request.  The response will come
     * through finish/markDelayed on this request as it bears
     * the Translation interface */
    cpu.threads[request->id.threadId]->itb->translateTiming(
        &request->request,
        cpu.getContext(request->id.threadId),
        request, BaseTLB::Execute);

    lineSeqNum++;

    /* Step the PC for the next line onto the line aligned next address.
     * Note that as instructions can span lines, this PC is only a
     * reliable 'new' PC if the next line has a new stream sequence number. */
#if THE_ISA == ALPHA_ISA
    /* Restore the low bits of the PC used as address space flags */
    Addr pc_low_bits = pc.instAddr() &
        ((Addr) (1 << sizeof(TheISA::MachInst)) - 1);

    pc.set(aligned_pc + request_size + pc_low_bits);
#else
    pc.set(aligned_pc + request_size);
#endif
}
177
178 std::ostream &
179 operator <<(std::ostream &os, Fetch1::IcacheState state)
180 {
181 switch (state) {
182 case Fetch1::IcacheRunning:
183 os << "IcacheRunning";
184 break;
185 case Fetch1::IcacheNeedsRetry:
186 os << "IcacheNeedsRetry";
187 break;
188 default:
189 os << "IcacheState-" << static_cast<int>(state);
190 break;
191 }
192 return os;
193 }
194
/** Build the memory-system read packet for this (already translated)
 *  fetch request and attach this object as sender state so the eventual
 *  response can be matched back to the request in recvTimingResp. */
void
Fetch1::FetchRequest::makePacket()
{
    /* Make the necessary packet for a memory transaction */
    packet = new Packet(&request, MemCmd::ReadReq);
    packet->allocate();

    /* This FetchRequest becomes SenderState to allow the response to be
     * identified */
    packet->pushSenderState(this);
}
206
207 void
208 Fetch1::FetchRequest::finish(const Fault &fault_, RequestPtr request_,
209 ThreadContext *tc, BaseTLB::Mode mode)
210 {
211 fault = fault_;
212
213 state = Translated;
214 fetch.handleTLBResponse(this);
215
216 /* Let's try and wake up the processor for the next cycle */
217 fetch.cpu.wakeupOnEvent(Pipeline::Fetch1StageId);
218 }
219
220 void
221 Fetch1::handleTLBResponse(FetchRequestPtr response)
222 {
223 numFetchesInITLB--;
224
225 if (response->fault != NoFault) {
226 DPRINTF(Fetch, "Fault in address ITLB translation: %s, "
227 "paddr: 0x%x, vaddr: 0x%x\n",
228 response->fault->name(),
229 (response->request.hasPaddr() ? response->request.getPaddr() : 0),
230 response->request.getVaddr());
231
232 if (DTRACE(MinorTrace))
233 minorTraceResponseLine(name(), response);
234 } else {
235 DPRINTF(Fetch, "Got ITLB response\n");
236 }
237
238 response->state = FetchRequest::Translated;
239
240 tryToSendToTransfers(response);
241 }
242
243 Fetch1::FetchRequest::~FetchRequest()
244 {
245 if (packet)
246 delete packet;
247 }
248
/** Try to advance the given request from the requests queue towards the
 *  transfers queue: discardable/faulting requests skip memory entirely,
 *  translated requests are sent to the icache, and requests still in
 *  translation (or not at the queue head) are left alone. */
void
Fetch1::tryToSendToTransfers(FetchRequestPtr request)
{
    /* Requests must leave in FIFO order, so only the head may advance */
    if (!requests.empty() && requests.front() != request) {
        DPRINTF(Fetch, "Fetch not at front of requests queue, can't"
            " issue to memory\n");
        return;
    }

    if (request->state == FetchRequest::InTranslation) {
        DPRINTF(Fetch, "Fetch still in translation, not issuing to"
            " memory\n");
        return;
    }

    if (request->isDiscardable() || request->fault != NoFault) {
        /* Discarded and faulting requests carry on through transfers
         * as Complete/packet == NULL */

        request->state = FetchRequest::Complete;
        moveFromRequestsToTransfers(request);

        /* Wake up the pipeline next cycle as there will be no event
         * for this queue->queue transfer */
        cpu.wakeupOnEvent(Pipeline::Fetch1StageId);
    } else if (request->state == FetchRequest::Translated) {
        /* Lazily build the packet on first attempt to send */
        if (!request->packet)
            request->makePacket();

        /* Ensure that the packet won't delete the request */
        assert(request->packet->needsResponse());

        if (tryToSend(request))
            moveFromRequestsToTransfers(request);
    } else {
        DPRINTF(Fetch, "Not advancing line fetch\n");
    }
}
287
288 void
289 Fetch1::moveFromRequestsToTransfers(FetchRequestPtr request)
290 {
291 assert(!requests.empty() && requests.front() == request);
292
293 requests.pop();
294 transfers.push(request);
295 }
296
297 bool
298 Fetch1::tryToSend(FetchRequestPtr request)
299 {
300 bool ret = false;
301
302 if (icachePort.sendTimingReq(request->packet)) {
303 /* Invalidate the fetch_requests packet so we don't
304 * accidentally fail to deallocate it (or use it!)
305 * later by overwriting it */
306 request->packet = NULL;
307 request->state = FetchRequest::RequestIssuing;
308 numFetchesInMemorySystem++;
309
310 ret = true;
311
312 DPRINTF(Fetch, "Issued fetch request to memory: %s\n",
313 request->id);
314 } else {
315 /* Needs to be resent, wait for that */
316 icacheState = IcacheNeedsRetry;
317
318 DPRINTF(Fetch, "Line fetch needs to retry: %s\n",
319 request->id);
320 }
321
322 return ret;
323 }
324
325 void
326 Fetch1::stepQueues()
327 {
328 IcacheState old_icache_state = icacheState;
329
330 switch (icacheState) {
331 case IcacheRunning:
332 /* Move ITLB results on to the memory system */
333 if (!requests.empty()) {
334 tryToSendToTransfers(requests.front());
335 }
336 break;
337 case IcacheNeedsRetry:
338 break;
339 }
340
341 if (icacheState != old_icache_state) {
342 DPRINTF(Fetch, "Step in state %s moving to state %s\n",
343 old_icache_state, icacheState);
344 }
345 }
346
347 void
348 Fetch1::popAndDiscard(FetchQueue &queue)
349 {
350 if (!queue.empty()) {
351 delete queue.front();
352 queue.pop();
353 }
354 }
355
356 unsigned int
357 Fetch1::numInFlightFetches()
358 {
359 return requests.occupiedSpace() +
360 transfers.occupiedSpace();
361 }
362
/** Print the appropriate MinorLine line for a fetch response.  Error
 *  packets and translation faults get fault-annotated lines; successful
 *  responses report size and both addresses. */
void
Fetch1::minorTraceResponseLine(const std::string &name,
    Fetch1::FetchRequestPtr response) const
{
    /* M5_VAR_USED silences unused-variable warnings when tracing is
     * compiled out */
    Request &request M5_VAR_USED = response->request;

    if (response->packet && response->packet->isError()) {
        MINORLINE(this, "id=F;%s vaddr=0x%x fault=\"error packet\"\n",
            response->id, request.getVaddr());
    } else if (response->fault != NoFault) {
        MINORLINE(this, "id=F;%s vaddr=0x%x fault=\"%s\"\n",
            response->id, request.getVaddr(), response->fault->name());
    } else {
        MINORLINE(this, "id=%s size=%d vaddr=0x%x paddr=0x%x\n",
            response->id, request.getSize(),
            request.getVaddr(), request.getPaddr());
    }
}
382
/** Memory-system response callback: reunite the returning packet with
 *  its FetchRequest (recovered from sender state), mark it Complete and
 *  wake the stage so 'evaluate' can process it.  Always accepts the
 *  response. */
bool
Fetch1::recvTimingResp(PacketPtr response)
{
    DPRINTF(Fetch, "recvTimingResp %d\n", numFetchesInMemorySystem);

    /* Only push the response if we didn't change stream?  No, all responses
     *  should hit the responses queue.  It's the job of 'step' to throw them
     *  away. */
    FetchRequestPtr fetch_request = safe_cast<FetchRequestPtr>
        (response->popSenderState());

    /* Fixup packet in fetch_request as this may have changed */
    assert(!fetch_request->packet);
    fetch_request->packet = response;

    numFetchesInMemorySystem--;
    fetch_request->state = FetchRequest::Complete;

    if (DTRACE(MinorTrace))
        minorTraceResponseLine(name(), fetch_request);

    if (response->isError()) {
        DPRINTF(Fetch, "Received error response packet: %s\n",
            fetch_request->id);
    }

    /* We go to idle even if there are more things to do on the queues as
     * it's the job of step to actually step us on to the next transaction */

    /* Let's try and wake up the processor for the next cycle to move on
     * queues */
    cpu.wakeupOnEvent(Pipeline::Fetch1StageId);

    /* Never busy */
    return true;
}
419
420 void
421 Fetch1::recvReqRetry()
422 {
423 DPRINTF(Fetch, "recvRetry\n");
424 assert(icacheState == IcacheNeedsRetry);
425 assert(!requests.empty());
426
427 FetchRequestPtr retryRequest = requests.front();
428
429 icacheState = IcacheRunning;
430
431 if (tryToSend(retryRequest))
432 moveFromRequestsToTransfers(retryRequest);
433 }
434
435 std::ostream &
436 operator <<(std::ostream &os, Fetch1::FetchState state)
437 {
438 switch (state) {
439 case Fetch1::FetchHalted:
440 os << "FetchHalted";
441 break;
442 case Fetch1::FetchWaitingForPC:
443 os << "FetchWaitingForPC";
444 break;
445 case Fetch1::FetchRunning:
446 os << "FetchRunning";
447 break;
448 default:
449 os << "FetchState-" << static_cast<int>(state);
450 break;
451 }
452 return os;
453 }
454
455 void
456 Fetch1::changeStream(const BranchData &branch)
457 {
458 updateExpectedSeqNums(branch);
459
460 /* Start fetching again if we were stopped */
461 switch (branch.reason) {
462 case BranchData::SuspendThread:
463 DPRINTF(Fetch, "Suspending fetch: %s\n", branch);
464 state = FetchWaitingForPC;
465 break;
466 case BranchData::HaltFetch:
467 DPRINTF(Fetch, "Halting fetch\n");
468 state = FetchHalted;
469 break;
470 default:
471 DPRINTF(Fetch, "Changing stream on branch: %s\n", branch);
472 state = FetchRunning;
473 break;
474 }
475 pc = branch.target;
476 }
477
/** Adopt the stream and prediction sequence numbers carried by a branch;
 *  requests/lines tagged with older numbers become discardable. */
void
Fetch1::updateExpectedSeqNums(const BranchData &branch)
{
    DPRINTF(Fetch, "Updating streamSeqNum from: %d to %d,"
        " predictionSeqNum from: %d to %d\n",
        streamSeqNum, branch.newStreamSeqNum,
        predictionSeqNum, branch.newPredictionSeqNum);

    /* Change the stream */
    streamSeqNum = branch.newStreamSeqNum;
    /* Update the prediction.  Note that it's possible for this to
     * actually set the prediction to an *older* value if new
     * predictions have been discarded by execute */
    predictionSeqNum = branch.newPredictionSeqNum;
}
493
/** Turn a completed fetch response into the ForwardLineData passed on to
 *  Fetch2.  On success the packet's data is adopted by the line; on a
 *  fault the fault is forwarded and fetch stops until a new PC arrives. */
void
Fetch1::processResponse(Fetch1::FetchRequestPtr response,
    ForwardLineData &line)
{
    PacketPtr packet = response->packet;

    /* Pass the prefetch abort (if any) on to Fetch2 in a ForwardLineData
     * structure */
    line.setFault(response->fault);
    /* Make sequence numbers valid in return */
    line.id = response->id;
    /* Set PC to virtual address */
    line.pc = response->pc;
    /* Set the lineBase, which is a sizeof(MachInst) aligned address <=
     * pc.instAddr() */
    line.lineBaseAddr = response->request.getVaddr();

    if (response->fault != NoFault) {
        /* Stop fetching if there was a fault */
        /* Should probably try to flush the queues as well, but we
         * can't be sure that this fault will actually reach Execute, and we
         * can't (currently) selectively remove this stream from the queues */
        DPRINTF(Fetch, "Stopping line fetch because of fault: %s\n",
            response->fault->name());
        state = Fetch1::FetchWaitingForPC;
    } else {
        line.adoptPacketData(packet);
        /* Null the response's packet to prevent the response from trying to
         * deallocate the packet */
        response->packet = NULL;
    }
}
526
/** Per-cycle evaluation of Fetch1: apply any change of stream (Execute
 *  branches take priority over Fetch2 predictions), initiate a new line
 *  fetch if possible, step the icache queues, and pass the head
 *  completed line (or discard a stale one) on to Fetch2. */
void
Fetch1::evaluate()
{
    const BranchData &execute_branch = *inp.outputWire;
    const BranchData &fetch2_branch = *prediction.outputWire;
    ForwardLineData &line_out = *out.inputWire;

    /* The output latch slot must be free at the start of the cycle */
    assert(line_out.isBubble());

    blocked = !nextStageReserve.canReserve();

    /* Are we changing stream?  Look to the Execute branches first, then
     * to predicted changes of stream from Fetch2 */
    /* @todo, find better way to express ignoring branch predictions */
    if (execute_branch.isStreamChange() &&
        execute_branch.reason != BranchData::BranchPrediction)
    {
        if (state == FetchHalted) {
            /* Only a WakeupFetch branch can restart a halted fetch */
            if (execute_branch.reason == BranchData::WakeupFetch) {
                DPRINTF(Fetch, "Waking up fetch: %s\n", execute_branch);
                changeStream(execute_branch);
            } else {
                DPRINTF(Fetch, "Halted, ignoring branch: %s\n",
                    execute_branch);
            }
        } else {
            changeStream(execute_branch);
        }

        /* An Execute branch overrides any same-cycle prediction */
        if (!fetch2_branch.isBubble()) {
            DPRINTF(Fetch, "Ignoring simultaneous prediction: %s\n",
                fetch2_branch);
        }

        /* The streamSeqNum tagging in request/response ->req should handle
         *  discarding those requests when we get to them. */
    } else if (state != FetchHalted && fetch2_branch.isStreamChange()) {
        /* Handle branch predictions by changing the instruction source
         * if we're still processing the same stream (as set by streamSeqNum)
         * as the one of the prediction.
         */
        if (fetch2_branch.newStreamSeqNum != streamSeqNum) {
            DPRINTF(Fetch, "Not changing stream on prediction: %s,"
                " streamSeqNum mismatch\n",
                fetch2_branch);
        } else {
            changeStream(fetch2_branch);
        }
    }

    /* Can we fetch? */
    /* The bare minimum requirements for initiating a fetch */
    /* THREAD need to handle multiple threads */
    if (state == FetchRunning && /* We are actually fetching */
        !blocked && /* Space in the Fetch2 inputBuffer */
        /* The thread we're going to fetch for (thread 0), is active */
        cpu.getContext(0)->status() == ThreadContext::Active &&
        numInFlightFetches() < fetchLimit)
    {
        fetchLine();
        /* Take up a slot in the fetch queue */
        nextStageReserve.reserve();
    }

    /* Halting shouldn't prevent fetches in flight from being processed */
    /* Step fetches through the icachePort queues and memory system */
    stepQueues();

    /* As we've thrown away early lines, if there is a line, it must
     * be from the right stream */
    if (!transfers.empty() &&
        transfers.front()->isComplete())
    {
        Fetch1::FetchRequestPtr response = transfers.front();

        if (response->isDiscardable()) {
            /* Give back the Fetch2 slot reserved for this line */
            nextStageReserve.freeReservation();

            DPRINTF(Fetch, "Discarding translated fetch at it's for"
                " an old stream\n");

            /* Wake up next cycle just in case there was some other
             * action to do */
            cpu.wakeupOnEvent(Pipeline::Fetch1StageId);
        } else {
            DPRINTF(Fetch, "Processing fetched line: %s\n",
                response->id);

            processResponse(response, line_out);
        }

        popAndDiscard(transfers);
    }

    /* If we generated output, and mark the stage as being active
     * to encourage that output on to the next stage */
    if (!line_out.isBubble())
        cpu.activityRecorder->activity();

    /* Fetch1 has no inputBuffer so the only activity we can have is to
     * generate a line output (tested just above) or to initiate a memory
     * fetch which will signal activity when it returns/needs stepping
     * between queues */
}
631
632 bool
633 Fetch1::isDrained()
634 {
635 DPRINTF(Drain, "isDrained %s %s%s\n",
636 state,
637 (numInFlightFetches() == 0 ? "" : "inFlightFetches "),
638 ((*out.inputWire).isBubble() ? "" : "outputtingLine"));
639
640 return state == FetchHalted &&
641 numInFlightFetches() == 0 &&
642 (*out.inputWire).isBubble();
643 }
644
/** MinorTrace hook: report this request by printing its InstId. */
void
Fetch1::FetchRequest::reportData(std::ostream &os) const
{
    os << id;
}
650
651 bool Fetch1::FetchRequest::isDiscardable() const
652 {
653 /* Can't discard lines in TLB/memory */
654 return state != InTranslation && state != RequestIssuing &&
655 (id.streamSeqNum != fetch.streamSeqNum ||
656 id.predictionSeqNum != fetch.predictionSeqNum);
657 }
658
659 void
660 Fetch1::minorTrace() const
661 {
662 std::ostringstream data;
663
664 if (blocked)
665 data << 'B';
666 else
667 (*out.inputWire).reportData(data);
668
669 MINORTRACE("state=%s icacheState=%s in_tlb_mem=%s/%s"
670 " streamSeqNum=%d lines=%s\n", state, icacheState,
671 numFetchesInITLB, numFetchesInMemorySystem,
672 streamSeqNum, data.str());
673 requests.minorTrace();
674 transfers.minorTrace();
675 }
676
677 }