2 * Copyright (c) 2013-2014 ARM Limited
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
14 * Redistribution and use in source and binary forms, with or without
15 * modification, are permitted provided that the following conditions are
16 * met: redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer;
18 * redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution;
21 * neither the name of the copyright holders nor the names of its
22 * contributors may be used to endorse or promote products derived from
23 * this software without specific prior written permission.
25 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
26 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
27 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
28 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
29 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
30 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
31 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
32 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
33 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
34 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
35 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
37 * Authors: Andrew Bardsley
40 #include "cpu/minor/fetch1.hh"
46 #include "base/cast.hh"
47 #include "cpu/minor/pipeline.hh"
48 #include "debug/Drain.hh"
49 #include "debug/Fetch.hh"
50 #include "debug/MinorTrace.hh"
// Constructor for the Minor CPU's Fetch1 stage: wires up the latches
// to/from neighbouring stages, the I-cache port, the per-thread fetch
// state, and the requests/transfers queues, then validates the
// line-snap/line-width parameters against the cache line size.
// NOTE(review): this text is a lossy extraction -- several original lines
// (member initialisers, closing braces, DPRINTF arguments) are missing,
// and "&params" appears mis-encoded as "¶ms"; confirm against the
// original file before relying on exact behaviour.
55 Fetch1::Fetch1(const std::string
&name_
,
57 MinorCPUParams
&para;ms
,
58 Latch
<BranchData
>::Output inp_
,
59 Latch
<ForwardLineData
>::Input out_
,
60 Latch
<BranchData
>::Output prediction_
,
61 std::vector
<InputBuffer
<ForwardLineData
>> &next_stage_input_buffer
) :
66 prediction(prediction_
),
67 nextStageReserve(next_stage_input_buffer
),
68 icachePort(name_
+ ".icache_port", *this, cpu_
),
69 lineSnap(params
.fetch1LineSnapWidth
),
70 maxLineWidth(params
.fetch1LineWidth
),
71 fetchLimit(params
.fetch1FetchLimit
),
72 fetchInfo(params
.numThreads
),
74 requests(name_
+ ".requests", "lines", params
.fetch1FetchLimit
),
75 transfers(name_
+ ".transfers", "lines", params
.fetch1FetchLimit
),
76 icacheState(IcacheRunning
),
77 lineSeqNum(InstId::firstLineSeqNum
),
78 numFetchesInMemorySystem(0),
// A lineSnap parameter of 0 is a request to default to the cache line size.
82 lineSnap
= cpu
.cacheLineSize();
83 DPRINTF(Fetch
, "lineSnap set to cache line size of: %d\n",
// Likewise maxLineWidth == 0 defaults to the cache line size.
87 if (maxLineWidth
== 0) {
88 maxLineWidth
= cpu
.cacheLineSize();
89 DPRINTF(Fetch
, "maxLineWidth set to cache line size of: %d\n",
93 /* These assertions should be copied to the Python config. as well */
// lineSnap must be a whole number of machine instructions wide.
94 if ((lineSnap
% sizeof(TheISA::MachInst
)) != 0) {
95 fatal("%s: fetch1LineSnapWidth must be a multiple "
96 "of sizeof(TheISA::MachInst) (%d)\n", name_
,
97 sizeof(TheISA::MachInst
));
// The full fetch line must be MachInst-aligned and at least one snap wide.
100 if (!(maxLineWidth
>= lineSnap
&&
101 (maxLineWidth
% sizeof(TheISA::MachInst
)) == 0))
103 fatal("%s: fetch1LineWidth must be a multiple of"
104 " sizeof(TheISA::MachInst)"
105 " (%d), and >= fetch1LineSnapWidth (%d)\n",
106 name_
, sizeof(TheISA::MachInst
), lineSnap
);
// At least one fetch must be allowed in flight.
109 if (fetchLimit
< 1) {
110 fatal("%s: fetch1FetchLimit must be >= 1 (%d)\n", name_
,
// Select the next thread to fetch from according to the CPU's thread
// policy (single-threaded / round-robin / random), returning the first
// candidate thread that is Active, not blocked, and in FetchRunning
// state; InvalidThreadID if no thread qualifies.
116 Fetch1::getScheduledThread()
118 /* Select thread via policy. */
119 std::vector
<ThreadID
> priority_list
;
121 switch (cpu
.threadPolicy
) {
122 case Enums::SingleThreaded
:
123 priority_list
.push_back(0);
125 case Enums::RoundRobin
:
126 priority_list
= cpu
.roundRobinPriority(threadPriority
);
129 priority_list
= cpu
.randomPriority();
132 panic("Unknown fetch policy");
// Scan the prioritised list for the first fetchable thread; record it in
// threadPriority so round-robin can rotate from it next time.
135 for (auto tid
: priority_list
) {
136 if (cpu
.getContext(tid
)->status() == ThreadContext::Active
&&
137 !fetchInfo
[tid
].blocked
&&
138 fetchInfo
[tid
].state
== FetchRunning
) {
139 threadPriority
= tid
;
144 return InvalidThreadID
;
// Issue a fetch for the line containing the given thread's current PC:
// build a lineSnap-aligned FetchRequest tagged with the thread's stream
// and prediction sequence numbers, push it on the requests queue, start
// its ITLB translation, and step the thread's PC on to the next
// line-aligned address.
148 Fetch1::fetchLine(ThreadID tid
)
150 /* Reference the currently used thread state. */
151 Fetch1ThreadInfo
&thread
= fetchInfo
[tid
];
153 /* If line_offset != 0, a request is pushed for the remainder of the
155 /* Use a lower, sizeof(MachInst) aligned address for the fetch */
156 Addr aligned_pc
= thread
.pc
.instAddr() & ~((Addr
) lineSnap
- 1);
157 unsigned int line_offset
= aligned_pc
% lineSnap
;
158 unsigned int request_size
= maxLineWidth
- line_offset
;
160 /* Fill in the line's id */
161 InstId
request_id(tid
,
162 thread
.streamSeqNum
, thread
.predictionSeqNum
,
165 FetchRequestPtr request
= new FetchRequest(*this, request_id
, thread
.pc
);
167 DPRINTF(Fetch
, "Inserting fetch into the fetch queue "
168 "%s addr: 0x%x pc: %s line_offset: %d request_size: %d\n",
169 request_id
, aligned_pc
, thread
.pc
, line_offset
, request_size
);
171 request
->request
.setContext(cpu
.threads
[tid
]->getTC()->contextId());
172 request
->request
.setVirt(0 /* asid */,
173 aligned_pc
, request_size
, Request::INST_FETCH
, cpu
.instMasterId(),
174 /* I've no idea why we need the PC, but give it */
175 thread
.pc
.instAddr());
177 DPRINTF(Fetch
, "Submitting ITLB request\n");
180 request
->state
= FetchRequest::InTranslation
;
182 /* Reserve space in the queues upstream of requests for results */
184 requests
.push(request
);
186 /* Submit the translation request. The response will come
187 * through finish/markDelayed on this request as it bears
188 * the Translation interface */
189 cpu
.threads
[request
->id
.threadId
]->itb
->translateTiming(
191 cpu
.getContext(request
->id
.threadId
),
192 request
, BaseTLB::Execute
);
196 /* Step the PC for the next line onto the line aligned next address.
197 * Note that as instructions can span lines, this PC is only a
198 * reliable 'new' PC if the next line has a new stream sequence number. */
199 #if THE_ISA == ALPHA_ISA
200 /* Restore the low bits of the PC used as address space flags */
201 Addr pc_low_bits
= thread
.pc
.instAddr() &
202 ((Addr
) (1 << sizeof(TheISA::MachInst
)) - 1);
204 thread
.pc
.set(aligned_pc
+ request_size
+ pc_low_bits
);
206 thread
.pc
.set(aligned_pc
+ request_size
);
// Debug-printing helper: stream textual names for Fetch1::IcacheState
// values, falling back to a numeric "IcacheState-N" form for unknown
// states. NOTE(review): the surrounding switch/braces are missing from
// this extraction.
211 operator <<(std::ostream
&os
, Fetch1::IcacheState state
)
214 case Fetch1::IcacheRunning
:
215 os
<< "IcacheRunning";
217 case Fetch1::IcacheNeedsRetry
:
218 os
<< "IcacheNeedsRetry";
221 os
<< "IcacheState-" << static_cast<int>(state
);
// Build the memory-system ReadReq packet for this fetch request and push
// this FetchRequest as the packet's SenderState so the response path can
// recover the originating request.
228 Fetch1::FetchRequest::makePacket()
230 /* Make the necessary packet for a memory transaction */
231 packet
= new Packet(&request
, MemCmd::ReadReq
);
234 /* This FetchRequest becomes SenderState to allow the response to be
236 packet
->pushSenderState(this);
// TLB Translation-interface completion callback: hand the translated
// request back to the owning Fetch1 stage and wake the pipeline so it can
// act on it next cycle.
240 Fetch1::FetchRequest::finish(const Fault
&fault_
, RequestPtr request_
,
241 ThreadContext
*tc
, BaseTLB::Mode mode
)
246 fetch
.handleTLBResponse(this);
248 /* Let's try and wake up the processor for the next cycle */
249 fetch
.cpu
.wakeupOnEvent(Pipeline::Fetch1StageId
);
// Process a completed ITLB translation: log any translation fault (and a
// MinorTrace line if tracing), mark the request Translated, then try to
// advance it towards the memory system.
253 Fetch1::handleTLBResponse(FetchRequestPtr response
)
257 if (response
->fault
!= NoFault
) {
258 DPRINTF(Fetch
, "Fault in address ITLB translation: %s, "
259 "paddr: 0x%x, vaddr: 0x%x\n",
260 response
->fault
->name(),
261 (response
->request
.hasPaddr() ? response
->request
.getPaddr() : 0),
262 response
->request
.getVaddr());
264 if (DTRACE(MinorTrace
))
265 minorTraceResponseLine(name(), response
);
267 DPRINTF(Fetch
, "Got ITLB response\n");
270 response
->state
= FetchRequest::Translated
;
272 tryToSendToTransfers(response
);
// FetchRequest destructor.
// NOTE(review): the body is not visible in this extraction -- presumably
// it releases the owned packet; confirm against the original file.
275 Fetch1::FetchRequest::~FetchRequest()
// Try to advance the request at the head of the requests queue:
// discardable or faulting requests pass straight to the transfers queue
// as Complete (packet == NULL); fully translated requests get a packet
// built and are issued to the I-cache. Requests not at the queue head or
// still in translation are left alone.
282 Fetch1::tryToSendToTransfers(FetchRequestPtr request
)
284 if (!requests
.empty() && requests
.front() != request
) {
285 DPRINTF(Fetch
, "Fetch not at front of requests queue, can't"
286 " issue to memory\n");
290 if (request
->state
== FetchRequest::InTranslation
) {
291 DPRINTF(Fetch
, "Fetch still in translation, not issuing to"
296 if (request
->isDiscardable() || request
->fault
!= NoFault
) {
297 /* Discarded and faulting requests carry on through transfers
298 * as Complete/packet == NULL */
300 request
->state
= FetchRequest::Complete
;
301 moveFromRequestsToTransfers(request
);
303 /* Wake up the pipeline next cycle as there will be no event
304 * for this queue->queue transfer */
305 cpu
.wakeupOnEvent(Pipeline::Fetch1StageId
);
306 } else if (request
->state
== FetchRequest::Translated
) {
307 if (!request
->packet
)
308 request
->makePacket();
310 /* Ensure that the packet won't delete the request */
311 assert(request
->packet
->needsResponse());
313 if (tryToSend(request
))
314 moveFromRequestsToTransfers(request
);
316 DPRINTF(Fetch
, "Not advancing line fetch\n");
// Move the given request (which must be at the head of the requests
// queue) onto the transfers queue.
321 Fetch1::moveFromRequestsToTransfers(FetchRequestPtr request
)
323 assert(!requests
.empty() && requests
.front() == request
);
326 transfers
.push(request
);
// Attempt to send the request's packet via the I-cache port. On success,
// ownership of the packet passes to the memory system (so the local
// pointer is nulled), the request moves to RequestIssuing, and the
// in-memory-system count is bumped; on failure the port must retry, so
// record IcacheNeedsRetry.
330 Fetch1::tryToSend(FetchRequestPtr request
)
334 if (icachePort
.sendTimingReq(request
->packet
)) {
335 /* Invalidate the fetch_requests packet so we don't
336 * accidentally fail to deallocate it (or use it!)
337 * later by overwriting it */
338 request
->packet
= NULL
;
339 request
->state
= FetchRequest::RequestIssuing
;
340 numFetchesInMemorySystem
++;
344 DPRINTF(Fetch
, "Issued fetch request to memory: %s\n",
347 /* Needs to be resent, wait for that */
348 icacheState
= IcacheNeedsRetry
;
350 DPRINTF(Fetch
, "Line fetch needs to retry: %s\n",
// Interior of the queue-stepping routine (its signature is not visible in
// this extraction; in the Minor model this is presumably
// Fetch1::stepQueues -- confirm against the original file). When the
// I-cache is running, pushes the head of the requests queue towards the
// memory system; logs any I-cache state transition afterwards.
360 IcacheState old_icache_state
= icacheState
;
362 switch (icacheState
) {
364 /* Move ITLB results on to the memory system */
365 if (!requests
.empty()) {
366 tryToSendToTransfers(requests
.front());
369 case IcacheNeedsRetry
:
373 if (icacheState
!= old_icache_state
) {
374 DPRINTF(Fetch
, "Step in state %s moving to state %s\n",
375 old_icache_state
, icacheState
);
// Delete the request at the front of the given queue (if any) and pop it.
380 Fetch1::popAndDiscard(FetchQueue
&queue
)
382 if (!queue
.empty()) {
383 delete queue
.front();
// Total fetches currently owned by this stage: occupancy of the requests
// (pre-memory) plus transfers (post-memory) queues.
389 Fetch1::numInFlightFetches()
391 return requests
.occupiedSpace() +
392 transfers
.occupiedSpace();
395 /** Print the appropriate MinorLine line for a fetch response */
// Three cases: an error packet, a translation/other fault, or a normal
// completed fetch with size/vaddr/paddr.
397 Fetch1::minorTraceResponseLine(const std::string
&name
,
398 Fetch1::FetchRequestPtr response
) const
400 Request
&request M5_VAR_USED
= response
->request
;
402 if (response
->packet
&& response
->packet
->isError()) {
403 MINORLINE(this, "id=F;%s vaddr=0x%x fault=\"error packet\"\n",
404 response
->id
, request
.getVaddr());
405 } else if (response
->fault
!= NoFault
) {
406 MINORLINE(this, "id=F;%s vaddr=0x%x fault=\"%s\"\n",
407 response
->id
, request
.getVaddr(), response
->fault
->name());
409 MINORLINE(this, "id=%s size=%d vaddr=0x%x paddr=0x%x\n",
410 response
->id
, request
.getSize(),
411 request
.getVaddr(), request
.getPaddr());
// Handle a timing response from the I-cache: recover the originating
// FetchRequest from the packet's SenderState, re-attach the packet to it,
// mark it Complete, emit tracing, and wake the pipeline to process the
// completed transfer next cycle.
416 Fetch1::recvTimingResp(PacketPtr response
)
418 DPRINTF(Fetch
, "recvTimingResp %d\n", numFetchesInMemorySystem
);
420 /* Only push the response if we didn't change stream? No, all responses
421 * should hit the responses queue. It's the job of 'step' to throw them
423 FetchRequestPtr fetch_request
= safe_cast
<FetchRequestPtr
>
424 (response
->popSenderState());
426 /* Fixup packet in fetch_request as this may have changed */
427 assert(!fetch_request
->packet
);
428 fetch_request
->packet
= response
;
430 numFetchesInMemorySystem
--;
431 fetch_request
->state
= FetchRequest::Complete
;
433 if (DTRACE(MinorTrace
))
434 minorTraceResponseLine(name(), fetch_request
);
436 if (response
->isError()) {
437 DPRINTF(Fetch
, "Received error response packet: %s\n",
441 /* We go to idle even if there are more things to do on the queues as
442 * it's the job of step to actually step us on to the next transaction */
444 /* Let's try and wake up the processor for the next cycle to move on
446 cpu
.wakeupOnEvent(Pipeline::Fetch1StageId
);
// The I-cache port can accept requests again after a rejected send:
// return to IcacheRunning and retry the request at the head of the
// requests queue.
453 Fetch1::recvReqRetry()
455 DPRINTF(Fetch
, "recvRetry\n");
456 assert(icacheState
== IcacheNeedsRetry
);
457 assert(!requests
.empty());
459 FetchRequestPtr retryRequest
= requests
.front();
461 icacheState
= IcacheRunning
;
463 if (tryToSend(retryRequest
))
464 moveFromRequestsToTransfers(retryRequest
);
// Debug-printing helper: stream textual names for Fetch1::FetchState
// values, falling back to a numeric "FetchState-N" form for unknown
// states. NOTE(review): the surrounding switch/braces are missing from
// this extraction.
468 operator <<(std::ostream
&os
, Fetch1::FetchState state
)
471 case Fetch1::FetchHalted
:
474 case Fetch1::FetchWaitingForPC
:
475 os
<< "FetchWaitingForPC";
477 case Fetch1::FetchRunning
:
478 os
<< "FetchRunning";
481 os
<< "FetchState-" << static_cast<int>(state
);
// React to a stream-changing branch from a later stage: adopt its new
// sequence numbers, then adjust the thread's fetch state by branch
// reason (suspend / halt / resume running) and retarget its PC.
488 Fetch1::changeStream(const BranchData
&branch
)
490 Fetch1ThreadInfo
&thread
= fetchInfo
[branch
.threadId
];
492 updateExpectedSeqNums(branch
);
494 /* Start fetching again if we were stopped */
495 switch (branch
.reason
) {
496 case BranchData::SuspendThread
:
// wakeupGuard (set by wakeupFetch) vetoes a suspend that raced with a
// legitimate wakeup from the memory system.
498 if (thread
.wakeupGuard
) {
499 DPRINTF(Fetch
, "Not suspending fetch due to guard: %s\n",
502 DPRINTF(Fetch
, "Suspending fetch: %s\n", branch
);
503 thread
.state
= FetchWaitingForPC
;
507 case BranchData::HaltFetch
:
508 DPRINTF(Fetch
, "Halting fetch\n");
509 thread
.state
= FetchHalted
;
512 DPRINTF(Fetch
, "Changing stream on branch: %s\n", branch
);
513 thread
.state
= FetchRunning
;
516 thread
.pc
= branch
.target
;
// Adopt the stream and prediction sequence numbers carried by a branch so
// that in-flight fetches from the superseded stream can later be
// recognised (see FetchRequest::isDiscardable) and discarded.
520 Fetch1::updateExpectedSeqNums(const BranchData
&branch
)
522 Fetch1ThreadInfo
&thread
= fetchInfo
[branch
.threadId
];
524 DPRINTF(Fetch
, "Updating streamSeqNum from: %d to %d,"
525 " predictionSeqNum from: %d to %d\n",
526 thread
.streamSeqNum
, branch
.newStreamSeqNum
,
527 thread
.predictionSeqNum
, branch
.newPredictionSeqNum
);
529 /* Change the stream */
530 thread
.streamSeqNum
= branch
.newStreamSeqNum
;
531 /* Update the prediction. Note that it's possible for this to
532 * actually set the prediction to an *older* value if new
533 * predictions have been discarded by execute */
534 thread
.predictionSeqNum
= branch
.newPredictionSeqNum
;
// Convert a completed fetch response into the ForwardLineData handed to
// Fetch2: propagate any fault (which also stops further fetch for the
// thread), copy the line id, PC and base virtual address, and on success
// adopt the packet's data (nulling the response's packet pointer so the
// response's destructor won't free it).
538 Fetch1::processResponse(Fetch1::FetchRequestPtr response
,
539 ForwardLineData
&line
)
541 Fetch1ThreadInfo
&thread
= fetchInfo
[response
->id
.threadId
];
542 PacketPtr packet
= response
->packet
;
544 /* Pass the prefetch abort (if any) on to Fetch2 in a ForwardLineData
546 line
.setFault(response
->fault
);
547 /* Make sequence numbers valid in return */
548 line
.id
= response
->id
;
549 /* Set PC to virtual address */
550 line
.pc
= response
->pc
;
551 /* Set the lineBase, which is a sizeof(MachInst) aligned address <=
553 line
.lineBaseAddr
= response
->request
.getVaddr();
555 if (response
->fault
!= NoFault
) {
556 /* Stop fetching if there was a fault */
557 /* Should probably try to flush the queues as well, but we
558 * can't be sure that this fault will actually reach Execute, and we
559 * can't (currently) selectively remove this stream from the queues */
560 DPRINTF(Fetch
, "Stopping line fetch because of fault: %s\n",
561 response
->fault
->name());
562 thread
.state
= Fetch1::FetchWaitingForPC
;
564 line
.adoptPacketData(packet
);
565 /* Null the response's packet to prevent the response from trying to
566 * deallocate the packet */
567 response
->packet
= NULL
;
// Interior of the per-cycle evaluation routine (its signature is not
// visible in this extraction; in the Minor model this is presumably
// Fetch1::evaluate -- confirm against the original file). Per cycle:
// read branches from Execute and predictions from Fetch2, update
// per-thread blocked flags, apply stream changes (Execute takes priority
// over Fetch2 when both target the same thread), start a new line fetch
// if under fetchLimit, forward or discard completed transfers into the
// output line, record activity, and clear per-thread wakeup guards.
574 const BranchData
&execute_branch
= *inp
.outputWire
;
575 const BranchData
&fetch2_branch
= *prediction
.outputWire
;
576 ForwardLineData
&line_out
= *out
.inputWire
;
578 assert(line_out
.isBubble());
// A thread is blocked this cycle if Fetch2's input buffer has no space.
580 for (ThreadID tid
= 0; tid
< cpu
.numThreads
; tid
++)
581 fetchInfo
[tid
].blocked
= !nextStageReserve
[tid
].canReserve();
583 /** Are both branches from later stages valid and for the same thread? */
584 if (execute_branch
.threadId
!= InvalidThreadID
&&
585 execute_branch
.threadId
== fetch2_branch
.threadId
) {
587 Fetch1ThreadInfo
&thread
= fetchInfo
[execute_branch
.threadId
];
589 /* Are we changing stream? Look to the Execute branches first, then
590 * to predicted changes of stream from Fetch2 */
591 if (execute_branch
.isStreamChange()) {
592 if (thread
.state
== FetchHalted
) {
593 DPRINTF(Fetch
, "Halted, ignoring branch: %s\n", execute_branch
);
595 changeStream(execute_branch
);
598 if (!fetch2_branch
.isBubble()) {
599 DPRINTF(Fetch
, "Ignoring simultaneous prediction: %s\n",
603 /* The streamSeqNum tagging in request/response ->req should handle
604 * discarding those requests when we get to them. */
605 } else if (thread
.state
!= FetchHalted
&& fetch2_branch
.isStreamChange()) {
606 /* Handle branch predictions by changing the instruction source
607 * if we're still processing the same stream (as set by streamSeqNum)
608 * as the one of the prediction.
610 if (fetch2_branch
.newStreamSeqNum
!= thread
.streamSeqNum
) {
611 DPRINTF(Fetch
, "Not changing stream on prediction: %s,"
612 " streamSeqNum mismatch\n",
615 changeStream(fetch2_branch
);
619 /* Fetch2 and Execute branches are for different threads */
620 if (execute_branch
.threadId
!= InvalidThreadID
&&
621 execute_branch
.isStreamChange()) {
623 if (fetchInfo
[execute_branch
.threadId
].state
== FetchHalted
) {
624 DPRINTF(Fetch
, "Halted, ignoring branch: %s\n", execute_branch
);
626 changeStream(execute_branch
);
630 if (fetch2_branch
.threadId
!= InvalidThreadID
&&
631 fetch2_branch
.isStreamChange()) {
633 if (fetchInfo
[fetch2_branch
.threadId
].state
== FetchHalted
) {
634 DPRINTF(Fetch
, "Halted, ignoring branch: %s\n", fetch2_branch
);
635 } else if (fetch2_branch
.newStreamSeqNum
!= fetchInfo
[fetch2_branch
.threadId
].streamSeqNum
) {
636 DPRINTF(Fetch
, "Not changing stream on prediction: %s,"
637 " streamSeqNum mismatch\n", fetch2_branch
);
639 changeStream(fetch2_branch
);
// Start a new line fetch if there is capacity and a schedulable thread.
644 if (numInFlightFetches() < fetchLimit
) {
645 ThreadID fetch_tid
= getScheduledThread();
647 if (fetch_tid
!= InvalidThreadID
) {
648 DPRINTF(Fetch
, "Fetching from thread %d\n", fetch_tid
);
650 /* Generate fetch to selected thread */
651 fetchLine(fetch_tid
);
652 /* Take up a slot in the fetch queue */
653 nextStageReserve
[fetch_tid
].reserve();
655 DPRINTF(Fetch
, "No active threads available to fetch from\n");
660 /* Halting shouldn't prevent fetches in flight from being processed */
661 /* Step fetches through the icachePort queues and memory system */
664 /* As we've thrown away early lines, if there is a line, it must
665 * be from the right stream */
666 if (!transfers
.empty() &&
667 transfers
.front()->isComplete())
669 Fetch1::FetchRequestPtr response
= transfers
.front();
671 if (response
->isDiscardable()) {
672 nextStageReserve
[response
->id
.threadId
].freeReservation();
674 DPRINTF(Fetch
, "Discarding translated fetch as it's for"
677 /* Wake up next cycle just in case there was some other
679 cpu
.wakeupOnEvent(Pipeline::Fetch1StageId
);
681 DPRINTF(Fetch
, "Processing fetched line: %s\n",
684 processResponse(response
, line_out
);
687 popAndDiscard(transfers
);
690 /* If we generated output, and mark the stage as being active
691 * to encourage that output on to the next stage */
692 if (!line_out
.isBubble())
693 cpu
.activityRecorder
->activity();
695 /* Fetch1 has no inputBuffer so the only activity we can have is to
696 * generate a line output (tested just above) or to initiate a memory
697 * fetch which will signal activity when it returns/needs stepping
701 /* This looks hackish. And it is, but there doesn't seem to be a better
702 * way to do this. The signal from commit to suspend fetch takes 1
703 * clock cycle to propagate to fetch. However, a legitimate wakeup
704 * may occur between cycles from the memory system. Thus wakeup guard
705 * prevents us from suspending in that case. */
707 for (auto& thread
: fetchInfo
) {
708 thread
.wakeupGuard
= false;
// Restart fetch for a thread: reload its PC from the thread context, mark
// it FetchRunning, raise the wakeup guard (so an in-flight suspend signal
// from commit cannot immediately re-suspend it -- see the note at the end
// of the evaluation loop), and wake the pipeline.
713 Fetch1::wakeupFetch(ThreadID tid
)
715 ThreadContext
*thread_ctx
= cpu
.getContext(tid
);
716 Fetch1ThreadInfo
&thread
= fetchInfo
[tid
];
717 thread
.pc
= thread_ctx
->pcState();
718 thread
.state
= FetchRunning
;
719 thread
.wakeupGuard
= true;
720 DPRINTF(Fetch
, "[tid:%d]: Changing stream wakeup %s\n",
721 tid
, thread_ctx
->pcState());
723 cpu
.wakeupOnEvent(Pipeline::Fetch1StageId
);
// Interior of the drain check (its signature is not visible in this
// extraction; presumably Fetch1::isDrained -- confirm against the
// original file). Drained when no fetches are in flight, the output wire
// holds only a bubble, and no thread remains in FetchRunning.
729 bool drained
= numInFlightFetches() == 0 && (*out
.inputWire
).isBubble();
730 for (ThreadID tid
= 0; tid
< cpu
.numThreads
; tid
++) {
731 Fetch1ThreadInfo
&thread
= fetchInfo
[tid
];
732 DPRINTF(Drain
, "isDrained[tid:%d]: %s %s%s\n",
734 thread
.state
== FetchHalted
,
735 (numInFlightFetches() == 0 ? "" : "inFlightFetches "),
736 ((*out
.inputWire
).isBubble() ? "" : "outputtingLine"));
738 drained
= drained
&& (thread
.state
!= FetchRunning
);
// Trace hook: write a short description of this request to the stream.
// NOTE(review): the body is not visible in this extraction.
745 Fetch1::FetchRequest::reportData(std::ostream
&os
) const
// A fetch can be discarded once it is out of the TLB/memory system
// (neither InTranslation nor RequestIssuing) and its stream or prediction
// sequence number no longer matches the thread's current expectation,
// i.e. the stream it was fetched for has been superseded.
750 bool Fetch1::FetchRequest::isDiscardable() const
752 Fetch1ThreadInfo
&thread
= fetch
.fetchInfo
[id
.threadId
];
754 /* Can't discard lines in TLB/memory */
755 return state
!= InTranslation
&& state
!= RequestIssuing
&&
756 (id
.streamSeqNum
!= thread
.streamSeqNum
||
757 id
.predictionSeqNum
!= thread
.predictionSeqNum
);
// Emit a MINORTRACE summary line for this stage (thread 0 only, per the
// TODO below) plus MinorTrace output for the requests and transfers
// queues.
761 Fetch1::minorTrace() const
763 // TODO: Un-bork minorTrace for THREADS
765 const Fetch1ThreadInfo
&thread
= fetchInfo
[0];
767 std::ostringstream data
;
772 (*out
.inputWire
).reportData(data
);
774 MINORTRACE("state=%s icacheState=%s in_tlb_mem=%s/%s"
775 " streamSeqNum=%d lines=%s\n", thread
.state
, icacheState
,
776 numFetchesInITLB
, numFetchesInMemorySystem
,
777 thread
.streamSeqNum
, data
.str());
778 requests
.minorTrace();
779 transfers
.minorTrace();