cpu: Delete authors lists from the cpu directory.
[gem5.git] / src / cpu / minor / fetch2.cc
1 /*
2 * Copyright (c) 2013-2014,2016 ARM Limited
3 * All rights reserved
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
13 *
14 * Redistribution and use in source and binary forms, with or without
15 * modification, are permitted provided that the following conditions are
16 * met: redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer;
18 * redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution;
21 * neither the name of the copyright holders nor the names of its
22 * contributors may be used to endorse or promote products derived from
23 * this software without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
26 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
27 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
28 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
29 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
30 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
31 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
32 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
33 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
34 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
35 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 */
37
38 #include "cpu/minor/fetch2.hh"
39
40 #include <string>
41
42 #include "arch/decoder.hh"
43 #include "arch/utility.hh"
44 #include "cpu/minor/pipeline.hh"
45 #include "cpu/pred/bpred_unit.hh"
46 #include "debug/Branch.hh"
47 #include "debug/Fetch.hh"
48 #include "debug/MinorTrace.hh"
49
50 namespace Minor
51 {
52
Fetch2::Fetch2(const std::string &name,
    MinorCPU &cpu_,
    MinorCPUParams &params,
    Latch<ForwardLineData>::Output inp_,
    Latch<BranchData>::Output branchInp_,
    Latch<BranchData>::Input predictionOut_,
    Latch<ForwardInstData>::Input out_,
    std::vector<InputBuffer<ForwardInstData>> &next_stage_input_buffer) :
    Named(name),
    cpu(cpu_),
    inp(inp_),                           /* Fetch lines from Fetch1 */
    branchInp(branchInp_),               /* Branch resolutions from Execute */
    predictionOut(predictionOut_),       /* Our own predictions, back to Fetch1 */
    out(out_),                           /* Decoded instructions to Decode */
    nextStageReserve(next_stage_input_buffer),
    outputWidth(params.decodeInputWidth),
    processMoreThanOneInput(params.fetch2CycleInput),
    branchPredictor(*params.branchPred),
    fetchInfo(params.numThreads),        /* Per-thread decode state */
    threadPriority(0)
{
    /* Sanity-check configuration; a zero-width output or zero-sized
     * input buffer would deadlock the pipeline */
    if (outputWidth < 1)
        fatal("%s: decodeInputWidth must be >= 1 (%d)\n", name, outputWidth);

    if (params.fetch2InputBufferSize < 1) {
        fatal("%s: fetch2InputBufferSize must be >= 1 (%d)\n", name,
        params.fetch2InputBufferSize);
    }

    /* Per-thread input buffers */
    for (ThreadID tid = 0; tid < params.numThreads; tid++) {
        inputBuffer.push_back(
            InputBuffer<ForwardLineData>(
                name + ".inputBuffer" + std::to_string(tid), "lines",
                params.fetch2InputBufferSize));
    }
}
90
91 const ForwardLineData *
92 Fetch2::getInput(ThreadID tid)
93 {
94 /* Get a line from the inputBuffer to work with */
95 if (!inputBuffer[tid].empty()) {
96 return &(inputBuffer[tid].front());
97 } else {
98 return NULL;
99 }
100 }
101
102 void
103 Fetch2::popInput(ThreadID tid)
104 {
105 if (!inputBuffer[tid].empty()) {
106 inputBuffer[tid].front().freeLine();
107 inputBuffer[tid].pop();
108 }
109
110 fetchInfo[tid].inputIndex = 0;
111 }
112
113 void
114 Fetch2::dumpAllInput(ThreadID tid)
115 {
116 DPRINTF(Fetch, "Dumping whole input buffer\n");
117 while (!inputBuffer[tid].empty())
118 popInput(tid);
119
120 fetchInfo[tid].inputIndex = 0;
121 }
122
void
Fetch2::updateBranchPrediction(const BranchData &branch)
{
    /* Feed a branch resolution arriving from Execute into the branch
     * predictor so its history/BTB state tracks actual outcomes.
     * NOTE(review): squash-before-update ordering below is deliberate —
     * the shared BPredUnit (also used by O3) expects it. */
    MinorDynInstPtr inst = branch.inst;

    /* Don't even consider instructions we didn't try to predict or faults */
    if (inst->isFault() || !inst->triedToPredict)
        return;

    switch (branch.reason) {
      case BranchData::NoBranch:
        /* No data to update */
        break;
      case BranchData::Interrupt:
        /* Never try to predict interrupts */
        break;
      case BranchData::SuspendThread:
        /* Don't need to act on suspends */
        break;
      case BranchData::HaltFetch:
        /* Don't need to act on fetch wakeup */
        break;
      case BranchData::BranchPrediction:
        /* Shouldn't happen.  Fetch2 is the only source of
         *  BranchPredictions */
        break;
      case BranchData::UnpredictedBranch:
        /* Unpredicted branch or barrier */
        DPRINTF(Branch, "Unpredicted branch seen inst: %s\n", *inst);
        branchPredictor.squash(inst->id.fetchSeqNum,
            branch.target, true, inst->id.threadId);
        // Update after squashing to accommodate O3CPU
        // using the branch prediction code.
        branchPredictor.update(inst->id.fetchSeqNum,
                               inst->id.threadId);
        break;
      case BranchData::CorrectlyPredictedBranch:
        /* Predicted taken, was taken */
        DPRINTF(Branch, "Branch predicted correctly inst: %s\n", *inst);
        branchPredictor.update(inst->id.fetchSeqNum,
            inst->id.threadId);
        break;
      case BranchData::BadlyPredictedBranch:
        /* Predicted taken, not taken */
        DPRINTF(Branch, "Branch mis-predicted inst: %s\n", *inst);
        branchPredictor.squash(inst->id.fetchSeqNum,
            branch.target /* Not used */, false, inst->id.threadId);
        // Update after squashing to accommodate O3CPU
        // using the branch prediction code.
        branchPredictor.update(inst->id.fetchSeqNum,
                               inst->id.threadId);
        break;
      case BranchData::BadlyPredictedBranchTarget:
        /* Predicted taken, was taken but to a different target */
        DPRINTF(Branch, "Branch mis-predicted target inst: %s target: %s\n",
            *inst, branch.target);
        branchPredictor.squash(inst->id.fetchSeqNum,
            branch.target, true, inst->id.threadId);
        break;
    }
}
184
void
Fetch2::predictBranch(MinorDynInstPtr inst, BranchData &branch)
{
    /* Consult the branch predictor for a freshly-decoded instruction.
     * On a predicted-taken branch, fills `branch` with a
     * BranchData::BranchPrediction carrying a new predictionSeqNum so
     * Fetch1 redirects and stale lines can later be recognised. */
    Fetch2ThreadInfo &thread = fetchInfo[inst->id.threadId];
    TheISA::PCState inst_pc = inst->pc;

    assert(!inst->predictedTaken);

    /* Skip non-control/sys call instructions */
    if (inst->staticInst->isControl() ||
        inst->staticInst->isSyscall())
    {
        /* Tried to predict */
        inst->triedToPredict = true;

        DPRINTF(Branch, "Trying to predict for inst: %s\n", *inst);

        /* predict() may modify inst_pc to the predicted target */
        if (branchPredictor.predict(inst->staticInst,
            inst->id.fetchSeqNum, inst_pc,
            inst->id.threadId))
        {
            inst->predictedTaken = true;
            inst->predictedTarget = inst_pc;
            branch.target = inst_pc;
        }
    } else {
        DPRINTF(Branch, "Not attempting prediction for inst: %s\n", *inst);
    }

    /* If we predict taken, set branch and update sequence numbers */
    if (inst->predictedTaken) {
        /* Update the predictionSeqNum and remember the streamSeqNum that it
         *  was associated with */
        thread.expectedStreamSeqNum = inst->id.streamSeqNum;

        BranchData new_branch = BranchData(BranchData::BranchPrediction,
            inst->id.threadId,
            inst->id.streamSeqNum, thread.predictionSeqNum + 1,
            inst->predictedTarget, inst);

        /* Mark with a new prediction number by the stream number of the
         *  instruction causing the prediction */
        thread.predictionSeqNum++;
        branch = new_branch;

        DPRINTF(Branch, "Branch predicted taken inst: %s target: %s"
            " new predictionSeqNum: %d\n",
            *inst, inst->predictedTarget, thread.predictionSeqNum);
    }
}
235
void
Fetch2::evaluate()
{
    /* Per-cycle worker for this stage: accept fetch lines from Fetch1,
     * discard lines made stale by branches/mispredictions, decode up to
     * outputWidth instructions from the scheduled thread, predict
     * branches, and forward the packed instructions to Decode. */

    /* Push input onto appropriate input buffer */
    if (!inp.outputWire->isBubble())
        inputBuffer[inp.outputWire->id.threadId].setTail(*inp.outputWire);

    ForwardInstData &insts_out = *out.inputWire;
    BranchData prediction;
    BranchData &branch_inp = *branchInp.outputWire;

    assert(insts_out.isBubble());

    /* React to branches from Execute to update local branch prediction
     * structures */
    updateBranchPrediction(branch_inp);

    /* If a branch arrives, don't try and do anything about it.  Only
     *  react to your own predictions */
    if (branch_inp.isStreamChange()) {
        DPRINTF(Fetch, "Dumping all input as a stream changing branch"
            " has arrived\n");
        dumpAllInput(branch_inp.threadId);
        fetchInfo[branch_inp.threadId].havePC = false;
    }

    assert(insts_out.isBubble());
    /* Even when blocked, clear out input lines with the wrong
     *  prediction sequence number */
    for (ThreadID tid = 0; tid < cpu.numThreads; tid++) {
        Fetch2ThreadInfo &thread = fetchInfo[tid];

        thread.blocked = !nextStageReserve[tid].canReserve();

        const ForwardLineData *line_in = getInput(tid);

        /* A line from the expected stream but with a stale prediction
         * number was fetched before our own redirect took effect */
        while (line_in &&
            thread.expectedStreamSeqNum == line_in->id.streamSeqNum &&
            thread.predictionSeqNum != line_in->id.predictionSeqNum)
        {
            DPRINTF(Fetch, "Discarding line %s"
                " due to predictionSeqNum mismatch (expected: %d)\n",
                line_in->id, thread.predictionSeqNum);

            popInput(tid);
            fetchInfo[tid].havePC = false;

            if (processMoreThanOneInput) {
                DPRINTF(Fetch, "Wrapping\n");
                line_in = getInput(tid);
            } else {
                line_in = NULL;
            }
        }
    }

    ThreadID tid = getScheduledThread();
    DPRINTF(Fetch, "Scheduled Thread: %d\n", tid);

    assert(insts_out.isBubble());
    if (tid != InvalidThreadID) {
        Fetch2ThreadInfo &fetch_info = fetchInfo[tid];

        const ForwardLineData *line_in = getInput(tid);

        unsigned int output_index = 0;

        /* Pack instructions into the output while we can.  This may involve
         * using more than one input line.  Note that lineWidth will be 0
         * for faulting lines */
        while (line_in &&
            (line_in->isFault() ||
                fetch_info.inputIndex < line_in->lineWidth) && /* More input */
            output_index < outputWidth && /* More output to fill */
            prediction.isBubble() /* No predicted branch */)
        {
            ThreadContext *thread = cpu.getContext(line_in->id.threadId);
            TheISA::Decoder *decoder = thread->getDecoderPtr();

            /* Discard line due to prediction sequence number being wrong but
             * without the streamSeqNum number having changed */
            bool discard_line =
                fetch_info.expectedStreamSeqNum == line_in->id.streamSeqNum &&
                fetch_info.predictionSeqNum != line_in->id.predictionSeqNum;

            /* Set the PC if the stream changes.  Setting havePC to false in
             *  a previous cycle handles all other change of flow of control
             *  issues */
            bool set_pc = fetch_info.lastStreamSeqNum != line_in->id.streamSeqNum;

            if (!discard_line && (!fetch_info.havePC || set_pc)) {
                /* Set the inputIndex to be the MachInst-aligned offset
                 *  from lineBaseAddr of the new PC value */
                fetch_info.inputIndex =
                    (line_in->pc.instAddr() & BaseCPU::PCMask) -
                    line_in->lineBaseAddr;
                DPRINTF(Fetch, "Setting new PC value: %s inputIndex: 0x%x"
                    " lineBaseAddr: 0x%x lineWidth: 0x%x\n",
                    line_in->pc, fetch_info.inputIndex, line_in->lineBaseAddr,
                    line_in->lineWidth);
                fetch_info.pc = line_in->pc;
                fetch_info.havePC = true;
                decoder->reset();
            }

            /* The generated instruction.  Leave as NULL if no instruction
             *  is to be packed into the output */
            MinorDynInstPtr dyn_inst = NULL;

            if (discard_line) {
                /* Rest of line was from an older prediction in the same
                 *  stream */
                DPRINTF(Fetch, "Discarding line %s (from inputIndex: %d)"
                    " due to predictionSeqNum mismatch (expected: %d)\n",
                    line_in->id, fetch_info.inputIndex,
                    fetch_info.predictionSeqNum);
            } else if (line_in->isFault()) {
                /* Pack a fault as a MinorDynInst with ->fault set */

                /* Make a new instruction and pick up the line, stream,
                 *  prediction, thread ids from the incoming line */
                dyn_inst = new MinorDynInst(line_in->id);

                /* Fetch and prediction sequence numbers originate here */
                dyn_inst->id.fetchSeqNum = fetch_info.fetchSeqNum;
                dyn_inst->id.predictionSeqNum = fetch_info.predictionSeqNum;
                /* To complete the set, test that exec sequence number has
                 *  not been set */
                assert(dyn_inst->id.execSeqNum == 0);

                dyn_inst->pc = fetch_info.pc;

                /* Pack a faulting instruction but allow other
                 *  instructions to be generated. (Fetch2 makes no
                 *  immediate judgement about streamSeqNum) */
                dyn_inst->fault = line_in->fault;
                DPRINTF(Fetch, "Fault being passed output_index: "
                    "%d: %s\n", output_index, dyn_inst->fault->name());
            } else {
                uint8_t *line = line_in->line;

                /* The instruction is wholly in the line, can just
                 *  assign */
                auto inst_word = *reinterpret_cast<TheISA::MachInst *>
                                  (line + fetch_info.inputIndex);

                if (!decoder->instReady()) {
                    decoder->moreBytes(fetch_info.pc,
                        line_in->lineBaseAddr + fetch_info.inputIndex,
                        inst_word);
                    DPRINTF(Fetch, "Offering MachInst to decoder addr: 0x%x\n",
                        line_in->lineBaseAddr + fetch_info.inputIndex);
                }

                /* Maybe make the above a loop to accommodate ISAs with
                 *  instructions longer than sizeof(MachInst) */

                if (decoder->instReady()) {
                    /* Make a new instruction and pick up the line, stream,
                     *  prediction, thread ids from the incoming line */
                    dyn_inst = new MinorDynInst(line_in->id);

                    /* Fetch and prediction sequence numbers originate here */
                    dyn_inst->id.fetchSeqNum = fetch_info.fetchSeqNum;
                    dyn_inst->id.predictionSeqNum = fetch_info.predictionSeqNum;
                    /* To complete the set, test that exec sequence number
                     *  has not been set */
                    assert(dyn_inst->id.execSeqNum == 0);

                    /* Note that the decoder can update the given PC.
                     *  Remember not to assign it until *after* calling
                     *  decode */
                    StaticInstPtr decoded_inst = decoder->decode(fetch_info.pc);
                    dyn_inst->staticInst = decoded_inst;

                    dyn_inst->pc = fetch_info.pc;
                    DPRINTF(Fetch, "decoder inst %s\n", *dyn_inst);

                    // Collect some basic inst class stats
                    if (decoded_inst->isLoad())
                        loadInstructions++;
                    else if (decoded_inst->isStore())
                        storeInstructions++;
                    else if (decoded_inst->isAtomic())
                        amoInstructions++;
                    else if (decoded_inst->isVector())
                        vecInstructions++;
                    else if (decoded_inst->isFloating())
                        fpInstructions++;
                    else if (decoded_inst->isInteger())
                        intInstructions++;

                    DPRINTF(Fetch, "Instruction extracted from line %s"
                        " lineWidth: %d output_index: %d inputIndex: %d"
                        " pc: %s inst: %s\n",
                        line_in->id,
                        line_in->lineWidth, output_index, fetch_info.inputIndex,
                        fetch_info.pc, *dyn_inst);

#if THE_ISA == X86_ISA || THE_ISA == ARM_ISA
                    /* In SE mode, it's possible to branch to a microop when
                     *  replaying faults such as page faults (or simply
                     *  intra-microcode branches in X86).  Unfortunately,
                     *  as Minor has micro-op decomposition in a separate
                     *  pipeline stage from instruction decomposition, the
                     *  following advancePC (which may follow a branch with
                     *  microPC() != 0) *must* see a fresh macroop.  This
                     *  kludge should be improved with an addition to PCState
                     *  but I offer it in this form for the moment
                     *
                     * X86 can branch within microops so we need to deal with
                     *  the case that, after a branch, the first un-advanced PC
                     *  may be pointing to a microop other than 0.  Once
                     *  advanced, however, the microop number *must* be 0 */
                    fetch_info.pc.upc(0);
                    fetch_info.pc.nupc(1);
#endif

                    /* Advance PC for the next instruction */
                    TheISA::advancePC(fetch_info.pc, decoded_inst);

                    /* Predict any branches and issue a branch if
                     *  necessary */
                    predictBranch(dyn_inst, prediction);
                } else {
                    DPRINTF(Fetch, "Inst not ready yet\n");
                }

                /* Step on the pointer into the line if there's no
                 *  complete instruction waiting */
                if (decoder->needMoreBytes()) {
                    fetch_info.inputIndex += sizeof(TheISA::MachInst);

                DPRINTF(Fetch, "Updated inputIndex value PC: %s"
                    " inputIndex: 0x%x lineBaseAddr: 0x%x lineWidth: 0x%x\n",
                    line_in->pc, fetch_info.inputIndex, line_in->lineBaseAddr,
                    line_in->lineWidth);
                }
            }

            if (dyn_inst) {
                /* Step to next sequence number */
                fetch_info.fetchSeqNum++;

                /* Correctly size the output before writing */
                if (output_index == 0) {
                    insts_out.resize(outputWidth);
                }
                /* Pack the generated dynamic instruction into the output */
                insts_out.insts[output_index] = dyn_inst;
                output_index++;

                /* Output MinorTrace instruction info for
                 *  pre-microop decomposition macroops */
                if (DTRACE(MinorTrace) && !dyn_inst->isFault() &&
                    dyn_inst->staticInst->isMacroop())
                {
                    dyn_inst->minorTraceInst(*this);
                }
            }

            /* Remember the streamSeqNum of this line so we can tell when
             *  we change stream */
            fetch_info.lastStreamSeqNum = line_in->id.streamSeqNum;

            /* Asked to discard line or there was a branch or fault */
            if (!prediction.isBubble() || /* The remains of a
                    line with a prediction in it */
                line_in->isFault() /* A line which is just a fault */)
            {
                DPRINTF(Fetch, "Discarding all input on branch/fault\n");
                dumpAllInput(tid);
                fetch_info.havePC = false;
                line_in = NULL;
            } else if (discard_line) {
                /* Just discard one line, one's behind it may have new
                 *  stream sequence numbers.  There's a DPRINTF above
                 *  for this event */
                popInput(tid);
                fetch_info.havePC = false;
                line_in = NULL;
            } else if (fetch_info.inputIndex == line_in->lineWidth) {
                /* Got to end of a line, pop the line but keep PC
                 *  in case this is a line-wrapping inst. */
                popInput(tid);
                line_in = NULL;
            }

            if (!line_in && processMoreThanOneInput) {
                DPRINTF(Fetch, "Wrapping\n");
                line_in = getInput(tid);
            }
        }

        /* The rest of the output (if any) should already have been packed
         *  with bubble instructions by insts_out's initialisation */
    }
    if (tid == InvalidThreadID) {
        assert(insts_out.isBubble());
    }
    /** Reserve a slot in the next stage and output data */
    *predictionOut.inputWire = prediction;

    /* If we generated output, reserve space for the result in the next stage
     *  and mark the stage as being active this cycle */
    if (!insts_out.isBubble()) {
        /* Note activity of following buffer */
        cpu.activityRecorder->activity();
        insts_out.threadId = tid;
        nextStageReserve[tid].reserve();
    }

    /* If we still have input to process and somewhere to put it,
     *  mark stage as active */
    for (ThreadID i = 0; i < cpu.numThreads; i++)
    {
        if (getInput(i) && nextStageReserve[i].canReserve()) {
            cpu.activityRecorder->activateStage(Pipeline::Fetch2StageId);
            break;
        }
    }

    /* Make sure the input (if any left) is pushed */
    if (!inp.outputWire->isBubble())
        inputBuffer[inp.outputWire->id.threadId].pushTail();
}
562
563 inline ThreadID
564 Fetch2::getScheduledThread()
565 {
566 /* Select thread via policy. */
567 std::vector<ThreadID> priority_list;
568
569 switch (cpu.threadPolicy) {
570 case Enums::SingleThreaded:
571 priority_list.push_back(0);
572 break;
573 case Enums::RoundRobin:
574 priority_list = cpu.roundRobinPriority(threadPriority);
575 break;
576 case Enums::Random:
577 priority_list = cpu.randomPriority();
578 break;
579 default:
580 panic("Unknown fetch policy");
581 }
582
583 for (auto tid : priority_list) {
584 if (getInput(tid) && !fetchInfo[tid].blocked) {
585 threadPriority = tid;
586 return tid;
587 }
588 }
589
590 return InvalidThreadID;
591 }
592
593 bool
594 Fetch2::isDrained()
595 {
596 for (const auto &buffer : inputBuffer) {
597 if (!buffer.empty())
598 return false;
599 }
600
601 return (*inp.outputWire).isBubble() &&
602 (*predictionOut.inputWire).isBubble();
603 }
604
void
Fetch2::regStats()
{
    /* Register the per-class counters of successfully decoded
     * instructions with the statistics framework */
    using namespace Stats;

    intInstructions
        .name(name() + ".int_instructions")
        .desc("Number of integer instructions successfully decoded")
        .flags(total);

    fpInstructions
        .name(name() + ".fp_instructions")
        .desc("Number of floating point instructions successfully decoded")
        .flags(total);

    vecInstructions
        .name(name() + ".vec_instructions")
        .desc("Number of SIMD instructions successfully decoded")
        .flags(total);

    loadInstructions
        .name(name() + ".load_instructions")
        .desc("Number of memory load instructions successfully decoded")
        .flags(total);

    storeInstructions
        .name(name() + ".store_instructions")
        .desc("Number of memory store instructions successfully decoded")
        .flags(total);

    amoInstructions
        .name(name() + ".amo_instructions")
        .desc("Number of memory atomic instructions successfully decoded")
        .flags(total);
}
640
void
Fetch2::minorTrace() const
{
    /* Emit MinorTrace state for this stage.
     * NOTE(review): only thread 0's state and buffer are reported —
     * presumably a limitation of the MinorTrace format; confirm before
     * relying on this for multi-threaded configs. */
    std::ostringstream data;

    if (fetchInfo[0].blocked)
        data << 'B';  /* Stage is blocked: next stage can't accept input */
    else
        (*out.inputWire).reportData(data);

    MINORTRACE("inputIndex=%d havePC=%d predictionSeqNum=%d insts=%s\n",
        fetchInfo[0].inputIndex, fetchInfo[0].havePC, fetchInfo[0].predictionSeqNum, data.str());
    inputBuffer[0].minorTrace();
}
655
656 }