2 * Copyright (c) 2013-2014 ARM Limited
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
14 * Redistribution and use in source and binary forms, with or without
15 * modification, are permitted provided that the following conditions are
16 * met: redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer;
18 * redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution;
21 * neither the name of the copyright holders nor the names of its
22 * contributors may be used to endorse or promote products derived from
23 * this software without specific prior written permission.
25 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
26 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
27 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
28 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
29 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
30 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
31 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
32 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
33 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
34 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
35 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
37 * Authors: Andrew Bardsley
40 #include "arch/locked_mem.hh"
41 #include "arch/registers.hh"
42 #include "arch/utility.hh"
43 #include "cpu/minor/cpu.hh"
44 #include "cpu/minor/exec_context.hh"
45 #include "cpu/minor/execute.hh"
46 #include "cpu/minor/fetch1.hh"
47 #include "cpu/minor/lsq.hh"
48 #include "cpu/op_class.hh"
49 #include "debug/Activity.hh"
50 #include "debug/Branch.hh"
51 #include "debug/Drain.hh"
52 #include "debug/MinorExecute.hh"
53 #include "debug/MinorInterrupt.hh"
54 #include "debug/MinorMem.hh"
55 #include "debug/MinorTrace.hh"
56 #include "debug/PCEvent.hh"
/* Execute stage constructor for the Minor CPU pipeline.
 * Copies issue/commit limit parameters, builds the LSQ, scoreboard and
 * input buffer, creates one FUPipeline per configured MinorFU, checks FU
 * coverage of all OpClasses, and sizes the in-flight instruction queues.
 * NOTE(review): this text is a lossy extraction -- the leading integers are
 * the original file's line numbers fused into the content, and gaps in that
 * numbering (e.g. 62, 66-69) mark dropped lines; that is why some member
 * initializers (and several closing braces below) appear to be missing.
 * Code tokens are preserved byte-for-byte. */
61 Execute::Execute(const std::string
&name_
,
63 MinorCPUParams
/* NOTE(review): "¶ms" is mojibake of "&params" ("&para" swallowed by an
 * HTML-entity pass) -- confirm against the upstream source. */
¶ms
,
64 Latch
<ForwardInstData
>::Output inp_
,
65 Latch
<BranchData
>::Input out_
) :
70 issueLimit(params
.executeIssueLimit
),
71 memoryIssueLimit(params
.executeMemoryIssueLimit
),
72 commitLimit(params
.executeCommitLimit
),
73 memoryCommitLimit(params
.executeMemoryCommitLimit
),
74 processMoreThanOneInput(params
.executeCycleInput
),
75 fuDescriptions(*params
.executeFuncUnits
),
76 numFuncUnits(fuDescriptions
.funcUnits
.size()),
77 setTraceTimeOnCommit(params
.executeSetTraceTimeOnCommit
),
78 setTraceTimeOnIssue(params
.executeSetTraceTimeOnIssue
),
79 allowEarlyMemIssue(params
.executeAllowEarlyMemoryIssue
),
80 noCostFUIndex(fuDescriptions
.funcUnits
.size() + 1),
81 lsq(name_
+ ".lsq", name_
+ ".dcache_port",
83 params
.executeMaxAccessesInMemory
,
84 params
.executeMemoryWidth
,
85 params
.executeLSQRequestsQueueSize
,
86 params
.executeLSQTransfersQueueSize
,
87 params
.executeLSQStoreBufferSize
,
88 params
.executeLSQMaxStoreBufferStoresPerCycle
),
89 scoreboard(name_
+ ".scoreboard"),
90 inputBuffer(name_
+ ".inputBuffer", "insts",
91 params
.executeInputBufferSize
),
93 lastCommitWasEndOfMacroop(true),
94 instsBeingCommitted(params
.executeCommitLimit
),
95 streamSeqNum(InstId::firstStreamSeqNum
),
96 lastPredictionSeqNum(InstId::firstPredictionSeqNum
),
97 drainState(NotDraining
)
/* Sanity-check the configured limits; fatal() aborts the simulation. */
99 if (commitLimit
< 1) {
100 fatal("%s: executeCommitLimit must be >= 1 (%d)\n", name_
,
/* NOTE(review): this message says "executeCommitLimit" but the condition
 * tests issueLimit -- looks like a copy/paste slip; the message should
 * presumably read "executeIssueLimit". Confirm before changing. */
104 if (issueLimit
< 1) {
105 fatal("%s: executeCommitLimit must be >= 1 (%d)\n", name_
,
109 if (memoryIssueLimit
< 1) {
110 fatal("%s: executeMemoryIssueLimit must be >= 1 (%d)\n", name_
,
114 if (memoryCommitLimit
> commitLimit
) {
115 fatal("%s: executeMemoryCommitLimit (%d) must be <="
116 " executeCommitLimit (%d)\n",
117 name_
, memoryCommitLimit
, commitLimit
);
120 if (params
.executeInputBufferSize
< 1) {
121 fatal("%s: executeInputBufferSize must be >= 1 (%d)\n", name_
,
122 params
.executeInputBufferSize
);
/* NOTE(review): exact duplicate of the executeInputBufferSize check just
 * above -- redundant; possibly an artefact of the extraction or a second
 * parameter's check whose name lines were lost. Verify upstream. */
125 if (params
.executeInputBufferSize
< 1) {
126 fatal("%s: executeInputBufferSize must be >= 1 (%d)\n", name_
,
127 params
.executeInputBufferSize
);
130 /* This should be large enough to count all the in-FU instructions
131 * which need to be accounted for in the inFlightInsts
133 unsigned int total_slots
= 0;
135 /* Make FUPipelines for each MinorFU */
136 for (unsigned int i
= 0; i
< numFuncUnits
; i
++) {
137 std::ostringstream fu_name
;
138 MinorFU
*fu_description
= fuDescriptions
.funcUnits
[i
];
140 /* Note the total number of instruction slots (for sizing
141 * the inFlightInst queue) and the maximum latency of any FU
142 * (for sizing the activity recorder) */
143 total_slots
+= fu_description
->opLat
;
145 fu_name
<< name_
<< ".fu." << i
;
147 FUPipeline
*fu
= new FUPipeline(fu_name
.str(), *fu_description
, cpu
);
149 funcUnits
.push_back(fu
);
152 /** Check that there is a functional unit for all operation classes */
153 for (int op_class
= No_OpClass
+ 1; op_class
< Num_OpClasses
; op_class
++) {
154 bool found_fu
= false;
155 unsigned int fu_index
= 0;
157 while (fu_index
< numFuncUnits
&& !found_fu
)
159 if (funcUnits
[fu_index
]->provides(
160 static_cast<OpClass
>(op_class
)))
/* Reached when the scan above finds no capable FU for this OpClass;
 * the loop-body lines (orig. 161-167) were dropped by the extraction. */
168 warn("No functional unit for OpClass %s\n",
169 Enums::OpClassStrings
[op_class
]);
/* Size the in-order in-flight queues from the summed FU opLat slots. */
173 inFlightInsts
= new Queue
<QueuedInst
,
174 ReportTraitsAdaptor
<QueuedInst
> >(
175 name_
+ ".inFlightInsts", "insts", total_slots
);
177 inFUMemInsts
= new Queue
<QueuedInst
,
178 ReportTraitsAdaptor
<QueuedInst
> >(
179 name_
+ ".inFUMemInsts", "insts", total_slots
);
/* Execute::getInput(): peek at the head of inputBuffer and return a pointer
 * to it, or NULL if the buffer is empty or the head is a bubble.
 * NOTE(review): the "Execute::getInput()" signature line itself was dropped
 * by the extraction (original numbering jumps 182 -> 185).  The trailing
 * "if (!inputBuffer.empty())" fragment (orig. line 198) is the start of the
 * separate input-popping routine whose remaining lines were also lost. */
182 const ForwardInstData
*
185 /* Get a line from the inputBuffer to work with */
186 if (!inputBuffer
.empty()) {
187 const ForwardInstData
&head
= inputBuffer
.front();
189 return (head
.isBubble() ? NULL
: &(inputBuffer
.front()));
/* Fragment of the pop routine (discards the consumed input line). */
198 if (!inputBuffer
.empty())
/* Execute::tryToBranch: after an instruction (or fault) has executed,
 * compare the thread's PC before and after execution against the branch
 * predictor's expectation and classify the outcome into a
 * BranchData::Reason (correct/bad prediction, wrong target, unpredicted
 * branch, or no branch), then hand it to updateBranchData() to signal
 * upstream stages.  Serializing micro-op ends force a branch so fetch
 * restarts from a clean stream. */
205 Execute::tryToBranch(MinorDynInstPtr inst
, Fault fault
, BranchData
&branch
)
207 ThreadContext
*thread
= cpu
.getContext(inst
->id
.threadId
);
208 const TheISA::PCState
&pc_before
= inst
->pc
;
209 TheISA::PCState target
= thread
->pcState();
211 /* Force a branch for SerializeAfter instructions at the end of micro-op
212 * sequence when we're not suspended */
213 bool force_branch
= thread
->status() != ThreadContext::Suspended
&&
215 inst
->isLastOpInInst() &&
216 (inst
->staticInst
->isSerializeAfter() ||
217 inst
->staticInst
->isIprAccess());
219 DPRINTF(Branch
, "tryToBranch before: %s after: %s%s\n",
220 pc_before
, target
, (force_branch
? " (forcing)" : ""));
222 /* Will we change the PC to something other than the next instruction? */
/* NOTE(review): the right-hand side of this condition continues on lines
 * (orig. 224-225) that the extraction dropped. */
223 bool must_branch
= pc_before
!= target
||
227 /* The reason for the branch data we're about to generate, set below */
228 BranchData::Reason reason
= BranchData::NoBranch
;
230 if (fault
== NoFault
)
232 TheISA::advancePC(target
, inst
->staticInst
);
233 thread
->pcState(target
);
235 DPRINTF(Branch
, "Advancing current PC from: %s to: %s\n",
239 if (inst
->predictedTaken
&& !force_branch
) {
240 /* Predicted to branch */
242 /* No branch was taken, change stream to get us back to the
243 * intended PC value */
244 DPRINTF(Branch
, "Predicted a branch from 0x%x to 0x%x but"
245 " none happened inst: %s\n",
246 inst
->pc
.instAddr(), inst
->predictedTarget
.instAddr(), *inst
);
248 reason
= BranchData::BadlyPredictedBranch
;
249 } else if (inst
->predictedTarget
== target
) {
250 /* Branch prediction got the right target, kill the branch and
252 * Note that this information to the branch predictor might get
253 * overwritten by a "real" branch during this cycle */
254 DPRINTF(Branch
, "Predicted a branch from 0x%x to 0x%x correctly"
256 inst
->pc
.instAddr(), inst
->predictedTarget
.instAddr(), *inst
);
258 reason
= BranchData::CorrectlyPredictedBranch
;
260 /* Branch prediction got the wrong target */
261 DPRINTF(Branch
, "Predicted a branch from 0x%x to 0x%x"
262 " but got the wrong target (actual: 0x%x) inst: %s\n",
263 inst
->pc
.instAddr(), inst
->predictedTarget
.instAddr(),
264 target
.instAddr(), *inst
);
266 reason
= BranchData::BadlyPredictedBranchTarget
;
268 } else if (must_branch
) {
269 /* Unpredicted branch */
270 DPRINTF(Branch
, "Unpredicted branch from 0x%x to 0x%x inst: %s\n",
271 inst
->pc
.instAddr(), target
.instAddr(), *inst
);
273 reason
= BranchData::UnpredictedBranch
;
275 /* No branch at all */
276 reason
= BranchData::NoBranch
;
/* Publish the classified outcome to the output latch. */
279 updateBranchData(reason
, inst
, target
, branch
);
/* Execute::updateBranchData: package a branch outcome into the BranchData
 * output.  Real stream changes bump streamSeqNum (the bumping statement,
 * orig. line 291, was dropped by the extraction); prediction sequence
 * numbers are carried through unchanged. */
283 Execute::updateBranchData(
284 BranchData::Reason reason
,
285 MinorDynInstPtr inst
, const TheISA::PCState
&target
,
288 if (reason
!= BranchData::NoBranch
) {
289 /* Bump up the stream sequence number on a real branch*/
290 if (BranchData::isStreamChange(reason
))
293 /* Branches (even mis-predictions) don't change the predictionSeqNum,
294 * just the streamSeqNum */
295 branch
= BranchData(reason
, streamSeqNum
,
296 /* Maintaining predictionSeqNum if there's no inst is just a
297 * courtesy and looks better on minorview */
298 (inst
->isBubble() ? lastPredictionSeqNum
299 : inst
->id
.predictionSeqNum
),
302 DPRINTF(Branch
, "Branch data signalled: %s\n", branch
);
/* Execute::handleMemResponse: complete a memory instruction when its LSQ
 * response arrives.  Dispatches on the response state: TLB/memory fault
 * (invoked unless the inst is a prefetch), failed request (predicate
 * cleared), error packet (fatal), or a normal load/store/prefetch response
 * (completeAcc + possible store-buffer forwarding).  Finishes by popping
 * the response, updating trace predicate state, doing commit accounting
 * and checking for a branch. */
307 Execute::handleMemResponse(MinorDynInstPtr inst
,
308 LSQ::LSQRequestPtr response
, BranchData
&branch
, Fault
&fault
)
310 ThreadID thread_id
= inst
->id
.threadId
;
311 ThreadContext
*thread
= cpu
.getContext(thread_id
);
313 ExecContext
context(cpu
, *cpu
.threads
[thread_id
], *this, inst
);
315 PacketPtr packet
= response
->packet
;
317 bool is_load
= inst
->staticInst
->isLoad();
318 bool is_store
= inst
->staticInst
->isStore();
319 bool is_prefetch
= inst
->staticInst
->isDataPrefetch();
321 /* If true, the trace's predicate value will be taken from the exec
322 * context predicate, otherwise, it will be set to false */
323 bool use_context_predicate
= true;
325 if (response
->fault
!= NoFault
) {
326 /* Invoke memory faults. */
327 DPRINTF(MinorMem
, "Completing fault from DTLB access: %s\n",
328 response
->fault
->name());
330 if (inst
->staticInst
->isPrefetch()) {
331 DPRINTF(MinorMem
, "Not taking fault on prefetch: %s\n",
332 response
->fault
->name());
334 /* Don't assign to fault */
336 /* Take the fault raised during the TLB/memory access */
337 fault
= response
->fault
;
339 fault
->invoke(thread
, inst
->staticInst
);
341 } else if (!packet
) {
342 DPRINTF(MinorMem
, "Completing failed request inst: %s\n",
344 use_context_predicate
= false;
345 } else if (packet
->isError()) {
346 DPRINTF(MinorMem
, "Trying to commit error response: %s\n",
349 fatal("Received error response packet for inst: %s\n", *inst
);
350 } else if (is_store
|| is_load
|| is_prefetch
) {
353 DPRINTF(MinorMem
, "Memory response inst: %s addr: 0x%x size: %d\n",
354 *inst
, packet
->getAddr(), packet
->getSize());
356 if (is_load
&& packet
->getSize() > 0) {
357 DPRINTF(MinorMem
, "Memory data[0]: 0x%x\n",
358 static_cast<unsigned int>(packet
->getConstPtr
<uint8_t>()[0]));
361 /* Complete the memory access instruction */
/* NOTE(review): completeAcc's trailing argument(s) (orig. line 363)
 * were dropped by the extraction. */
362 fault
= inst
->staticInst
->completeAcc(packet
, &context
,
365 if (fault
!= NoFault
) {
366 /* Invoke fault created by instruction completion */
367 DPRINTF(MinorMem
, "Fault in memory completeAcc: %s\n",
369 fault
->invoke(thread
, inst
->staticInst
);
371 /* Stores need to be pushed into the store buffer to finish
373 if (response
->needsToBeSentToStoreBuffer())
374 lsq
.sendStoreToStoreBuffer(response
);
377 fatal("There should only ever be reads, "
378 "writes or faults at this point\n");
381 lsq
.popResponse(response
);
383 if (inst
->traceData
) {
384 inst
->traceData
->setPredicate((use_context_predicate
?
385 context
.readPredicate() : false));
388 doInstCommitAccounting(inst
);
390 /* Generate output to account for branches */
391 tryToBranch(inst
, fault
, branch
);
/* Execute::isInterrupted: true if the CPU reports a pending interrupt for
 * the given thread's context. */
395 Execute::isInterrupted(ThreadID thread_id
) const
397 return cpu
.checkInterrupts(cpu
.getContext(thread_id
));
/* Execute::takeInterrupt: ask the interrupt controller for a pending
 * interrupt on this thread and, if there is one, invoke it (which must
 * update pcState), then signal an Interrupt branch to redirect fetch.
 * Returns whether an interrupt was taken.  Asserts no LSQ accesses are
 * still in flight when the interrupt is invoked. */
401 Execute::takeInterrupt(ThreadID thread_id
, BranchData
&branch
)
403 DPRINTF(MinorInterrupt
, "Considering interrupt status from PC: %s\n",
404 cpu
.getContext(thread_id
)->pcState());
406 Fault interrupt
= cpu
.getInterruptController()->getInterrupt
407 (cpu
.getContext(thread_id
));
409 if (interrupt
!= NoFault
) {
410 /* The interrupt *must* set pcState */
411 cpu
.getInterruptController()->updateIntrInfo
412 (cpu
.getContext(thread_id
));
413 interrupt
->invoke(cpu
.getContext(thread_id
));
415 assert(!lsq
.accessesInFlight());
417 DPRINTF(MinorInterrupt
, "Invoking interrupt: %s to PC: %s\n",
418 interrupt
->name(), cpu
.getContext(thread_id
)->pcState());
420 /* Assume that an interrupt *must* cause a branch. Assert this? */
422 updateBranchData(BranchData::Interrupt
, MinorDynInst::bubble(),
423 cpu
.getContext(thread_id
)->pcState(), branch
);
426 return interrupt
!= NoFault
;
/* Execute::executeMemRefInst: first half of a memory access -- call
 * initiateAcc() to compute the effective address and issue the request to
 * the LSQ.  Sets passed_predicate from the exec context; a predicated-false
 * access is pushed as a failed request and the thread PC is restored so
 * commit can handle it.  Bails out early if the LSQ cannot accept a new
 * request this cycle. */
430 Execute::executeMemRefInst(MinorDynInstPtr inst
, BranchData
&branch
,
431 bool &passed_predicate
, Fault
&fault
)
435 /* Set to true if the mem op. is issued and sent to the mem system */
436 passed_predicate
= false;
438 if (!lsq
.canRequest()) {
439 /* Not acting on instruction yet as the memory
443 ThreadContext
*thread
= cpu
.getContext(inst
->id
.threadId
);
444 TheISA::PCState old_pc
= thread
->pcState();
/* NOTE(review): the ExecContext constructor's trailing arguments
 * (orig. lines 447-448) were dropped by the extraction. */
446 ExecContext
context(cpu
, *cpu
.threads
[inst
->id
.threadId
],
449 DPRINTF(MinorExecute
, "Initiating memRef inst: %s\n", *inst
);
451 Fault init_fault
= inst
->staticInst
->initiateAcc(&context
,
454 if (init_fault
!= NoFault
) {
455 DPRINTF(MinorExecute
, "Fault on memory inst: %s"
456 " initiateAcc: %s\n", *inst
, init_fault
->name());
459 /* Only set this if the instruction passed its
461 passed_predicate
= context
.readPredicate();
463 /* Set predicate in tracing */
465 inst
->traceData
->setPredicate(passed_predicate
);
467 /* If the instruction didn't pass its predicate (and so will not
468 * progress from here) Try to branch to correct and branch
470 if (!passed_predicate
) {
471 /* Leave it up to commit to handle the fault */
472 lsq
.pushFailedRequest(inst
);
476 /* Restore thread PC */
477 thread
->pcState(old_pc
);
/* File-local helper: advance a cyclic buffer index, wrapping back to 0 at
 * cycle_size.  NOTE(review): the wrap assignment and return statements
 * (orig. lines 491-494) were dropped by the extraction. */
484 /** Increment a cyclic buffer index for indices [0, cycle_size-1] */
486 cyclicIndexInc(unsigned int index
, unsigned int cycle_size
)
488 unsigned int ret
= index
+ 1;
490 if (ret
== cycle_size
)
/* File-local helper: decrement a cyclic buffer index, wrapping to
 * cycle_size - 1 below 0.  NOTE(review): the signature body's leading
 * lines (orig. 499-502: the zero test and decrement) were dropped by the
 * extraction; only the wrap assignment survives. */
496 /** Decrement a cyclic buffer index for indices [0, cycle_size-1] */
498 cyclicIndexDec(unsigned int index
, unsigned int cycle_size
)
503 ret
= cycle_size
- 1;
/* Execute::issue: in-order issue of instructions from the input buffer
 * into functional units, one FU scan per instruction starting at FU 0.
 * Handles bubbles, suspended threads, wrong-stream discards, the
 * only_issue_microops (interrupt/drain) restriction, no-cost instructions
 * (fake FU noCostFUIndex), FU capability/stall/occupancy checks,
 * scoreboard dependency checks, early memory issue bookkeeping
 * (instToWaitFor / memory barriers via the LSQ), and the per-cycle
 * issueLimit / memoryIssueLimit.  Returns the number of instructions
 * issued.  NOTE(review): many closing braces and a few statements are
 * absent below because the extraction dropped lines (see the gaps in the
 * fused original line numbers). */
509 Execute::issue(bool only_issue_microops
)
511 const ForwardInstData
*insts_in
= getInput();
513 /* Early termination if we have no instructions */
517 /* Start from the first FU */
518 unsigned int fu_index
= 0;
520 /* Remains true while instructions are still being issued. If any
521 * instruction fails to issue, this is set to false and we exit issue.
522 * This strictly enforces in-order issue. For other issue behaviours,
523 * a more complicated test in the outer while loop below is needed. */
526 /* Number of insts issues this cycle to check for issueLimit */
527 unsigned num_insts_issued
= 0;
529 /* Number of memory ops issues this cycle to check for memoryIssueLimit */
530 unsigned num_mem_insts_issued
= 0;
532 /* Number of instructions discarded this cycle in order to enforce a
533 * discardLimit. @todo, add that parameter? */
534 unsigned num_insts_discarded
= 0;
537 MinorDynInstPtr inst
= insts_in
->insts
[inputIndex
];
538 ThreadID thread_id
= inst
->id
.threadId
;
539 Fault fault
= inst
->fault
;
540 bool discarded
= false;
541 bool issued_mem_ref
= false;
543 if (inst
->isBubble()) {
546 } else if (cpu
.getContext(thread_id
)->status() ==
547 ThreadContext::Suspended
)
549 DPRINTF(MinorExecute
, "Not issuing inst: %s from suspended"
553 } else if (inst
->id
.streamSeqNum
!= streamSeqNum
) {
554 DPRINTF(MinorExecute
, "Discarding inst: %s as its stream"
555 " state was unexpected, expected: %d\n",
556 *inst
, streamSeqNum
);
559 } else if (fault
== NoFault
&& only_issue_microops
&&
560 /* Is this anything other than a non-first microop */
561 (!inst
->staticInst
->isMicroop() ||
562 !inst
->staticInst
->isFirstMicroop()))
564 DPRINTF(MinorExecute
, "Not issuing new non-microop inst: %s\n",
569 /* Try and issue an instruction into an FU, assume we didn't and
570 * fix that in the loop */
573 /* Try FU from 0 each instruction */
576 /* Try and issue a single instruction stepping through the
579 FUPipeline
*fu
= funcUnits
[fu_index
];
581 DPRINTF(MinorExecute
, "Trying to issue inst: %s to FU: %d\n",
584 /* Does the examined fu have the OpClass-related capability
585 * needed to execute this instruction? Faults can always
586 * issue to any FU but probably should just 'live' in the
587 * inFlightInsts queue rather than having an FU. */
588 bool fu_is_capable
= (!inst
->isFault() ?
589 fu
->provides(inst
->staticInst
->opClass()) : true);
591 if (inst
->isNoCostInst()) {
592 /* Issue free insts. to a fake numbered FU */
593 fu_index
= noCostFUIndex
;
595 /* And start the countdown on activity to allow
596 * this instruction to get to the end of its FU */
597 cpu
.activityRecorder
->activity();
599 /* Mark the destinations for this instruction as
601 scoreboard
.markupInstDests(inst
, cpu
.curCycle() +
602 Cycles(0), cpu
.getContext(thread_id
), false);
604 inst
->fuIndex
= noCostFUIndex
;
605 inst
->extraCommitDelay
= Cycles(0);
606 inst
->extraCommitDelayExpr
= NULL
;
608 /* Push the instruction onto the inFlight queue so
609 * it can be committed in order */
610 QueuedInst
fu_inst(inst
);
611 inFlightInsts
->push(fu_inst
);
615 } else if (!fu_is_capable
|| fu
->alreadyPushed()) {
617 if (!fu_is_capable
) {
618 DPRINTF(MinorExecute
, "Can't issue as FU: %d isn't"
619 " capable\n", fu_index
);
621 DPRINTF(MinorExecute
, "Can't issue as FU: %d is"
622 " already busy\n", fu_index
);
624 } else if (fu
->stalled
) {
625 DPRINTF(MinorExecute
, "Can't issue inst: %s into FU: %d,"
628 } else if (!fu
->canInsert()) {
629 DPRINTF(MinorExecute
, "Can't issue inst: %s to busy FU"
630 " for another: %d cycles\n",
631 *inst
, fu
->cyclesBeforeInsert());
/* FU can accept: look up any extra per-instruction timing info. */
633 MinorFUTiming
*timing
= (!inst
->isFault() ?
634 fu
->findTiming(inst
->staticInst
) : NULL
);
636 const std::vector
<Cycles
> *src_latencies
=
637 (timing
? &(timing
->srcRegsRelativeLats
)
640 const std::vector
<bool> *cant_forward_from_fu_indices
=
641 &(fu
->cantForwardFromFUIndices
);
643 if (timing
&& timing
->suppress
) {
644 DPRINTF(MinorExecute
, "Can't issue inst: %s as extra"
645 " decoding is suppressing it\n",
647 } else if (!scoreboard
.canInstIssue(inst
, src_latencies
,
648 cant_forward_from_fu_indices
,
649 cpu
.curCycle(), cpu
.getContext(thread_id
)))
651 DPRINTF(MinorExecute
, "Can't issue inst: %s yet\n",
654 /* Can insert the instruction into this FU */
655 DPRINTF(MinorExecute
, "Issuing inst: %s"
656 " into FU %d\n", *inst
,
659 Cycles extra_dest_retire_lat
= Cycles(0);
660 TimingExpr
*extra_dest_retire_lat_expr
= NULL
;
661 Cycles extra_assumed_lat
= Cycles(0);
663 /* Add the extraCommitDelay and extraAssumeLat to
664 * the FU pipeline timings */
666 extra_dest_retire_lat
=
667 timing
->extraCommitLat
;
668 extra_dest_retire_lat_expr
=
669 timing
->extraCommitLatExpr
;
/* NOTE(review): the assignment target for this expression
 * (orig. line 670, presumably extra_assumed_lat =) was
 * dropped by the extraction. */
671 timing
->extraAssumedLat
;
674 issued_mem_ref
= inst
->isMemRef();
676 QueuedInst
fu_inst(inst
);
678 /* Decorate the inst with FU details */
679 inst
->fuIndex
= fu_index
;
680 inst
->extraCommitDelay
= extra_dest_retire_lat
;
681 inst
->extraCommitDelayExpr
=
682 extra_dest_retire_lat_expr
;
684 if (issued_mem_ref
) {
685 /* Remember which instruction this memory op
686 * depends on so that initiateAcc can be called
688 if (allowEarlyMemIssue
) {
689 inst
->instToWaitFor
=
690 scoreboard
.execSeqNumToWaitFor(inst
,
691 cpu
.getContext(thread_id
));
693 if (lsq
.getLastMemBarrier() >
696 DPRINTF(MinorExecute
, "A barrier will"
697 " cause a delay in mem ref issue of"
698 " inst: %s until after inst"
699 " %d(exec)\n", *inst
,
700 lsq
.getLastMemBarrier());
702 inst
->instToWaitFor
=
703 lsq
.getLastMemBarrier();
705 DPRINTF(MinorExecute
, "Memory ref inst:"
706 " %s must wait for inst %d(exec)"
708 *inst
, inst
->instToWaitFor
);
711 inst
->canEarlyIssue
= true;
713 /* Also queue this instruction in the memory ref
714 * queue to ensure in-order issue to the LSQ */
715 DPRINTF(MinorExecute
, "Pushing mem inst: %s\n",
717 inFUMemInsts
->push(fu_inst
);
722 /* And start the countdown on activity to allow
723 * this instruction to get to the end of its FU */
724 cpu
.activityRecorder
->activity();
726 /* Mark the destinations for this instruction as
728 scoreboard
.markupInstDests(inst
, cpu
.curCycle() +
729 fu
->description
.opLat
+
730 extra_dest_retire_lat
+
732 cpu
.getContext(thread_id
),
733 issued_mem_ref
&& extra_assumed_lat
== Cycles(0));
735 /* Push the instruction onto the inFlight queue so
736 * it can be committed in order */
737 inFlightInsts
->push(fu_inst
);
744 } while (fu_index
!= numFuncUnits
&& !issued
);
747 DPRINTF(MinorExecute
, "Didn't issue inst: %s\n", *inst
);
751 /* Generate MinorTrace's MinorInst lines. Do this at commit
752 * to allow better instruction annotation? */
753 if (DTRACE(MinorTrace
) && !inst
->isBubble())
754 inst
->minorTraceInst(*this);
756 /* Mark up barriers in the LSQ */
757 if (!discarded
&& inst
->isInst() &&
758 inst
->staticInst
->isMemBarrier())
760 DPRINTF(MinorMem
, "Issuing memory barrier inst: %s\n", *inst
);
761 lsq
.issuedMemBarrierInst(inst
);
764 if (inst
->traceData
&& setTraceTimeOnIssue
) {
765 inst
->traceData
->setWhen(curTick());
769 num_mem_insts_issued
++;
772 num_insts_discarded
++;
773 } else if (!inst
->isBubble()) {
776 if (num_insts_issued
== issueLimit
)
777 DPRINTF(MinorExecute
, "Reached inst issue limit\n");
781 DPRINTF(MinorExecute
, "Stepping to next inst inputIndex: %d\n",
785 /* Got to the end of a line */
786 if (inputIndex
== insts_in
->width()) {
788 /* Set insts_in to null to force us to leave the surrounding
792 if (processMoreThanOneInput
) {
793 DPRINTF(MinorExecute
, "Wrapping\n");
794 insts_in
= getInput();
797 } while (insts_in
&& inputIndex
< insts_in
->width() &&
798 /* We still have instructions */
799 fu_index
!= numFuncUnits
&& /* Not visited all FUs */
800 issued
&& /* We've not yet failed to issue an instruction */
801 num_insts_issued
!= issueLimit
&& /* Still allowed to issue */
802 num_mem_insts_issued
!= memoryIssueLimit
);
804 return num_insts_issued
;
/* Execute::tryPCEvents: service the system's PC event queue for thread 0,
 * repeating while servicing changes the thread's PC (an event may itself
 * redirect execution).  Returns true if any event actually fired (i.e. the
 * service loop ran more than once). */
808 Execute::tryPCEvents()
810 ThreadContext
*thread
= cpu
.getContext(0);
811 unsigned int num_pc_event_checks
= 0;
813 /* Handle PC events on instructions */
816 oldPC
= thread
->instAddr();
817 cpu
.system
->pcEventQueue
.service(thread
);
818 num_pc_event_checks
++;
819 } while (oldPC
!= thread
->instAddr());
821 if (num_pc_event_checks
> 1) {
822 DPRINTF(PCEvent
, "Acting on PC Event to PC: %s\n",
826 return num_pc_event_checks
> 1;
/* Execute::doInstCommitAccounting: bump per-thread and system inst/op
 * counters for a committed (non-fault) instruction, service count-based
 * event queues, tag trace data with the op commit number, and notify
 * commit probes.  Full instructions are counted only at the last micro-op
 * of a macro-op. */
830 Execute::doInstCommitAccounting(MinorDynInstPtr inst
)
832 assert(!inst
->isFault());
834 MinorThread
*thread
= cpu
.threads
[inst
->id
.threadId
];
836 /* Increment the many and various inst and op counts in the
837 * thread and system */
838 if (!inst
->staticInst
->isMicroop() || inst
->staticInst
->isLastMicroop())
842 cpu
.stats
.numInsts
++;
843 cpu
.system
->totalNumInsts
++;
845 /* Act on events related to instruction counts */
846 cpu
.comInstEventQueue
[inst
->id
.threadId
]->serviceEvents(thread
->numInst
);
847 cpu
.system
->instEventQueue
.serviceEvents(cpu
.system
->totalNumInsts
);
853 /* Set the CP SeqNum to the numOps commit number */
855 inst
->traceData
->setCPSeq(thread
->numOp
);
857 cpu
.probeInstCommit(inst
->staticInst
);
/* Execute::commitInst: try to commit a single instruction.  Handles, in
 * order: suspended thread (stall unless interrupted), fault instructions
 * (invoke + branch check), memory references (first half via
 * executeMemRefInst; faults from early issue are deferred to the head of
 * inFlightInsts), memory barriers blocked on store-buffer space, and
 * ordinary instructions (StaticInst::execute + fault invoke + accounting +
 * branch check).  On completion it records the prediction sequence number
 * and signals a SuspendThread branch if the instruction suspended its
 * thread without branching.  Returns whether the instruction completed. */
861 Execute::commitInst(MinorDynInstPtr inst
, bool early_memory_issue
,
862 BranchData
&branch
, Fault
&fault
, bool &committed
,
863 bool &completed_mem_issue
)
865 ThreadID thread_id
= inst
->id
.threadId
;
866 ThreadContext
*thread
= cpu
.getContext(thread_id
);
868 bool completed_inst
= true;
871 /* Is the thread for this instruction suspended? In that case, just
872 * stall as long as there are no pending interrupts */
873 if (thread
->status() == ThreadContext::Suspended
&&
874 !isInterrupted(thread_id
))
876 DPRINTF(MinorExecute
, "Not committing inst from suspended thread"
877 " inst: %s\n", *inst
);
878 completed_inst
= false;
879 } else if (inst
->isFault()) {
880 ExecContext
context(cpu
, *cpu
.threads
[thread_id
], *this, inst
);
882 DPRINTF(MinorExecute
, "Fault inst reached Execute: %s\n",
883 inst
->fault
->name());
886 inst
->fault
->invoke(thread
, NULL
);
888 tryToBranch(inst
, fault
, branch
);
889 } else if (inst
->staticInst
->isMemRef()) {
890 /* Memory accesses are executed in two parts:
891 * executeMemRefInst -- calculates the EA and issues the access
892 * to memory. This is done here.
893 * handleMemResponse -- handles the response packet, done by
896 * While the memory access is in its FU, the EA is being
897 * calculated. At the end of the FU, when it is ready to
898 * 'commit' (in this function), the access is presented to the
899 * memory queues. When a response comes back from memory,
900 * Execute::commit will commit it.
902 bool predicate_passed
= false;
903 bool completed_mem_inst
= executeMemRefInst(inst
, branch
,
904 predicate_passed
, fault
);
906 if (completed_mem_inst
&& fault
!= NoFault
) {
907 if (early_memory_issue
) {
908 DPRINTF(MinorExecute
, "Fault in early executing inst: %s\n",
910 /* Don't execute the fault, just stall the instruction
911 * until it gets to the head of inFlightInsts */
912 inst
->canEarlyIssue
= false;
913 /* Not completed as we'll come here again to pick up
914 * the fault when we get to the end of the FU */
915 completed_inst
= false;
917 DPRINTF(MinorExecute
, "Fault in execute: %s\n",
919 fault
->invoke(thread
, NULL
);
921 tryToBranch(inst
, fault
, branch
);
922 completed_inst
= true;
925 completed_inst
= completed_mem_inst
;
927 completed_mem_issue
= completed_inst
;
928 } else if (inst
->isInst() && inst
->staticInst
->isMemBarrier() &&
929 !lsq
.canPushIntoStoreBuffer())
931 DPRINTF(MinorExecute
, "Can't commit data barrier inst: %s yet as"
932 " there isn't space in the store buffer\n", *inst
);
934 completed_inst
= false;
936 ExecContext
context(cpu
, *cpu
.threads
[thread_id
], *this, inst
);
938 DPRINTF(MinorExecute
, "Committing inst: %s\n", *inst
);
/* NOTE(review): execute()'s trailing argument(s) (orig. line 941)
 * were dropped by the extraction. */
940 fault
= inst
->staticInst
->execute(&context
,
943 /* Set the predicate for tracing and dump */
945 inst
->traceData
->setPredicate(context
.readPredicate());
949 if (fault
!= NoFault
) {
950 DPRINTF(MinorExecute
, "Fault in execute of inst: %s fault: %s\n",
951 *inst
, fault
->name());
952 fault
->invoke(thread
, inst
->staticInst
);
955 doInstCommitAccounting(inst
);
956 tryToBranch(inst
, fault
, branch
);
959 if (completed_inst
) {
960 /* Keep a copy of this instruction's predictionSeqNum just in case
961 * we need to issue a branch without an instruction (such as an
963 lastPredictionSeqNum
= inst
->id
.predictionSeqNum
;
965 /* Check to see if this instruction suspended the current thread. */
966 if (!inst
->isFault() &&
967 thread
->status() == ThreadContext::Suspended
&&
968 branch
.isBubble() && /* It didn't branch too */
969 !isInterrupted(thread_id
)) /* Don't suspend if we have
972 TheISA::PCState resume_pc
= cpu
.getContext(0)->pcState();
974 assert(resume_pc
.microPC() == 0);
976 DPRINTF(MinorInterrupt
, "Suspending thread: %d from Execute"
977 " inst: %s\n", inst
->id
.threadId
, *inst
);
979 cpu
.stats
.numFetchSuspends
++;
981 updateBranchData(BranchData::SuspendThread
, inst
, resume_pc
,
986 return completed_inst
;
990 Execute::commit(bool only_commit_microops
, bool discard
, BranchData
&branch
)
992 Fault fault
= NoFault
;
993 Cycles now
= cpu
.curCycle();
996 * Try and execute as many instructions from the end of FU pipelines as
997 * possible. This *doesn't* include actually advancing the pipelines.
999 * We do this by looping on the front of the inFlightInsts queue for as
1000 * long as we can find the desired instruction at the end of the
1001 * functional unit it was issued to without seeing a branch or a fault.
1002 * In this function, these terms are used:
1003 * complete -- The instruction has finished its passage through
1004 * its functional unit and its fate has been decided
1005 * (committed, discarded, issued to the memory system)
1006 * commit -- The instruction is complete(d), not discarded and has
1007 * its effects applied to the CPU state
1008 * discard(ed) -- The instruction is complete but not committed
1009 * as its streamSeqNum disagrees with the current
1010 * Execute::streamSeqNum
1012 * Commits are also possible from two other places:
1014 * 1) Responses returning from the LSQ
1015 * 2) Mem ops issued to the LSQ ('committed' from the FUs) earlier
1016 * than their position in the inFlightInsts queue, but after all
1017 * their dependencies are resolved.
1020 /* Has an instruction been completed? Once this becomes false, we stop
1021 * trying to complete instructions. */
1022 bool completed_inst
= true;
1024 /* Number of insts committed this cycle to check against commitLimit */
1025 unsigned int num_insts_committed
= 0;
1027 /* Number of memory access instructions committed to check against
1029 unsigned int num_mem_refs_committed
= 0;
1031 if (only_commit_microops
&& !inFlightInsts
->empty()) {
1032 DPRINTF(MinorInterrupt
, "Only commit microops %s %d\n",
1033 *(inFlightInsts
->front().inst
),
1034 lastCommitWasEndOfMacroop
);
1037 while (!inFlightInsts
->empty() && /* Some more instructions to process */
1038 !branch
.isStreamChange() && /* No real branch */
1039 fault
== NoFault
&& /* No faults */
1040 completed_inst
&& /* Still finding instructions to execute */
1041 num_insts_committed
!= commitLimit
/* Not reached commit limit */
1044 if (only_commit_microops
) {
1045 DPRINTF(MinorInterrupt
, "Committing tail of insts before"
1047 *(inFlightInsts
->front().inst
));
1050 QueuedInst
*head_inflight_inst
= &(inFlightInsts
->front());
1052 InstSeqNum head_exec_seq_num
=
1053 head_inflight_inst
->inst
->id
.execSeqNum
;
1055 /* The instruction we actually process if completed_inst
1056 * remains true to the end of the loop body.
1057 * Start by considering the the head of the in flight insts queue */
1058 MinorDynInstPtr inst
= head_inflight_inst
->inst
;
1060 bool committed_inst
= false;
1061 bool discard_inst
= false;
1062 bool completed_mem_ref
= false;
1063 bool issued_mem_ref
= false;
1064 bool early_memory_issue
= false;
1066 /* Must set this again to go around the loop */
1067 completed_inst
= false;
1069 /* If we're just completing a macroop before an interrupt or drain,
1070 * can we stil commit another microop (rather than a memory response)
1071 * without crosing into the next full instruction? */
1072 bool can_commit_insts
= !inFlightInsts
->empty() &&
1073 !(only_commit_microops
&& lastCommitWasEndOfMacroop
);
1075 /* Can we find a mem response for this inst */
1076 LSQ::LSQRequestPtr mem_response
=
1077 (inst
->inLSQ
? lsq
.findResponse(inst
) : NULL
);
1079 DPRINTF(MinorExecute
, "Trying to commit canCommitInsts: %d\n",
1082 /* Test for PC events after every instruction */
1083 if (isInbetweenInsts() && tryPCEvents()) {
1084 ThreadContext
*thread
= cpu
.getContext(0);
1086 /* Branch as there was a change in PC */
1087 updateBranchData(BranchData::UnpredictedBranch
,
1088 MinorDynInst::bubble(), thread
->pcState(), branch
);
1089 } else if (mem_response
&&
1090 num_mem_refs_committed
< memoryCommitLimit
)
1092 /* Try to commit from the memory responses next */
1093 discard_inst
= inst
->id
.streamSeqNum
!= streamSeqNum
||
1096 DPRINTF(MinorExecute
, "Trying to commit mem response: %s\n",
1099 /* Complete or discard the response */
1101 DPRINTF(MinorExecute
, "Discarding mem inst: %s as its"
1102 " stream state was unexpected, expected: %d\n",
1103 *inst
, streamSeqNum
);
1105 lsq
.popResponse(mem_response
);
1107 handleMemResponse(inst
, mem_response
, branch
, fault
);
1108 committed_inst
= true;
1111 completed_mem_ref
= true;
1112 completed_inst
= true;
1113 } else if (can_commit_insts
) {
1114 /* If true, this instruction will, subject to timing tweaks,
1115 * be considered for completion. try_to_commit flattens
1116 * the `if' tree a bit and allows other tests for inst
1117 * commit to be inserted here. */
1118 bool try_to_commit
= false;
1120 /* Try and issue memory ops early if they:
1121 * - Can push a request into the LSQ
1122 * - Have reached the end of their FUs
1123 * - Have had all their dependencies satisfied
1124 * - Are from the right stream
1126 * For any other case, leave it to the normal instruction
1127 * issue below to handle them.
1129 if (!inFUMemInsts
->empty() && lsq
.canRequest()) {
1130 DPRINTF(MinorExecute
, "Trying to commit from mem FUs\n");
1132 const MinorDynInstPtr head_mem_ref_inst
=
1133 inFUMemInsts
->front().inst
;
1134 FUPipeline
*fu
= funcUnits
[head_mem_ref_inst
->fuIndex
];
1135 const MinorDynInstPtr
&fu_inst
= fu
->front().inst
;
1137 /* Use this, possibly out of order, inst as the one
1138 * to 'commit'/send to the LSQ */
1139 if (!fu_inst
->isBubble() &&
1141 fu_inst
->canEarlyIssue
&&
1142 streamSeqNum
== fu_inst
->id
.streamSeqNum
&&
1143 head_exec_seq_num
> fu_inst
->instToWaitFor
)
1145 DPRINTF(MinorExecute
, "Issuing mem ref early"
1146 " inst: %s instToWaitFor: %d\n",
1147 *(fu_inst
), fu_inst
->instToWaitFor
);
1150 try_to_commit
= true;
1151 early_memory_issue
= true;
1152 completed_inst
= true;
1156 /* Try and commit FU-less insts */
1157 if (!completed_inst
&& inst
->isNoCostInst()) {
1158 DPRINTF(MinorExecute
, "Committing no cost inst: %s", *inst
);
1160 try_to_commit
= true;
1161 completed_inst
= true;
1164 /* Try to issue from the ends of FUs and the inFlightInsts
1166 if (!completed_inst
&& !inst
->inLSQ
) {
1167 DPRINTF(MinorExecute
, "Trying to commit from FUs\n");
1169 /* Try to commit from a functional unit */
1170 /* Is the head inst of the expected inst's FU actually the
1172 QueuedInst
&fu_inst
=
1173 funcUnits
[inst
->fuIndex
]->front();
1174 InstSeqNum fu_inst_seq_num
= fu_inst
.inst
->id
.execSeqNum
;
1176 if (fu_inst
.inst
->isBubble()) {
1177 /* No instruction ready */
1178 completed_inst
= false;
1179 } else if (fu_inst_seq_num
!= head_exec_seq_num
) {
1180 /* Past instruction: we must have already executed it
1181 * in the same cycle and so the head inst isn't
1182 * actually at the end of its pipeline
1183 * Future instruction: handled above and only for
1184 * mem refs on their way to the LSQ */
1185 } else /* if (fu_inst_seq_num == head_exec_seq_num) */ {
1186 /* All instructions can be committed if they have the
1187 * right execSeqNum and there are no in-flight
1188 * mem insts before us */
1189 try_to_commit
= true;
1190 completed_inst
= true;
1194 if (try_to_commit
) {
1195 discard_inst
= inst
->id
.streamSeqNum
!= streamSeqNum
||
1198 /* Is this instruction discardable as its streamSeqNum
1200 if (!discard_inst
) {
1201 /* Try to commit or discard a non-memory instruction.
1202 * Memory ops are actually 'committed' from this FUs
1203 * and 'issued' into the memory system so we need to
1204 * account for them later (commit_was_mem_issue gets
1206 if (inst
->extraCommitDelayExpr
) {
1207 DPRINTF(MinorExecute
, "Evaluating expression for"
1208 " extra commit delay inst: %s\n", *inst
);
1210 ThreadContext
*thread
=
1211 cpu
.getContext(inst
->id
.threadId
);
1213 TimingExprEvalContext
context(inst
->staticInst
,
1216 uint64_t extra_delay
= inst
->extraCommitDelayExpr
->
1219 DPRINTF(MinorExecute
, "Extra commit delay expr"
1220 " result: %d\n", extra_delay
);
1222 if (extra_delay
< 128) {
1223 inst
->extraCommitDelay
+= Cycles(extra_delay
);
1225 DPRINTF(MinorExecute
, "Extra commit delay was"
1226 " very long: %d\n", extra_delay
);
1228 inst
->extraCommitDelayExpr
= NULL
;
1231 /* Move the extraCommitDelay from the instruction
1232 * into the minimumCommitCycle */
1233 if (inst
->extraCommitDelay
!= Cycles(0)) {
1234 inst
->minimumCommitCycle
= cpu
.curCycle() +
1235 inst
->extraCommitDelay
;
1236 inst
->extraCommitDelay
= Cycles(0);
1239 /* @todo Think about making lastMemBarrier be
1240 * MAX_UINT_64 to avoid using 0 as a marker value */
1241 if (!inst
->isFault() && inst
->isMemRef() &&
1242 lsq
.getLastMemBarrier() <
1243 inst
->id
.execSeqNum
&&
1244 lsq
.getLastMemBarrier() != 0)
1246 DPRINTF(MinorExecute
, "Not committing inst: %s yet"
1247 " as there are incomplete barriers in flight\n",
1249 completed_inst
= false;
1250 } else if (inst
->minimumCommitCycle
> now
) {
1251 DPRINTF(MinorExecute
, "Not committing inst: %s yet"
1252 " as it wants to be stalled for %d more cycles\n",
1253 *inst
, inst
->minimumCommitCycle
- now
);
1254 completed_inst
= false;
1256 completed_inst
= commitInst(inst
,
1257 early_memory_issue
, branch
, fault
,
1258 committed_inst
, issued_mem_ref
);
1261 /* Discard instruction */
1262 completed_inst
= true;
1265 if (completed_inst
) {
1266 /* Allow the pipeline to advance. If the FU head
1267 * instruction wasn't the inFlightInsts head
1268 * but had already been committed, it would have
1269 * unstalled the pipeline before here */
1270 if (inst
->fuIndex
!= noCostFUIndex
)
1271 funcUnits
[inst
->fuIndex
]->stalled
= false;
1275 DPRINTF(MinorExecute
, "No instructions to commit\n");
1276 completed_inst
= false;
1279 /* All discardable instructions must also be 'completed' by now */
1280 assert(!(discard_inst
&& !completed_inst
));
1282 /* Instruction committed but was discarded due to streamSeqNum
1285 DPRINTF(MinorExecute
, "Discarding inst: %s as its stream"
1286 " state was unexpected, expected: %d\n",
1287 *inst
, streamSeqNum
);
1289 if (fault
== NoFault
)
1290 cpu
.stats
.numDiscardedOps
++;
1293 /* Mark the mem inst as being in the LSQ */
1294 if (issued_mem_ref
) {
1299 /* Pop issued (to LSQ) and discarded mem refs from the inFUMemInsts
1300 * as they've *definitely* exited the FUs */
1301 if (completed_inst
&& inst
->isMemRef()) {
1302 /* The MemRef could have been discarded from the FU or the memory
1303 * queue, so just check an FU instruction */
1304 if (!inFUMemInsts
->empty() &&
1305 inFUMemInsts
->front().inst
== inst
)
1307 inFUMemInsts
->pop();
1311 if (completed_inst
&& !(issued_mem_ref
&& fault
== NoFault
)) {
1312 /* Note that this includes discarded insts */
1313 DPRINTF(MinorExecute
, "Completed inst: %s\n", *inst
);
1315 /* Got to the end of a full instruction? */
1316 lastCommitWasEndOfMacroop
= inst
->isFault() ||
1317 inst
->isLastOpInInst();
1319 /* lastPredictionSeqNum is kept as a convenience to prevent its
1320 * value from changing too much on the minorview display */
1321 lastPredictionSeqNum
= inst
->id
.predictionSeqNum
;
1323 /* Finished with the inst, remove it from the inst queue and
1324 * clear its dependencies */
1325 inFlightInsts
->pop();
1327 /* Complete barriers in the LSQ/move to store buffer */
1328 if (inst
->isInst() && inst
->staticInst
->isMemBarrier()) {
1329 DPRINTF(MinorMem
, "Completing memory barrier"
1330 " inst: %s committed: %d\n", *inst
, committed_inst
);
1331 lsq
.completeMemBarrierInst(inst
, committed_inst
);
1334 scoreboard
.clearInstDests(inst
, inst
->isMemRef());
1337 /* Handle per-cycle instruction counting */
1338 if (committed_inst
) {
1339 bool is_no_cost_inst
= inst
->isNoCostInst();
1341 /* Don't show no cost instructions as having taken a commit
1343 if (DTRACE(MinorTrace
) && !is_no_cost_inst
)
1344 instsBeingCommitted
.insts
[num_insts_committed
] = inst
;
1346 if (!is_no_cost_inst
)
1347 num_insts_committed
++;
1349 if (num_insts_committed
== commitLimit
)
1350 DPRINTF(MinorExecute
, "Reached inst commit limit\n");
1352 /* Re-set the time of the instruction if that's required for
1354 if (inst
->traceData
) {
1355 if (setTraceTimeOnCommit
)
1356 inst
->traceData
->setWhen(curTick());
1357 inst
->traceData
->dump();
1360 if (completed_mem_ref
)
1361 num_mem_refs_committed
++;
1363 if (num_mem_refs_committed
== memoryCommitLimit
)
1364 DPRINTF(MinorExecute
, "Reached mem ref commit limit\n");
1370 Execute::isInbetweenInsts() const
1372 return lastCommitWasEndOfMacroop
&&
1373 !lsq
.accessesInFlight();
1379 inputBuffer
.setTail(*inp
.outputWire
);
1380 BranchData
&branch
= *out
.inputWire
;
1382 const ForwardInstData
*insts_in
= getInput();
1384 /* Do all the cycle-wise activities for dcachePort here to potentially
1385 * free up input spaces in the LSQ's requests queue */
1388 /* Has an interrupt been signalled? This may not be acted on
1389 * straighaway so this is different from took_interrupt below */
1390 bool interrupted
= false;
1391 /* If there was an interrupt signalled, was it acted on now? */
1392 bool took_interrupt
= false;
1394 if (cpu
.getInterruptController()) {
1395 /* This is here because it seems that after drainResume the
1396 * interrupt controller isn't always set */
1397 interrupted
= drainState
== NotDraining
&& isInterrupted(0);
1399 DPRINTF(MinorInterrupt
, "No interrupt controller\n");
1402 unsigned int num_issued
= 0;
1404 if (DTRACE(MinorTrace
)) {
1405 /* Empty the instsBeingCommitted for MinorTrace */
1406 instsBeingCommitted
.bubbleFill();
1409 /* THREAD threadId on isInterrupted */
1410 /* Act on interrupts */
1411 if (interrupted
&& isInbetweenInsts()) {
1412 took_interrupt
= takeInterrupt(0, branch
);
1413 /* Clear interrupted if no interrupt was actually waiting */
1414 interrupted
= took_interrupt
;
1417 if (took_interrupt
) {
1418 /* Do no commit/issue this cycle */
1419 } else if (!branch
.isBubble()) {
1420 /* It's important that this is here to carry Fetch1 wakeups to Fetch1
1421 * without overwriting them */
1422 DPRINTF(MinorInterrupt
, "Execute skipping a cycle to allow old"
1423 " branch to complete\n");
1426 if (inFlightInsts
->empty()) {
1427 DPRINTF(MinorInterrupt
, "Waiting but no insts\n");
1429 DPRINTF(MinorInterrupt
, "Waiting for end of inst before"
1430 " signalling interrupt\n");
1434 /* commit can set stalled flags observable to issue and so *must* be
1436 if (drainState
!= NotDraining
) {
1437 if (drainState
== DrainCurrentInst
) {
1438 /* Commit only micro-ops, don't kill anything else */
1439 commit(true, false, branch
);
1441 if (isInbetweenInsts())
1442 setDrainState(DrainHaltFetch
);
1444 /* Discard any generated branch */
1445 branch
= BranchData::bubble();
1446 } else if (drainState
== DrainAllInsts
) {
1447 /* Kill all instructions */
1450 commit(false, true, branch
);
1453 /* Commit micro-ops only if interrupted. Otherwise, commit
1454 * anything you like */
1455 commit(interrupted
, false, branch
);
1458 /* This will issue merrily even when interrupted in the sure and
1459 * certain knowledge that the interrupt with change the stream */
1461 num_issued
= issue(false);
1464 /* Halt fetch, but don't do it until we have the current instruction in
1466 if (drainState
== DrainHaltFetch
) {
1467 updateBranchData(BranchData::HaltFetch
, MinorDynInst::bubble(),
1468 TheISA::PCState(0), branch
);
1470 cpu
.wakeupOnEvent(Pipeline::ExecuteStageId
);
1471 setDrainState(DrainAllInsts
);
1474 MinorDynInstPtr next_issuable_inst
= NULL
;
1475 bool can_issue_next
= false;
1477 /* Find the next issuable instruction and see if it can be issued */
1479 MinorDynInstPtr inst
= getInput()->insts
[inputIndex
];
1481 if (inst
->isFault()) {
1482 can_issue_next
= true;
1483 } else if (!inst
->isBubble()) {
1484 if (cpu
.getContext(inst
->id
.threadId
)->status() !=
1485 ThreadContext::Suspended
)
1487 next_issuable_inst
= inst
;
1492 bool becoming_stalled
= true;
1494 /* Advance the pipelines and note whether they still need to be
1496 for (unsigned int i
= 0; i
< numFuncUnits
; i
++) {
1497 FUPipeline
*fu
= funcUnits
[i
];
1501 /* If we need to go again, the pipeline will have been left or set
1502 * to be unstalled */
1503 if (fu
->occupancy
!= 0 && !fu
->stalled
)
1504 becoming_stalled
= false;
1506 /* Could we possibly issue the next instruction? This is quite
1507 * an expensive test */
1508 if (next_issuable_inst
&& !fu
->stalled
&&
1509 scoreboard
.canInstIssue(next_issuable_inst
,
1510 NULL
, NULL
, cpu
.curCycle() + Cycles(1),
1511 cpu
.getContext(next_issuable_inst
->id
.threadId
)) &&
1512 fu
->provides(next_issuable_inst
->staticInst
->opClass()))
1514 can_issue_next
= true;
1518 bool head_inst_might_commit
= false;
1520 /* Could the head in flight insts be committed */
1521 if (!inFlightInsts
->empty()) {
1522 const QueuedInst
&head_inst
= inFlightInsts
->front();
1524 if (head_inst
.inst
->isNoCostInst()) {
1525 head_inst_might_commit
= true;
1527 FUPipeline
*fu
= funcUnits
[head_inst
.inst
->fuIndex
];
1529 /* Head inst is commitable */
1531 fu
->front().inst
->id
== head_inst
.inst
->id
) ||
1532 lsq
.findResponse(head_inst
.inst
))
1534 head_inst_might_commit
= true;
1539 DPRINTF(Activity
, "Need to tick num issued insts: %s%s%s%s%s%s\n",
1540 (num_issued
!= 0 ? " (issued some insts)" : ""),
1541 (becoming_stalled
? " (becoming stalled)" : "(not becoming stalled)"),
1542 (can_issue_next
? " (can issued next inst)" : ""),
1543 (head_inst_might_commit
? "(head inst might commit)" : ""),
1544 (lsq
.needsToTick() ? " (LSQ needs to tick)" : ""),
1545 (interrupted
? " (interrupted)" : ""));
1548 num_issued
!= 0 || /* Issued some insts this cycle */
1549 !becoming_stalled
|| /* Some FU pipelines can still move */
1550 can_issue_next
|| /* Can still issue a new inst */
1551 head_inst_might_commit
|| /* Could possible commit the next inst */
1552 lsq
.needsToTick() || /* Must step the dcache port */
1553 interrupted
; /* There are pending interrupts */
1555 if (!need_to_tick
) {
1556 DPRINTF(Activity
, "The next cycle might be skippable as there are no"
1557 " advanceable FUs\n");
1560 /* Wake up if we need to tick again */
1562 cpu
.wakeupOnEvent(Pipeline::ExecuteStageId
);
1564 /* Note activity of following buffer */
1565 if (!branch
.isBubble())
1566 cpu
.activityRecorder
->activity();
1568 /* Make sure the input (if any left) is pushed */
1569 inputBuffer
.pushTail();
1573 Execute::wakeupFetch(BranchData::Reason reason
)
1576 assert(branch
.isBubble());
1578 /* THREAD thread id */
1579 ThreadContext
*thread
= cpu
.getContext(0);
1581 /* Force a branch to the current PC (which should be the next inst.) to
1583 if (!branch
.isStreamChange() /* No real branch already happened */) {
1584 DPRINTF(MinorInterrupt
, "Waking up Fetch (via Execute) by issuing"
1585 " a branch: %s\n", thread
->pcState());
1587 assert(thread
->pcState().microPC() == 0);
1589 updateBranchData(reason
,
1590 MinorDynInst::bubble(), thread
->pcState(), branch
);
1592 DPRINTF(MinorInterrupt
, "Already branching, no need for wakeup\n");
1595 *out
.inputWire
= branch
;
1597 /* Make sure we get ticked */
1598 cpu
.wakeupOnEvent(Pipeline::ExecuteStageId
);
1602 Execute::minorTrace() const
1604 std::ostringstream insts
;
1605 std::ostringstream stalled
;
1607 instsBeingCommitted
.reportData(insts
);
1609 inputBuffer
.minorTrace();
1610 scoreboard
.minorTrace();
1612 /* Report functional unit stalling in one string */
1614 while (i
< numFuncUnits
)
1616 stalled
<< (funcUnits
[i
]->stalled
? '1' : 'E');
1618 if (i
!= numFuncUnits
)
1622 MINORTRACE("insts=%s inputIndex=%d streamSeqNum=%d"
1623 " stalled=%s drainState=%d isInbetweenInsts=%d\n",
1624 insts
.str(), inputIndex
, streamSeqNum
, stalled
.str(), drainState
,
1625 isInbetweenInsts());
1627 std::for_each(funcUnits
.begin(), funcUnits
.end(),
1628 std::mem_fun(&FUPipeline::minorTrace
));
1630 inFlightInsts
->minorTrace();
1631 inFUMemInsts
->minorTrace();
1635 Execute::drainResume()
1637 DPRINTF(Drain
, "MinorExecute drainResume\n");
1639 setDrainState(NotDraining
);
1641 /* Wakeup fetch and keep the pipeline running until that branch takes
1643 wakeupFetch(BranchData::WakeupFetch
);
1644 cpu
.wakeupOnEvent(Pipeline::ExecuteStageId
);
1647 std::ostream
&operator <<(std::ostream
&os
, Execute::DrainState state
)
1651 case Execute::NotDraining
:
1652 os
<< "NotDraining";
1654 case Execute::DrainCurrentInst
:
1655 os
<< "DrainCurrentInst";
1657 case Execute::DrainHaltFetch
:
1658 os
<< "DrainHaltFetch";
1660 case Execute::DrainAllInsts
:
1661 os
<< "DrainAllInsts";
1664 os
<< "Drain-" << static_cast<int>(state
);
1672 Execute::setDrainState(DrainState state
)
1674 DPRINTF(Drain
, "setDrainState: %s\n", state
);
1681 DPRINTF(Drain
, "MinorExecute drain\n");
1683 if (drainState
== NotDraining
) {
1684 cpu
.wakeupOnEvent(Pipeline::ExecuteStageId
);
1686 /* Go to DrainCurrentInst if we're between microops
1687 * or waiting on an unbufferable memory operation.
1688 * Otherwise we can go straight to DrainHaltFetch
1690 if (isInbetweenInsts())
1691 setDrainState(DrainHaltFetch
);
1693 setDrainState(DrainCurrentInst
);
1696 return (isDrained() ? 0 : 1);
1700 Execute::isDrained()
1702 return drainState
== DrainAllInsts
&&
1703 inputBuffer
.empty() &&
1704 inFlightInsts
->empty() &&
1710 for (unsigned int i
= 0; i
< numFuncUnits
; i
++)
1711 delete funcUnits
[i
];
1713 delete inFlightInsts
;
1717 Execute::instIsRightStream(MinorDynInstPtr inst
)
1719 return inst
->id
.streamSeqNum
== streamSeqNum
;
1723 Execute::instIsHeadInst(MinorDynInstPtr inst
)
1727 if (!inFlightInsts
->empty())
1728 ret
= inFlightInsts
->front().inst
->id
== inst
->id
;
1733 MinorCPU::MinorCPUPort
&
1734 Execute::getDcachePort()
1736 return lsq
.getDcachePort();