/*
 * Copyright (c) 2013-2014 ARM Limited
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Andrew Bardsley
 */
#include "cpu/minor/decode.hh"

#include "cpu/minor/pipeline.hh"
#include "debug/Decode.hh"
47 Decode::Decode(const std::string
&name
,
49 MinorCPUParams
¶ms
,
50 Latch
<ForwardInstData
>::Output inp_
,
51 Latch
<ForwardInstData
>::Input out_
,
52 Reservable
&next_stage_input_buffer
) :
57 nextStageReserve(next_stage_input_buffer
),
58 outputWidth(params
.executeInputWidth
),
59 processMoreThanOneInput(params
.decodeCycleInput
),
60 inputBuffer(name
+ ".inputBuffer", "insts", params
.decodeInputBufferSize
),
63 execSeqNum(InstId::firstExecSeqNum
),
67 fatal("%s: executeInputWidth must be >= 1 (%d)\n", name
, outputWidth
);
69 if (params
.decodeInputBufferSize
< 1) {
70 fatal("%s: decodeInputBufferSize must be >= 1 (%d)\n", name
,
71 params
.decodeInputBufferSize
);
75 const ForwardInstData
*
78 /* Get insts from the inputBuffer to work with */
79 if (!inputBuffer
.empty()) {
80 const ForwardInstData
&head
= inputBuffer
.front();
82 return (head
.isBubble() ? NULL
: &(inputBuffer
.front()));
91 if (!inputBuffer
.empty())
#if TRACING_ON
/** Add the tracing data to an instruction.  This originates in
 *  decode because this is the first place that execSeqNums are known
 *  (these are used as the 'FetchSeq' in tracing data) */
static void
dynInstAddTracing(MinorDynInstPtr inst, StaticInstPtr static_inst,
    MinorCPU &cpu)
{
    /* Ask the CPU's tracer for a record; static_inst is the parent
     * macro-op (or the instruction itself when not decomposed) */
    inst->traceData = cpu.getTracer()->getInstRecord(curTick(),
        cpu.getContext(inst->id.threadId),
        inst->staticInst, inst->pc, static_inst);

    /* Use the execSeqNum as the fetch sequence number as this most closely
     * matches the other processor models' idea of fetch sequence.
     * getInstRecord may legitimately return NULL (tracer filtering), so
     * guard the dereference */
    if (inst->traceData)
        inst->traceData->setFetchSeq(inst->id.execSeqNum);
}
#endif
120 inputBuffer
.setTail(*inp
.outputWire
);
121 ForwardInstData
&insts_out
= *out
.inputWire
;
123 assert(insts_out
.isBubble());
127 if (!nextStageReserve
.canReserve()) {
130 const ForwardInstData
*insts_in
= getInput();
132 unsigned int output_index
= 0;
134 /* Pack instructions into the output while we can. This may involve
135 * using more than one input line */
137 inputIndex
< insts_in
->width() && /* Still more input */
138 output_index
< outputWidth
/* Still more output to fill */)
140 MinorDynInstPtr inst
= insts_in
->insts
[inputIndex
];
142 if (inst
->isBubble()) {
147 StaticInstPtr static_inst
= inst
->staticInst
;
148 /* Static inst of a macro-op above the output_inst */
149 StaticInstPtr parent_static_inst
= NULL
;
150 MinorDynInstPtr output_inst
= inst
;
152 if (inst
->isFault()) {
153 DPRINTF(Decode
, "Fault being passed: %d\n",
154 inst
->fault
->name());
158 } else if (static_inst
->isMacroop()) {
159 /* Generate a new micro-op */
160 StaticInstPtr static_micro_inst
;
162 /* Set up PC for the next micro-op emitted */
164 microopPC
= inst
->pc
;
168 /* Get the micro-op static instruction from the
171 static_inst
->fetchMicroop(microopPC
.microPC());
173 output_inst
= new MinorDynInst(inst
->id
);
174 output_inst
->pc
= microopPC
;
175 output_inst
->staticInst
= static_micro_inst
;
176 output_inst
->fault
= NoFault
;
178 /* Allow a predicted next address only on the last
180 if (static_micro_inst
->isLastMicroop()) {
181 output_inst
->predictedTaken
= inst
->predictedTaken
;
182 output_inst
->predictedTarget
= inst
->predictedTarget
;
185 DPRINTF(Decode
, "Microop decomposition inputIndex:"
186 " %d output_index: %d lastMicroop: %s microopPC:"
188 inputIndex
, output_index
,
189 (static_micro_inst
->isLastMicroop() ?
191 microopPC
.instAddr(), microopPC
.microPC(),
194 /* Acknowledge that the static_inst isn't mine, it's my
195 * parent macro-op's */
196 parent_static_inst
= static_inst
;
198 static_micro_inst
->advancePC(microopPC
);
200 /* Step input if this is the last micro-op */
201 if (static_micro_inst
->isLastMicroop()) {
206 /* Doesn't need decomposing, pass on instruction */
207 DPRINTF(Decode
, "Passing on inst: %s inputIndex:"
208 " %d output_index: %d\n",
209 *output_inst
, inputIndex
, output_index
);
211 parent_static_inst
= static_inst
;
218 /* Set execSeqNum of output_inst */
219 output_inst
->id
.execSeqNum
= execSeqNum
;
222 dynInstAddTracing(output_inst
, parent_static_inst
, cpu
);
225 /* Step to next sequence number */
228 /* Correctly size the output before writing */
229 if(output_index
== 0) insts_out
.resize(outputWidth
);
230 /* Push into output */
231 insts_out
.insts
[output_index
] = output_inst
;
235 /* Have we finished with the input? */
236 if (inputIndex
== insts_in
->width()) {
237 /* If we have just been producing micro-ops, we *must* have
238 * got to the end of that for inputIndex to be pushed past
239 * insts_in->width() */
244 if (processMoreThanOneInput
) {
245 DPRINTF(Decode
, "Wrapping\n");
246 insts_in
= getInput();
251 /* The rest of the output (if any) should already have been packed
252 * with bubble instructions by insts_out's initialisation
254 * for (; output_index < outputWidth; output_index++)
255 * assert(insts_out.insts[output_index]->isBubble());
259 /* If we generated output, reserve space for the result in the next stage
260 * and mark the stage as being active this cycle */
261 if (!insts_out
.isBubble()) {
262 /* Note activity of following buffer */
263 cpu
.activityRecorder
->activity();
264 nextStageReserve
.reserve();
267 /* If we still have input to process and somewhere to put it,
268 * mark stage as active */
269 if (getInput() && nextStageReserve
.canReserve())
270 cpu
.activityRecorder
->activateStage(Pipeline::DecodeStageId
);
272 /* Make sure the input (if any left) is pushed */
273 inputBuffer
.pushTail();
279 return inputBuffer
.empty() && (*inp
.outputWire
).isBubble();
283 Decode::minorTrace() const
285 std::ostringstream data
;
290 (*out
.inputWire
).reportData(data
);
292 MINORTRACE("insts=%s\n", data
.str());
293 inputBuffer
.minorTrace();