inorder: add necessary debug flag header files
[gem5.git] src/cpu/inorder/resources/fetch_seq_unit.cc
/*
 * Copyright (c) 2007 MIPS Technologies, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Korey Sewell
 *
 */

#include "config/the_isa.hh"
#include "cpu/inorder/resources/fetch_seq_unit.hh"
#include "cpu/inorder/resource_pool.hh"
#include "debug/InOrderFetchSeq.hh"
#include "debug/InOrderStall.hh"

using namespace std;
using namespace TheISA;
using namespace ThePipeline;

FetchSeqUnit::FetchSeqUnit(std::string res_name, int res_id, int res_width,
                           int res_latency, InOrderCPU *_cpu,
                           ThePipeline::Params *params)
    : Resource(res_name, res_id, res_width, res_latency, _cpu),
      instSize(sizeof(MachInst))
{
    for (ThreadID tid = 0; tid < ThePipeline::MaxThreads; tid++) {
        pcValid[tid] = false;
        pcBlockStage[tid] = 0;

        //@todo: Use CPU's squashSeqNum here instead of maintaining our own
        //       state
        squashSeqNum[tid] = (InstSeqNum)-1;
        lastSquashCycle[tid] = 0;
    }
}

FetchSeqUnit::~FetchSeqUnit()
{
    delete [] resourceEvent;
}

void
FetchSeqUnit::init()
{
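    // One fetch-sequence event and one resource request are allocated per
    // pipeline-width slot before the slots are initialized.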
    resourceEvent = new FetchSeqEvent[width];

    for (int i = 0; i < width; i++) {
        reqs[i] = new ResourceRequest(this);
    }

    initSlots();
}

void
FetchSeqUnit::execute(int slot_num)
{
    ResourceRequest* fs_req = reqs[slot_num];
    DynInstPtr inst = fs_req->inst;
    ThreadID tid = inst->readTid();
    int stage_num = fs_req->getStageNum();

    if (inst->fault != NoFault) {
        DPRINTF(InOrderFetchSeq,
                "[tid:%i]: [sn:%i]: Detected %s fault @ %x. Forwarding to "
                "next stage.\n", tid, inst->seqNum, inst->fault->name(),
                inst->pcState());
        fs_req->done();
        return;
    }

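    // Two commands are serviced here: AssignNextPC hands the current fetch
    // PC and a fresh sequence number to the instruction, while
    // UpdateTargetPC redirects (or blocks) fetch once a control
    // instruction's target is predicted or must be resolved.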
    switch (fs_req->cmd)
    {
      case AssignNextPC:
        {
            DPRINTF(InOrderFetchSeq, "[tid:%i]: Current PC is %s\n", tid,
                    pc[tid]);

            if (pcValid[tid]) {
                inst->pcState(pc[tid]);
                inst->setMemAddr(pc[tid].instAddr());

                // Advance to next PC (typically PC + 4)
                pc[tid].advance();

                inst->setSeqNum(cpu->getAndIncrementInstSeq(tid));

                DPRINTF(InOrderFetchSeq, "[tid:%i]: Assigning [sn:%i] to "
                        "PC %s\n", tid, inst->seqNum, inst->pcState());

                fs_req->done();
            } else {
                DPRINTF(InOrderStall, "STALL: [tid:%i]: NPC not valid\n", tid);
                fs_req->done(false);
            }
        }
        break;

      case UpdateTargetPC:
        {
            assert(!inst->isCondDelaySlot() &&
                   "Not Handling Conditional Delay Slot");

            if (inst->isControl()) {
                if (inst->isReturn() && !inst->predTaken()) {
                    // If it's a return, then we must wait for the resolved
                    // address. The predictor will falsely mark a return as
                    // "not taken" when there is no RAS entry.
                    DPRINTF(InOrderFetchSeq, "[tid:%d]: Setting block signal "
                            "for stage %i.\n",
                            tid, stage_num);
                    cpu->pipelineStage[stage_num]->
                        toPrevStages->stageBlock[stage_num][tid] = true;
                    pcValid[tid] = false;
                    pcBlockStage[tid] = stage_num;
                } else if (inst->predTaken()) {
                    // Taken Control
                    inst->setSquashInfo(stage_num);
                    setupSquash(inst, stage_num, tid);

                    DPRINTF(InOrderFetchSeq, "[tid:%i] Setting up squash to "
                            "start from stage %i, after [sn:%i].\n",
                            tid, stage_num, inst->squashSeqNum);
                }
            } else {
                DPRINTF(InOrderFetchSeq, "[tid:%i]: [sn:%i]: Ignoring branch "
                        "target update since this is not a control "
                        "instruction.\n", tid, inst->seqNum);
            }

            fs_req->done();
        }
        break;

      default:
        fatal("Unrecognized command to %s", resName);
    }
}

void
FetchSeqUnit::squash(DynInstPtr inst, int squash_stage,
                     InstSeqNum squash_seq_num, ThreadID tid)
{
    DPRINTF(InOrderFetchSeq, "[tid:%i]: Updating due to squash from %s (%s) "
            "stage %i.\n", tid, inst->instName(), inst->pcState(),
            squash_stage);

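    // Ignore this squash if a squash from an equally old or older
    // instruction has already been handled this cycle for this thread.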
    if (lastSquashCycle[tid] == curTick() &&
        squashSeqNum[tid] <= squash_seq_num) {
        DPRINTF(InOrderFetchSeq, "[tid:%i]: Ignoring squash from stage %i, "
                "since there is an outstanding squash that is older.\n",
                tid, squash_stage);
    } else {
        squashSeqNum[tid] = squash_seq_num;
        lastSquashCycle[tid] = curTick();

        if (inst->staticInst) {
            if (inst->fault != NoFault) {
                // A trap caused this fault and will update the PC state
                // when done trapping.
                DPRINTF(InOrderFetchSeq, "[tid:%i] Blocking due to fault @ "
                        "[sn:%i]. %s %s\n", tid, inst->seqNum,
                        inst->instName(), inst->pcState());
                pcValid[tid] = false;
            } else {
                TheISA::PCState nextPC;
                assert(inst->staticInst);
                if (inst->isControl()) {
                    nextPC = inst->readPredTarg();

                    // If we are already fetching this PC then advance to the
                    // next PC. This should handle ISAs w/ delay slots and
                    // annulled delay slots to figure out which PC to fetch
                    // after a mispredict.
                    DynInstPtr bdelay_inst = NULL;
                    ListIt bdelay_it;
                    if (inst->onInstList) {
                        bdelay_it = inst->getInstListIt();
                        bdelay_it++;
                    } else {
                        InstSeqNum branch_delay_num = inst->seqNum + 1;
                        bdelay_it = cpu->findInst(branch_delay_num, tid);
                    }

                    if (bdelay_it != cpu->instList[tid].end()) {
                        bdelay_inst = (*bdelay_it);
                    }

                    if (bdelay_inst) {
                        if (bdelay_inst->pc.instAddr() == nextPC.instAddr()) {
                            bdelay_inst->pc = nextPC;
                            advancePC(nextPC, inst->staticInst);
                            DPRINTF(InOrderFetchSeq, "Advanced PC to %s\n",
                                    nextPC);
                        }
                    }
                } else {
                    nextPC = inst->pcState();
                    advancePC(nextPC, inst->staticInst);
                }

                DPRINTF(InOrderFetchSeq, "[tid:%i]: Setting PC to %s.\n",
                        tid, nextPC);
                pc[tid] = nextPC;

                // Unblock any stages waiting for this PC to be updated.
                if (!pcValid[tid]) {
                    DPRINTF(InOrderFetchSeq, "[tid:%d]: Setting unblock "
                            "signal for stage %i.\n",
                            tid, pcBlockStage[tid]);

                    // Need to use "fromNextStages" instead of "toPrevStages"
                    // because the timebuffer will already have advanced in
                    // the tick function, and this squash happens after the
                    // tick.
                    cpu->pipelineStage[pcBlockStage[tid]]->
                        fromNextStages->stageUnblock[pcBlockStage[tid]][tid] = true;
                }

                pcValid[tid] = true;
            }
        }
    }

    Resource::squash(inst, squash_stage, squash_seq_num, tid);
}

FetchSeqUnit::FetchSeqEvent::FetchSeqEvent()
    : ResourceEvent()
{ }

void
FetchSeqUnit::FetchSeqEvent::process()
{
    FetchSeqUnit* fs_res = dynamic_cast<FetchSeqUnit*>(resource);
    assert(fs_res);

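    // Re-seed every thread's fetch PC from the CPU's current PC state and
    // mark it valid so fetch can proceed.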
    for (int i = 0; i < MaxThreads; i++) {
        fs_res->pc[i] = fs_res->cpu->pcState(i);
        DPRINTF(InOrderFetchSeq, "[tid:%i]: Setting PC: %s.\n",
                i, fs_res->pc[i]);

        fs_res->pcValid[i] = true;
    }
}

void
FetchSeqUnit::activateThread(ThreadID tid)
{
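    // Seed the thread's fetch PC from the CPU and add the thread to the
    // fetch priority list so it can be scheduled for fetch.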
    pcValid[tid] = true;

    pc[tid] = cpu->pcState(tid);

    cpu->fetchPriorityList.push_back(tid);

    DPRINTF(InOrderFetchSeq, "[tid:%i]: Reading PC: %s.\n",
            tid, pc[tid]);
}

void
FetchSeqUnit::deactivateThread(ThreadID tid)
{
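    // Reset the thread's fetch state and remove it from the fetch
    // priority list.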
    pcValid[tid] = false;
    pcBlockStage[tid] = 0;

    squashSeqNum[tid] = (InstSeqNum)-1;
    lastSquashCycle[tid] = 0;

    list<ThreadID>::iterator thread_it = find(cpu->fetchPriorityList.begin(),
                                              cpu->fetchPriorityList.end(),
                                              tid);

    if (thread_it != cpu->fetchPriorityList.end())
        cpu->fetchPriorityList.erase(thread_it);
}

void
FetchSeqUnit::suspendThread(ThreadID tid)
{
    deactivateThread(tid);
}

void
FetchSeqUnit::trap(Fault fault, ThreadID tid, DynInstPtr inst)
{
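    // A trap has redirected this thread; resynchronize the fetch PC with
    // the CPU's post-trap PC state and mark it valid again.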
    pcValid[tid] = true;
    pc[tid] = cpu->pcState(tid);
    DPRINTF(InOrderFetchSeq, "[tid:%i]: Trap updating to PC: "
            "%s.\n", tid, pc[tid]);
}

void
FetchSeqUnit::updateAfterContextSwitch(DynInstPtr inst, ThreadID tid)
{
    pcValid[tid] = true;

    if (cpu->thread[tid]->lastGradIsBranch) {
        /** This function assumes that the instruction causing the context
         *  switch was right after the branch. Thus, if it's not, then
         *  we are updating incorrectly here.
         */
        assert(cpu->nextInstAddr(tid) == inst->instAddr());
        pc[tid] = cpu->thread[tid]->lastBranchPC;
    } else {
        pc[tid] = inst->pcState();
    }
    assert(inst->staticInst);
    advancePC(pc[tid], inst->staticInst);

    DPRINTF(InOrderFetchSeq, "[tid:%i]: Updating PCs due to Context Switch. "
            "Assigning PC: %s.\n", tid, pc[tid]);
}