inorder: check for interrupts each tick
src/cpu/inorder/resources/fetch_seq_unit.cc
/*
 * Copyright (c) 2007 MIPS Technologies, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Korey Sewell
 *
 */

#include "config/the_isa.hh"
#include "cpu/inorder/resources/fetch_seq_unit.hh"
#include "cpu/inorder/resource_pool.hh"
#include "debug/InOrderFetchSeq.hh"
#include "debug/InOrderStall.hh"

using namespace std;
using namespace TheISA;
using namespace ThePipeline;

FetchSeqUnit::FetchSeqUnit(std::string res_name, int res_id, int res_width,
                           int res_latency, InOrderCPU *_cpu,
                           ThePipeline::Params *params)
    : Resource(res_name, res_id, res_width, res_latency, _cpu),
      instSize(sizeof(MachInst))
{
    for (ThreadID tid = 0; tid < ThePipeline::MaxThreads; tid++) {
        pcValid[tid] = false;
        pcBlockStage[tid] = 0;

        //@todo: Use CPU's squashSeqNum here instead of maintaining our own
        //       state
        squashSeqNum[tid] = (InstSeqNum)-1;
        lastSquashCycle[tid] = 0;
    }
}

FetchSeqUnit::~FetchSeqUnit()
{
    delete [] resourceEvent;
}

void
FetchSeqUnit::init()
{
    resourceEvent = new FetchSeqEvent[width];

    for (int i = 0; i < width; i++) {
        reqs[i] = new ResourceRequest(this);
    }

    initSlots();
}

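// Service one fetch-sequence request per slot. AssignNextPC hands the
// instruction its PC and sequence number and advances the thread's fetch PC;
// UpdateTargetPC redirects fetch once a control instruction's target is
// known, or blocks the stage until an unpredicted return resolves.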
void
FetchSeqUnit::execute(int slot_num)
{
    ResourceRequest* fs_req = reqs[slot_num];
    DynInstPtr inst = fs_req->inst;
    ThreadID tid = inst->readTid();
    int stage_num = fs_req->getStageNum();

    switch (fs_req->cmd)
    {
      case AssignNextPC:
        {
            DPRINTF(InOrderFetchSeq, "[tid:%i]: Current PC is %s\n", tid,
                    pc[tid]);

            if (pcValid[tid]) {
                inst->pcState(pc[tid]);
                inst->setMemAddr(pc[tid].instAddr());

                // Advance to next PC (typically PC + 4)
                pc[tid].advance();

                inst->setSeqNum(cpu->getAndIncrementInstSeq(tid));

                DPRINTF(InOrderFetchSeq, "[tid:%i]: Assigning [sn:%i] to "
                        "PC %s\n", tid, inst->seqNum, inst->pcState());

                fs_req->done();
            } else {
                DPRINTF(InOrderStall, "STALL: [tid:%i]: NPC not valid\n", tid);
                fs_req->done(false);
            }
        }
        break;

      case UpdateTargetPC:
        {
            assert(!inst->isCondDelaySlot() &&
                   "Not Handling Conditional Delay Slot");

            if (inst->isControl()) {
                if (inst->isReturn() && !inst->predTaken()) {
                    // If it's a return, then we must wait for the resolved
                    // address. The predictor will mark a return as
                    // "not taken" if there is no RAS entry.
                    DPRINTF(InOrderFetchSeq, "[tid:%d]: Setting block signal "
                            "for stage %i.\n",
                            tid, stage_num);
                    cpu->pipelineStage[stage_num]->
                        toPrevStages->stageBlock[stage_num][tid] = true;
                    pcValid[tid] = false;
                    pcBlockStage[tid] = stage_num;
                } else if (inst->predTaken()) {
                    // Taken control instruction: squash younger instructions
                    // and redirect fetch to the predicted target.
                    inst->setSquashInfo(stage_num);
                    setupSquash(inst, stage_num, tid);

                    DPRINTF(InOrderFetchSeq, "[tid:%i] Setting up squash to "
                            "start from stage %i, after [sn:%i].\n",
                            tid, stage_num, inst->squashSeqNum);
                }
            } else {
                DPRINTF(InOrderFetchSeq, "[tid:%i]: [sn:%i]: Ignoring branch "
                        "target update since this is not a control "
                        "instruction.\n", tid, inst->seqNum);
            }

            fs_req->done();
        }
        break;

      default:
        fatal("Unrecognized command to %s", resName);
    }
}

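// Recompute the fetch PC after a squash. If the squashing instruction
// faulted, fetch stays blocked until the trap installs the new PC. Otherwise,
// control instructions redirect fetch to their predicted target (with the
// delay-slot check below working out the next PC on ISAs with branch delay
// slots), and non-control instructions simply advance past the squash point.
// Any stage blocked waiting on a valid PC is then unblocked.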
void
FetchSeqUnit::squash(DynInstPtr inst, int squash_stage,
                     InstSeqNum squash_seq_num, ThreadID tid)
{
    DPRINTF(InOrderFetchSeq, "[tid:%i]: Updating due to squash from %s (%s) "
            "stage %i.\n", tid, inst->instName(), inst->pcState(),
            squash_stage);

    if (lastSquashCycle[tid] == curTick() &&
        squashSeqNum[tid] <= squash_seq_num) {
        DPRINTF(InOrderFetchSeq, "[tid:%i]: Ignoring squash from stage %i, "
                "since there is an outstanding squash that is older.\n",
                tid, squash_stage);
    } else {
        squashSeqNum[tid] = squash_seq_num;
        lastSquashCycle[tid] = curTick();

        if (inst->staticInst) {
            if (inst->fault != NoFault) {
                // A trap caused this fault and will update the PC state
                // when it is done trapping.
                DPRINTF(InOrderFetchSeq, "[tid:%i] Blocking due to fault @ "
                        "[sn:%i]. %s %s\n", tid, inst->seqNum,
                        inst->instName(), inst->pcState());
                pcValid[tid] = false;
            } else {
                TheISA::PCState nextPC;
                assert(inst->staticInst);
                if (inst->isControl()) {
                    nextPC = inst->readPredTarg();

                    // If we are already fetching this PC, then advance to
                    // the next PC. This should handle ISAs with delay slots
                    // and annulled delay slots, to figure out which PC to
                    // fetch next after a mispredict.
                    DynInstPtr bdelay_inst = NULL;
                    ListIt bdelay_it;
                    if (inst->onInstList) {
                        bdelay_it = inst->getInstListIt();
                        bdelay_it++;
                    } else {
                        InstSeqNum branch_delay_num = inst->seqNum + 1;
                        bdelay_it = cpu->findInst(branch_delay_num, tid);
                    }

                    if (bdelay_it != cpu->instList[tid].end()) {
                        bdelay_inst = (*bdelay_it);
                    }

                    if (bdelay_inst) {
                        DPRINTF(Resource, "Evaluating %s v. %s\n",
                                bdelay_inst->pc, nextPC);

                        if (bdelay_inst->pc.instAddr() == nextPC.instAddr()) {
                            bdelay_inst->pc = nextPC;
                            advancePC(nextPC, inst->staticInst);
                            DPRINTF(Resource, "Advanced PC to %s\n", nextPC);
                        }
                    }
                } else {
                    nextPC = inst->pcState();
                    advancePC(nextPC, inst->staticInst);
                }

                DPRINTF(InOrderFetchSeq, "[tid:%i]: Setting PC to %s.\n",
                        tid, nextPC);
                pc[tid] = nextPC;

                // Unblock any stages waiting for this PC to be updated.
                if (!pcValid[tid]) {
                    DPRINTF(InOrderFetchSeq, "[tid:%d]: Setting unblock "
                            "signal for stage %i.\n",
                            tid, pcBlockStage[tid]);

                    // Need to use "fromNextStages" instead of "toPrevStages"
                    // because the time buffer will already have advanced in
                    // the tick function, and this squash happens after the
                    // tick.
                    cpu->pipelineStage[pcBlockStage[tid]]->
                        fromNextStages->stageUnblock[pcBlockStage[tid]][tid] = true;
                }

                pcValid[tid] = true;
            }
        }
    }

    Resource::squash(inst, squash_stage, squash_seq_num, tid);
}

FetchSeqUnit::FetchSeqEvent::FetchSeqEvent()
    : ResourceEvent()
{ }

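// Startup event: seed each thread's fetch PC from the CPU's current PC state
// and mark it valid so fetch can begin.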
void
FetchSeqUnit::FetchSeqEvent::process()
{
    FetchSeqUnit* fs_res = dynamic_cast<FetchSeqUnit*>(resource);
    assert(fs_res);

    for (int i = 0; i < MaxThreads; i++) {
        fs_res->pc[i] = fs_res->cpu->pcState(i);
        DPRINTF(InOrderFetchSeq, "[tid:%i]: Setting PC: %s.\n",
                i, fs_res->pc[i]);

        fs_res->pcValid[i] = true;
    }
}

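// Make a thread fetchable: pick up its architectural PC from the CPU and
// add it to the fetch priority list.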
void
FetchSeqUnit::activateThread(ThreadID tid)
{
    pcValid[tid] = true;

    pc[tid] = cpu->pcState(tid);

    cpu->fetchPriorityList.push_back(tid);

    DPRINTF(InOrderFetchSeq, "[tid:%i]: Reading PC: %s.\n",
            tid, pc[tid]);
}

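// Stop fetching for a thread: invalidate its PC, reset its squash
// bookkeeping, and drop it from the fetch priority list.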
void
FetchSeqUnit::deactivateThread(ThreadID tid)
{
    pcValid[tid] = false;
    pcBlockStage[tid] = 0;

    squashSeqNum[tid] = (InstSeqNum)-1;
    lastSquashCycle[tid] = 0;

    list<ThreadID>::iterator thread_it = find(cpu->fetchPriorityList.begin(),
                                              cpu->fetchPriorityList.end(),
                                              tid);

    if (thread_it != cpu->fetchPriorityList.end())
        cpu->fetchPriorityList.erase(thread_it);
}

void
FetchSeqUnit::suspendThread(ThreadID tid)
{
    deactivateThread(tid);
}

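// After a trap, resume fetching at the PC the trap left in the CPU's PC
// state.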
void
FetchSeqUnit::trap(Fault fault, ThreadID tid, DynInstPtr inst)
{
    pcValid[tid] = true;
    pc[tid] = cpu->pcState(tid);
    DPRINTF(Fault, "[tid:%i]: Trap updating to PC: "
            "%s.\n", tid, pc[tid]);
    DPRINTF(InOrderFetchSeq, "[tid:%i]: Trap updating to PC: "
            "%s.\n", tid, pc[tid]);
}

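// Restart fetch after a context switch: resume at the instruction following
// the one that triggered the switch, using the last branch's PC as the base
// if the last graduated instruction was a branch.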
void
FetchSeqUnit::updateAfterContextSwitch(DynInstPtr inst, ThreadID tid)
{
    pcValid[tid] = true;

    if (cpu->thread[tid]->lastGradIsBranch) {
        /** This function assumes that the instruction causing the context
         *  switch was right after the branch. Thus, if it's not, then
         *  we are updating incorrectly here.
         */
        assert(cpu->nextInstAddr(tid) == inst->instAddr());
        pc[tid] = cpu->thread[tid]->lastBranchPC;
    } else {
        pc[tid] = inst->pcState();
    }
    assert(inst->staticInst);
    advancePC(pc[tid], inst->staticInst);

    DPRINTF(InOrderFetchSeq, "[tid:%i]: Updating PCs due to Context Switch. "
            "Assigning PC: %s.\n", tid, pc[tid]);
}