inorder: implement separate fetch unit
src/cpu/inorder/resources/fetch_unit.cc
/*
 * Copyright (c) 2011 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Korey Sewell
 *
 */

#include <list>
#include <vector>

#include "arch/isa_traits.hh"
#include "arch/locked_mem.hh"
#include "arch/predecoder.hh"
#include "arch/utility.hh"
#include "config/the_isa.hh"
#include "cpu/inorder/cpu.hh"
#include "cpu/inorder/pipeline_traits.hh"
#include "cpu/inorder/resource_pool.hh"
#include "cpu/inorder/resources/fetch_unit.hh"
#include "mem/request.hh"

using namespace std;
using namespace TheISA;
using namespace ThePipeline;

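// FetchUnit specializes CacheUnit for instruction fetch; construction just
// forwards the resource parameters to the CacheUnit base class.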
FetchUnit::FetchUnit(string res_name, int res_id, int res_width,
                     int res_latency, InOrderCPU *_cpu,
                     ThePipeline::Params *params)
    : CacheUnit(res_name, res_id, res_width, res_latency, _cpu,
                params)
{ }

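// Reserve a resource slot for a fetch access. A thread blocked on the ITLB
// gets no slot, and the instruction's fetch address must already be set.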
int
FetchUnit::getSlot(DynInstPtr inst)
{
    if (tlbBlocked[inst->threadNumber]) {
        return -1;
    }

    if (!inst->validMemAddr()) {
        panic("[tid:%i][sn:%i] Mem. Addr. must be set before requesting "
              "cache access\n", inst->readTid(), inst->seqNum);
    }

    int new_slot = Resource::getSlot(inst);

    if (new_slot == -1)
        return -1;

    inst->memTime = curTick();
    return new_slot;
}

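// Drop the instruction's memory address so the fetch is no longer tracked
// as an outstanding address dependency.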
void
FetchUnit::removeAddrDependency(DynInstPtr inst)
{
    inst->unsetMemAddr();
}

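// Build the resource request for a scheduled fetch. Only InitiateFetch is
// expected here; it is issued to the cache as a ReadReq.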
ResReqPtr
FetchUnit::getRequest(DynInstPtr inst, int stage_num, int res_idx,
                      int slot_num, unsigned cmd)
{
    ScheduleEntry* sched_entry = inst->resSched.top();

    if (!inst->validMemAddr()) {
        panic("Mem. Addr. must be set before requesting cache access\n");
    }

    MemCmd::Command pkt_cmd;

    switch (sched_entry->cmd)
    {
      case InitiateFetch:
        pkt_cmd = MemCmd::ReadReq;

        DPRINTF(InOrderCachePort,
                "[tid:%i]: Fetch request from [sn:%i] for addr %08p\n",
                inst->readTid(), inst->seqNum, inst->getMemAddr());
        break;

      default:
        panic("%i: Unexpected request type (%i) to %s", curTick(),
              sched_entry->cmd, name());
    }

    return new CacheRequest(this, inst, stage_num, id, slot_num,
                            sched_entry->cmd, 0, pkt_cmd,
                            0/*flags*/, this->cpu->readCpuId(),
                            inst->resSched.top()->idx);
}

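// Allocate the memory Request that the cache access will use and attach it
// to both the instruction and the cache request.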
void
FetchUnit::setupMemRequest(DynInstPtr inst, CacheReqPtr cache_req,
                           int acc_size, int flags)
{
    ThreadID tid = inst->readTid();
    Addr aligned_addr = inst->getMemAddr();

    inst->fetchMemReq =
        new Request(tid, aligned_addr, acc_size, flags,
                    inst->instAddr(), cpu->readCpuId(), tid);

    cache_req->memReq = inst->fetchMemReq;
}

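// Perform the fetch work scheduled for this slot: InitiateFetch translates
// the fetch address through the ITLB and starts the cache access, while
// CompleteFetch retires the access once the cache has responded.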
void
FetchUnit::execute(int slot_num)
{
    CacheReqPtr cache_req = dynamic_cast<CacheReqPtr>(reqMap[slot_num]);
    assert(cache_req);

    if (cachePortBlocked) {
        DPRINTF(InOrderCachePort, "Cache Port Blocked. Cannot Access\n");
        cache_req->setCompleted(false);
        return;
    }

    DynInstPtr inst = cache_req->inst;
#if TRACING_ON
    ThreadID tid = inst->readTid();
    int seq_num = inst->seqNum;
    std::string acc_type = "fetch";
#endif

    cache_req->fault = NoFault;

    switch (cache_req->cmd)
    {
      case InitiateFetch:
        {
            doTLBAccess(inst, cache_req, cacheBlkSize, 0, TheISA::TLB::Execute);

            if (cache_req->fault == NoFault) {
                DPRINTF(InOrderCachePort,
                        "[tid:%u]: Initiating fetch access to %s for addr. %08p\n",
                        tid, name(), cache_req->inst->getMemAddr());

                cache_req->reqData = new uint8_t[cacheBlkSize];

                inst->setCurResSlot(slot_num);

                doCacheAccess(inst);
            }

            break;
        }

      case CompleteFetch:
        if (cache_req->isMemAccComplete()) {
            DPRINTF(InOrderCachePort,
                    "[tid:%i]: Completing Fetch Access for [sn:%i]\n",
                    tid, inst->seqNum);

            DPRINTF(InOrderCachePort, "[tid:%i]: Instruction [sn:%i] is: %s\n",
                    tid, seq_num,
                    inst->staticInst->disassemble(inst->instAddr()));

            removeAddrDependency(inst);

            delete cache_req->dataPkt;

            // Do not stall and switch threads for fetch... for now..
            // TODO: We need to detect cache misses for latencies > 1
            // cache_req->setMemStall(false);

            cache_req->done();
        } else {
            DPRINTF(InOrderCachePort,
                    "[tid:%i]: [sn:%i]: Unable to Complete Fetch Access\n",
                    tid, inst->seqNum);
            DPRINTF(InOrderStall,
                    "STALL: [tid:%i]: Fetch miss from %08p\n",
                    tid, cache_req->inst->instAddr());
            cache_req->setCompleted(false);
            //cache_req->setMemStall(true);
        }
        break;

      default:
        fatal("Unrecognized command to %s", resName);
    }
}

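// Handle the cache's response to a fetch. Squashed accesses are discarded;
// otherwise the returned bytes are fed to the predecoder to form the
// instruction, and the waiting pipeline stage (and, under the
// switch-on-cache-miss model, a sleeping thread) is woken up.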
void
FetchUnit::processCacheCompletion(PacketPtr pkt)
{
    // Cast to correct packet type
    CacheReqPacket* cache_pkt = dynamic_cast<CacheReqPacket*>(pkt);

    assert(cache_pkt);

    if (cache_pkt->cacheReq->isSquashed()) {
        DPRINTF(InOrderCachePort,
                "Ignoring completion of squashed access, [tid:%i] [sn:%i]\n",
                cache_pkt->cacheReq->getInst()->readTid(),
                cache_pkt->cacheReq->getInst()->seqNum);
        DPRINTF(RefCount,
                "Ignoring completion of squashed access, [tid:%i] [sn:%i]\n",
                cache_pkt->cacheReq->getTid(),
                cache_pkt->cacheReq->seqNum);

        cache_pkt->cacheReq->done();
        delete cache_pkt;

        cpu->wakeCPU();

        return;
    }

    DPRINTF(InOrderCachePort,
            "[tid:%u]: [sn:%i]: Waking from cache access to addr. %08p\n",
            cache_pkt->cacheReq->getInst()->readTid(),
            cache_pkt->cacheReq->getInst()->seqNum,
            cache_pkt->cacheReq->getInst()->getMemAddr());

    // Cast to correct request type
    CacheRequest *cache_req = dynamic_cast<CacheReqPtr>(
        findRequest(cache_pkt->cacheReq->getInst(), cache_pkt->instIdx));

    if (!cache_req) {
        panic("[tid:%u]: [sn:%i]: Can't find slot for cache access to "
              "addr. %08p\n", cache_pkt->cacheReq->getInst()->readTid(),
              cache_pkt->cacheReq->getInst()->seqNum,
              cache_pkt->cacheReq->getInst()->getMemAddr());
    }

    assert(cache_req);

    // Get resource request info
    unsigned stage_num = cache_req->getStageNum();
    DynInstPtr inst = cache_req->inst;
    ThreadID tid = cache_req->inst->readTid();

    if (!cache_req->isSquashed()) {
        assert(inst->resSched.top()->cmd == CompleteFetch);

        DPRINTF(InOrderCachePort,
                "[tid:%u]: [sn:%i]: Processing fetch access\n",
                tid, inst->seqNum);

        // NOTE: This is only allowing a thread to fetch one line
        //       at a time. Re-examine when/if prefetching
        //       gets implemented.
        // memcpy(fetchData[tid], cache_pkt->getPtr<uint8_t>(),
        //        cache_pkt->getSize());

        // Get the instruction from the array of the cache line.
        // @todo: update this
        ExtMachInst ext_inst;
        StaticInstPtr staticInst = NULL;
        TheISA::PCState instPC = inst->pcState();
        MachInst mach_inst =
            TheISA::gtoh(*reinterpret_cast<TheISA::MachInst *>
                         (cache_pkt->getPtr<uint8_t>()));

        predecoder.setTC(cpu->thread[tid]->getTC());
        predecoder.moreBytes(instPC, inst->instAddr(), mach_inst);
        ext_inst = predecoder.getExtMachInst(instPC);
        inst->pcState(instPC);

        inst->setMachInst(ext_inst);

        // Set Up More TraceData info
        if (inst->traceData) {
            inst->traceData->setStaticInst(inst->staticInst);
            inst->traceData->setPC(instPC);
        }

        cache_req->setMemAccPending(false);
        cache_req->setMemAccCompleted();

        if (cache_req->isMemStall() &&
            cpu->threadModel == InOrderCPU::SwitchOnCacheMiss) {
            DPRINTF(InOrderCachePort, "[tid:%u] Waking up from Cache Miss.\n",
                    tid);

            cpu->activateContext(tid);

            DPRINTF(ThreadModel, "Activating [tid:%i] after return from "
                    "cache miss.\n", tid);
        }

        // Wake up the CPU (if it went to sleep and was waiting on this
        // completion event).
        cpu->wakeCPU();

        DPRINTF(Activity, "[tid:%u] Activating %s due to cache completion\n",
                tid, cpu->pipelineStage[stage_num]->name());

        cpu->switchToActive(stage_num);
    } else {
        DPRINTF(InOrderCachePort,
                "[tid:%u] Miss on block @ %08p completed, but squashed\n",
                tid, cache_req->inst->instAddr());
        cache_req->setMemAccCompleted();
    }
}

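// Squashing is delegated entirely to the base CacheUnit.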
void
FetchUnit::squash(DynInstPtr inst, int stage_num,
                  InstSeqNum squash_seq_num, ThreadID tid)
{
    CacheUnit::squash(inst, stage_num, squash_seq_num, tid);
}