inorder: dont handle multiple faults on same cycle
[gem5.git] / src / cpu / inorder / resources / fetch_unit.cc
1 /*
2 * Copyright (c) 2011 The Regents of The University of Michigan
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 *
28 * Authors: Korey Sewell
29 *
30 */
31
32 #include <list>
33 #include <vector>
34
35 #include "arch/isa_traits.hh"
36 #include "arch/locked_mem.hh"
37 #include "arch/predecoder.hh"
38 #include "arch/utility.hh"
39 #include "config/the_isa.hh"
40 #include "cpu/inorder/resources/cache_unit.hh"
41 #include "cpu/inorder/resources/fetch_unit.hh"
42 #include "cpu/inorder/cpu.hh"
43 #include "cpu/inorder/pipeline_traits.hh"
44 #include "cpu/inorder/resource_pool.hh"
45 #include "debug/Activity.hh"
46 #include "debug/InOrderCachePort.hh"
47 #include "debug/InOrderStall.hh"
48 #include "debug/RefCount.hh"
49 #include "debug/ThreadModel.hh"
50 #include "mem/request.hh"
51
52 using namespace std;
53 using namespace TheISA;
54 using namespace ThePipeline;
55
// Construct the fetch unit as a specialization of CacheUnit.
// instSize is the size in bytes of one machine instruction word;
// fetchBuffSize (from the pipeline params) bounds how many cache
// blocks may be buffered/pending for fetch at once.
FetchUnit::FetchUnit(string res_name, int res_id, int res_width,
                     int res_latency, InOrderCPU *_cpu,
                     ThePipeline::Params *params)
    : CacheUnit(res_name, res_id, res_width, res_latency, _cpu, params),
      instSize(sizeof(TheISA::MachInst)), fetchBuffSize(params->fetchBuffSize),
      predecoder(NULL)
{ }
63
64 FetchUnit::~FetchUnit()
65 {
66 std::list<FetchBlock*>::iterator fetch_it = fetchBuffer.begin();
67 std::list<FetchBlock*>::iterator end_it = fetchBuffer.end();
68 while (fetch_it != end_it) {
69 delete (*fetch_it)->block;
70 delete *fetch_it;
71 fetch_it++;
72 }
73 fetchBuffer.clear();
74
75
76 std::list<FetchBlock*>::iterator pend_it = pendingFetch.begin();
77 std::list<FetchBlock*>::iterator pend_end = pendingFetch.end();
78 while (pend_it != pend_end) {
79 if ((*pend_it)->block) {
80 delete (*pend_it)->block;
81 }
82
83 delete *pend_it;
84 pend_it++;
85 }
86 pendingFetch.clear();
87 }
88
// Decode the raw fetched bytes for 'inst' out of the cache block at
// *fetch_it into an ExtMachInst, and install it (and the possibly
// updated PC state) on the instruction.
void
FetchUnit::createMachInst(std::list<FetchBlock*>::iterator fetch_it,
                          DynInstPtr inst)
{
    ExtMachInst ext_inst;
    Addr block_addr = cacheBlockAlign(inst->getMemAddr());
    Addr fetch_addr = inst->getMemAddr();
    // Index of this PC within the block, in instruction-sized units.
    unsigned fetch_offset = (fetch_addr - block_addr) / instSize;
    ThreadID tid = inst->readTid();
    TheISA::PCState instPC = inst->pcState();

    DPRINTF(InOrderCachePort, "Creating instruction [sn:%i] w/fetch data @"
            "addr:%08p block:%08p\n", inst->seqNum, fetch_addr, block_addr);

    // The block must already hold valid data from the cache.
    assert((*fetch_it)->valid);

    // Reinterpret the raw block bytes as an array of machine
    // instruction words and pull out the one for this PC (guest to
    // host byte order via gtoh).
    TheISA::MachInst *fetchInsts =
        reinterpret_cast<TheISA::MachInst *>((*fetch_it)->block);

    MachInst mach_inst =
        TheISA::gtoh(fetchInsts[fetch_offset]);

    // Feed the raw word through the ISA predecoder; it must be able to
    // produce a complete ExtMachInst from this single word here.
    predecoder.setTC(cpu->thread[tid]->getTC());
    predecoder.moreBytes(instPC, inst->instAddr(), mach_inst);
    assert(predecoder.extMachInstReady());
    ext_inst = predecoder.getExtMachInst(instPC);

    inst->pcState(instPC);
    inst->setMachInst(ext_inst);
}
120
// Clear the instruction's memory-address dependency; fetch tracks its
// outstanding work solely through the instruction's memory address.
void
FetchUnit::removeAddrDependency(DynInstPtr inst)
{
    inst->unsetMemAddr();
}
126
127 ResReqPtr
128 FetchUnit::getRequest(DynInstPtr inst, int stage_num, int res_idx,
129 int slot_num, unsigned cmd)
130 {
131 ScheduleEntry* sched_entry = *inst->curSkedEntry;
132 CacheRequest* cache_req = dynamic_cast<CacheRequest*>(reqs[slot_num]);
133
134 if (!inst->validMemAddr()) {
135 panic("Mem. Addr. must be set before requesting cache access\n");
136 }
137
138 assert(sched_entry->cmd == InitiateFetch);
139
140 DPRINTF(InOrderCachePort,
141 "[tid:%i]: Fetch request from [sn:%i] for addr %08p\n",
142 inst->readTid(), inst->seqNum, inst->getMemAddr());
143
144 cache_req->setRequest(inst, stage_num, id, slot_num,
145 sched_entry->cmd, MemCmd::ReadReq,
146 inst->curSkedEntry->idx);
147
148 return cache_req;
149 }
150
// Lazily allocate the memory-system Request object for this fetch.
// Fetches always target a full, cache-block-aligned block; if the
// request already exists (e.g. a retried access) it is reused as-is.
void
FetchUnit::setupMemRequest(DynInstPtr inst, CacheReqPtr cache_req,
                           int acc_size, int flags)
{
    ThreadID tid = inst->readTid();
    Addr aligned_addr = cacheBlockAlign(inst->getMemAddr());
    if (cache_req->memReq == NULL) {
        cache_req->memReq =
            new Request(tid, aligned_addr, acc_size, flags,
                        inst->instAddr(), cpu->readCpuId(), tid);
        // Note: prints the location of the pointer member (@) and the
        // pointer value (->) for debugging.
        DPRINTF(InOrderCachePort, "[sn:%i] Created memReq @%x, ->%x\n",
                inst->seqNum, &cache_req->memReq, cache_req->memReq);
    }
}
165
166 std::list<FetchUnit::FetchBlock*>::iterator
167 FetchUnit::findBlock(std::list<FetchBlock*> &fetch_blocks, int asid,
168 Addr block_addr)
169 {
170 std::list<FetchBlock*>::iterator fetch_it = fetch_blocks.begin();
171 std::list<FetchBlock*>::iterator end_it = fetch_blocks.end();
172
173 while (fetch_it != end_it) {
174 if ((*fetch_it)->asid == asid &&
175 (*fetch_it)->addr == block_addr) {
176 return fetch_it;
177 }
178
179 fetch_it++;
180 }
181
182 return fetch_it;
183 }
184
185 std::list<FetchUnit::FetchBlock*>::iterator
186 FetchUnit::findReplacementBlock()
187 {
188 std::list<FetchBlock*>::iterator fetch_it = fetchBuffer.begin();
189 std::list<FetchBlock*>::iterator end_it = fetchBuffer.end();
190
191 while (fetch_it != end_it) {
192 if ((*fetch_it)->cnt == 0) {
193 return fetch_it;
194 } else {
195 DPRINTF(InOrderCachePort, "Block %08p has %i insts pending.\n",
196 (*fetch_it)->addr, (*fetch_it)->cnt);
197 }
198 fetch_it++;
199 }
200
201 return fetch_it;
202 }
203
204 void
205 FetchUnit::markBlockUsed(std::list<FetchBlock*>::iterator block_it)
206 {
207 // Move block from whatever location it is in fetch buffer
208 // to the back (represents most-recently-used location)
209 if (block_it != fetchBuffer.end()) {
210 FetchBlock *mru_blk = *block_it;
211 fetchBuffer.erase(block_it);
212 fetchBuffer.push_back(mru_blk);
213 }
214 }
215
216 int
217 FetchUnit::blocksInUse()
218 {
219 std::list<FetchBlock*>::iterator fetch_it = fetchBuffer.begin();
220 std::list<FetchBlock*>::iterator end_it = fetchBuffer.end();
221
222 int cnt = 0;
223 while (fetch_it != end_it) {
224 if ((*fetch_it)->cnt > 0)
225 cnt++;
226
227 fetch_it++;
228 }
229
230 return cnt;
231 }
232
// Main per-cycle entry point for a fetch slot. Dispatches on the
// request's command:
//  - InitiateFetch: start (or piggyback on) a cache-block read.
//  - CompleteFetch: turn received block data into a decoded
//    instruction and update fetch-buffer bookkeeping.
// Requests that cannot make progress are marked not-done/incomplete so
// they retry on a later cycle.
void
FetchUnit::execute(int slot_num)
{
    CacheReqPtr cache_req = dynamic_cast<CacheReqPtr>(reqs[slot_num]);
    assert(cache_req);

    // Cannot start a new access while the cache port is blocked; retry
    // later.
    if (cachePortBlocked && cache_req->cmd == InitiateFetch) {
        DPRINTF(InOrderCachePort, "Cache Port Blocked. Cannot Access\n");
        cache_req->done(false);
        return;
    }

    DynInstPtr inst = cache_req->inst;
    ThreadID tid = inst->readTid();
    Addr block_addr = cacheBlockAlign(inst->getMemAddr());
    int asid = cpu->asid[tid];

    // A fault already recorded on this instruction (e.g. from a prior
    // TLB access) is forwarded to the next stage; no cache access is
    // made for it.
    if (inst->fault != NoFault) {
        DPRINTF(InOrderCachePort,
                "[tid:%i]: [sn:%i]: Detected %s fault @ %x. Forwarding to "
                "next stage.\n", tid, inst->seqNum, inst->fault->name(),
                cacheBlockAlign(inst->getMemAddr()));
        finishCacheUnitReq(inst, cache_req);
        return;
    }

    switch (cache_req->cmd)
    {
      case InitiateFetch:
        {
            // Check to see if we've already got this request buffered
            // or pending to be buffered
            bool do_fetch = true;
            int total_pending = pendingFetch.size() + blocksInUse();

            // If the block is already in flight, piggyback on it by
            // bumping its reference count instead of fetching again.
            std::list<FetchBlock*>::iterator pending_it;
            pending_it = findBlock(pendingFetch, asid, block_addr);
            if (pending_it != pendingFetch.end()) {
                (*pending_it)->cnt++;
                do_fetch = false;

                DPRINTF(InOrderCachePort, "%08p is a pending fetch block "
                        "(pending:%i).\n", block_addr,
                        (*pending_it)->cnt);
            } else if (total_pending < fetchBuffSize) {
                // Likewise if the block already sits in the fetch
                // buffer.
                std::list<FetchBlock*>::iterator buff_it;
                buff_it = findBlock(fetchBuffer, asid, block_addr);
                if (buff_it != fetchBuffer.end()) {
                    (*buff_it)->cnt++;
                    do_fetch = false;

                    DPRINTF(InOrderCachePort, "%08p is in fetch buffer "
                            "(pending:%i).\n", block_addr, (*buff_it)->cnt);
                }
            }

            // Block will be (or already is) available without a new
            // cache access; the CompleteFetch pass reads it from the
            // fetch buffer.
            if (!do_fetch) {
                DPRINTF(InOrderCachePort, "Inst. [sn:%i] marked to be filled "
                        "through fetch buffer.\n", inst->seqNum);
                cache_req->fetchBufferFill = true;
                cache_req->setCompleted(true);
                return;
            }

            // Check to see if there is room in the fetchbuffer for this instruction.
            // If not, block this request.
            if (total_pending >= fetchBuffSize) {
                DPRINTF(InOrderCachePort, "No room available in fetch buffer.\n");
                cache_req->done(false);
                return;
            }

            doTLBAccess(inst, cache_req, cacheBlkSize, Request::INST_FETCH, TheISA::TLB::Execute);

            // Only start the cache access if translation succeeded; a
            // TLB fault is caught by the fault check above on a later
            // call.
            if (inst->fault == NoFault) {
                DPRINTF(InOrderCachePort,
                        "[tid:%u]: Initiating fetch access to %s for "
                        "addr:%#x (block:%#x)\n", tid, name(),
                        cache_req->inst->getMemAddr(), block_addr);

                cache_req->reqData = new uint8_t[cacheBlkSize];

                inst->setCurResSlot(slot_num);

                doCacheAccess(inst);

                // Track the in-flight block so later fetches to the
                // same block can share it (cnt starts at 1 for this
                // instruction).
                if (cache_req->isMemAccPending()) {
                    pendingFetch.push_back(new FetchBlock(asid, block_addr));

                    // mark replacement block
                }
            }

            break;
        }

      case CompleteFetch:
        // NOTE(review): this mirrors the fault check performed above
        // before the switch, and looks unreachable on the same call --
        // presumably kept defensively; confirm before removing.
        if (inst->fault != NoFault) {
            DPRINTF(InOrderCachePort,
                    "[tid:%i]: [sn:%i]: Detected %s fault @ %x. Forwarding to "
                    "next stage.\n", tid, inst->seqNum, inst->fault->name(),
                    inst->getMemAddr());
            finishCacheUnitReq(inst, cache_req);
            return;
        }

        // This instruction is fed from the fetch buffer rather than
        // its own cache access.
        if (cache_req->fetchBufferFill) {
            // Block request if it's depending on a previous fetch, but it hasnt made it yet
            std::list<FetchBlock*>::iterator fetch_it = findBlock(fetchBuffer, asid, block_addr);
            if (fetch_it == fetchBuffer.end()) {
                DPRINTF(InOrderCachePort, "%#x not available yet\n",
                        block_addr);
                cache_req->setCompleted(false);
                return;
            }

            // Make New Instruction
            createMachInst(fetch_it, inst);
            if (inst->traceData) {
                inst->traceData->setStaticInst(inst->staticInst);
                inst->traceData->setPC(inst->pcState());
            }

            // FetchBuffer Book-Keeping
            (*fetch_it)->cnt--;
            assert((*fetch_it)->cnt >= 0);
            markBlockUsed(fetch_it);

            cache_req->done();
            return;
        }

        if (cache_req->isMemAccComplete()) {
            // Need a free slot in the fetch buffer to install the new
            // block; evict an unreferenced block if the buffer is full.
            if (fetchBuffer.size() >= fetchBuffSize) {
                // If there is no replacement block, then we'll just have
                // to wait till that gets cleared before satisfying the fetch
                // for this instruction
                std::list<FetchBlock*>::iterator repl_it =
                    findReplacementBlock();
                if (repl_it == fetchBuffer.end()) {
                    DPRINTF(InOrderCachePort, "Unable to find replacement block"
                            " and complete fetch.\n");
                    cache_req->setCompleted(false);
                    return;
                }

                delete [] (*repl_it)->block;
                delete *repl_it;
                fetchBuffer.erase(repl_it);
            }

            DPRINTF(InOrderCachePort,
                    "[tid:%i]: Completing Fetch Access for [sn:%i]\n",
                    tid, inst->seqNum);

            // Make New Instruction
            std::list<FetchBlock*>::iterator fetch_it =
                findBlock(pendingFetch, asid, block_addr);

            // The pending entry was pushed when the access was
            // initiated, and its data filled by processCacheCompletion.
            assert(fetch_it != pendingFetch.end());
            assert((*fetch_it)->valid);

            createMachInst(fetch_it, inst);
            if (inst->traceData) {
                inst->traceData->setStaticInst(inst->staticInst);
                inst->traceData->setPC(inst->pcState());
            }

            // Update instructions waiting on new fetch block
            FetchBlock *new_block = (*fetch_it);
            new_block->cnt--;
            assert(new_block->cnt >= 0);

            // Finally, update FetchBuffer w/Pending Block into the
            // MRU location
            pendingFetch.erase(fetch_it);
            fetchBuffer.push_back(new_block);

            DPRINTF(InOrderCachePort, "[tid:%i]: Instruction [sn:%i] is: %s\n",
                    tid, inst->seqNum,
                    inst->staticInst->disassemble(inst->instAddr()));

            inst->unsetMemAddr();

            cache_req->done();
        } else {
            DPRINTF(InOrderCachePort,
                    "[tid:%i]: [sn:%i]: Unable to Complete Fetch Access\n",
                    tid, inst->seqNum);
            DPRINTF(InOrderStall,
                    "STALL: [tid:%i]: Fetch miss from %08p\n",
                    tid, cache_req->inst->instAddr());
            cache_req->setCompleted(false);
            // NOTE: For SwitchOnCacheMiss ThreadModel, we *don't* switch on
            // fetch miss, but we could ...
            // cache_req->setMemStall(true);
        }
        break;

      default:
        fatal("Unrecognized command to %s", resName);
    }
}
437
// Callback run when the cache returns the data for a fetch access:
// copies the returned bytes into the matching pendingFetch entry,
// marks the request's memory access complete, and wakes the CPU/stage
// (and, for SwitchOnCacheMiss, the stalled thread) so CompleteFetch
// can proceed.
void
FetchUnit::processCacheCompletion(PacketPtr pkt)
{
    // Cast to correct packet type
    // @todo: use pkt Sender state here to be consistent with other
    // cpu models
    CacheReqPacket* cache_pkt = dynamic_cast<CacheReqPacket*>(pkt);
    assert(cache_pkt);

    DPRINTF(InOrderCachePort, "Finished request for %x\n",
            cache_pkt->getAddr());

    // Squashed accesses are cleaned up by processSquash; nothing more
    // to do for them here.
    if (processSquash(cache_pkt))
        return;

    Addr block_addr = cacheBlockAlign(cache_pkt->cacheReq->
                                      getInst()->getMemAddr());

    DPRINTF(InOrderCachePort,
            "[tid:%u]: [sn:%i]: Waking from fetch access to addr:%#x(phys:%#x), size:%i\n",
            cache_pkt->cacheReq->getInst()->readTid(),
            cache_pkt->cacheReq->getInst()->seqNum,
            block_addr, cache_pkt->getAddr(), cache_pkt->getSize());

    // Cast to correct request type
    CacheRequest *cache_req = dynamic_cast<CacheReqPtr>(
        findRequest(cache_pkt->cacheReq->getInst(), cache_pkt->instIdx));

    if (!cache_req) {
        panic("[tid:%u]: [sn:%i]: Can't find slot for fetch access to "
              "addr. %08p\n", cache_pkt->cacheReq->getInst()->readTid(),
              cache_pkt->cacheReq->getInst()->seqNum,
              block_addr);
    }

    // Get resource request info
    unsigned stage_num = cache_req->getStageNum();
    DynInstPtr inst = cache_req->inst;
    ThreadID tid = cache_req->inst->readTid();
    short asid = cpu->asid[tid];

    assert(!cache_req->isSquashed());
    assert(inst->curSkedEntry->cmd == CompleteFetch);

    DPRINTF(InOrderCachePort,
            "[tid:%u]: [sn:%i]: Processing fetch access for block %#x\n",
            tid, inst->seqNum, block_addr);

    // The pending entry for this block must exist; it was pushed when
    // the access was initiated in execute().
    std::list<FetchBlock*>::iterator pend_it = findBlock(pendingFetch, asid,
                                                         block_addr);
    assert(pend_it != pendingFetch.end());

    // Copy Data to pendingFetch queue...
    (*pend_it)->block = new uint8_t[cacheBlkSize];
    memcpy((*pend_it)->block, cache_pkt->getPtr<uint8_t>(), cacheBlkSize);
    (*pend_it)->valid = true;

    cache_req->setMemAccPending(false);
    cache_req->setMemAccCompleted();

    // Under the SwitchOnCacheMiss model, a thread switched out on this
    // miss can now be re-activated.
    if (cache_req->isMemStall() &&
        cpu->threadModel == InOrderCPU::SwitchOnCacheMiss) {
        DPRINTF(InOrderCachePort, "[tid:%u] Waking up from Cache Miss.\n",
                tid);

        cpu->activateContext(tid);

        DPRINTF(ThreadModel, "Activating [tid:%i] after return from cache"
                "miss.\n", tid);
    }

    // Wake up the CPU (if it went to sleep and was waiting on this
    // completion event).
    cpu->wakeCPU();

    DPRINTF(Activity, "[tid:%u] Activating %s due to cache completion\n",
            tid, cpu->pipelineStage[stage_num]->name());

    cpu->switchToActive(stage_num);
}
518
// Squash handling for a fetch request: drop this instruction's
// reference to its fetch block before deferring to the base-class
// squash. Buffered blocks are kept (they may serve future fetches);
// a pending block whose last reference goes away is discarded along
// with any data it already received.
void
FetchUnit::squashCacheRequest(CacheReqPtr req_ptr)
{
    DynInstPtr inst = req_ptr->getInst();
    ThreadID tid = inst->readTid();
    Addr block_addr = cacheBlockAlign(inst->getMemAddr());
    int asid = cpu->asid[tid];

    // Check Fetch Buffer (or pending fetch) for this block and
    // update pending counts
    std::list<FetchBlock*>::iterator buff_it = findBlock(fetchBuffer,
                                                         asid,
                                                         block_addr);
    if (buff_it != fetchBuffer.end()) {
        // Buffered block: just drop the reference; the block stays
        // cached for later use.
        (*buff_it)->cnt--;
        DPRINTF(InOrderCachePort, "[sn:%i] Removing Pending Access "
                "for Fetch Buffer block %08p (cnt=%i)\n", inst->seqNum,
                block_addr, (*buff_it)->cnt);
        assert((*buff_it)->cnt >= 0);
    } else {
        std::list<FetchBlock*>::iterator block_it = findBlock(pendingFetch,
                                                              asid,
                                                              block_addr);
        if (block_it != pendingFetch.end()) {
            (*block_it)->cnt--;
            DPRINTF(InOrderCachePort, "[sn:%i] Removing Pending Access "
                    "for Pending Buffer Block %08p (cnt=%i)\n",
                    inst->seqNum,
                    block_addr, (*block_it)->cnt);
            assert((*block_it)->cnt >= 0);
            // Last reference gone: free the in-flight block entry (its
            // data pointer may still be NULL if the access has not
            // completed yet).
            if ((*block_it)->cnt == 0) {
                if ((*block_it)->block) {
                    delete [] (*block_it)->block;
                }
                delete *block_it;
                pendingFetch.erase(block_it);
            }
        }
    }

    CacheUnit::squashCacheRequest(req_ptr);
}
561
// A trap abandons the current fetch stream, so reset the predecoder's
// internal state before fetching resumes.
void
FetchUnit::trap(Fault fault, ThreadID tid, DynInstPtr inst)
{
    //@todo: per thread?
    predecoder.reset();
}