inorder: clear fetchbuffer on traps
src/cpu/inorder/resources/fetch_unit.cc
/*
 * Copyright (c) 2011 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Korey Sewell
 *
 */

#include <list>
#include <vector>

#include "arch/isa_traits.hh"
#include "arch/locked_mem.hh"
#include "arch/predecoder.hh"
#include "arch/utility.hh"
#include "config/the_isa.hh"
#include "cpu/inorder/resources/cache_unit.hh"
#include "cpu/inorder/resources/fetch_unit.hh"
#include "cpu/inorder/cpu.hh"
#include "cpu/inorder/pipeline_traits.hh"
#include "cpu/inorder/resource_pool.hh"
#include "debug/Activity.hh"
#include "debug/InOrderCachePort.hh"
#include "debug/InOrderStall.hh"
#include "debug/RefCount.hh"
#include "debug/ThreadModel.hh"
#include "mem/request.hh"

using namespace std;
using namespace TheISA;
using namespace ThePipeline;

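// The fetch unit extends the basic cache unit with a small buffer of
// fetched cache blocks; each hardware thread gets its own predecoder.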
FetchUnit::FetchUnit(string res_name, int res_id, int res_width,
                     int res_latency, InOrderCPU *_cpu,
                     ThePipeline::Params *params)
    : CacheUnit(res_name, res_id, res_width, res_latency, _cpu, params),
      instSize(sizeof(TheISA::MachInst)), fetchBuffSize(params->fetchBuffSize)
{
    for (int tid = 0; tid < MaxThreads; tid++)
        predecoder[tid] = new Predecoder(NULL);
}

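// Free any cache blocks still held in the fetch buffer or waiting on
// the pending-fetch list when the unit is torn down.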
FetchUnit::~FetchUnit()
{
    std::list<FetchBlock*>::iterator fetch_it = fetchBuffer.begin();
    std::list<FetchBlock*>::iterator end_it = fetchBuffer.end();
    while (fetch_it != end_it) {
        delete [] (*fetch_it)->block;
        delete *fetch_it;
        fetch_it++;
    }
    fetchBuffer.clear();

    std::list<FetchBlock*>::iterator pend_it = pendingFetch.begin();
    std::list<FetchBlock*>::iterator pend_end = pendingFetch.end();
    while (pend_it != pend_end) {
        if ((*pend_it)->block) {
            delete [] (*pend_it)->block;
        }

        delete *pend_it;
        pend_it++;
    }
    pendingFetch.clear();
}

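// Pull the machine instruction for "inst" out of an already-fetched
// cache block and run it through the thread's predecoder, leaving the
// resulting ExtMachInst (and updated PC state) on the instruction.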
void
FetchUnit::createMachInst(std::list<FetchBlock*>::iterator fetch_it,
                          DynInstPtr inst)
{
    ExtMachInst ext_inst;
    Addr block_addr = cacheBlockAlign(inst->getMemAddr());
    Addr fetch_addr = inst->getMemAddr();
    unsigned fetch_offset = (fetch_addr - block_addr) / instSize;
    ThreadID tid = inst->readTid();
    TheISA::PCState instPC = inst->pcState();

    DPRINTF(InOrderCachePort, "Creating instruction [sn:%i] w/fetch data @"
            "addr:%08p block:%08p\n", inst->seqNum, fetch_addr, block_addr);

    assert((*fetch_it)->valid);

    TheISA::MachInst *fetchInsts =
        reinterpret_cast<TheISA::MachInst *>((*fetch_it)->block);

    MachInst mach_inst =
        TheISA::gtoh(fetchInsts[fetch_offset]);

    predecoder[tid]->setTC(cpu->thread[tid]->getTC());
    predecoder[tid]->moreBytes(instPC, inst->instAddr(), mach_inst);
    assert(predecoder[tid]->extMachInstReady());
    ext_inst = predecoder[tid]->getExtMachInst(instPC);

    inst->pcState(instPC);
    inst->setMachInst(ext_inst);
}

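// Fetch requests track only the instruction's own memory address, so
// removing the address dependency amounts to clearing that address.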
void
FetchUnit::removeAddrDependency(DynInstPtr inst)
{
    inst->unsetMemAddr();
}

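// Build the cache request that will carry this instruction's fetch
// through the cache port; only InitiateFetch entries are expected here.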
ResReqPtr
FetchUnit::getRequest(DynInstPtr inst, int stage_num, int res_idx,
                      int slot_num, unsigned cmd)
{
    ScheduleEntry* sched_entry = *inst->curSkedEntry;
    CacheRequest* cache_req = dynamic_cast<CacheRequest*>(reqs[slot_num]);

    if (!inst->validMemAddr()) {
        panic("Mem. Addr. must be set before requesting cache access\n");
    }

    assert(sched_entry->cmd == InitiateFetch);

    DPRINTF(InOrderCachePort,
            "[tid:%i]: Fetch request from [sn:%i] for addr %08p\n",
            inst->readTid(), inst->seqNum, inst->getMemAddr());

    cache_req->setRequest(inst, stage_num, id, slot_num,
                          sched_entry->cmd, MemCmd::ReadReq,
                          inst->curSkedEntry->idx);

    return cache_req;
}

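// Allocate the memory-system Request for a fetch if one does not exist
// yet; fetches are always issued for a full, cache-block-aligned line.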
void
FetchUnit::setupMemRequest(DynInstPtr inst, CacheReqPtr cache_req,
                           int acc_size, int flags)
{
    ThreadID tid = inst->readTid();
    Addr aligned_addr = cacheBlockAlign(inst->getMemAddr());
    if (cache_req->memReq == NULL) {
        cache_req->memReq =
            new Request(tid, aligned_addr, acc_size, flags,
                        inst->instAddr(), cpu->readCpuId(), tid);
        DPRINTF(InOrderCachePort, "[sn:%i] Created memReq @%x, ->%x\n",
                inst->seqNum, &cache_req->memReq, cache_req->memReq);
    }
}

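// Search a block list (fetch buffer or pending fetches) for an entry
// matching this address space and block-aligned address; returns end()
// if no match is found.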
std::list<FetchUnit::FetchBlock*>::iterator
FetchUnit::findBlock(std::list<FetchBlock*> &fetch_blocks, int asid,
                     Addr block_addr)
{
    std::list<FetchBlock*>::iterator fetch_it = fetch_blocks.begin();
    std::list<FetchBlock*>::iterator end_it = fetch_blocks.end();

    while (fetch_it != end_it) {
        if ((*fetch_it)->asid == asid &&
            (*fetch_it)->addr == block_addr) {
            return fetch_it;
        }

        fetch_it++;
    }

    return fetch_it;
}

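// Pick a fetch-buffer block that can be evicted, i.e. one with no
// instructions still waiting on it; returns end() if every block is
// still in use.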
std::list<FetchUnit::FetchBlock*>::iterator
FetchUnit::findReplacementBlock()
{
    std::list<FetchBlock*>::iterator fetch_it = fetchBuffer.begin();
    std::list<FetchBlock*>::iterator end_it = fetchBuffer.end();

    while (fetch_it != end_it) {
        if ((*fetch_it)->cnt == 0) {
            return fetch_it;
        } else {
            DPRINTF(InOrderCachePort, "Block %08p has %i insts pending.\n",
                    (*fetch_it)->addr, (*fetch_it)->cnt);
        }
        fetch_it++;
    }

    return fetch_it;
}

void
FetchUnit::markBlockUsed(std::list<FetchBlock*>::iterator block_it)
{
    // Move block from whatever location it is in fetch buffer
    // to the back (represents most-recently-used location)
    if (block_it != fetchBuffer.end()) {
        FetchBlock *mru_blk = *block_it;
        fetchBuffer.erase(block_it);
        fetchBuffer.push_back(mru_blk);
    }
}

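// Count how many fetch-buffer blocks still have instructions waiting
// on them; used to decide whether a new fetch can be started.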
int
FetchUnit::blocksInUse()
{
    std::list<FetchBlock*>::iterator fetch_it = fetchBuffer.begin();
    std::list<FetchBlock*>::iterator end_it = fetchBuffer.end();

    int cnt = 0;
    while (fetch_it != end_it) {
        if ((*fetch_it)->cnt > 0)
            cnt++;

        fetch_it++;
    }

    return cnt;
}

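// Release every block held in the fetch buffer, e.g. after a trap has
// squashed all in-flight fetches.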
void
FetchUnit::clearFetchBuffer()
{
    std::list<FetchBlock*>::iterator fetch_it = fetchBuffer.begin();
    std::list<FetchBlock*>::iterator end_it = fetchBuffer.end();

    while (fetch_it != end_it) {
        if ((*fetch_it)->block) {
            delete [] (*fetch_it)->block;
        }
        delete *fetch_it;
        fetch_it++;
    }
    fetchBuffer.clear();
}

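// Service one scheduled fetch request. Fetch is split into two
// commands: InitiateFetch starts the ITLB/cache access for a block
// (or piggybacks on a block already buffered or in flight), and
// CompleteFetch turns the returned block into a decoded instruction.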
void
FetchUnit::execute(int slot_num)
{
    CacheReqPtr cache_req = dynamic_cast<CacheReqPtr>(reqs[slot_num]);
    assert(cache_req);

    if (cachePortBlocked && cache_req->cmd == InitiateFetch) {
        DPRINTF(InOrderCachePort, "Cache Port Blocked. Cannot Access\n");
        cache_req->done(false);
        return;
    }

    DynInstPtr inst = cache_req->inst;
    ThreadID tid = inst->readTid();
    Addr block_addr = cacheBlockAlign(inst->getMemAddr());
    int asid = cpu->asid[tid];

    if (inst->fault != NoFault) {
        DPRINTF(InOrderCachePort,
                "[tid:%i]: [sn:%i]: Detected %s fault @ %x. Forwarding to "
                "next stage.\n", tid, inst->seqNum, inst->fault->name(),
                cacheBlockAlign(inst->getMemAddr()));
        finishCacheUnitReq(inst, cache_req);
        return;
    }

    switch (cache_req->cmd)
    {
      case InitiateFetch:
        {
            // Check to see if we've already got this request buffered
            // or pending to be buffered
            bool do_fetch = true;
            int total_pending = pendingFetch.size() + blocksInUse();

            std::list<FetchBlock*>::iterator pending_it;
            pending_it = findBlock(pendingFetch, asid, block_addr);
            if (pending_it != pendingFetch.end()) {
                (*pending_it)->cnt++;
                do_fetch = false;

                DPRINTF(InOrderCachePort, "%08p is a pending fetch block "
                        "(pending:%i).\n", block_addr,
                        (*pending_it)->cnt);
            } else if (total_pending < fetchBuffSize) {
                std::list<FetchBlock*>::iterator buff_it;
                buff_it = findBlock(fetchBuffer, asid, block_addr);
                if (buff_it != fetchBuffer.end()) {
                    (*buff_it)->cnt++;
                    do_fetch = false;

                    DPRINTF(InOrderCachePort, "%08p is in fetch buffer "
                            "(pending:%i).\n", block_addr, (*buff_it)->cnt);
                }
            }

            if (!do_fetch) {
                DPRINTF(InOrderCachePort, "Inst. [sn:%i] marked to be filled "
                        "through fetch buffer.\n", inst->seqNum);
                cache_req->fetchBufferFill = true;
                cache_req->setCompleted(true);
                return;
            }

            // Check to see if there is room in the fetch buffer for this
            // instruction. If not, block this request.
            if (total_pending >= fetchBuffSize) {
                DPRINTF(InOrderCachePort,
                        "No room available in fetch buffer.\n");
                cache_req->done(false);
                return;
            }

            doTLBAccess(inst, cache_req, cacheBlkSize, Request::INST_FETCH,
                        TheISA::TLB::Execute);

            if (inst->fault == NoFault) {
                DPRINTF(InOrderCachePort,
                        "[tid:%u]: Initiating fetch access to %s for "
                        "addr:%#x (block:%#x)\n", tid, name(),
                        cache_req->inst->getMemAddr(), block_addr);

                cache_req->reqData = new uint8_t[cacheBlkSize];

                inst->setCurResSlot(slot_num);

                doCacheAccess(inst);

                if (cache_req->isMemAccPending()) {
                    pendingFetch.push_back(new FetchBlock(asid, block_addr));

                    // mark replacement block
                }
            }

            break;
        }

      case CompleteFetch:
        if (inst->fault != NoFault) {
            DPRINTF(InOrderCachePort,
                    "[tid:%i]: [sn:%i]: Detected %s fault @ %x. Forwarding to "
                    "next stage.\n", tid, inst->seqNum, inst->fault->name(),
                    inst->getMemAddr());
            finishCacheUnitReq(inst, cache_req);
            return;
        }

        if (cache_req->fetchBufferFill) {
            // Block this request if it depends on a previous fetch that
            // hasn't made it back yet
            std::list<FetchBlock*>::iterator fetch_it =
                findBlock(fetchBuffer, asid, block_addr);
            if (fetch_it == fetchBuffer.end()) {
                DPRINTF(InOrderCachePort, "%#x not available yet\n",
                        block_addr);
                cache_req->setCompleted(false);
                return;
            }

            // Make New Instruction
            createMachInst(fetch_it, inst);
            if (inst->traceData) {
                inst->traceData->setStaticInst(inst->staticInst);
                inst->traceData->setPC(inst->pcState());
            }

            // FetchBuffer Book-Keeping
            (*fetch_it)->cnt--;
            assert((*fetch_it)->cnt >= 0);
            markBlockUsed(fetch_it);

            cache_req->done();
            return;
        }

        if (cache_req->isMemAccComplete()) {
            if (fetchBuffer.size() >= fetchBuffSize) {
                // If there is no replacement block, then we'll just have
                // to wait till that gets cleared before satisfying the fetch
                // for this instruction
                std::list<FetchBlock*>::iterator repl_it =
                    findReplacementBlock();
                if (repl_it == fetchBuffer.end()) {
                    DPRINTF(InOrderCachePort, "Unable to find replacement block"
                            " and complete fetch.\n");
                    cache_req->setCompleted(false);
                    return;
                }

                delete [] (*repl_it)->block;
                delete *repl_it;
                fetchBuffer.erase(repl_it);
            }

            DPRINTF(InOrderCachePort,
                    "[tid:%i]: Completing Fetch Access for [sn:%i]\n",
                    tid, inst->seqNum);

            // Make New Instruction
            std::list<FetchBlock*>::iterator fetch_it =
                findBlock(pendingFetch, asid, block_addr);

            assert(fetch_it != pendingFetch.end());
            assert((*fetch_it)->valid);

            createMachInst(fetch_it, inst);
            if (inst->traceData) {
                inst->traceData->setStaticInst(inst->staticInst);
                inst->traceData->setPC(inst->pcState());
            }

            // Update instructions waiting on new fetch block
            FetchBlock *new_block = (*fetch_it);
            new_block->cnt--;
            assert(new_block->cnt >= 0);

            // Finally, update FetchBuffer w/Pending Block into the
            // MRU location
            pendingFetch.erase(fetch_it);
            fetchBuffer.push_back(new_block);

            DPRINTF(InOrderCachePort,
                    "[tid:%i]: Instruction [sn:%i] is: %s\n",
                    tid, inst->seqNum,
                    inst->staticInst->disassemble(inst->instAddr()));

            inst->unsetMemAddr();

            cache_req->done();
        } else {
            DPRINTF(InOrderCachePort,
                    "[tid:%i]: [sn:%i]: Unable to Complete Fetch Access\n",
                    tid, inst->seqNum);
            DPRINTF(InOrderStall,
                    "STALL: [tid:%i]: Fetch miss from %08p\n",
                    tid, cache_req->inst->instAddr());
            cache_req->setCompleted(false);
            // NOTE: For SwitchOnCacheMiss ThreadModel, we *don't* switch on
            // fetch miss, but we could ...
            // cache_req->setMemStall(true);
        }
        break;

      default:
        fatal("Unrecognized command to %s", resName);
    }
}

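// Called when the cache port returns a fetched block: copy the data
// into the matching pending-fetch entry, mark the access complete, and
// wake the CPU/stage waiting on it.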
void
FetchUnit::processCacheCompletion(PacketPtr pkt)
{
    // Cast to correct packet type
    // @todo: use pkt Sender state here to be consistent with other
    // cpu models
    CacheReqPacket* cache_pkt = dynamic_cast<CacheReqPacket*>(pkt);
    assert(cache_pkt);

    DPRINTF(InOrderCachePort, "Finished request for %x\n",
            cache_pkt->getAddr());

    if (processSquash(cache_pkt))
        return;

    Addr block_addr = cacheBlockAlign(cache_pkt->cacheReq->
                                      getInst()->getMemAddr());

    DPRINTF(InOrderCachePort,
            "[tid:%u]: [sn:%i]: Waking from fetch access to "
            "addr:%#x(phys:%#x), size:%i\n",
            cache_pkt->cacheReq->getInst()->readTid(),
            cache_pkt->cacheReq->getInst()->seqNum,
            block_addr, cache_pkt->getAddr(), cache_pkt->getSize());

    // Cast to correct request type
    CacheRequest *cache_req = dynamic_cast<CacheReqPtr>(
        findRequest(cache_pkt->cacheReq->getInst(), cache_pkt->instIdx));

    if (!cache_req) {
        panic("[tid:%u]: [sn:%i]: Can't find slot for fetch access to "
              "addr. %08p\n", cache_pkt->cacheReq->getInst()->readTid(),
              cache_pkt->cacheReq->getInst()->seqNum,
              block_addr);
    }

    // Get resource request info
    unsigned stage_num = cache_req->getStageNum();
    DynInstPtr inst = cache_req->inst;
    ThreadID tid = cache_req->inst->readTid();
    short asid = cpu->asid[tid];

    assert(!cache_req->isSquashed());
    assert(inst->curSkedEntry->cmd == CompleteFetch);

    DPRINTF(InOrderCachePort,
            "[tid:%u]: [sn:%i]: Processing fetch access for block %#x\n",
            tid, inst->seqNum, block_addr);

    std::list<FetchBlock*>::iterator pend_it = findBlock(pendingFetch, asid,
                                                         block_addr);
    assert(pend_it != pendingFetch.end());

    // Copy Data to pendingFetch queue...
    (*pend_it)->block = new uint8_t[cacheBlkSize];
    memcpy((*pend_it)->block, cache_pkt->getPtr<uint8_t>(), cacheBlkSize);
    (*pend_it)->valid = true;

    cache_req->setMemAccPending(false);
    cache_req->setMemAccCompleted();

    if (cache_req->isMemStall() &&
        cpu->threadModel == InOrderCPU::SwitchOnCacheMiss) {
        DPRINTF(InOrderCachePort, "[tid:%u] Waking up from Cache Miss.\n",
                tid);

        cpu->activateContext(tid);

        DPRINTF(ThreadModel, "Activating [tid:%i] after return from cache "
                "miss.\n", tid);
    }

    // Wake up the CPU (if it went to sleep and was waiting on this
    // completion event).
    cpu->wakeCPU();

    DPRINTF(Activity, "[tid:%u] Activating %s due to cache completion\n",
            tid, cpu->pipelineStage[stage_num]->name());

    cpu->switchToActive(stage_num);
}

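// A squashed fetch must also give up its claim on the block it was
// waiting for, so decrement the block's pending count and, for a
// pending fetch no one else wants, free the entry entirely.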
void
FetchUnit::squashCacheRequest(CacheReqPtr req_ptr)
{
    DynInstPtr inst = req_ptr->getInst();
    ThreadID tid = inst->readTid();
    Addr block_addr = cacheBlockAlign(inst->getMemAddr());
    int asid = cpu->asid[tid];

    // Check Fetch Buffer (or pending fetch) for this block and
    // update pending counts
    std::list<FetchBlock*>::iterator buff_it = findBlock(fetchBuffer,
                                                         asid,
                                                         block_addr);
    if (buff_it != fetchBuffer.end()) {
        (*buff_it)->cnt--;
        DPRINTF(InOrderCachePort, "[sn:%i] Removing Pending Access "
                "for Fetch Buffer block %08p (cnt=%i)\n", inst->seqNum,
                block_addr, (*buff_it)->cnt);
        assert((*buff_it)->cnt >= 0);
    } else {
        std::list<FetchBlock*>::iterator block_it = findBlock(pendingFetch,
                                                              asid,
                                                              block_addr);
        if (block_it != pendingFetch.end()) {
            (*block_it)->cnt--;
            DPRINTF(InOrderCachePort, "[sn:%i] Removing Pending Access "
                    "for Pending Buffer Block %08p (cnt=%i)\n",
                    inst->seqNum,
                    block_addr, (*block_it)->cnt);
            assert((*block_it)->cnt >= 0);
            if ((*block_it)->cnt == 0) {
                if ((*block_it)->block) {
                    delete [] (*block_it)->block;
                }
                delete *block_it;
                pendingFetch.erase(block_it);
            }
        }
    }

    CacheUnit::squashCacheRequest(req_ptr);
}

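// On a trap, reset the thread's predecoder, squash everything this
// unit still holds for the thread, and then drop the (now unused)
// fetch buffer contents so stale blocks cannot satisfy later fetches.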
void
FetchUnit::trap(Fault fault, ThreadID tid, DynInstPtr inst)
{
    //@todo: per thread?
    predecoder[tid]->reset();

    //@todo: squash using dummy inst seq num
    squash(NULL, NumStages - 1, 0, tid);

    //@todo: make sure no blocks are in use
    assert(blocksInUse() == 0);
    assert(pendingFetch.size() == 0);

    //@todo: clear pendingFetch and fetchBuffer
    clearFetchBuffer();
}