inorder: implement trap handling
[gem5.git] / src / cpu / inorder / resources / fetch_unit.cc
/*
 * Copyright (c) 2011 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Korey Sewell
 *
 */

#include <list>
#include <vector>

#include "arch/isa_traits.hh"
#include "arch/locked_mem.hh"
#include "arch/predecoder.hh"
#include "arch/utility.hh"
#include "config/the_isa.hh"
#include "cpu/inorder/resources/cache_unit.hh"
#include "cpu/inorder/resources/fetch_unit.hh"
#include "cpu/inorder/cpu.hh"
#include "cpu/inorder/pipeline_traits.hh"
#include "cpu/inorder/resource_pool.hh"
#include "debug/Activity.hh"
#include "debug/InOrderCachePort.hh"
#include "debug/InOrderStall.hh"
#include "debug/RefCount.hh"
#include "debug/ThreadModel.hh"
#include "mem/request.hh"

using namespace std;
using namespace TheISA;
using namespace ThePipeline;

FetchUnit::FetchUnit(string res_name, int res_id, int res_width,
                     int res_latency, InOrderCPU *_cpu,
                     ThePipeline::Params *params)
    : CacheUnit(res_name, res_id, res_width, res_latency, _cpu, params),
      instSize(sizeof(TheISA::MachInst)), fetchBuffSize(params->fetchBuffSize),
      predecoder(NULL)
{ }

FetchUnit::~FetchUnit()
{
    std::list<FetchBlock*>::iterator fetch_it = fetchBuffer.begin();
    std::list<FetchBlock*>::iterator end_it = fetchBuffer.end();
    while (fetch_it != end_it) {
        // Fetch block data is allocated with new uint8_t[], so it must be
        // freed with the array form of delete.
        delete [] (*fetch_it)->block;
        delete *fetch_it;
        fetch_it++;
    }
    fetchBuffer.clear();

    std::list<FetchBlock*>::iterator pend_it = pendingFetch.begin();
    std::list<FetchBlock*>::iterator pend_end = pendingFetch.end();
    while (pend_it != pend_end) {
        if ((*pend_it)->block) {
            delete [] (*pend_it)->block;
        }

        delete *pend_it;
        pend_it++;
    }
    pendingFetch.clear();
}

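// Build the decoded instruction for 'inst' out of an already-fetched cache
// block. The fetch address is offset into the block to pick out the raw
// MachInst, which is then run through the predecoder to produce the
// ExtMachInst consumed by the rest of the pipeline.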
void
FetchUnit::createMachInst(std::list<FetchBlock*>::iterator fetch_it,
                          DynInstPtr inst)
{
    ExtMachInst ext_inst;
    Addr block_addr = cacheBlockAlign(inst->getMemAddr());
    Addr fetch_addr = inst->getMemAddr();
    unsigned fetch_offset = (fetch_addr - block_addr) / instSize;
    ThreadID tid = inst->readTid();
    TheISA::PCState instPC = inst->pcState();

    DPRINTF(InOrderCachePort, "Creating instruction [sn:%i] w/fetch data @"
            "addr:%08p block:%08p\n", inst->seqNum, fetch_addr, block_addr);

    assert((*fetch_it)->valid);

    TheISA::MachInst *fetchInsts =
        reinterpret_cast<TheISA::MachInst *>((*fetch_it)->block);

    MachInst mach_inst =
        TheISA::gtoh(fetchInsts[fetch_offset]);

    predecoder.setTC(cpu->thread[tid]->getTC());
    predecoder.moreBytes(instPC, inst->instAddr(), mach_inst);
    assert(predecoder.extMachInstReady());
    ext_inst = predecoder.getExtMachInst(instPC);

    inst->pcState(instPC);
    inst->setMachInst(ext_inst);
}

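// Reserve a resource slot for a fetch request. The request is refused (-1)
// while the ITLB is blocked for this thread, and the instruction must
// already have a valid fetch address before a slot can be claimed.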
int
FetchUnit::getSlot(DynInstPtr inst)
{
    if (tlbBlocked[inst->threadNumber]) {
        return -1;
    }

    if (!inst->validMemAddr()) {
        panic("[tid:%i][sn:%i] Mem. Addr. must be set before requesting "
              "cache access\n", inst->readTid(), inst->seqNum);
    }

    int new_slot = Resource::getSlot(inst);

    if (new_slot == -1)
        return -1;

    inst->memTime = curTick();
    return new_slot;
}

void
FetchUnit::removeAddrDependency(DynInstPtr inst)
{
    inst->unsetMemAddr();
}

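// Turn the instruction's current schedule entry into a cache request.
// Fetches are always read requests for the block containing the
// instruction's fetch address.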
ResReqPtr
FetchUnit::getRequest(DynInstPtr inst, int stage_num, int res_idx,
                      int slot_num, unsigned cmd)
{
    ScheduleEntry* sched_entry = *inst->curSkedEntry;
    CacheRequest* cache_req = dynamic_cast<CacheRequest*>(reqs[slot_num]);

    if (!inst->validMemAddr()) {
        panic("Mem. Addr. must be set before requesting cache access\n");
    }

    assert(sched_entry->cmd == InitiateFetch);

    DPRINTF(InOrderCachePort,
            "[tid:%i]: Fetch request from [sn:%i] for addr %08p\n",
            inst->readTid(), inst->seqNum, inst->getMemAddr());

    cache_req->setRequest(inst, stage_num, id, slot_num,
                          sched_entry->cmd, MemCmd::ReadReq,
                          inst->curSkedEntry->idx);

    return cache_req;
}

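// Build (or reuse) the memory-system Request for a fetch. The request is
// aligned to the start of the cache block so the whole block can be
// buffered and shared by later instructions fetching from it.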
void
FetchUnit::setupMemRequest(DynInstPtr inst, CacheReqPtr cache_req,
                           int acc_size, int flags)
{
    ThreadID tid = inst->readTid();
    Addr aligned_addr = cacheBlockAlign(inst->getMemAddr());

    if (inst->fetchMemReq == NULL)
        inst->fetchMemReq =
            new Request(tid, aligned_addr, acc_size, flags,
                        inst->instAddr(), cpu->readCpuId(), tid);

    cache_req->memReq = inst->fetchMemReq;
}

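// Linear search of a fetch-block list (either the fetch buffer or the
// pending-fetch list) for a block matching this address space and
// block-aligned address. Returns the list's end() iterator on a miss.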
std::list<FetchUnit::FetchBlock*>::iterator
FetchUnit::findBlock(std::list<FetchBlock*> &fetch_blocks, int asid,
                     Addr block_addr)
{
    std::list<FetchBlock*>::iterator fetch_it = fetch_blocks.begin();
    std::list<FetchBlock*>::iterator end_it = fetch_blocks.end();

    while (fetch_it != end_it) {
        if ((*fetch_it)->asid == asid &&
            (*fetch_it)->addr == block_addr) {
            return fetch_it;
        }

        fetch_it++;
    }

    return fetch_it;
}

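// Pick a victim from the fetch buffer. The buffer is kept in LRU order
// (least recently used at the front), so the first block with no
// instructions still pending on it is the replacement candidate. Returns
// fetchBuffer.end() if every block is still in use.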
std::list<FetchUnit::FetchBlock*>::iterator
FetchUnit::findReplacementBlock()
{
    std::list<FetchBlock*>::iterator fetch_it = fetchBuffer.begin();
    std::list<FetchBlock*>::iterator end_it = fetchBuffer.end();

    while (fetch_it != end_it) {
        if ((*fetch_it)->cnt == 0) {
            return fetch_it;
        } else {
            DPRINTF(InOrderCachePort, "Block %08p has %i insts pending.\n",
                    (*fetch_it)->addr, (*fetch_it)->cnt);
        }
        fetch_it++;
    }

    return fetch_it;
}

void
FetchUnit::markBlockUsed(std::list<FetchBlock*>::iterator block_it)
{
    // Move block from whatever location it is in fetch buffer
    // to the back (represents most-recently-used location)
    if (block_it != fetchBuffer.end()) {
        FetchBlock *mru_blk = *block_it;
        fetchBuffer.erase(block_it);
        fetchBuffer.push_back(mru_blk);
    }
}

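// Main entry point for a scheduled fetch request. InitiateFetch first checks
// the pending-fetch list and the fetch buffer (a hit in either just bumps
// that block's reference count), then performs the ITLB lookup and starts
// the cache access. CompleteFetch waits for the block to arrive, decodes the
// instruction out of it, and moves the block into the MRU position of the
// fetch buffer.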
void
FetchUnit::execute(int slot_num)
{
    CacheReqPtr cache_req = dynamic_cast<CacheReqPtr>(reqs[slot_num]);
    assert(cache_req);

    if (cachePortBlocked && cache_req->cmd == InitiateFetch) {
        DPRINTF(InOrderCachePort, "Cache Port Blocked. Cannot Access\n");
        cache_req->done(false);
        return;
    }

    DynInstPtr inst = cache_req->inst;
    ThreadID tid = inst->readTid();
    Addr block_addr = cacheBlockAlign(inst->getMemAddr());
    int asid = cpu->asid[tid];

    inst->fault = NoFault;

    switch (cache_req->cmd)
    {
      case InitiateFetch:
        {
            // Check to see if we've already got this request buffered
            // or pending to be buffered
            bool do_fetch = true;
            std::list<FetchBlock*>::iterator pending_it;
            pending_it = findBlock(pendingFetch, asid, block_addr);
            if (pending_it != pendingFetch.end()) {
                (*pending_it)->cnt++;
                do_fetch = false;

                DPRINTF(InOrderCachePort, "%08p is a pending fetch block "
                        "(pending:%i).\n", block_addr,
                        (*pending_it)->cnt);
            } else if (pendingFetch.size() < fetchBuffSize) {
                std::list<FetchBlock*>::iterator buff_it;
                buff_it = findBlock(fetchBuffer, asid, block_addr);
                if (buff_it != fetchBuffer.end()) {
                    (*buff_it)->cnt++;
                    do_fetch = false;

                    DPRINTF(InOrderCachePort, "%08p is in fetch buffer "
                            "(pending:%i).\n", block_addr, (*buff_it)->cnt);
                }
            }

            if (!do_fetch) {
                DPRINTF(InOrderCachePort, "Inst. [sn:%i] marked to be filled "
                        "through fetch buffer.\n", inst->seqNum);
                cache_req->fetchBufferFill = true;
                cache_req->setCompleted(true);
                return;
            }

            // Check to see if there is room in the fetch buffer for this
            // instruction. If not, block this request.
            if (pendingFetch.size() >= fetchBuffSize) {
                DPRINTF(InOrderCachePort, "No room available in fetch buffer.\n");
                cache_req->done();
                return;
            }

            doTLBAccess(inst, cache_req, cacheBlkSize, 0, TheISA::TLB::Execute);

            if (inst->fault == NoFault) {
                DPRINTF(InOrderCachePort,
                        "[tid:%u]: Initiating fetch access to %s for "
                        "addr:%#x (block:%#x)\n", tid, name(),
                        cache_req->inst->getMemAddr(), block_addr);

                cache_req->reqData = new uint8_t[cacheBlkSize];

                inst->setCurResSlot(slot_num);

                doCacheAccess(inst);

                if (cache_req->isMemAccPending()) {
                    pendingFetch.push_back(new FetchBlock(asid, block_addr));
                }
            }

            break;
        }

      case CompleteFetch:
        if (cache_req->fetchBufferFill) {
            // Block request if it's depending on a previous fetch that
            // hasn't made it back yet
            std::list<FetchBlock*>::iterator fetch_it =
                findBlock(fetchBuffer, asid, block_addr);
            if (fetch_it == fetchBuffer.end()) {
                DPRINTF(InOrderCachePort, "%#x not available yet\n",
                        block_addr);
                cache_req->setCompleted(false);
                return;
            }

            // Make New Instruction
            createMachInst(fetch_it, inst);
            if (inst->traceData) {
                inst->traceData->setStaticInst(inst->staticInst);
                inst->traceData->setPC(inst->pcState());
            }

            // FetchBuffer Book-Keeping
            (*fetch_it)->cnt--;
            assert((*fetch_it)->cnt >= 0);
            markBlockUsed(fetch_it);

            cache_req->done();
            return;
        }

        if (cache_req->isMemAccComplete()) {
            if (fetchBuffer.size() >= fetchBuffSize) {
                // If there is no replacement block, then we'll just have
                // to wait till that gets cleared before satisfying the fetch
                // for this instruction
                std::list<FetchBlock*>::iterator repl_it =
                    findReplacementBlock();
                if (repl_it == fetchBuffer.end()) {
                    DPRINTF(InOrderCachePort, "Unable to find replacement block"
                            " and complete fetch.\n");
                    cache_req->setCompleted(false);
                    return;
                }

                delete [] (*repl_it)->block;
                delete *repl_it;
                fetchBuffer.erase(repl_it);
            }

            DPRINTF(InOrderCachePort,
                    "[tid:%i]: Completing Fetch Access for [sn:%i]\n",
                    tid, inst->seqNum);

            // Make New Instruction
            std::list<FetchBlock*>::iterator fetch_it =
                findBlock(pendingFetch, asid, block_addr);

            assert(fetch_it != pendingFetch.end());
            assert((*fetch_it)->valid);

            createMachInst(fetch_it, inst);
            if (inst->traceData) {
                inst->traceData->setStaticInst(inst->staticInst);
                inst->traceData->setPC(inst->pcState());
            }

            // Update instructions waiting on new fetch block
            FetchBlock *new_block = (*fetch_it);
            new_block->cnt--;
            assert(new_block->cnt >= 0);

            // Finally, update FetchBuffer w/Pending Block into the
            // MRU location
            pendingFetch.erase(fetch_it);
            fetchBuffer.push_back(new_block);

            DPRINTF(InOrderCachePort, "[tid:%i]: Instruction [sn:%i] is: %s\n",
                    tid, inst->seqNum,
                    inst->staticInst->disassemble(inst->instAddr()));

            inst->unsetMemAddr();

            delete cache_req->dataPkt;

            cache_req->done();
        } else {
            DPRINTF(InOrderCachePort,
                    "[tid:%i]: [sn:%i]: Unable to Complete Fetch Access\n",
                    tid, inst->seqNum);
            DPRINTF(InOrderStall,
                    "STALL: [tid:%i]: Fetch miss from %08p\n",
                    tid, cache_req->inst->instAddr());
            cache_req->setCompleted(false);
            // NOTE: For the SwitchOnCacheMiss ThreadModel, we *don't* switch
            // on a fetch miss, but we could ...
            // cache_req->setMemStall(true);
        }
        break;

      default:
        fatal("Unrecognized command to %s", resName);
    }
}

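// Callback from the cache port when a fetch packet returns. Squashed
// requests are simply discarded; otherwise the returned data is copied
// into the matching pending-fetch block, the block is marked valid, and
// the owning pipeline stage (and possibly a sleeping thread) is woken up.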
void
FetchUnit::processCacheCompletion(PacketPtr pkt)
{
    // Cast to correct packet type
    CacheReqPacket* cache_pkt = dynamic_cast<CacheReqPacket*>(pkt);
    assert(cache_pkt);

    if (cache_pkt->cacheReq->isSquashed()) {
        DPRINTF(InOrderCachePort,
                "Ignoring completion of squashed access, [tid:%i] [sn:%i]\n",
                cache_pkt->cacheReq->getInst()->readTid(),
                cache_pkt->cacheReq->getInst()->seqNum);
        DPRINTF(RefCount,
                "Ignoring completion of squashed access, [tid:%i] [sn:%i]\n",
                cache_pkt->cacheReq->getTid(),
                cache_pkt->cacheReq->seqNum);

        cache_pkt->cacheReq->done();
        cache_pkt->cacheReq->freeSlot();
        delete cache_pkt;

        cpu->wakeCPU();
        return;
    }

    Addr block_addr = cacheBlockAlign(cache_pkt->cacheReq->
                                      getInst()->getMemAddr());

    DPRINTF(InOrderCachePort,
            "[tid:%u]: [sn:%i]: Waking from fetch access to addr:%#x"
            "(phys:%#x), size:%i\n",
            cache_pkt->cacheReq->getInst()->readTid(),
            cache_pkt->cacheReq->getInst()->seqNum,
            block_addr, cache_pkt->getAddr(), cache_pkt->getSize());

    // Cast to correct request type
    CacheRequest *cache_req = dynamic_cast<CacheReqPtr>(
        findRequest(cache_pkt->cacheReq->getInst(), cache_pkt->instIdx));

    if (!cache_req) {
        panic("[tid:%u]: [sn:%i]: Can't find slot for fetch access to "
              "addr. %08p\n", cache_pkt->cacheReq->getInst()->readTid(),
              cache_pkt->cacheReq->getInst()->seqNum,
              block_addr);
    }

    // Get resource request info
    unsigned stage_num = cache_req->getStageNum();
    DynInstPtr inst = cache_req->inst;
    ThreadID tid = cache_req->inst->readTid();
    short asid = cpu->asid[tid];

    assert(!cache_req->isSquashed());
    assert(inst->curSkedEntry->cmd == CompleteFetch);

    DPRINTF(InOrderCachePort,
            "[tid:%u]: [sn:%i]: Processing fetch access for block %#x\n",
            tid, inst->seqNum, block_addr);

    std::list<FetchBlock*>::iterator pend_it = findBlock(pendingFetch, asid,
                                                         block_addr);
    assert(pend_it != pendingFetch.end());

    // Copy Data to pendingFetch queue...
    (*pend_it)->block = new uint8_t[cacheBlkSize];
    memcpy((*pend_it)->block, cache_pkt->getPtr<uint8_t>(), cacheBlkSize);
    (*pend_it)->valid = true;

    cache_req->setMemAccPending(false);
    cache_req->setMemAccCompleted();

    if (cache_req->isMemStall() &&
        cpu->threadModel == InOrderCPU::SwitchOnCacheMiss) {
        DPRINTF(InOrderCachePort, "[tid:%u] Waking up from Cache Miss.\n",
                tid);

        cpu->activateContext(tid);

        DPRINTF(ThreadModel, "Activating [tid:%i] after return from cache "
                "miss.\n", tid);
    }

    // Wake up the CPU (if it went to sleep and was waiting on this
    // completion event).
    cpu->wakeCPU();

    DPRINTF(Activity, "[tid:%u] Activating %s due to cache completion\n",
            tid, cpu->pipelineStage[stage_num]->name());

    cpu->switchToActive(stage_num);
}

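// Undo the bookkeeping for a squashed fetch. The block the instruction was
// waiting on has its reference count decremented; a pending block whose
// count drops to zero is removed and freed, since no instruction will
// consume it.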
void
FetchUnit::squashCacheRequest(CacheReqPtr req_ptr)
{
    DynInstPtr inst = req_ptr->getInst();
    ThreadID tid = inst->readTid();
    Addr block_addr = cacheBlockAlign(inst->getMemAddr());
    int asid = cpu->asid[tid];

    // Check Fetch Buffer (or pending fetch) for this block and
    // update pending counts
    std::list<FetchBlock*>::iterator buff_it = findBlock(fetchBuffer,
                                                         asid,
                                                         block_addr);
    if (buff_it != fetchBuffer.end()) {
        (*buff_it)->cnt--;
        DPRINTF(InOrderCachePort, "[sn:%i] Removing Pending Fetch "
                "for Buffer block %08p (cnt=%i)\n", inst->seqNum,
                block_addr, (*buff_it)->cnt);
    } else {
        std::list<FetchBlock*>::iterator block_it = findBlock(pendingFetch,
                                                              asid,
                                                              block_addr);
        if (block_it != pendingFetch.end()) {
            (*block_it)->cnt--;
            if ((*block_it)->cnt == 0) {
                DPRINTF(InOrderCachePort, "[sn:%i] Removing Pending Fetch "
                        "for block %08p (cnt=%i)\n", inst->seqNum,
                        block_addr, (*block_it)->cnt);
                if ((*block_it)->block) {
                    delete [] (*block_it)->block;
                }
                delete *block_it;
                pendingFetch.erase(block_it);
            }
        }
    }

    CacheUnit::squashCacheRequest(req_ptr);
}

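// Trap handling for fetch: the predecoder may hold partially assembled
// instruction bytes from the faulting stream, so reset it before fetch
// resumes at the trap handler's PC.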
void
FetchUnit::trap(Fault fault, ThreadID tid, DynInstPtr inst)
{
    //@todo: per thread?
    predecoder.reset();
}