CPU: Merge the predecoder and decoder.
[gem5.git] / src / cpu / inorder / resources / fetch_unit.cc
/*
 * Copyright (c) 2011 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Korey Sewell
 *
 */

#include <list>
#include <vector>

#include "arch/isa_traits.hh"
#include "arch/locked_mem.hh"
#include "arch/utility.hh"
#include "config/the_isa.hh"
#include "cpu/inorder/resources/cache_unit.hh"
#include "cpu/inorder/resources/fetch_unit.hh"
#include "cpu/inorder/cpu.hh"
#include "cpu/inorder/pipeline_traits.hh"
#include "cpu/inorder/resource_pool.hh"
#include "debug/Activity.hh"
#include "debug/InOrderCachePort.hh"
#include "debug/InOrderStall.hh"
#include "debug/RefCount.hh"
#include "debug/ThreadModel.hh"
#include "mem/request.hh"

using namespace std;
using namespace TheISA;
using namespace ThePipeline;

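// FetchUnit builds on CacheUnit, adding a per-thread instruction decoder,
// a buffer of recently fetched cache blocks (fetchBuffer), and a list of
// blocks with outstanding cache accesses (pendingFetch).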
FetchUnit::FetchUnit(string res_name, int res_id, int res_width,
                     int res_latency, InOrderCPU *_cpu,
                     ThePipeline::Params *params)
    : CacheUnit(res_name, res_id, res_width, res_latency, _cpu, params),
      instSize(sizeof(TheISA::MachInst)), fetchBuffSize(params->fetchBuffSize)
{
    for (int tid = 0; tid < MaxThreads; tid++)
        decoder[tid] = new Decoder(NULL);
}

FetchUnit::~FetchUnit()
{
    std::list<FetchBlock*>::iterator fetch_it = fetchBuffer.begin();
    std::list<FetchBlock*>::iterator end_it = fetchBuffer.end();
    while (fetch_it != end_it) {
        delete [] (*fetch_it)->block;
        delete *fetch_it;
        fetch_it++;
    }
    fetchBuffer.clear();

    std::list<FetchBlock*>::iterator pend_it = pendingFetch.begin();
    std::list<FetchBlock*>::iterator pend_end = pendingFetch.end();
    while (pend_it != pend_end) {
        if ((*pend_it)->block) {
            delete [] (*pend_it)->block;
        }

        delete *pend_it;
        pend_it++;
    }
    pendingFetch.clear();
}

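// Extract the machine instruction at the fetch address from a buffered
// cache block, feed it to this thread's decoder, and attach the decoded
// StaticInst and updated PC state to the dynamic instruction.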
void
FetchUnit::createMachInst(std::list<FetchBlock*>::iterator fetch_it,
                          DynInstPtr inst)
{
    Addr block_addr = cacheBlockAlign(inst->getMemAddr());
    Addr fetch_addr = inst->getMemAddr();
    unsigned fetch_offset = (fetch_addr - block_addr) / instSize;
    ThreadID tid = inst->readTid();
    TheISA::PCState instPC = inst->pcState();

    DPRINTF(InOrderCachePort, "Creating instruction [sn:%i] w/fetch data @"
            "addr:%08p block:%08p\n", inst->seqNum, fetch_addr, block_addr);

    assert((*fetch_it)->valid);

    TheISA::MachInst *fetchInsts =
        reinterpret_cast<TheISA::MachInst *>((*fetch_it)->block);

    MachInst mach_inst =
        TheISA::gtoh(fetchInsts[fetch_offset]);

    decoder[tid]->setTC(cpu->thread[tid]->getTC());
    decoder[tid]->moreBytes(instPC, inst->instAddr(), mach_inst);
    assert(decoder[tid]->instReady());
    inst->setStaticInst(decoder[tid]->decode(instPC));
    inst->pcState(instPC);
}

void
FetchUnit::removeAddrDependency(DynInstPtr inst)
{
    inst->unsetMemAddr();
}

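// Build the resource request for an InitiateFetch command. The instruction
// must already have its fetch (memory) address set by the fetch stage.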
ResReqPtr
FetchUnit::getRequest(DynInstPtr inst, int stage_num, int res_idx,
                      int slot_num, unsigned cmd)
{
    ScheduleEntry* sched_entry = *inst->curSkedEntry;
    CacheRequest* cache_req = dynamic_cast<CacheRequest*>(reqs[slot_num]);

    if (!inst->validMemAddr()) {
        panic("Mem. Addr. must be set before requesting cache access\n");
    }

    assert(sched_entry->cmd == InitiateFetch);

    DPRINTF(InOrderCachePort,
            "[tid:%i]: Fetch request from [sn:%i] for addr %08p\n",
            inst->readTid(), inst->seqNum, inst->getMemAddr());

    cache_req->setRequest(inst, stage_num, id, slot_num,
                          sched_entry->cmd, MemCmd::ReadReq,
                          inst->curSkedEntry->idx);

    return cache_req;
}

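// Allocate the memory Request for a fetch, aligned to the start of the
// cache block that contains the fetch address.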
void
FetchUnit::setupMemRequest(DynInstPtr inst, CacheReqPtr cache_req,
                           int acc_size, int flags)
{
    ThreadID tid = inst->readTid();
    Addr aligned_addr = cacheBlockAlign(inst->getMemAddr());
    if (cache_req->memReq == NULL) {
        cache_req->memReq =
            new Request(tid, aligned_addr, acc_size, flags,
                        cpu->instMasterId(), inst->instAddr(),
                        cpu->readCpuId(), tid);
        DPRINTF(InOrderCachePort, "[sn:%i] Created memReq @%x, ->%x\n",
                inst->seqNum, &cache_req->memReq, cache_req->memReq);
    }
}

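// Search a block list (fetchBuffer or pendingFetch) for an entry matching
// this address space ID and block-aligned address; returns the list's
// end() iterator if no match is found.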
std::list<FetchUnit::FetchBlock*>::iterator
FetchUnit::findBlock(std::list<FetchBlock*> &fetch_blocks, int asid,
                     Addr block_addr)
{
    std::list<FetchBlock*>::iterator fetch_it = fetch_blocks.begin();
    std::list<FetchBlock*>::iterator end_it = fetch_blocks.end();

    while (fetch_it != end_it) {
        if ((*fetch_it)->asid == asid &&
            (*fetch_it)->addr == block_addr) {
            return fetch_it;
        }

        fetch_it++;
    }

    return fetch_it;
}

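// Find a fetch-buffer block with no instructions still waiting on it
// (cnt == 0) that can be evicted to make room for a new block.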
std::list<FetchUnit::FetchBlock*>::iterator
FetchUnit::findReplacementBlock()
{
    std::list<FetchBlock*>::iterator fetch_it = fetchBuffer.begin();
    std::list<FetchBlock*>::iterator end_it = fetchBuffer.end();

    while (fetch_it != end_it) {
        if ((*fetch_it)->cnt == 0) {
            return fetch_it;
        } else {
            DPRINTF(InOrderCachePort, "Block %08p has %i insts pending.\n",
                    (*fetch_it)->addr, (*fetch_it)->cnt);
        }
        fetch_it++;
    }

    return fetch_it;
}

void
FetchUnit::markBlockUsed(std::list<FetchBlock*>::iterator block_it)
{
    // Move block from whatever location it is in fetch buffer
    // to the back (represents most-recently-used location)
    if (block_it != fetchBuffer.end()) {
        FetchBlock *mru_blk = *block_it;
        fetchBuffer.erase(block_it);
        fetchBuffer.push_back(mru_blk);
    }
}

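// Count fetch-buffer blocks that still have instructions waiting on them.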
int
FetchUnit::blocksInUse()
{
    std::list<FetchBlock*>::iterator fetch_it = fetchBuffer.begin();
    std::list<FetchBlock*>::iterator end_it = fetchBuffer.end();

    int cnt = 0;
    while (fetch_it != end_it) {
        if ((*fetch_it)->cnt > 0)
            cnt++;

        fetch_it++;
    }

    return cnt;
}

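// Free all buffered fetch blocks and their data.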
void
FetchUnit::clearFetchBuffer()
{
    std::list<FetchBlock*>::iterator fetch_it = fetchBuffer.begin();
    std::list<FetchBlock*>::iterator end_it = fetchBuffer.end();

    while (fetch_it != end_it) {
        if ((*fetch_it)->block) {
            delete [] (*fetch_it)->block;
        }
        delete *fetch_it;
        fetch_it++;
    }
    fetchBuffer.clear();
}

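// Service a fetch request in the given slot. InitiateFetch either reuses a
// buffered/pending block or starts a new ITLB + cache access; CompleteFetch
// decodes the instruction once its block is available in the fetch buffer.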
void
FetchUnit::execute(int slot_num)
{
    CacheReqPtr cache_req = dynamic_cast<CacheReqPtr>(reqs[slot_num]);
    assert(cache_req);

    if (cachePortBlocked && cache_req->cmd == InitiateFetch) {
        DPRINTF(InOrderCachePort, "Cache Port Blocked. Cannot Access\n");
        cache_req->done(false);
        return;
    }

    DynInstPtr inst = cache_req->inst;
    ThreadID tid = inst->readTid();
    Addr block_addr = cacheBlockAlign(inst->getMemAddr());
    int asid = cpu->asid[tid];

    if (inst->fault != NoFault) {
        DPRINTF(InOrderCachePort,
                "[tid:%i]: [sn:%i]: Detected %s fault @ %x. Forwarding to "
                "next stage.\n", tid, inst->seqNum, inst->fault->name(),
                cacheBlockAlign(inst->getMemAddr()));
        finishCacheUnitReq(inst, cache_req);
        return;
    }

    switch (cache_req->cmd)
    {
      case InitiateFetch:
        {
            // Check to see if we've already got this request buffered
            // or pending to be buffered
            bool do_fetch = true;
            int total_pending = pendingFetch.size() + blocksInUse();

            std::list<FetchBlock*>::iterator pending_it;
            pending_it = findBlock(pendingFetch, asid, block_addr);
            if (pending_it != pendingFetch.end()) {
                (*pending_it)->cnt++;
                do_fetch = false;

                DPRINTF(InOrderCachePort, "%08p is a pending fetch block "
                        "(pending:%i).\n", block_addr,
                        (*pending_it)->cnt);
            } else if (total_pending < fetchBuffSize) {
                std::list<FetchBlock*>::iterator buff_it;
                buff_it = findBlock(fetchBuffer, asid, block_addr);
                if (buff_it != fetchBuffer.end()) {
                    (*buff_it)->cnt++;
                    do_fetch = false;

                    DPRINTF(InOrderCachePort, "%08p is in fetch buffer "
                            "(pending:%i).\n", block_addr, (*buff_it)->cnt);
                }
            }

            if (!do_fetch) {
                DPRINTF(InOrderCachePort, "Inst. [sn:%i] marked to be filled "
                        "through fetch buffer.\n", inst->seqNum);
                cache_req->fetchBufferFill = true;
                cache_req->setCompleted(true);
                return;
            }

            // Check to see if there is room in the fetch buffer for this
            // instruction. If not, block this request.
            if (total_pending >= fetchBuffSize) {
                DPRINTF(InOrderCachePort, "No room available in fetch "
                        "buffer.\n");
                cache_req->done(false);
                return;
            }

            doTLBAccess(inst, cache_req, cacheBlkSize, Request::INST_FETCH,
                        TheISA::TLB::Execute);

            if (inst->fault == NoFault) {
                DPRINTF(InOrderCachePort,
                        "[tid:%u]: Initiating fetch access to %s for "
                        "addr:%#x (block:%#x)\n", tid, name(),
                        cache_req->inst->getMemAddr(), block_addr);

                cache_req->reqData = new uint8_t[cacheBlkSize];

                inst->setCurResSlot(slot_num);

                doCacheAccess(inst);

                if (cache_req->isMemAccPending()) {
                    pendingFetch.push_back(new FetchBlock(asid, block_addr));

                    // mark replacement block
                }
            }

            break;
        }

      case CompleteFetch:
        if (inst->fault != NoFault) {
            DPRINTF(InOrderCachePort,
                    "[tid:%i]: [sn:%i]: Detected %s fault @ %x. Forwarding to "
                    "next stage.\n", tid, inst->seqNum, inst->fault->name(),
                    inst->getMemAddr());
            finishCacheUnitReq(inst, cache_req);
            return;
        }

        if (cache_req->fetchBufferFill) {
            // Block this request if it depends on a previous fetch that
            // hasn't returned its block yet.
            std::list<FetchBlock*>::iterator fetch_it =
                findBlock(fetchBuffer, asid, block_addr);
            if (fetch_it == fetchBuffer.end()) {
                DPRINTF(InOrderCachePort, "%#x not available yet\n",
                        block_addr);
                cache_req->setCompleted(false);
                return;
            }

            // Make New Instruction
            createMachInst(fetch_it, inst);
            if (inst->traceData) {
                inst->traceData->setStaticInst(inst->staticInst);
                inst->traceData->setPC(inst->pcState());
            }

            // FetchBuffer Book-Keeping
            (*fetch_it)->cnt--;
            assert((*fetch_it)->cnt >= 0);
            markBlockUsed(fetch_it);

            cache_req->done();
            return;
        }

        if (cache_req->isMemAccComplete()) {
            if (fetchBuffer.size() >= fetchBuffSize) {
                // If there is no replacement block, then we'll just have
                // to wait till that gets cleared before satisfying the fetch
                // for this instruction
                std::list<FetchBlock*>::iterator repl_it =
                    findReplacementBlock();
                if (repl_it == fetchBuffer.end()) {
                    DPRINTF(InOrderCachePort, "Unable to find replacement "
                            "block and complete fetch.\n");
                    cache_req->setCompleted(false);
                    return;
                }

                delete [] (*repl_it)->block;
                delete *repl_it;
                fetchBuffer.erase(repl_it);
            }

            DPRINTF(InOrderCachePort,
                    "[tid:%i]: Completing Fetch Access for [sn:%i]\n",
                    tid, inst->seqNum);

            // Make New Instruction
            std::list<FetchBlock*>::iterator fetch_it =
                findBlock(pendingFetch, asid, block_addr);

            assert(fetch_it != pendingFetch.end());
            assert((*fetch_it)->valid);

            createMachInst(fetch_it, inst);
            if (inst->traceData) {
                inst->traceData->setStaticInst(inst->staticInst);
                inst->traceData->setPC(inst->pcState());
            }

            // Update instructions waiting on new fetch block
            FetchBlock *new_block = (*fetch_it);
            new_block->cnt--;
            assert(new_block->cnt >= 0);

            // Finally, update FetchBuffer w/Pending Block into the
            // MRU location
            pendingFetch.erase(fetch_it);
            fetchBuffer.push_back(new_block);

            DPRINTF(InOrderCachePort, "[tid:%i]: Instruction [sn:%i] is: %s\n",
                    tid, inst->seqNum,
                    inst->staticInst->disassemble(inst->instAddr()));

            inst->unsetMemAddr();

            cache_req->done();
        } else {
            DPRINTF(InOrderCachePort,
                    "[tid:%i]: [sn:%i]: Unable to Complete Fetch Access\n",
                    tid, inst->seqNum);
            DPRINTF(InOrderStall,
                    "STALL: [tid:%i]: Fetch miss from %08p\n",
                    tid, cache_req->inst->instAddr());
            cache_req->setCompleted(false);
            // NOTE: For the SwitchOnCacheMiss ThreadModel, we *don't* switch
            // on a fetch miss, but we could ...
            // cache_req->setMemStall(true);
        }
        break;

      default:
        fatal("Unrecognized command to %s", resName);
    }
}

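// Called when the cache responds to a fetch: copy the returned data into
// the matching pendingFetch block, mark the access complete, and wake up
// the CPU and pipeline stage waiting on it.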
void
FetchUnit::processCacheCompletion(PacketPtr pkt)
{
    // Cast to correct packet type
    // @todo: use pkt Sender state here to be consistent with other
    // cpu models
    CacheReqPacket* cache_pkt = dynamic_cast<CacheReqPacket*>(pkt);
    assert(cache_pkt);

    DPRINTF(InOrderCachePort, "Finished request for %x\n",
            cache_pkt->getAddr());

    if (processSquash(cache_pkt))
        return;

    Addr block_addr = cacheBlockAlign(cache_pkt->cacheReq->
                                      getInst()->getMemAddr());

    DPRINTF(InOrderCachePort,
            "[tid:%u]: [sn:%i]: Waking from fetch access to addr:%#x"
            "(phys:%#x), size:%i\n",
            cache_pkt->cacheReq->getInst()->readTid(),
            cache_pkt->cacheReq->getInst()->seqNum,
            block_addr, cache_pkt->getAddr(), cache_pkt->getSize());

    // Cast to correct request type
    CacheRequest *cache_req = dynamic_cast<CacheReqPtr>(
        findRequest(cache_pkt->cacheReq->getInst(), cache_pkt->instIdx));

    if (!cache_req) {
        panic("[tid:%u]: [sn:%i]: Can't find slot for fetch access to "
              "addr. %08p\n", cache_pkt->cacheReq->getInst()->readTid(),
              cache_pkt->cacheReq->getInst()->seqNum,
              block_addr);
    }

    // Get resource request info
    unsigned stage_num = cache_req->getStageNum();
    DynInstPtr inst = cache_req->inst;
    ThreadID tid = cache_req->inst->readTid();
    short asid = cpu->asid[tid];

    assert(!cache_req->isSquashed());
    assert(inst->curSkedEntry->cmd == CompleteFetch);

    DPRINTF(InOrderCachePort,
            "[tid:%u]: [sn:%i]: Processing fetch access for block %#x\n",
            tid, inst->seqNum, block_addr);

    std::list<FetchBlock*>::iterator pend_it = findBlock(pendingFetch, asid,
                                                         block_addr);
    assert(pend_it != pendingFetch.end());

    // Copy Data to pendingFetch queue...
    (*pend_it)->block = new uint8_t[cacheBlkSize];
    memcpy((*pend_it)->block, cache_pkt->getPtr<uint8_t>(), cacheBlkSize);
    (*pend_it)->valid = true;

    cache_req->setMemAccPending(false);
    cache_req->setMemAccCompleted();

    if (cache_req->isMemStall() &&
        cpu->threadModel == InOrderCPU::SwitchOnCacheMiss) {
        DPRINTF(InOrderCachePort, "[tid:%u] Waking up from Cache Miss.\n",
                tid);

        cpu->activateContext(tid);

        DPRINTF(ThreadModel, "Activating [tid:%i] after return from cache "
                "miss.\n", tid);
    }

    // Wake up the CPU (if it went to sleep and was waiting on this
    // completion event).
    cpu->wakeCPU();

    DPRINTF(Activity, "[tid:%u] Activating %s due to cache completion\n",
            tid, cpu->pipelineStage[stage_num]->name());

    cpu->switchToActive(stage_num);
}

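// On a squash, decrement the pending-instruction count on the block this
// request referenced, freeing a pending block once nothing else waits on it.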
void
FetchUnit::squashCacheRequest(CacheReqPtr req_ptr)
{
    DynInstPtr inst = req_ptr->getInst();
    ThreadID tid = inst->readTid();
    Addr block_addr = cacheBlockAlign(inst->getMemAddr());
    int asid = cpu->asid[tid];

    // Check Fetch Buffer (or pending fetch) for this block and
    // update pending counts
    std::list<FetchBlock*>::iterator buff_it = findBlock(fetchBuffer,
                                                         asid,
                                                         block_addr);
    if (buff_it != fetchBuffer.end()) {
        (*buff_it)->cnt--;
        DPRINTF(InOrderCachePort, "[sn:%i] Removing Pending Access "
                "for Fetch Buffer block %08p (cnt=%i)\n", inst->seqNum,
                block_addr, (*buff_it)->cnt);
        assert((*buff_it)->cnt >= 0);
    } else {
        std::list<FetchBlock*>::iterator block_it = findBlock(pendingFetch,
                                                              asid,
                                                              block_addr);
        if (block_it != pendingFetch.end()) {
            (*block_it)->cnt--;
            DPRINTF(InOrderCachePort, "[sn:%i] Removing Pending Access "
                    "for Pending Buffer Block %08p (cnt=%i)\n",
                    inst->seqNum,
                    block_addr, (*block_it)->cnt);
            assert((*block_it)->cnt >= 0);
            if ((*block_it)->cnt == 0) {
                if ((*block_it)->block) {
                    delete [] (*block_it)->block;
                }
                delete *block_it;
                pendingFetch.erase(block_it);
            }
        }
    }

    CacheUnit::squashCacheRequest(req_ptr);
}

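// On a trap, reset this thread's decoder, squash everything in fetch, and
// drop any buffered fetch blocks.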
void
FetchUnit::trap(Fault fault, ThreadID tid, DynInstPtr inst)
{
    //@todo: per thread?
    decoder[tid]->reset();

    //@todo: squash using dummy inst seq num
    squash(NULL, NumStages - 1, 0, tid);

    //@todo: make sure no blocks are in use
    assert(blocksInUse() == 0);
    assert(pendingFetch.size() == 0);

    //@todo: clear pendingFetch and fetchBuffer
    clearFetchBuffer();
}