src/cpu/inorder/resources/fetch_unit.cc
/*
 * Copyright (c) 2011 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Korey Sewell
 *
 */

#include <list>
#include <vector>

#include "arch/isa_traits.hh"
#include "arch/locked_mem.hh"
#include "arch/predecoder.hh"
#include "arch/utility.hh"
#include "config/the_isa.hh"
#include "cpu/inorder/resources/cache_unit.hh"
#include "cpu/inorder/resources/fetch_unit.hh"
#include "cpu/inorder/cpu.hh"
#include "cpu/inorder/pipeline_traits.hh"
#include "cpu/inorder/resource_pool.hh"
#include "mem/request.hh"

using namespace std;
using namespace TheISA;
using namespace ThePipeline;

FetchUnit::FetchUnit(string res_name, int res_id, int res_width,
                     int res_latency, InOrderCPU *_cpu,
                     ThePipeline::Params *params)
    : CacheUnit(res_name, res_id, res_width, res_latency, _cpu, params),
      instSize(sizeof(TheISA::MachInst)), fetchBuffSize(params->fetchBuffSize),
      predecoder(NULL)
{ }

FetchUnit::~FetchUnit()
{
    std::list<FetchBlock*>::iterator fetch_it = fetchBuffer.begin();
    std::list<FetchBlock*>::iterator end_it = fetchBuffer.end();
    while (fetch_it != end_it) {
        // Blocks are allocated with new[], so they must be freed with
        // delete[]
        delete [] (*fetch_it)->block;
        delete *fetch_it;
        fetch_it++;
    }
    fetchBuffer.clear();

    std::list<FetchBlock*>::iterator pend_it = pendingFetch.begin();
    std::list<FetchBlock*>::iterator pend_end = pendingFetch.end();
    while (pend_it != pend_end) {
        if ((*pend_it)->block) {
            delete [] (*pend_it)->block;
        }

        delete *pend_it;
        pend_it++;
    }
    pendingFetch.clear();
}

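// Decode the machine instruction for 'inst' out of a buffered cache block:
// compute the instruction's offset within the block-aligned fetch data,
// feed the raw bytes to the predecoder, and store the resulting
// ExtMachInst (and updated PC state) back into the instruction.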
void
FetchUnit::createMachInst(std::list<FetchBlock*>::iterator fetch_it,
                          DynInstPtr inst)
{
    ExtMachInst ext_inst;
    Addr block_addr = cacheBlockAlign(inst->getMemAddr());
    Addr fetch_addr = inst->getMemAddr();
    unsigned fetch_offset = (fetch_addr - block_addr) / instSize;
    ThreadID tid = inst->readTid();
    TheISA::PCState instPC = inst->pcState();

    DPRINTF(InOrderCachePort, "Creating instruction [sn:%i] w/fetch data @"
            "addr:%08p block:%08p\n", inst->seqNum, fetch_addr, block_addr);

    assert((*fetch_it)->valid);

    TheISA::MachInst *fetchInsts =
        reinterpret_cast<TheISA::MachInst *>((*fetch_it)->block);

    MachInst mach_inst =
        TheISA::gtoh(fetchInsts[fetch_offset]);

    predecoder.setTC(cpu->thread[tid]->getTC());
    predecoder.moreBytes(instPC, inst->instAddr(), mach_inst);
    ext_inst = predecoder.getExtMachInst(instPC);

    inst->pcState(instPC);
    inst->setMachInst(ext_inst);
}

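// Acquire a resource slot for a fetch request. Fails (-1) if the I-TLB is
// blocked for this thread or if no slot is free; on success the
// instruction's memory-access timestamp is recorded.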
int
FetchUnit::getSlot(DynInstPtr inst)
{
    if (tlbBlocked[inst->threadNumber]) {
        return -1;
    }

    if (!inst->validMemAddr()) {
        panic("[tid:%i][sn:%i] Mem. Addr. must be set before requesting "
              "cache access\n", inst->readTid(), inst->seqNum);
    }

    int new_slot = Resource::getSlot(inst);

    if (new_slot == -1)
        return -1;

    inst->memTime = curTick();
    return new_slot;
}

void
FetchUnit::removeAddrDependency(DynInstPtr inst)
{
    inst->unsetMemAddr();
}

ResReqPtr
FetchUnit::getRequest(DynInstPtr inst, int stage_num, int res_idx,
                      int slot_num, unsigned cmd)
{
    ScheduleEntry* sched_entry = *inst->curSkedEntry;
    CacheRequest* cache_req = dynamic_cast<CacheRequest*>(reqs[slot_num]);

    if (!inst->validMemAddr()) {
        panic("Mem. Addr. must be set before requesting cache access\n");
    }

    assert(sched_entry->cmd == InitiateFetch);

    DPRINTF(InOrderCachePort,
            "[tid:%i]: Fetch request from [sn:%i] for addr %08p\n",
            inst->readTid(), inst->seqNum, inst->getMemAddr());

    cache_req->setRequest(inst, stage_num, id, slot_num,
                          sched_entry->cmd, MemCmd::ReadReq,
                          inst->curSkedEntry->idx);

    return cache_req;
}

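// Build (or reuse) the memory Request object for a fetch. The request is
// always for a full, cache-block-aligned line rather than for the
// individual instruction's address.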
void
FetchUnit::setupMemRequest(DynInstPtr inst, CacheReqPtr cache_req,
                           int acc_size, int flags)
{
    ThreadID tid = inst->readTid();
    Addr aligned_addr = cacheBlockAlign(inst->getMemAddr());

    if (inst->fetchMemReq == NULL)
        inst->fetchMemReq =
            new Request(tid, aligned_addr, acc_size, flags,
                        inst->instAddr(), cpu->readCpuId(), tid);

    cache_req->memReq = inst->fetchMemReq;
}

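// Search a fetch-block list (fetchBuffer or pendingFetch) for the entry
// matching a given address-space id and block-aligned address. Returns the
// list's end() iterator if no match is found.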
std::list<FetchUnit::FetchBlock*>::iterator
FetchUnit::findBlock(std::list<FetchBlock*> &fetch_blocks, int asid,
                     Addr block_addr)
{
    std::list<FetchBlock*>::iterator fetch_it = fetch_blocks.begin();
    std::list<FetchBlock*>::iterator end_it = fetch_blocks.end();

    while (fetch_it != end_it) {
        if ((*fetch_it)->asid == asid &&
            (*fetch_it)->addr == block_addr) {
            return fetch_it;
        }

        fetch_it++;
    }

    return fetch_it;
}

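// Pick a victim in the fetch buffer: the first block with no instructions
// still waiting on it (cnt == 0). Returns end() if every buffered block is
// still in use.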
std::list<FetchUnit::FetchBlock*>::iterator
FetchUnit::findReplacementBlock()
{
    std::list<FetchBlock*>::iterator fetch_it = fetchBuffer.begin();
    std::list<FetchBlock*>::iterator end_it = fetchBuffer.end();

    while (fetch_it != end_it) {
        if ((*fetch_it)->cnt == 0) {
            return fetch_it;
        } else {
            DPRINTF(InOrderCachePort, "Block %08p has %i insts pending.\n",
                    (*fetch_it)->addr, (*fetch_it)->cnt);
        }
        fetch_it++;
    }

    return fetch_it;
}

void
FetchUnit::markBlockUsed(std::list<FetchBlock*>::iterator block_it)
{
    // Move block from whatever location it is in fetch buffer
    // to the back (represents most-recently-used location)
    if (block_it != fetchBuffer.end()) {
        FetchBlock *mru_blk = *block_it;
        fetchBuffer.erase(block_it);
        fetchBuffer.push_back(mru_blk);
    }
}

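// Main entry point for a fetch request occupying a slot. InitiateFetch
// either piggybacks on an already pending/buffered block or starts a new
// I-TLB translation and cache access; CompleteFetch decodes the
// instruction from the returned block and moves that block into the
// fetch buffer.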
void
FetchUnit::execute(int slot_num)
{
    CacheReqPtr cache_req = dynamic_cast<CacheReqPtr>(reqs[slot_num]);
    assert(cache_req);

    if (cachePortBlocked && cache_req->cmd == InitiateFetch) {
        DPRINTF(InOrderCachePort, "Cache Port Blocked. Cannot Access\n");
        cache_req->done(false);
        return;
    }

    DynInstPtr inst = cache_req->inst;
    ThreadID tid = inst->readTid();
    Addr block_addr = cacheBlockAlign(inst->getMemAddr());
    int asid = cpu->asid[tid];

    inst->fault = NoFault;

    switch (cache_req->cmd)
    {
      case InitiateFetch:
        {
            // Check to see if we've already got this request buffered
            // or pending to be buffered
            bool do_fetch = true;
            std::list<FetchBlock*>::iterator pending_it;
            pending_it = findBlock(pendingFetch, asid, block_addr);
            if (pending_it != pendingFetch.end()) {
                (*pending_it)->cnt++;
                do_fetch = false;

                DPRINTF(InOrderCachePort, "%08p is a pending fetch block "
                        "(pending:%i).\n", block_addr,
                        (*pending_it)->cnt);
            } else if (pendingFetch.size() < fetchBuffSize) {
                std::list<FetchBlock*>::iterator buff_it;
                buff_it = findBlock(fetchBuffer, asid, block_addr);
                if (buff_it != fetchBuffer.end()) {
                    (*buff_it)->cnt++;
                    do_fetch = false;

                    DPRINTF(InOrderCachePort, "%08p is in fetch buffer "
                            "(pending:%i).\n", block_addr, (*buff_it)->cnt);
                }
            }

            if (!do_fetch) {
                DPRINTF(InOrderCachePort, "Inst. [sn:%i] marked to be filled "
                        "through fetch buffer.\n", inst->seqNum);
                cache_req->fetchBufferFill = true;
                cache_req->setCompleted(true);
                return;
            }

            // Check to see if there is room in the fetch buffer for this
            // instruction. If not, block this request.
            if (pendingFetch.size() >= fetchBuffSize) {
                DPRINTF(InOrderCachePort, "No room available in fetch "
                        "buffer.\n");
                cache_req->done();
                return;
            }

            doTLBAccess(inst, cache_req, cacheBlkSize, 0,
                        TheISA::TLB::Execute);

            if (inst->fault == NoFault) {
                DPRINTF(InOrderCachePort,
                        "[tid:%u]: Initiating fetch access to %s for "
                        "addr:%#x (block:%#x)\n", tid, name(),
                        cache_req->inst->getMemAddr(), block_addr);

                cache_req->reqData = new uint8_t[cacheBlkSize];

                inst->setCurResSlot(slot_num);

                doCacheAccess(inst);

                if (cache_req->isMemAccPending()) {
                    pendingFetch.push_back(new FetchBlock(asid, block_addr));
                }
            }

            break;
        }

      case CompleteFetch:
        if (cache_req->fetchBufferFill) {
            // Block this request if it depends on a previous fetch that
            // hasn't returned its data yet
            std::list<FetchBlock*>::iterator fetch_it =
                findBlock(fetchBuffer, asid, block_addr);
            if (fetch_it == fetchBuffer.end()) {
                DPRINTF(InOrderCachePort, "%#x not available yet\n",
                        block_addr);
                cache_req->setCompleted(false);
                return;
            }

            // Make New Instruction
            createMachInst(fetch_it, inst);
            if (inst->traceData) {
                inst->traceData->setStaticInst(inst->staticInst);
                inst->traceData->setPC(inst->pcState());
            }

            // FetchBuffer Book-Keeping
            (*fetch_it)->cnt--;
            assert((*fetch_it)->cnt >= 0);
            markBlockUsed(fetch_it);

            cache_req->done();
            return;
        }

        if (cache_req->isMemAccComplete()) {
            if (fetchBuffer.size() >= fetchBuffSize) {
                // If there is no replacement block, then we'll just have
                // to wait till that gets cleared before satisfying the fetch
                // for this instruction
                std::list<FetchBlock*>::iterator repl_it =
                    findReplacementBlock();
                if (repl_it == fetchBuffer.end()) {
                    DPRINTF(InOrderCachePort, "Unable to find replacement "
                            "block and complete fetch.\n");
                    cache_req->setCompleted(false);
                    return;
                }

                delete [] (*repl_it)->block;
                delete *repl_it;
                fetchBuffer.erase(repl_it);
            }

            DPRINTF(InOrderCachePort,
                    "[tid:%i]: Completing Fetch Access for [sn:%i]\n",
                    tid, inst->seqNum);

            // Make New Instruction
            std::list<FetchBlock*>::iterator fetch_it =
                findBlock(pendingFetch, asid, block_addr);

            assert(fetch_it != pendingFetch.end());
            assert((*fetch_it)->valid);

            createMachInst(fetch_it, inst);
            if (inst->traceData) {
                inst->traceData->setStaticInst(inst->staticInst);
                inst->traceData->setPC(inst->pcState());
            }

            // Update instructions waiting on new fetch block
            FetchBlock *new_block = (*fetch_it);
            new_block->cnt--;
            assert(new_block->cnt >= 0);

            // Finally, update FetchBuffer w/Pending Block into the
            // MRU location
            pendingFetch.erase(fetch_it);
            fetchBuffer.push_back(new_block);

            DPRINTF(InOrderCachePort, "[tid:%i]: Instruction [sn:%i] is: %s\n",
                    tid, inst->seqNum,
                    inst->staticInst->disassemble(inst->instAddr()));

            inst->unsetMemAddr();

            delete cache_req->dataPkt;

            cache_req->done();
        } else {
            DPRINTF(InOrderCachePort,
                    "[tid:%i]: [sn:%i]: Unable to Complete Fetch Access\n",
                    tid, inst->seqNum);
            DPRINTF(InOrderStall,
                    "STALL: [tid:%i]: Fetch miss from %08p\n",
                    tid, cache_req->inst->instAddr());
            cache_req->setCompleted(false);
            // NOTE: For SwitchOnCacheMiss ThreadModel, we *don't* switch on
            // fetch miss, but we could ...
            // cache_req->setMemStall(true);
        }
        break;

      default:
        fatal("Unrecognized command to %s", resName);
    }
}

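// Called when the cache responds to a fetch. Copies the returned cache
// block into the matching pendingFetch entry, marks the memory access
// complete, and wakes the CPU and pipeline stage (re-activating a thread
// that was switched out on a cache miss, if applicable).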
void
FetchUnit::processCacheCompletion(PacketPtr pkt)
{
    // Cast to correct packet type
    CacheReqPacket* cache_pkt = dynamic_cast<CacheReqPacket*>(pkt);
    assert(cache_pkt);

    if (cache_pkt->cacheReq->isSquashed()) {
        DPRINTF(InOrderCachePort,
                "Ignoring completion of squashed access, [tid:%i] [sn:%i]\n",
                cache_pkt->cacheReq->getInst()->readTid(),
                cache_pkt->cacheReq->getInst()->seqNum);
        DPRINTF(RefCount,
                "Ignoring completion of squashed access, [tid:%i] [sn:%i]\n",
                cache_pkt->cacheReq->getTid(),
                cache_pkt->cacheReq->seqNum);

        cache_pkt->cacheReq->done();
        cache_pkt->cacheReq->freeSlot();
        delete cache_pkt;

        cpu->wakeCPU();
        return;
    }

    Addr block_addr = cacheBlockAlign(cache_pkt->cacheReq->
                                      getInst()->getMemAddr());

    DPRINTF(InOrderCachePort,
            "[tid:%u]: [sn:%i]: Waking from fetch access to addr:%#x"
            "(phys:%#x), size:%i\n",
            cache_pkt->cacheReq->getInst()->readTid(),
            cache_pkt->cacheReq->getInst()->seqNum,
            block_addr, cache_pkt->getAddr(), cache_pkt->getSize());

    // Cast to correct request type
    CacheRequest *cache_req = dynamic_cast<CacheReqPtr>(
        findRequest(cache_pkt->cacheReq->getInst(), cache_pkt->instIdx));

    if (!cache_req) {
        panic("[tid:%u]: [sn:%i]: Can't find slot for fetch access to "
              "addr. %08p\n", cache_pkt->cacheReq->getInst()->readTid(),
              cache_pkt->cacheReq->getInst()->seqNum,
              block_addr);
    }

    // Get resource request info
    unsigned stage_num = cache_req->getStageNum();
    DynInstPtr inst = cache_req->inst;
    ThreadID tid = cache_req->inst->readTid();
    short asid = cpu->asid[tid];

    assert(!cache_req->isSquashed());
    assert(inst->curSkedEntry->cmd == CompleteFetch);

    DPRINTF(InOrderCachePort,
            "[tid:%u]: [sn:%i]: Processing fetch access for block %#x\n",
            tid, inst->seqNum, block_addr);

    std::list<FetchBlock*>::iterator pend_it = findBlock(pendingFetch, asid,
                                                         block_addr);
    assert(pend_it != pendingFetch.end());

    // Copy Data to pendingFetch queue...
    (*pend_it)->block = new uint8_t[cacheBlkSize];
    memcpy((*pend_it)->block, cache_pkt->getPtr<uint8_t>(), cacheBlkSize);
    (*pend_it)->valid = true;

    cache_req->setMemAccPending(false);
    cache_req->setMemAccCompleted();

    if (cache_req->isMemStall() &&
        cpu->threadModel == InOrderCPU::SwitchOnCacheMiss) {
        DPRINTF(InOrderCachePort, "[tid:%u] Waking up from Cache Miss.\n",
                tid);

        cpu->activateContext(tid);

        DPRINTF(ThreadModel, "Activating [tid:%i] after return from cache "
                "miss.\n", tid);
    }

    // Wake up the CPU (if it went to sleep and was waiting on this
    // completion event).
    cpu->wakeCPU();

    DPRINTF(Activity, "[tid:%u] Activating %s due to cache completion\n",
            tid, cpu->pipelineStage[stage_num]->name());

    cpu->switchToActive(stage_num);
}
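// Undo the bookkeeping for a squashed fetch request: decrement the
// pending-instruction count on the block it referenced (in either the
// fetch buffer or the pending list), free a pending block once no
// instructions remain waiting on it, then defer to the base CacheUnit.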
void
FetchUnit::squashCacheRequest(CacheReqPtr req_ptr)
{
    DynInstPtr inst = req_ptr->getInst();
    ThreadID tid = inst->readTid();
    Addr block_addr = cacheBlockAlign(inst->getMemAddr());
    int asid = cpu->asid[tid];

    // Check Fetch Buffer (or pending fetch) for this block and
    // update pending counts
    std::list<FetchBlock*>::iterator buff_it = findBlock(fetchBuffer,
                                                         asid,
                                                         block_addr);
    if (buff_it != fetchBuffer.end()) {
        (*buff_it)->cnt--;
        DPRINTF(InOrderCachePort, "[sn:%i] Removing Pending Fetch "
                "for Buffer block %08p (cnt=%i)\n", inst->seqNum,
                block_addr, (*buff_it)->cnt);
    } else {
        std::list<FetchBlock*>::iterator block_it = findBlock(pendingFetch,
                                                              asid,
                                                              block_addr);
        if (block_it != pendingFetch.end()) {
            (*block_it)->cnt--;
            if ((*block_it)->cnt == 0) {
                DPRINTF(InOrderCachePort, "[sn:%i] Removing Pending Fetch "
                        "for block %08p (cnt=%i)\n", inst->seqNum,
                        block_addr, (*block_it)->cnt);
                if ((*block_it)->block) {
                    delete [] (*block_it)->block;
                }
                delete *block_it;
                pendingFetch.erase(block_it);
            }
        }
    }

    CacheUnit::squashCacheRequest(req_ptr);
}