inorder: add a fetch buffer to fetch unit
src/cpu/inorder/resources/fetch_unit.cc
/*
 * Copyright (c) 2011 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Korey Sewell
 *
 */

#include <vector>
#include <list>

#include "arch/isa_traits.hh"
#include "arch/locked_mem.hh"
#include "arch/utility.hh"
#include "arch/predecoder.hh"
#include "config/the_isa.hh"
#include "cpu/inorder/resources/cache_unit.hh"
#include "cpu/inorder/resources/fetch_unit.hh"
#include "cpu/inorder/pipeline_traits.hh"
#include "cpu/inorder/cpu.hh"
#include "cpu/inorder/resource_pool.hh"
#include "mem/request.hh"

using namespace std;
using namespace TheISA;
using namespace ThePipeline;

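// The fetch unit extends the basic cache unit with a small fetch buffer:
// completed cache blocks live in fetchBuffer (most-recently-used block at
// the back), while outstanding block requests are tracked in pendingFetch.
// Each FetchBlock keeps a count (cnt) of the instructions still waiting on
// it, so a block only becomes eligible for replacement once that count
// reaches zero.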
FetchUnit::FetchUnit(string res_name, int res_id, int res_width,
                     int res_latency, InOrderCPU *_cpu,
                     ThePipeline::Params *params)
    : CacheUnit(res_name, res_id, res_width, res_latency, _cpu, params),
      instSize(sizeof(TheISA::MachInst)),
      fetchBuffSize(params->fetchBuffSize),
      predecoder(NULL)
{ }

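// Decode the raw machine instruction for 'inst' out of an already-fetched
// cache block and hand the result (ExtMachInst plus updated PC state) back
// to the instruction.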
void
FetchUnit::createMachInst(std::list<FetchBlock*>::iterator fetch_it,
                          DynInstPtr inst)
{
    ExtMachInst ext_inst;
    Addr block_addr = cacheBlockAlign(inst->getMemAddr());
    Addr fetch_addr = inst->getMemAddr();
    unsigned fetch_offset = (fetch_addr - block_addr) / instSize;
    ThreadID tid = inst->readTid();
    TheISA::PCState instPC = inst->pcState();

    DPRINTF(InOrderCachePort, "Creating instruction [sn:%i] w/fetch data @"
            "addr:%08p block:%08p\n", inst->seqNum, fetch_addr, block_addr);

    assert((*fetch_it)->valid);

    TheISA::MachInst *fetchInsts =
        reinterpret_cast<TheISA::MachInst *>((*fetch_it)->block);

    MachInst mach_inst =
        TheISA::gtoh(fetchInsts[fetch_offset]);

    predecoder.setTC(cpu->thread[tid]->getTC());
    predecoder.moreBytes(instPC, inst->instAddr(), mach_inst);
    ext_inst = predecoder.getExtMachInst(instPC);

    inst->pcState(instPC);
    inst->setMachInst(ext_inst);
}

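// Claim a resource slot for a fetch request. The TLB must not be blocked
// for this thread, and the instruction must already have a valid fetch
// (memory) address.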
int
FetchUnit::getSlot(DynInstPtr inst)
{
    if (tlbBlocked[inst->threadNumber]) {
        return -1;
    }

    if (!inst->validMemAddr()) {
        panic("[tid:%i][sn:%i] Mem. Addr. must be set before requesting "
              "cache access\n", inst->readTid(), inst->seqNum);
    }

    int new_slot = Resource::getSlot(inst);

    if (new_slot == -1)
        return -1;

    inst->memTime = curTick();
    return new_slot;
}

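// Drop the address dependency for this instruction by clearing its fetch
// (memory) address.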
void
FetchUnit::removeAddrDependency(DynInstPtr inst)
{
    inst->unsetMemAddr();
}

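// Build the CacheRequest for a scheduled fetch. Only InitiateFetch entries
// are expected here; they are translated into a ReadReq memory command.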
ResReqPtr
FetchUnit::getRequest(DynInstPtr inst, int stage_num, int res_idx,
                      int slot_num, unsigned cmd)
{
    ScheduleEntry* sched_entry = inst->resSched.top();

    if (!inst->validMemAddr()) {
        panic("Mem. Addr. must be set before requesting cache access\n");
    }

    MemCmd::Command pkt_cmd;

    switch (sched_entry->cmd)
    {
      case InitiateFetch:
        pkt_cmd = MemCmd::ReadReq;

        DPRINTF(InOrderCachePort,
                "[tid:%i]: Fetch request from [sn:%i] for addr %08p\n",
                inst->readTid(), inst->seqNum, inst->getMemAddr());
        break;

      default:
        panic("%i: Unexpected request type (%i) to %s", curTick(),
              sched_entry->cmd, name());
    }

    return new CacheRequest(this, inst, stage_num, id, slot_num,
                            sched_entry->cmd, 0, pkt_cmd,
                            0/*flags*/, this->cpu->readCpuId(),
                            inst->resSched.top()->idx);
}

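// Create the memory Request for a fetch access. The request address is
// cache-block aligned, so the access covers the fetch block containing the
// instruction rather than just the instruction's own address.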
void
FetchUnit::setupMemRequest(DynInstPtr inst, CacheReqPtr cache_req,
                           int acc_size, int flags)
{
    ThreadID tid = inst->readTid();
    Addr aligned_addr = cacheBlockAlign(inst->getMemAddr());

    inst->fetchMemReq =
        new Request(tid, aligned_addr, acc_size, flags,
                    inst->instAddr(), cpu->readCpuId(), tid);

    cache_req->memReq = inst->fetchMemReq;
}

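// Search a fetch-block list (fetchBuffer or pendingFetch) for a block that
// matches the given address space ID and block-aligned address. Returns the
// end iterator if no match is found.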
std::list<FetchUnit::FetchBlock*>::iterator
FetchUnit::findBlock(std::list<FetchBlock*> &fetch_blocks, int asid,
                     Addr block_addr)
{
    std::list<FetchBlock*>::iterator fetch_it = fetch_blocks.begin();
    std::list<FetchBlock*>::iterator end_it = fetch_blocks.end();

    while (fetch_it != end_it) {
        if ((*fetch_it)->asid == asid &&
            (*fetch_it)->addr == block_addr) {
            return fetch_it;
        }

        fetch_it++;
    }

    return fetch_it;
}

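// Find the least-recently-used buffer block with no instructions still
// pending on it. Returns the end iterator if every block is still in use.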
std::list<FetchUnit::FetchBlock*>::iterator
FetchUnit::findReplacementBlock()
{
    std::list<FetchBlock*>::iterator fetch_it = fetchBuffer.begin();
    std::list<FetchBlock*>::iterator end_it = fetchBuffer.end();

    while (fetch_it != end_it) {
        if ((*fetch_it)->cnt == 0) {
            return fetch_it;
        } else {
            DPRINTF(InOrderCachePort, "Block %08p has %i insts pending.\n",
                    (*fetch_it)->addr, (*fetch_it)->cnt);
        }
        fetch_it++;
    }

    return fetch_it;
}

void
FetchUnit::markBlockUsed(std::list<FetchBlock*>::iterator block_it)
{
    // Move block from whatever location it is in fetch buffer
    // to the back (represents most-recently-used location)
    if (block_it != fetchBuffer.end()) {
        FetchBlock *mru_blk = *block_it;
        fetchBuffer.erase(block_it);
        fetchBuffer.push_back(mru_blk);
    }
}

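// Main entry point from the pipeline. InitiateFetch either piggy-backs on a
// buffered/pending block or starts a new cache access for the aligned
// block; CompleteFetch turns the returned block data into a decoded
// instruction, updating the fetch buffer's book-keeping along the way.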
void
FetchUnit::execute(int slot_num)
{
    CacheReqPtr cache_req = dynamic_cast<CacheReqPtr>(reqMap[slot_num]);
    assert(cache_req);

    if (cachePortBlocked) {
        DPRINTF(InOrderCachePort, "Cache Port Blocked. Cannot Access\n");
        cache_req->setCompleted(false);
        return;
    }

    DynInstPtr inst = cache_req->inst;
    ThreadID tid = inst->readTid();
    Addr block_addr = cacheBlockAlign(inst->getMemAddr());
    int asid = cpu->asid[tid];
    cache_req->fault = NoFault;

    switch (cache_req->cmd)
    {
      case InitiateFetch:
        {
            // Check to see if we've already got this request buffered
            // or pending to be buffered
            bool do_fetch = true;
            std::list<FetchBlock*>::iterator pending_it;
            pending_it = findBlock(pendingFetch, asid, block_addr);
            if (pending_it != pendingFetch.end()) {
                (*pending_it)->cnt++;
                do_fetch = false;

                DPRINTF(InOrderCachePort, "%08p is a pending fetch block "
                        "(pending:%i).\n", block_addr,
                        (*pending_it)->cnt);
            } else if (pendingFetch.size() < fetchBuffSize) {
                std::list<FetchBlock*>::iterator buff_it;
                buff_it = findBlock(fetchBuffer, asid, block_addr);
                if (buff_it != fetchBuffer.end()) {
                    (*buff_it)->cnt++;
                    do_fetch = false;

                    DPRINTF(InOrderCachePort, "%08p is in fetch buffer "
                            "(pending:%i).\n", block_addr, (*buff_it)->cnt);
                }
            }

            if (!do_fetch) {
                DPRINTF(InOrderCachePort, "Inst. [sn:%i] marked to be filled "
                        "through fetch buffer.\n", inst->seqNum);
                cache_req->fetchBufferFill = true;
                cache_req->setCompleted(true);
                return;
            }

            // Check to see if there is room in the fetch buffer for this
            // instruction. If not, block this request.
            if (pendingFetch.size() >= fetchBuffSize) {
                DPRINTF(InOrderCachePort, "No room available in fetch buffer.\n");
                cache_req->setCompleted(false);
                return;
            }

            doTLBAccess(inst, cache_req, cacheBlkSize, 0, TheISA::TLB::Execute);

            if (cache_req->fault == NoFault) {
                DPRINTF(InOrderCachePort,
                        "[tid:%u]: Initiating fetch access to %s for "
                        "addr:%#x (block:%#x)\n", tid, name(),
                        cache_req->inst->getMemAddr(), block_addr);

                cache_req->reqData = new uint8_t[cacheBlkSize];

                inst->setCurResSlot(slot_num);

                doCacheAccess(inst);

                if (cache_req->isMemAccPending()) {
                    pendingFetch.push_back(new FetchBlock(asid, block_addr));
                }
            }

            break;
        }

      case CompleteFetch:
        if (cache_req->fetchBufferFill) {
            // Block this request if it is depending on a previous fetch,
            // but that fetch hasn't made it back yet.
            std::list<FetchBlock*>::iterator fetch_it =
                findBlock(fetchBuffer, asid, block_addr);
            if (fetch_it == fetchBuffer.end()) {
                DPRINTF(InOrderCachePort, "%#x not available yet\n",
                        block_addr);
                cache_req->setCompleted(false);
                return;
            }

            // Make New Instruction
            createMachInst(fetch_it, inst);
            if (inst->traceData) {
                inst->traceData->setStaticInst(inst->staticInst);
                inst->traceData->setPC(inst->pcState());
            }

            // FetchBuffer Book-Keeping
            (*fetch_it)->cnt--;
            assert((*fetch_it)->cnt >= 0);
            markBlockUsed(fetch_it);

            cache_req->done();
            return;
        }

        if (cache_req->isMemAccComplete()) {
            if (fetchBuffer.size() >= fetchBuffSize) {
                // If there is no replacement block, then we'll just have
                // to wait till that gets cleared before satisfying the fetch
                // for this instruction
                std::list<FetchBlock*>::iterator repl_it =
                    findReplacementBlock();
                if (repl_it == fetchBuffer.end()) {
                    DPRINTF(InOrderCachePort, "Unable to find replacement block"
                            " and complete fetch.\n");
                    cache_req->setCompleted(false);
                    return;
                }

                fetchBuffer.erase(repl_it);
            }

            DPRINTF(InOrderCachePort,
                    "[tid:%i]: Completing Fetch Access for [sn:%i]\n",
                    tid, inst->seqNum);

            // Make New Instruction
            std::list<FetchBlock*>::iterator fetch_it =
                findBlock(pendingFetch, asid, block_addr);

            assert(fetch_it != pendingFetch.end());
            assert((*fetch_it)->valid);

            createMachInst(fetch_it, inst);
            if (inst->traceData) {
                inst->traceData->setStaticInst(inst->staticInst);
                inst->traceData->setPC(inst->pcState());
            }

            // Update instructions waiting on new fetch block
            FetchBlock *new_block = (*fetch_it);
            new_block->cnt--;
            assert(new_block->cnt >= 0);

            // Finally, update FetchBuffer w/Pending Block into the
            // MRU location
            pendingFetch.erase(fetch_it);
            fetchBuffer.push_back(new_block);

            DPRINTF(InOrderCachePort, "[tid:%i]: Instruction [sn:%i] is: %s\n",
                    tid, inst->seqNum,
                    inst->staticInst->disassemble(inst->instAddr()));

            inst->unsetMemAddr();

            delete cache_req->dataPkt;

            cache_req->done();
        } else {
            DPRINTF(InOrderCachePort,
                    "[tid:%i]: [sn:%i]: Unable to Complete Fetch Access\n",
                    tid, inst->seqNum);
            DPRINTF(InOrderStall,
                    "STALL: [tid:%i]: Fetch miss from %08p\n",
                    tid, cache_req->inst->instAddr());
            cache_req->setCompleted(false);
            // NOTE: For SwitchOnCacheMiss ThreadModel, we *don't* switch on
            // fetch miss, but we could ...
            // cache_req->setMemStall(true);
        }
        break;

      default:
        fatal("Unrecognized command to %s", resName);
    }
}

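// Called when the cache responds to a fetch access. Copies the returned
// block into the matching pendingFetch entry, marks the memory access
// complete, and wakes the CPU/stage waiting on this fetch.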
void
FetchUnit::processCacheCompletion(PacketPtr pkt)
{
    // Cast to correct packet type
    CacheReqPacket* cache_pkt = dynamic_cast<CacheReqPacket*>(pkt);
    assert(cache_pkt);

    if (cache_pkt->cacheReq->isSquashed()) {
        DPRINTF(InOrderCachePort,
                "Ignoring completion of squashed access, [tid:%i] [sn:%i]\n",
                cache_pkt->cacheReq->getInst()->readTid(),
                cache_pkt->cacheReq->getInst()->seqNum);
        DPRINTF(RefCount,
                "Ignoring completion of squashed access, [tid:%i] [sn:%i]\n",
                cache_pkt->cacheReq->getTid(),
                cache_pkt->cacheReq->seqNum);

        cache_pkt->cacheReq->done();
        delete cache_pkt;

        cpu->wakeCPU();
        return;
    }

    Addr block_addr = cacheBlockAlign(cache_pkt->cacheReq->
                                      getInst()->getMemAddr());

    DPRINTF(InOrderCachePort,
            "[tid:%u]: [sn:%i]: Waking from fetch access to addr:%#x"
            "(phys:%#x), size:%i\n",
            cache_pkt->cacheReq->getInst()->readTid(),
            cache_pkt->cacheReq->getInst()->seqNum,
            block_addr, cache_pkt->getAddr(), cache_pkt->getSize());

    // Cast to correct request type
    CacheRequest *cache_req = dynamic_cast<CacheReqPtr>(
        findRequest(cache_pkt->cacheReq->getInst(), cache_pkt->instIdx));

    if (!cache_req) {
        panic("[tid:%u]: [sn:%i]: Can't find slot for fetch access to "
              "addr. %08p\n", cache_pkt->cacheReq->getInst()->readTid(),
              cache_pkt->cacheReq->getInst()->seqNum,
              block_addr);
    }

    // Get resource request info
    unsigned stage_num = cache_req->getStageNum();
    DynInstPtr inst = cache_req->inst;
    ThreadID tid = cache_req->inst->readTid();
    short asid = cpu->asid[tid];

    assert(!cache_req->isSquashed());
    assert(inst->resSched.top()->cmd == CompleteFetch);

    DPRINTF(InOrderCachePort,
            "[tid:%u]: [sn:%i]: Processing fetch access for block %#x\n",
            tid, inst->seqNum, block_addr);

    std::list<FetchBlock*>::iterator pend_it = findBlock(pendingFetch, asid,
                                                         block_addr);
    assert(pend_it != pendingFetch.end());

    // Copy Data to pendingFetch queue...
    (*pend_it)->block = new uint8_t[cacheBlkSize];
    memcpy((*pend_it)->block, cache_pkt->getPtr<uint8_t>(), cacheBlkSize);
    (*pend_it)->valid = true;

    cache_req->setMemAccPending(false);
    cache_req->setMemAccCompleted();

    if (cache_req->isMemStall() &&
        cpu->threadModel == InOrderCPU::SwitchOnCacheMiss) {
        DPRINTF(InOrderCachePort, "[tid:%u] Waking up from Cache Miss.\n",
                tid);

        cpu->activateContext(tid);

        DPRINTF(ThreadModel, "Activating [tid:%i] after return from cache "
                "miss.\n", tid);
    }

    // Wake up the CPU (if it went to sleep and was waiting on this
    // completion event).
    cpu->wakeCPU();

    DPRINTF(Activity, "[tid:%u] Activating %s due to cache completion\n",
            tid, cpu->pipelineStage[stage_num]->name());

    cpu->switchToActive(stage_num);
}

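// A fetch request is being squashed: decrement the pending count on the
// fetch block it referenced (in the fetch buffer or the pending list) so
// the block can eventually be replaced, then defer to the base CacheUnit.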
void
FetchUnit::squashCacheRequest(CacheReqPtr req_ptr)
{
    DynInstPtr inst = req_ptr->getInst();
    ThreadID tid = inst->readTid();
    Addr block_addr = cacheBlockAlign(inst->getMemAddr());
    int asid = cpu->asid[tid];

    // Check Fetch Buffer (or pending fetch) for this block and
    // update pending counts
    std::list<FetchBlock*>::iterator buff_it = findBlock(fetchBuffer,
                                                         asid,
                                                         block_addr);
    if (buff_it != fetchBuffer.end()) {
        (*buff_it)->cnt--;
        DPRINTF(InOrderCachePort, "[sn:%i] Removing Pending Fetch "
                "for Buffer block %08p (cnt=%i)\n", inst->seqNum,
                block_addr, (*buff_it)->cnt);
    } else {
        std::list<FetchBlock*>::iterator block_it = findBlock(pendingFetch,
                                                              asid,
                                                              block_addr);
        if (block_it != pendingFetch.end()) {
            (*block_it)->cnt--;
            if ((*block_it)->cnt == 0) {
                DPRINTF(InOrderCachePort, "[sn:%i] Removing Pending Fetch "
                        "for block %08p (cnt=%i)\n", inst->seqNum,
                        block_addr, (*block_it)->cnt);
                pendingFetch.erase(block_it);
            }
        }
    }

    CacheUnit::squashCacheRequest(req_ptr);
}