inorder: update pipeline interface for handling finished resource reqs
src/cpu/inorder/resources/fetch_unit.cc
/*
 * Copyright (c) 2011 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Korey Sewell
 *
 */

#include <vector>
#include <list>

#include "arch/isa_traits.hh"
#include "arch/locked_mem.hh"
#include "arch/utility.hh"
#include "arch/predecoder.hh"
#include "config/the_isa.hh"
#include "cpu/inorder/resources/cache_unit.hh"
#include "cpu/inorder/resources/fetch_unit.hh"
#include "cpu/inorder/pipeline_traits.hh"
#include "cpu/inorder/cpu.hh"
#include "cpu/inorder/resource_pool.hh"
#include "mem/request.hh"

using namespace std;
using namespace TheISA;
using namespace ThePipeline;

FetchUnit::FetchUnit(string res_name, int res_id, int res_width,
                     int res_latency, InOrderCPU *_cpu,
                     ThePipeline::Params *params)
    : CacheUnit(res_name, res_id, res_width, res_latency, _cpu, params),
      instSize(sizeof(TheISA::MachInst)), fetchBuffSize(params->fetchBuffSize),
      predecoder(NULL)
{ }

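// Grab the raw instruction bits for 'inst' out of an already-fetched
// cache block and run them through the predecoder to produce the
// ExtMachInst the rest of the pipeline will use.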
void
FetchUnit::createMachInst(std::list<FetchBlock*>::iterator fetch_it,
                          DynInstPtr inst)
{
    ExtMachInst ext_inst;
    Addr block_addr = cacheBlockAlign(inst->getMemAddr());
    Addr fetch_addr = inst->getMemAddr();
    unsigned fetch_offset = (fetch_addr - block_addr) / instSize;
    ThreadID tid = inst->readTid();
    TheISA::PCState instPC = inst->pcState();

    DPRINTF(InOrderCachePort, "Creating instruction [sn:%i] w/fetch data @"
            "addr:%08p block:%08p\n", inst->seqNum, fetch_addr, block_addr);

    assert((*fetch_it)->valid);

    TheISA::MachInst *fetchInsts =
        reinterpret_cast<TheISA::MachInst *>((*fetch_it)->block);

    MachInst mach_inst =
        TheISA::gtoh(fetchInsts[fetch_offset]);

    predecoder.setTC(cpu->thread[tid]->getTC());
    predecoder.moreBytes(instPC, inst->instAddr(), mach_inst);
    ext_inst = predecoder.getExtMachInst(instPC);

    inst->pcState(instPC);
    inst->setMachInst(ext_inst);
}

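// A fetch request can only be granted a slot if the I-TLB is not
// blocked for this thread and the instruction's fetch address has
// already been set.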
int
FetchUnit::getSlot(DynInstPtr inst)
{
    if (tlbBlocked[inst->threadNumber]) {
        return -1;
    }

    if (!inst->validMemAddr()) {
        panic("[tid:%i][sn:%i] Mem. Addr. must be set before requesting "
              "cache access\n", inst->readTid(), inst->seqNum);
    }

    int new_slot = Resource::getSlot(inst);

    if (new_slot == -1)
        return -1;

    inst->memTime = curTick();
    return new_slot;
}

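// Clearing the instruction's memory address releases the fetch-address
// dependency tracked by this resource.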
void
FetchUnit::removeAddrDependency(DynInstPtr inst)
{
    inst->unsetMemAddr();
}

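// Build the cache request used for an instruction fetch. Fetches are
// always read requests and must come from an InitiateFetch schedule
// entry.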
ResReqPtr
FetchUnit::getRequest(DynInstPtr inst, int stage_num, int res_idx,
                      int slot_num, unsigned cmd)
{
    ScheduleEntry* sched_entry = *inst->curSkedEntry;
    CacheRequest* cache_req = dynamic_cast<CacheRequest*>(reqs[slot_num]);

    if (!inst->validMemAddr()) {
        panic("Mem. Addr. must be set before requesting cache access\n");
    }

    assert(sched_entry->cmd == InitiateFetch);

    DPRINTF(InOrderCachePort,
            "[tid:%i]: Fetch request from [sn:%i] for addr %08p\n",
            inst->readTid(), inst->seqNum, inst->getMemAddr());

    cache_req->setRequest(inst, stage_num, id, slot_num,
                          sched_entry->cmd, MemCmd::ReadReq,
                          inst->curSkedEntry->idx);

    return cache_req;
}

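// Fetches operate on whole cache blocks, so the memory request is built
// for the block-aligned address rather than the instruction's own
// address.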
void
FetchUnit::setupMemRequest(DynInstPtr inst, CacheReqPtr cache_req,
                           int acc_size, int flags)
{
    ThreadID tid = inst->readTid();
    Addr aligned_addr = cacheBlockAlign(inst->getMemAddr());

    inst->fetchMemReq =
        new Request(tid, aligned_addr, acc_size, flags,
                    inst->instAddr(), cpu->readCpuId(), tid);

    cache_req->memReq = inst->fetchMemReq;
}

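// Linear search of a fetch-block list for a block matching both the
// address space id and the block-aligned address. Returns the list's
// end iterator if no match is found.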
std::list<FetchUnit::FetchBlock*>::iterator
FetchUnit::findBlock(std::list<FetchBlock*> &fetch_blocks, int asid,
                     Addr block_addr)
{
    std::list<FetchBlock*>::iterator fetch_it = fetch_blocks.begin();
    std::list<FetchBlock*>::iterator end_it = fetch_blocks.end();

    while (fetch_it != end_it) {
        if ((*fetch_it)->asid == asid &&
            (*fetch_it)->addr == block_addr) {
            return fetch_it;
        }

        fetch_it++;
    }

    return fetch_it;
}

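// Find a buffered block that no instructions are still waiting on
// (cnt == 0) so it can be evicted. Returns fetchBuffer.end() if every
// block still has pending users.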
std::list<FetchUnit::FetchBlock*>::iterator
FetchUnit::findReplacementBlock()
{
    std::list<FetchBlock*>::iterator fetch_it = fetchBuffer.begin();
    std::list<FetchBlock*>::iterator end_it = fetchBuffer.end();

    while (fetch_it != end_it) {
        if ((*fetch_it)->cnt == 0) {
            return fetch_it;
        } else {
            DPRINTF(InOrderCachePort, "Block %08p has %i insts pending.\n",
                    (*fetch_it)->addr, (*fetch_it)->cnt);
        }
        fetch_it++;
    }

    return fetch_it;
}

void
FetchUnit::markBlockUsed(std::list<FetchBlock*>::iterator block_it)
{
    // Move the block from wherever it is in the fetch buffer to the
    // back, which represents the most-recently-used position.
    if (block_it != fetchBuffer.end()) {
        FetchBlock *mru_blk = *block_it;
        fetchBuffer.erase(block_it);
        fetchBuffer.push_back(mru_blk);
    }
}

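// Main entry point from the pipeline. InitiateFetch either reuses a
// pending/buffered block or starts a new TLB + cache access for the
// block containing this instruction; CompleteFetch pulls the
// instruction bits out of the filled block and retires the request.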
void
FetchUnit::execute(int slot_num)
{
    CacheReqPtr cache_req = dynamic_cast<CacheReqPtr>(reqs[slot_num]);
    assert(cache_req);

    if (cachePortBlocked && cache_req->cmd == InitiateFetch) {
        DPRINTF(InOrderCachePort, "Cache Port Blocked. Cannot Access\n");
        cache_req->done(false);
        return;
    }

    DynInstPtr inst = cache_req->inst;
    ThreadID tid = inst->readTid();
    Addr block_addr = cacheBlockAlign(inst->getMemAddr());
    int asid = cpu->asid[tid];

    inst->fault = NoFault;

    switch (cache_req->cmd)
    {
      case InitiateFetch:
        {
            // Check to see if we've already got this request buffered
            // or pending to be buffered
            bool do_fetch = true;
            std::list<FetchBlock*>::iterator pending_it;
            pending_it = findBlock(pendingFetch, asid, block_addr);
            if (pending_it != pendingFetch.end()) {
                (*pending_it)->cnt++;
                do_fetch = false;

                DPRINTF(InOrderCachePort, "%08p is a pending fetch block "
                        "(pending:%i).\n", block_addr,
                        (*pending_it)->cnt);
            } else if (pendingFetch.size() < fetchBuffSize) {
                std::list<FetchBlock*>::iterator buff_it;
                buff_it = findBlock(fetchBuffer, asid, block_addr);
                if (buff_it != fetchBuffer.end()) {
                    (*buff_it)->cnt++;
                    do_fetch = false;

                    DPRINTF(InOrderCachePort, "%08p is in fetch buffer "
                            "(pending:%i).\n", block_addr, (*buff_it)->cnt);
                }
            }

            if (!do_fetch) {
                DPRINTF(InOrderCachePort, "Inst. [sn:%i] marked to be filled "
                        "through fetch buffer.\n", inst->seqNum);
                cache_req->fetchBufferFill = true;
                cache_req->setCompleted(true);
                return;
            }

            // Check to see if there is room in the fetch buffer for this
            // instruction. If not, block this request.
            if (pendingFetch.size() >= fetchBuffSize) {
                DPRINTF(InOrderCachePort,
                        "No room available in fetch buffer.\n");
                cache_req->done();
                return;
            }

            doTLBAccess(inst, cache_req, cacheBlkSize, 0,
                        TheISA::TLB::Execute);

            if (inst->fault == NoFault) {
                DPRINTF(InOrderCachePort,
                        "[tid:%u]: Initiating fetch access to %s for "
                        "addr:%#x (block:%#x)\n", tid, name(),
                        cache_req->inst->getMemAddr(), block_addr);

                cache_req->reqData = new uint8_t[cacheBlkSize];

                inst->setCurResSlot(slot_num);

                doCacheAccess(inst);

                if (cache_req->isMemAccPending()) {
                    pendingFetch.push_back(new FetchBlock(asid, block_addr));
                }
            }

            break;
        }

      case CompleteFetch:
        if (cache_req->fetchBufferFill) {
            // Block this request if it depends on a previous fetch that
            // hasn't arrived yet.
            std::list<FetchBlock*>::iterator fetch_it =
                findBlock(fetchBuffer, asid, block_addr);
            if (fetch_it == fetchBuffer.end()) {
                DPRINTF(InOrderCachePort, "%#x not available yet\n",
                        block_addr);
                cache_req->setCompleted(false);
                return;
            }

            // Make New Instruction
            createMachInst(fetch_it, inst);
            if (inst->traceData) {
                inst->traceData->setStaticInst(inst->staticInst);
                inst->traceData->setPC(inst->pcState());
            }

            // FetchBuffer Book-Keeping
            (*fetch_it)->cnt--;
            assert((*fetch_it)->cnt >= 0);
            markBlockUsed(fetch_it);

            cache_req->done();
            return;
        }

        if (cache_req->isMemAccComplete()) {
            if (fetchBuffer.size() >= fetchBuffSize) {
                // If there is no replacement block, then we'll just have
                // to wait till that gets cleared before satisfying the fetch
                // for this instruction
                std::list<FetchBlock*>::iterator repl_it =
                    findReplacementBlock();
                if (repl_it == fetchBuffer.end()) {
                    DPRINTF(InOrderCachePort, "Unable to find replacement "
                            "block and complete fetch.\n");
                    cache_req->setCompleted(false);
                    return;
                }

                fetchBuffer.erase(repl_it);
            }

            DPRINTF(InOrderCachePort,
                    "[tid:%i]: Completing Fetch Access for [sn:%i]\n",
                    tid, inst->seqNum);

            // Make New Instruction
            std::list<FetchBlock*>::iterator fetch_it =
                findBlock(pendingFetch, asid, block_addr);

            assert(fetch_it != pendingFetch.end());
            assert((*fetch_it)->valid);

            createMachInst(fetch_it, inst);
            if (inst->traceData) {
                inst->traceData->setStaticInst(inst->staticInst);
                inst->traceData->setPC(inst->pcState());
            }

            // Update instructions waiting on new fetch block
            FetchBlock *new_block = (*fetch_it);
            new_block->cnt--;
            assert(new_block->cnt >= 0);

            // Finally, update FetchBuffer w/Pending Block into the
            // MRU location
            pendingFetch.erase(fetch_it);
            fetchBuffer.push_back(new_block);

            DPRINTF(InOrderCachePort, "[tid:%i]: Instruction [sn:%i] is: %s\n",
                    tid, inst->seqNum,
                    inst->staticInst->disassemble(inst->instAddr()));

            inst->unsetMemAddr();

            delete cache_req->dataPkt;

            cache_req->done();
        } else {
            DPRINTF(InOrderCachePort,
                    "[tid:%i]: [sn:%i]: Unable to Complete Fetch Access\n",
                    tid, inst->seqNum);
            DPRINTF(InOrderStall,
                    "STALL: [tid:%i]: Fetch miss from %08p\n",
                    tid, cache_req->inst->instAddr());
            cache_req->setCompleted(false);
            // NOTE: For the SwitchOnCacheMiss ThreadModel, we *don't* switch
            // on a fetch miss, but we could ...
            // cache_req->setMemStall(true);
        }
        break;

      default:
        fatal("Unrecognized command to %s", resName);
    }
}

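// Called when the cache responds to a fetch. Copies the returned block
// into the matching pendingFetch entry, marks the memory access
// complete, and wakes up the CPU/stage waiting on this fetch.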
void
FetchUnit::processCacheCompletion(PacketPtr pkt)
{
    // Cast to correct packet type
    CacheReqPacket* cache_pkt = dynamic_cast<CacheReqPacket*>(pkt);
    assert(cache_pkt);

    if (cache_pkt->cacheReq->isSquashed()) {
        DPRINTF(InOrderCachePort,
                "Ignoring completion of squashed access, [tid:%i] [sn:%i]\n",
                cache_pkt->cacheReq->getInst()->readTid(),
                cache_pkt->cacheReq->getInst()->seqNum);
        DPRINTF(RefCount,
                "Ignoring completion of squashed access, [tid:%i] [sn:%i]\n",
                cache_pkt->cacheReq->getTid(),
                cache_pkt->cacheReq->seqNum);

        cache_pkt->cacheReq->done();
        cache_pkt->cacheReq->freeSlot();
        delete cache_pkt;

        cpu->wakeCPU();
        return;
    }

    Addr block_addr = cacheBlockAlign(cache_pkt->cacheReq->
                                      getInst()->getMemAddr());

    DPRINTF(InOrderCachePort,
            "[tid:%u]: [sn:%i]: Waking from fetch access to addr:%#x"
            "(phys:%#x), size:%i\n",
            cache_pkt->cacheReq->getInst()->readTid(),
            cache_pkt->cacheReq->getInst()->seqNum,
            block_addr, cache_pkt->getAddr(), cache_pkt->getSize());

    // Cast to correct request type
    CacheRequest *cache_req = dynamic_cast<CacheReqPtr>(
        findRequest(cache_pkt->cacheReq->getInst(), cache_pkt->instIdx));

    if (!cache_req) {
        panic("[tid:%u]: [sn:%i]: Can't find slot for fetch access to "
              "addr. %08p\n", cache_pkt->cacheReq->getInst()->readTid(),
              cache_pkt->cacheReq->getInst()->seqNum,
              block_addr);
    }

    // Get resource request info
    unsigned stage_num = cache_req->getStageNum();
    DynInstPtr inst = cache_req->inst;
    ThreadID tid = cache_req->inst->readTid();
    short asid = cpu->asid[tid];

    assert(!cache_req->isSquashed());
    assert(inst->curSkedEntry->cmd == CompleteFetch);

    DPRINTF(InOrderCachePort,
            "[tid:%u]: [sn:%i]: Processing fetch access for block %#x\n",
            tid, inst->seqNum, block_addr);

    std::list<FetchBlock*>::iterator pend_it = findBlock(pendingFetch, asid,
                                                         block_addr);
    assert(pend_it != pendingFetch.end());

    // Copy Data to pendingFetch queue...
    (*pend_it)->block = new uint8_t[cacheBlkSize];
    memcpy((*pend_it)->block, cache_pkt->getPtr<uint8_t>(), cacheBlkSize);
    (*pend_it)->valid = true;

    cache_req->setMemAccPending(false);
    cache_req->setMemAccCompleted();

    if (cache_req->isMemStall() &&
        cpu->threadModel == InOrderCPU::SwitchOnCacheMiss) {
        DPRINTF(InOrderCachePort, "[tid:%u] Waking up from Cache Miss.\n",
                tid);

        cpu->activateContext(tid);

        DPRINTF(ThreadModel, "Activating [tid:%i] after return from cache "
                "miss.\n", tid);
    }

    // Wake up the CPU (if it went to sleep and was waiting on this
    // completion event).
    cpu->wakeCPU();

    DPRINTF(Activity, "[tid:%u] Activating %s due to cache completion\n",
            tid, cpu->pipelineStage[stage_num]->name());

    cpu->switchToActive(stage_num);
}

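// On a squash, decrement the pending count on whichever buffered or
// pending block this request was tied to (dropping a pending block
// whose count reaches zero) before handing off to the base class.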
void
FetchUnit::squashCacheRequest(CacheReqPtr req_ptr)
{
    DynInstPtr inst = req_ptr->getInst();
    ThreadID tid = inst->readTid();
    Addr block_addr = cacheBlockAlign(inst->getMemAddr());
    int asid = cpu->asid[tid];

    // Check Fetch Buffer (or pending fetch) for this block and
    // update pending counts
    std::list<FetchBlock*>::iterator buff_it = findBlock(fetchBuffer,
                                                         asid,
                                                         block_addr);
    if (buff_it != fetchBuffer.end()) {
        (*buff_it)->cnt--;
        DPRINTF(InOrderCachePort, "[sn:%i] Removing Pending Fetch "
                "for Buffer block %08p (cnt=%i)\n", inst->seqNum,
                block_addr, (*buff_it)->cnt);
    } else {
        std::list<FetchBlock*>::iterator block_it = findBlock(pendingFetch,
                                                              asid,
                                                              block_addr);
        if (block_it != pendingFetch.end()) {
            (*block_it)->cnt--;
            if ((*block_it)->cnt == 0) {
                DPRINTF(InOrderCachePort, "[sn:%i] Removing Pending Fetch "
                        "for block %08p (cnt=%i)\n", inst->seqNum,
                        block_addr, (*block_it)->cnt);
                pendingFetch.erase(block_it);
            }
        }
    }

    CacheUnit::squashCacheRequest(req_ptr);
}