inorder: add updatePC event to resPool
src/cpu/inorder/resources/cache_unit.cc
/*
 * Copyright (c) 2007 MIPS Technologies, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Korey Sewell
 *
 */
31
32 #include <vector>
33 #include <list>
34
35 #include "arch/isa_traits.hh"
36 #include "arch/locked_mem.hh"
37 #include "arch/utility.hh"
38 #include "arch/predecoder.hh"
39 #include "config/the_isa.hh"
40 #include "cpu/inorder/resources/cache_unit.hh"
41 #include "cpu/inorder/pipeline_traits.hh"
42 #include "cpu/inorder/cpu.hh"
43 #include "mem/request.hh"
44
45 using namespace std;
46 using namespace TheISA;
47 using namespace ThePipeline;
48
Tick
CacheUnit::CachePort::recvAtomic(PacketPtr pkt)
{
    panic("CacheUnit::CachePort doesn't expect recvAtomic callback!");
    return curTick;
}

void
CacheUnit::CachePort::recvFunctional(PacketPtr pkt)
{
    panic("CacheUnit::CachePort doesn't expect recvFunctional callback!");
}

void
CacheUnit::CachePort::recvStatusChange(Status status)
{
    if (status == RangeChange)
        return;

    panic("CacheUnit::CachePort doesn't expect recvStatusChange callback!");
}

bool
CacheUnit::CachePort::recvTiming(Packet *pkt)
{
    cachePortUnit->processCacheCompletion(pkt);
    return true;
}

void
CacheUnit::CachePort::recvRetry()
{
    cachePortUnit->recvRetry();
}
CacheUnit::CacheUnit(string res_name, int res_id, int res_width,
        int res_latency, InOrderCPU *_cpu, ThePipeline::Params *params)
    : Resource(res_name, res_id, res_width, res_latency, _cpu),
      retryPkt(NULL), retrySlot(-1), cacheBlocked(false),
      predecoder(NULL)
{
    cachePort = new CachePort(this);

    // Hard-code the TLB selection for now: the resource name tells us
    // whether this unit fronts the I-cache or the D-cache
    if (res_name == "icache_port")
        _tlb = params->itb;
    else if (res_name == "dcache_port")
        _tlb = params->dtb;
    else
        fatal("Unrecognized TLB name passed by user");

    for (int i = 0; i < MaxThreads; i++) {
        tlbBlocked[i] = false;
    }
}
TheISA::TLB*
CacheUnit::tlb()
{
    return _tlb;
}

Port *
CacheUnit::getPort(const string &if_name, int idx)
{
    if (if_name == resName)
        return cachePort;
    else
        return NULL;
}

void
CacheUnit::init()
{
    // Currently used to model TLB latency. Eventually switch to
    // timing TLB translations.
    resourceEvent = new CacheUnitEvent[width];

    initSlots();
}
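/**
 * Acquire a resource slot for a memory instruction. Data-side requests
 * are denied while an earlier in-flight access to the same address is
 * outstanding (tracked per thread in addrList/addrMap); instruction
 * fetches skip that dependency check. Returns -1 if the TLB is blocked
 * for this thread or no slot is available.
 */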
int
CacheUnit::getSlot(DynInstPtr inst)
{
    ThreadID tid = inst->readTid();

    if (tlbBlocked[tid]) {
        return -1;
    }

    if (!inst->validMemAddr()) {
        panic("Mem. Addr. must be set before requesting cache access\n");
    }

    Addr req_addr = inst->getMemAddr();

    if (resName == "icache_port" ||
        find(addrList[tid].begin(), addrList[tid].end(), req_addr) ==
        addrList[tid].end()) {

        int new_slot = Resource::getSlot(inst);

        if (new_slot == -1)
            return -1;

        inst->memTime = curTick;
        addrList[tid].push_back(req_addr);
        addrMap[tid][req_addr] = inst->seqNum;
        DPRINTF(InOrderCachePort,
                "[tid:%i]: [sn:%i]: Address %08p added to dependency list\n",
                inst->readTid(), inst->seqNum, req_addr);
        return new_slot;
    } else {
        DPRINTF(InOrderCachePort,
                "[tid:%i] Denying request because there is an outstanding"
                " request for addr. %08p by [sn:%i] @ tick %i\n",
                inst->readTid(), req_addr, addrMap[tid][req_addr],
                inst->memTime);
        return -1;
    }
}
void
CacheUnit::freeSlot(int slot_num)
{
    ThreadID tid = reqMap[slot_num]->inst->readTid();

    vector<Addr>::iterator vect_it =
        find(addrList[tid].begin(), addrList[tid].end(),
             reqMap[slot_num]->inst->getMemAddr());
    assert(vect_it != addrList[tid].end());

    DPRINTF(InOrderCachePort,
            "[tid:%i]: Address %08p removed from dependency list\n",
            reqMap[slot_num]->inst->readTid(), (*vect_it));

    addrList[tid].erase(vect_it);

    Resource::freeSlot(slot_num);
}
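/**
 * Build a CacheRequest for the command at the head of the instruction's
 * resource schedule, translating the schedule command (InitiateFetch,
 * InitiateReadData, InitiateWriteData) into the corresponding MemCmd.
 */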
ResReqPtr
CacheUnit::getRequest(DynInstPtr inst, int stage_num, int res_idx,
                      int slot_num, unsigned cmd)
{
    ScheduleEntry* sched_entry = inst->resSched.top();

    if (!inst->validMemAddr()) {
        panic("Mem. Addr. must be set before requesting cache access\n");
    }

    MemCmd::Command pkt_cmd;

    switch (sched_entry->cmd)
    {
      case InitiateReadData:
        pkt_cmd = MemCmd::ReadReq;

        DPRINTF(InOrderCachePort,
                "[tid:%i]: Read request from [sn:%i] for addr %08p\n",
                inst->readTid(), inst->seqNum, inst->getMemAddr());
        break;

      case InitiateWriteData:
        pkt_cmd = MemCmd::WriteReq;

        DPRINTF(InOrderCachePort,
                "[tid:%i]: Write request from [sn:%i] for addr %08p\n",
                inst->readTid(), inst->seqNum, inst->getMemAddr());
        break;

      case InitiateFetch:
        pkt_cmd = MemCmd::ReadReq;

        DPRINTF(InOrderCachePort,
                "[tid:%i]: Fetch request from [sn:%i] for addr %08p\n",
                inst->readTid(), inst->seqNum, inst->getMemAddr());
        break;

      default:
        panic("%i: Unexpected request type (%i) to %s", curTick,
              sched_entry->cmd, name());
    }

    return new CacheRequest(this, inst, stage_num, id, slot_num,
                            sched_entry->cmd, 0, pkt_cmd,
                            0/*flags*/, this->cpu->readCpuId());
}
void
CacheUnit::requestAgain(DynInstPtr inst, bool &service_request)
{
    CacheReqPtr cache_req = dynamic_cast<CacheReqPtr>(findRequest(inst));
    assert(cache_req);

    // Check to see if this instruction is requesting the same command
    // or a different one
    if (cache_req->cmd != inst->resSched.top()->cmd) {
        // If different, then update the command in the request
        cache_req->cmd = inst->resSched.top()->cmd;
        DPRINTF(InOrderCachePort,
                "[tid:%i]: [sn:%i]: Updating the command for this "
                "instruction\n", inst->readTid(), inst->seqNum);

        service_request = true;
    } else {
        // If the same command, just check to see if the memory access was
        // completed, but don't try to re-execute
        DPRINTF(InOrderCachePort,
                "[tid:%i]: [sn:%i]: requesting this resource again\n",
                inst->readTid(), inst->seqNum);

        service_request = true;
    }
}
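/**
 * Perform an atomic-mode address translation for a fetch or data
 * access. On a fault, the requesting pipeline stage is stalled, the
 * thread's TLB is marked blocked, a CacheUnitEvent is scheduled to
 * model the TLB-miss latency before the stall is lifted, and the fault
 * is delivered via cpu->trap().
 */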
Fault
CacheUnit::doTLBAccess(DynInstPtr inst, CacheReqPtr cache_req, int acc_size,
                       int flags, TheISA::TLB::Mode tlb_mode)
{
    ThreadID tid = inst->readTid();
    Addr aligned_addr = inst->getMemAddr();
    unsigned stage_num = cache_req->getStageNum();
    unsigned slot_idx = cache_req->getSlot();

    if (tlb_mode == TheISA::TLB::Execute) {
        inst->fetchMemReq = new Request(inst->readTid(), aligned_addr,
                                        acc_size, flags, inst->readPC(),
                                        cpu->readCpuId(), inst->readTid());
        cache_req->memReq = inst->fetchMemReq;
    } else {
        inst->dataMemReq = new Request(inst->readTid(), aligned_addr,
                                       acc_size, flags, inst->readPC(),
                                       cpu->readCpuId(), inst->readTid());
        cache_req->memReq = inst->dataMemReq;
    }

    cache_req->fault =
        _tlb->translateAtomic(cache_req->memReq,
                              cpu->thread[tid]->getTC(), tlb_mode);

    if (cache_req->fault != NoFault) {
        DPRINTF(InOrderTLB, "[tid:%i]: %s encountered while translating "
                "addr:%08p for [sn:%i].\n", tid, cache_req->fault->name(),
                cache_req->memReq->getVaddr(), inst->seqNum);

        cpu->pipelineStage[stage_num]->setResStall(cache_req, tid);

        tlbBlocked[tid] = true;

        cache_req->tlbStall = true;

        scheduleEvent(slot_idx, 1);

        cpu->trap(cache_req->fault, tid);
    } else {
        DPRINTF(InOrderTLB, "[tid:%i]: [sn:%i] virt. addr %08p translated "
                "to phys. addr:%08p.\n", tid, inst->seqNum,
                cache_req->memReq->getVaddr(),
                cache_req->memReq->getPaddr());
    }

    return cache_req->fault;
}
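// The templated read()/write() below are the entry points used by the
// ISA definitions for loads and stores. A minimal usage sketch (the
// caller names are illustrative, not from this file): a 32-bit load
// would reach this resource roughly as
//
//     uint32_t data;
//     Fault fault = cache_unit->read(inst, inst->getMemAddr(), data, 0);
//
// Each call translates the address via doTLBAccess() and, if no fault
// occurred, initiates the timing cache access via doCacheAccess(). Note
// that the addr parameter is carried for interface compatibility; the
// translation actually uses inst->getMemAddr().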
template <class T>
Fault
CacheUnit::read(DynInstPtr inst, Addr addr, T &data, unsigned flags)
{
    CacheReqPtr cache_req = dynamic_cast<CacheReqPtr>(findRequest(inst));
    assert(cache_req);

    int acc_size = sizeof(T);
    doTLBAccess(inst, cache_req, acc_size, flags, TheISA::TLB::Read);

    if (cache_req->fault == NoFault) {
        cache_req->reqData = new uint8_t[acc_size];
        doCacheAccess(inst, NULL);
    }

    return cache_req->fault;
}

template <class T>
Fault
CacheUnit::write(DynInstPtr inst, T data, Addr addr, unsigned flags,
                 uint64_t *write_res)
{
    CacheReqPtr cache_req = dynamic_cast<CacheReqPtr>(findRequest(inst));
    assert(cache_req);

    int acc_size = sizeof(T);
    doTLBAccess(inst, cache_req, acc_size, flags, TheISA::TLB::Write);

    if (cache_req->fault == NoFault) {
        cache_req->reqData = new uint8_t[acc_size];
        doCacheAccess(inst, write_res);
    }

    return cache_req->fault;
}
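/**
 * Main per-cycle entry point for this resource. Each request moves
 * through a two-phase protocol: an Initiate* command starts the TLB
 * translation and cache access, and the matching Complete* command
 * later retires the request once the cache response has arrived; until
 * then the request is marked incomplete and the stage stalls.
 */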
void
CacheUnit::execute(int slot_num)
{
    if (cacheBlocked) {
        DPRINTF(InOrderCachePort, "Cache Blocked. Cannot Access\n");
        return;
    }

    CacheReqPtr cache_req = dynamic_cast<CacheReqPtr>(reqMap[slot_num]);
    assert(cache_req);

    DynInstPtr inst = cache_req->inst;
#if TRACING_ON
    ThreadID tid = inst->readTid();
    int seq_num = inst->seqNum;
#endif

    cache_req->fault = NoFault;

    switch (cache_req->cmd)
    {
      case InitiateFetch:
        {
            //@TODO: Switch to size of full cache block. Store in fetch buffer
            int acc_size = sizeof(TheISA::MachInst);

            doTLBAccess(inst, cache_req, acc_size, 0, TheISA::TLB::Execute);

            // Only do the access if there was no fault from the TLB
            if (cache_req->fault == NoFault) {

                DPRINTF(InOrderCachePort,
                        "[tid:%u]: Initiating fetch access to %s "
                        "for addr. %08p\n",
                        tid, name(), cache_req->inst->getMemAddr());

                cache_req->reqData = new uint8_t[acc_size];

                inst->setCurResSlot(slot_num);

                doCacheAccess(inst);
            }

            break;
        }

      case InitiateReadData:
      case InitiateWriteData:
        DPRINTF(InOrderCachePort,
                "[tid:%u]: Initiating data access to %s for addr. %08p\n",
                tid, name(), cache_req->inst->getMemAddr());

        inst->setCurResSlot(slot_num);

        if (inst->isDataPrefetch() || inst->isInstPrefetch()) {
            inst->execute();
        } else {
            inst->initiateAcc();
        }

        break;

      case CompleteFetch:
        if (cache_req->isMemAccComplete()) {
            DPRINTF(InOrderCachePort,
                    "[tid:%i]: Completing Fetch Access for [sn:%i]\n",
                    tid, inst->seqNum);

            DPRINTF(InOrderCachePort, "[tid:%i]: Instruction [sn:%i] is: %s\n",
                    tid, seq_num, inst->staticInst->disassemble(inst->PC));

            delete cache_req->dataPkt;
            //cache_req->setMemStall(false);
            cache_req->done();
        } else {
            DPRINTF(InOrderCachePort,
                    "[tid:%i]: [sn:%i]: Unable to Complete Fetch Access\n",
                    tid, inst->seqNum);
            DPRINTF(InOrderStall,
                    "STALL: [tid:%i]: Fetch miss from %08p\n",
                    tid, cache_req->inst->readPC());
            cache_req->setCompleted(false);
            //cache_req->setMemStall(true);
        }
        break;

      case CompleteReadData:
      case CompleteWriteData:
        DPRINTF(InOrderCachePort,
                "[tid:%i]: [sn:%i]: Trying to Complete Data Access\n",
                tid, inst->seqNum);

        if (cache_req->isMemAccComplete() ||
            inst->isDataPrefetch() ||
            inst->isInstPrefetch()) {
            cache_req->setMemStall(false);
            cache_req->done();
        } else {
            DPRINTF(InOrderStall, "STALL: [tid:%i]: Data miss from %08p\n",
                    tid, cache_req->inst->getMemAddr());
            cache_req->setCompleted(false);
            cache_req->setMemStall(true);
        }
        break;

      default:
        fatal("Unrecognized command to %s", resName);
    }
}
void
CacheUnit::prefetch(DynInstPtr inst)
{
    warn_once("Prefetching currently unimplemented");

    CacheReqPtr cache_req
        = dynamic_cast<CacheReqPtr>(reqMap[inst->getCurResSlot()]);
    assert(cache_req);

    // Clean up the cache resource request so that
    // other memory instructions can use it
    cache_req->setCompleted();
    cacheStatus = cacheAccessComplete;
    cacheBlocked = false;
    cache_req->setMemAccPending(false);
    cache_req->setMemAccCompleted();
    inst->unsetMemAddr();
}


void
CacheUnit::writeHint(DynInstPtr inst)
{
    warn_once("Write Hints currently unimplemented");

    CacheReqPtr cache_req
        = dynamic_cast<CacheReqPtr>(reqMap[inst->getCurResSlot()]);
    assert(cache_req);

    // Clean up the cache resource request so that
    // other memory instructions can use it
    cache_req->setCompleted();
    cacheStatus = cacheAccessComplete;
    cacheBlocked = false;
    cache_req->setMemAccPending(false);
    cache_req->setMemAccCompleted();
    inst->unsetMemAddr();
}
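/**
 * Build the packet for a translated request and attempt to send it on
 * the cache port. Plain reads become LoadLockedReq when the request is
 * LL/SC; plain writes become StoreCondReq or SwapReq as appropriate.
 * If the port refuses the packet, the request is saved for a retry.
 */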
// @TODO: Split into doCacheRead() and doCacheWrite()
Fault
CacheUnit::doCacheAccess(DynInstPtr inst, uint64_t *write_res)
{
    Fault fault = NoFault;
#if TRACING_ON
    ThreadID tid = inst->readTid();
#endif

    CacheReqPtr cache_req
        = dynamic_cast<CacheReqPtr>(reqMap[inst->getCurResSlot()]);
    assert(cache_req);

    // Check for LL/SC and if so change command
    if (cache_req->memReq->isLLSC() && cache_req->pktCmd == MemCmd::ReadReq) {
        cache_req->pktCmd = MemCmd::LoadLockedReq;
    }

    if (cache_req->pktCmd == MemCmd::WriteReq) {
        cache_req->pktCmd =
            cache_req->memReq->isSwap() ? MemCmd::SwapReq :
            (cache_req->memReq->isLLSC() ? MemCmd::StoreCondReq
             : MemCmd::WriteReq);
    }

    cache_req->dataPkt = new CacheReqPacket(cache_req, cache_req->pktCmd,
                                            Packet::Broadcast);

    if (cache_req->dataPkt->isRead()) {
        cache_req->dataPkt->dataStatic(cache_req->reqData);
    } else if (cache_req->dataPkt->isWrite()) {
        cache_req->dataPkt->dataStatic(&cache_req->inst->storeData);

        if (cache_req->memReq->isCondSwap()) {
            assert(write_res);
            cache_req->memReq->setExtraData(*write_res);
        }
    }

    cache_req->dataPkt->time = curTick;

    bool do_access = true;  // set to false to suppress the cache access

    Request *memReq = cache_req->dataPkt->req;

    if (cache_req->dataPkt->isWrite() && cache_req->memReq->isLLSC()) {
        assert(cache_req->inst->isStoreConditional());
        DPRINTF(InOrderCachePort, "Evaluating Store Conditional access\n");
        do_access = TheISA::handleLockedWrite(cpu, memReq);
    }

    DPRINTF(InOrderCachePort,
            "[tid:%i] [sn:%i] attempting to access cache\n",
            tid, inst->seqNum);

    if (do_access) {
        if (!cachePort->sendTiming(cache_req->dataPkt)) {
            DPRINTF(InOrderCachePort,
                    "[tid:%i] [sn:%i] is waiting to retry request\n",
                    tid, inst->seqNum);

            retrySlot = cache_req->getSlot();
            retryReq = cache_req;
            retryPkt = cache_req->dataPkt;

            cacheStatus = cacheWaitRetry;

            //cacheBlocked = true;

            DPRINTF(InOrderStall, "STALL: \n");

            cache_req->setCompleted(false);
        } else {
            DPRINTF(InOrderCachePort,
                    "[tid:%i] [sn:%i] is now waiting for cache response\n",
                    tid, inst->seqNum);
            cache_req->setCompleted();
            cache_req->setMemAccPending();
            cacheStatus = cacheWaitResponse;
            cacheBlocked = false;
        }
    } else if (!do_access && memReq->isLLSC()) {
        // Store-Conditional instructions complete even if they "failed"
        assert(cache_req->inst->isStoreConditional());
        cache_req->setCompleted(true);

        DPRINTF(LLSC,
                "[tid:%i]: T%i Ignoring Failed Store Conditional Access\n",
                tid, tid);

        processCacheCompletion(cache_req->dataPkt);
    } else {
        // The access could not be made this cycle; leave the request
        // incomplete so the cache request is made again
        DPRINTF(InOrderStall, "STALL: \n");
        cache_req->setCompleted(false);
    }

    return fault;
}
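/**
 * Callback for a returning cache response. Squashed accesses are
 * retired immediately; otherwise a completed fetch is predecoded into
 * an instruction, a completed load/store finishes its access, and the
 * owning pipeline stage (and, under the switch-on-miss thread model,
 * the sleeping thread) is woken up.
 */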
void
CacheUnit::processCacheCompletion(PacketPtr pkt)
{
    // Cast to correct packet type
    CacheReqPacket* cache_pkt = dynamic_cast<CacheReqPacket*>(pkt);
    assert(cache_pkt);

    if (cache_pkt->cacheReq->isSquashed()) {
        DPRINTF(InOrderCachePort,
                "Ignoring completion of squashed access, [tid:%i] [sn:%i]\n",
                cache_pkt->cacheReq->getInst()->readTid(),
                cache_pkt->cacheReq->getInst()->seqNum);

        cache_pkt->cacheReq->done();
        delete cache_pkt;
        return;
    }

    DPRINTF(InOrderCachePort,
            "[tid:%u]: [sn:%i]: Waking from cache access to addr. %08p\n",
            cache_pkt->cacheReq->getInst()->readTid(),
            cache_pkt->cacheReq->getInst()->seqNum,
            cache_pkt->cacheReq->getInst()->getMemAddr());

    // Cast to correct request type
    CacheRequest *cache_req = dynamic_cast<CacheReqPtr>(
        findRequest(cache_pkt->cacheReq->getInst()));
    assert(cache_req);

    // Get resource request info
    unsigned stage_num = cache_req->getStageNum();
    DynInstPtr inst = cache_req->inst;
    ThreadID tid = cache_req->inst->readTid();

    if (!cache_req->isSquashed()) {
        if (inst->resSched.top()->cmd == CompleteFetch) {
            DPRINTF(InOrderCachePort,
                    "[tid:%u]: [sn:%i]: Processing fetch access\n",
                    tid, inst->seqNum);

            // NOTE: This is only allowing a thread to fetch one line
            //       at a time. Re-examine when/if prefetching
            //       gets implemented.
            //memcpy(fetchData[tid], cache_pkt->getPtr<uint8_t>(),
            //       cache_pkt->getSize());

            // Get the instruction from the array of the cache line.
            // @todo: update this
            ExtMachInst ext_inst;
            StaticInstPtr staticInst = NULL;
            Addr inst_pc = inst->readPC();
            MachInst mach_inst =
                TheISA::gtoh(*reinterpret_cast<TheISA::MachInst *>
                             (cache_pkt->getPtr<uint8_t>()));

            predecoder.setTC(cpu->thread[tid]->getTC());
            predecoder.moreBytes(inst_pc, inst_pc, mach_inst);
            ext_inst = predecoder.getExtMachInst();

            inst->setMachInst(ext_inst);

            // Set up more TraceData info
            if (inst->traceData) {
                inst->traceData->setStaticInst(inst->staticInst);
                inst->traceData->setPC(inst->readPC());
            }

        } else if (inst->staticInst && inst->isMemRef()) {
            DPRINTF(InOrderCachePort,
                    "[tid:%u]: [sn:%i]: Processing cache access\n",
                    tid, inst->seqNum);

            inst->completeAcc(pkt);

            if (inst->isLoad()) {
                assert(cache_pkt->isRead());

                if (cache_pkt->req->isLLSC()) {
                    DPRINTF(InOrderCachePort,
                            "[tid:%u]: Handling Load-Linked for [sn:%u]\n",
                            tid, inst->seqNum);
                    TheISA::handleLockedRead(cpu, cache_pkt->req);
                }

                // @NOTE: Hardcoded for load instructions. Assumes that
                //        dest. idx 0 is always where the data is loaded to.
                DPRINTF(InOrderCachePort,
                        "[tid:%u]: [sn:%i]: Data loaded was: %08p\n",
                        tid, inst->seqNum, inst->readIntResult(0));
                DPRINTF(InOrderCachePort,
                        "[tid:%u]: [sn:%i]: FP Data loaded was: %08p\n",
                        tid, inst->seqNum, inst->readFloatResult(0));
            } else if (inst->isStore()) {
                assert(cache_pkt->isWrite());

                DPRINTF(InOrderCachePort,
                        "[tid:%u]: [sn:%i]: Data stored was: FIX ME\n",
                        tid, inst->seqNum/*,
                        getMemData(cache_pkt)*/);
            }

            delete cache_pkt;
        }

        cache_req->setMemAccPending(false);
        cache_req->setMemAccCompleted();

        if (cache_req->isMemStall() &&
            cpu->threadModel == InOrderCPU::SwitchOnCacheMiss) {
            DPRINTF(InOrderCachePort,
                    "[tid:%u] Waking up from Cache Miss.\n", tid);

            cpu->activateContext(tid);

            DPRINTF(ThreadModel, "Activating [tid:%i] after return from "
                    "cache miss.\n", tid);
        }

        // Wake up the CPU (if it went to sleep and was waiting on this
        // completion event).
        cpu->wakeCPU();

        DPRINTF(Activity, "[tid:%u] Activating %s due to cache completion\n",
                tid, cpu->pipelineStage[stage_num]->name());

        cpu->switchToActive(stage_num);
    } else {
        DPRINTF(InOrderCachePort,
                "[tid:%u] Miss on block @ %08p completed, but squashed\n",
                tid, cache_req->inst->readPC());
        cache_req->setMemAccCompleted();
    }

    inst->unsetMemAddr();
}
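/**
 * The cache port is ready again after an earlier rejection: re-send the
 * saved retry packet and, on success, resume waiting for its response.
 */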
void
CacheUnit::recvRetry()
{
    DPRINTF(InOrderCachePort, "Retrying Request for [tid:%i] [sn:%i]\n",
            retryReq->inst->readTid(), retryReq->inst->seqNum);

    assert(retryPkt != NULL);
    assert(cacheBlocked);
    assert(cacheStatus == cacheWaitRetry);

    if (cachePort->sendTiming(retryPkt)) {
        cacheStatus = cacheWaitResponse;
        retryPkt = NULL;
        cacheBlocked = false;
    } else {
        DPRINTF(InOrderCachePort,
                "Retry Request for [tid:%i] [sn:%i] failed\n",
                retryReq->inst->readTid(), retryReq->inst->seqNum);
    }
}
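// CacheUnitEvent models the TLB-miss delay scheduled from doTLBAccess():
// when it fires, the blocked thread's TLB is unblocked and the stalled
// pipeline stage is released.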
CacheUnitEvent::CacheUnitEvent()
    : ResourceEvent()
{ }

void
CacheUnitEvent::process()
{
    DynInstPtr inst = resource->reqMap[slotIdx]->inst;
    int stage_num = resource->reqMap[slotIdx]->getStageNum();
    ThreadID tid = inst->threadNumber;
    CacheReqPtr req_ptr = dynamic_cast<CacheReqPtr>(resource->reqMap[slotIdx]);

    DPRINTF(InOrderTLB, "Waking up from TLB Miss caused by [sn:%i].\n",
            inst->seqNum);

    CacheUnit* tlb_res = dynamic_cast<CacheUnit*>(resource);
    assert(tlb_res);

    tlb_res->tlbBlocked[tid] = false;

    tlb_res->cpu->pipelineStage[stage_num]->
        unsetResStall(tlb_res->reqMap[slotIdx], tid);

    req_ptr->tlbStall = false;

    if (req_ptr->isSquashed()) {
        req_ptr->done();
    }
}
void
CacheUnit::squashDueToMemStall(DynInstPtr inst, int stage_num,
                               InstSeqNum squash_seq_num, ThreadID tid)
{
    // If squashing due to a memory stall, then we do NOT want to
    // squash the instruction that caused the stall, so we
    // increment the sequence number here to prevent that.
    //
    // NOTE: This is only for the SwitchOnCacheMiss model.
    // NOTE: If you have multiple outstanding misses from the same
    //       thread then you need to reevaluate this code.
    // NOTE: The squash should originate from
    //       pipeline_stage.cc:processInstSchedule
    DPRINTF(InOrderCachePort, "Squashing above [sn:%u]\n",
            squash_seq_num + 1);

    squash(inst, stage_num, squash_seq_num + 1, tid);
}
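/**
 * Squash all in-flight requests for this thread younger than
 * squash_seq_num. Requests stalled on a TLB miss also release their
 * stage stall; requests with a pending memory access stay allocated
 * until their response returns.
 */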
void
CacheUnit::squash(DynInstPtr inst, int stage_num,
                  InstSeqNum squash_seq_num, ThreadID tid)
{
    vector<int> slot_remove_list;

    map<int, ResReqPtr>::iterator map_it = reqMap.begin();
    map<int, ResReqPtr>::iterator map_end = reqMap.end();

    while (map_it != map_end) {
        ResReqPtr req_ptr = (*map_it).second;

        if (req_ptr &&
            req_ptr->getInst()->readTid() == tid &&
            req_ptr->getInst()->seqNum > squash_seq_num) {

            DPRINTF(InOrderCachePort,
                    "[tid:%i] Squashing request from [sn:%i]\n",
                    req_ptr->getInst()->readTid(), req_ptr->getInst()->seqNum);

            req_ptr->setSquashed();

            req_ptr->getInst()->setSquashed();

            CacheReqPtr cache_req = dynamic_cast<CacheReqPtr>(req_ptr);
            assert(cache_req);

            int req_slot_num = req_ptr->getSlot();

            if (cache_req->tlbStall) {
                tlbBlocked[tid] = false;

                int stall_stage = reqMap[req_slot_num]->getStageNum();

                cpu->pipelineStage[stall_stage]->
                    unsetResStall(reqMap[req_slot_num], tid);
            }

            if (!cache_req->tlbStall && !cache_req->isMemAccPending()) {
                // Mark request for later removal
                cpu->reqRemoveList.push(req_ptr);

                // Mark slot for removal from resource
                slot_remove_list.push_back(req_ptr->getSlot());
            }
        }

        map_it++;
    }

    // Now delete the slot entries from the request map
    for (int i = 0; i < slot_remove_list.size(); i++)
        freeSlot(slot_remove_list[i]);
}
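/**
 * Extract a store packet's data as a uint64_t, switching on the
 * packet's size in bytes (used for trace/debug output).
 */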
uint64_t
CacheUnit::getMemData(Packet *packet)
{
    // Packet::getSize() reports the access size in bytes
    switch (packet->getSize())
    {
      case 1:
        return packet->get<uint8_t>();

      case 2:
        return packet->get<uint16_t>();

      case 4:
        return packet->get<uint32_t>();

      case 8:
        return packet->get<uint64_t>();

      default:
        panic("bad store data size = %d\n", packet->getSize());
    }
}
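// Explicit instantiations: read()/write() are defined in this .cc file,
// so every access width the ISA may request must be instantiated here.
// The float/double/int32_t specializations that follow each block reuse
// the unsigned-integer paths by reinterpreting the destination storage.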
// Extra Template Definitions
#ifndef DOXYGEN_SHOULD_SKIP_THIS

template
Fault
CacheUnit::read(DynInstPtr inst, Addr addr, Twin32_t &data, unsigned flags);

template
Fault
CacheUnit::read(DynInstPtr inst, Addr addr, Twin64_t &data, unsigned flags);

template
Fault
CacheUnit::read(DynInstPtr inst, Addr addr, uint64_t &data, unsigned flags);

template
Fault
CacheUnit::read(DynInstPtr inst, Addr addr, uint32_t &data, unsigned flags);

template
Fault
CacheUnit::read(DynInstPtr inst, Addr addr, uint16_t &data, unsigned flags);

template
Fault
CacheUnit::read(DynInstPtr inst, Addr addr, uint8_t &data, unsigned flags);

#endif //DOXYGEN_SHOULD_SKIP_THIS

template<>
Fault
CacheUnit::read(DynInstPtr inst, Addr addr, double &data, unsigned flags)
{
    return read(inst, addr, *(uint64_t*)&data, flags);
}

template<>
Fault
CacheUnit::read(DynInstPtr inst, Addr addr, float &data, unsigned flags)
{
    return read(inst, addr, *(uint32_t*)&data, flags);
}

template<>
Fault
CacheUnit::read(DynInstPtr inst, Addr addr, int32_t &data, unsigned flags)
{
    return read(inst, addr, (uint32_t&)data, flags);
}
#ifndef DOXYGEN_SHOULD_SKIP_THIS

template
Fault
CacheUnit::write(DynInstPtr inst, Twin32_t data, Addr addr,
                 unsigned flags, uint64_t *res);

template
Fault
CacheUnit::write(DynInstPtr inst, Twin64_t data, Addr addr,
                 unsigned flags, uint64_t *res);

template
Fault
CacheUnit::write(DynInstPtr inst, uint64_t data, Addr addr,
                 unsigned flags, uint64_t *res);

template
Fault
CacheUnit::write(DynInstPtr inst, uint32_t data, Addr addr,
                 unsigned flags, uint64_t *res);

template
Fault
CacheUnit::write(DynInstPtr inst, uint16_t data, Addr addr,
                 unsigned flags, uint64_t *res);

template
Fault
CacheUnit::write(DynInstPtr inst, uint8_t data, Addr addr,
                 unsigned flags, uint64_t *res);

#endif //DOXYGEN_SHOULD_SKIP_THIS

template<>
Fault
CacheUnit::write(DynInstPtr inst, double data, Addr addr, unsigned flags,
                 uint64_t *res)
{
    return write(inst, *(uint64_t*)&data, addr, flags, res);
}

template<>
Fault
CacheUnit::write(DynInstPtr inst, float data, Addr addr, unsigned flags,
                 uint64_t *res)
{
    return write(inst, *(uint32_t*)&data, addr, flags, res);
}

template<>
Fault
CacheUnit::write(DynInstPtr inst, int32_t data, Addr addr, unsigned flags,
                 uint64_t *res)
{
    return write(inst, (uint32_t)data, addr, flags, res);
}