/*
 * Copyright (c) 2007 MIPS Technologies, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Korey Sewell
 *
 */

#include <algorithm>
#include <list>
#include <vector>

#include "arch/isa_traits.hh"
#include "arch/locked_mem.hh"
#include "arch/predecoder.hh"
#include "arch/utility.hh"
#include "config/the_isa.hh"
#include "cpu/inorder/cpu.hh"
#include "cpu/inorder/pipeline_traits.hh"
#include "cpu/inorder/resources/cache_unit.hh"
#include "mem/request.hh"

using namespace std;
using namespace TheISA;
using namespace ThePipeline;

Tick
CacheUnit::CachePort::recvAtomic(PacketPtr pkt)
{
    panic("CacheUnit::CachePort doesn't expect recvAtomic callback!");
    return curTick;
}

void
CacheUnit::CachePort::recvFunctional(PacketPtr pkt)
{
    panic("CacheUnit::CachePort doesn't expect recvFunctional callback!");
}

void
CacheUnit::CachePort::recvStatusChange(Status status)
{
    if (status == RangeChange)
        return;

    panic("CacheUnit::CachePort doesn't expect recvStatusChange callback!");
}

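// Forward a timing response from the cache to the owning CacheUnit for
// completion processing.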
bool
CacheUnit::CachePort::recvTiming(Packet *pkt)
{
    cachePortUnit->processCacheCompletion(pkt);
    return true;
}

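// The cache signals that a previously rejected packet may be retried.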
void
CacheUnit::CachePort::recvRetry()
{
    cachePortUnit->recvRetry();
}

CacheUnit::CacheUnit(string res_name, int res_id, int res_width,
        int res_latency, InOrderCPU *_cpu, ThePipeline::Params *params)
    : Resource(res_name, res_id, res_width, res_latency, _cpu),
      retryPkt(NULL), retrySlot(-1), cacheBlocked(false),
      predecoder(NULL)
{
    cachePort = new CachePort(this);

    // Hard-code the TLB selection based on the port name for now
    if (res_name == "icache_port")
        _tlb = params->itb;
    else if (res_name == "dcache_port")
        _tlb = params->dtb;
    else
        fatal("Unrecognized TLB name passed by user");

    for (int i = 0; i < MaxThreads; i++) {
        tlbBlocked[i] = false;
    }
}

TheISA::TLB*
CacheUnit::tlb()
{
    return _tlb;
}

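// Return the cache port if the requested interface name matches this
// resource's name.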
Port *
CacheUnit::getPort(const string &if_name, int idx)
{
    if (if_name == resName)
        return cachePort;
    else
        return NULL;
}

void
CacheUnit::init()
{
    // Currently used to model TLB latency. Eventually
    // switch to timing TLB translations.
    resourceEvent = new CacheUnitEvent[width];

    initSlots();
}

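// Acquire a resource slot for this instruction's cache access. A request
// is denied if its thread is blocked on a TLB miss or if another request
// to the same address is already outstanding (data accesses only).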
int
CacheUnit::getSlot(DynInstPtr inst)
{
    if (tlbBlocked[inst->threadNumber]) {
        return -1;
    }

    if (!inst->validMemAddr()) {
        panic("Mem. Addr. must be set before requesting cache access\n");
    }

    Addr req_addr = inst->getMemAddr();

    if (resName == "icache_port" ||
        find(addrList.begin(), addrList.end(), req_addr) == addrList.end()) {

        int new_slot = Resource::getSlot(inst);

        if (new_slot == -1)
            return -1;

        inst->memTime = curTick;
        addrList.push_back(req_addr);
        addrMap[req_addr] = inst->seqNum;
        DPRINTF(InOrderCachePort,
                "[tid:%i]: [sn:%i]: Address %08p added to dependency list\n",
                inst->readTid(), inst->seqNum, req_addr);
        return new_slot;
    } else {
        DPRINTF(InOrderCachePort,
                "Denying request because there is an outstanding"
                " request to addr. %08p by [sn:%i] @ tick %i\n",
                req_addr, addrMap[req_addr], inst->memTime);
        return -1;
    }
}

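// Release a cache slot and remove its address from the outstanding-access
// dependency list.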
void
CacheUnit::freeSlot(int slot_num)
{
    vector<Addr>::iterator vect_it = find(addrList.begin(), addrList.end(),
            reqMap[slot_num]->inst->getMemAddr());
    assert(vect_it != addrList.end());

    DPRINTF(InOrderCachePort,
            "[tid:%i]: Address %08p removed from dependency list\n",
            reqMap[slot_num]->inst->readTid(), (*vect_it));

    addrList.erase(vect_it);

    Resource::freeSlot(slot_num);
}

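// Build a CacheRequest for the instruction's next scheduled cache command,
// mapping the pipeline command to the corresponding memory packet command.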
ResReqPtr
CacheUnit::getRequest(DynInstPtr inst, int stage_num, int res_idx,
                      int slot_num, unsigned cmd)
{
    ScheduleEntry* sched_entry = inst->resSched.top();

    if (!inst->validMemAddr()) {
        panic("Mem. Addr. must be set before requesting cache access\n");
    }

    MemCmd::Command pkt_cmd;

    switch (sched_entry->cmd)
    {
      case InitiateReadData:
        pkt_cmd = MemCmd::ReadReq;

        DPRINTF(InOrderCachePort,
                "[tid:%i]: Read request from [sn:%i] for addr %08p\n",
                inst->readTid(), inst->seqNum, inst->getMemAddr());
        break;

      case InitiateWriteData:
        pkt_cmd = MemCmd::WriteReq;

        DPRINTF(InOrderCachePort,
                "[tid:%i]: Write request from [sn:%i] for addr %08p\n",
                inst->readTid(), inst->seqNum, inst->getMemAddr());
        break;

      case InitiateFetch:
        pkt_cmd = MemCmd::ReadReq;

        DPRINTF(InOrderCachePort,
                "[tid:%i]: Fetch request from [sn:%i] for addr %08p\n",
                inst->readTid(), inst->seqNum, inst->getMemAddr());
        break;

      default:
        panic("%i: Unexpected request type (%i) to %s", curTick,
              sched_entry->cmd, name());
    }

    return new CacheRequest(this, inst, stage_num, id, slot_num,
                            sched_entry->cmd, 0, pkt_cmd,
                            0/*flags*/, this->cpu->readCpuId());
}

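// Called when an instruction re-requests this resource, e.g., from a later
// pipeline stage; update the command if the scheduled one has changed.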
void
CacheUnit::requestAgain(DynInstPtr inst, bool &service_request)
{
    CacheReqPtr cache_req = dynamic_cast<CacheReqPtr>(findRequest(inst));
    assert(cache_req);

    // Check to see if this instruction is requesting the same command
    // or a different one
    if (cache_req->cmd != inst->resSched.top()->cmd) {
        // If different, then update the command in the request
        cache_req->cmd = inst->resSched.top()->cmd;
        DPRINTF(InOrderCachePort,
                "[tid:%i]: [sn:%i]: Updating the command for this "
                "instruction\n", inst->readTid(), inst->seqNum);

        service_request = true;
    } else {
        // If the same command, just check to see if the memory access was
        // completed, but don't try to re-execute
        DPRINTF(InOrderCachePort,
                "[tid:%i]: [sn:%i]: requesting this resource again\n",
                inst->readTid(), inst->seqNum);

        service_request = true;
    }
}

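// Perform an atomic TLB translation for the access. On a fault, stall the
// requesting pipeline stage, block further accesses from the thread, and
// schedule an event to model the TLB-miss latency.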
Fault
CacheUnit::doTLBAccess(DynInstPtr inst, CacheReqPtr cache_req, int acc_size,
                       int flags, TheISA::TLB::Mode tlb_mode)
{
    ThreadID tid = inst->readTid();
    Addr aligned_addr = inst->getMemAddr();
    unsigned stage_num = cache_req->getStageNum();
    unsigned slot_idx = cache_req->getSlot();

    if (tlb_mode == TheISA::TLB::Execute) {
        inst->fetchMemReq = new Request(inst->readTid(), aligned_addr,
                                        acc_size, flags, inst->readPC(),
                                        cpu->readCpuId(), inst->readTid());
        cache_req->memReq = inst->fetchMemReq;
    } else {
        inst->dataMemReq = new Request(inst->readTid(), aligned_addr,
                                       acc_size, flags, inst->readPC(),
                                       cpu->readCpuId(), inst->readTid());
        cache_req->memReq = inst->dataMemReq;
    }

    cache_req->fault =
        _tlb->translateAtomic(cache_req->memReq,
                              cpu->thread[tid]->getTC(), tlb_mode);

    if (cache_req->fault != NoFault) {
        DPRINTF(InOrderTLB, "[tid:%i]: %s encountered while translating "
                "addr:%08p for [sn:%i].\n", tid, cache_req->fault->name(),
                cache_req->memReq->getVaddr(), inst->seqNum);

        cpu->pipelineStage[stage_num]->setResStall(cache_req, tid);

        tlbBlocked[tid] = true;

        cache_req->tlbStall = true;

        scheduleEvent(slot_idx, 1);

        cpu->trap(cache_req->fault, tid);
    } else {
        DPRINTF(InOrderTLB, "[tid:%i]: [sn:%i] virt. addr %08p translated "
                "to phys. addr:%08p.\n", tid, inst->seqNum,
                cache_req->memReq->getVaddr(),
                cache_req->memReq->getPaddr());
    }

    return cache_req->fault;
}

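// Translate and initiate a timing read of sizeof(T) bytes. The access
// address is taken from the instruction's memory address, set earlier.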
template <class T>
Fault
CacheUnit::read(DynInstPtr inst, Addr addr, T &data, unsigned flags)
{
    CacheReqPtr cache_req = dynamic_cast<CacheReqPtr>(findRequest(inst));
    assert(cache_req);

    int acc_size = sizeof(T);
    doTLBAccess(inst, cache_req, acc_size, flags, TheISA::TLB::Read);

    if (cache_req->fault == NoFault) {
        cache_req->reqData = new uint8_t[acc_size];
        doCacheAccess(inst, NULL);
    }

    return cache_req->fault;
}

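// Translate and initiate a timing write of sizeof(T) bytes; write_res
// returns the result of a store-conditional or conditional-swap access.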
template <class T>
Fault
CacheUnit::write(DynInstPtr inst, T data, Addr addr, unsigned flags,
                 uint64_t *write_res)
{
    CacheReqPtr cache_req = dynamic_cast<CacheReqPtr>(findRequest(inst));
    assert(cache_req);

    int acc_size = sizeof(T);
    doTLBAccess(inst, cache_req, acc_size, flags, TheISA::TLB::Write);

    if (cache_req->fault == NoFault) {
        cache_req->reqData = new uint8_t[acc_size];
        doCacheAccess(inst, write_res);
    }

    return cache_req->fault;
}

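// Execute the cache command currently scheduled for the given slot:
// initiate a fetch/read/write, or complete one once its memory access
// has finished.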
void
CacheUnit::execute(int slot_num)
{
    if (cacheBlocked) {
        DPRINTF(InOrderCachePort, "Cache Blocked. Cannot Access\n");
        return;
    }

    CacheReqPtr cache_req = dynamic_cast<CacheReqPtr>(reqMap[slot_num]);
    assert(cache_req);

    DynInstPtr inst = cache_req->inst;
#if TRACING_ON
    ThreadID tid = inst->readTid();
    int seq_num = inst->seqNum;
#endif

    cache_req->fault = NoFault;

    switch (cache_req->cmd)
    {
      case InitiateFetch:
        {
            //@TODO: Switch to size of full cache block. Store in fetch buffer
            int acc_size = sizeof(TheISA::MachInst);

            doTLBAccess(inst, cache_req, acc_size, 0, TheISA::TLB::Execute);

            // Only do the access if there was no fault from the TLB
            if (cache_req->fault == NoFault) {

                DPRINTF(InOrderCachePort,
                        "[tid:%u]: Initiating fetch access to %s "
                        "for addr. %08p\n",
                        tid, name(), cache_req->inst->getMemAddr());

                cache_req->reqData = new uint8_t[acc_size];

                inst->setCurResSlot(slot_num);

                doCacheAccess(inst);
            }

            break;
        }

      case InitiateReadData:
      case InitiateWriteData:
        DPRINTF(InOrderCachePort,
                "[tid:%u]: Initiating data access to %s for addr. %08p\n",
                tid, name(), cache_req->inst->getMemAddr());

        inst->setCurResSlot(slot_num);

        if (inst->isDataPrefetch() || inst->isInstPrefetch()) {
            inst->execute();
        } else {
            inst->initiateAcc();
        }

        break;

      case CompleteFetch:
        if (cache_req->isMemAccComplete()) {
            DPRINTF(InOrderCachePort,
                    "[tid:%i]: Completing Fetch Access for [sn:%i]\n",
                    tid, inst->seqNum);

            DPRINTF(InOrderCachePort, "[tid:%i]: Instruction [sn:%i] is: %s\n",
                    tid, seq_num, inst->staticInst->disassemble(inst->PC));

            delete cache_req->dataPkt;
            cache_req->done();
        } else {
            DPRINTF(InOrderCachePort,
                    "[tid:%i]: [sn:%i]: Unable to Complete Fetch Access\n",
                    tid, inst->seqNum);
            DPRINTF(InOrderStall,
                    "STALL: [tid:%i]: Fetch miss from %08p\n",
                    tid, cache_req->inst->readPC());
            cache_req->setCompleted(false);
        }
        break;

      case CompleteReadData:
      case CompleteWriteData:
        DPRINTF(InOrderCachePort,
                "[tid:%i]: [sn:%i]: Trying to Complete Data Access\n",
                tid, inst->seqNum);

        if (cache_req->isMemAccComplete() ||
            inst->isDataPrefetch() ||
            inst->isInstPrefetch()) {
            cache_req->done();
        } else {
            DPRINTF(InOrderStall, "STALL: [tid:%i]: Data miss from %08p\n",
                    tid, cache_req->inst->getMemAddr());
            cache_req->setCompleted(false);
        }
        break;

      default:
        fatal("Unrecognized command to %s", resName);
    }
}

void
CacheUnit::prefetch(DynInstPtr inst)
{
    warn_once("Prefetching currently unimplemented");

    CacheReqPtr cache_req
        = dynamic_cast<CacheReqPtr>(reqMap[inst->getCurResSlot()]);
    assert(cache_req);

    // Clean up the cache resource request so
    // other memory insts. can use it
    cache_req->setCompleted();
    cacheStatus = cacheAccessComplete;
    cacheBlocked = false;
    cache_req->setMemAccPending(false);
    cache_req->setMemAccCompleted();
    inst->unsetMemAddr();
}

void
CacheUnit::writeHint(DynInstPtr inst)
{
    warn_once("Write hints currently unimplemented");

    CacheReqPtr cache_req
        = dynamic_cast<CacheReqPtr>(reqMap[inst->getCurResSlot()]);
    assert(cache_req);

    // Clean up the cache resource request so
    // other memory insts. can use it
    cache_req->setCompleted();
    cacheStatus = cacheAccessComplete;
    cacheBlocked = false;
    cache_req->setMemAccPending(false);
    cache_req->setMemAccCompleted();
    inst->unsetMemAddr();
}

// @TODO: Split into doCacheRead() and doCacheWrite()
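// Build the packet for the translated request (upgrading the command for
// LL/SC and swap operations), then attempt to send it on the cache port.
// Failed sends are saved for retry; failed store-conditionals complete
// immediately without accessing the cache.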
Fault
CacheUnit::doCacheAccess(DynInstPtr inst, uint64_t *write_res)
{
    Fault fault = NoFault;
#if TRACING_ON
    ThreadID tid = inst->readTid();
#endif

    CacheReqPtr cache_req
        = dynamic_cast<CacheReqPtr>(reqMap[inst->getCurResSlot()]);
    assert(cache_req);

    // Check for LL/SC and if so change the command
    if (cache_req->memReq->isLLSC() && cache_req->pktCmd == MemCmd::ReadReq) {
        cache_req->pktCmd = MemCmd::LoadLockedReq;
    }

    if (cache_req->pktCmd == MemCmd::WriteReq) {
        cache_req->pktCmd =
            cache_req->memReq->isSwap() ? MemCmd::SwapReq :
            (cache_req->memReq->isLLSC() ? MemCmd::StoreCondReq
                                         : MemCmd::WriteReq);
    }

    cache_req->dataPkt = new CacheReqPacket(cache_req, cache_req->pktCmd,
                                            Packet::Broadcast);

    if (cache_req->dataPkt->isRead()) {
        cache_req->dataPkt->dataStatic(cache_req->reqData);
    } else if (cache_req->dataPkt->isWrite()) {
        cache_req->dataPkt->dataStatic(&cache_req->inst->storeData);

        if (cache_req->memReq->isCondSwap()) {
            assert(write_res);
            cache_req->memReq->setExtraData(*write_res);
        }
    }

    cache_req->dataPkt->time = curTick;

    bool do_access = true;  // set to false to suppress the cache access

    Request *memReq = cache_req->dataPkt->req;

    if (cache_req->dataPkt->isWrite() && cache_req->memReq->isLLSC()) {
        assert(cache_req->inst->isStoreConditional());
        DPRINTF(InOrderCachePort, "Evaluating Store Conditional access\n");
        do_access = TheISA::handleLockedWrite(cpu, memReq);
    }

    DPRINTF(InOrderCachePort,
            "[tid:%i] [sn:%i] attempting to access cache\n",
            tid, inst->seqNum);

    if (do_access) {
        if (!cachePort->sendTiming(cache_req->dataPkt)) {
            DPRINTF(InOrderCachePort,
                    "[tid:%i] [sn:%i] is waiting to retry request\n",
                    tid, inst->seqNum);

            retrySlot = cache_req->getSlot();
            retryReq = cache_req;
            retryPkt = cache_req->dataPkt;

            cacheStatus = cacheWaitRetry;

            //cacheBlocked = true;

            DPRINTF(InOrderStall,
                    "STALL: [tid:%i]: cache access blocked, waiting to "
                    "retry\n", tid);

            cache_req->setCompleted(false);
        } else {
            DPRINTF(InOrderCachePort,
                    "[tid:%i] [sn:%i] is now waiting for cache response\n",
                    tid, inst->seqNum);
            cache_req->setCompleted();
            cache_req->setMemAccPending();
            cacheStatus = cacheWaitResponse;
            cacheBlocked = false;
        }
    } else if (!do_access && memReq->isLLSC()) {
        // Store-conditional instructions complete even if they "failed"
        assert(cache_req->inst->isStoreConditional());
        cache_req->setCompleted(true);

        DPRINTF(LLSC,
                "[tid:%i]: T%i Ignoring Failed Store Conditional Access\n",
                tid, tid);

        processCacheCompletion(cache_req->dataPkt);
    } else {
        // The cache could not be accessed this cycle, so the
        // request must be made again
        DPRINTF(InOrderStall,
                "STALL: [tid:%i]: cache access could not be completed\n",
                tid);
        cache_req->setCompleted(false);
    }

    return fault;
}

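// Handle a response from the cache port: drop squashed accesses, complete
// fetches by predecoding the returned instruction, finish load/store
// accesses, and wake up the CPU/pipeline stage waiting on the result.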
void
CacheUnit::processCacheCompletion(PacketPtr pkt)
{
    // Cast to the correct packet type
    CacheReqPacket* cache_pkt = dynamic_cast<CacheReqPacket*>(pkt);
    assert(cache_pkt);

    if (cache_pkt->cacheReq->isSquashed()) {
        DPRINTF(InOrderCachePort,
                "Ignoring completion of squashed access, [tid:%i] [sn:%i]\n",
                cache_pkt->cacheReq->getInst()->readTid(),
                cache_pkt->cacheReq->getInst()->seqNum);

        cache_pkt->cacheReq->done();
        delete cache_pkt;
        return;
    }

    DPRINTF(InOrderCachePort,
            "[tid:%u]: [sn:%i]: Waking from cache access to addr. %08p\n",
            cache_pkt->cacheReq->getInst()->readTid(),
            cache_pkt->cacheReq->getInst()->seqNum,
            cache_pkt->cacheReq->getInst()->getMemAddr());

    // Cast to the correct request type
    CacheRequest *cache_req = dynamic_cast<CacheReqPtr>(
        findRequest(cache_pkt->cacheReq->getInst()));
    assert(cache_req);

    // Get resource request info
    unsigned stage_num = cache_req->getStageNum();
    DynInstPtr inst = cache_req->inst;
    ThreadID tid = cache_req->inst->readTid();

    if (!cache_req->isSquashed()) {
        if (inst->resSched.top()->cmd == CompleteFetch) {
            DPRINTF(InOrderCachePort,
                    "[tid:%u]: [sn:%i]: Processing fetch access\n",
                    tid, inst->seqNum);

            // NOTE: This is only allowing a thread to fetch one line
            // at a time. Re-examine when/if prefetching
            // gets implemented.
            //memcpy(fetchData[tid], cache_pkt->getPtr<uint8_t>(),
            //       cache_pkt->getSize());

            // Get the instruction from the array of the cache line.
            // @todo: update this
            ExtMachInst ext_inst;
            StaticInstPtr staticInst = NULL;
            Addr inst_pc = inst->readPC();
            MachInst mach_inst =
                TheISA::gtoh(*reinterpret_cast<TheISA::MachInst *>
                             (cache_pkt->getPtr<uint8_t>()));

            predecoder.setTC(cpu->thread[tid]->getTC());
            predecoder.moreBytes(inst_pc, inst_pc, mach_inst);
            ext_inst = predecoder.getExtMachInst();

            inst->setMachInst(ext_inst);

            // Set up more TraceData info
            if (inst->traceData) {
                inst->traceData->setStaticInst(inst->staticInst);
                inst->traceData->setPC(inst->readPC());
            }
        } else if (inst->staticInst && inst->isMemRef()) {
            DPRINTF(InOrderCachePort,
                    "[tid:%u]: [sn:%i]: Processing cache access\n",
                    tid, inst->seqNum);

            inst->completeAcc(pkt);

            if (inst->isLoad()) {
                assert(cache_pkt->isRead());

                if (cache_pkt->req->isLLSC()) {
                    DPRINTF(InOrderCachePort,
                            "[tid:%u]: Handling Load-Linked for [sn:%u]\n",
                            tid, inst->seqNum);
                    TheISA::handleLockedRead(cpu, cache_pkt->req);
                }

                // @NOTE: Hardcoded for load instructions. Assumes that
                // dest. idx 0 is always where the data is loaded to.
                DPRINTF(InOrderCachePort,
                        "[tid:%u]: [sn:%i]: Data loaded was: %08p\n",
                        tid, inst->seqNum, inst->readIntResult(0));
                DPRINTF(InOrderCachePort,
                        "[tid:%u]: [sn:%i]: FP Data loaded was: %08p\n",
                        tid, inst->seqNum, inst->readFloatResult(0));
            } else if (inst->isStore()) {
                assert(cache_pkt->isWrite());

                DPRINTF(InOrderCachePort,
                        "[tid:%u]: [sn:%i]: Data stored was: FIX ME\n",
                        tid, inst->seqNum/*,
                        getMemData(cache_pkt)*/);
            }

            delete cache_pkt;
        }

        cache_req->setMemAccPending(false);
        cache_req->setMemAccCompleted();

        // Wake up the CPU (if it went to sleep and was waiting on this
        // completion event).
        cpu->wakeCPU();

        DPRINTF(Activity, "[tid:%u] Activating %s due to cache completion\n",
                tid, cpu->pipelineStage[stage_num]->name());

        cpu->switchToActive(stage_num);
    } else {
        DPRINTF(InOrderCachePort,
                "[tid:%u] Miss on block @ %08p completed, but squashed\n",
                tid, cache_req->inst->readPC());
        cache_req->setMemAccCompleted();
    }

    inst->unsetMemAddr();
}

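// Resend the packet that was previously rejected by the cache port.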
void
CacheUnit::recvRetry()
{
    DPRINTF(InOrderCachePort, "Retrying Request for [tid:%i] [sn:%i]\n",
            retryReq->inst->readTid(), retryReq->inst->seqNum);

    assert(retryPkt != NULL);
    assert(cacheBlocked);
    assert(cacheStatus == cacheWaitRetry);

    if (cachePort->sendTiming(retryPkt)) {
        cacheStatus = cacheWaitResponse;
        retryPkt = NULL;
        cacheBlocked = false;
    } else {
        DPRINTF(InOrderCachePort,
                "Retry Request for [tid:%i] [sn:%i] failed\n",
                retryReq->inst->readTid(), retryReq->inst->seqNum);
    }
}

CacheUnitEvent::CacheUnitEvent()
    : ResourceEvent()
{ }

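// A scheduled TLB-miss latency event has expired: unblock the thread and
// un-stall the pipeline stage that was waiting on the translation.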
void
CacheUnitEvent::process()
{
    DynInstPtr inst = resource->reqMap[slotIdx]->inst;
    int stage_num = resource->reqMap[slotIdx]->getStageNum();
    ThreadID tid = inst->threadNumber;
    CacheReqPtr req_ptr =
        dynamic_cast<CacheReqPtr>(resource->reqMap[slotIdx]);

    DPRINTF(InOrderTLB, "Waking up from TLB Miss caused by [sn:%i].\n",
            inst->seqNum);

    CacheUnit* tlb_res = dynamic_cast<CacheUnit*>(resource);
    assert(tlb_res);

    tlb_res->tlbBlocked[tid] = false;

    tlb_res->cpu->pipelineStage[stage_num]->
        unsetResStall(tlb_res->reqMap[slotIdx], tid);

    req_ptr->tlbStall = false;

    if (req_ptr->isSquashed()) {
        req_ptr->done();
    }
}

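// Squash all requests in this resource belonging to the given thread that
// are younger than squash_seq_num, freeing their slots unless a memory
// access or TLB stall is still pending.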
void
CacheUnit::squash(DynInstPtr inst, int stage_num,
                  InstSeqNum squash_seq_num, ThreadID tid)
{
    vector<int> slot_remove_list;

    map<int, ResReqPtr>::iterator map_it = reqMap.begin();
    map<int, ResReqPtr>::iterator map_end = reqMap.end();

    while (map_it != map_end) {
        ResReqPtr req_ptr = (*map_it).second;

        if (req_ptr &&
            req_ptr->getInst()->readTid() == tid &&
            req_ptr->getInst()->seqNum > squash_seq_num) {

            DPRINTF(InOrderCachePort,
                    "[tid:%i] Squashing request from [sn:%i]\n",
                    req_ptr->getInst()->readTid(), req_ptr->getInst()->seqNum);

            req_ptr->setSquashed();

            req_ptr->getInst()->setSquashed();

            CacheReqPtr cache_req = dynamic_cast<CacheReqPtr>(req_ptr);
            assert(cache_req);

            int req_slot_num = req_ptr->getSlot();

            if (cache_req->tlbStall) {
                tlbBlocked[tid] = false;

                int stall_stage = reqMap[req_slot_num]->getStageNum();

                cpu->pipelineStage[stall_stage]->
                    unsetResStall(reqMap[req_slot_num], tid);
            }

            if (!cache_req->tlbStall && !cache_req->isMemAccPending()) {
                // Mark request for later removal
                cpu->reqRemoveList.push(req_ptr);

                // Mark slot for removal from resource
                slot_remove_list.push_back(req_ptr->getSlot());
            }
        }

        map_it++;
    }

    // Now delete the slot entries from the request map
    for (int i = 0; i < slot_remove_list.size(); i++)
        freeSlot(slot_remove_list[i]);
}

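// Return a packet's data zero-extended to 64 bits, switching on the
// access size in bytes (Packet::getSize() returns bytes, not bits).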
uint64_t
CacheUnit::getMemData(Packet *packet)
{
    switch (packet->getSize())
    {
      case 1:
        return packet->get<uint8_t>();

      case 2:
        return packet->get<uint16_t>();

      case 4:
        return packet->get<uint32_t>();

      case 8:
        return packet->get<uint64_t>();

      default:
        panic("bad store data size = %d\n", packet->getSize());
    }
}


// Extra Template Definitions
#ifndef DOXYGEN_SHOULD_SKIP_THIS

template
Fault
CacheUnit::read(DynInstPtr inst, Addr addr, Twin32_t &data, unsigned flags);

template
Fault
CacheUnit::read(DynInstPtr inst, Addr addr, Twin64_t &data, unsigned flags);

template
Fault
CacheUnit::read(DynInstPtr inst, Addr addr, uint64_t &data, unsigned flags);

template
Fault
CacheUnit::read(DynInstPtr inst, Addr addr, uint32_t &data, unsigned flags);

template
Fault
CacheUnit::read(DynInstPtr inst, Addr addr, uint16_t &data, unsigned flags);

template
Fault
CacheUnit::read(DynInstPtr inst, Addr addr, uint8_t &data, unsigned flags);

#endif //DOXYGEN_SHOULD_SKIP_THIS

template<>
Fault
CacheUnit::read(DynInstPtr inst, Addr addr, double &data, unsigned flags)
{
    return read(inst, addr, *(uint64_t*)&data, flags);
}

template<>
Fault
CacheUnit::read(DynInstPtr inst, Addr addr, float &data, unsigned flags)
{
    return read(inst, addr, *(uint32_t*)&data, flags);
}

template<>
Fault
CacheUnit::read(DynInstPtr inst, Addr addr, int32_t &data, unsigned flags)
{
    return read(inst, addr, (uint32_t&)data, flags);
}

#ifndef DOXYGEN_SHOULD_SKIP_THIS

template
Fault
CacheUnit::write(DynInstPtr inst, Twin32_t data, Addr addr,
                 unsigned flags, uint64_t *res);

template
Fault
CacheUnit::write(DynInstPtr inst, Twin64_t data, Addr addr,
                 unsigned flags, uint64_t *res);

template
Fault
CacheUnit::write(DynInstPtr inst, uint64_t data, Addr addr,
                 unsigned flags, uint64_t *res);

template
Fault
CacheUnit::write(DynInstPtr inst, uint32_t data, Addr addr,
                 unsigned flags, uint64_t *res);

template
Fault
CacheUnit::write(DynInstPtr inst, uint16_t data, Addr addr,
                 unsigned flags, uint64_t *res);

template
Fault
CacheUnit::write(DynInstPtr inst, uint8_t data, Addr addr,
                 unsigned flags, uint64_t *res);

#endif //DOXYGEN_SHOULD_SKIP_THIS

template<>
Fault
CacheUnit::write(DynInstPtr inst, double data, Addr addr, unsigned flags,
                 uint64_t *res)
{
    return write(inst, *(uint64_t*)&data, addr, flags, res);
}

template<>
Fault
CacheUnit::write(DynInstPtr inst, float data, Addr addr, unsigned flags,
                 uint64_t *res)
{
    return write(inst, *(uint32_t*)&data, addr, flags, res);
}

template<>
Fault
CacheUnit::write(DynInstPtr inst, int32_t data, Addr addr, unsigned flags,
                 uint64_t *res)
{
    return write(inst, (uint32_t)data, addr, flags, res);
}