inorder/dtb: make sure the DTB translates the correct address
[gem5.git] / src / cpu / inorder / resources / cache_unit.cc
1 /*
2 * Copyright (c) 2007 MIPS Technologies, Inc.
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 *
28 * Authors: Korey Sewell
29 *
30 */
31
32 #include <list>
33 #include <vector>
34
35 #include "arch/isa_traits.hh"
36 #include "arch/locked_mem.hh"
37 #include "arch/predecoder.hh"
38 #include "arch/utility.hh"
39 #include "config/the_isa.hh"
40 #include "cpu/inorder/resources/cache_unit.hh"
41 #include "cpu/inorder/cpu.hh"
42 #include "cpu/inorder/pipeline_traits.hh"
43 #include "cpu/inorder/resource_pool.hh"
44 #include "debug/Activity.hh"
45 #include "debug/AddrDep.hh"
46 #include "debug/InOrderCachePort.hh"
47 #include "debug/InOrderStall.hh"
48 #include "debug/InOrderTLB.hh"
49 #include "debug/LLSC.hh"
50 #include "debug/RefCount.hh"
51 #include "debug/ThreadModel.hh"
52 #include "mem/request.hh"
53
54 using namespace std;
55 using namespace TheISA;
56 using namespace ThePipeline;
57
58 #if TRACING_ON
// Render a memory buffer as a contiguous string of two-digit hex bytes.
// Debug-trace helper; only compiled in when tracing is enabled.
static std::string
printMemData(uint8_t *data, unsigned size)
{
    std::stringstream dataStr;
    for (unsigned pos = 0; pos < size; pos++) {
        ccprintf(dataStr, "%02x", data[pos]);
    }
    return dataStr.str();
}
68 #endif
69
Tick
CacheUnit::CachePort::recvAtomic(PacketPtr pkt)
{
    // This port is used by a timing-mode CPU model; an atomic access
    // arriving here indicates a configuration error.
    panic("%s doesn't expect recvAtomic callback!", cachePortUnit->name());
    return curTick();
}
76
void
CacheUnit::CachePort::recvFunctional(PacketPtr pkt)
{
    // Functional accesses are deliberately ignored: this port keeps no
    // state that a functional access would need to update.
    DPRINTF(InOrderCachePort, "Doesn't update state on a recvFunctional."
            "Ignoring packet for %x.\n", pkt->getAddr());
}
83
84 void
85 CacheUnit::CachePort::recvStatusChange(Status status)
86 {
87 if (status == RangeChange) {
88 if (!snoopRangeSent) {
89 snoopRangeSent = true;
90 sendStatusChange(Port::RangeChange);
91 }
92 return;
93 }
94
95 panic("CacheUnit::CachePort doesn't expect recvStatusChange callback!");
96 }
97
bool
CacheUnit::CachePort::recvTiming(Packet *pkt)
{
    // Route completed cache responses back to the owning CacheUnit;
    // error packets and snoops are logged and dropped.  Always accepts
    // the packet (returns true).
    DPRINTF(Cache, "RecvTiming: Pkt %x,\n", pkt->getAddr());

    if (pkt->isError())
        DPRINTF(InOrderCachePort, "Got error packet back for address: %x\n",
                pkt->getAddr());
    else if (pkt->isResponse())
        cachePortUnit->processCacheCompletion(pkt);
    else {
        //@note: depending on consistency model, update here
        DPRINTF(InOrderCachePort, "Received snoop pkt %x,Ignoring\n", pkt->getAddr());
    }

    return true;
}
115
void
CacheUnit::CachePort::recvRetry()
{
    // Delegate the retry to the resource that owns this port.
    cachePortUnit->recvRetry();
}
121
CacheUnit::CacheUnit(string res_name, int res_id, int res_width,
    int res_latency, InOrderCPU *_cpu, ThePipeline::Params *params)
    : Resource(res_name, res_id, res_width, res_latency, _cpu),
      cachePortBlocked(false)
{
    cachePort = new CachePort(this);

    // Hard-Code Selection For Now
    // Choose between the instruction- and data-side TLB based purely on
    // the resource's configured name.
    if (res_name == "icache_port")
        _tlb = params->itb;
    else if (res_name == "dcache_port")
        _tlb = params->dtb;
    else
        fatal("Unrecognized TLB name passed by user");

    // No thread starts out stalled on a TLB miss.
    for (int i=0; i < MaxThreads; i++) {
        tlbBlocked[i] = false;
        tlbBlockSeqNum[i] = 0;
    }
}
142
143 TheISA::TLB*
144 CacheUnit::tlb()
145 {
146 return _tlb;
147
148 }
149
void
CacheUnit::CachePort::setPeer(Port *port)
{
    Port::setPeer(port);

#if FULL_SYSTEM
    // Update the ThreadContext's memory ports (Functional/Virtual
    // Ports) -- only the data-side port affects those.
    if (cachePortUnit->resName == "dcache_port") {
        cachePortUnit->cpu->updateMemPorts();
    }

#endif
}
164
165 Port *
166 CacheUnit::getPort(const string &if_name, int idx)
167 {
168 if (if_name == resName)
169 return cachePort;
170 else
171 return NULL;
172 }
173
void
CacheUnit::init()
{
    // One reusable CacheRequest object per slot of this resource.
    for (int i = 0; i < width; i++) {
        reqs[i] = new CacheRequest(this);
    }

    // Currently Used to Model TLB Latency. Eventually
    // Switch to Timing TLB translations.
    resourceEvent = new CacheUnitEvent[width];

    // Cache the peer's block size/mask for split-access calculations.
    cacheBlkSize = this->cachePort->peerBlockSize();
    cacheBlkMask = cacheBlkSize - 1;

    initSlots();
}
190
191 int
192 CacheUnit::getSlot(DynInstPtr inst)
193 {
194 ThreadID tid = inst->readTid();
195 if (tlbBlocked[tid]) {
196 return -1;
197 }
198
199 // For a Split-Load, the instruction would have processed once already
200 // causing the address to be unset.
201 if (!inst->validMemAddr() && !inst->splitInst) {
202 panic("[tid:%i][sn:%i] Mem. Addr. must be set before requesting "
203 "cache access\n", inst->readTid(), inst->seqNum);
204 }
205
206 int new_slot = Resource::getSlot(inst);
207 inst->memTime = curTick();
208 //@note: add back in if you want speculative loads/store capability
209 //setAddrDependency(inst);
210 return new_slot;
211 }
212
void
CacheUnit::setAddrDependency(DynInstPtr inst)
{
    // Record this instruction's memory address in the per-thread
    // dependency bookkeeping (currently disabled at the call sites --
    // see getSlot()).
    Addr req_addr = inst->getMemAddr();
    ThreadID tid = inst->readTid();

    addrList[tid].push_back(req_addr);
    addrMap[tid][req_addr] = inst->seqNum;

    DPRINTF(AddrDep,
            "[tid:%i]: [sn:%i]: Address %08p added to dependency list (size=%i)\n",
            inst->readTid(), inst->seqNum, req_addr, addrList[tid].size());

    //@NOTE: 10 is an arbitrarily "high" number, but to be exact
    //       we would need to know the # of outstanding accesses
    //       a priori. Information like fetch width, stage width,
    //       fetch buffer, and the branch resolution stage would be
    //       useful for the icache_port. For the dcache port, the #
    //       of outstanding cache accesses (mshrs) would be a good
    //       sanity check here.
    //assert(addrList[tid].size() < 10);
}
235
void
CacheUnit::removeAddrDependency(DynInstPtr inst)
{
    // Remove this instruction's address from the per-thread dependency
    // list/map.  A split instruction may legitimately be absent from the
    // list (its address was already removed on the first half).
    ThreadID tid = inst->readTid();

    Addr mem_addr = inst->getMemAddr();

    inst->unsetMemAddr();

    // Erase from Address List
    std::list<Addr>::iterator list_it = find(addrList[tid].begin(),
                                             addrList[tid].end(),
                                             mem_addr);
    assert(list_it != addrList[tid].end() || inst->splitInst);

    if (list_it != addrList[tid].end()) {
        DPRINTF(AddrDep,
                "[tid:%i]: [sn:%i] Address %08p removed from dependency "
                "list\n", inst->readTid(), inst->seqNum, (*list_it));

        addrList[tid].erase(list_it);

        // Erase From Address Map (Used for Debugging)
        addrMap[tid].erase(addrMap[tid].find(mem_addr));
    }
}
264
265 ResReqPtr
266 CacheUnit::findRequest(DynInstPtr inst)
267 {
268 for (int i = 0; i < width; i++) {
269 CacheRequest* cache_req =
270 dynamic_cast<CacheRequest*>(reqs[i]);
271 assert(cache_req);
272
273 if (cache_req->valid &&
274 cache_req->getInst() == inst &&
275 cache_req->instIdx == inst->curSkedEntry->idx) {
276 return cache_req;
277 }
278 }
279
280 return NULL;
281 }
282
283 ResReqPtr
284 CacheUnit::findRequest(DynInstPtr inst, int idx)
285 {
286 for (int i = 0; i < width; i++) {
287 CacheRequest* cache_req =
288 dynamic_cast<CacheRequest*>(reqs[i]);
289 assert(cache_req);
290
291 if (cache_req->valid &&
292 cache_req->getInst() == inst &&
293 cache_req->instIdx == idx) {
294 return cache_req;
295 }
296 }
297
298 return NULL;
299 }
300
301
ResReqPtr
CacheUnit::getRequest(DynInstPtr inst, int stage_num, int res_idx,
                      int slot_num, unsigned cmd)
{
    // Populate the slot's CacheRequest from the instruction's current
    // schedule entry, mapping the resource command to the corresponding
    // memory packet command (read vs. write, first vs. second split).
    ScheduleEntry* sched_entry = *inst->curSkedEntry;
    CacheRequest* cache_req = dynamic_cast<CacheRequest*>(reqs[slot_num]);

    if (!inst->validMemAddr()) {
        panic("Mem. Addr. must be set before requesting cache access\n");
    }

    MemCmd::Command pkt_cmd;

    switch (sched_entry->cmd)
    {
      case InitSecondSplitRead:
        pkt_cmd = MemCmd::ReadReq;

        DPRINTF(InOrderCachePort,
                "[tid:%i]: Read request from [sn:%i] for addr %08p\n",
                inst->readTid(), inst->seqNum, inst->split2ndAddr);
        break;

      case InitiateReadData:
        pkt_cmd = MemCmd::ReadReq;

        DPRINTF(InOrderCachePort,
                "[tid:%i]: Read request from [sn:%i] for addr %08p\n",
                inst->readTid(), inst->seqNum, inst->getMemAddr());
        break;

      case InitSecondSplitWrite:
        pkt_cmd = MemCmd::WriteReq;

        DPRINTF(InOrderCachePort,
                "[tid:%i]: Write request from [sn:%i] for addr %08p\n",
                inst->readTid(), inst->seqNum, inst->split2ndAddr);
        break;

      case InitiateWriteData:
        pkt_cmd = MemCmd::WriteReq;

        DPRINTF(InOrderCachePort,
                "[tid:%i]: Write request from [sn:%i] for addr %08p\n",
                inst->readTid(), inst->seqNum, inst->getMemAddr());
        break;

      default:
        panic("%i: Unexpected request type (%i) to %s", curTick(),
              sched_entry->cmd, name());
    }

    cache_req->setRequest(inst, stage_num, id, slot_num,
                          sched_entry->cmd, pkt_cmd,
                          inst->curSkedEntry->idx);
    return cache_req;
}
359
void
CacheUnit::requestAgain(DynInstPtr inst, bool &service_request)
{
    // Called when an instruction re-requests this resource: either the
    // schedule moved on to a new command (update the request) or the
    // same command is being polled for completion.
    CacheReqPtr cache_req = dynamic_cast<CacheReqPtr>(findRequest(inst));
    assert(cache_req);

    // Check to see if this instruction is requesting the same command
    // or a different one
    if (cache_req->cmd != inst->curSkedEntry->cmd &&
        cache_req->instIdx == inst->curSkedEntry->idx) {
        // If different, then update command in the request
        cache_req->cmd = inst->curSkedEntry->cmd;
        DPRINTF(InOrderCachePort,
                "[tid:%i]: [sn:%i]: Updating the command for this "
                "instruction\n", inst->readTid(), inst->seqNum);

        service_request = true;
    } else if (inst->curSkedEntry->idx != CacheUnit::InitSecondSplitRead &&
               inst->curSkedEntry->idx != CacheUnit::InitSecondSplitWrite) {
        // NOTE(review): this compares the schedule entry's 'idx' field
        // against command enum values (InitSecondSplit*); it looks like
        // 'cmd' may have been intended -- confirm before changing.
        // If same command, just check to see if memory access was completed
        // but dont try to re-execute
        DPRINTF(InOrderCachePort,
                "[tid:%i]: [sn:%i]: requesting this resource again\n",
                inst->readTid(), inst->seqNum);

        service_request = true;
    }
}
388
void
CacheUnit::setupMemRequest(DynInstPtr inst, CacheReqPtr cache_req,
                           int acc_size, int flags)
{
    // Lazily allocate the memory Request for this access.  The second
    // half of a split access keeps its Request on the instruction
    // itself (splitMemReq) so it survives slot reuse.
    ThreadID tid = inst->readTid();
    // NOTE(review): despite the name, this is simply the instruction's
    // effective address; no alignment is applied here -- confirm.
    Addr aligned_addr = inst->getMemAddr();

    if (!cache_req->is2ndSplit()) {
        if (cache_req->memReq == NULL) {
            cache_req->memReq =
                new Request(cpu->asid[tid], aligned_addr, acc_size, flags,
                            inst->instAddr(), cpu->readCpuId(), //@todo: use context id
                            tid);
            DPRINTF(InOrderCachePort, "[sn:%i] Created memReq @%x, ->%x\n",
                    inst->seqNum, &cache_req->memReq, cache_req->memReq);
        }
    } else {
        assert(inst->splitInst);

        if (inst->splitMemReq == NULL) {
            inst->splitMemReq = new Request(cpu->asid[tid],
                                            inst->split2ndAddr,
                                            acc_size,
                                            flags,
                                            inst->instAddr(),
                                            cpu->readCpuId(),
                                            tid);
        }

        cache_req->memReq = inst->splitMemReq;
    }
}
421
422 void
423 CacheUnit::doTLBAccess(DynInstPtr inst, CacheReqPtr cache_req, int acc_size,
424 int flags, TheISA::TLB::Mode tlb_mode)
425 {
426 ThreadID tid = inst->readTid();
427
428 setupMemRequest(inst, cache_req, acc_size, flags);
429
430 //@todo: HACK: the DTB expects the correct PC in the ThreadContext
431 // but how if the memory accesses are speculative? Shouldn't
432 // we send along the requestor's PC to the translate functions?
433 ThreadContext *tc = cpu->thread[tid]->getTC();
434 PCState old_pc = tc->pcState();
435 tc->pcState() = inst->pcState();
436 inst->fault =
437 _tlb->translateAtomic(cache_req->memReq, tc, tlb_mode);
438 tc->pcState() = old_pc;
439
440 if (inst->fault != NoFault) {
441 DPRINTF(InOrderTLB, "[tid:%i]: %s encountered while translating "
442 "addr:%08p for [sn:%i].\n", tid, inst->fault->name(),
443 cache_req->memReq->getVaddr(), inst->seqNum);
444
445 tlbBlocked[tid] = true;
446 tlbBlockSeqNum[tid] = inst->seqNum;
447
448 #if !FULL_SYSTEM
449 unsigned stage_num = cache_req->getStageNum();
450
451 cpu->pipelineStage[stage_num]->setResStall(cache_req, tid);
452 cache_req->tlbStall = true;
453
454 // schedule a time to process the tlb miss.
455 // latency hardcoded to 1 (for now), but will be updated
456 // when timing translation gets added in
457 scheduleEvent(slot_idx, 1);
458 unsigned slot_idx = cache_req->getSlot();
459 #endif
460
461 // Mark it as complete so it can pass through next stage.
462 // Fault Handling will happen at commit/graduation
463 cache_req->setCompleted();
464 } else {
465 DPRINTF(InOrderTLB, "[tid:%i]: [sn:%i] virt. addr %08p translated "
466 "to phys. addr:%08p.\n", tid, inst->seqNum,
467 cache_req->memReq->getVaddr(),
468 cache_req->memReq->getPaddr());
469 }
470
471 }
472
Fault
CacheUnit::read(DynInstPtr inst, Addr addr,
                uint8_t *data, unsigned size, unsigned flags)
{
    // Initiate (or resume) a data read.  An access that crosses a
    // cache-line boundary is split in two: the first pass detects the
    // split, rebuilds the instruction's back-end schedule, and records
    // the size/address/flags of the second half on the instruction.
    CacheReqPtr cache_req = dynamic_cast<CacheReqPtr>(findRequest(inst));
    assert(cache_req && "Can't Find Instruction for Read!");

    // The block size of our peer
    unsigned blockSize = this->cachePort->peerBlockSize();

    //The size of the data we're trying to read.
    int fullSize = size;
    inst->totalSize = size;

    if (inst->traceData) {
        inst->traceData->setAddr(addr);
    }

    if (inst->split2ndAccess) {
        // Second half of a split access: restore the recorded size.
        size = inst->split2ndSize;
        cache_req->splitAccess = true;
        cache_req->split2ndAccess = true;

        DPRINTF(InOrderCachePort, "[sn:%i] Split Read Access (2 of 2) for "
                "(%#x, %#x).\n", inst->seqNum, inst->getMemAddr(),
                inst->split2ndAddr);
    }


    //The address of the second part of this access if it needs to be split
    //across a cache line boundary.
    Addr secondAddr = roundDown(addr + size - 1, blockSize);


    if (secondAddr > addr && !inst->split2ndAccess) {

        if (!inst->splitInst) {
            DPRINTF(InOrderCachePort, "%i: sn[%i] Split Read Access (1 of 2) for "
                    "(%#x, %#x).\n", curTick(), inst->seqNum, addr, secondAddr);

            unsigned stage_num = cache_req->getStageNum();
            unsigned cmd = inst->curSkedEntry->cmd;

            // 1. Make A New Inst. Schedule w/Split Read/Complete Entered on
            // the schedule
            // ==============================
            // 2. Reassign curSkedPtr to current command (InitiateRead) on new
            // schedule
            // ==============================
            inst->splitInst = true;
            inst->setBackSked(cpu->createBackEndSked(inst));
            inst->curSkedEntry = inst->backSked->find(stage_num, cmd);
        } else {
            DPRINTF(InOrderCachePort, "[tid:%i] [sn:%i] Retrying Split Read "
                    "Access (1 of 2) for (%#x, %#x).\n", inst->readTid(),
                    inst->seqNum, addr, secondAddr);
        }

        // Save All "Total" Split Information
        // ==============================
        inst->splitMemData = new uint8_t[size];

        // Split Information for First Access
        // ==============================
        size = secondAddr - addr;
        cache_req->splitAccess = true;

        // Split Information for Second Access
        // ==============================
        inst->split2ndSize = addr + fullSize - secondAddr;
        inst->split2ndAddr = secondAddr;
        inst->split2ndDataPtr = inst->splitMemData + size;
        inst->split2ndFlags = flags;
    }

    doTLBAccess(inst, cache_req, size, flags, TheISA::TLB::Read);

    if (inst->fault == NoFault) {
        if (!cache_req->splitAccess) {
            cache_req->reqData = new uint8_t[size];
            doCacheAccess(inst, NULL);
        } else {
            // Split accesses read directly into the shared buffer: the
            // first half at its start, the second at split2ndDataPtr.
            if (!inst->split2ndAccess) {
                cache_req->reqData = inst->splitMemData;
            } else {
                cache_req->reqData = inst->split2ndDataPtr;
            }

            doCacheAccess(inst, NULL, cache_req);
        }
    }

    return inst->fault;
}
567
Fault
CacheUnit::write(DynInstPtr inst, uint8_t *data, unsigned size,
                 Addr addr, unsigned flags, uint64_t *write_res)
{
    // Initiate (or resume) a data write.  Cache-line-crossing stores
    // are detected here, but scheduling a fresh split-write is
    // currently unsupported (guarded by assert(0) below).
    CacheReqPtr cache_req = dynamic_cast<CacheReqPtr>(findRequest(inst));
    assert(cache_req && "Can't Find Instruction for Write!");

    // The block size of our peer
    unsigned blockSize = this->cachePort->peerBlockSize();

    //The size of the data we're trying to write.
    int fullSize = size;
    inst->totalSize = size;

    if (inst->traceData) {
        inst->traceData->setAddr(addr);
    }

    if (inst->split2ndAccess) {
        // Second half of a split access: restore the recorded size.
        size = inst->split2ndSize;
        cache_req->splitAccess = true;
        cache_req->split2ndAccess = true;

        DPRINTF(InOrderCachePort, "[sn:%i] Split Write Access (2 of 2) for "
                "(%#x, %#x).\n", inst->seqNum, inst->getMemAddr(),
                inst->split2ndAddr);
    }

    //The address of the second part of this access if it needs to be split
    //across a cache line boundary.
    Addr secondAddr = roundDown(addr + size - 1, blockSize);

    if (secondAddr > addr && !inst->split2ndAccess) {

        DPRINTF(InOrderCachePort, "[sn:%i] Split Write Access (1 of 2) for "
                "(%#x, %#x).\n", inst->seqNum, addr, secondAddr);

        // Save All "Total" Split Information
        // ==============================
        inst->splitInst = true;

        if (!inst->splitInstSked) {
            // Unreached in practice: split stores are not supported yet.
            assert(0 && "Split Requests Not Supported for Now...");

            // Schedule Split Read/Complete for Instruction
            // ==============================
            int stage_num = cache_req->getStageNum();
            RSkedPtr inst_sked = (stage_num >= ThePipeline::BackEndStartStage) ?
                inst->backSked : inst->frontSked;

            // this is just an arbitrarily high priority to ensure that this
            // gets pushed to the back of the list
            int stage_pri = 20;

            int isplit_cmd = CacheUnit::InitSecondSplitWrite;
            inst_sked->push(new
                            ScheduleEntry(stage_num,
                                          stage_pri,
                                          cpu->resPool->getResIdx(DCache),
                                          isplit_cmd,
                                          1));

            int csplit_cmd = CacheUnit::CompleteSecondSplitWrite;
            inst_sked->push(new
                            ScheduleEntry(stage_num + 1,
                                          1/*stage_pri*/,
                                          cpu->resPool->getResIdx(DCache),
                                          csplit_cmd,
                                          1));
            inst->splitInstSked = true;
        } else {
            DPRINTF(InOrderCachePort, "[tid:%i] sn:%i] Retrying Split Read "
                    "Access (1 of 2) for (%#x, %#x).\n",
                    inst->readTid(), inst->seqNum, addr, secondAddr);
        }



        // Split Information for First Access
        // ==============================
        size = secondAddr - addr;
        cache_req->splitAccess = true;

        // Split Information for Second Access
        // ==============================
        inst->split2ndSize = addr + fullSize - secondAddr;
        inst->split2ndAddr = secondAddr;
        inst->split2ndFlags = flags;
        inst->splitInstSked = true;
    }

    doTLBAccess(inst, cache_req, size, flags, TheISA::TLB::Write);

    if (inst->fault == NoFault) {
        if (!cache_req->splitAccess) {
            // Non-split store: copy the store data into the request's
            // own buffer before sending it to the cache.
            cache_req->reqData = new uint8_t[size];
            memcpy(cache_req->reqData, data, size);

            //inst->split2ndStoreDataPtr = cache_req->reqData;
            //inst->split2ndStoreDataPtr += size;

            doCacheAccess(inst, write_res);
        } else {
            doCacheAccess(inst, write_res, cache_req);
        }

    }

    return inst->fault;
}
678
679
void
CacheUnit::execute(int slot_num)
{
    // Dispatch the slot's current command: initiate the first or second
    // half of a read/write, or try to complete an outstanding access.
    CacheReqPtr cache_req = dynamic_cast<CacheReqPtr>(reqs[slot_num]);
    assert(cache_req);

    // A blocked cache port only prevents *initiating* new accesses;
    // completion commands can still be processed below.
    if (cachePortBlocked &&
        (cache_req->cmd == InitiateReadData ||
         cache_req->cmd == InitiateWriteData ||
         cache_req->cmd == InitSecondSplitRead ||
         cache_req->cmd == InitSecondSplitWrite)) {
        DPRINTF(InOrderCachePort, "Cache Port Blocked. Cannot Access\n");
        cache_req->done(false);
        return;
    }

    DynInstPtr inst = cache_req->inst;
    // A faulted instruction skips the access entirely; its fault is
    // handled at commit/graduation.
    if (inst->fault != NoFault) {
        DPRINTF(InOrderCachePort,
                "[tid:%i]: [sn:%i]: Detected %s fault @ %x. Forwarding to "
                "next stage.\n", inst->readTid(), inst->seqNum, inst->fault->name(),
                inst->getMemAddr());
        finishCacheUnitReq(inst, cache_req);
        return;
    }

#if TRACING_ON
    ThreadID tid = inst->readTid();
    std::string acc_type = "write";
#endif

    switch (cache_req->cmd)
    {

      case InitiateReadData:
#if TRACING_ON
        acc_type = "read";
#endif
        // Intentional fall-through: reads and writes share the
        // initiation path below.
      case InitiateWriteData:
        if (cachePortBlocked) {
            DPRINTF(InOrderCachePort, "Cache Port Blocked. Cannot Access\n");
            cache_req->done(false);
            return;
        }

        DPRINTF(InOrderCachePort,
                "[tid:%u]: [sn:%i] Initiating data %s access to %s for "
                "addr. %08p\n", tid, inst->seqNum, acc_type, name(),
                cache_req->inst->getMemAddr());

        inst->setCurResSlot(slot_num);

        if (inst->isDataPrefetch() || inst->isInstPrefetch()) {
            inst->execute();
        } else {
            inst->initiateAcc();
        }

        break;

      case InitSecondSplitRead:
        DPRINTF(InOrderCachePort,
                "[tid:%u]: [sn:%i] Initiating split data read access to %s "
                "for addr. %08p\n", tid, inst->seqNum, name(),
                cache_req->inst->split2ndAddr);
        inst->split2ndAccess = true;
        assert(inst->split2ndAddr != 0);
        read(inst, inst->split2ndAddr, &inst->split2ndData,
             inst->totalSize, inst->split2ndFlags);
        break;

      case InitSecondSplitWrite:
        DPRINTF(InOrderCachePort,
                "[tid:%u]: [sn:%i] Initiating split data write access to %s "
                "for addr. %08p\n", tid, inst->seqNum, name(),
                cache_req->inst->getMemAddr());

        inst->split2ndAccess = true;
        assert(inst->split2ndAddr != 0);
        write(inst, &inst->split2ndData, inst->totalSize,
              inst->split2ndAddr, inst->split2ndFlags, NULL);
        break;

      case CompleteReadData:
        DPRINTF(InOrderCachePort,
                "[tid:%i]: [sn:%i]: Trying to Complete Data Read Access\n",
                tid, inst->seqNum);


        //@todo: timing translations need to check here...
        assert(!inst->isInstPrefetch() && "Can't Handle Inst. Prefecthes");
        if (cache_req->isMemAccComplete() || inst->isDataPrefetch()) {
            finishCacheUnitReq(inst, cache_req);
        } else {
            // Still waiting on memory: stall this stage.
            DPRINTF(InOrderStall, "STALL: [tid:%i]: Data miss from %08p\n",
                    tid, cache_req->inst->getMemAddr());
            cache_req->setCompleted(false);
            cache_req->setMemStall(true);
        }
        break;

      case CompleteWriteData:
        {
            DPRINTF(InOrderCachePort,
                    "[tid:%i]: [sn:%i]: Trying to Complete Data Write Access\n",
                    tid, inst->seqNum);


            //@todo: check that timing translation is finished here
            RequestPtr mem_req = cache_req->memReq;
            DPRINTF(InOrderCachePort,
                    "[tid:%i]: [sn:%i]: cSwap:%i LLSC:%i isSwap:%i isCond:%i\n",
                    tid, inst->seqNum,
                    mem_req->isCondSwap(),
                    mem_req->isLLSC(),
                    mem_req->isSwap(),
                    inst->isStoreConditional());

            // Conditional/atomic stores must wait for the memory system's
            // answer before completing.
            if (mem_req->isCondSwap() || mem_req->isLLSC() || mem_req->isSwap()) {
                DPRINTF(InOrderCachePort, "Detected Conditional Store Inst.\n");

                if (!cache_req->isMemAccComplete()) {
                    DPRINTF(InOrderStall, "STALL: [tid:%i]: Data miss from %08p\n",
                            tid, cache_req->inst->getMemAddr());
                    cache_req->setCompleted(false);
                    cache_req->setMemStall(true);
                    return;
                } else {
                    DPRINTF(InOrderStall, "Mem Acc Completed\n");
                }
            }

            if (cache_req->isMemAccPending()) {
                DPRINTF(InOrderCachePort, "Store Instruction Pending Completion.\n");
                // Hand data/request ownership over to the in-flight packet
                // so the slot can be freed before the store completes.
                cache_req->dataPkt->reqData = cache_req->reqData;
                cache_req->dataPkt->memReq = cache_req->memReq;
            } else
                DPRINTF(InOrderCachePort, "Store Instruction Finished Completion.\n");

            //@todo: if split inst save data

            finishCacheUnitReq(inst, cache_req);
        }
        break;

      case CompleteSecondSplitRead:
        DPRINTF(InOrderCachePort,
                "[tid:%i]: [sn:%i]: Trying to Complete Split Data Read "
                "Access\n", tid, inst->seqNum);

        //@todo: check that timing translation is finished here
        assert(!inst->isInstPrefetch() && "Can't Handle Inst. Prefecthes");
        if (cache_req->isMemAccComplete() || inst->isDataPrefetch()) {
            finishCacheUnitReq(inst, cache_req);
        } else {
            DPRINTF(InOrderStall, "STALL: [tid:%i]: Data miss from %08p\n",
                    tid, cache_req->inst->split2ndAddr);
            cache_req->setCompleted(false);
            cache_req->setMemStall(true);
        }
        break;

      case CompleteSecondSplitWrite:
        DPRINTF(InOrderCachePort,
                "[tid:%i]: [sn:%i]: Trying to Complete Split Data Write "
                "Access\n", tid, inst->seqNum);
        //@todo: illegal to have a unaligned cond.swap or llsc?
        assert(!cache_req->memReq->isSwap() && !cache_req->memReq->isCondSwap()
               && !cache_req->memReq->isLLSC());

        if (cache_req->isMemAccPending()) {
            cache_req->dataPkt->reqData = cache_req->reqData;
            cache_req->dataPkt->memReq = cache_req->memReq;
        }

        //@todo: check that timing translation is finished here
        finishCacheUnitReq(inst, cache_req);
        break;

      default:
        fatal("Unrecognized command to %s", resName);
    }
}
863
void
CacheUnit::finishCacheUnitReq(DynInstPtr inst, CacheRequest *cache_req)
{
    // Clear any memory stall and mark the request done, releasing the
    // slot for reuse.
    //@note: add back in for speculative load/store capability
    //removeAddrDependency(inst);
    cache_req->setMemStall(false);
    cache_req->done();
}
872
void
CacheUnit::buildDataPacket(CacheRequest *cache_req)
{
    // Construct the CacheReqPacket for this request, upgrading plain
    // read/write commands to their LL/SC or swap variants as needed.

    // Check for LL/SC and if so change command
    if (cache_req->memReq->isLLSC() && cache_req->pktCmd == MemCmd::ReadReq) {
        cache_req->pktCmd = MemCmd::LoadLockedReq;
    }

    if (cache_req->pktCmd == MemCmd::WriteReq) {
        cache_req->pktCmd =
            cache_req->memReq->isSwap() ? MemCmd::SwapReq :
            (cache_req->memReq->isLLSC() ? MemCmd::StoreCondReq
             : MemCmd::WriteReq);
    }

    cache_req->dataPkt = new CacheReqPacket(cache_req,
                                            cache_req->pktCmd,
                                            Packet::Broadcast,
                                            cache_req->instIdx);
    DPRINTF(InOrderCachePort, "[slot:%i]: Slot marked for %x [pkt:%x->%x]\n",
            cache_req->getSlot(),
            cache_req->dataPkt->getAddr(),
            &cache_req->dataPkt,
            cache_req->dataPkt);

    cache_req->dataPkt->hasSlot = true;
    // The packet's data area aliases the request's buffer; the packet
    // does not own (and will not free) this memory.
    cache_req->dataPkt->dataStatic(cache_req->reqData);
}
901
void
CacheUnit::doCacheAccess(DynInstPtr inst, uint64_t *write_res,
                         CacheReqPtr split_req)
{
    // Build the packet for this request and attempt to send it through
    // the cache port, with special handling for LL/SC and swap stores.
    Fault fault = NoFault;
#if TRACING_ON
    ThreadID tid = inst->readTid();
#endif
    bool do_access = true;  // flag to suppress cache access

    // Special Handling if this is a split request
    CacheReqPtr cache_req;
    if (split_req == NULL)
        cache_req = dynamic_cast<CacheReqPtr>(reqs[inst->getCurResSlot()]);
    else {
        cache_req = split_req;
        assert(0);
    }

    // Make a new packet inside the CacheRequest object
    assert(cache_req);
    buildDataPacket(cache_req);

    // Special Handling for LL/SC or Compare/Swap
    bool is_write = cache_req->dataPkt->isWrite();
    RequestPtr mem_req = cache_req->dataPkt->req;
    if (is_write) {
        DPRINTF(InOrderCachePort,
                "[tid:%u]: [sn:%i]: Storing data: %s\n",
                tid, inst->seqNum,
                printMemData(cache_req->dataPkt->getPtr<uint8_t>(),
                             cache_req->dataPkt->getSize()));

        if (mem_req->isCondSwap()) {
            assert(write_res);
            cache_req->memReq->setExtraData(*write_res);
        }
        if (mem_req->isLLSC()) {
            assert(cache_req->inst->isStoreConditional());
            DPRINTF(InOrderCachePort, "Evaluating Store Conditional access\n");
            // A failed store-conditional suppresses the actual access.
            do_access = TheISA::handleLockedWrite(inst.get(), mem_req);
        }
    }

    // Finally, go ahead and make the access if we can...
    DPRINTF(InOrderCachePort,
            "[tid:%i] [sn:%i] attempting to access cache for addr %08p\n",
            tid, inst->seqNum, cache_req->dataPkt->getAddr());

    if (do_access) {
        if (!cachePort->sendTiming(cache_req->dataPkt)) {
            // Port refused the packet: tear everything down and retry
            // once the port issues a recvRetry().
            DPRINTF(InOrderCachePort,
                    "[tid:%i] [sn:%i] cannot access cache, because port "
                    "is blocked. now waiting to retry request\n", tid,
                    inst->seqNum);
            delete cache_req->dataPkt;
            cache_req->dataPkt = NULL;

            delete cache_req->memReq;
            cache_req->memReq = NULL;

            cache_req->done(false);
            cachePortBlocked = true;
        } else {
            DPRINTF(InOrderCachePort,
                    "[tid:%i] [sn:%i] is now waiting for cache response\n",
                    tid, inst->seqNum);
            cache_req->setCompleted();
            cache_req->setMemAccPending();
            cachePortBlocked = false;
        }
    } else if (mem_req->isLLSC()){
        // Store-Conditional instructions complete even if they "failed"
        assert(cache_req->inst->isStoreConditional());
        cache_req->setCompleted(true);

        DPRINTF(LLSC,
                "[tid:%i]: T%i Ignoring Failed Store Conditional Access\n",
                tid, tid);

        processCacheCompletion(cache_req->dataPkt);
    } else {
        delete cache_req->dataPkt;
        cache_req->dataPkt = NULL;

        delete cache_req->memReq;
        cache_req->memReq = NULL;

        // Make cache request again since access due to
        // inability to access
        DPRINTF(InOrderStall, "STALL: \n");
        cache_req->done(false);
    }

}
997
bool
CacheUnit::processSquash(CacheReqPacket *cache_pkt)
{
    // Returns true if this packet belonged to a slot-less or squashed
    // access and has been disposed of here; false if normal completion
    // processing should continue.

    // The resource may no longer be actively servicing this
    // packet. Scenarios like a store that has been sent to the
    // memory system or access that's been squashed. If that's
    // the case, we can't access the request slot because it
    // will be either invalid or servicing another request.
    if (!cache_pkt->hasSlot) {
        DPRINTF(InOrderCachePort,
                "%x does not have a slot in unit, ignoring.\n",
                cache_pkt->getAddr());

        // Free any data/request the packet took ownership of.
        if (cache_pkt->reqData) {
            delete [] cache_pkt->reqData;
            cache_pkt->reqData = NULL;
        }

        if (cache_pkt->memReq) {
            delete cache_pkt->memReq;
            cache_pkt->memReq = NULL;
        }

        delete cache_pkt;
        cache_pkt = NULL;
        cpu->wakeCPU();
        return true;
    } else {
        DPRINTF(InOrderCachePort, "%x has slot %i\n",
                cache_pkt->getAddr(), cache_pkt->cacheReq->getSlot());
    }


    // It's possible that the request is squashed but the
    // packet is still acknowledged by the resource. Squashes
    // should happen at the end of the cycles and trigger the
    // code above, but if not, this would handle any timing
    // variations due to diff. user parameters.
    if (cache_pkt->cacheReq->isSquashed()) {
        DPRINTF(InOrderCachePort,
                "Ignoring completion of squashed access, [tid:%i] [sn:%i]\n",
                cache_pkt->cacheReq->getInst()->readTid(),
                cache_pkt->cacheReq->getInst()->seqNum);

        cache_pkt->cacheReq->setMemAccPending(false);
        cache_pkt->cacheReq->freeSlot();
        delete cache_pkt;
        cache_pkt = NULL;
        cpu->wakeCPU();
        return true;
    }


    return false;
}
1053
void
CacheUnit::processCacheCompletion(PacketPtr pkt)
{
    // Callback from the memory system when a cache access finishes.
    // Completes the instruction's memory access (merging split accesses
    // when both halves are back), does LL/SC bookkeeping for loads, and
    // wakes the owning pipeline stage / CPU so execution can resume.

    //@todo: use packet sender state instead of deriving from packet class to
    // get special state
    CacheReqPacket* cache_pkt = dynamic_cast<CacheReqPacket*>(pkt);
    assert(cache_pkt);

    DPRINTF(InOrderCachePort, "Finished request for %x [pkt:%x->%x]\n",
            pkt->getAddr(), &cache_pkt, cache_pkt);

    //@todo: process Squashed Completion
    // If the packet lost its slot or its request was squashed,
    // processSquash() already cleaned everything up; nothing more to do.
    if (processSquash(cache_pkt))
        return;

    CacheRequest *cache_req = dynamic_cast<CacheReqPtr>(
        findRequest(cache_pkt->cacheReq->getInst(), cache_pkt->instIdx));

    if (!cache_req) {
        panic("[tid:%u]: [sn:%i]: Can't find slot for cache access to "
              "addr. %08p\n", cache_pkt->cacheReq->getInst()->readTid(),
              cache_pkt->cacheReq->getInst()->seqNum,
              cache_pkt->cacheReq->getInst()->getMemAddr());
    }

    assert(cache_req);
    assert(cache_req == cache_pkt->cacheReq);

    DPRINTF(InOrderCachePort,
            "[tid:%u]: [sn:%i]: [slot:%i] Waking from cache access (vaddr.%08p, paddr:%08p)\n",
            cache_pkt->cacheReq->getInst()->readTid(),
            cache_pkt->cacheReq->getInst()->seqNum,
            cache_req->getSlot(),
            cache_pkt->req->getVaddr(),
            cache_pkt->req->getPaddr());

    // Get resource request info
    unsigned stage_num = cache_req->getStageNum();
    DynInstPtr inst = cache_req->inst;
    ThreadID tid = cache_req->inst->readTid();

    assert(!cache_req->isSquashed());
    assert(inst->staticInst && inst->isMemRef());


    DPRINTF(InOrderCachePort,
            "[tid:%u]: [sn:%i]: Processing cache access\n",
            tid, inst->seqNum);

    PacketPtr split_pkt = NULL;
    if (inst->splitInst) {
        // A split access only completes once both halves have returned.
        inst->splitFinishCnt++;

        if (inst->splitFinishCnt == 2) {
            // Both halves are back: rewrite the request to cover the
            // full (unsplit) virtual address range and complete the
            // access with the merged data buffer.
            cache_req->memReq->setVirt(0/*inst->tid*/,
                                       inst->getMemAddr(),
                                       inst->totalSize,
                                       0,
                                       0);

            split_pkt = new Packet(cache_req->memReq, cache_req->pktCmd,
                                   Packet::Broadcast);
            // Data buffer is owned by the instruction, not the packet.
            split_pkt->dataStatic(inst->splitMemData);

            DPRINTF(InOrderCachePort, "Completing Split Access.\n");
            inst->completeAcc(split_pkt);
        }
    } else {
        inst->completeAcc(cache_pkt);
    }

    inst->setExecuted();

    if (inst->isLoad()) {
        assert(cache_pkt->isRead());

        if (cache_pkt->req->isLLSC()) {
            DPRINTF(InOrderCachePort,
                    "[tid:%u]: Handling Load-Linked for [sn:%u]\n",
                    tid, inst->seqNum);
            // Record the locked address for a later store-conditional.
            TheISA::handleLockedRead(inst.get(), cache_pkt->req);
        }

        DPRINTF(InOrderCachePort,
                "[tid:%u]: [sn:%i]: Bytes loaded were: %s\n",
                tid, inst->seqNum,
                (split_pkt) ? printMemData(split_pkt->getPtr<uint8_t>(),
                                           split_pkt->getSize()) :
                printMemData(cache_pkt->getPtr<uint8_t>(),
                             cache_pkt->getSize()));
    } else if(inst->isStore()) {
        assert(cache_pkt->isWrite());

        DPRINTF(InOrderCachePort,
                "[tid:%u]: [sn:%i]: Bytes stored were: %s\n",
                tid, inst->seqNum,
                (split_pkt) ? printMemData(split_pkt->getPtr<uint8_t>(),
                                           split_pkt->getSize()) :
                printMemData(cache_pkt->getPtr<uint8_t>(),
                             cache_pkt->getSize()));
    }

    DPRINTF(InOrderCachePort, "Deleting packets %x (%x).\n",
            cache_pkt, cache_req->dataPkt);

    // The merged packet was only needed for completeAcc() above.
    if (split_pkt) {
        delete split_pkt;
        split_pkt = NULL;
    }

    cache_req->setMemAccPending(false);
    cache_req->setMemAccCompleted();

    if (cache_req->isMemStall() &&
        cpu->threadModel == InOrderCPU::SwitchOnCacheMiss) {
        // Under SwitchOnCacheMiss, this thread was deactivated on the
        // miss; reactivate it now that the access has returned.
        DPRINTF(InOrderCachePort, "[tid:%u] Waking up from Cache Miss.\n",
                tid);

        cpu->activateContext(tid);

        DPRINTF(ThreadModel, "Activating [tid:%i] after return from cache"
                "miss.\n", tid);
    }

    // Wake up the CPU (if it went to sleep and was waiting on this
    // completion event).
    cpu->wakeCPU();

    DPRINTF(Activity, "[tid:%u] Activating %s due to cache completion\n",
            tid, cpu->pipelineStage[stage_num]->name());

    cpu->switchToActive(stage_num);
}
1187
1188 void
1189 CacheUnit::recvRetry()
1190 {
1191 DPRINTF(InOrderCachePort, "Unblocking Cache Port. \n");
1192
1193 assert(cachePortBlocked);
1194
1195 // Clear the cache port for use again
1196 cachePortBlocked = false;
1197
1198 cpu->wakeCPU();
1199 }
1200
1201 CacheUnitEvent::CacheUnitEvent()
1202 : ResourceEvent()
1203 { }
1204
void
CacheUnitEvent::process()
{
    // Scheduled on a TLB miss: invoke the translation fault handler,
    // then release the per-thread TLB block and the pipeline stall the
    // miss installed so the faulting instruction can retry its access.
    DynInstPtr inst = resource->reqs[slotIdx]->inst;
    int stage_num = resource->reqs[slotIdx]->getStageNum();
    ThreadID tid = inst->threadNumber;
    CacheReqPtr req_ptr = dynamic_cast<CacheReqPtr>(resource->reqs[slotIdx]);

    DPRINTF(InOrderTLB, "Waking up from TLB Miss caused by [sn:%i].\n",
            inst->seqNum);

    // This event always belongs to a CacheUnit (acting as the TLB here).
    CacheUnit* tlb_res = dynamic_cast<CacheUnit*>(resource);
    assert(tlb_res);

    //@todo: eventually, we should do a timing translation w/
    //       hw page table walk on tlb miss
    DPRINTF(Fault, "Handling Fault %s : [sn:%i] %x\n", inst->fault->name(), inst->seqNum, inst->getMemAddr());
    inst->fault->invoke(tlb_res->cpu->tcBase(tid), inst->staticInst);

    // Allow this thread to issue TLB accesses again.
    tlb_res->tlbBlocked[tid] = false;

    // Remove the stall this request placed on its pipeline stage.
    tlb_res->cpu->pipelineStage[stage_num]->
        unsetResStall(tlb_res->reqs[slotIdx], tid);

    req_ptr->tlbStall = false;

    //@todo: timing translation needs to have some type of independent
    //       info regarding if it's squashed or not so we can
    //       free up the resource if a request gets squashed in the middle
    //       of a table walk
    // A request squashed while stalled on the TLB kept its slot (see
    // CacheUnit::squash); release it now.
    if (req_ptr->isSquashed()) {
        req_ptr->freeSlot();
    }

    // The CPU may have gone to sleep while this thread waited on the TLB.
    tlb_res->cpu->wakeCPU();
}
1241
1242 void
1243 CacheUnit::squashDueToMemStall(DynInstPtr inst, int stage_num,
1244 InstSeqNum squash_seq_num, ThreadID tid)
1245 {
1246 // If squashing due to memory stall, then we do NOT want to
1247 // squash the instruction that caused the stall so we
1248 // increment the sequence number here to prevent that.
1249 //
1250 // NOTE: This is only for the SwitchOnCacheMiss Model
1251 // NOTE: If you have multiple outstanding misses from the same
1252 // thread then you need to reevaluate this code
1253 // NOTE: squash should originate from
1254 // pipeline_stage.cc:processInstSchedule
1255 DPRINTF(InOrderCachePort, "Squashing above [sn:%u]\n",
1256 squash_seq_num + 1);
1257
1258 squash(inst, stage_num, squash_seq_num + 1, tid);
1259 }
1260
1261 void
1262 CacheUnit::squashCacheRequest(CacheReqPtr req_ptr)
1263 {
1264 DynInstPtr inst = req_ptr->getInst();
1265 req_ptr->setSquashed();
1266 inst->setSquashed();
1267
1268 //@note: add back in for speculative load/store capability
1269 /*if (inst->validMemAddr()) {
1270 DPRINTF(AddrDep, "Squash of [tid:%i] [sn:%i], attempting to "
1271 "remove addr. %08p dependencies.\n",
1272 inst->readTid(),
1273 inst->seqNum,
1274 inst->getMemAddr());
1275
1276 removeAddrDependency(inst);
1277 }*/
1278 }
1279
1280
void
CacheUnit::squash(DynInstPtr inst, int stage_num,
                  InstSeqNum squash_seq_num, ThreadID tid)
{
    // Squash every outstanding request from this thread younger than
    // squash_seq_num, releasing any TLB block, pipeline stall, and
    // resource slot those requests hold.

    // If the TLB block was installed by an instruction that is itself
    // being squashed, lift the block.
    if (tlbBlocked[tid] &&
        tlbBlockSeqNum[tid] > squash_seq_num) {
        DPRINTF(InOrderCachePort, "Releasing TLB Block due to "
                " squash after [sn:%i].\n", squash_seq_num);
        tlbBlocked[tid] = false;
    }

    for (int i = 0; i < width; i++) {
        ResReqPtr req_ptr = reqs[i];

        if (req_ptr->valid &&
            req_ptr->getInst()->readTid() == tid &&
            req_ptr->getInst()->seqNum > squash_seq_num) {

            DPRINTF(InOrderCachePort,
                    "[tid:%i] Squashing request from [sn:%i]\n",
                    req_ptr->getInst()->readTid(), req_ptr->getInst()->seqNum);

            // Don't squash the same request twice.
            if (req_ptr->isSquashed()) {
                DPRINTF(AddrDep, "Request for [tid:%i] [sn:%i] already "
                        "squashed, ignoring squash process.\n",
                        req_ptr->getInst()->readTid(),
                        req_ptr->getInst()->seqNum);
                continue;
            }

            CacheReqPtr cache_req = dynamic_cast<CacheReqPtr>(req_ptr);
            assert(cache_req);

            squashCacheRequest(cache_req);

            int req_slot_num = req_ptr->getSlot();

            // If this request stalled its stage on a TLB miss, undo both
            // the per-thread TLB block and the stage stall.
            if (cache_req->tlbStall) {
                tlbBlocked[tid] = false;

                int stall_stage = reqs[req_slot_num]->getStageNum();

                cpu->pipelineStage[stall_stage]->
                    unsetResStall(reqs[req_slot_num], tid);
            }

            // If the access is still in flight, hand the data buffer and
            // memory request over to the packet so they can be cleaned up
            // when the access eventually returns (see processSquash).
            if (cache_req->isMemAccPending()) {
                cache_req->dataPkt->reqData = cache_req->reqData;
                cache_req->dataPkt->memReq = cache_req->memReq;
            }

            // A request stalled on the TLB keeps its slot until the TLB
            // event fires (see CacheUnitEvent::process).
            if (!cache_req->tlbStall)
                freeSlot(req_slot_num);
        }
    }

}
1338
void
CacheRequest::clearRequest()
{
    // Reset this request's state so its slot can be reused.  Memory the
    // request owns is freed only when no access is still pending in the
    // memory system; otherwise ownership stays with the in-flight packet.
    if (!memAccPending) {
        // reqData is not freed for split accesses — presumably it is
        // owned elsewhere for those (TODO confirm against split-access
        // setup code).
        if (reqData && !splitAccess)
            delete [] reqData;

        if (memReq) {
            DPRINTF(InOrderCachePort, "Clearing request for %x...%x\n",
                    memReq->getVaddr(), (memReq->hasPaddr()) ? memReq->getPaddr() : 0);
            delete memReq;
        }

        if (dataPkt)
            delete dataPkt;
    } else {
        // Access still in flight: mark the packet as slot-less so the
        // completion path knows this slot no longer belongs to it and
        // cleans the packet up itself (see processSquash).
        if (dataPkt) {
            dataPkt->hasSlot = false;
            DPRINTF(InOrderCachePort, "[slot:%i]: Slot unmarked for %x for [pkt:%x->%x]\n",
                    getSlot(), dataPkt->getAddr(), &dataPkt, dataPkt);
        }
    }

    // Null out pointers (freed above or now owned by the packet) and
    // return all flags to their idle defaults.
    memReq = NULL;
    reqData = NULL;
    dataPkt = NULL;
    memAccComplete = false;
    memAccPending = false;
    tlbStall = false;
    splitAccess = false;
    splitAccessNum = -1;
    split2ndAccess = false;
    instIdx = 0;
    fetchBufferFill = false;

    ResourceRequest::clearRequest();
}