2 * Copyright (c) 2007 MIPS Technologies, Inc.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 * Authors: Korey Sewell
#include <iomanip>
#include <sstream>
#include <string>

#include "arch/isa_traits.hh"
#include "arch/locked_mem.hh"
#include "arch/predecoder.hh"
#include "arch/utility.hh"
#include "config/the_isa.hh"
#include "cpu/inorder/resources/cache_unit.hh"
#include "cpu/inorder/pipeline_traits.hh"
#include "cpu/inorder/cpu.hh"
#include "cpu/inorder/resource_pool.hh"
#include "mem/request.hh"
47 using namespace TheISA
;
48 using namespace ThePipeline
;
// Format a buffer of raw memory bytes as a lowercase hex string
// (two zero-padded digits per byte), used by the load/store debug
// traces below.
//
// @param data  pointer to the bytes to print (must hold >= size bytes)
// @param size  number of bytes to format; size == 0 yields ""
// @return      hex string, e.g. {0x0a, 0xff} -> "0aff"
static std::string
printMemData(uint8_t *data, unsigned size)
{
    std::stringstream dataStr;
    for (unsigned pos = 0; pos < size; pos++) {
        // Equivalent of ccprintf(dataStr, "%02x", data[pos]):
        // zero-padded two-digit lowercase hex. setw() is not sticky,
        // so it is re-applied on every byte.
        dataStr << std::hex << std::setw(2) << std::setfill('0')
                << static_cast<unsigned>(data[pos]);
    }
    return dataStr.str();
}
63 CacheUnit::CachePort::recvAtomic(PacketPtr pkt
)
65 panic("CacheUnit::CachePort doesn't expect recvAtomic callback!");
70 CacheUnit::CachePort::recvFunctional(PacketPtr pkt
)
72 panic("CacheUnit::CachePort doesn't expect recvFunctional callback!");
76 CacheUnit::CachePort::recvStatusChange(Status status
)
78 if (status
== RangeChange
)
81 panic("CacheUnit::CachePort doesn't expect recvStatusChange callback!");
85 CacheUnit::CachePort::recvTiming(Packet
*pkt
)
87 cachePortUnit
->processCacheCompletion(pkt
);
92 CacheUnit::CachePort::recvRetry()
94 cachePortUnit
->recvRetry();
97 CacheUnit::CacheUnit(string res_name
, int res_id
, int res_width
,
98 int res_latency
, InOrderCPU
*_cpu
, ThePipeline::Params
*params
)
99 : Resource(res_name
, res_id
, res_width
, res_latency
, _cpu
),
100 cachePortBlocked(false)
102 cachePort
= new CachePort(this);
104 // Hard-Code Selection For Now
105 if (res_name
== "icache_port")
107 else if (res_name
== "dcache_port")
110 fatal("Unrecognized TLB name passed by user");
112 for (int i
=0; i
< MaxThreads
; i
++) {
113 tlbBlocked
[i
] = false;
125 CacheUnit::getPort(const string
&if_name
, int idx
)
127 if (if_name
== resName
)
136 // Currently Used to Model TLB Latency. Eventually
137 // Switch to Timing TLB translations.
138 resourceEvent
= new CacheUnitEvent
[width
];
140 cacheBlkSize
= this->cachePort
->peerBlockSize();
141 cacheBlkMask
= cacheBlkSize
- 1;
147 CacheUnit::getSlot(DynInstPtr inst
)
149 ThreadID tid
= inst
->readTid();
151 if (tlbBlocked
[inst
->threadNumber
]) {
155 // For a Split-Load, the instruction would have processed once already
156 // causing the address to be unset.
157 if (!inst
->validMemAddr() && !inst
->splitInst
) {
158 panic("[tid:%i][sn:%i] Mem. Addr. must be set before requesting "
159 "cache access\n", inst
->readTid(), inst
->seqNum
);
162 Addr req_addr
= inst
->getMemAddr();
164 if (resName
== "icache_port" ||
165 find(addrList
[tid
].begin(), addrList
[tid
].end(), req_addr
) ==
166 addrList
[tid
].end()) {
168 int new_slot
= Resource::getSlot(inst
);
173 inst
->memTime
= curTick();
174 setAddrDependency(inst
);
177 // Allow same instruction multiple accesses to same address
178 // should only happen maybe after a squashed inst. needs to replay
179 if (addrMap
[tid
][req_addr
] == inst
->seqNum
) {
180 int new_slot
= Resource::getSlot(inst
);
187 DPRINTF(InOrderCachePort
,
188 "[tid:%i] Denying request because there is an outstanding"
189 " request to/for addr. %08p. by [sn:%i] @ tick %i\n",
190 inst
->readTid(), req_addr
, addrMap
[tid
][req_addr
], inst
->memTime
);
// Record inst's memory address in this unit's per-thread outstanding-
// address structures: addrList (ordered list of in-flight addresses) and
// addrMap (addr -> seq num of owning inst). getSlot() consults these to
// deny a new request to an address that already has an access pending.
// NOTE(review): this chunk is a mangled extraction; interior source
// lines (e.g. the DPRINTF(AddrDep, ...) opener) are missing, so the
// code below is left byte-identical.
199 CacheUnit::setAddrDependency(DynInstPtr inst
)
201 Addr req_addr
= inst
->getMemAddr();
202 ThreadID tid
= inst
->readTid();
// Track the address in both the list and the debug map.
204 addrList
[tid
].push_back(req_addr
);
205 addrMap
[tid
][req_addr
] = inst
->seqNum
;
208 "[tid:%i]: [sn:%i]: Address %08p added to dependency list (size=%i)\n",
209 inst
->readTid(), inst
->seqNum
, req_addr
, addrList
[tid
].size());
211 //@NOTE: 10 is an arbitrarily "high" number, but to be exact
212 //       we would need to know the # of outstanding accesses
213 //       a priori. Information like fetch width, stage width,
214 //       fetch buffer, and the branch resolution stage would be
215 //       useful for the icache_port. For the dcache port, the #
216 //       of outstanding cache accesses (mshrs) would be a good
217 //       sanity check here.
218 //assert(addrList[tid].size() < 10);
// Undo setAddrDependency(): clear the inst's memory address and remove
// it from the per-thread address list and (debug) address map. For
// split accesses the list entry may already be gone, hence the
// "|| inst->splitInst" escape in the assert.
// NOTE(review): mangled extraction with interior lines missing; code
// left byte-identical.
222 CacheUnit::removeAddrDependency(DynInstPtr inst
)
224 ThreadID tid
= inst
->readTid();
226 Addr mem_addr
= inst
->getMemAddr();
228 inst
->unsetMemAddr();
230 // Erase from Address List
231 vector
<Addr
>::iterator vect_it
= find(addrList
[tid
].begin(),
234 assert(vect_it
!= addrList
[tid
].end() || inst
->splitInst
);
236 if (vect_it
!= addrList
[tid
].end()) {
238 "[tid:%i]: [sn:%i] Address %08p removed from dependency "
239 "list\n", inst
->readTid(), inst
->seqNum
, (*vect_it
));
241 addrList
[tid
].erase(vect_it
);
243 // Erase From Address Map (Used for Debugging)
244 addrMap
[tid
].erase(addrMap
[tid
].find(mem_addr
));
// Locate the outstanding CacheRequest in reqMap that belongs to 'inst'
// at the inst's current schedule-entry index (so split accesses with
// multiple requests per inst resolve to the right one).
// NOTE(review): mangled extraction; the loop close and return paths are
// missing from view, so the code below is left byte-identical.
251 CacheUnit::findRequest(DynInstPtr inst
)
253 map
<int, ResReqPtr
>::iterator map_it
= reqMap
.begin();
254 map
<int, ResReqPtr
>::iterator map_end
= reqMap
.end();
256 while (map_it
!= map_end
) {
257 CacheRequest
* cache_req
=
258 dynamic_cast<CacheRequest
*>((*map_it
).second
);
// Match on both the owning inst and its current sked-entry index.
262 cache_req
->getInst() == inst
&&
263 cache_req
->instIdx
== inst
->curSkedEntry
->idx
) {
// Overload of findRequest() taking an explicit schedule-entry index
// instead of reading inst->curSkedEntry — used when completing a packet
// whose originating sked entry is recorded in the packet itself.
// NOTE(review): mangled extraction; loop close and return paths missing
// from view, so the code below is left byte-identical.
273 CacheUnit::findRequest(DynInstPtr inst
, int idx
)
275 map
<int, ResReqPtr
>::iterator map_it
= reqMap
.begin();
276 map
<int, ResReqPtr
>::iterator map_end
= reqMap
.end();
278 while (map_it
!= map_end
) {
279 CacheRequest
* cache_req
=
280 dynamic_cast<CacheRequest
*>((*map_it
).second
);
// Match on the owning inst and the caller-supplied index.
284 cache_req
->getInst() == inst
&&
285 cache_req
->instIdx
== idx
) {
296 CacheUnit::getRequest(DynInstPtr inst
, int stage_num
, int res_idx
,
297 int slot_num
, unsigned cmd
)
299 ScheduleEntry
* sched_entry
= *inst
->curSkedEntry
;
301 if (!inst
->validMemAddr()) {
302 panic("Mem. Addr. must be set before requesting cache access\n");
305 MemCmd::Command pkt_cmd
;
307 switch (sched_entry
->cmd
)
309 case InitSecondSplitRead
:
310 pkt_cmd
= MemCmd::ReadReq
;
312 DPRINTF(InOrderCachePort
,
313 "[tid:%i]: Read request from [sn:%i] for addr %08p\n",
314 inst
->readTid(), inst
->seqNum
, inst
->split2ndAddr
);
317 case InitiateReadData
:
318 pkt_cmd
= MemCmd::ReadReq
;
320 DPRINTF(InOrderCachePort
,
321 "[tid:%i]: Read request from [sn:%i] for addr %08p\n",
322 inst
->readTid(), inst
->seqNum
, inst
->getMemAddr());
325 case InitSecondSplitWrite
:
326 pkt_cmd
= MemCmd::WriteReq
;
328 DPRINTF(InOrderCachePort
,
329 "[tid:%i]: Write request from [sn:%i] for addr %08p\n",
330 inst
->readTid(), inst
->seqNum
, inst
->split2ndAddr
);
333 case InitiateWriteData
:
334 pkt_cmd
= MemCmd::WriteReq
;
336 DPRINTF(InOrderCachePort
,
337 "[tid:%i]: Write request from [sn:%i] for addr %08p\n",
338 inst
->readTid(), inst
->seqNum
, inst
->getMemAddr());
342 panic("%i: Unexpected request type (%i) to %s", curTick(),
343 sched_entry
->cmd
, name());
346 return new CacheRequest(this, inst
, stage_num
, id
, slot_num
,
347 sched_entry
->cmd
, 0, pkt_cmd
,
348 0/*flags*/, this->cpu
->readCpuId(),
349 inst
->curSkedEntry
->idx
);
353 CacheUnit::requestAgain(DynInstPtr inst
, bool &service_request
)
355 CacheReqPtr cache_req
= dynamic_cast<CacheReqPtr
>(findRequest(inst
));
358 // Check to see if this instruction is requesting the same command
359 // or a different one
360 if (cache_req
->cmd
!= inst
->curSkedEntry
->cmd
&&
361 cache_req
->instIdx
== inst
->curSkedEntry
->idx
) {
362 // If different, then update command in the request
363 cache_req
->cmd
= inst
->curSkedEntry
->cmd
;
364 DPRINTF(InOrderCachePort
,
365 "[tid:%i]: [sn:%i]: Updating the command for this "
366 "instruction\n ", inst
->readTid(), inst
->seqNum
);
368 service_request
= true;
369 } else if (inst
->curSkedEntry
->idx
!= CacheUnit::InitSecondSplitRead
&&
370 inst
->curSkedEntry
->idx
!= CacheUnit::InitSecondSplitWrite
) {
371 // If same command, just check to see if memory access was completed
372 // but dont try to re-execute
373 DPRINTF(InOrderCachePort
,
374 "[tid:%i]: [sn:%i]: requesting this resource again\n",
375 inst
->readTid(), inst
->seqNum
);
377 service_request
= true;
// Build (or reuse) the Request object describing this memory access and
// attach it to the cache request. First halves of accesses allocate
// inst->dataMemReq; second halves of split accesses allocate
// inst->splitMemReq instead.
// NOTE(review): mangled extraction; the Request constructor argument
// lists are truncated mid-call in this view, so the code below is left
// byte-identical.
382 CacheUnit::setupMemRequest(DynInstPtr inst
, CacheReqPtr cache_req
,
383 int acc_size
, int flags
)
385 ThreadID tid
= inst
->readTid();
386 Addr aligned_addr
= inst
->getMemAddr();
// First access (or non-split): build the primary data request.
388 if (!cache_req
->is2ndSplit()) {
390 new Request(cpu
->asid
[tid
], aligned_addr
, acc_size
, flags
,
391 inst
->instAddr(), cpu
->readCpuId(),
393 cache_req
->memReq
= inst
->dataMemReq
;
// Second half of a split access: only valid for split insts.
395 assert(inst
->splitInst
);
397 inst
->splitMemReq
= new Request(cpu
->asid
[tid
],
404 cache_req
->memReq
= inst
->splitMemReq
;
409 CacheUnit::doTLBAccess(DynInstPtr inst
, CacheReqPtr cache_req
, int acc_size
,
410 int flags
, TheISA::TLB::Mode tlb_mode
)
412 ThreadID tid
= inst
->readTid();
413 //Addr aligned_addr = inst->getMemAddr();
414 unsigned stage_num
= cache_req
->getStageNum();
415 unsigned slot_idx
= cache_req
->getSlot();
417 setupMemRequest(inst
, cache_req
, acc_size
, flags
);
420 _tlb
->translateAtomic(cache_req
->memReq
,
421 cpu
->thread
[tid
]->getTC(), tlb_mode
);
423 if (inst
->fault
!= NoFault
) {
424 DPRINTF(InOrderTLB
, "[tid:%i]: %s encountered while translating "
425 "addr:%08p for [sn:%i].\n", tid
, inst
->fault
->name(),
426 cache_req
->memReq
->getVaddr(), inst
->seqNum
);
428 cpu
->pipelineStage
[stage_num
]->setResStall(cache_req
, tid
);
430 tlbBlocked
[tid
] = true;
432 cache_req
->tlbStall
= true;
434 scheduleEvent(slot_idx
, 1);
436 cpu
->trap(inst
->fault
, tid
, inst
);
438 DPRINTF(InOrderTLB
, "[tid:%i]: [sn:%i] virt. addr %08p translated "
439 "to phys. addr:%08p.\n", tid
, inst
->seqNum
,
440 cache_req
->memReq
->getVaddr(),
441 cache_req
->memReq
->getPaddr());
447 CacheUnit::read(DynInstPtr inst
, Addr addr
,
448 uint8_t *data
, unsigned size
, unsigned flags
)
450 CacheReqPtr cache_req
= dynamic_cast<CacheReqPtr
>(findRequest(inst
));
451 assert(cache_req
&& "Can't Find Instruction for Read!");
453 // The block size of our peer
454 unsigned blockSize
= this->cachePort
->peerBlockSize();
456 //The size of the data we're trying to read.
458 inst
->totalSize
= size
;
460 if (inst
->traceData
) {
461 inst
->traceData
->setAddr(addr
);
464 if (inst
->split2ndAccess
) {
465 size
= inst
->split2ndSize
;
466 cache_req
->splitAccess
= true;
467 cache_req
->split2ndAccess
= true;
469 DPRINTF(InOrderCachePort
, "[sn:%i] Split Read Access (2 of 2) for "
470 "(%#x, %#x).\n", inst
->seqNum
, inst
->getMemAddr(),
475 //The address of the second part of this access if it needs to be split
476 //across a cache line boundary.
477 Addr secondAddr
= roundDown(addr
+ size
- 1, blockSize
);
480 if (secondAddr
> addr
&& !inst
->split2ndAccess
) {
481 DPRINTF(InOrderCachePort
, "%i: sn[%i] Split Read Access (1 of 2) for "
482 "(%#x, %#x).\n", curTick(), inst
->seqNum
, addr
, secondAddr
);
484 // Save All "Total" Split Information
485 // ==============================
486 inst
->splitInst
= true;
487 inst
->splitMemData
= new uint8_t[size
];
489 if (!inst
->splitInstSked
) {
490 assert(0 && "Split Requests Not Supported for Now...");
492 // Schedule Split Read/Complete for Instruction
493 // ==============================
494 int stage_num
= cache_req
->getStageNum();
496 int stage_pri
= ThePipeline::getNextPriority(inst
, stage_num
);
498 int isplit_cmd
= CacheUnit::InitSecondSplitRead
;
499 inst
->resSched
.push(new
500 ScheduleEntry(stage_num
,
502 cpu
->resPool
->getResIdx(DCache
),
506 int csplit_cmd
= CacheUnit::CompleteSecondSplitRead
;
507 inst
->resSched
.push(new
508 ScheduleEntry(stage_num
+ 1,
510 cpu
->resPool
->getResIdx(DCache
),
513 inst
->splitInstSked
= true;
515 DPRINTF(InOrderCachePort
, "[tid:%i] [sn:%i] Retrying Split Read "
516 "Access (1 of 2) for (%#x, %#x).\n", inst
->readTid(),
517 inst
->seqNum
, addr
, secondAddr
);
520 // Split Information for First Access
521 // ==============================
522 size
= secondAddr
- addr
;
523 cache_req
->splitAccess
= true;
525 // Split Information for Second Access
526 // ==============================
527 inst
->split2ndSize
= addr
+ fullSize
- secondAddr
;
528 inst
->split2ndAddr
= secondAddr
;
529 inst
->split2ndDataPtr
= inst
->splitMemData
+ size
;
530 inst
->split2ndFlags
= flags
;
533 doTLBAccess(inst
, cache_req
, size
, flags
, TheISA::TLB::Read
);
535 if (inst
->fault
== NoFault
) {
536 if (!cache_req
->splitAccess
) {
537 cache_req
->reqData
= new uint8_t[size
];
538 doCacheAccess(inst
, NULL
);
540 if (!inst
->split2ndAccess
) {
541 cache_req
->reqData
= inst
->splitMemData
;
543 cache_req
->reqData
= inst
->split2ndDataPtr
;
546 doCacheAccess(inst
, NULL
, cache_req
);
554 CacheUnit::write(DynInstPtr inst
, uint8_t *data
, unsigned size
,
555 Addr addr
, unsigned flags
, uint64_t *write_res
)
557 CacheReqPtr cache_req
= dynamic_cast<CacheReqPtr
>(findRequest(inst
));
558 assert(cache_req
&& "Can't Find Instruction for Write!");
560 // The block size of our peer
561 unsigned blockSize
= this->cachePort
->peerBlockSize();
563 //The size of the data we're trying to write.
565 inst
->totalSize
= size
;
567 if (inst
->traceData
) {
568 inst
->traceData
->setAddr(addr
);
571 if (inst
->split2ndAccess
) {
572 size
= inst
->split2ndSize
;
573 cache_req
->splitAccess
= true;
574 cache_req
->split2ndAccess
= true;
576 DPRINTF(InOrderCachePort
, "[sn:%i] Split Write Access (2 of 2) for "
577 "(%#x, %#x).\n", inst
->seqNum
, inst
->getMemAddr(),
581 //The address of the second part of this access if it needs to be split
582 //across a cache line boundary.
583 Addr secondAddr
= roundDown(addr
+ size
- 1, blockSize
);
585 if (secondAddr
> addr
&& !inst
->split2ndAccess
) {
587 DPRINTF(InOrderCachePort
, "[sn:%i] Split Write Access (1 of 2) for "
588 "(%#x, %#x).\n", inst
->seqNum
, addr
, secondAddr
);
590 // Save All "Total" Split Information
591 // ==============================
592 inst
->splitInst
= true;
594 if (!inst
->splitInstSked
) {
595 assert(0 && "Split Requests Not Supported for Now...");
597 // Schedule Split Read/Complete for Instruction
598 // ==============================
599 int stage_num
= cache_req
->getStageNum();
601 int stage_pri
= ThePipeline::getNextPriority(inst
, stage_num
);
603 int isplit_cmd
= CacheUnit::InitSecondSplitWrite
;
604 inst
->resSched
.push(new
605 ScheduleEntry(stage_num
,
607 cpu
->resPool
->getResIdx(DCache
),
611 int csplit_cmd
= CacheUnit::CompleteSecondSplitWrite
;
612 inst
->resSched
.push(new
613 ScheduleEntry(stage_num
+ 1,
615 cpu
->resPool
->getResIdx(DCache
),
618 inst
->splitInstSked
= true;
620 DPRINTF(InOrderCachePort
, "[tid:%i] sn:%i] Retrying Split Read "
621 "Access (1 of 2) for (%#x, %#x).\n",
622 inst
->readTid(), inst
->seqNum
, addr
, secondAddr
);
627 // Split Information for First Access
628 // ==============================
629 size
= secondAddr
- addr
;
630 cache_req
->splitAccess
= true;
632 // Split Information for Second Access
633 // ==============================
634 inst
->split2ndSize
= addr
+ fullSize
- secondAddr
;
635 inst
->split2ndAddr
= secondAddr
;
636 inst
->split2ndStoreDataPtr
= &cache_req
->inst
->storeData
;
637 inst
->split2ndStoreDataPtr
+= size
;
638 inst
->split2ndFlags
= flags
;
639 inst
->splitInstSked
= true;
642 doTLBAccess(inst
, cache_req
, size
, flags
, TheISA::TLB::Write
);
644 if (inst
->fault
== NoFault
) {
645 if (!cache_req
->splitAccess
) {
646 // Remove this line since storeData is saved in INST?
647 cache_req
->reqData
= new uint8_t[size
];
648 doCacheAccess(inst
, write_res
);
650 doCacheAccess(inst
, write_res
, cache_req
);
660 CacheUnit::execute(int slot_num
)
662 CacheReqPtr cache_req
= dynamic_cast<CacheReqPtr
>(reqMap
[slot_num
]);
665 if (cachePortBlocked
) {
666 DPRINTF(InOrderCachePort
, "Cache Port Blocked. Cannot Access\n");
667 cache_req
->setCompleted(false);
672 DynInstPtr inst
= cache_req
->inst
;
674 ThreadID tid
= inst
->readTid();
675 std::string acc_type
= "write";
678 inst
->fault
= NoFault
;
680 switch (cache_req
->cmd
)
683 case InitiateReadData
:
687 case InitiateWriteData
:
689 DPRINTF(InOrderCachePort
,
690 "[tid:%u]: [sn:%i] Initiating data %s access to %s for "
691 "addr. %08p\n", tid
, inst
->seqNum
, acc_type
, name(),
692 cache_req
->inst
->getMemAddr());
694 inst
->setCurResSlot(slot_num
);
696 if (inst
->isDataPrefetch() || inst
->isInstPrefetch()) {
704 case InitSecondSplitRead
:
705 DPRINTF(InOrderCachePort
,
706 "[tid:%u]: [sn:%i] Initiating split data read access to %s "
707 "for addr. %08p\n", tid
, inst
->seqNum
, name(),
708 cache_req
->inst
->split2ndAddr
);
709 inst
->split2ndAccess
= true;
710 assert(inst
->split2ndAddr
!= 0);
711 read(inst
, inst
->split2ndAddr
, &inst
->split2ndData
,
712 inst
->totalSize
, inst
->split2ndFlags
);
715 case InitSecondSplitWrite
:
716 DPRINTF(InOrderCachePort
,
717 "[tid:%u]: [sn:%i] Initiating split data write access to %s "
718 "for addr. %08p\n", tid
, inst
->seqNum
, name(),
719 cache_req
->inst
->getMemAddr());
721 inst
->split2ndAccess
= true;
722 assert(inst
->split2ndAddr
!= 0);
723 write(inst
, &inst
->split2ndData
, inst
->totalSize
,
724 inst
->split2ndAddr
, inst
->split2ndFlags
, NULL
);
727 case CompleteReadData
:
728 case CompleteWriteData
:
729 DPRINTF(InOrderCachePort
,
730 "[tid:%i]: [sn:%i]: Trying to Complete Data Access\n",
733 if (cache_req
->isMemAccComplete() ||
734 inst
->isDataPrefetch() ||
735 inst
->isInstPrefetch()) {
736 removeAddrDependency(inst
);
737 cache_req
->setMemStall(false);
740 DPRINTF(InOrderStall
, "STALL: [tid:%i]: Data miss from %08p\n",
741 tid
, cache_req
->inst
->getMemAddr());
742 cache_req
->setCompleted(false);
743 cache_req
->setMemStall(true);
747 case CompleteSecondSplitRead
:
748 DPRINTF(InOrderCachePort
,
749 "[tid:%i]: [sn:%i]: Trying to Complete Split Data Read "
750 "Access\n", tid
, inst
->seqNum
);
752 if (cache_req
->isMemAccComplete() ||
753 inst
->isDataPrefetch() ||
754 inst
->isInstPrefetch()) {
755 removeAddrDependency(inst
);
756 cache_req
->setMemStall(false);
759 DPRINTF(InOrderStall
, "STALL: [tid:%i]: Data miss from %08p\n",
760 tid
, cache_req
->inst
->split2ndAddr
);
761 cache_req
->setCompleted(false);
762 cache_req
->setMemStall(true);
766 case CompleteSecondSplitWrite
:
767 DPRINTF(InOrderCachePort
,
768 "[tid:%i]: [sn:%i]: Trying to Complete Split Data Write "
769 "Access\n", tid
, inst
->seqNum
);
771 if (cache_req
->isMemAccComplete() ||
772 inst
->isDataPrefetch() ||
773 inst
->isInstPrefetch()) {
774 removeAddrDependency(inst
);
775 cache_req
->setMemStall(false);
778 DPRINTF(InOrderStall
, "STALL: [tid:%i]: Data miss from %08p\n",
779 tid
, cache_req
->inst
->split2ndAddr
);
780 cache_req
->setCompleted(false);
781 cache_req
->setMemStall(true);
786 fatal("Unrecognized command to %s", resName
);
790 // @TODO: Split into doCacheRead() and doCacheWrite()
792 CacheUnit::doCacheAccess(DynInstPtr inst
, uint64_t *write_res
,
793 CacheReqPtr split_req
)
795 Fault fault
= NoFault
;
797 ThreadID tid
= inst
->readTid();
800 CacheReqPtr cache_req
;
802 if (split_req
== NULL
) {
803 cache_req
= dynamic_cast<CacheReqPtr
>(reqMap
[inst
->getCurResSlot()]);
805 cache_req
= split_req
;
810 // Check for LL/SC and if so change command
811 if (cache_req
->memReq
->isLLSC() && cache_req
->pktCmd
== MemCmd::ReadReq
) {
812 cache_req
->pktCmd
= MemCmd::LoadLockedReq
;
815 if (cache_req
->pktCmd
== MemCmd::WriteReq
) {
817 cache_req
->memReq
->isSwap() ? MemCmd::SwapReq
:
818 (cache_req
->memReq
->isLLSC() ? MemCmd::StoreCondReq
822 cache_req
->dataPkt
= new CacheReqPacket(cache_req
,
827 if (cache_req
->dataPkt
->isRead()) {
828 cache_req
->dataPkt
->dataStatic(cache_req
->reqData
);
829 } else if (cache_req
->dataPkt
->isWrite()) {
830 if (inst
->split2ndAccess
) {
831 cache_req
->dataPkt
->dataStatic(inst
->split2ndStoreDataPtr
);
833 cache_req
->dataPkt
->dataStatic(&cache_req
->inst
->storeData
);
836 if (cache_req
->memReq
->isCondSwap()) {
838 cache_req
->memReq
->setExtraData(*write_res
);
842 bool do_access
= true; // flag to suppress cache access
844 Request
*memReq
= cache_req
->dataPkt
->req
;
846 if (cache_req
->dataPkt
->isWrite() && cache_req
->memReq
->isLLSC()) {
847 assert(cache_req
->inst
->isStoreConditional());
848 DPRINTF(InOrderCachePort
, "Evaluating Store Conditional access\n");
849 do_access
= TheISA::handleLockedWrite(cpu
, memReq
);
852 DPRINTF(InOrderCachePort
,
853 "[tid:%i] [sn:%i] attempting to access cache for addr %08p\n",
854 tid
, inst
->seqNum
, cache_req
->dataPkt
->getAddr());
857 if (!cachePort
->sendTiming(cache_req
->dataPkt
)) {
858 DPRINTF(InOrderCachePort
,
859 "[tid:%i] [sn:%i] cannot access cache, because port "
860 "is blocked. now waiting to retry request\n", tid
,
862 cache_req
->setCompleted(false);
863 cachePortBlocked
= true;
865 DPRINTF(InOrderCachePort
,
866 "[tid:%i] [sn:%i] is now waiting for cache response\n",
868 cache_req
->setCompleted();
869 cache_req
->setMemAccPending();
870 cachePortBlocked
= false;
872 } else if (!do_access
&& memReq
->isLLSC()){
873 // Store-Conditional instructions complete even if they "failed"
874 assert(cache_req
->inst
->isStoreConditional());
875 cache_req
->setCompleted(true);
878 "[tid:%i]: T%i Ignoring Failed Store Conditional Access\n",
881 processCacheCompletion(cache_req
->dataPkt
);
883 // Make cache request again since access due to
884 // inability to access
885 DPRINTF(InOrderStall
, "STALL: \n");
886 cache_req
->setCompleted(false);
892 CacheUnit::processCacheCompletion(PacketPtr pkt
)
894 // Cast to correct packet type
895 CacheReqPacket
* cache_pkt
= dynamic_cast<CacheReqPacket
*>(pkt
);
899 if (cache_pkt
->cacheReq
->isSquashed()) {
900 DPRINTF(InOrderCachePort
,
901 "Ignoring completion of squashed access, [tid:%i] [sn:%i]\n",
902 cache_pkt
->cacheReq
->getInst()->readTid(),
903 cache_pkt
->cacheReq
->getInst()->seqNum
);
905 "Ignoring completion of squashed access, [tid:%i] [sn:%i]\n",
906 cache_pkt
->cacheReq
->getTid(),
907 cache_pkt
->cacheReq
->seqNum
);
909 cache_pkt
->cacheReq
->done();
917 DPRINTF(InOrderCachePort
,
918 "[tid:%u]: [sn:%i]: Waking from cache access to addr. %08p\n",
919 cache_pkt
->cacheReq
->getInst()->readTid(),
920 cache_pkt
->cacheReq
->getInst()->seqNum
,
921 cache_pkt
->cacheReq
->getInst()->getMemAddr());
923 // Cast to correct request type
924 CacheRequest
*cache_req
= dynamic_cast<CacheReqPtr
>(
925 findRequest(cache_pkt
->cacheReq
->getInst(), cache_pkt
->instIdx
));
928 panic("[tid:%u]: [sn:%i]: Can't find slot for cache access to "
929 "addr. %08p\n", cache_pkt
->cacheReq
->getInst()->readTid(),
930 cache_pkt
->cacheReq
->getInst()->seqNum
,
931 cache_pkt
->cacheReq
->getInst()->getMemAddr());
937 // Get resource request info
938 unsigned stage_num
= cache_req
->getStageNum();
939 DynInstPtr inst
= cache_req
->inst
;
940 ThreadID tid
= cache_req
->inst
->readTid();
942 if (!cache_req
->isSquashed()) {
943 if (inst
->staticInst
&& inst
->isMemRef()) {
944 DPRINTF(InOrderCachePort
,
945 "[tid:%u]: [sn:%i]: Processing cache access\n",
947 PacketPtr dataPkt
= NULL
;
949 if (inst
->splitInst
) {
950 inst
->splitFinishCnt
++;
952 if (inst
->splitFinishCnt
== 2) {
953 cache_req
->memReq
->setVirt(0/*inst->tid*/,
959 Packet
split_pkt(cache_req
->memReq
, cache_req
->pktCmd
,
963 if (inst
->isLoad()) {
964 split_pkt
.dataStatic(inst
->splitMemData
);
966 split_pkt
.dataStatic(&inst
->storeData
);
969 dataPkt
= &split_pkt
;
974 inst
->completeAcc(dataPkt
);
976 if (inst
->isLoad()) {
977 assert(cache_pkt
->isRead());
979 if (cache_pkt
->req
->isLLSC()) {
980 DPRINTF(InOrderCachePort
,
981 "[tid:%u]: Handling Load-Linked for [sn:%u]\n",
983 TheISA::handleLockedRead(cpu
, cache_pkt
->req
);
986 DPRINTF(InOrderCachePort
,
987 "[tid:%u]: [sn:%i]: Bytes loaded were: %s\n",
989 printMemData(dataPkt
->getPtr
<uint8_t>(),
990 dataPkt
->getSize()));
991 } else if(inst
->isStore()) {
992 assert(cache_pkt
->isWrite());
994 DPRINTF(InOrderCachePort
,
995 "[tid:%u]: [sn:%i]: Bytes stored were: %s\n",
997 printMemData(dataPkt
->getPtr
<uint8_t>(),
998 dataPkt
->getSize()));
1004 cache_req
->setMemAccPending(false);
1005 cache_req
->setMemAccCompleted();
1007 if (cache_req
->isMemStall() &&
1008 cpu
->threadModel
== InOrderCPU::SwitchOnCacheMiss
) {
1009 DPRINTF(InOrderCachePort
, "[tid:%u] Waking up from Cache Miss.\n",
1012 cpu
->activateContext(tid
);
1014 DPRINTF(ThreadModel
, "Activating [tid:%i] after return from cache"
1018 // Wake up the CPU (if it went to sleep and was waiting on this
1019 // completion event).
1022 DPRINTF(Activity
, "[tid:%u] Activating %s due to cache completion\n",
1023 tid
, cpu
->pipelineStage
[stage_num
]->name());
1025 cpu
->switchToActive(stage_num
);
1027 DPRINTF(InOrderCachePort
,
1028 "[tid:%u] Miss on block @ %08p completed, but squashed\n",
1029 tid
, cache_req
->inst
->instAddr());
1030 cache_req
->setMemAccCompleted();
1035 CacheUnit::recvRetry()
1037 DPRINTF(InOrderCachePort
, "Unblocking Cache Port. \n");
1039 assert(cachePortBlocked
);
1041 // Clear the cache port for use again
1042 cachePortBlocked
= false;
1047 CacheUnitEvent::CacheUnitEvent()
// Fired when a modeled TLB-miss latency expires: unblock the thread's
// TLB, release the pipeline-stage stall tied to this slot's request,
// and clear the request's tlbStall flag so it can be replayed.
// NOTE(review): mangled extraction; lines after the isSquashed() check
// are missing from view, so the code below is left byte-identical.
1052 CacheUnitEvent::process()
1054 DynInstPtr inst
= resource
->reqMap
[slotIdx
]->inst
;
1055 int stage_num
= resource
->reqMap
[slotIdx
]->getStageNum();
1056 ThreadID tid
= inst
->threadNumber
;
1057 CacheReqPtr req_ptr
= dynamic_cast<CacheReqPtr
>(resource
->reqMap
[slotIdx
]);
1059 DPRINTF(InOrderTLB
, "Waking up from TLB Miss caused by [sn:%i].\n",
// The owning resource is a CacheUnit; recover it to reach tlbBlocked.
1062 CacheUnit
* tlb_res
= dynamic_cast<CacheUnit
*>(resource
);
1065 tlb_res
->tlbBlocked
[tid
] = false;
// Release the stage stall that doTLBAccess() set for this request.
1067 tlb_res
->cpu
->pipelineStage
[stage_num
]->
1068 unsetResStall(tlb_res
->reqMap
[slotIdx
], tid
);
1070 req_ptr
->tlbStall
= false;
1072 if (req_ptr
->isSquashed()) {
1078 CacheUnit::squashDueToMemStall(DynInstPtr inst
, int stage_num
,
1079 InstSeqNum squash_seq_num
, ThreadID tid
)
1081 // If squashing due to memory stall, then we do NOT want to
1082 // squash the instruction that caused the stall so we
1083 // increment the sequence number here to prevent that.
1085 // NOTE: This is only for the SwitchOnCacheMiss Model
1086 // NOTE: If you have multiple outstanding misses from the same
1087 // thread then you need to reevaluate this code
1088 // NOTE: squash should originate from
1089 // pipeline_stage.cc:processInstSchedule
1090 DPRINTF(InOrderCachePort
, "Squashing above [sn:%u]\n",
1091 squash_seq_num
+ 1);
1093 squash(inst
, stage_num
, squash_seq_num
+ 1, tid
);
// Mark a cache request (and its instruction) squashed, and drop the
// instruction's address-dependency entry if it had a valid memory
// address recorded.
// NOTE(review): mangled extraction; closing lines of this definition
// are missing from view, so the code below is left byte-identical.
1097 CacheUnit::squashCacheRequest(CacheReqPtr req_ptr
)
1099 DynInstPtr inst
= req_ptr
->getInst();
1101 req_ptr
->setSquashed();
1102 inst
->setSquashed();
// Only insts that registered an address have a dependency to remove.
1103 if (inst
->validMemAddr()) {
1104 DPRINTF(AddrDep
, "Squash of [tid:%i] [sn:%i], attempting to "
1105 "remove addr. %08p dependencies.\n",
1108 inst
->getMemAddr());
1110 removeAddrDependency(inst
);
1116 CacheUnit::squash(DynInstPtr inst
, int stage_num
,
1117 InstSeqNum squash_seq_num
, ThreadID tid
)
1119 vector
<int> slot_remove_list
;
1121 map
<int, ResReqPtr
>::iterator map_it
= reqMap
.begin();
1122 map
<int, ResReqPtr
>::iterator map_end
= reqMap
.end();
1124 while (map_it
!= map_end
) {
1125 ResReqPtr req_ptr
= (*map_it
).second
;
1128 req_ptr
->getInst()->readTid() == tid
&&
1129 req_ptr
->getInst()->seqNum
> squash_seq_num
) {
1131 DPRINTF(InOrderCachePort
,
1132 "[tid:%i] Squashing request from [sn:%i]\n",
1133 req_ptr
->getInst()->readTid(), req_ptr
->getInst()->seqNum
);
1135 if (req_ptr
->isSquashed()) {
1136 DPRINTF(AddrDep
, "Request for [tid:%i] [sn:%i] already "
1137 "squashed, ignoring squash process.\n",
1138 req_ptr
->getInst()->readTid(),
1139 req_ptr
->getInst()->seqNum
);
1144 CacheReqPtr cache_req
= dynamic_cast<CacheReqPtr
>(req_ptr
);
1147 squashCacheRequest(cache_req
);
1149 int req_slot_num
= req_ptr
->getSlot();
1151 if (cache_req
->tlbStall
) {
1152 tlbBlocked
[tid
] = false;
1154 int stall_stage
= reqMap
[req_slot_num
]->getStageNum();
1156 cpu
->pipelineStage
[stall_stage
]->
1157 unsetResStall(reqMap
[req_slot_num
], tid
);
1160 if (!cache_req
->tlbStall
&& !cache_req
->isMemAccPending()) {
1161 // Mark request for later removal
1162 cpu
->reqRemoveList
.push(req_ptr
);
1164 // Mark slot for removal from resource
1165 slot_remove_list
.push_back(req_ptr
->getSlot());
1167 DPRINTF(InOrderCachePort
,
1168 "[tid:%i] Request from [sn:%i] squashed, but still "
1169 "pending completion.\n",
1170 req_ptr
->getInst()->readTid(), req_ptr
->getInst()->seqNum
);
1172 "[tid:%i] Request from [sn:%i] squashed (split:%i), but "
1173 "still pending completion.\n",
1174 req_ptr
->getInst()->readTid(), req_ptr
->getInst()->seqNum
,
1175 req_ptr
->getInst()->splitInst
);
1183 // Now Delete Slot Entry from Req. Map
1184 for (int i
= 0; i
< slot_remove_list
.size(); i
++)
1185 freeSlot(slot_remove_list
[i
]);