2 * Copyright (c) 2007 MIPS Technologies, Inc.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 * Authors: Korey Sewell
35 #include "arch/isa_traits.hh"
36 #include "arch/locked_mem.hh"
37 #include "arch/utility.hh"
38 #include "arch/predecoder.hh"
39 #include "config/the_isa.hh"
40 #include "cpu/inorder/resources/cache_unit.hh"
41 #include "cpu/inorder/pipeline_traits.hh"
42 #include "cpu/inorder/cpu.hh"
43 #include "cpu/inorder/resource_pool.hh"
44 #include "mem/request.hh"
47 using namespace TheISA
;
48 using namespace ThePipeline
;
// Debug helper: renders `size` bytes of `data` as two-digit hex
// (via ccprintf into a stringstream) for use inside DPRINTF calls.
// NOTE(review): extraction-mangled fragment — the return type and the
// trailing return/closing lines are not visible in this view.
52 printMemData(uint8_t *data
, unsigned size
)
54 std::stringstream dataStr
;
55 for (unsigned pos
= 0; pos
< size
; pos
++) {
56 ccprintf(dataStr
, "%02x", data
[pos
]);
// Atomic-mode receive hook. This port is timing-only, so an atomic
// callback is a programming error and panics unconditionally.
63 CacheUnit::CachePort::recvAtomic(PacketPtr pkt
)
65 panic("CacheUnit::CachePort doesn't expect recvAtomic callback!");
// Functional-mode receive hook. Unsupported on this timing-only port;
// always panics.
70 CacheUnit::CachePort::recvFunctional(PacketPtr pkt
)
72 panic("CacheUnit::CachePort doesn't expect recvFunctional callback!");
// Status-change hook: RangeChange notifications are tolerated
// (presumably ignored — the body of that branch is not visible here);
// any other status panics.
76 CacheUnit::CachePort::recvStatusChange(Status status
)
78 if (status
== RangeChange
)
81 panic("CacheUnit::CachePort doesn't expect recvStatusChange callback!");
// Timing response from the memory system: forward the packet to the
// owning CacheUnit for completion processing.
85 CacheUnit::CachePort::recvTiming(Packet
*pkt
)
87 cachePortUnit
->processCacheCompletion(pkt
);
// Retry notification from the memory system: delegate to the owning
// CacheUnit so it can unblock and resend.
92 CacheUnit::CachePort::recvRetry()
94 cachePortUnit
->recvRetry();
// Constructor: builds the cache resource, allocates its CachePort,
// selects icache vs. dcache behavior by the resource *name* (the
// hard-coded selection noted below), and clears the per-thread
// TLB-blocked flags.
// NOTE(review): the bodies of the icache/dcache branches are missing
// from this mangled view; only the fatal() fallback is visible.
97 CacheUnit::CacheUnit(string res_name
, int res_id
, int res_width
,
98 int res_latency
, InOrderCPU
*_cpu
, ThePipeline::Params
*params
)
99 : Resource(res_name
, res_id
, res_width
, res_latency
, _cpu
),
100 cachePortBlocked(false)
102 cachePort
= new CachePort(this);
104 // Hard-Code Selection For Now
105 if (res_name
== "icache_port")
107 else if (res_name
== "dcache_port")
110 fatal("Unrecognized TLB name passed by user");
// Port lookup by name: returns this unit's port when `if_name`
// matches the resource name.
// NOTE(review): the return statements of both paths are missing from
// this mangled view.
125 CacheUnit::getPort(const string
&if_name
, int idx
)
127 if (if_name
== resName
)
// Headerless fragment — presumably the body of CacheUnit::init()
// (the signature line is missing from this view): allocates one
// CacheRequest per slot, the per-slot CacheUnitEvents used to model
// TLB latency, and caches the peer block size/mask.
136 for (int i
= 0; i
< width
; i
++) {
137 reqs
[i
] = new CacheRequest(this);
140 // Currently Used to Model TLB Latency. Eventually
141 // Switch to Timing TLB translations.
142 resourceEvent
= new CacheUnitEvent
[width
];
144 cacheBlkSize
= this->cachePort
->peerBlockSize();
145 cacheBlkMask
= cacheBlkSize
- 1;
// Slot arbitration: refuses while the thread's TLB is blocked, then
// grants a slot only if this is the icache or no other outstanding
// access targets the same address (addrList). A granted slot records
// the request time and registers the address dependency. A repeat
// request by the very same [sn] for the same address (squash/replay)
// is also allowed; anything else is denied with a trace message.
// NOTE(review): return statements are missing from this mangled view.
151 CacheUnit::getSlot(DynInstPtr inst
)
153 ThreadID tid
= inst
->readTid();
155 if (tlbBlocked
[inst
->threadNumber
]) {
159 // For a Split-Load, the instruction would have processed once already
160 // causing the address to be unset.
161 if (!inst
->validMemAddr() && !inst
->splitInst
) {
162 panic("[tid:%i][sn:%i] Mem. Addr. must be set before requesting "
163 "cache access\n", inst
->readTid(), inst
->seqNum
);
166 Addr req_addr
= inst
->getMemAddr();
168 if (resName
== "icache_port" ||
169 find(addrList
[tid
].begin(), addrList
[tid
].end(), req_addr
) ==
170 addrList
[tid
].end()) {
172 int new_slot
= Resource::getSlot(inst
);
177 inst
->memTime
= curTick();
178 setAddrDependency(inst
);
181 // Allow same instruction multiple accesses to same address
182 // should only happen maybe after a squashed inst. needs to replay
183 if (addrMap
[tid
][req_addr
] == inst
->seqNum
) {
184 int new_slot
= Resource::getSlot(inst
);
191 DPRINTF(InOrderCachePort
,
192 "[tid:%i] Denying request because there is an outstanding"
193 " request to/for addr. %08p. by [sn:%i] @ tick %i\n",
194 inst
->readTid(), req_addr
, addrMap
[tid
][req_addr
], inst
->memTime
);
// Records an address dependency for `inst`: appends the memory
// address to the per-thread addrList and maps addr -> seqNum in
// addrMap (the map doubles as a debugging aid, per the comments in
// removeAddrDependency).
203 CacheUnit::setAddrDependency(DynInstPtr inst
)
205 Addr req_addr
= inst
->getMemAddr();
206 ThreadID tid
= inst
->readTid();
208 addrList
[tid
].push_back(req_addr
);
209 addrMap
[tid
][req_addr
] = inst
->seqNum
;
212 "[tid:%i]: [sn:%i]: Address %08p added to dependency list (size=%i)\n",
213 inst
->readTid(), inst
->seqNum
, req_addr
, addrList
[tid
].size());
215 //@NOTE: 10 is an arbitrarily "high" number, but to be exact
216 // we would need to know the # of outstanding accesses
217 // a priori. Information like fetch width, stage width,
218 // fetch buffer, and the branch resolution stage would be
219 // useful for the icache_port. For the dcache port, the #
220 // of outstanding cache accesses (mshrs) would be a good
221 // sanity check here.
222 //assert(addrList[tid].size() < 10);
// Clears the address dependency installed by setAddrDependency():
// unsets the instruction's memory address, erases the address from
// the per-thread addrList (tolerated as absent only for split
// accesses — see the assert), and drops the addrMap entry.
226 CacheUnit::removeAddrDependency(DynInstPtr inst
)
228 ThreadID tid
= inst
->readTid();
230 Addr mem_addr
= inst
->getMemAddr();
232 inst
->unsetMemAddr();
234 // Erase from Address List
235 vector
<Addr
>::iterator vect_it
= find(addrList
[tid
].begin(),
238 assert(vect_it
!= addrList
[tid
].end() || inst
->splitInst
);
240 if (vect_it
!= addrList
[tid
].end()) {
242 "[tid:%i]: [sn:%i] Address %08p removed from dependency "
243 "list\n", inst
->readTid(), inst
->seqNum
, (*vect_it
));
245 addrList
[tid
].erase(vect_it
);
247 // Erase From Address Map (Used for Debugging)
248 addrMap
[tid
].erase(addrMap
[tid
].find(mem_addr
));
// Finds the slot whose valid CacheRequest belongs to `inst` AND
// matches the instruction's current schedule-entry index (so split
// accesses with multiple entries resolve to the right one).
// NOTE(review): the return statements are missing from this view.
255 CacheUnit::findRequest(DynInstPtr inst
)
257 for (int i
= 0; i
< width
; i
++) {
258 CacheRequest
* cache_req
=
259 dynamic_cast<CacheRequest
*>(reqs
[i
]);
262 if (cache_req
->valid
&&
263 cache_req
->getInst() == inst
&&
264 cache_req
->instIdx
== inst
->curSkedEntry
->idx
) {
// Overload of findRequest that matches against an explicit schedule
// index `idx` instead of the instruction's current entry — used when
// completing a packet that recorded its own instIdx.
273 CacheUnit::findRequest(DynInstPtr inst
, int idx
)
275 for (int i
= 0; i
< width
; i
++) {
276 CacheRequest
* cache_req
=
277 dynamic_cast<CacheRequest
*>(reqs
[i
]);
280 if (cache_req
->valid
&&
281 cache_req
->getInst() == inst
&&
282 cache_req
->instIdx
== idx
) {
// Builds/initializes the slot's CacheRequest for the instruction's
// current schedule entry: maps the resource command (Initiate*/
// InitSecondSplit*) onto a MemCmd (ReadReq/WriteReq), panics on an
// address-less instruction or an unexpected command, then fills the
// request via setRequest().
292 CacheUnit::getRequest(DynInstPtr inst
, int stage_num
, int res_idx
,
293 int slot_num
, unsigned cmd
)
295 ScheduleEntry
* sched_entry
= *inst
->curSkedEntry
;
296 CacheRequest
* cache_req
= dynamic_cast<CacheRequest
*>(reqs
[slot_num
]);
298 if (!inst
->validMemAddr()) {
299 panic("Mem. Addr. must be set before requesting cache access\n");
302 MemCmd::Command pkt_cmd
;
304 switch (sched_entry
->cmd
)
306 case InitSecondSplitRead
:
307 pkt_cmd
= MemCmd::ReadReq
;
309 DPRINTF(InOrderCachePort
,
310 "[tid:%i]: Read request from [sn:%i] for addr %08p\n",
311 inst
->readTid(), inst
->seqNum
, inst
->split2ndAddr
);
314 case InitiateReadData
:
315 pkt_cmd
= MemCmd::ReadReq
;
317 DPRINTF(InOrderCachePort
,
318 "[tid:%i]: Read request from [sn:%i] for addr %08p\n",
319 inst
->readTid(), inst
->seqNum
, inst
->getMemAddr());
322 case InitSecondSplitWrite
:
323 pkt_cmd
= MemCmd::WriteReq
;
325 DPRINTF(InOrderCachePort
,
326 "[tid:%i]: Write request from [sn:%i] for addr %08p\n",
327 inst
->readTid(), inst
->seqNum
, inst
->split2ndAddr
);
330 case InitiateWriteData
:
331 pkt_cmd
= MemCmd::WriteReq
;
333 DPRINTF(InOrderCachePort
,
334 "[tid:%i]: Write request from [sn:%i] for addr %08p\n",
335 inst
->readTid(), inst
->seqNum
, inst
->getMemAddr());
339 panic("%i: Unexpected request type (%i) to %s", curTick(),
340 sched_entry
->cmd
, name());
343 cache_req
->setRequest(inst
, stage_num
, id
, slot_num
,
344 sched_entry
->cmd
, pkt_cmd
,
345 inst
->curSkedEntry
->idx
);
// Re-request path: if the instruction's current schedule entry asks
// for a different command than the live request (same instIdx), the
// request's command is updated in place; otherwise (and unless this
// is a second-split entry) the resource is simply requested again.
// Both paths set the out-param service_request = true.
// NOTE(review): the else-if compares curSkedEntry->idx against the
// command enumerators InitSecondSplitRead/Write — comparing an
// *index* to *command* values looks like a latent bug; confirm
// against upstream before relying on this branch.
350 CacheUnit::requestAgain(DynInstPtr inst
, bool &service_request
)
352 CacheReqPtr cache_req
= dynamic_cast<CacheReqPtr
>(findRequest(inst
));
355 // Check to see if this instruction is requesting the same command
356 // or a different one
357 if (cache_req
->cmd
!= inst
->curSkedEntry
->cmd
&&
358 cache_req
->instIdx
== inst
->curSkedEntry
->idx
) {
359 // If different, then update command in the request
360 cache_req
->cmd
= inst
->curSkedEntry
->cmd
;
361 DPRINTF(InOrderCachePort
,
362 "[tid:%i]: [sn:%i]: Updating the command for this "
363 "instruction\n ", inst
->readTid(), inst
->seqNum
);
365 service_request
= true;
366 } else if (inst
->curSkedEntry
->idx
!= CacheUnit::InitSecondSplitRead
&&
367 inst
->curSkedEntry
->idx
!= CacheUnit::InitSecondSplitWrite
) {
368 // If same command, just check to see if memory access was completed
369 // but dont try to re-execute
370 DPRINTF(InOrderCachePort
,
371 "[tid:%i]: [sn:%i]: requesting this resource again\n",
372 inst
->readTid(), inst
->seqNum
);
374 service_request
= true;
// Builds the memory-system Request object for this access. First
// (non-split) part: a fresh Request stored as inst->dataMemReq; the
// second half of a split access instead allocates inst->splitMemReq.
// Either way, cache_req->memReq ends up pointing at the chosen
// Request.
// NOTE(review): several constructor-argument lines are missing from
// this mangled view.
379 CacheUnit::setupMemRequest(DynInstPtr inst
, CacheReqPtr cache_req
,
380 int acc_size
, int flags
)
382 ThreadID tid
= inst
->readTid();
383 Addr aligned_addr
= inst
->getMemAddr();
385 if (!cache_req
->is2ndSplit()) {
387 new Request(cpu
->asid
[tid
], aligned_addr
, acc_size
, flags
,
388 inst
->instAddr(), cpu
->readCpuId(),
390 cache_req
->memReq
= inst
->dataMemReq
;
392 assert(inst
->splitInst
);
394 inst
->splitMemReq
= new Request(cpu
->asid
[tid
],
401 cache_req
->memReq
= inst
->splitMemReq
;
// Performs the (atomic) TLB translation for the access. On a fault:
// stalls the originating pipeline stage on this request, marks the
// thread TLB-blocked, flags the request as tlbStall, schedules the
// wake-up CacheUnitEvent one cycle out, and raises the trap on the
// CPU. On success just traces the virt->phys mapping.
406 CacheUnit::doTLBAccess(DynInstPtr inst
, CacheReqPtr cache_req
, int acc_size
,
407 int flags
, TheISA::TLB::Mode tlb_mode
)
409 ThreadID tid
= inst
->readTid();
410 //Addr aligned_addr = inst->getMemAddr();
411 unsigned stage_num
= cache_req
->getStageNum();
412 unsigned slot_idx
= cache_req
->getSlot();
414 setupMemRequest(inst
, cache_req
, acc_size
, flags
);
417 _tlb
->translateAtomic(cache_req
->memReq
,
418 cpu
->thread
[tid
]->getTC(), tlb_mode
);
420 if (inst
->fault
!= NoFault
) {
421 DPRINTF(InOrderTLB
, "[tid:%i]: %s encountered while translating "
422 "addr:%08p for [sn:%i].\n", tid
, inst
->fault
->name(),
423 cache_req
->memReq
->getVaddr(), inst
->seqNum
);
425 cpu
->pipelineStage
[stage_num
]->setResStall(cache_req
, tid
);
427 tlbBlocked
[tid
] = true;
429 cache_req
->tlbStall
= true;
431 scheduleEvent(slot_idx
, 1);
433 cpu
->trap(inst
->fault
, tid
, inst
);
435 DPRINTF(InOrderTLB
, "[tid:%i]: [sn:%i] virt. addr %08p translated "
436 "to phys. addr:%08p.\n", tid
, inst
->seqNum
,
437 cache_req
->memReq
->getVaddr(),
438 cache_req
->memReq
->getPaddr());
// Timing read entry point. Handles three shapes of access:
//  * second half of a split load (split2ndAccess): shrink `size` to
//    the saved second-half size and mark the request as split;
//  * a read that crosses a cache-line boundary (secondAddr > addr):
//    record all split bookkeeping on the instruction (split sizes,
//    second address/data pointer/flags) and — first time only —
//    schedule InitSecondSplitRead/CompleteSecondSplitRead entries
//    (currently asserted unsupported);
//  * a plain read.
// Then translates via doTLBAccess() and, if no fault, issues the
// cache access with the appropriate data buffer.
// NOTE(review): `fullSize` is read at line 528 but its assignment is
// missing from this mangled view — presumably the pre-split `size`.
444 CacheUnit::read(DynInstPtr inst
, Addr addr
,
445 uint8_t *data
, unsigned size
, unsigned flags
)
447 CacheReqPtr cache_req
= dynamic_cast<CacheReqPtr
>(findRequest(inst
));
448 assert(cache_req
&& "Can't Find Instruction for Read!");
450 // The block size of our peer
451 unsigned blockSize
= this->cachePort
->peerBlockSize();
453 //The size of the data we're trying to read.
455 inst
->totalSize
= size
;
457 if (inst
->traceData
) {
458 inst
->traceData
->setAddr(addr
);
461 if (inst
->split2ndAccess
) {
462 size
= inst
->split2ndSize
;
463 cache_req
->splitAccess
= true;
464 cache_req
->split2ndAccess
= true;
466 DPRINTF(InOrderCachePort
, "[sn:%i] Split Read Access (2 of 2) for "
467 "(%#x, %#x).\n", inst
->seqNum
, inst
->getMemAddr(),
472 //The address of the second part of this access if it needs to be split
473 //across a cache line boundary.
474 Addr secondAddr
= roundDown(addr
+ size
- 1, blockSize
);
477 if (secondAddr
> addr
&& !inst
->split2ndAccess
) {
478 DPRINTF(InOrderCachePort
, "%i: sn[%i] Split Read Access (1 of 2) for "
479 "(%#x, %#x).\n", curTick(), inst
->seqNum
, addr
, secondAddr
);
481 // Save All "Total" Split Information
482 // ==============================
483 inst
->splitInst
= true;
484 inst
->splitMemData
= new uint8_t[size
];
486 if (!inst
->splitInstSked
) {
487 assert(0 && "Split Requests Not Supported for Now...");
489 // Schedule Split Read/Complete for Instruction
490 // ==============================
491 int stage_num
= cache_req
->getStageNum();
492 RSkedPtr inst_sked
= (stage_num
>= ThePipeline::BackEndStartStage
) ?
493 inst
->backSked
: inst
->frontSked
;
495 // this is just an arbitrarily high priority to ensure that this
496 // gets pushed to the back of the list
499 int isplit_cmd
= CacheUnit::InitSecondSplitRead
;
501 ScheduleEntry(stage_num
,
503 cpu
->resPool
->getResIdx(DCache
),
507 int csplit_cmd
= CacheUnit::CompleteSecondSplitRead
;
509 ScheduleEntry(stage_num
+ 1,
511 cpu
->resPool
->getResIdx(DCache
),
514 inst
->splitInstSked
= true;
516 DPRINTF(InOrderCachePort
, "[tid:%i] [sn:%i] Retrying Split Read "
517 "Access (1 of 2) for (%#x, %#x).\n", inst
->readTid(),
518 inst
->seqNum
, addr
, secondAddr
);
521 // Split Information for First Access
522 // ==============================
523 size
= secondAddr
- addr
;
524 cache_req
->splitAccess
= true;
526 // Split Information for Second Access
527 // ==============================
528 inst
->split2ndSize
= addr
+ fullSize
- secondAddr
;
529 inst
->split2ndAddr
= secondAddr
;
530 inst
->split2ndDataPtr
= inst
->splitMemData
+ size
;
531 inst
->split2ndFlags
= flags
;
534 doTLBAccess(inst
, cache_req
, size
, flags
, TheISA::TLB::Read
);
536 if (inst
->fault
== NoFault
) {
537 if (!cache_req
->splitAccess
) {
538 cache_req
->reqData
= new uint8_t[size
];
539 doCacheAccess(inst
, NULL
);
541 if (!inst
->split2ndAccess
) {
542 cache_req
->reqData
= inst
->splitMemData
;
544 cache_req
->reqData
= inst
->split2ndDataPtr
;
547 doCacheAccess(inst
, NULL
, cache_req
);
// Timing write entry point — mirrors read() for stores: second half
// of a split store, first detection of a line-crossing store (split
// bookkeeping + scheduling of InitSecondSplitWrite /
// CompleteSecondSplitWrite, currently asserted unsupported), or a
// plain store. Second-half data is addressed through
// split2ndStoreDataPtr, offset into the instruction's storeData.
// Translation via doTLBAccess(TLB::Write), then doCacheAccess().
// NOTE(review): the retry DPRINTF at original line 625 says "Split
// Read" — copy-paste from read(); message only, behavior unaffected.
// NOTE(review): `fullSize` is read at line 639 but its assignment is
// missing from this mangled view.
555 CacheUnit::write(DynInstPtr inst
, uint8_t *data
, unsigned size
,
556 Addr addr
, unsigned flags
, uint64_t *write_res
)
558 CacheReqPtr cache_req
= dynamic_cast<CacheReqPtr
>(findRequest(inst
));
559 assert(cache_req
&& "Can't Find Instruction for Write!");
561 // The block size of our peer
562 unsigned blockSize
= this->cachePort
->peerBlockSize();
564 //The size of the data we're trying to write.
566 inst
->totalSize
= size
;
568 if (inst
->traceData
) {
569 inst
->traceData
->setAddr(addr
);
572 if (inst
->split2ndAccess
) {
573 size
= inst
->split2ndSize
;
574 cache_req
->splitAccess
= true;
575 cache_req
->split2ndAccess
= true;
577 DPRINTF(InOrderCachePort
, "[sn:%i] Split Write Access (2 of 2) for "
578 "(%#x, %#x).\n", inst
->seqNum
, inst
->getMemAddr(),
582 //The address of the second part of this access if it needs to be split
583 //across a cache line boundary.
584 Addr secondAddr
= roundDown(addr
+ size
- 1, blockSize
);
586 if (secondAddr
> addr
&& !inst
->split2ndAccess
) {
588 DPRINTF(InOrderCachePort
, "[sn:%i] Split Write Access (1 of 2) for "
589 "(%#x, %#x).\n", inst
->seqNum
, addr
, secondAddr
);
591 // Save All "Total" Split Information
592 // ==============================
593 inst
->splitInst
= true;
595 if (!inst
->splitInstSked
) {
596 assert(0 && "Split Requests Not Supported for Now...");
598 // Schedule Split Read/Complete for Instruction
599 // ==============================
600 int stage_num
= cache_req
->getStageNum();
601 RSkedPtr inst_sked
= (stage_num
>= ThePipeline::BackEndStartStage
) ?
602 inst
->backSked
: inst
->frontSked
;
604 // this is just an arbitrarily high priority to ensure that this
605 // gets pushed to the back of the list
608 int isplit_cmd
= CacheUnit::InitSecondSplitWrite
;
610 ScheduleEntry(stage_num
,
612 cpu
->resPool
->getResIdx(DCache
),
616 int csplit_cmd
= CacheUnit::CompleteSecondSplitWrite
;
618 ScheduleEntry(stage_num
+ 1,
620 cpu
->resPool
->getResIdx(DCache
),
623 inst
->splitInstSked
= true;
625 DPRINTF(InOrderCachePort
, "[tid:%i] sn:%i] Retrying Split Read "
626 "Access (1 of 2) for (%#x, %#x).\n",
627 inst
->readTid(), inst
->seqNum
, addr
, secondAddr
);
632 // Split Information for First Access
633 // ==============================
634 size
= secondAddr
- addr
;
635 cache_req
->splitAccess
= true;
637 // Split Information for Second Access
638 // ==============================
639 inst
->split2ndSize
= addr
+ fullSize
- secondAddr
;
640 inst
->split2ndAddr
= secondAddr
;
641 inst
->split2ndStoreDataPtr
= &cache_req
->inst
->storeData
;
642 inst
->split2ndStoreDataPtr
+= size
;
643 inst
->split2ndFlags
= flags
;
644 inst
->splitInstSked
= true;
647 doTLBAccess(inst
, cache_req
, size
, flags
, TheISA::TLB::Write
);
649 if (inst
->fault
== NoFault
) {
650 if (!cache_req
->splitAccess
) {
651 // Remove this line since storeData is saved in INST?
652 cache_req
->reqData
= new uint8_t[size
];
653 doCacheAccess(inst
, write_res
);
655 doCacheAccess(inst
, write_res
, cache_req
);
// Per-cycle dispatch for a slot's pending command. Bails early (and
// marks the request incomplete) while the cache port is blocked.
// Commands:
//  * Initiate{Read,Write}Data — record current slot and start the
//    access (bodies partly missing in this mangled view); prefetches
//    get special handling;
//  * InitSecondSplit{Read,Write} — kick the second half of a split
//    access through read()/write() using the saved split2nd* state;
//  * Complete*Data / CompleteSecondSplit* — if the memory access has
//    finished (or the inst is a prefetch), drop the address
//    dependency and clear the mem stall; otherwise record the stall
//    and mark the request incomplete;
//  * anything else — fatal.
665 CacheUnit::execute(int slot_num
)
667 CacheReqPtr cache_req
= dynamic_cast<CacheReqPtr
>(reqs
[slot_num
]);
670 if (cachePortBlocked
) {
671 DPRINTF(InOrderCachePort
, "Cache Port Blocked. Cannot Access\n");
672 cache_req
->setCompleted(false);
677 DynInstPtr inst
= cache_req
->inst
;
679 ThreadID tid
= inst
->readTid();
680 std::string acc_type
= "write";
683 inst
->fault
= NoFault
;
685 switch (cache_req
->cmd
)
688 case InitiateReadData
:
692 case InitiateWriteData
:
694 DPRINTF(InOrderCachePort
,
695 "[tid:%u]: [sn:%i] Initiating data %s access to %s for "
696 "addr. %08p\n", tid
, inst
->seqNum
, acc_type
, name(),
697 cache_req
->inst
->getMemAddr());
699 inst
->setCurResSlot(slot_num
);
701 if (inst
->isDataPrefetch() || inst
->isInstPrefetch()) {
709 case InitSecondSplitRead
:
710 DPRINTF(InOrderCachePort
,
711 "[tid:%u]: [sn:%i] Initiating split data read access to %s "
712 "for addr. %08p\n", tid
, inst
->seqNum
, name(),
713 cache_req
->inst
->split2ndAddr
);
714 inst
->split2ndAccess
= true;
715 assert(inst
->split2ndAddr
!= 0);
716 read(inst
, inst
->split2ndAddr
, &inst
->split2ndData
,
717 inst
->totalSize
, inst
->split2ndFlags
);
720 case InitSecondSplitWrite
:
721 DPRINTF(InOrderCachePort
,
722 "[tid:%u]: [sn:%i] Initiating split data write access to %s "
723 "for addr. %08p\n", tid
, inst
->seqNum
, name(),
724 cache_req
->inst
->getMemAddr());
726 inst
->split2ndAccess
= true;
727 assert(inst
->split2ndAddr
!= 0);
728 write(inst
, &inst
->split2ndData
, inst
->totalSize
,
729 inst
->split2ndAddr
, inst
->split2ndFlags
, NULL
);
732 case CompleteReadData
:
733 case CompleteWriteData
:
734 DPRINTF(InOrderCachePort
,
735 "[tid:%i]: [sn:%i]: Trying to Complete Data Access\n",
738 if (cache_req
->isMemAccComplete() ||
739 inst
->isDataPrefetch() ||
740 inst
->isInstPrefetch()) {
741 removeAddrDependency(inst
);
742 cache_req
->setMemStall(false);
745 DPRINTF(InOrderStall
, "STALL: [tid:%i]: Data miss from %08p\n",
746 tid
, cache_req
->inst
->getMemAddr());
747 cache_req
->setCompleted(false);
748 cache_req
->setMemStall(true);
752 case CompleteSecondSplitRead
:
753 DPRINTF(InOrderCachePort
,
754 "[tid:%i]: [sn:%i]: Trying to Complete Split Data Read "
755 "Access\n", tid
, inst
->seqNum
);
757 if (cache_req
->isMemAccComplete() ||
758 inst
->isDataPrefetch() ||
759 inst
->isInstPrefetch()) {
760 removeAddrDependency(inst
);
761 cache_req
->setMemStall(false);
764 DPRINTF(InOrderStall
, "STALL: [tid:%i]: Data miss from %08p\n",
765 tid
, cache_req
->inst
->split2ndAddr
);
766 cache_req
->setCompleted(false);
767 cache_req
->setMemStall(true);
771 case CompleteSecondSplitWrite
:
772 DPRINTF(InOrderCachePort
,
773 "[tid:%i]: [sn:%i]: Trying to Complete Split Data Write "
774 "Access\n", tid
, inst
->seqNum
);
776 if (cache_req
->isMemAccComplete() ||
777 inst
->isDataPrefetch() ||
778 inst
->isInstPrefetch()) {
779 removeAddrDependency(inst
);
780 cache_req
->setMemStall(false);
783 DPRINTF(InOrderStall
, "STALL: [tid:%i]: Data miss from %08p\n",
784 tid
, cache_req
->inst
->split2ndAddr
);
785 cache_req
->setCompleted(false);
786 cache_req
->setMemStall(true);
791 fatal("Unrecognized command to %s", resName
);
795 // @TODO: Split into doCacheRead() and doCacheWrite()
// Issues the actual timing cache access for a request (or the
// supplied split_req half). Upgrades the MemCmd for LL/SC and swap
// (ReadReq->LoadLockedReq; WriteReq->SwapReq/StoreCondReq as
// applicable), builds the CacheReqPacket, attaches the proper data
// buffer (reqData for reads; split2ndStoreDataPtr or inst storeData
// for writes), seeds conditional-swap extra data from *write_res,
// evaluates store-conditionals via handleLockedWrite (a failed SC
// completes without accessing the cache — its completion is driven
// through processCacheCompletion), and finally sendTiming()s the
// packet — on port refusal the request is left incomplete and the
// port marked blocked until recvRetry().
797 CacheUnit::doCacheAccess(DynInstPtr inst
, uint64_t *write_res
,
798 CacheReqPtr split_req
)
800 Fault fault
= NoFault
;
802 ThreadID tid
= inst
->readTid();
805 CacheReqPtr cache_req
;
807 if (split_req
== NULL
) {
808 cache_req
= dynamic_cast<CacheReqPtr
>(reqs
[inst
->getCurResSlot()]);
810 cache_req
= split_req
;
815 // Check for LL/SC and if so change command
816 if (cache_req
->memReq
->isLLSC() && cache_req
->pktCmd
== MemCmd::ReadReq
) {
817 cache_req
->pktCmd
= MemCmd::LoadLockedReq
;
820 if (cache_req
->pktCmd
== MemCmd::WriteReq
) {
822 cache_req
->memReq
->isSwap() ? MemCmd::SwapReq
:
823 (cache_req
->memReq
->isLLSC() ? MemCmd::StoreCondReq
827 cache_req
->dataPkt
= new CacheReqPacket(cache_req
,
832 if (cache_req
->dataPkt
->isRead()) {
833 cache_req
->dataPkt
->dataStatic(cache_req
->reqData
);
834 } else if (cache_req
->dataPkt
->isWrite()) {
835 if (inst
->split2ndAccess
) {
836 cache_req
->dataPkt
->dataStatic(inst
->split2ndStoreDataPtr
);
838 cache_req
->dataPkt
->dataStatic(&cache_req
->inst
->storeData
);
841 if (cache_req
->memReq
->isCondSwap()) {
843 cache_req
->memReq
->setExtraData(*write_res
);
847 bool do_access
= true; // flag to suppress cache access
849 Request
*memReq
= cache_req
->dataPkt
->req
;
851 if (cache_req
->dataPkt
->isWrite() && cache_req
->memReq
->isLLSC()) {
852 assert(cache_req
->inst
->isStoreConditional());
853 DPRINTF(InOrderCachePort
, "Evaluating Store Conditional access\n");
854 do_access
= TheISA::handleLockedWrite(cpu
, memReq
);
857 DPRINTF(InOrderCachePort
,
858 "[tid:%i] [sn:%i] attempting to access cache for addr %08p\n",
859 tid
, inst
->seqNum
, cache_req
->dataPkt
->getAddr());
862 if (!cachePort
->sendTiming(cache_req
->dataPkt
)) {
863 DPRINTF(InOrderCachePort
,
864 "[tid:%i] [sn:%i] cannot access cache, because port "
865 "is blocked. now waiting to retry request\n", tid
,
867 cache_req
->setCompleted(false);
868 cachePortBlocked
= true;
870 DPRINTF(InOrderCachePort
,
871 "[tid:%i] [sn:%i] is now waiting for cache response\n",
873 cache_req
->setCompleted();
874 cache_req
->setMemAccPending();
875 cachePortBlocked
= false;
877 } else if (!do_access
&& memReq
->isLLSC()){
878 // Store-Conditional instructions complete even if they "failed"
879 assert(cache_req
->inst
->isStoreConditional());
880 cache_req
->setCompleted(true);
883 "[tid:%i]: T%i Ignoring Failed Store Conditional Access\n",
886 processCacheCompletion(cache_req
->dataPkt
);
888 // Make cache request again since access due to
889 // inability to access
890 DPRINTF(InOrderStall
, "STALL: \n");
891 cache_req
->setCompleted(false);
// Completion handler for packets returned by the memory system (also
// called directly for failed store-conditionals). Squashed requests
// are traced, done() and dropped. Otherwise the live CacheRequest is
// re-located via findRequest(inst, instIdx) (panic if missing); for
// real memory references the access is finished: split accesses
// count halves via splitFinishCnt and rebuild a combined Packet when
// both are in, completeAcc() is called, LL reads are registered via
// handleLockedRead, loaded/stored bytes are traced, and the request
// is marked not-pending / mem-complete. Under SwitchOnCacheMiss a
// thread sleeping on this miss is re-activated, and the owning
// pipeline stage is switched active.
897 CacheUnit::processCacheCompletion(PacketPtr pkt
)
899 // Cast to correct packet type
900 CacheReqPacket
* cache_pkt
= dynamic_cast<CacheReqPacket
*>(pkt
);
904 if (cache_pkt
->cacheReq
->isSquashed()) {
905 DPRINTF(InOrderCachePort
,
906 "Ignoring completion of squashed access, [tid:%i] [sn:%i]\n",
907 cache_pkt
->cacheReq
->getInst()->readTid(),
908 cache_pkt
->cacheReq
->getInst()->seqNum
);
910 "Ignoring completion of squashed access, [tid:%i] [sn:%i]\n",
911 cache_pkt
->cacheReq
->getTid(),
912 cache_pkt
->cacheReq
->seqNum
);
914 cache_pkt
->cacheReq
->done();
922 DPRINTF(InOrderCachePort
,
923 "[tid:%u]: [sn:%i]: Waking from cache access to addr. %08p\n",
924 cache_pkt
->cacheReq
->getInst()->readTid(),
925 cache_pkt
->cacheReq
->getInst()->seqNum
,
926 cache_pkt
->cacheReq
->getInst()->getMemAddr());
928 // Cast to correct request type
929 CacheRequest
*cache_req
= dynamic_cast<CacheReqPtr
>(
930 findRequest(cache_pkt
->cacheReq
->getInst(), cache_pkt
->instIdx
));
933 panic("[tid:%u]: [sn:%i]: Can't find slot for cache access to "
934 "addr. %08p\n", cache_pkt
->cacheReq
->getInst()->readTid(),
935 cache_pkt
->cacheReq
->getInst()->seqNum
,
936 cache_pkt
->cacheReq
->getInst()->getMemAddr());
942 // Get resource request info
943 unsigned stage_num
= cache_req
->getStageNum();
944 DynInstPtr inst
= cache_req
->inst
;
945 ThreadID tid
= cache_req
->inst
->readTid();
947 if (!cache_req
->isSquashed()) {
948 if (inst
->staticInst
&& inst
->isMemRef()) {
949 DPRINTF(InOrderCachePort
,
950 "[tid:%u]: [sn:%i]: Processing cache access\n",
952 PacketPtr dataPkt
= NULL
;
954 if (inst
->splitInst
) {
955 inst
->splitFinishCnt
++;
957 if (inst
->splitFinishCnt
== 2) {
958 cache_req
->memReq
->setVirt(0/*inst->tid*/,
964 Packet
split_pkt(cache_req
->memReq
, cache_req
->pktCmd
,
968 if (inst
->isLoad()) {
969 split_pkt
.dataStatic(inst
->splitMemData
);
971 split_pkt
.dataStatic(&inst
->storeData
);
974 dataPkt
= &split_pkt
;
979 inst
->completeAcc(dataPkt
);
981 if (inst
->isLoad()) {
982 assert(cache_pkt
->isRead());
984 if (cache_pkt
->req
->isLLSC()) {
985 DPRINTF(InOrderCachePort
,
986 "[tid:%u]: Handling Load-Linked for [sn:%u]\n",
988 TheISA::handleLockedRead(cpu
, cache_pkt
->req
);
991 DPRINTF(InOrderCachePort
,
992 "[tid:%u]: [sn:%i]: Bytes loaded were: %s\n",
994 printMemData(dataPkt
->getPtr
<uint8_t>(),
995 dataPkt
->getSize()));
996 } else if(inst
->isStore()) {
997 assert(cache_pkt
->isWrite());
999 DPRINTF(InOrderCachePort
,
1000 "[tid:%u]: [sn:%i]: Bytes stored were: %s\n",
1002 printMemData(dataPkt
->getPtr
<uint8_t>(),
1003 dataPkt
->getSize()));
1009 cache_req
->setMemAccPending(false);
1010 cache_req
->setMemAccCompleted();
1012 if (cache_req
->isMemStall() &&
1013 cpu
->threadModel
== InOrderCPU::SwitchOnCacheMiss
) {
1014 DPRINTF(InOrderCachePort
, "[tid:%u] Waking up from Cache Miss.\n",
1017 cpu
->activateContext(tid
);
1019 DPRINTF(ThreadModel
, "Activating [tid:%i] after return from cache"
1023 // Wake up the CPU (if it went to sleep and was waiting on this
1024 // completion event).
1027 DPRINTF(Activity
, "[tid:%u] Activating %s due to cache completion\n",
1028 tid
, cpu
->pipelineStage
[stage_num
]->name());
1030 cpu
->switchToActive(stage_num
);
1032 DPRINTF(InOrderCachePort
,
1033 "[tid:%u] Miss on block @ %08p completed, but squashed\n",
1034 tid
, cache_req
->inst
->instAddr());
1035 cache_req
->setMemAccCompleted();
// Port-retry callback: asserts the port was actually blocked, then
// clears cachePortBlocked so execute()/doCacheAccess() can resend.
1040 CacheUnit::recvRetry()
1042 DPRINTF(InOrderCachePort
, "Unblocking Cache Port. \n");
1044 assert(cachePortBlocked
);
1046 // Clear the cache port for use again
1047 cachePortBlocked
= false;
// Default constructor for the TLB-latency wake-up event; its
// initializer list / body is not visible in this mangled view.
1052 CacheUnitEvent::CacheUnitEvent()
// Fires after the modeled TLB-miss latency: clears the thread's
// tlbBlocked flag on the owning CacheUnit, un-stalls the pipeline
// stage that was parked on this request, and clears the request's
// tlbStall marker. Squashed requests get further handling (body not
// visible in this mangled view).
1057 CacheUnitEvent::process()
1059 DynInstPtr inst
= resource
->reqs
[slotIdx
]->inst
;
1060 int stage_num
= resource
->reqs
[slotIdx
]->getStageNum();
1061 ThreadID tid
= inst
->threadNumber
;
1062 CacheReqPtr req_ptr
= dynamic_cast<CacheReqPtr
>(resource
->reqs
[slotIdx
]);
1064 DPRINTF(InOrderTLB
, "Waking up from TLB Miss caused by [sn:%i].\n",
1067 CacheUnit
* tlb_res
= dynamic_cast<CacheUnit
*>(resource
);
1070 tlb_res
->tlbBlocked
[tid
] = false;
1072 tlb_res
->cpu
->pipelineStage
[stage_num
]->
1073 unsetResStall(tlb_res
->reqs
[slotIdx
], tid
);
1075 req_ptr
->tlbStall
= false;
1077 if (req_ptr
->isSquashed()) {
// Squash triggered by a memory stall (SwitchOnCacheMiss model only):
// bumps the sequence number by one so the stalling instruction
// itself survives, then delegates to squash().
1083 CacheUnit::squashDueToMemStall(DynInstPtr inst
, int stage_num
,
1084 InstSeqNum squash_seq_num
, ThreadID tid
)
1086 // If squashing due to memory stall, then we do NOT want to
1087 // squash the instruction that caused the stall so we
1088 // increment the sequence number here to prevent that.
1090 // NOTE: This is only for the SwitchOnCacheMiss Model
1091 // NOTE: If you have multiple outstanding misses from the same
1092 // thread then you need to reevaluate this code
1093 // NOTE: squash should originate from
1094 // pipeline_stage.cc:processInstSchedule
1095 DPRINTF(InOrderCachePort
, "Squashing above [sn:%u]\n",
1096 squash_seq_num
+ 1);
1098 squash(inst
, stage_num
, squash_seq_num
+ 1, tid
);
// Marks both the request and its instruction squashed, and — if the
// instruction still holds a valid memory address — removes its
// address dependency so later accesses to that address can proceed.
1102 CacheUnit::squashCacheRequest(CacheReqPtr req_ptr
)
1104 DynInstPtr inst
= req_ptr
->getInst();
1106 req_ptr
->setSquashed();
1107 inst
->setSquashed();
1108 if (inst
->validMemAddr()) {
1109 DPRINTF(AddrDep
, "Squash of [tid:%i] [sn:%i], attempting to "
1110 "remove addr. %08p dependencies.\n",
1113 inst
->getMemAddr());
1115 removeAddrDependency(inst
);
// Squashes every valid request of thread `tid` younger than
// squash_seq_num: already-squashed requests are skipped; otherwise
// squashCacheRequest() is applied, a TLB-stalled request additionally
// unblocks the thread and un-stalls its pipeline stage, and the slot
// is freed only when neither a TLB stall nor a pending memory access
// remains — a still-pending access keeps its slot and is just traced.
1121 CacheUnit::squash(DynInstPtr inst
, int stage_num
,
1122 InstSeqNum squash_seq_num
, ThreadID tid
)
1124 for (int i
= 0; i
< width
; i
++) {
1125 ResReqPtr req_ptr
= reqs
[i
];
1127 if (req_ptr
->valid
&&
1128 req_ptr
->getInst()->readTid() == tid
&&
1129 req_ptr
->getInst()->seqNum
> squash_seq_num
) {
1131 DPRINTF(InOrderCachePort
,
1132 "[tid:%i] Squashing request from [sn:%i]\n",
1133 req_ptr
->getInst()->readTid(), req_ptr
->getInst()->seqNum
);
1135 if (req_ptr
->isSquashed()) {
1136 DPRINTF(AddrDep
, "Request for [tid:%i] [sn:%i] already "
1137 "squashed, ignoring squash process.\n",
1138 req_ptr
->getInst()->readTid(),
1139 req_ptr
->getInst()->seqNum
);
1143 CacheReqPtr cache_req
= dynamic_cast<CacheReqPtr
>(req_ptr
);
1146 squashCacheRequest(cache_req
);
1148 int req_slot_num
= req_ptr
->getSlot();
1150 if (cache_req
->tlbStall
) {
1151 tlbBlocked
[tid
] = false;
1153 int stall_stage
= reqs
[req_slot_num
]->getStageNum();
1155 cpu
->pipelineStage
[stall_stage
]->
1156 unsetResStall(reqs
[req_slot_num
], tid
);
1159 if (!cache_req
->tlbStall
&& !cache_req
->isMemAccPending()) {
1160 freeSlot(req_slot_num
);
1162 DPRINTF(InOrderCachePort
,
1163 "[tid:%i] Request from [sn:%i] squashed, but still "
1164 "pending completion.\n",
1165 req_ptr
->getInst()->readTid(), req_ptr
->getInst()->seqNum
);
1167 "[tid:%i] Request from [sn:%i] squashed (split:%i), but "
1168 "still pending completion.\n",
1169 req_ptr
->getInst()->readTid(), req_ptr
->getInst()->seqNum
,
1170 req_ptr
->getInst()->splitInst
);