2 * Copyright (c) 2007 MIPS Technologies, Inc.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 * Authors: Korey Sewell
35 #include "arch/isa_traits.hh"
36 #include "arch/locked_mem.hh"
37 #include "arch/predecoder.hh"
38 #include "arch/utility.hh"
39 #include "config/the_isa.hh"
40 #include "cpu/inorder/resources/cache_unit.hh"
41 #include "cpu/inorder/cpu.hh"
42 #include "cpu/inorder/pipeline_traits.hh"
43 #include "cpu/inorder/resource_pool.hh"
44 #include "debug/Activity.hh"
45 #include "debug/AddrDep.hh"
46 #include "debug/InOrderCachePort.hh"
47 #include "debug/InOrderStall.hh"
48 #include "debug/InOrderTLB.hh"
49 #include "debug/LLSC.hh"
50 #include "debug/RefCount.hh"
51 #include "debug/ThreadModel.hh"
52 #include "mem/request.hh"
// File-scope using-directives for the ISA- and pipeline-parameterized
// namespaces used throughout this translation unit.
// NOTE(review): extraction has split each statement across two lines and
// fused original line numbers into the text; code left byte-identical.
55 using namespace TheISA
;
56 using namespace ThePipeline
;
// Helper: formats `size` bytes from `data` as two-digit hex into a
// stringstream via ccprintf("%02x") — used by the DPRINTFs in
// processCacheCompletion to dump loaded/stored bytes.
// NOTE(review): extraction dropped interior lines here (the return type,
// closing braces and return statement are missing — embedded numbering
// jumps 64→71); code left byte-identical, restore from upstream before edits.
60 printMemData(uint8_t *data
, unsigned size
)
62 std::stringstream dataStr
;
63 for (unsigned pos
= 0; pos
< size
; pos
++) {
64 ccprintf(dataStr
, "%02x", data
[pos
]);
// Atomic-mode receive hook on the cache port: this model does not service
// atomic accesses through this port, so the call unconditionally panics.
// NOTE(review): return type / braces dropped by extraction; code unchanged.
71 CacheUnit::CachePort::recvAtomic(PacketPtr pkt
)
73 panic("CacheUnit::CachePort doesn't expect recvAtomic callback!");
// Functional-mode receive hook: likewise unsupported — always panics.
// NOTE(review): return type / braces dropped by extraction; code unchanged.
78 CacheUnit::CachePort::recvFunctional(PacketPtr pkt
)
80 panic("CacheUnit::CachePort doesn't expect recvFunctional callback!");
// Status-change hook: a RangeChange is tolerated (body between the `if` and
// the panic is missing from this extraction — presumably an early return;
// TODO confirm against upstream); any other status panics.
84 CacheUnit::CachePort::recvStatusChange(Status status
)
86 if (status
== RangeChange
)
89 panic("CacheUnit::CachePort doesn't expect recvStatusChange callback!");
// Timing-mode receive hook: forwards the completed packet to the owning
// CacheUnit's processCacheCompletion().
// NOTE(review): return type / braces / return value dropped by extraction.
93 CacheUnit::CachePort::recvTiming(Packet
*pkt
)
95 cachePortUnit
->processCacheCompletion(pkt
);
// Retry hook: delegates to the owning CacheUnit's recvRetry(), which
// unblocks the port (see CacheUnit::recvRetry below).
100 CacheUnit::CachePort::recvRetry()
102 cachePortUnit
->recvRetry();
// Constructor: builds the CachePort, selects behavior from the hard-coded
// resource name ("icache_port" / "dcache_port", else fatal), and clears the
// per-thread TLB-blocked flags.
// NOTE(review): the bodies of the icache/dcache branches (original lines
// 114-117) are missing from this extraction; code left byte-identical.
105 CacheUnit::CacheUnit(string res_name
, int res_id
, int res_width
,
106 int res_latency
, InOrderCPU
*_cpu
, ThePipeline::Params
*params
)
107 : Resource(res_name
, res_id
, res_width
, res_latency
, _cpu
),
108 cachePortBlocked(false)
110 cachePort
= new CachePort(this);
112 // Hard-Code Selection For Now
113 if (res_name
== "icache_port")
115 else if (res_name
== "dcache_port")
118 fatal("Unrecognized TLB name passed by user");
120 for (int i
=0; i
< MaxThreads
; i
++) {
121 tlbBlocked
[i
] = false;
// Port lookup: matches the requested interface name against this resource's
// name. The success/failure return statements (original lines 136+) were
// dropped by extraction — presumably returns cachePort on match; TODO confirm.
133 CacheUnit::getPort(const string
&if_name
, int idx
)
135 if (if_name
== resName
)
// NOTE(review): orphaned function interior — the enclosing definition's
// header (original lines ~140-143) is outside this extraction. The code
// allocates one CacheRequest per slot, an array of CacheUnitEvents for
// modeling TLB latency, and caches the peer's block size/mask.
144 for (int i
= 0; i
< width
; i
++) {
145 reqs
[i
] = new CacheRequest(this);
148 // Currently Used to Model TLB Latency. Eventually
149 // Switch to Timing TLB translations.
150 resourceEvent
= new CacheUnitEvent
[width
];
152 cacheBlkSize
= this->cachePort
->peerBlockSize();
153 cacheBlkMask
= cacheBlkSize
- 1;
// Slot acquisition with address-dependency enforcement: refuses while the
// thread's TLB is blocked; panics on an unset memory address (unless a split
// access already consumed it); grants a slot when the address has no
// outstanding request (icache accesses are exempt from the address check),
// recording memTime and the address dependency.  A repeat request by the
// same seqNum for the same address is also granted (squash-replay case).
// Otherwise the request is denied with a diagnostic DPRINTF.
// NOTE(review): several return statements/braces are missing from this
// extraction (numbering jumps 180→185, 192→199); code left byte-identical.
159 CacheUnit::getSlot(DynInstPtr inst
)
161 ThreadID tid
= inst
->readTid();
163 if (tlbBlocked
[inst
->threadNumber
]) {
167 // For a Split-Load, the instruction would have processed once already
168 // causing the address to be unset.
169 if (!inst
->validMemAddr() && !inst
->splitInst
) {
170 panic("[tid:%i][sn:%i] Mem. Addr. must be set before requesting "
171 "cache access\n", inst
->readTid(), inst
->seqNum
);
174 Addr req_addr
= inst
->getMemAddr();
176 if (resName
== "icache_port" ||
177 find(addrList
[tid
].begin(), addrList
[tid
].end(), req_addr
) ==
178 addrList
[tid
].end()) {
180 int new_slot
= Resource::getSlot(inst
);
185 inst
->memTime
= curTick();
186 setAddrDependency(inst
);
189 // Allow same instruction multiple accesses to same address
190 // should only happen maybe after a squashed inst. needs to replay
191 if (addrMap
[tid
][req_addr
] == inst
->seqNum
) {
192 int new_slot
= Resource::getSlot(inst
);
199 DPRINTF(InOrderCachePort
,
200 "[tid:%i] Denying request because there is an outstanding"
201 " request to/for addr. %08p. by [sn:%i] @ tick %i\n",
202 inst
->readTid(), req_addr
, addrMap
[tid
][req_addr
], inst
->memTime
);
// Records an outstanding access: appends the instruction's memory address to
// the per-thread addrList and maps address -> seqNum in addrMap (the map is
// what getSlot consults to deny/allow later requests to the same address).
// NOTE(review): the DPRINTF header for the message at original line 220 was
// dropped by extraction; code left byte-identical.
211 CacheUnit::setAddrDependency(DynInstPtr inst
)
213 Addr req_addr
= inst
->getMemAddr();
214 ThreadID tid
= inst
->readTid();
216 addrList
[tid
].push_back(req_addr
);
217 addrMap
[tid
][req_addr
] = inst
->seqNum
;
220 "[tid:%i]: [sn:%i]: Address %08p added to dependency list (size=%i)\n",
221 inst
->readTid(), inst
->seqNum
, req_addr
, addrList
[tid
].size());
223 //@NOTE: 10 is an arbitrarily "high" number, but to be exact
224 // we would need to know the # of outstanding accesses
225 // a priori. Information like fetch width, stage width,
226 // fetch buffer, and the branch resolution stage would be
227 // useful for the icache_port. For the dcache port, the #
228 // of outstanding cache accesses (mshrs) would be a good
229 // sanity check here.
230 //assert(addrList[tid].size() < 10);
// Clears the dependency recorded by setAddrDependency: unsets the
// instruction's memory address, erases it from the per-thread addrList
// (absence is tolerated only for split instructions, per the assert) and
// from the debugging addrMap.
// NOTE(review): the second argument span of the `find` call (original lines
// 244-245) and a DPRINTF header are missing from this extraction.
234 CacheUnit::removeAddrDependency(DynInstPtr inst
)
236 ThreadID tid
= inst
->readTid();
238 Addr mem_addr
= inst
->getMemAddr();
240 inst
->unsetMemAddr();
242 // Erase from Address List
243 vector
<Addr
>::iterator vect_it
= find(addrList
[tid
].begin(),
246 assert(vect_it
!= addrList
[tid
].end() || inst
->splitInst
);
248 if (vect_it
!= addrList
[tid
].end()) {
250 "[tid:%i]: [sn:%i] Address %08p removed from dependency "
251 "list\n", inst
->readTid(), inst
->seqNum
, (*vect_it
));
253 addrList
[tid
].erase(vect_it
);
255 // Erase From Address Map (Used for Debugging)
256 addrMap
[tid
].erase(addrMap
[tid
].find(mem_addr
));
// Linear scan of the request slots for a valid CacheRequest belonging to
// `inst` whose instIdx matches the instruction's current schedule-entry
// index.  The match/miss return statements were dropped by extraction.
263 CacheUnit::findRequest(DynInstPtr inst
)
265 for (int i
= 0; i
< width
; i
++) {
266 CacheRequest
* cache_req
=
267 dynamic_cast<CacheRequest
*>(reqs
[i
]);
270 if (cache_req
->valid
&&
271 cache_req
->getInst() == inst
&&
272 cache_req
->instIdx
== inst
->curSkedEntry
->idx
) {
// Overload of findRequest matching against an explicit schedule index `idx`
// instead of the instruction's current entry (used by
// processCacheCompletion with the packet's instIdx).  Return statements
// dropped by extraction, as above.
281 CacheUnit::findRequest(DynInstPtr inst
, int idx
)
283 for (int i
= 0; i
< width
; i
++) {
284 CacheRequest
* cache_req
=
285 dynamic_cast<CacheRequest
*>(reqs
[i
]);
288 if (cache_req
->valid
&&
289 cache_req
->getInst() == inst
&&
290 cache_req
->instIdx
== idx
) {
// Builds the resource request for a cache access: maps the schedule entry's
// command to a MemCmd (InitSecondSplitRead/InitiateReadData -> ReadReq,
// InitSecondSplitWrite/InitiateWriteData -> WriteReq; anything else panics),
// then fills in the slot's CacheRequest via setRequest().  Requires a valid
// memory address on the instruction.
// NOTE(review): `break`s, braces and the final return (and the default:
// label before the panic, presumably) were dropped by extraction.
300 CacheUnit::getRequest(DynInstPtr inst
, int stage_num
, int res_idx
,
301 int slot_num
, unsigned cmd
)
303 ScheduleEntry
* sched_entry
= *inst
->curSkedEntry
;
304 CacheRequest
* cache_req
= dynamic_cast<CacheRequest
*>(reqs
[slot_num
]);
306 if (!inst
->validMemAddr()) {
307 panic("Mem. Addr. must be set before requesting cache access\n");
310 MemCmd::Command pkt_cmd
;
312 switch (sched_entry
->cmd
)
314 case InitSecondSplitRead
:
315 pkt_cmd
= MemCmd::ReadReq
;
317 DPRINTF(InOrderCachePort
,
318 "[tid:%i]: Read request from [sn:%i] for addr %08p\n",
319 inst
->readTid(), inst
->seqNum
, inst
->split2ndAddr
);
322 case InitiateReadData
:
323 pkt_cmd
= MemCmd::ReadReq
;
325 DPRINTF(InOrderCachePort
,
326 "[tid:%i]: Read request from [sn:%i] for addr %08p\n",
327 inst
->readTid(), inst
->seqNum
, inst
->getMemAddr());
330 case InitSecondSplitWrite
:
331 pkt_cmd
= MemCmd::WriteReq
;
333 DPRINTF(InOrderCachePort
,
334 "[tid:%i]: Write request from [sn:%i] for addr %08p\n",
335 inst
->readTid(), inst
->seqNum
, inst
->split2ndAddr
);
338 case InitiateWriteData
:
339 pkt_cmd
= MemCmd::WriteReq
;
341 DPRINTF(InOrderCachePort
,
342 "[tid:%i]: Write request from [sn:%i] for addr %08p\n",
343 inst
->readTid(), inst
->seqNum
, inst
->getMemAddr());
347 panic("%i: Unexpected request type (%i) to %s", curTick(),
348 sched_entry
->cmd
, name());
351 cache_req
->setRequest(inst
, stage_num
, id
, slot_num
,
352 sched_entry
->cmd
, pkt_cmd
,
353 inst
->curSkedEntry
->idx
);
// Re-request handler: if the instruction's current schedule command differs
// from the pending request's (same instIdx), the request's command is
// updated in place and the resource is re-serviced; if the command is the
// same (and not a second-split init), the resource is simply requested
// again without re-execution.  `service_request` is the out-flag.
// NOTE(review): closing braces/return dropped by extraction.
358 CacheUnit::requestAgain(DynInstPtr inst
, bool &service_request
)
360 CacheReqPtr cache_req
= dynamic_cast<CacheReqPtr
>(findRequest(inst
));
363 // Check to see if this instruction is requesting the same command
364 // or a different one
365 if (cache_req
->cmd
!= inst
->curSkedEntry
->cmd
&&
366 cache_req
->instIdx
== inst
->curSkedEntry
->idx
) {
367 // If different, then update command in the request
368 cache_req
->cmd
= inst
->curSkedEntry
->cmd
;
369 DPRINTF(InOrderCachePort
,
370 "[tid:%i]: [sn:%i]: Updating the command for this "
371 "instruction\n ", inst
->readTid(), inst
->seqNum
);
373 service_request
= true;
374 } else if (inst
->curSkedEntry
->idx
!= CacheUnit::InitSecondSplitRead
&&
375 inst
->curSkedEntry
->idx
!= CacheUnit::InitSecondSplitWrite
) {
376 // If same command, just check to see if memory access was completed
377 // but dont try to re-execute
378 DPRINTF(InOrderCachePort
,
379 "[tid:%i]: [sn:%i]: requesting this resource again\n",
380 inst
->readTid(), inst
->seqNum
);
382 service_request
= true;
// Lazily builds (or reuses) the memory Request object for this access and
// attaches it to the CacheRequest: first-part accesses use/construct
// inst->dataMemReq; second-split accesses (which assert splitInst) use
// inst->splitMemReq.
// NOTE(review): parts of both Request constructor argument lists (original
// lines 398, 406-413) were dropped by extraction; code left byte-identical.
387 CacheUnit::setupMemRequest(DynInstPtr inst
, CacheReqPtr cache_req
,
388 int acc_size
, int flags
)
390 ThreadID tid
= inst
->readTid();
391 Addr aligned_addr
= inst
->getMemAddr();
393 if (!cache_req
->is2ndSplit()) {
394 if (inst
->dataMemReq
== NULL
) {
396 new Request(cpu
->asid
[tid
], aligned_addr
, acc_size
, flags
,
397 inst
->instAddr(), cpu
->readCpuId(),
399 cache_req
->memReq
= inst
->dataMemReq
;
402 assert(inst
->splitInst
);
404 if (inst
->splitMemReq
== NULL
) {
405 inst
->splitMemReq
= new Request(cpu
->asid
[tid
],
414 cache_req
->memReq
= inst
->splitMemReq
;
// Performs the (atomic) TLB translation for an access: sets up the memory
// request, calls _tlb->translateAtomic(), and on a fault stalls the issuing
// pipeline stage, marks the thread TLB-blocked, flags the request's
// tlbStall, schedules the wakeup CacheUnitEvent one tick out, and traps;
// on success just logs the virt->phys translation.
// NOTE(review): the fault assignment from translateAtomic (original ~429)
// and the function's return were dropped by extraction.
419 CacheUnit::doTLBAccess(DynInstPtr inst
, CacheReqPtr cache_req
, int acc_size
,
420 int flags
, TheISA::TLB::Mode tlb_mode
)
422 ThreadID tid
= inst
->readTid();
423 //Addr aligned_addr = inst->getMemAddr();
424 unsigned stage_num
= cache_req
->getStageNum();
425 unsigned slot_idx
= cache_req
->getSlot();
427 setupMemRequest(inst
, cache_req
, acc_size
, flags
);
430 _tlb
->translateAtomic(cache_req
->memReq
,
431 cpu
->thread
[tid
]->getTC(), tlb_mode
);
433 if (inst
->fault
!= NoFault
) {
434 DPRINTF(InOrderTLB
, "[tid:%i]: %s encountered while translating "
435 "addr:%08p for [sn:%i].\n", tid
, inst
->fault
->name(),
436 cache_req
->memReq
->getVaddr(), inst
->seqNum
);
438 cpu
->pipelineStage
[stage_num
]->setResStall(cache_req
, tid
);
440 tlbBlocked
[tid
] = true;
442 cache_req
->tlbStall
= true;
444 scheduleEvent(slot_idx
, 1);
446 cpu
->trap(inst
->fault
, tid
, inst
);
448 DPRINTF(InOrderTLB
, "[tid:%i]: [sn:%i] virt. addr %08p translated "
449 "to phys. addr:%08p.\n", tid
, inst
->seqNum
,
450 cache_req
->memReq
->getVaddr(),
451 cache_req
->memReq
->getPaddr());
// Initiates a (possibly split) data read.  For a second-split access it
// swaps in split2ndSize and flags the request as the split's second half.
// Otherwise it computes secondAddr = roundDown(addr + size - 1, blockSize);
// if the access crosses a cache-line boundary it builds a new back-end
// schedule containing the split read/complete entries (first encounter
// only), allocates splitMemData, shrinks the first access to the line
// boundary, and records the second access's size/addr/data-ptr/flags.
// It then translates via doTLBAccess(Read) and, on NoFault, issues
// doCacheAccess — with the appropriate reqData buffer for whole, first-half
// and second-half accesses respectively.
// NOTE(review): heavily truncated by extraction (fullSize's definition,
// returns, closing braces are among the missing lines); code byte-identical.
457 CacheUnit::read(DynInstPtr inst
, Addr addr
,
458 uint8_t *data
, unsigned size
, unsigned flags
)
460 CacheReqPtr cache_req
= dynamic_cast<CacheReqPtr
>(findRequest(inst
));
461 assert(cache_req
&& "Can't Find Instruction for Read!");
463 // The block size of our peer
464 unsigned blockSize
= this->cachePort
->peerBlockSize();
466 //The size of the data we're trying to read.
468 inst
->totalSize
= size
;
470 if (inst
->traceData
) {
471 inst
->traceData
->setAddr(addr
);
474 if (inst
->split2ndAccess
) {
475 size
= inst
->split2ndSize
;
476 cache_req
->splitAccess
= true;
477 cache_req
->split2ndAccess
= true;
479 DPRINTF(InOrderCachePort
, "[sn:%i] Split Read Access (2 of 2) for "
480 "(%#x, %#x).\n", inst
->seqNum
, inst
->getMemAddr(),
485 //The address of the second part of this access if it needs to be split
486 //across a cache line boundary.
487 Addr secondAddr
= roundDown(addr
+ size
- 1, blockSize
);
490 if (secondAddr
> addr
&& !inst
->split2ndAccess
) {
492 if (!inst
->splitInst
) {
493 DPRINTF(InOrderCachePort
, "%i: sn[%i] Split Read Access (1 of 2) for "
494 "(%#x, %#x).\n", curTick(), inst
->seqNum
, addr
, secondAddr
);
496 unsigned stage_num
= cache_req
->getStageNum();
497 unsigned cmd
= inst
->curSkedEntry
->cmd
;
499 // 1. Make A New Inst. Schedule w/Split Read/Complete Entered on
501 // ==============================
502 // 2. Reassign curSkedPtr to current command (InitiateRead) on new
504 // ==============================
505 inst
->splitInst
= true;
506 inst
->setBackSked(cpu
->createBackEndSked(inst
));
507 inst
->curSkedEntry
= inst
->backSked
->find(stage_num
, cmd
);
509 DPRINTF(InOrderCachePort
, "[tid:%i] [sn:%i] Retrying Split Read "
510 "Access (1 of 2) for (%#x, %#x).\n", inst
->readTid(),
511 inst
->seqNum
, addr
, secondAddr
);
514 // Save All "Total" Split Information
515 // ==============================
516 inst
->splitMemData
= new uint8_t[size
];
518 // Split Information for First Access
519 // ==============================
520 size
= secondAddr
- addr
;
521 cache_req
->splitAccess
= true;
523 // Split Information for Second Access
524 // ==============================
525 inst
->split2ndSize
= addr
+ fullSize
- secondAddr
;
526 inst
->split2ndAddr
= secondAddr
;
527 inst
->split2ndDataPtr
= inst
->splitMemData
+ size
;
528 inst
->split2ndFlags
= flags
;
531 doTLBAccess(inst
, cache_req
, size
, flags
, TheISA::TLB::Read
);
533 if (inst
->fault
== NoFault
) {
534 if (!cache_req
->splitAccess
) {
535 cache_req
->reqData
= new uint8_t[size
];
536 doCacheAccess(inst
, NULL
);
538 if (!inst
->split2ndAccess
) {
539 cache_req
->reqData
= inst
->splitMemData
;
541 cache_req
->reqData
= inst
->split2ndDataPtr
;
544 doCacheAccess(inst
, NULL
, cache_req
);
// Initiates a (possibly split) data write; mirrors read() above.  For a
// line-crossing first access it asserts (split stores flagged unsupported:
// `assert(0 && "Split Requests Not Supported for Now...")` guards the
// scheduling path), otherwise appends InitSecondSplitWrite /
// CompleteSecondSplitWrite ScheduleEntries to the appropriate schedule,
// records second-access size/addr/flags and points split2ndStoreDataPtr
// into the request's storeData.  Then translates via doTLBAccess(Write)
// and on NoFault issues doCacheAccess with write_res (for store-cond
// results), passing the split request for second-half accesses.
// NOTE(review): heavily truncated (fullSize definition, ScheduleEntry
// argument tails, returns/braces missing); code left byte-identical.
552 CacheUnit::write(DynInstPtr inst
, uint8_t *data
, unsigned size
,
553 Addr addr
, unsigned flags
, uint64_t *write_res
)
555 CacheReqPtr cache_req
= dynamic_cast<CacheReqPtr
>(findRequest(inst
));
556 assert(cache_req
&& "Can't Find Instruction for Write!");
558 // The block size of our peer
559 unsigned blockSize
= this->cachePort
->peerBlockSize();
561 //The size of the data we're trying to write.
563 inst
->totalSize
= size
;
565 if (inst
->traceData
) {
566 inst
->traceData
->setAddr(addr
);
569 if (inst
->split2ndAccess
) {
570 size
= inst
->split2ndSize
;
571 cache_req
->splitAccess
= true;
572 cache_req
->split2ndAccess
= true;
574 DPRINTF(InOrderCachePort
, "[sn:%i] Split Write Access (2 of 2) for "
575 "(%#x, %#x).\n", inst
->seqNum
, inst
->getMemAddr(),
579 //The address of the second part of this access if it needs to be split
580 //across a cache line boundary.
581 Addr secondAddr
= roundDown(addr
+ size
- 1, blockSize
);
583 if (secondAddr
> addr
&& !inst
->split2ndAccess
) {
585 DPRINTF(InOrderCachePort
, "[sn:%i] Split Write Access (1 of 2) for "
586 "(%#x, %#x).\n", inst
->seqNum
, addr
, secondAddr
);
588 // Save All "Total" Split Information
589 // ==============================
590 inst
->splitInst
= true;
592 if (!inst
->splitInstSked
) {
593 assert(0 && "Split Requests Not Supported for Now...");
595 // Schedule Split Read/Complete for Instruction
596 // ==============================
597 int stage_num
= cache_req
->getStageNum();
598 RSkedPtr inst_sked
= (stage_num
>= ThePipeline::BackEndStartStage
) ?
599 inst
->backSked
: inst
->frontSked
;
601 // this is just an arbitrarily high priority to ensure that this
602 // gets pushed to the back of the list
605 int isplit_cmd
= CacheUnit::InitSecondSplitWrite
;
607 ScheduleEntry(stage_num
,
609 cpu
->resPool
->getResIdx(DCache
),
613 int csplit_cmd
= CacheUnit::CompleteSecondSplitWrite
;
615 ScheduleEntry(stage_num
+ 1,
617 cpu
->resPool
->getResIdx(DCache
),
620 inst
->splitInstSked
= true;
622 DPRINTF(InOrderCachePort
, "[tid:%i] sn:%i] Retrying Split Read "
623 "Access (1 of 2) for (%#x, %#x).\n",
624 inst
->readTid(), inst
->seqNum
, addr
, secondAddr
);
629 // Split Information for First Access
630 // ==============================
631 size
= secondAddr
- addr
;
632 cache_req
->splitAccess
= true;
634 // Split Information for Second Access
635 // ==============================
636 inst
->split2ndSize
= addr
+ fullSize
- secondAddr
;
637 inst
->split2ndAddr
= secondAddr
;
638 inst
->split2ndStoreDataPtr
= &cache_req
->inst
->storeData
;
639 inst
->split2ndStoreDataPtr
+= size
;
640 inst
->split2ndFlags
= flags
;
641 inst
->splitInstSked
= true;
644 doTLBAccess(inst
, cache_req
, size
, flags
, TheISA::TLB::Write
);
646 if (inst
->fault
== NoFault
) {
647 if (!cache_req
->splitAccess
) {
648 doCacheAccess(inst
, write_res
);
650 doCacheAccess(inst
, write_res
, cache_req
);
// Per-cycle dispatch for a request slot.  Initiation commands are rejected
// (done(false)) while the cache port is blocked.  The switch handles:
//   InitiateReadData / InitiateWriteData  — start an access (prefetches
//     short-circuited); the staticInst read/write initiation call between
//     lines 704 and 712 was dropped by extraction.
//   InitSecondSplitRead / InitSecondSplitWrite — issue the second half of a
//     split access via read()/write() with split2ndAddr/size/flags.
//   Complete{Read,Write}Data / CompleteSecondSplit{Read,Write} — if the
//     memory access finished (or is a prefetch), drop the address
//     dependency and clear the mem-stall; otherwise record a stall
//     (setCompleted(false), setMemStall(true)).
//   default — fatal on unknown command.
// NOTE(review): `break`s, the inst->seqNum arguments of some DPRINTFs, and
// completion calls are among the lines missing from this extraction.
660 CacheUnit::execute(int slot_num
)
662 CacheReqPtr cache_req
= dynamic_cast<CacheReqPtr
>(reqs
[slot_num
]);
665 if (cachePortBlocked
&&
666 (cache_req
->cmd
== InitiateReadData
||
667 cache_req
->cmd
== InitiateWriteData
||
668 cache_req
->cmd
== InitSecondSplitRead
||
669 cache_req
->cmd
== InitSecondSplitWrite
)) {
670 DPRINTF(InOrderCachePort
, "Cache Port Blocked. Cannot Access\n");
671 cache_req
->done(false);
675 DynInstPtr inst
= cache_req
->inst
;
677 ThreadID tid
= inst
->readTid();
678 std::string acc_type
= "write";
681 inst
->fault
= NoFault
;
683 switch (cache_req
->cmd
)
686 case InitiateReadData
:
690 case InitiateWriteData
:
691 if (cachePortBlocked
) {
692 DPRINTF(InOrderCachePort
, "Cache Port Blocked. Cannot Access\n");
693 cache_req
->done(false);
697 DPRINTF(InOrderCachePort
,
698 "[tid:%u]: [sn:%i] Initiating data %s access to %s for "
699 "addr. %08p\n", tid
, inst
->seqNum
, acc_type
, name(),
700 cache_req
->inst
->getMemAddr());
702 inst
->setCurResSlot(slot_num
);
704 if (inst
->isDataPrefetch() || inst
->isInstPrefetch()) {
712 case InitSecondSplitRead
:
713 DPRINTF(InOrderCachePort
,
714 "[tid:%u]: [sn:%i] Initiating split data read access to %s "
715 "for addr. %08p\n", tid
, inst
->seqNum
, name(),
716 cache_req
->inst
->split2ndAddr
);
717 inst
->split2ndAccess
= true;
718 assert(inst
->split2ndAddr
!= 0);
719 read(inst
, inst
->split2ndAddr
, &inst
->split2ndData
,
720 inst
->totalSize
, inst
->split2ndFlags
);
723 case InitSecondSplitWrite
:
724 DPRINTF(InOrderCachePort
,
725 "[tid:%u]: [sn:%i] Initiating split data write access to %s "
726 "for addr. %08p\n", tid
, inst
->seqNum
, name(),
727 cache_req
->inst
->getMemAddr());
729 inst
->split2ndAccess
= true;
730 assert(inst
->split2ndAddr
!= 0);
731 write(inst
, &inst
->split2ndData
, inst
->totalSize
,
732 inst
->split2ndAddr
, inst
->split2ndFlags
, NULL
);
735 case CompleteReadData
:
736 case CompleteWriteData
:
737 DPRINTF(InOrderCachePort
,
738 "[tid:%i]: [sn:%i]: Trying to Complete Data Access\n",
741 if (cache_req
->isMemAccComplete() ||
742 inst
->isDataPrefetch() ||
743 inst
->isInstPrefetch()) {
744 removeAddrDependency(inst
);
745 cache_req
->setMemStall(false);
748 DPRINTF(InOrderStall
, "STALL: [tid:%i]: Data miss from %08p\n",
749 tid
, cache_req
->inst
->getMemAddr());
750 cache_req
->setCompleted(false);
751 cache_req
->setMemStall(true);
755 case CompleteSecondSplitRead
:
756 DPRINTF(InOrderCachePort
,
757 "[tid:%i]: [sn:%i]: Trying to Complete Split Data Read "
758 "Access\n", tid
, inst
->seqNum
);
760 if (cache_req
->isMemAccComplete() ||
761 inst
->isDataPrefetch() ||
762 inst
->isInstPrefetch()) {
763 removeAddrDependency(inst
);
764 cache_req
->setMemStall(false);
767 DPRINTF(InOrderStall
, "STALL: [tid:%i]: Data miss from %08p\n",
768 tid
, cache_req
->inst
->split2ndAddr
);
769 cache_req
->setCompleted(false);
770 cache_req
->setMemStall(true);
774 case CompleteSecondSplitWrite
:
775 DPRINTF(InOrderCachePort
,
776 "[tid:%i]: [sn:%i]: Trying to Complete Split Data Write "
777 "Access\n", tid
, inst
->seqNum
);
779 if (cache_req
->isMemAccComplete() ||
780 inst
->isDataPrefetch() ||
781 inst
->isInstPrefetch()) {
782 removeAddrDependency(inst
);
783 cache_req
->setMemStall(false);
786 DPRINTF(InOrderStall
, "STALL: [tid:%i]: Data miss from %08p\n",
787 tid
, cache_req
->inst
->split2ndAddr
);
788 cache_req
->setCompleted(false);
789 cache_req
->setMemStall(true);
794 fatal("Unrecognized command to %s", resName
);
// Performs the actual cache access for a prepared request: upgrades the
// MemCmd for LL/SC (ReadReq -> LoadLockedReq; WriteReq -> SwapReq /
// StoreCondReq per isSwap()/isLLSC()), builds the CacheReqPacket, binds the
// data buffer (reqData for reads; split2ndStoreDataPtr or &storeData for
// writes), seeds extra-data for cond-swap, evaluates store-conditionals via
// TheISA::handleLockedWrite (which may suppress the access), then attempts
// cachePort->sendTiming().  On port rejection the packet is deleted, the
// request marked not-done and cachePortBlocked set; on success the request
// is completed/pending and the port unblocked.  Failed SCs still complete
// via processCacheCompletion on their own packet.
// NOTE(review): several lines are missing from this extraction (the else
// of the split_req check at 812, packet-ctor argument tails, the !do_access
// LL/SC branch interior); code left byte-identical.
798 // @TODO: Split into doCacheRead() and doCacheWrite()
800 CacheUnit::doCacheAccess(DynInstPtr inst
, uint64_t *write_res
,
801 CacheReqPtr split_req
)
803 Fault fault
= NoFault
;
805 ThreadID tid
= inst
->readTid();
808 CacheReqPtr cache_req
;
810 if (split_req
== NULL
) {
811 cache_req
= dynamic_cast<CacheReqPtr
>(reqs
[inst
->getCurResSlot()]);
813 cache_req
= split_req
;
818 // Check for LL/SC and if so change command
819 if (cache_req
->memReq
->isLLSC() && cache_req
->pktCmd
== MemCmd::ReadReq
) {
820 cache_req
->pktCmd
= MemCmd::LoadLockedReq
;
823 if (cache_req
->pktCmd
== MemCmd::WriteReq
) {
825 cache_req
->memReq
->isSwap() ? MemCmd::SwapReq
:
826 (cache_req
->memReq
->isLLSC() ? MemCmd::StoreCondReq
830 cache_req
->dataPkt
= new CacheReqPacket(cache_req
,
835 if (cache_req
->dataPkt
->isRead()) {
836 cache_req
->dataPkt
->dataStatic(cache_req
->reqData
);
837 } else if (cache_req
->dataPkt
->isWrite()) {
838 if (inst
->split2ndAccess
) {
839 cache_req
->dataPkt
->dataStatic(inst
->split2ndStoreDataPtr
);
841 cache_req
->dataPkt
->dataStatic(&cache_req
->inst
->storeData
);
844 if (cache_req
->memReq
->isCondSwap()) {
846 cache_req
->memReq
->setExtraData(*write_res
);
850 bool do_access
= true; // flag to suppress cache access
852 Request
*memReq
= cache_req
->dataPkt
->req
;
854 if (cache_req
->dataPkt
->isWrite() && cache_req
->memReq
->isLLSC()) {
855 assert(cache_req
->inst
->isStoreConditional());
856 DPRINTF(InOrderCachePort
, "Evaluating Store Conditional access\n");
857 do_access
= TheISA::handleLockedWrite(cpu
, memReq
);
860 DPRINTF(InOrderCachePort
,
861 "[tid:%i] [sn:%i] attempting to access cache for addr %08p\n",
862 tid
, inst
->seqNum
, cache_req
->dataPkt
->getAddr());
865 if (!cachePort
->sendTiming(cache_req
->dataPkt
)) {
866 DPRINTF(InOrderCachePort
,
867 "[tid:%i] [sn:%i] cannot access cache, because port "
868 "is blocked. now waiting to retry request\n", tid
,
870 delete cache_req
->dataPkt
;
871 cache_req
->done(false);
872 cachePortBlocked
= true;
874 DPRINTF(InOrderCachePort
,
875 "[tid:%i] [sn:%i] is now waiting for cache response\n",
877 cache_req
->setCompleted();
878 cache_req
->setMemAccPending();
879 cachePortBlocked
= false;
881 } else if (!do_access
&& memReq
->isLLSC()){
882 // Store-Conditional instructions complete even if they "failed"
883 assert(cache_req
->inst
->isStoreConditional());
884 cache_req
->setCompleted(true);
887 "[tid:%i]: T%i Ignoring Failed Store Conditional Access\n",
890 processCacheCompletion(cache_req
->dataPkt
);
892 delete cache_req
->dataPkt
;
893 // Make cache request again since access due to
894 // inability to access
895 DPRINTF(InOrderStall
, "STALL: \n");
896 cache_req
->done(false);
// Completion callback from the cache port.  Squashed requests are logged
// and their slot freed without further processing.  Otherwise the matching
// CacheRequest is located via findRequest(inst, instIdx) (panic if absent).
// Split instructions count completions via splitFinishCnt and, when both
// halves are done, rebuild a merged Packet (split_pkt) over the full
// virtual range with the proper static data buffer (splitMemData for loads,
// &storeData for stores).  The instruction's completeAcc() is then invoked;
// loads additionally run TheISA::handleLockedRead for LL, and byte dumps of
// loads/stores are DPRINTF'd via printMemData.  Finally the request is
// marked no-longer-pending / mem-access-complete, a SwitchOnCacheMiss
// thread blocked on this miss is reactivated, and the issuing pipeline
// stage is switched active to consume the completion.
// NOTE(review): truncated by extraction — the two squash DPRINTF variants
// (911 vs 915) were alternatives of a conditional whose structure is
// missing, and the setVirt/split_pkt argument tails are incomplete; code
// left byte-identical.
902 CacheUnit::processCacheCompletion(PacketPtr pkt
)
904 // Cast to correct packet type
905 CacheReqPacket
* cache_pkt
= dynamic_cast<CacheReqPacket
*>(pkt
);
909 if (cache_pkt
->cacheReq
->isSquashed()) {
910 DPRINTF(InOrderCachePort
,
911 "Ignoring completion of squashed access, [tid:%i] [sn:%i]\n",
912 cache_pkt
->cacheReq
->getInst()->readTid(),
913 cache_pkt
->cacheReq
->getInst()->seqNum
);
915 "Ignoring completion of squashed access, [tid:%i] [sn:%i]\n",
916 cache_pkt
->cacheReq
->getTid(),
917 cache_pkt
->cacheReq
->seqNum
);
919 cache_pkt
->cacheReq
->freeSlot();
927 DPRINTF(InOrderCachePort
,
928 "[tid:%u]: [sn:%i]: Waking from cache access to addr. %08p\n",
929 cache_pkt
->cacheReq
->getInst()->readTid(),
930 cache_pkt
->cacheReq
->getInst()->seqNum
,
931 cache_pkt
->cacheReq
->getInst()->getMemAddr());
933 // Cast to correct request type
934 CacheRequest
*cache_req
= dynamic_cast<CacheReqPtr
>(
935 findRequest(cache_pkt
->cacheReq
->getInst(), cache_pkt
->instIdx
));
938 panic("[tid:%u]: [sn:%i]: Can't find slot for cache access to "
939 "addr. %08p\n", cache_pkt
->cacheReq
->getInst()->readTid(),
940 cache_pkt
->cacheReq
->getInst()->seqNum
,
941 cache_pkt
->cacheReq
->getInst()->getMemAddr());
947 // Get resource request info
948 unsigned stage_num
= cache_req
->getStageNum();
949 DynInstPtr inst
= cache_req
->inst
;
950 ThreadID tid
= cache_req
->inst
->readTid();
952 assert(!cache_req
->isSquashed());
953 assert(inst
->staticInst
&& inst
->isMemRef());
956 DPRINTF(InOrderCachePort
,
957 "[tid:%u]: [sn:%i]: Processing cache access\n",
959 PacketPtr dataPkt
= NULL
;
961 if (inst
->splitInst
) {
962 inst
->splitFinishCnt
++;
964 if (inst
->splitFinishCnt
== 2) {
965 cache_req
->memReq
->setVirt(0/*inst->tid*/,
971 Packet
split_pkt(cache_req
->memReq
, cache_req
->pktCmd
,
975 if (inst
->isLoad()) {
976 split_pkt
.dataStatic(inst
->splitMemData
);
978 split_pkt
.dataStatic(&inst
->storeData
);
981 dataPkt
= &split_pkt
;
986 inst
->completeAcc(dataPkt
);
988 if (inst
->isLoad()) {
989 assert(cache_pkt
->isRead());
991 if (cache_pkt
->req
->isLLSC()) {
992 DPRINTF(InOrderCachePort
,
993 "[tid:%u]: Handling Load-Linked for [sn:%u]\n",
995 TheISA::handleLockedRead(cpu
, cache_pkt
->req
);
998 DPRINTF(InOrderCachePort
,
999 "[tid:%u]: [sn:%i]: Bytes loaded were: %s\n",
1001 printMemData(dataPkt
->getPtr
<uint8_t>(),
1002 dataPkt
->getSize()));
1003 } else if(inst
->isStore()) {
1004 assert(cache_pkt
->isWrite());
1006 DPRINTF(InOrderCachePort
,
1007 "[tid:%u]: [sn:%i]: Bytes stored were: %s\n",
1009 printMemData(dataPkt
->getPtr
<uint8_t>(),
1010 dataPkt
->getSize()));
1014 cache_req
->setMemAccPending(false);
1015 cache_req
->setMemAccCompleted();
1017 if (cache_req
->isMemStall() &&
1018 cpu
->threadModel
== InOrderCPU::SwitchOnCacheMiss
) {
1019 DPRINTF(InOrderCachePort
, "[tid:%u] Waking up from Cache Miss.\n",
1022 cpu
->activateContext(tid
);
1024 DPRINTF(ThreadModel
, "Activating [tid:%i] after return from cache"
1028 // Wake up the CPU (if it went to sleep and was waiting on this
1029 // completion event).
1032 DPRINTF(Activity
, "[tid:%u] Activating %s due to cache completion\n",
1033 tid
, cpu
->pipelineStage
[stage_num
]->name());
1035 cpu
->switchToActive(stage_num
);
// Retry from the cache: asserts the port was actually blocked and clears
// cachePortBlocked so subsequent execute()/doCacheAccess() calls may issue.
// NOTE(review): any wakeup logic after line 1046 is outside this extraction.
1039 CacheUnit::recvRetry()
1041 DPRINTF(InOrderCachePort
, "Unblocking Cache Port. \n");
1043 assert(cachePortBlocked
);
1045 // Clear the cache port for use again
1046 cachePortBlocked
= false;
// CacheUnitEvent: scheduled by doTLBAccess() on a TLB fault (one tick out)
// to wake the stalled thread.  The constructor body (lines 1052-1054) is
// missing from this extraction.  process() looks up the stalled slot's
// request, clears the owning CacheUnit's per-thread tlbBlocked flag,
// un-stalls the issuing pipeline stage, clears the request's tlbStall and,
// if the request was squashed meanwhile, frees its slot; finally wakes the
// CPU.
1051 CacheUnitEvent::CacheUnitEvent()
1056 CacheUnitEvent::process()
1058 DynInstPtr inst
= resource
->reqs
[slotIdx
]->inst
;
1059 int stage_num
= resource
->reqs
[slotIdx
]->getStageNum();
1060 ThreadID tid
= inst
->threadNumber
;
1061 CacheReqPtr req_ptr
= dynamic_cast<CacheReqPtr
>(resource
->reqs
[slotIdx
]);
1063 DPRINTF(InOrderTLB
, "Waking up from TLB Miss caused by [sn:%i].\n",
1066 CacheUnit
* tlb_res
= dynamic_cast<CacheUnit
*>(resource
);
1069 tlb_res
->tlbBlocked
[tid
] = false;
1071 tlb_res
->cpu
->pipelineStage
[stage_num
]->
1072 unsetResStall(tlb_res
->reqs
[slotIdx
], tid
);
1074 req_ptr
->tlbStall
= false;
1076 if (req_ptr
->isSquashed()) {
1077 req_ptr
->freeSlot();
1080 tlb_res
->cpu
->wakeCPU();
// Squash entry point for the SwitchOnCacheMiss model: forwards to squash()
// with squash_seq_num + 1 so the instruction that caused the memory stall
// is itself exempted from the squash (see the inline NOTEs).
1084 CacheUnit::squashDueToMemStall(DynInstPtr inst
, int stage_num
,
1085 InstSeqNum squash_seq_num
, ThreadID tid
)
1087 // If squashing due to memory stall, then we do NOT want to
1088 // squash the instruction that caused the stall so we
1089 // increment the sequence number here to prevent that.
1091 // NOTE: This is only for the SwitchOnCacheMiss Model
1092 // NOTE: If you have multiple outstanding misses from the same
1093 // thread then you need to reevaluate this code
1094 // NOTE: squash should originate from
1095 // pipeline_stage.cc:processInstSchedule
1096 DPRINTF(InOrderCachePort
, "Squashing above [sn:%u]\n",
1097 squash_seq_num
+ 1);
1099 squash(inst
, stage_num
, squash_seq_num
+ 1, tid
);
// Marks a single cache request (and its instruction) squashed and, if the
// instruction still holds a valid memory address, removes its entry from
// the address-dependency structures via removeAddrDependency().
// NOTE(review): two of the DPRINTF arguments (original lines 1112-1113)
// are missing from this extraction; code left byte-identical.
1103 CacheUnit::squashCacheRequest(CacheReqPtr req_ptr
)
1105 DynInstPtr inst
= req_ptr
->getInst();
1107 req_ptr
->setSquashed();
1108 inst
->setSquashed();
1109 if (inst
->validMemAddr()) {
1110 DPRINTF(AddrDep
, "Squash of [tid:%i] [sn:%i], attempting to "
1111 "remove addr. %08p dependencies.\n",
1114 inst
->getMemAddr());
1116 removeAddrDependency(inst
);
// Squashes every valid request from thread `tid` whose seqNum is strictly
// greater than squash_seq_num: already-squashed requests are skipped;
// otherwise squashCacheRequest() is invoked, a TLB-stalled request's
// thread/stage stall state is cleared, and the slot is freed unless the
// memory access is still pending (in which case the squash is logged and
// completion/cleanup is deferred — see processCacheCompletion's squashed
// path).
// NOTE(review): the two trailing DPRINTF message variants (1163+ and 1168+)
// belong to conditional branches whose structure was dropped by the
// extraction; code left byte-identical.
1122 CacheUnit::squash(DynInstPtr inst
, int stage_num
,
1123 InstSeqNum squash_seq_num
, ThreadID tid
)
1125 for (int i
= 0; i
< width
; i
++) {
1126 ResReqPtr req_ptr
= reqs
[i
];
1128 if (req_ptr
->valid
&&
1129 req_ptr
->getInst()->readTid() == tid
&&
1130 req_ptr
->getInst()->seqNum
> squash_seq_num
) {
1132 DPRINTF(InOrderCachePort
,
1133 "[tid:%i] Squashing request from [sn:%i]\n",
1134 req_ptr
->getInst()->readTid(), req_ptr
->getInst()->seqNum
);
1136 if (req_ptr
->isSquashed()) {
1137 DPRINTF(AddrDep
, "Request for [tid:%i] [sn:%i] already "
1138 "squashed, ignoring squash process.\n",
1139 req_ptr
->getInst()->readTid(),
1140 req_ptr
->getInst()->seqNum
);
1144 CacheReqPtr cache_req
= dynamic_cast<CacheReqPtr
>(req_ptr
);
1147 squashCacheRequest(cache_req
);
1149 int req_slot_num
= req_ptr
->getSlot();
1151 if (cache_req
->tlbStall
) {
1152 tlbBlocked
[tid
] = false;
1154 int stall_stage
= reqs
[req_slot_num
]->getStageNum();
1156 cpu
->pipelineStage
[stall_stage
]->
1157 unsetResStall(reqs
[req_slot_num
], tid
);
1160 if (!cache_req
->tlbStall
&& !cache_req
->isMemAccPending()) {
1161 freeSlot(req_slot_num
);
1163 DPRINTF(InOrderCachePort
,
1164 "[tid:%i] Request from [sn:%i] squashed, but still "
1165 "pending completion.\n",
1166 req_ptr
->getInst()->readTid(), req_ptr
->getInst()->seqNum
);
1168 "[tid:%i] Request from [sn:%i] squashed (split:%i), but "
1169 "still pending completion.\n",
1170 req_ptr
->getInst()->readTid(), req_ptr
->getInst()->seqNum
,
1171 req_ptr
->getInst()->splitInst
);