/*
 * Copyright (c) 2007 MIPS Technologies, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Korey Sewell
 */
#include <iomanip>
#include <sstream>
#include <string>

#include "arch/isa_traits.hh"
#include "arch/locked_mem.hh"
#include "arch/utility.hh"
#include "arch/predecoder.hh"
#include "config/the_isa.hh"
#include "cpu/inorder/resources/cache_unit.hh"
#include "cpu/inorder/pipeline_traits.hh"
#include "cpu/inorder/cpu.hh"
#include "cpu/inorder/resource_pool.hh"
#include "mem/request.hh"
47 using namespace TheISA
;
48 using namespace ThePipeline
;
/// Render `size` bytes starting at `data` as a lowercase hex string
/// (two digits per byte, no separators) for debug trace output.
/// Self-contained: uses standard stream manipulators instead of a
/// project formatting helper, so it has no gem5 dependencies.
static std::string
printMemData(uint8_t *data, unsigned size)
{
    std::stringstream dataStr;
    for (unsigned pos = 0; pos < size; pos++) {
        // setw is not sticky, so it must be re-applied for every byte
        dataStr << std::hex << std::setw(2) << std::setfill('0')
                << static_cast<unsigned>(data[pos]);
    }
    return dataStr.str();
}
63 CacheUnit::CachePort::recvAtomic(PacketPtr pkt
)
65 panic("CacheUnit::CachePort doesn't expect recvAtomic callback!");
70 CacheUnit::CachePort::recvFunctional(PacketPtr pkt
)
72 panic("CacheUnit::CachePort doesn't expect recvFunctional callback!");
76 CacheUnit::CachePort::recvStatusChange(Status status
)
78 if (status
== RangeChange
)
81 panic("CacheUnit::CachePort doesn't expect recvStatusChange callback!");
85 CacheUnit::CachePort::recvTiming(Packet
*pkt
)
87 cachePortUnit
->processCacheCompletion(pkt
);
92 CacheUnit::CachePort::recvRetry()
94 cachePortUnit
->recvRetry();
97 CacheUnit::CacheUnit(string res_name
, int res_id
, int res_width
,
98 int res_latency
, InOrderCPU
*_cpu
, ThePipeline::Params
*params
)
99 : Resource(res_name
, res_id
, res_width
, res_latency
, _cpu
),
100 cachePortBlocked(false), predecoder(NULL
)
102 cachePort
= new CachePort(this);
104 // Hard-Code Selection For Now
105 if (res_name
== "icache_port")
107 else if (res_name
== "dcache_port")
110 fatal("Unrecognized TLB name passed by user");
112 for (int i
=0; i
< MaxThreads
; i
++) {
113 tlbBlocked
[i
] = false;
125 CacheUnit::getPort(const string
&if_name
, int idx
)
127 if (if_name
== resName
)
136 // Currently Used to Model TLB Latency. Eventually
137 // Switch to Timing TLB translations.
138 resourceEvent
= new CacheUnitEvent
[width
];
144 CacheUnit::getSlot(DynInstPtr inst
)
146 ThreadID tid
= inst
->readTid();
148 if (tlbBlocked
[inst
->threadNumber
]) {
152 // For a Split-Load, the instruction would have processed once already
153 // causing the address to be unset.
154 if (!inst
->validMemAddr() && !inst
->splitInst
) {
155 panic("[tid:%i][sn:%i] Mem. Addr. must be set before requesting "
156 "cache access\n", inst
->readTid(), inst
->seqNum
);
159 Addr req_addr
= inst
->getMemAddr();
161 if (resName
== "icache_port" ||
162 find(addrList
[tid
].begin(), addrList
[tid
].end(), req_addr
) ==
163 addrList
[tid
].end()) {
165 int new_slot
= Resource::getSlot(inst
);
170 inst
->memTime
= curTick
;
171 setAddrDependency(inst
);
174 // Allow same instruction multiple accesses to same address
175 // should only happen maybe after a squashed inst. needs to replay
176 if (addrMap
[tid
][req_addr
] == inst
->seqNum
) {
177 int new_slot
= Resource::getSlot(inst
);
184 DPRINTF(InOrderCachePort
,
185 "[tid:%i] Denying request because there is an outstanding"
186 " request to/for addr. %08p. by [sn:%i] @ tick %i\n",
187 inst
->readTid(), req_addr
, addrMap
[tid
][req_addr
], inst
->memTime
);
196 CacheUnit::setAddrDependency(DynInstPtr inst
)
198 Addr req_addr
= inst
->getMemAddr();
199 ThreadID tid
= inst
->readTid();
201 addrList
[tid
].push_back(req_addr
);
202 addrMap
[tid
][req_addr
] = inst
->seqNum
;
205 "[tid:%i]: [sn:%i]: Address %08p added to dependency list\n",
206 inst
->readTid(), inst
->seqNum
, req_addr
);
208 //@NOTE: 10 is an arbitrarily "high" number here, but to be exact
209 // we would need to know the # of outstanding accesses
210 // a priori. Information like fetch width, stage width,
211 // and the branch resolution stage would be useful for the
212 // icache_port (among other things). For the dcache, the #
213 // of outstanding cache accesses might be sufficient.
214 assert(addrList
[tid
].size() < 10);
218 CacheUnit::removeAddrDependency(DynInstPtr inst
)
220 ThreadID tid
= inst
->readTid();
222 Addr mem_addr
= inst
->getMemAddr();
224 inst
->unsetMemAddr();
226 // Erase from Address List
227 vector
<Addr
>::iterator vect_it
= find(addrList
[tid
].begin(),
230 assert(vect_it
!= addrList
[tid
].end() || inst
->splitInst
);
232 if (vect_it
!= addrList
[tid
].end()) {
234 "[tid:%i]: [sn:%i] Address %08p removed from dependency "
235 "list\n", inst
->readTid(), inst
->seqNum
, (*vect_it
));
237 addrList
[tid
].erase(vect_it
);
239 // Erase From Address Map (Used for Debugging)
240 addrMap
[tid
].erase(addrMap
[tid
].find(mem_addr
));
247 CacheUnit::findRequest(DynInstPtr inst
)
249 map
<int, ResReqPtr
>::iterator map_it
= reqMap
.begin();
250 map
<int, ResReqPtr
>::iterator map_end
= reqMap
.end();
252 while (map_it
!= map_end
) {
253 CacheRequest
* cache_req
=
254 dynamic_cast<CacheRequest
*>((*map_it
).second
);
258 cache_req
->getInst() == inst
&&
259 cache_req
->instIdx
== inst
->resSched
.top()->idx
) {
269 CacheUnit::findSplitRequest(DynInstPtr inst
, int idx
)
271 map
<int, ResReqPtr
>::iterator map_it
= reqMap
.begin();
272 map
<int, ResReqPtr
>::iterator map_end
= reqMap
.end();
274 while (map_it
!= map_end
) {
275 CacheRequest
* cache_req
=
276 dynamic_cast<CacheRequest
*>((*map_it
).second
);
280 cache_req
->getInst() == inst
&&
281 cache_req
->instIdx
== idx
) {
292 CacheUnit::getRequest(DynInstPtr inst
, int stage_num
, int res_idx
,
293 int slot_num
, unsigned cmd
)
295 ScheduleEntry
* sched_entry
= inst
->resSched
.top();
297 if (!inst
->validMemAddr()) {
298 panic("Mem. Addr. must be set before requesting cache access\n");
301 MemCmd::Command pkt_cmd
;
303 switch (sched_entry
->cmd
)
305 case InitSecondSplitRead
:
306 pkt_cmd
= MemCmd::ReadReq
;
308 DPRINTF(InOrderCachePort
,
309 "[tid:%i]: Read request from [sn:%i] for addr %08p\n",
310 inst
->readTid(), inst
->seqNum
, inst
->split2ndAddr
);
313 case InitiateReadData
:
314 pkt_cmd
= MemCmd::ReadReq
;
316 DPRINTF(InOrderCachePort
,
317 "[tid:%i]: Read request from [sn:%i] for addr %08p\n",
318 inst
->readTid(), inst
->seqNum
, inst
->getMemAddr());
321 case InitSecondSplitWrite
:
322 pkt_cmd
= MemCmd::WriteReq
;
324 DPRINTF(InOrderCachePort
,
325 "[tid:%i]: Write request from [sn:%i] for addr %08p\n",
326 inst
->readTid(), inst
->seqNum
, inst
->split2ndAddr
);
329 case InitiateWriteData
:
330 pkt_cmd
= MemCmd::WriteReq
;
332 DPRINTF(InOrderCachePort
,
333 "[tid:%i]: Write request from [sn:%i] for addr %08p\n",
334 inst
->readTid(), inst
->seqNum
, inst
->getMemAddr());
338 pkt_cmd
= MemCmd::ReadReq
;
340 DPRINTF(InOrderCachePort
,
341 "[tid:%i]: Fetch request from [sn:%i] for addr %08p\n",
342 inst
->readTid(), inst
->seqNum
, inst
->getMemAddr());
346 panic("%i: Unexpected request type (%i) to %s", curTick
,
347 sched_entry
->cmd
, name());
350 return new CacheRequest(this, inst
, stage_num
, id
, slot_num
,
351 sched_entry
->cmd
, 0, pkt_cmd
,
352 0/*flags*/, this->cpu
->readCpuId(),
353 inst
->resSched
.top()->idx
);
357 CacheUnit::requestAgain(DynInstPtr inst
, bool &service_request
)
359 CacheReqPtr cache_req
= dynamic_cast<CacheReqPtr
>(findRequest(inst
));
362 // Check to see if this instruction is requesting the same command
363 // or a different one
364 if (cache_req
->cmd
!= inst
->resSched
.top()->cmd
&&
365 cache_req
->instIdx
== inst
->resSched
.top()->idx
) {
366 // If different, then update command in the request
367 cache_req
->cmd
= inst
->resSched
.top()->cmd
;
368 DPRINTF(InOrderCachePort
,
369 "[tid:%i]: [sn:%i]: Updating the command for this "
370 "instruction\n ", inst
->readTid(), inst
->seqNum
);
372 service_request
= true;
373 } else if (inst
->resSched
.top()->idx
!= CacheUnit::InitSecondSplitRead
&&
374 inst
->resSched
.top()->idx
!= CacheUnit::InitSecondSplitWrite
) {
375 // If same command, just check to see if memory access was completed
376 // but dont try to re-execute
377 DPRINTF(InOrderCachePort
,
378 "[tid:%i]: [sn:%i]: requesting this resource again\n",
379 inst
->readTid(), inst
->seqNum
);
381 service_request
= true;
386 CacheUnit::doTLBAccess(DynInstPtr inst
, CacheReqPtr cache_req
, int acc_size
,
387 int flags
, TheISA::TLB::Mode tlb_mode
)
389 ThreadID tid
= inst
->readTid();
390 Addr aligned_addr
= inst
->getMemAddr();
391 unsigned stage_num
= cache_req
->getStageNum();
392 unsigned slot_idx
= cache_req
->getSlot();
394 if (tlb_mode
== TheISA::TLB::Execute
) {
395 inst
->fetchMemReq
= new Request(inst
->readTid(), aligned_addr
,
396 acc_size
, flags
, inst
->readPC(),
397 cpu
->readCpuId(), inst
->readTid());
398 cache_req
->memReq
= inst
->fetchMemReq
;
400 if (!cache_req
->is2ndSplit()) {
401 inst
->dataMemReq
= new Request(cpu
->asid
[tid
], aligned_addr
,
402 acc_size
, flags
, inst
->readPC(),
403 cpu
->readCpuId(), inst
->readTid());
404 cache_req
->memReq
= inst
->dataMemReq
;
406 assert(inst
->splitInst
);
408 inst
->splitMemReq
= new Request(cpu
->asid
[tid
],
415 cache_req
->memReq
= inst
->splitMemReq
;
421 _tlb
->translateAtomic(cache_req
->memReq
,
422 cpu
->thread
[tid
]->getTC(), tlb_mode
);
424 if (cache_req
->fault
!= NoFault
) {
425 DPRINTF(InOrderTLB
, "[tid:%i]: %s encountered while translating "
426 "addr:%08p for [sn:%i].\n", tid
, cache_req
->fault
->name(),
427 cache_req
->memReq
->getVaddr(), inst
->seqNum
);
429 cpu
->pipelineStage
[stage_num
]->setResStall(cache_req
, tid
);
431 tlbBlocked
[tid
] = true;
433 cache_req
->tlbStall
= true;
435 scheduleEvent(slot_idx
, 1);
437 cpu
->trap(cache_req
->fault
, tid
, inst
);
439 DPRINTF(InOrderTLB
, "[tid:%i]: [sn:%i] virt. addr %08p translated "
440 "to phys. addr:%08p.\n", tid
, inst
->seqNum
,
441 cache_req
->memReq
->getVaddr(),
442 cache_req
->memReq
->getPaddr());
445 return cache_req
->fault
;
449 CacheUnit::read(DynInstPtr inst
, Addr addr
,
450 uint8_t *data
, unsigned size
, unsigned flags
)
452 CacheReqPtr cache_req
= dynamic_cast<CacheReqPtr
>(findRequest(inst
));
453 assert(cache_req
&& "Can't Find Instruction for Read!");
455 // The block size of our peer
456 unsigned blockSize
= this->cachePort
->peerBlockSize();
458 //The size of the data we're trying to read.
460 inst
->totalSize
= size
;
462 if (inst
->traceData
) {
463 inst
->traceData
->setAddr(addr
);
466 if (inst
->split2ndAccess
) {
467 size
= inst
->split2ndSize
;
468 cache_req
->splitAccess
= true;
469 cache_req
->split2ndAccess
= true;
471 DPRINTF(InOrderCachePort
, "[sn:%i] Split Read Access (2 of 2) for "
472 "(%#x, %#x).\n", inst
->seqNum
, inst
->getMemAddr(),
477 //The address of the second part of this access if it needs to be split
478 //across a cache line boundary.
479 Addr secondAddr
= roundDown(addr
+ size
- 1, blockSize
);
482 if (secondAddr
> addr
&& !inst
->split2ndAccess
) {
483 DPRINTF(InOrderCachePort
, "%i: sn[%i] Split Read Access (1 of 2) for "
484 "(%#x, %#x).\n", curTick
, inst
->seqNum
, addr
, secondAddr
);
486 // Save All "Total" Split Information
487 // ==============================
488 inst
->splitInst
= true;
489 inst
->splitMemData
= new uint8_t[size
];
491 if (!inst
->splitInstSked
) {
492 // Schedule Split Read/Complete for Instruction
493 // ==============================
494 int stage_num
= cache_req
->getStageNum();
496 int stage_pri
= ThePipeline::getNextPriority(inst
, stage_num
);
498 int isplit_cmd
= CacheUnit::InitSecondSplitRead
;
499 inst
->resSched
.push(new
500 ScheduleEntry(stage_num
,
502 cpu
->resPool
->getResIdx(DCache
),
506 int csplit_cmd
= CacheUnit::CompleteSecondSplitRead
;
507 inst
->resSched
.push(new
508 ScheduleEntry(stage_num
+ 1,
510 cpu
->resPool
->getResIdx(DCache
),
513 inst
->splitInstSked
= true;
515 DPRINTF(InOrderCachePort
, "[tid:%i] [sn:%i] Retrying Split Read "
516 "Access (1 of 2) for (%#x, %#x).\n", inst
->readTid(),
517 inst
->seqNum
, addr
, secondAddr
);
520 // Split Information for First Access
521 // ==============================
522 size
= secondAddr
- addr
;
523 cache_req
->splitAccess
= true;
525 // Split Information for Second Access
526 // ==============================
527 inst
->split2ndSize
= addr
+ fullSize
- secondAddr
;
528 inst
->split2ndAddr
= secondAddr
;
529 inst
->split2ndDataPtr
= inst
->splitMemData
+ size
;
530 inst
->split2ndFlags
= flags
;
533 doTLBAccess(inst
, cache_req
, size
, flags
, TheISA::TLB::Read
);
535 if (cache_req
->fault
== NoFault
) {
536 if (!cache_req
->splitAccess
) {
537 cache_req
->reqData
= new uint8_t[size
];
538 doCacheAccess(inst
, NULL
);
540 if (!inst
->split2ndAccess
) {
541 cache_req
->reqData
= inst
->splitMemData
;
543 cache_req
->reqData
= inst
->split2ndDataPtr
;
546 doCacheAccess(inst
, NULL
, cache_req
);
550 return cache_req
->fault
;
554 CacheUnit::write(DynInstPtr inst
, uint8_t *data
, unsigned size
,
555 Addr addr
, unsigned flags
, uint64_t *write_res
)
557 CacheReqPtr cache_req
= dynamic_cast<CacheReqPtr
>(findRequest(inst
));
558 assert(cache_req
&& "Can't Find Instruction for Write!");
560 // The block size of our peer
561 unsigned blockSize
= this->cachePort
->peerBlockSize();
563 //The size of the data we're trying to write.
565 inst
->totalSize
= size
;
567 if (inst
->traceData
) {
568 inst
->traceData
->setAddr(addr
);
571 if (inst
->split2ndAccess
) {
572 size
= inst
->split2ndSize
;
573 cache_req
->splitAccess
= true;
574 cache_req
->split2ndAccess
= true;
576 DPRINTF(InOrderCachePort
, "[sn:%i] Split Write Access (2 of 2) for "
577 "(%#x, %#x).\n", inst
->seqNum
, inst
->getMemAddr(),
581 //The address of the second part of this access if it needs to be split
582 //across a cache line boundary.
583 Addr secondAddr
= roundDown(addr
+ size
- 1, blockSize
);
585 if (secondAddr
> addr
&& !inst
->split2ndAccess
) {
587 DPRINTF(InOrderCachePort
, "[sn:%i] Split Write Access (1 of 2) for "
588 "(%#x, %#x).\n", inst
->seqNum
, addr
, secondAddr
);
590 // Save All "Total" Split Information
591 // ==============================
592 inst
->splitInst
= true;
594 if (!inst
->splitInstSked
) {
595 // Schedule Split Read/Complete for Instruction
596 // ==============================
597 int stage_num
= cache_req
->getStageNum();
599 int stage_pri
= ThePipeline::getNextPriority(inst
, stage_num
);
601 int isplit_cmd
= CacheUnit::InitSecondSplitWrite
;
602 inst
->resSched
.push(new
603 ScheduleEntry(stage_num
,
605 cpu
->resPool
->getResIdx(DCache
),
609 int csplit_cmd
= CacheUnit::CompleteSecondSplitWrite
;
610 inst
->resSched
.push(new
611 ScheduleEntry(stage_num
+ 1,
613 cpu
->resPool
->getResIdx(DCache
),
616 inst
->splitInstSked
= true;
618 DPRINTF(InOrderCachePort
, "[tid:%i] sn:%i] Retrying Split Read "
619 "Access (1 of 2) for (%#x, %#x).\n",
620 inst
->readTid(), inst
->seqNum
, addr
, secondAddr
);
625 // Split Information for First Access
626 // ==============================
627 size
= secondAddr
- addr
;
628 cache_req
->splitAccess
= true;
630 // Split Information for Second Access
631 // ==============================
632 inst
->split2ndSize
= addr
+ fullSize
- secondAddr
;
633 inst
->split2ndAddr
= secondAddr
;
634 inst
->split2ndStoreDataPtr
= &cache_req
->inst
->storeData
;
635 inst
->split2ndStoreDataPtr
+= size
;
636 inst
->split2ndFlags
= flags
;
637 inst
->splitInstSked
= true;
640 doTLBAccess(inst
, cache_req
, size
, flags
, TheISA::TLB::Write
);
642 if (cache_req
->fault
== NoFault
) {
643 if (!cache_req
->splitAccess
) {
644 // Remove this line since storeData is saved in INST?
645 cache_req
->reqData
= new uint8_t[size
];
646 doCacheAccess(inst
, write_res
);
648 doCacheAccess(inst
, write_res
, cache_req
);
653 return cache_req
->fault
;
658 CacheUnit::execute(int slot_num
)
660 if (cachePortBlocked
) {
661 DPRINTF(InOrderCachePort
, "Cache Port Blocked. Cannot Access\n");
665 CacheReqPtr cache_req
= dynamic_cast<CacheReqPtr
>(reqMap
[slot_num
]);
668 DynInstPtr inst
= cache_req
->inst
;
670 ThreadID tid
= inst
->readTid();
671 int seq_num
= inst
->seqNum
;
672 std::string acc_type
= "write";
676 cache_req
->fault
= NoFault
;
678 switch (cache_req
->cmd
)
682 //@TODO: Switch to size of full cache block. Store in fetch buffer
683 int acc_size
= sizeof(TheISA::MachInst
);
685 doTLBAccess(inst
, cache_req
, acc_size
, 0, TheISA::TLB::Execute
);
687 // Only Do Access if no fault from TLB
688 if (cache_req
->fault
== NoFault
) {
690 DPRINTF(InOrderCachePort
,
691 "[tid:%u]: Initiating fetch access to %s for addr. %08p\n",
692 tid
, name(), cache_req
->inst
->getMemAddr());
694 cache_req
->reqData
= new uint8_t[acc_size
];
696 inst
->setCurResSlot(slot_num
);
704 case InitiateReadData
:
708 case InitiateWriteData
:
710 DPRINTF(InOrderCachePort
,
711 "[tid:%u]: [sn:%i] Initiating data %s access to %s for "
712 "addr. %08p\n", tid
, inst
->seqNum
, acc_type
, name(),
713 cache_req
->inst
->getMemAddr());
715 inst
->setCurResSlot(slot_num
);
717 if (inst
->isDataPrefetch() || inst
->isInstPrefetch()) {
725 case InitSecondSplitRead
:
726 DPRINTF(InOrderCachePort
,
727 "[tid:%u]: [sn:%i] Initiating split data read access to %s "
728 "for addr. %08p\n", tid
, inst
->seqNum
, name(),
729 cache_req
->inst
->split2ndAddr
);
730 inst
->split2ndAccess
= true;
731 assert(inst
->split2ndAddr
!= 0);
732 read(inst
, inst
->split2ndAddr
, &inst
->split2ndData
,
733 inst
->totalSize
, inst
->split2ndFlags
);
736 case InitSecondSplitWrite
:
737 DPRINTF(InOrderCachePort
,
738 "[tid:%u]: [sn:%i] Initiating split data write access to %s "
739 "for addr. %08p\n", tid
, inst
->seqNum
, name(),
740 cache_req
->inst
->getMemAddr());
742 inst
->split2ndAccess
= true;
743 assert(inst
->split2ndAddr
!= 0);
744 write(inst
, &inst
->split2ndData
, inst
->totalSize
,
745 inst
->split2ndAddr
, inst
->split2ndFlags
, NULL
);
750 if (cache_req
->isMemAccComplete()) {
751 DPRINTF(InOrderCachePort
,
752 "[tid:%i]: Completing Fetch Access for [sn:%i]\n",
756 DPRINTF(InOrderCachePort
, "[tid:%i]: Instruction [sn:%i] is: %s\n",
757 tid
, seq_num
, inst
->staticInst
->disassemble(inst
->PC
));
759 removeAddrDependency(inst
);
761 delete cache_req
->dataPkt
;
763 // Do not stall and switch threads for fetch... for now..
764 // TODO: We need to detect cache misses for latencies > 1
765 // cache_req->setMemStall(false);
769 DPRINTF(InOrderCachePort
,
770 "[tid:%i]: [sn:%i]: Unable to Complete Fetch Access\n",
772 DPRINTF(InOrderStall
,
773 "STALL: [tid:%i]: Fetch miss from %08p\n",
774 tid
, cache_req
->inst
->readPC());
775 cache_req
->setCompleted(false);
776 //cache_req->setMemStall(true);
780 case CompleteReadData
:
781 case CompleteWriteData
:
782 DPRINTF(InOrderCachePort
,
783 "[tid:%i]: [sn:%i]: Trying to Complete Data Access\n",
786 if (cache_req
->isMemAccComplete() ||
787 inst
->isDataPrefetch() ||
788 inst
->isInstPrefetch()) {
789 removeAddrDependency(inst
);
790 cache_req
->setMemStall(false);
793 DPRINTF(InOrderStall
, "STALL: [tid:%i]: Data miss from %08p\n",
794 tid
, cache_req
->inst
->getMemAddr());
795 cache_req
->setCompleted(false);
796 cache_req
->setMemStall(true);
800 case CompleteSecondSplitRead
:
801 DPRINTF(InOrderCachePort
,
802 "[tid:%i]: [sn:%i]: Trying to Complete Split Data Read "
803 "Access\n", tid
, inst
->seqNum
);
805 if (cache_req
->isMemAccComplete() ||
806 inst
->isDataPrefetch() ||
807 inst
->isInstPrefetch()) {
808 removeAddrDependency(inst
);
809 cache_req
->setMemStall(false);
812 DPRINTF(InOrderStall
, "STALL: [tid:%i]: Data miss from %08p\n",
813 tid
, cache_req
->inst
->split2ndAddr
);
814 cache_req
->setCompleted(false);
815 cache_req
->setMemStall(true);
819 case CompleteSecondSplitWrite
:
820 DPRINTF(InOrderCachePort
,
821 "[tid:%i]: [sn:%i]: Trying to Complete Split Data Write "
822 "Access\n", tid
, inst
->seqNum
);
824 if (cache_req
->isMemAccComplete() ||
825 inst
->isDataPrefetch() ||
826 inst
->isInstPrefetch()) {
827 removeAddrDependency(inst
);
828 cache_req
->setMemStall(false);
831 DPRINTF(InOrderStall
, "STALL: [tid:%i]: Data miss from %08p\n",
832 tid
, cache_req
->inst
->split2ndAddr
);
833 cache_req
->setCompleted(false);
834 cache_req
->setMemStall(true);
839 fatal("Unrecognized command to %s", resName
);
844 CacheUnit::prefetch(DynInstPtr inst
)
846 warn_once("Prefetching currently unimplemented");
848 CacheReqPtr cache_req
849 = dynamic_cast<CacheReqPtr
>(reqMap
[inst
->getCurResSlot()]);
852 // Clean-Up cache resource request so
853 // other memory insts. can use them
854 cache_req
->setCompleted();
855 cachePortBlocked
= false;
856 cache_req
->setMemAccPending(false);
857 cache_req
->setMemAccCompleted();
858 inst
->unsetMemAddr();
863 CacheUnit::writeHint(DynInstPtr inst
)
865 warn_once("Write Hints currently unimplemented");
867 CacheReqPtr cache_req
868 = dynamic_cast<CacheReqPtr
>(reqMap
[inst
->getCurResSlot()]);
871 // Clean-Up cache resource request so
872 // other memory insts. can use them
873 cache_req
->setCompleted();
874 cachePortBlocked
= false;
875 cache_req
->setMemAccPending(false);
876 cache_req
->setMemAccCompleted();
877 inst
->unsetMemAddr();
880 // @TODO: Split into doCacheRead() and doCacheWrite()
882 CacheUnit::doCacheAccess(DynInstPtr inst
, uint64_t *write_res
,
883 CacheReqPtr split_req
)
885 Fault fault
= NoFault
;
887 ThreadID tid
= inst
->readTid();
890 CacheReqPtr cache_req
;
892 if (split_req
== NULL
) {
893 cache_req
= dynamic_cast<CacheReqPtr
>(reqMap
[inst
->getCurResSlot()]);
895 cache_req
= split_req
;
900 // Check for LL/SC and if so change command
901 if (cache_req
->memReq
->isLLSC() && cache_req
->pktCmd
== MemCmd::ReadReq
) {
902 cache_req
->pktCmd
= MemCmd::LoadLockedReq
;
905 if (cache_req
->pktCmd
== MemCmd::WriteReq
) {
907 cache_req
->memReq
->isSwap() ? MemCmd::SwapReq
:
908 (cache_req
->memReq
->isLLSC() ? MemCmd::StoreCondReq
912 cache_req
->dataPkt
= new CacheReqPacket(cache_req
,
917 if (cache_req
->dataPkt
->isRead()) {
918 cache_req
->dataPkt
->dataStatic(cache_req
->reqData
);
919 } else if (cache_req
->dataPkt
->isWrite()) {
920 if (inst
->split2ndAccess
) {
921 cache_req
->dataPkt
->dataStatic(inst
->split2ndStoreDataPtr
);
923 cache_req
->dataPkt
->dataStatic(&cache_req
->inst
->storeData
);
926 if (cache_req
->memReq
->isCondSwap()) {
928 cache_req
->memReq
->setExtraData(*write_res
);
932 bool do_access
= true; // flag to suppress cache access
934 Request
*memReq
= cache_req
->dataPkt
->req
;
936 if (cache_req
->dataPkt
->isWrite() && cache_req
->memReq
->isLLSC()) {
937 assert(cache_req
->inst
->isStoreConditional());
938 DPRINTF(InOrderCachePort
, "Evaluating Store Conditional access\n");
939 do_access
= TheISA::handleLockedWrite(cpu
, memReq
);
942 DPRINTF(InOrderCachePort
,
943 "[tid:%i] [sn:%i] attempting to access cache\n",
947 if (!cachePort
->sendTiming(cache_req
->dataPkt
)) {
948 DPRINTF(InOrderCachePort
,
949 "[tid:%i] [sn:%i] cannot access cache, because port "
950 "is blocked. now waiting to retry request\n", tid
,
952 cache_req
->setCompleted(false);
953 cachePortBlocked
= true;
955 DPRINTF(InOrderCachePort
,
956 "[tid:%i] [sn:%i] is now waiting for cache response\n",
958 cache_req
->setCompleted();
959 cache_req
->setMemAccPending();
960 cachePortBlocked
= false;
962 } else if (!do_access
&& memReq
->isLLSC()){
963 // Store-Conditional instructions complete even if they "failed"
964 assert(cache_req
->inst
->isStoreConditional());
965 cache_req
->setCompleted(true);
968 "[tid:%i]: T%i Ignoring Failed Store Conditional Access\n",
971 processCacheCompletion(cache_req
->dataPkt
);
973 // Make cache request again since access due to
974 // inability to access
975 DPRINTF(InOrderStall
, "STALL: \n");
976 cache_req
->setCompleted(false);
983 CacheUnit::processCacheCompletion(PacketPtr pkt
)
985 // Cast to correct packet type
986 CacheReqPacket
* cache_pkt
= dynamic_cast<CacheReqPacket
*>(pkt
);
990 if (cache_pkt
->cacheReq
->isSquashed()) {
991 DPRINTF(InOrderCachePort
,
992 "Ignoring completion of squashed access, [tid:%i] [sn:%i]\n",
993 cache_pkt
->cacheReq
->getInst()->readTid(),
994 cache_pkt
->cacheReq
->getInst()->seqNum
);
996 "Ignoring completion of squashed access, [tid:%i] [sn:%i]\n",
997 cache_pkt
->cacheReq
->getTid(),
998 cache_pkt
->cacheReq
->seqNum
);
1000 cache_pkt
->cacheReq
->done();
1008 DPRINTF(InOrderCachePort
,
1009 "[tid:%u]: [sn:%i]: Waking from cache access to addr. %08p\n",
1010 cache_pkt
->cacheReq
->getInst()->readTid(),
1011 cache_pkt
->cacheReq
->getInst()->seqNum
,
1012 cache_pkt
->cacheReq
->getInst()->getMemAddr());
1014 // Cast to correct request type
1015 CacheRequest
*cache_req
= dynamic_cast<CacheReqPtr
>(
1016 findSplitRequest(cache_pkt
->cacheReq
->getInst(), cache_pkt
->instIdx
));
1019 panic("[tid:%u]: [sn:%i]: Can't find slot for cache access to "
1020 "addr. %08p\n", cache_pkt
->cacheReq
->getInst()->readTid(),
1021 cache_pkt
->cacheReq
->getInst()->seqNum
,
1022 cache_pkt
->cacheReq
->getInst()->getMemAddr());
1028 // Get resource request info
1029 unsigned stage_num
= cache_req
->getStageNum();
1030 DynInstPtr inst
= cache_req
->inst
;
1031 ThreadID tid
= cache_req
->inst
->readTid();
1033 if (!cache_req
->isSquashed()) {
1034 if (inst
->resSched
.top()->cmd
== CompleteFetch
) {
1035 DPRINTF(InOrderCachePort
,
1036 "[tid:%u]: [sn:%i]: Processing fetch access\n",
1039 // NOTE: This is only allowing a thread to fetch one line
1040 // at a time. Re-examine when/if prefetching
1041 // gets implemented.
1042 //memcpy(fetchData[tid], cache_pkt->getPtr<uint8_t>(),
1043 // cache_pkt->getSize());
1045 // Get the instruction from the array of the cache line.
1046 // @todo: update thsi
1047 ExtMachInst ext_inst
;
1048 StaticInstPtr staticInst
= NULL
;
1049 Addr inst_pc
= inst
->readPC();
1050 MachInst mach_inst
=
1051 TheISA::gtoh(*reinterpret_cast<TheISA::MachInst
*>
1052 (cache_pkt
->getPtr
<uint8_t>()));
1054 predecoder
.setTC(cpu
->thread
[tid
]->getTC());
1055 predecoder
.moreBytes(inst_pc
, inst_pc
, mach_inst
);
1056 ext_inst
= predecoder
.getExtMachInst();
1058 inst
->setMachInst(ext_inst
);
1060 // Set Up More TraceData info
1061 if (inst
->traceData
) {
1062 inst
->traceData
->setStaticInst(inst
->staticInst
);
1063 inst
->traceData
->setPC(inst
->readPC());
1066 } else if (inst
->staticInst
&& inst
->isMemRef()) {
1067 DPRINTF(InOrderCachePort
,
1068 "[tid:%u]: [sn:%i]: Processing cache access\n",
1070 PacketPtr dataPkt
= NULL
;
1072 if (inst
->splitInst
) {
1073 inst
->splitFinishCnt
++;
1075 if (inst
->splitFinishCnt
== 2) {
1076 cache_req
->memReq
->setVirt(0/*inst->tid*/,
1082 Packet
split_pkt(cache_req
->memReq
, cache_req
->pktCmd
,
1086 if (inst
->isLoad()) {
1087 split_pkt
.dataStatic(inst
->splitMemData
);
1089 split_pkt
.dataStatic(&inst
->storeData
);
1092 dataPkt
= &split_pkt
;
1097 inst
->completeAcc(dataPkt
);
1099 if (inst
->isLoad()) {
1100 assert(cache_pkt
->isRead());
1102 if (cache_pkt
->req
->isLLSC()) {
1103 DPRINTF(InOrderCachePort
,
1104 "[tid:%u]: Handling Load-Linked for [sn:%u]\n",
1106 TheISA::handleLockedRead(cpu
, cache_pkt
->req
);
1109 DPRINTF(InOrderCachePort
,
1110 "[tid:%u]: [sn:%i]: Bytes loaded were: %s\n",
1112 printMemData(dataPkt
->getPtr
<uint8_t>(),
1113 dataPkt
->getSize()));
1114 } else if(inst
->isStore()) {
1115 assert(cache_pkt
->isWrite());
1117 DPRINTF(InOrderCachePort
,
1118 "[tid:%u]: [sn:%i]: Bytes stored were: %s\n",
1120 printMemData(dataPkt
->getPtr
<uint8_t>(),
1121 dataPkt
->getSize()));
1127 cache_req
->setMemAccPending(false);
1128 cache_req
->setMemAccCompleted();
1130 if (cache_req
->isMemStall() &&
1131 cpu
->threadModel
== InOrderCPU::SwitchOnCacheMiss
) {
1132 DPRINTF(InOrderCachePort
, "[tid:%u] Waking up from Cache Miss.\n",
1135 cpu
->activateContext(tid
);
1137 DPRINTF(ThreadModel
, "Activating [tid:%i] after return from cache"
1141 // Wake up the CPU (if it went to sleep and was waiting on this
1142 // completion event).
1145 DPRINTF(Activity
, "[tid:%u] Activating %s due to cache completion\n",
1146 tid
, cpu
->pipelineStage
[stage_num
]->name());
1148 cpu
->switchToActive(stage_num
);
1150 DPRINTF(InOrderCachePort
,
1151 "[tid:%u] Miss on block @ %08p completed, but squashed\n",
1152 tid
, cache_req
->inst
->readPC());
1153 cache_req
->setMemAccCompleted();
1158 CacheUnit::recvRetry()
1160 DPRINTF(InOrderCachePort
, "Unblocking Cache Port. \n");
1162 assert(cachePortBlocked
);
1164 // Clear the cache port for use again
1165 cachePortBlocked
= false;
1170 CacheUnitEvent::CacheUnitEvent()
1175 CacheUnitEvent::process()
1177 DynInstPtr inst
= resource
->reqMap
[slotIdx
]->inst
;
1178 int stage_num
= resource
->reqMap
[slotIdx
]->getStageNum();
1179 ThreadID tid
= inst
->threadNumber
;
1180 CacheReqPtr req_ptr
= dynamic_cast<CacheReqPtr
>(resource
->reqMap
[slotIdx
]);
1182 DPRINTF(InOrderTLB
, "Waking up from TLB Miss caused by [sn:%i].\n",
1185 CacheUnit
* tlb_res
= dynamic_cast<CacheUnit
*>(resource
);
1188 tlb_res
->tlbBlocked
[tid
] = false;
1190 tlb_res
->cpu
->pipelineStage
[stage_num
]->
1191 unsetResStall(tlb_res
->reqMap
[slotIdx
], tid
);
1193 req_ptr
->tlbStall
= false;
1195 if (req_ptr
->isSquashed()) {
1201 CacheUnit::squashDueToMemStall(DynInstPtr inst
, int stage_num
,
1202 InstSeqNum squash_seq_num
, ThreadID tid
)
1204 // If squashing due to memory stall, then we do NOT want to
1205 // squash the instruction that caused the stall so we
1206 // increment the sequence number here to prevent that.
1208 // NOTE: This is only for the SwitchOnCacheMiss Model
1209 // NOTE: If you have multiple outstanding misses from the same
1210 // thread then you need to reevaluate this code
1211 // NOTE: squash should originate from
1212 // pipeline_stage.cc:processInstSchedule
1213 DPRINTF(InOrderCachePort
, "Squashing above [sn:%u]\n",
1214 squash_seq_num
+ 1);
1216 squash(inst
, stage_num
, squash_seq_num
+ 1, tid
);
1221 CacheUnit::squash(DynInstPtr inst
, int stage_num
,
1222 InstSeqNum squash_seq_num
, ThreadID tid
)
1224 vector
<int> slot_remove_list
;
1226 map
<int, ResReqPtr
>::iterator map_it
= reqMap
.begin();
1227 map
<int, ResReqPtr
>::iterator map_end
= reqMap
.end();
1229 while (map_it
!= map_end
) {
1230 ResReqPtr req_ptr
= (*map_it
).second
;
1233 req_ptr
->getInst()->readTid() == tid
&&
1234 req_ptr
->getInst()->seqNum
> squash_seq_num
) {
1236 DPRINTF(InOrderCachePort
,
1237 "[tid:%i] Squashing request from [sn:%i]\n",
1238 req_ptr
->getInst()->readTid(), req_ptr
->getInst()->seqNum
);
1240 if (req_ptr
->isSquashed()) {
1241 DPRINTF(AddrDep
, "Request for [tid:%i] [sn:%i] already "
1242 "squashed, ignoring squash process.\n",
1243 req_ptr
->getInst()->readTid(),
1244 req_ptr
->getInst()->seqNum
);
1249 req_ptr
->setSquashed();
1251 req_ptr
->getInst()->setSquashed();
1253 CacheReqPtr cache_req
= dynamic_cast<CacheReqPtr
>(req_ptr
);
1256 int req_slot_num
= req_ptr
->getSlot();
1258 if (cache_req
->tlbStall
) {
1259 tlbBlocked
[tid
] = false;
1261 int stall_stage
= reqMap
[req_slot_num
]->getStageNum();
1263 cpu
->pipelineStage
[stall_stage
]->
1264 unsetResStall(reqMap
[req_slot_num
], tid
);
1267 if (!cache_req
->tlbStall
&& !cache_req
->isMemAccPending()) {
1268 // Mark request for later removal
1269 cpu
->reqRemoveList
.push(req_ptr
);
1271 // Mark slot for removal from resource
1272 slot_remove_list
.push_back(req_ptr
->getSlot());
1274 DPRINTF(InOrderCachePort
,
1275 "[tid:%i] Request from [sn:%i] squashed, but still "
1276 "pending completion.\n",
1277 req_ptr
->getInst()->readTid(), req_ptr
->getInst()->seqNum
);
1279 "[tid:%i] Request from [sn:%i] squashed (split:%i), but "
1280 "still pending completion.\n",
1281 req_ptr
->getInst()->readTid(), req_ptr
->getInst()->seqNum
,
1282 req_ptr
->getInst()->splitInst
);
1285 if (req_ptr
->getInst()->validMemAddr()) {
1286 DPRINTF(AddrDep
, "Squash of [tid:%i] [sn:%i], attempting to "
1287 "remove addr. %08p dependencies.\n",
1288 req_ptr
->getInst()->readTid(),
1289 req_ptr
->getInst()->seqNum
,
1290 req_ptr
->getInst()->getMemAddr());
1292 removeAddrDependency(req_ptr
->getInst());
1299 // Now Delete Slot Entry from Req. Map
1300 for (int i
= 0; i
< slot_remove_list
.size(); i
++)
1301 freeSlot(slot_remove_list
[i
]);