/*
 * Copyright (c) 2007 MIPS Technologies, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Korey Sewell
 */
34 #include "arch/isa_traits.hh"
35 #include "arch/locked_mem.hh"
36 #include "arch/utility.hh"
37 #include "arch/predecoder.hh"
38 #include "cpu/inorder/resources/cache_unit.hh"
39 #include "cpu/inorder/pipeline_traits.hh"
40 #include "cpu/inorder/cpu.hh"
41 #include "mem/request.hh"
44 using namespace TheISA
;
45 using namespace ThePipeline
;
48 CacheUnit::CachePort::recvAtomic(PacketPtr pkt
)
50 panic("DefaultFetch doesn't expect recvAtomic callback!");
55 CacheUnit::CachePort::recvFunctional(PacketPtr pkt
)
57 panic("DefaultFetch doesn't expect recvFunctional callback!");
61 CacheUnit::CachePort::recvStatusChange(Status status
)
63 if (status
== RangeChange
)
66 panic("DefaultFetch doesn't expect recvStatusChange callback!");
70 CacheUnit::CachePort::recvTiming(Packet
*pkt
)
72 cachePortUnit
->processCacheCompletion(pkt
);
77 CacheUnit::CachePort::recvRetry()
79 cachePortUnit
->recvRetry();
82 CacheUnit::CacheUnit(string res_name
, int res_id
, int res_width
,
83 int res_latency
, InOrderCPU
*_cpu
, ThePipeline::Params
*params
)
84 : Resource(res_name
, res_id
, res_width
, res_latency
, _cpu
),
85 retryPkt(NULL
), retrySlot(-1), cacheBlocked(false),
88 cachePort
= new CachePort(this);
92 CacheUnit::getPort(const string
&if_name
, int idx
)
94 if (if_name
== resName
)
101 CacheUnit::getSlot(DynInstPtr inst
)
103 if (!inst
->validMemAddr()) {
104 panic("Mem. Addr. must be set before requesting cache access\n");
107 Addr req_addr
= inst
->getMemAddr();
109 if (resName
== "icache_port" ||
110 find(addrList
.begin(), addrList
.end(), req_addr
) == addrList
.end()) {
112 int new_slot
= Resource::getSlot(inst
);
117 inst
->memTime
= curTick
;
118 addrList
.push_back(req_addr
);
119 addrMap
[req_addr
] = inst
->seqNum
;
120 DPRINTF(InOrderCachePort
,
121 "[tid:%i]: [sn:%i]: Address %08p added to dependency list\n",
122 inst
->readTid(), inst
->seqNum
, req_addr
);
125 DPRINTF(InOrderCachePort
,
126 "Denying request because there is an outstanding"
127 " request to/for addr. %08p. by [sn:%i] @ tick %i\n",
128 req_addr
, addrMap
[req_addr
], inst
->memTime
);
134 CacheUnit::freeSlot(int slot_num
)
136 vector
<Addr
>::iterator vect_it
= find(addrList
.begin(), addrList
.end(),
137 reqMap
[slot_num
]->inst
->getMemAddr());
138 assert(vect_it
!= addrList
.end());
140 DPRINTF(InOrderCachePort
,
141 "[tid:%i]: Address %08p removed from dependency list\n",
142 reqMap
[slot_num
]->inst
->readTid(), (*vect_it
));
144 addrList
.erase(vect_it
);
146 Resource::freeSlot(slot_num
);
150 CacheUnit::getRequest(DynInstPtr inst
, int stage_num
, int res_idx
,
151 int slot_num
, unsigned cmd
)
153 ScheduleEntry
* sched_entry
= inst
->resSched
.top();
155 if (!inst
->validMemAddr()) {
156 panic("Mem. Addr. must be set before requesting cache access\n");
160 MemCmd::Command pkt_cmd
;
162 if (sched_entry
->cmd
== InitiateReadData
) {
163 pkt_cmd
= MemCmd::ReadReq
;
164 req_size
= inst
->getMemAccSize();
166 DPRINTF(InOrderCachePort
,
167 "[tid:%i]: %i byte Read request from [sn:%i] for addr %08p\n",
168 inst
->readTid(), req_size
, inst
->seqNum
, inst
->getMemAddr());
169 } else if (sched_entry
->cmd
== InitiateWriteData
) {
170 pkt_cmd
= MemCmd::WriteReq
;
171 req_size
= inst
->getMemAccSize();
173 DPRINTF(InOrderCachePort
,
174 "[tid:%i]: %i byte Write request from [sn:%i] for addr %08p\n",
175 inst
->readTid(), req_size
, inst
->seqNum
, inst
->getMemAddr());
176 } else if (sched_entry
->cmd
== InitiateFetch
){
177 pkt_cmd
= MemCmd::ReadReq
;
178 req_size
= sizeof(MachInst
); //@TODO: mips16e
180 DPRINTF(InOrderCachePort
,
181 "[tid:%i]: %i byte Fetch request from [sn:%i] for addr %08p\n",
182 inst
->readTid(), req_size
, inst
->seqNum
, inst
->getMemAddr());
184 panic("%i: Unexpected request type (%i) to %s", curTick
,
185 sched_entry
->cmd
, name());
188 return new CacheRequest(this, inst
, stage_num
, id
, slot_num
,
189 sched_entry
->cmd
, req_size
, pkt_cmd
,
190 0/*flags*/, this->cpu
->readCpuId());
194 CacheUnit::requestAgain(DynInstPtr inst
, bool &service_request
)
196 //service_request = false;
198 CacheReqPtr cache_req
= dynamic_cast<CacheReqPtr
>(findRequest(inst
));
201 // Check to see if this instruction is requesting the same command
202 // or a different one
203 if (cache_req
->cmd
!= inst
->resSched
.top()->cmd
) {
204 // If different, then update command in the request
205 cache_req
->cmd
= inst
->resSched
.top()->cmd
;
206 DPRINTF(InOrderCachePort
,
207 "[tid:%i]: [sn:%i]: the command for this instruction\n",
208 inst
->readTid(), inst
->seqNum
);
210 service_request
= true;
212 // If same command, just check to see if memory access was completed
213 // but dont try to re-execute
214 DPRINTF(InOrderCachePort
,
215 "[tid:%i]: [sn:%i]: requesting this resource again\n",
216 inst
->readTid(), inst
->seqNum
);
218 service_request
= true;
223 CacheUnit::execute(int slot_num
)
226 DPRINTF(InOrderCachePort
, "Cache Blocked. Cannot Access\n");
230 CacheReqPtr cache_req
= dynamic_cast<CacheReqPtr
>(reqMap
[slot_num
]);
233 DynInstPtr inst
= cache_req
->inst
;
237 tid
= inst
->readTid();
238 seq_num
= inst
->seqNum
;
239 cache_req
->fault
= NoFault
;
241 switch (cache_req
->cmd
)
244 DPRINTF(InOrderCachePort
,
245 "[tid:%u]: Initiating fetch access to %s for addr. %08p\n",
246 tid
, name(), cache_req
->inst
->getMemAddr());
248 DPRINTF(InOrderCachePort
,
249 "[tid:%u]: Fetching new cache block from addr: %08p\n",
250 tid
, cache_req
->memReq
->getVaddr());
252 inst
->setCurResSlot(slot_num
);
257 // @TODO: MOVE Functionality of handling fetched data into 'fetch unit'
258 // let cache-unit just be responsible for transferring data.
259 if (cache_req
->isMemAccComplete()) {
260 DPRINTF(InOrderCachePort
,
261 "[tid:%i]: Completing Fetch Access for [sn:%i]\n",
265 DPRINTF(InOrderCachePort
, "[tid:%i]: Instruction [sn:%i] is: %s\n",
266 tid
, seq_num
, inst
->staticInst
->disassemble(inst
->PC
));
268 delete cache_req
->dataPkt
;
271 DPRINTF(InOrderCachePort
,
272 "[tid:%i]: [sn:%i]: Unable to Complete Fetch Access\n",
274 DPRINTF(InOrderStall
,
275 "STALL: [tid:%i]: Fetch miss from %08p\n",
276 tid
, cache_req
->inst
->readPC());
277 cache_req
->setCompleted(false);
281 case InitiateReadData
:
282 case InitiateWriteData
:
283 DPRINTF(InOrderCachePort
,
284 "[tid:%u]: Initiating data access to %s for addr. %08p\n",
285 tid
, name(), cache_req
->inst
->getMemAddr());
287 inst
->setCurResSlot(slot_num
);
289 if (inst
->isDataPrefetch() || inst
->isInstPrefetch()) {
297 case CompleteReadData
:
298 case CompleteWriteData
:
299 DPRINTF(InOrderCachePort
,
300 "[tid:%i]: [sn:%i]: Trying to Complete Data Access\n",
303 if (cache_req
->isMemAccComplete() ||
304 inst
->isDataPrefetch() ||
305 inst
->isInstPrefetch()) {
308 DPRINTF(InOrderStall
, "STALL: [tid:%i]: Data miss from %08p\n",
309 tid
, cache_req
->inst
->getMemAddr());
310 cache_req
->setCompleted(false);
315 fatal("Unrecognized command to %s", resName
);
320 CacheUnit::prefetch(DynInstPtr inst
)
322 warn_once("Prefetching currently unimplemented");
324 CacheReqPtr cache_req
325 = dynamic_cast<CacheReqPtr
>(reqMap
[inst
->getCurResSlot()]);
328 // Clean-Up cache resource request so
329 // other memory insts. can use them
330 cache_req
->setCompleted();
331 cacheStatus
= cacheAccessComplete
;
332 cacheBlocked
= false;
333 cache_req
->setMemAccPending(false);
334 cache_req
->setMemAccCompleted();
335 inst
->unsetMemAddr();
340 CacheUnit::writeHint(DynInstPtr inst
)
342 warn_once("Write Hints currently unimplemented");
344 CacheReqPtr cache_req
345 = dynamic_cast<CacheReqPtr
>(reqMap
[inst
->getCurResSlot()]);
348 // Clean-Up cache resource request so
349 // other memory insts. can use them
350 cache_req
->setCompleted();
351 cacheStatus
= cacheAccessComplete
;
352 cacheBlocked
= false;
353 cache_req
->setMemAccPending(false);
354 cache_req
->setMemAccCompleted();
355 inst
->unsetMemAddr();
359 CacheUnit::doDataAccess(DynInstPtr inst
)
361 Fault fault
= NoFault
;
364 tid
= inst
->readTid();
366 CacheReqPtr cache_req
367 = dynamic_cast<CacheReqPtr
>(reqMap
[inst
->getCurResSlot()]);
370 cache_req
->dataPkt
= new CacheReqPacket(cache_req
, cache_req
->pktCmd
,
373 if (cache_req
->dataPkt
->isRead()) {
374 cache_req
->dataPkt
->dataStatic(cache_req
->reqData
);
375 } else if (cache_req
->dataPkt
->isWrite()) {
376 cache_req
->dataPkt
->dataStatic(&cache_req
->inst
->storeData
);
379 cache_req
->dataPkt
->time
= curTick
;
381 bool do_access
= true; // flag to suppress cache access
383 Request
*memReq
= cache_req
->dataPkt
->req
;
385 if (cache_req
->dataPkt
->isWrite() && memReq
->isLLSC()) {
386 assert(cache_req
->inst
->isStoreConditional());
387 DPRINTF(InOrderCachePort
, "Evaluating Store Conditional access\n");
388 do_access
= TheISA::handleLockedWrite(cpu
, memReq
);
391 DPRINTF(InOrderCachePort
,
392 "[tid:%i] [sn:%i] attempting to access cache\n",
395 //@TODO: If you want to ignore failed store conditional accesses, then
396 // enable this. However, this might skew memory stats because
397 // the failed store conditional access will get ignored.
398 // - Remove optionality here ...
399 if (1/*do_access*/) {
400 if (!cachePort
->sendTiming(cache_req
->dataPkt
)) {
401 DPRINTF(InOrderCachePort
,
402 "[tid:%i] [sn:%i] is waiting to retry request\n",
405 retrySlot
= cache_req
->getSlot();
406 retryReq
= cache_req
;
407 retryPkt
= cache_req
->dataPkt
;
409 cacheStatus
= cacheWaitRetry
;
411 //cacheBlocked = true;
413 DPRINTF(InOrderStall
, "STALL: \n");
415 cache_req
->setCompleted(false);
417 DPRINTF(InOrderCachePort
,
418 "[tid:%i] [sn:%i] is now waiting for cache response\n",
420 cache_req
->setCompleted();
421 cache_req
->setMemAccPending();
422 cacheStatus
= cacheWaitResponse
;
423 cacheBlocked
= false;
425 } else if (!do_access
&& memReq
->isLLSC()){
426 // Store-Conditional instructions complete even if they "failed"
427 assert(cache_req
->inst
->isStoreConditional());
428 cache_req
->setCompleted(true);
431 "[tid:%i]: T%i Ignoring Failed Store Conditional Access\n",
434 cache_req
->dataPkt
->req
->setExtraData(0);
436 processCacheCompletion(cache_req
->dataPkt
);
438 // Automatically set these since we ignored the memory access
439 //cache_req->setMemAccPending(false);
440 //cache_req->setMemAccCompleted();
442 // Make cache request again since access due to
443 // inability to access
444 DPRINTF(InOrderStall
, "STALL: \n");
445 cache_req
->setCompleted(false);
452 CacheUnit::processCacheCompletion(PacketPtr pkt
)
454 // Cast to correct packet type
455 CacheReqPacket
* cache_pkt
= dynamic_cast<CacheReqPacket
*>(pkt
);
458 if (cache_pkt
->cacheReq
->isSquashed()) {
459 DPRINTF(InOrderCachePort
,
460 "Ignoring completion of squashed access, [tid:%i] [sn:%i]\n",
461 cache_pkt
->cacheReq
->getInst()->readTid(),
462 cache_pkt
->cacheReq
->getInst()->seqNum
);
464 cache_pkt
->cacheReq
->done();
469 DPRINTF(InOrderCachePort
,
470 "[tid:%u]: [sn:%i]: Waking from cache access to addr. %08p\n",
471 cache_pkt
->cacheReq
->getInst()->readTid(),
472 cache_pkt
->cacheReq
->getInst()->seqNum
,
473 cache_pkt
->cacheReq
->getInst()->getMemAddr());
475 // Cast to correct request type
476 CacheRequest
*cache_req
= dynamic_cast<CacheReqPtr
>(
477 findRequest(cache_pkt
->cacheReq
->getInst()));
481 // Get resource request info
482 unsigned stage_num
= cache_req
->getStageNum();
483 DynInstPtr inst
= cache_req
->inst
;
487 tid
= cache_req
->inst
->readTid();
489 if (!cache_req
->isSquashed()) {
490 if (inst
->resSched
.top()->cmd
== CompleteFetch
) {
491 DPRINTF(InOrderCachePort
,
492 "[tid:%u]: [sn:%i]: Processing fetch access\n",
495 // NOTE: This is only allowing a thread to fetch one line
496 // at a time. Re-examine when/if prefetching
498 //memcpy(fetchData[tid], cache_pkt->getPtr<uint8_t>(),
499 // cache_pkt->getSize());
501 // Get the instruction from the array of the cache line.
502 // @todo: update thsi
503 ExtMachInst ext_inst
;
504 StaticInstPtr staticInst
= NULL
;
505 Addr inst_pc
= inst
->readPC();
506 MachInst mach_inst
= TheISA::gtoh(*reinterpret_cast<TheISA::MachInst
*>
507 (cache_pkt
->getPtr
<uint8_t>()));
509 predecoder
.setTC(cpu
->thread
[tid
]->getTC());
510 predecoder
.moreBytes(inst_pc
, inst_pc
, mach_inst
);
511 ext_inst
= predecoder
.getExtMachInst();
513 inst
->setMachInst(ext_inst
);
515 // Set Up More TraceData info
516 if (inst
->traceData
) {
517 inst
->traceData
->setStaticInst(inst
->staticInst
);
518 inst
->traceData
->setPC(inst
->readPC());
521 } else if (inst
->staticInst
&& inst
->isMemRef()) {
522 DPRINTF(InOrderCachePort
,
523 "[tid:%u]: [sn:%i]: Processing cache access\n",
526 inst
->completeAcc(pkt
);
528 if (inst
->isLoad()) {
529 assert(cache_pkt
->isRead());
531 if (cache_pkt
->req
->isLLSC()) {
532 DPRINTF(InOrderCachePort
,
533 "[tid:%u]: Handling Load-Linked for [sn:%u]\n",
535 TheISA::handleLockedRead(cpu
, cache_pkt
->req
);
538 // @TODO: Hardcoded to for load instructions. Assumes that
539 // the dest. idx 0 is always where the data is loaded to.
540 DPRINTF(InOrderCachePort
,
541 "[tid:%u]: [sn:%i]: Data loaded was: %08p\n",
542 tid
, inst
->seqNum
, inst
->readIntResult(0));
543 DPRINTF(InOrderCachePort
,
544 "[tid:%u]: [sn:%i]: FP Data loaded was: %08p\n",
545 tid
, inst
->seqNum
, inst
->readFloatResult(0));
546 } else if(inst
->isStore()) {
547 assert(cache_pkt
->isWrite());
549 DPRINTF(InOrderCachePort
,
550 "[tid:%u]: [sn:%i]: Data stored was: FIX ME\n",
552 getMemData(cache_pkt)*/);
558 cache_req
->setMemAccPending(false);
559 cache_req
->setMemAccCompleted();
561 // Wake up the CPU (if it went to sleep and was waiting on this
562 // completion event).
565 DPRINTF(Activity
, "[tid:%u] Activating %s due to cache completion\n",
566 tid
, cpu
->pipelineStage
[stage_num
]->name());
568 cpu
->switchToActive(stage_num
);
570 DPRINTF(InOrderCachePort
,
571 "[tid:%u] Miss on block @ %08p completed, but squashed\n",
572 tid
, cache_req
->inst
->readPC());
573 cache_req
->setMemAccCompleted();
576 inst
->unsetMemAddr();
580 CacheUnit::recvRetry()
582 DPRINTF(InOrderCachePort
, "Retrying Request for [tid:%i] [sn:%i]\n",
583 retryReq
->inst
->readTid(), retryReq
->inst
->seqNum
);
585 assert(retryPkt
!= NULL
);
586 assert(cacheBlocked
);
587 assert(cacheStatus
== cacheWaitRetry
);
589 if (cachePort
->sendTiming(retryPkt
)) {
590 cacheStatus
= cacheWaitResponse
;
592 cacheBlocked
= false;
594 DPRINTF(InOrderCachePort
,
595 "Retry Request for [tid:%i] [sn:%i] failed\n",
596 retryReq
->inst
->readTid(), retryReq
->inst
->seqNum
);
601 CacheUnit::squash(DynInstPtr inst
, int stage_num
,
602 InstSeqNum squash_seq_num
, unsigned tid
)
604 vector
<int> slot_remove_list
;
606 map
<int, ResReqPtr
>::iterator map_it
= reqMap
.begin();
607 map
<int, ResReqPtr
>::iterator map_end
= reqMap
.end();
609 while (map_it
!= map_end
) {
610 ResReqPtr req_ptr
= (*map_it
).second
;
613 req_ptr
->getInst()->readTid() == tid
&&
614 req_ptr
->getInst()->seqNum
> squash_seq_num
) {
616 DPRINTF(InOrderCachePort
,
617 "[tid:%i] Squashing request from [sn:%i]\n",
618 req_ptr
->getInst()->readTid(), req_ptr
->getInst()->seqNum
);
620 req_ptr
->setSquashed();
622 req_ptr
->getInst()->setSquashed();
624 CacheReqPtr cache_req
= dynamic_cast<CacheReqPtr
>(req_ptr
);
627 if (!cache_req
->isMemAccPending()) {
628 // Mark request for later removal
629 cpu
->reqRemoveList
.push(req_ptr
);
631 // Mark slot for removal from resource
632 slot_remove_list
.push_back(req_ptr
->getSlot());
639 // Now Delete Slot Entry from Req. Map
640 for (int i
= 0; i
< slot_remove_list
.size(); i
++)
641 freeSlot(slot_remove_list
[i
]);
645 CacheUnit::getMemData(Packet
*packet
)
647 switch (packet
->getSize())
650 return packet
->get
<uint8_t>();
653 return packet
->get
<uint16_t>();
656 return packet
->get
<uint32_t>();
659 return packet
->get
<uint64_t>();
662 panic("bad store data size = %d\n", packet
->getSize());