2 * Copyright (c) 2007 MIPS Technologies, Inc.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 * Authors: Korey Sewell
34 #include "arch/isa_traits.hh"
35 #include "arch/locked_mem.hh"
36 #include "arch/utility.hh"
37 #include "cpu/inorder/resources/cache_unit.hh"
38 #include "cpu/inorder/pipeline_traits.hh"
39 #include "cpu/inorder/cpu.hh"
40 #include "mem/request.hh"
43 using namespace TheISA
;
44 using namespace ThePipeline
;
47 CacheUnit::CachePort::recvAtomic(PacketPtr pkt
)
49 panic("DefaultFetch doesn't expect recvAtomic callback!");
54 CacheUnit::CachePort::recvFunctional(PacketPtr pkt
)
56 panic("DefaultFetch doesn't expect recvFunctional callback!");
60 CacheUnit::CachePort::recvStatusChange(Status status
)
62 if (status
== RangeChange
)
65 panic("DefaultFetch doesn't expect recvStatusChange callback!");
69 CacheUnit::CachePort::recvTiming(Packet
*pkt
)
71 cachePortUnit
->processCacheCompletion(pkt
);
76 CacheUnit::CachePort::recvRetry()
78 cachePortUnit
->recvRetry();
81 CacheUnit::CacheUnit(string res_name
, int res_id
, int res_width
,
82 int res_latency
, InOrderCPU
*_cpu
, ThePipeline::Params
*params
)
83 : Resource(res_name
, res_id
, res_width
, res_latency
, _cpu
),
84 retryPkt(NULL
), retrySlot(-1), cacheBlocked(false)
86 cachePort
= new CachePort(this);
// Look up a port by name; the cache unit publishes its single port under
// the resource's own name (resName).
90 CacheUnit::getPort(const string
&if_name
, int idx
)
92 if (if_name
== resName
)
// NOTE(review): the branch bodies are not visible in this excerpt;
// presumably a matching name yields cachePort and anything else yields
// NULL -- confirm against the full file.
// Acquire a resource slot for a memory access. Data-side requests are
// serialized per-address: a request is denied while another in-flight
// request to the same address is on addrList. Instruction-side
// ("icache_port") requests bypass the address-dependency check.
99 CacheUnit::getSlot(DynInstPtr inst
)
// The memory address must already be resolved before the cache can be
// accessed on this instruction's behalf.
101 if (!inst
->validMemAddr()) {
102 panic("Mem. Addr. must be set before requesting cache access\n");
105 Addr req_addr
= inst
->getMemAddr();
// Grant a slot if this is a fetch-side request, or if no outstanding
// request to the same address exists in the dependency list.
107 if (resName
== "icache_port" ||
108 find(addrList
.begin(), addrList
.end(), req_addr
) == addrList
.end()) {
110 int new_slot
= Resource::getSlot(inst
);
// Record when the access was requested and register the address so
// later requests to it are held back until this one frees its slot.
115 inst
->memTime
= curTick
;
116 addrList
.push_back(req_addr
);
117 addrMap
[req_addr
] = inst
->seqNum
;
118 DPRINTF(InOrderCachePort
,
119 "[tid:%i]: [sn:%i]: Address %08p added to dependency list\n",
120 inst
->readTid(), inst
->seqNum
, req_addr
);
// Otherwise deny the slot: an earlier instruction (addrMap[req_addr])
// still has an access outstanding to this address.
123 DPRINTF(InOrderCachePort
,
124 "Denying request because there is an outstanding"
125 " request to/for addr. %08p. by [sn:%i] @ tick %i\n",
126 req_addr
, addrMap
[req_addr
], inst
->memTime
);
// Release a slot: remove the slot-owner's memory address from the
// per-address dependency list (unblocking later requests to the same
// address), then free the slot in the base Resource.
132 CacheUnit::freeSlot(int slot_num
)
// Find the address this slot's instruction registered in getSlot().
134 vector
<Addr
>::iterator vect_it
= find(addrList
.begin(), addrList
.end(),
135 reqMap
[slot_num
]->inst
->getMemAddr());
// It must be present -- getSlot() added it when the slot was granted.
136 assert(vect_it
!= addrList
.end());
138 DPRINTF(InOrderCachePort
,
139 "[tid:%i]: Address %08p removed from dependency list\n",
140 reqMap
[slot_num
]->inst
->readTid(), (*vect_it
));
142 addrList
.erase(vect_it
);
// Let the base class reclaim the slot itself.
144 Resource::freeSlot(slot_num
);
// Build a CacheRequest for the instruction's current schedule entry,
// translating the pipeline command (InitiateReadData / InitiateWriteData
// / InitiateFetch) into a memory command (MemCmd) and an access size.
148 CacheUnit::getRequest(DynInstPtr inst
, int stage_num
, int res_idx
,
149 int slot_num
, unsigned cmd
)
// The command actually serviced comes from the instruction's resource
// schedule, not from the 'cmd' argument.
151 ScheduleEntry
* sched_entry
= inst
->resSched
.top();
153 if (!inst
->validMemAddr()) {
154 panic("Mem. Addr. must be set before requesting cache access\n");
158 MemCmd::Command pkt_cmd
;
// Data read: size comes from the instruction's access size.
160 if (sched_entry
->cmd
== InitiateReadData
) {
161 pkt_cmd
= MemCmd::ReadReq
;
162 req_size
= inst
->getMemAccSize();
164 DPRINTF(InOrderCachePort
,
165 "[tid:%i]: %i byte Read request from [sn:%i] for addr %08p\n",
166 inst
->readTid(), req_size
, inst
->seqNum
, inst
->getMemAddr());
// Data write: same sizing, but a WriteReq packet.
167 } else if (sched_entry
->cmd
== InitiateWriteData
) {
168 pkt_cmd
= MemCmd::WriteReq
;
169 req_size
= inst
->getMemAccSize();
171 DPRINTF(InOrderCachePort
,
172 "[tid:%i]: %i byte Write request from [sn:%i] for addr %08p\n",
173 inst
->readTid(), req_size
, inst
->seqNum
, inst
->getMemAddr());
// Instruction fetch: reads one machine instruction's worth of bytes.
174 } else if (sched_entry
->cmd
== InitiateFetch
){
175 pkt_cmd
= MemCmd::ReadReq
;
176 req_size
= sizeof(MachInst
); //@TODO: mips16e
178 DPRINTF(InOrderCachePort
,
179 "[tid:%i]: %i byte Fetch request from [sn:%i] for addr %08p\n",
180 inst
->readTid(), req_size
, inst
->seqNum
, inst
->getMemAddr());
// Any other schedule command is a fatal programming error here.
182 panic("%i: Unexpected request type (%i) to %s", curTick
,
183 sched_entry
->cmd
, name());
186 return new CacheRequest(this, inst
, stage_num
, id
, slot_num
,
187 sched_entry
->cmd
, req_size
, pkt_cmd
,
188 0/*flags*/, this->cpu
->readCpuId());
// Called when an instruction re-requests this resource. If the command
// on its schedule has changed (e.g. Initiate* -> Complete*), update the
// existing request to the new command; either way, ask for service.
192 CacheUnit::requestAgain(DynInstPtr inst
, bool &service_request
)
194 //service_request = false;
196 CacheReqPtr cache_req
= dynamic_cast<CacheReqPtr
>(findRequest(inst
));
199 // Check to see if this instruction is requesting the same command
200 // or a different one
201 if (cache_req
->cmd
!= inst
->resSched
.top()->cmd
) {
202 // If different, then update command in the request
203 cache_req
->cmd
= inst
->resSched
.top()->cmd
;
204 DPRINTF(InOrderCachePort
,
205 "[tid:%i]: [sn:%i]: the command for this instruction\n",
206 inst
->readTid(), inst
->seqNum
);
208 service_request
= true;
210 // If same command, just check to see if memory access was completed
211 // but dont try to re-execute
212 DPRINTF(InOrderCachePort
,
213 "[tid:%i]: [sn:%i]: requesting this resource again\n",
214 inst
->readTid(), inst
->seqNum
);
216 service_request
= true;
// Service the request held in the given slot. Dispatches on the request
// command: initiating a fetch or data access kicks off the cache access,
// while the Complete* commands either finish the access (if the memory
// response arrived) or mark the request incomplete to stall the stage.
221 CacheUnit::execute(int slot_num
)
// If the cache is blocked, no access can be serviced this cycle.
224 DPRINTF(InOrderCachePort
, "Cache Blocked. Cannot Access\n");
228 CacheReqPtr cache_req
= dynamic_cast<CacheReqPtr
>(reqMap
[slot_num
]);
231 DynInstPtr inst
= cache_req
->inst
;
233 tid
= inst
->readTid();
235 seq_num
= inst
->seqNum
;
236 //int stage_num = cache_req->getStageNum();
// Clear any stale fault before (re)executing the request.
238 cache_req
->fault
= NoFault
;
240 switch (cache_req
->cmd
)
// --- Initiate an instruction fetch access ---
243 DPRINTF(InOrderCachePort
,
244 "[tid:%u]: Initiating fetch access to %s for addr. %08p\n",
245 tid
, name(), cache_req
->inst
->getMemAddr());
247 DPRINTF(InOrderCachePort
,
248 "[tid:%u]: Fetching new cache block from addr: %08p\n",
249 tid
, cache_req
->memReq
->getVaddr());
251 inst
->setCurResSlot(slot_num
);
// --- Complete a fetch: only possible once the response arrived ---
256 if (cache_req
->isMemAccComplete()) {
257 DPRINTF(InOrderCachePort
,
258 "[tid:%i]: Completing Fetch Access for [sn:%i]\n",
// Pull the raw machine instruction out of the response packet.
261 MachInst mach_inst
= cache_req
->dataPkt
->get
<MachInst
>();
264 * @TODO: May Need This Function for Endianness-Compatibility
266 * gtoh(*reinterpret_cast<MachInst *>(&cacheData[tid][offset]));
269 DPRINTF(InOrderCachePort
,
270 "[tid:%i]: Fetched instruction is %08p\n",
273 // ExtMachInst ext_inst = makeExtMI(mach_inst, cpu->tcBase(tid));
// Install the fetched bits and thread context on the dyn. inst.
275 inst
->setMachInst(mach_inst
);
277 inst
->setThreadState(cpu
->thread
[tid
]);
279 DPRINTF(InOrderStage
, "[tid:%i]: Instruction [sn:%i] is: %s\n",
280 tid
, seq_num
, inst
->staticInst
->disassemble(inst
->PC
));
282 // Set Up More TraceData info
283 if (inst
->traceData
) {
284 inst
->traceData
->setStaticInst(inst
->staticInst
);
285 inst
->traceData
->setPC(inst
->readPC());
// Response not here yet: leave the request incomplete (stall).
290 DPRINTF(InOrderCachePort
,
291 "[tid:%i]: [sn:%i]: Unable to Complete Fetch Access\n",
293 DPRINTF(InOrderStall
,
294 "STALL: [tid:%i]: Fetch miss from %08p\n",
295 tid
, cache_req
->inst
->readPC());
296 cache_req
->setCompleted(false);
// --- Initiate a data read or write ---
300 case InitiateReadData
:
301 case InitiateWriteData
:
302 DPRINTF(InOrderCachePort
,
303 "[tid:%u]: Initiating data access to %s for addr. %08p\n",
304 tid
, name(), cache_req
->inst
->getMemAddr());
306 inst
->setCurResSlot(slot_num
);
// --- Complete a data access (read or write) ---
311 case CompleteReadData
:
312 case CompleteWriteData
:
313 DPRINTF(InOrderCachePort
,
314 "[tid:%i]: [sn:%i]: Trying to Complete Data Access\n",
316 if (cache_req
->isMemAccComplete()) {
// Data miss still outstanding: stall by marking incomplete.
319 DPRINTF(InOrderStall
, "STALL: [tid:%i]: Data miss from %08p\n",
320 tid
, cache_req
->inst
->getMemAddr());
321 cache_req
->setCompleted(false);
// Any command not handled above is fatal.
326 fatal("Unrecognized command to %s", resName
);
// Perform the actual timing cache access for a data request: build the
// packet, attach the data buffer, handle LL/SC store-conditionals, then
// try to send. On a failed send, stash state for a later retry; on
// success, mark the request pending until the response arrives.
331 CacheUnit::doDataAccess(DynInstPtr inst
)
333 Fault fault
= NoFault
;
336 tid
= inst
->readTid();
338 CacheReqPtr cache_req
339 = dynamic_cast<CacheReqPtr
>(reqMap
[inst
->getCurResSlot()]);
// Build the request packet from the cache request's stored command.
342 cache_req
->dataPkt
= new CacheReqPacket(cache_req
, cache_req
->pktCmd
,
// Point the packet at the right data buffer: the request's read buffer
// for loads, the instruction's store data for stores.
345 if (cache_req
->dataPkt
->isRead()) {
346 cache_req
->dataPkt
->dataStatic(cache_req
->reqData
);
347 } else if (cache_req
->dataPkt
->isWrite()) {
348 cache_req
->dataPkt
->dataStatic(&cache_req
->inst
->storeData
);
352 cache_req
->dataPkt
->time
= curTick
;
354 bool do_access
= true; // flag to suppress cache access
356 Request
*memReq
= cache_req
->dataPkt
->req
;
// Store-conditional: check the lock flag first; a failed SC must not
// actually access the cache.
358 if (cache_req
->dataPkt
->isWrite() && memReq
->isLLSC()) {
359 assert(cache_req
->inst
->isStoreConditional());
360 DPRINTF(InOrderCachePort
, "Evaluating Store Conditional access\n");
361 do_access
= TheISA::handleLockedWrite(cpu
, memReq
);
364 DPRINTF(InOrderCachePort
,
365 "[tid:%i] [sn:%i] attempting to access cache\n",
368 //@TODO: If you want to ignore failed store conditional accesses, then
369 // enable this. However, this might skew memory stats because
370 // the failed store conditional access will get ignored.
371 // - Remove optionality here ...
// NOTE(review): the do_access guard is deliberately disabled (always
// sends) per the TODO above -- even failed SCs go to the cache.
372 if (1/*do_access*/) {
373 if (!cachePort
->sendTiming(cache_req
->dataPkt
)) {
// Send refused: remember the packet/slot so recvRetry() can resend.
374 DPRINTF(InOrderCachePort
,
375 "[tid:%i] [sn:%i] is waiting to retry request\n",
378 retrySlot
= cache_req
->getSlot();
379 retryReq
= cache_req
;
380 retryPkt
= cache_req
->dataPkt
;
382 cacheStatus
= cacheWaitRetry
;
384 //cacheBlocked = true;
386 DPRINTF(InOrderStall
, "STALL: \n");
388 cache_req
->setCompleted(false);
// Send accepted: the access is now pending a cache response.
390 DPRINTF(InOrderCachePort
,
391 "[tid:%i] [sn:%i] is now waiting for cache response\n",
393 cache_req
->setCompleted();
394 cache_req
->setMemAccPending();
395 cacheStatus
= cacheWaitResponse
;
396 cacheBlocked
= false;
// Failed store-conditional path (only reachable if the guard above is
// re-enabled): complete the instruction without a cache access.
398 } else if (!do_access
&& memReq
->isLLSC()){
399 // Store-Conditional instructions complete even if they "failed"
400 assert(cache_req
->inst
->isStoreConditional());
401 cache_req
->setCompleted(true);
404 "[tid:%i]: T%i Ignoring Failed Store Conditional Access\n",
// An SC failure is signalled to software via extra data = 0.
407 cache_req
->dataPkt
->req
->setExtraData(0);
409 processCacheCompletion(cache_req
->dataPkt
);
411 // Automatically set these since we ignored the memory access
412 //cache_req->setMemAccPending(false);
413 //cache_req->setMemAccCompleted();
415 // Make cache request again since access due to
416 // inability to access
417 DPRINTF(InOrderStall
, "STALL: \n");
418 cache_req
->setCompleted(false);
// Handle a response packet from the cache: ignore squashed accesses,
// otherwise complete the fetch or load/store it belongs to, mark the
// request's memory access done, and wake the waiting pipeline stage.
425 CacheUnit::processCacheCompletion(PacketPtr pkt
)
427 // Cast to correct packet type
428 CacheReqPacket
* cache_pkt
= dynamic_cast<CacheReqPacket
*>(pkt
);
// A response for a squashed request is dropped after cleanup.
431 if (cache_pkt
->cacheReq
->isSquashed()) {
432 DPRINTF(InOrderCachePort
,
433 "Ignoring completion of squashed access, [tid:%i] [sn:%i]\n",
434 cache_pkt
->cacheReq
->getInst()->readTid(),
435 cache_pkt
->cacheReq
->getInst()->seqNum
);
437 cache_pkt
->cacheReq
->done();
441 DPRINTF(InOrderCachePort
,
442 "[tid:%u]: [sn:%i]: Waking from cache access to addr. %08p\n",
443 cache_pkt
->cacheReq
->getInst()->readTid(),
444 cache_pkt
->cacheReq
->getInst()->seqNum
,
445 cache_pkt
->cacheReq
->getInst()->getMemAddr());
447 // Cast to correct request type
// Re-find the live request for this instruction (the packet's pointer
// may refer to a request that has since been re-issued).
448 CacheRequest
*cache_req
= dynamic_cast<CacheReqPtr
>(
449 findRequest(cache_pkt
->cacheReq
->getInst()));
453 // Get resource request info
454 // @todo: SMT needs to figure out where to get thread # from.
456 unsigned stage_num
= cache_req
->getStageNum();
457 DynInstPtr inst
= cache_req
->inst
;
459 if (!cache_req
->isSquashed()) {
// Fetch completion needs no extra work beyond marking done below.
460 if (inst
->resSched
.top()->cmd
== CompleteFetch
) {
461 DPRINTF(InOrderCachePort
,
462 "[tid:%u]: [sn:%i]: Processing fetch access\n",
// Data access completion: let the instruction consume the response.
464 } else if (inst
->staticInst
&& inst
->isMemRef()) {
465 DPRINTF(InOrderCachePort
,
466 "[tid:%u]: [sn:%i]: Processing cache access\n",
469 inst
->completeAcc(pkt
);
471 if (inst
->isLoad()) {
472 assert(cache_pkt
->isRead());
// Load-linked: record the lock address/flag for a later SC.
474 if (cache_pkt
->req
->isLLSC()) {
475 DPRINTF(InOrderCachePort
,
476 "[tid:%u]: Handling Load-Linked for [sn:%u]\n",
478 TheISA::handleLockedRead(cpu
, cache_pkt
->req
);
481 // @TODO: Hardcoded to for load instructions. Assumes that
482 // the dest. idx 0 is always where the data is loaded to.
483 DPRINTF(InOrderCachePort
,
484 "[tid:%u]: [sn:%i]: Data loaded was: %08p\n",
485 tid
, inst
->seqNum
, inst
->readIntResult(0));
486 } else if(inst
->isStore()) {
487 assert(cache_pkt
->isWrite());
489 DPRINTF(InOrderCachePort
,
490 "[tid:%u]: [sn:%i]: Data stored was: %08p\n",
492 getMemData(cache_pkt
));
// The access is finished: clear pending, mark completed.
497 cache_req
->setMemAccPending(false);
498 cache_req
->setMemAccCompleted();
500 // Wake up the CPU (if it went to sleep and was waiting on this
501 // completion event).
504 DPRINTF(Activity
, "[tid:%u] Activating %s due to cache completion\n",
505 tid
, cpu
->pipelineStage
[stage_num
]->name());
// Reactivate the stage that was stalled on this access.
507 cpu
->switchToActive(stage_num
);
// Squashed while the miss was outstanding: just mark it completed so
// the squash logic can reclaim the slot.
509 DPRINTF(InOrderCachePort
,
510 "[tid:%u] Miss on block @ %08p completed, but squashed\n",
511 tid
, cache_req
->inst
->readPC());
512 cache_req
->setMemAccCompleted();
// The address dependency is over either way.
515 inst
->unsetMemAddr();
// The cache signalled it can accept requests again: resend the packet
// saved by doDataAccess(). Only valid while a retry is actually pending.
519 CacheUnit::recvRetry()
521 DPRINTF(InOrderCachePort
, "Retrying Request for [tid:%i] [sn:%i]\n",
522 retryReq
->inst
->readTid(), retryReq
->inst
->seqNum
);
// Sanity: a retry implies a stashed packet and a blocked, waiting cache.
524 assert(retryPkt
!= NULL
);
525 assert(cacheBlocked
);
526 assert(cacheStatus
== cacheWaitRetry
);
// Resend succeeded: now wait for the response and unblock the cache.
528 if (cachePort
->sendTiming(retryPkt
)) {
529 cacheStatus
= cacheWaitResponse
;
531 cacheBlocked
= false;
// Resend failed again; stay in the retry state.
533 DPRINTF(InOrderCachePort
,
534 "Retry Request for [tid:%i] [sn:%i] failed\n",
535 retryReq
->inst
->readTid(), retryReq
->inst
->seqNum
);
// Squash every request in this resource belonging to thread 'tid' that
// is younger than squash_seq_num. Requests without a pending memory
// access are freed immediately; pending ones are only marked squashed
// (processCacheCompletion drops their responses later).
540 CacheUnit::squash(DynInstPtr inst
, int stage_num
,
541 InstSeqNum squash_seq_num
, unsigned tid
)
// Slots to free after the iteration (can't erase while walking reqMap).
543 vector
<int> slot_remove_list
;
545 map
<int, ResReqPtr
>::iterator map_it
= reqMap
.begin();
546 map
<int, ResReqPtr
>::iterator map_end
= reqMap
.end();
548 while (map_it
!= map_end
) {
549 ResReqPtr req_ptr
= (*map_it
).second
;
// Match: same thread, and younger than the squash point.
552 req_ptr
->getInst()->readTid() == tid
&&
553 req_ptr
->getInst()->seqNum
> squash_seq_num
) {
555 DPRINTF(InOrderCachePort
,
556 "[tid:%i] Squashing request from [sn:%i]\n",
557 req_ptr
->getInst()->readTid(), req_ptr
->getInst()->seqNum
);
559 req_ptr
->setSquashed();
561 req_ptr
->getInst()->setSquashed();
563 CacheReqPtr cache_req
= dynamic_cast<CacheReqPtr
>(req_ptr
);
// Only requests with no in-flight cache access can be reclaimed now.
566 if (!cache_req
->isMemAccPending()) {
567 // Mark request for later removal
568 cpu
->reqRemoveList
.push(req_ptr
);
570 // Mark slot for removal from resource
571 slot_remove_list
.push_back(req_ptr
->getSlot());
578 // Now Delete Slot Entry from Req. Map
579 for (int i
= 0; i
< slot_remove_list
.size(); i
++)
580 freeSlot(slot_remove_list
[i
]);
// Extract the stored data from a packet as a uint64_t, switching on the
// packet's size so the correctly-typed get<>() accessor is used.
// NOTE(review): the case labels are not visible in this excerpt; the
// switch appears to select among 1/2/4/8-byte accesses -- confirm the
// label values against the full file.
584 CacheUnit::getMemData(Packet
*packet
)
586 switch (packet
->getSize())
589 return packet
->get
<uint8_t>();
592 return packet
->get
<uint16_t>();
595 return packet
->get
<uint32_t>();
598 return packet
->get
<uint64_t>();
// Any other size is a fatal error for store-data extraction.
601 panic("bad store data size = %d\n", packet
->getSize());