Configs: Add support for the InOrder CPU model
[gem5.git] / src / cpu / inorder / resources / cache_unit.cc
1 /*
2 * Copyright (c) 2007 MIPS Technologies, Inc.
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 *
28 * Authors: Korey Sewell
29 *
30 */
31
32 #include <vector>
33 #include <list>
34 #include "arch/isa_traits.hh"
35 #include "arch/mips/locked_mem.hh"
36 #include "arch/utility.hh"
37 #include "cpu/inorder/resources/cache_unit.hh"
38 #include "cpu/inorder/pipeline_traits.hh"
39 #include "cpu/inorder/cpu.hh"
40 #include "mem/request.hh"
41
42 using namespace std;
43 using namespace TheISA;
44 using namespace ThePipeline;
45
46 Tick
47 CacheUnit::CachePort::recvAtomic(PacketPtr pkt)
48 {
49 panic("DefaultFetch doesn't expect recvAtomic callback!");
50 return curTick;
51 }
52
53 void
54 CacheUnit::CachePort::recvFunctional(PacketPtr pkt)
55 {
56 panic("DefaultFetch doesn't expect recvFunctional callback!");
57 }
58
59 void
60 CacheUnit::CachePort::recvStatusChange(Status status)
61 {
62 if (status == RangeChange)
63 return;
64
65 panic("DefaultFetch doesn't expect recvStatusChange callback!");
66 }
67
bool
CacheUnit::CachePort::recvTiming(Packet *pkt)
{
    // Hand the completed cache access back to the owning cache unit.
    // This port never rejects an incoming timing response.
    cachePortUnit->processCacheCompletion(pkt);
    return true;
}
74
void
CacheUnit::CachePort::recvRetry()
{
    // Relay the retry notification to the cache unit, which holds the
    // packet that was previously rejected (see CacheUnit::recvRetry).
    cachePortUnit->recvRetry();
}
80
81 CacheUnit::CacheUnit(string res_name, int res_id, int res_width,
82 int res_latency, InOrderCPU *_cpu, ThePipeline::Params *params)
83 : Resource(res_name, res_id, res_width, res_latency, _cpu),
84 retryPkt(NULL), retrySlot(-1)
85 {
86 //cacheData.resize(res_width);
87 //slotStatus = new CachePortStatus[width];
88 //fetchPC = new Addr[width];
89 cachePort = new CachePort(this);
90
91 cacheBlocked = false;
92 }
93
94 Port *
95 CacheUnit::getPort(const string &if_name, int idx)
96 {
97 if (if_name == resName)
98 return cachePort;
99 else
100 return NULL;
101 }
102
int
CacheUnit::getSlot(DynInstPtr inst)
{
    // A valid effective address is a precondition for any cache access.
    if (!inst->validMemAddr()) {
        panic("Mem. Addr. must be set before requesting cache access\n");
    }

    Addr req_addr = inst->getMemAddr();

    // Instruction fetches are never serialized on address. Data accesses
    // are denied a slot while another outstanding request targets the
    // same address (simple per-address dependency tracking).
    if (resName == "icache_port" ||
        find(addrList.begin(), addrList.end(), req_addr) == addrList.end()) {

        int new_slot = Resource::getSlot(inst);

        // The base resource is full; caller must retry later.
        if (new_slot == -1)
            return -1;

        inst->memTime = curTick;
        // Record the address as in-flight and remember which instruction
        // owns it (used for the debug output on later denials).
        addrList.push_back(req_addr);
        addrMap[req_addr] = inst->seqNum;
        DPRINTF(InOrderCachePort,
                "[tid:%i]: [sn:%i]: Address %08p added to dependency list\n",
                inst->readTid(), inst->seqNum, req_addr);
        return new_slot;
    } else {
        DPRINTF(InOrderCachePort,
                "Denying request because there is an outstanding"
                " request to/for addr. %08p. by [sn:%i] @ tick %i\n",
                req_addr, addrMap[req_addr], inst->memTime);
        return -1;
    }
}
135
136 void
137 CacheUnit::freeSlot(int slot_num)
138 {
139 vector<Addr>::iterator vect_it = find(addrList.begin(), addrList.end(),
140 reqMap[slot_num]->inst->getMemAddr());
141 assert(vect_it != addrList.end());
142
143 DPRINTF(InOrderCachePort,
144 "[tid:%i]: Address %08p removed from dependency list\n",
145 reqMap[slot_num]->inst->readTid(), (*vect_it));
146
147 addrList.erase(vect_it);
148
149 Resource::freeSlot(slot_num);
150 }
151
152 ResReqPtr
153 CacheUnit::getRequest(DynInstPtr inst, int stage_num, int res_idx,
154 int slot_num, unsigned cmd)
155 {
156 ScheduleEntry* sched_entry = inst->resSched.top();
157
158 if (!inst->validMemAddr()) {
159 panic("Mem. Addr. must be set before requesting cache access\n");
160 }
161
162 int req_size = 0;
163 MemCmd::Command pkt_cmd;
164
165 if (sched_entry->cmd == InitiateReadData) {
166 pkt_cmd = MemCmd::ReadReq;
167 req_size = inst->getMemAccSize();
168
169 DPRINTF(InOrderCachePort,
170 "[tid:%i]: %i byte Read request from [sn:%i] for addr %08p\n",
171 inst->readTid(), req_size, inst->seqNum, inst->getMemAddr());
172 } else if (sched_entry->cmd == InitiateWriteData) {
173 pkt_cmd = MemCmd::WriteReq;
174 req_size = inst->getMemAccSize();
175
176 DPRINTF(InOrderCachePort,
177 "[tid:%i]: %i byte Write request from [sn:%i] for addr %08p\n",
178 inst->readTid(), req_size, inst->seqNum, inst->getMemAddr());
179 } else if (sched_entry->cmd == InitiateFetch){
180 pkt_cmd = MemCmd::ReadReq;
181 req_size = sizeof(MachInst); //@TODO: mips16e
182
183 DPRINTF(InOrderCachePort,
184 "[tid:%i]: %i byte Fetch request from [sn:%i] for addr %08p\n",
185 inst->readTid(), req_size, inst->seqNum, inst->getMemAddr());
186 } else {
187 panic("%i: Unexpected request type (%i) to %s", curTick,
188 sched_entry->cmd, name());
189 }
190
191 return new CacheRequest(this, inst, stage_num, id, slot_num,
192 sched_entry->cmd, req_size, pkt_cmd,
193 0/*flags*/, this->cpu->readCpuId());
194 }
195
void
CacheUnit::requestAgain(DynInstPtr inst, bool &service_request)
{
    //service_request = false;

    CacheReqPtr cache_req = dynamic_cast<CacheReqPtr>(findRequest(inst));
    assert(cache_req);

    // Check to see if this instruction is requesting the same command
    // or a different one
    if (cache_req->cmd != inst->resSched.top()->cmd) {
        // If different, then update command in the request
        cache_req->cmd = inst->resSched.top()->cmd;
        DPRINTF(InOrderCachePort,
                "[tid:%i]: [sn:%i]: the command for this instruction\n",
                inst->readTid(), inst->seqNum);

        service_request = true;
    } else {
        // If same command, just check to see if memory access was completed
        // but dont try to re-execute
        // NOTE(review): both branches currently set service_request to
        // true, so the request is always serviced again regardless of
        // whether the command changed.
        DPRINTF(InOrderCachePort,
                "[tid:%i]: [sn:%i]: requesting this resource again\n",
                inst->readTid(), inst->seqNum);

        service_request = true;
    }
}
224
void
CacheUnit::execute(int slot_num)
{
    // Drive one scheduled cache command for the request occupying this slot.
    if (cacheBlocked) {
        DPRINTF(InOrderCachePort, "Cache Blocked. Cannot Access\n");
        return;
    }

    CacheReqPtr cache_req = dynamic_cast<CacheReqPtr>(reqMap[slot_num]);
    assert(cache_req);

    DynInstPtr inst = cache_req->inst;
    int tid;
    tid = inst->readTid();
    int seq_num;
    seq_num = inst->seqNum;
    //int stage_num = cache_req->getStageNum();

    cache_req->fault = NoFault;

    switch (cache_req->cmd)
    {
      case InitiateFetch:
        // Start an instruction-fetch access through the shared data-access
        // path (doDataAccess sends the timing packet).
        DPRINTF(InOrderCachePort,
                "[tid:%u]: Initiating fetch access to %s for addr. %08p\n",
                tid, name(), cache_req->inst->getMemAddr());

        DPRINTF(InOrderCachePort,
                "[tid:%u]: Fetching new cache block from addr: %08p\n",
                tid, cache_req->memReq->getVaddr());

        inst->setCurResSlot(slot_num);
        doDataAccess(inst);
        break;

      case CompleteFetch:
        if (cache_req->isMemAccComplete()) {
            DPRINTF(InOrderCachePort,
                    "[tid:%i]: Completing Fetch Access for [sn:%i]\n",
                    tid, inst->seqNum);

            // The raw machine instruction comes straight out of the
            // response packet.
            MachInst mach_inst = cache_req->dataPkt->get<MachInst>();

            /**
             * @TODO: May Need This Function for Endianness-Compatibility
             *  mach_inst =
             *  gtoh(*reinterpret_cast<MachInst *>(&cacheData[tid][offset]));
             */

            DPRINTF(InOrderCachePort,
                    "[tid:%i]: Fetched instruction is %08p\n",
                    tid, mach_inst);

            // ExtMachInst ext_inst = makeExtMI(mach_inst, cpu->tcBase(tid));

            // Bind the fetched bits and thread context to the dynamic
            // instruction so later stages can decode/execute it.
            inst->setMachInst(mach_inst);
            inst->setASID(tid);
            inst->setThreadState(cpu->thread[tid]);

            DPRINTF(InOrderStage, "[tid:%i]: Instruction [sn:%i] is: %s\n",
                    tid, seq_num, inst->staticInst->disassemble(inst->PC));

            // Set Up More TraceData info
            if (inst->traceData) {
                inst->traceData->setStaticInst(inst->staticInst);
                inst->traceData->setPC(inst->readPC());
            }

            cache_req->done();
        } else {
            // Fetch still outstanding: leave the request incomplete so
            // the stage stalls and retries this command next cycle.
            DPRINTF(InOrderCachePort,
                    "[tid:%i]: [sn:%i]: Unable to Complete Fetch Access\n",
                    tid, inst->seqNum);
            DPRINTF(InOrderStall,
                    "STALL: [tid:%i]: Fetch miss from %08p\n",
                    tid, cache_req->inst->readPC());
            cache_req->setCompleted(false);
        }
        break;

      case InitiateReadData:
      case InitiateWriteData:
        // Data accesses are started through the instruction itself;
        // initiateAcc() eventually calls back into this unit.
        DPRINTF(InOrderCachePort,
                "[tid:%u]: Initiating data access to %s for addr. %08p\n",
                tid, name(), cache_req->inst->getMemAddr());

        inst->setCurResSlot(slot_num);
        //inst->memAccess();
        inst->initiateAcc();
        break;

      case CompleteReadData:
      case CompleteWriteData:
        DPRINTF(InOrderCachePort,
                "[tid:%i]: [sn:%i]: Trying to Complete Data Access\n",
                tid, inst->seqNum);
        if (cache_req->isMemAccComplete()) {
            cache_req->done();
        } else {
            // Response not back yet: stall this slot until the cache
            // completion arrives.
            DPRINTF(InOrderStall, "STALL: [tid:%i]: Data miss from %08p\n",
                    tid, cache_req->inst->getMemAddr());
            cache_req->setCompleted(false);
        }
        break;

      default:
        fatal("Unrecognized command to %s", resName);
    }
}
334
Fault
CacheUnit::doDataAccess(DynInstPtr inst)
{
    // NOTE(review): 'fault' is never assigned below, so this function
    // always returns NoFault; callers cannot rely on it to report
    // access failures.
    Fault fault = NoFault;
    int tid = 0;

    tid = inst->readTid();

    CacheReqPtr cache_req
        = dynamic_cast<CacheReqPtr>(reqMap[inst->getCurResSlot()]);
    assert(cache_req);

    // Build the timing packet for this access.
    cache_req->dataPkt = new CacheReqPacket(cache_req, cache_req->pktCmd,
                                            Packet::Broadcast);

    // Reads fill the request's data buffer; writes source their data
    // directly from the instruction's store data.
    if (cache_req->dataPkt->isRead()) {
        cache_req->dataPkt->dataStatic(cache_req->reqData);
    } else if (cache_req->dataPkt->isWrite()) {
        cache_req->dataPkt->dataStatic(&cache_req->inst->storeData);

    }

    cache_req->dataPkt->time = curTick;

    bool do_access = true; // flag to suppress cache access

    Request *memReq = cache_req->dataPkt->req;

    // A locked write is a store-conditional: evaluate whether the LL/SC
    // reservation still holds before touching the cache.
    if (cache_req->dataPkt->isWrite() && memReq->isLocked()) {
        assert(cache_req->inst->isStoreConditional());
        DPRINTF(InOrderCachePort, "Evaluating Store Conditional access\n");
        do_access = TheISA::handleLockedWrite(cpu, memReq);
    }

    DPRINTF(InOrderCachePort,
            "[tid:%i] [sn:%i] attempting to access cache\n",
            tid, inst->seqNum);

    //@TODO: If you want to ignore failed store conditional accesses, then
    //       enable this. However, this might skew memory stats because
    //       the failed store conditional access will get ignored.
    //       - Remove optionality here ...
    // NOTE(review): the constant condition below makes both 'else'
    // branches dead code — a failed store-conditional still accesses
    // the cache. This appears deliberate per the TODO above.
    if (1/*do_access*/) {
        if (!cachePort->sendTiming(cache_req->dataPkt)) {
            // Port refused the packet: stash it so recvRetry() can
            // resend, and mark the request incomplete so the stage
            // stalls on this slot.
            DPRINTF(InOrderCachePort,
                    "[tid:%i] [sn:%i] is waiting to retry request\n",
                    tid, inst->seqNum);

            retrySlot = cache_req->getSlot();
            retryReq = cache_req;
            retryPkt = cache_req->dataPkt;

            cacheStatus = cacheWaitRetry;

            //cacheBlocked = true;

            DPRINTF(InOrderStall, "STALL: \n");

            cache_req->setCompleted(false);
        } else {
            // Access accepted: wait for processCacheCompletion() to be
            // called with the response.
            DPRINTF(InOrderCachePort,
                    "[tid:%i] [sn:%i] is now waiting for cache response\n",
                    tid, inst->seqNum);
            cache_req->setCompleted();
            cache_req->setMemAccPending();
            cacheStatus = cacheWaitResponse;
            cacheBlocked = false;
        }
    } else if (!do_access && memReq->isLocked()){
        // Store-Conditional instructions complete even if they "failed"
        assert(cache_req->inst->isStoreConditional());
        cache_req->setCompleted(true);

        DPRINTF(LLSC,
                "[tid:%i]: T%i Ignoring Failed Store Conditional Access\n",
                tid, tid);

        // Extra data of 0 signals SC failure to the instruction.
        cache_req->dataPkt->req->setExtraData(0);

        processCacheCompletion(cache_req->dataPkt);

        // Automatically set these since we ignored the memory access
        //cache_req->setMemAccPending(false);
        //cache_req->setMemAccCompleted();
    } else {
        // Make cache request again since access due to
        // inability to access
        DPRINTF(InOrderStall, "STALL: \n");
        cache_req->setCompleted(false);
    }

    return fault;
}
428
void
CacheUnit::processCacheCompletion(PacketPtr pkt)
{
    // Cast to correct packet type
    CacheReqPacket* cache_pkt = dynamic_cast<CacheReqPacket*>(pkt);
    assert(cache_pkt);

    // A response for an already-squashed request carries no useful work;
    // just retire the request so its slot can be reclaimed.
    if (cache_pkt->cacheReq->isSquashed()) {
        DPRINTF(InOrderCachePort,
                "Ignoring completion of squashed access, [tid:%i] [sn:%i]\n",
                cache_pkt->cacheReq->getInst()->readTid(),
                cache_pkt->cacheReq->getInst()->seqNum);

        cache_pkt->cacheReq->done();
        return;
    }

    DPRINTF(InOrderCachePort,
            "[tid:%u]: [sn:%i]: Waking from cache access to addr. %08p\n",
            cache_pkt->cacheReq->getInst()->readTid(),
            cache_pkt->cacheReq->getInst()->seqNum,
            cache_pkt->cacheReq->getInst()->getMemAddr());

    // Cast to correct request type
    // NOTE(review): this re-looks-up the request via findRequest rather
    // than using cache_pkt->cacheReq directly; presumably the live
    // request may differ from the one captured in the packet — verify.
    CacheRequest *cache_req = dynamic_cast<CacheReqPtr>(
        findRequest(cache_pkt->cacheReq->getInst()));
    assert(cache_req);

#if TRACING_ON
    // Get resource request info
    // tid is only referenced from trace output (DPRINTF) below.
    unsigned tid = 0;
#endif

    //tid = pkt->req->getThreadNum();
    unsigned stage_num = cache_req->getStageNum();
    DynInstPtr inst = cache_req->inst;

    if (!cache_req->isSquashed()) {
        if (inst->resSched.top()->cmd == CompleteFetch) {
            DPRINTF(InOrderCachePort,
                    "[tid:%u]: [sn:%i]: Processing fetch access\n",
                    tid, inst->seqNum);
        } else if (inst->staticInst && inst->isMemRef()) {
            DPRINTF(InOrderCachePort,
                    "[tid:%u]: [sn:%i]: Processing cache access\n",
                    tid, inst->seqNum);

            // Let the instruction finish its memory access (e.g. write
            // back load data).
            inst->completeAcc(pkt);

            if (inst->isLoad()) {
                assert(cache_pkt->isRead());

                // A locked read is a load-linked: record the reservation.
                if (cache_pkt->req->isLocked()) {
                    DPRINTF(InOrderCachePort,
                            "[tid:%u]: Handling Load-Linked for [sn:%u]\n",
                            tid, inst->seqNum);
                    TheISA::handleLockedRead(cpu, cache_pkt->req);
                }

                // @TODO: Hardcoded to for load instructions. Assumes that
                // the dest. idx 0 is always where the data is loaded to.
                DPRINTF(InOrderCachePort,
                        "[tid:%u]: [sn:%i]: Data loaded was: %08p\n",
                        tid, inst->seqNum, inst->readIntResult(0));
            } else if(inst->isStore()) {
                assert(cache_pkt->isWrite());

                DPRINTF(InOrderCachePort,
                        "[tid:%u]: [sn:%i]: Data stored was: %08p\n",
                        tid, inst->seqNum,
                        getMemData(cache_pkt));

            }
        }

        cache_req->setMemAccPending(false);
        cache_req->setMemAccCompleted();

        // Wake up the CPU (if it went to sleep and was waiting on this
        // completion event).
        cpu->wakeCPU();

        DPRINTF(Activity, "[tid:%u] Activating %s due to cache completion\n",
                tid, cpu->pipelineStage[stage_num]->name());

        cpu->switchToActive(stage_num);
    } else {
        // Request was squashed between send and response: mark the
        // access complete but skip writeback and stage activation.
        DPRINTF(InOrderCachePort,
                "[tid:%u] Miss on block @ %08p completed, but squashed\n",
                tid, cache_req->inst->readPC());
        cache_req->setMemAccCompleted();
    }

    inst->unsetMemAddr();
}
524
void
CacheUnit::recvRetry()
{
    // The cache port signalled it can accept the previously rejected
    // packet again; attempt to resend it.
    DPRINTF(InOrderCachePort, "Retrying Request for [tid:%i] [sn:%i]\n",
            retryReq->inst->readTid(), retryReq->inst->seqNum);

    assert(retryPkt != NULL);
    // NOTE(review): cacheBlocked is initialized to false and the only
    // assignment to true (in doDataAccess) is commented out, so this
    // assert looks like it would fire if a retry ever arrives — verify
    // against the intended blocking protocol.
    assert(cacheBlocked);
    assert(cacheStatus == cacheWaitRetry);

    if (cachePort->sendTiming(retryPkt)) {
        // Resend succeeded: back to waiting for the response.
        cacheStatus = cacheWaitResponse;
        retryPkt = NULL;
        cacheBlocked = false;
    } else {
        // Still rejected: keep retryPkt and wait for the next retry.
        DPRINTF(InOrderCachePort,
                "Retry Request for [tid:%i] [sn:%i] failed\n",
                retryReq->inst->readTid(), retryReq->inst->seqNum);
    }
}
545
546 void
547 CacheUnit::squash(DynInstPtr inst, int stage_num,
548 InstSeqNum squash_seq_num, unsigned tid)
549 {
550 vector<int> slot_remove_list;
551
552 map<int, ResReqPtr>::iterator map_it = reqMap.begin();
553 map<int, ResReqPtr>::iterator map_end = reqMap.end();
554
555 while (map_it != map_end) {
556 ResReqPtr req_ptr = (*map_it).second;
557
558 if (req_ptr &&
559 req_ptr->getInst()->readTid() == tid &&
560 req_ptr->getInst()->seqNum > squash_seq_num) {
561
562 DPRINTF(InOrderCachePort,
563 "[tid:%i] Squashing request from [sn:%i]\n",
564 req_ptr->getInst()->readTid(), req_ptr->getInst()->seqNum);
565
566 req_ptr->setSquashed();
567
568 req_ptr->getInst()->setSquashed();
569
570 CacheReqPtr cache_req = dynamic_cast<CacheReqPtr>(req_ptr);
571 assert(cache_req);
572
573 if (!cache_req->isMemAccPending()) {
574 // Mark request for later removal
575 cpu->reqRemoveList.push(req_ptr);
576
577 // Mark slot for removal from resource
578 slot_remove_list.push_back(req_ptr->getSlot());
579 }
580 }
581
582 map_it++;
583 }
584
585 // Now Delete Slot Entry from Req. Map
586 for (int i = 0; i < slot_remove_list.size(); i++)
587 freeSlot(slot_remove_list[i]);
588 }
589
590 uint64_t
591 CacheUnit::getMemData(Packet *packet)
592 {
593 switch (packet->getSize())
594 {
595 case 8:
596 return packet->get<uint8_t>();
597
598 case 16:
599 return packet->get<uint16_t>();
600
601 case 32:
602 return packet->get<uint32_t>();
603
604 case 864:
605 return packet->get<uint64_t>();
606
607 default:
608 panic("bad store data size = %d\n", packet->getSize());
609 }
610 }
611