inorder-miscregs: Fix indexing for misc. reg operands and update result-types for...
[gem5.git] / src / cpu / inorder / resources / cache_unit.cc
1 /*
2 * Copyright (c) 2007 MIPS Technologies, Inc.
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 *
28 * Authors: Korey Sewell
29 *
30 */
31
32 #include <vector>
33 #include <list>
34 #include "arch/isa_traits.hh"
35 #include "arch/locked_mem.hh"
36 #include "arch/utility.hh"
37 #include "cpu/inorder/resources/cache_unit.hh"
38 #include "cpu/inorder/pipeline_traits.hh"
39 #include "cpu/inorder/cpu.hh"
40 #include "mem/request.hh"
41
42 using namespace std;
43 using namespace TheISA;
44 using namespace ThePipeline;
45
46 Tick
47 CacheUnit::CachePort::recvAtomic(PacketPtr pkt)
48 {
49 panic("DefaultFetch doesn't expect recvAtomic callback!");
50 return curTick;
51 }
52
53 void
54 CacheUnit::CachePort::recvFunctional(PacketPtr pkt)
55 {
56 panic("DefaultFetch doesn't expect recvFunctional callback!");
57 }
58
59 void
60 CacheUnit::CachePort::recvStatusChange(Status status)
61 {
62 if (status == RangeChange)
63 return;
64
65 panic("DefaultFetch doesn't expect recvStatusChange callback!");
66 }
67
bool
CacheUnit::CachePort::recvTiming(Packet *pkt)
{
    // Hand the response packet to the owning CacheUnit for processing.
    // This port always accepts timing responses, so report success.
    cachePortUnit->processCacheCompletion(pkt);
    return true;
}
74
void
CacheUnit::CachePort::recvRetry()
{
    // The cache can accept requests again; delegate the actual resend
    // logic to the owning CacheUnit.
    cachePortUnit->recvRetry();
}
80
// Construct the cache resource: no retry is pending initially
// (retryPkt NULL, retrySlot -1) and the cache starts unblocked.
CacheUnit::CacheUnit(string res_name, int res_id, int res_width,
        int res_latency, InOrderCPU *_cpu, ThePipeline::Params *params)
    : Resource(res_name, res_id, res_width, res_latency, _cpu),
      retryPkt(NULL), retrySlot(-1), cacheBlocked(false)
{
    // Single memory-system port shared by all slots of this resource.
    cachePort = new CachePort(this);
}
88
89 Port *
90 CacheUnit::getPort(const string &if_name, int idx)
91 {
92 if (if_name == resName)
93 return cachePort;
94 else
95 return NULL;
96 }
97
98 int
99 CacheUnit::getSlot(DynInstPtr inst)
100 {
101 if (!inst->validMemAddr()) {
102 panic("Mem. Addr. must be set before requesting cache access\n");
103 }
104
105 Addr req_addr = inst->getMemAddr();
106
107 if (resName == "icache_port" ||
108 find(addrList.begin(), addrList.end(), req_addr) == addrList.end()) {
109
110 int new_slot = Resource::getSlot(inst);
111
112 if (new_slot == -1)
113 return -1;
114
115 inst->memTime = curTick;
116 addrList.push_back(req_addr);
117 addrMap[req_addr] = inst->seqNum;
118 DPRINTF(InOrderCachePort,
119 "[tid:%i]: [sn:%i]: Address %08p added to dependency list\n",
120 inst->readTid(), inst->seqNum, req_addr);
121 return new_slot;
122 } else {
123 DPRINTF(InOrderCachePort,
124 "Denying request because there is an outstanding"
125 " request to/for addr. %08p. by [sn:%i] @ tick %i\n",
126 req_addr, addrMap[req_addr], inst->memTime);
127 return -1;
128 }
129 }
130
131 void
132 CacheUnit::freeSlot(int slot_num)
133 {
134 vector<Addr>::iterator vect_it = find(addrList.begin(), addrList.end(),
135 reqMap[slot_num]->inst->getMemAddr());
136 assert(vect_it != addrList.end());
137
138 DPRINTF(InOrderCachePort,
139 "[tid:%i]: Address %08p removed from dependency list\n",
140 reqMap[slot_num]->inst->readTid(), (*vect_it));
141
142 addrList.erase(vect_it);
143
144 Resource::freeSlot(slot_num);
145 }
146
147 ResReqPtr
148 CacheUnit::getRequest(DynInstPtr inst, int stage_num, int res_idx,
149 int slot_num, unsigned cmd)
150 {
151 ScheduleEntry* sched_entry = inst->resSched.top();
152
153 if (!inst->validMemAddr()) {
154 panic("Mem. Addr. must be set before requesting cache access\n");
155 }
156
157 int req_size = 0;
158 MemCmd::Command pkt_cmd;
159
160 if (sched_entry->cmd == InitiateReadData) {
161 pkt_cmd = MemCmd::ReadReq;
162 req_size = inst->getMemAccSize();
163
164 DPRINTF(InOrderCachePort,
165 "[tid:%i]: %i byte Read request from [sn:%i] for addr %08p\n",
166 inst->readTid(), req_size, inst->seqNum, inst->getMemAddr());
167 } else if (sched_entry->cmd == InitiateWriteData) {
168 pkt_cmd = MemCmd::WriteReq;
169 req_size = inst->getMemAccSize();
170
171 DPRINTF(InOrderCachePort,
172 "[tid:%i]: %i byte Write request from [sn:%i] for addr %08p\n",
173 inst->readTid(), req_size, inst->seqNum, inst->getMemAddr());
174 } else if (sched_entry->cmd == InitiateFetch){
175 pkt_cmd = MemCmd::ReadReq;
176 req_size = sizeof(MachInst); //@TODO: mips16e
177
178 DPRINTF(InOrderCachePort,
179 "[tid:%i]: %i byte Fetch request from [sn:%i] for addr %08p\n",
180 inst->readTid(), req_size, inst->seqNum, inst->getMemAddr());
181 } else {
182 panic("%i: Unexpected request type (%i) to %s", curTick,
183 sched_entry->cmd, name());
184 }
185
186 return new CacheRequest(this, inst, stage_num, id, slot_num,
187 sched_entry->cmd, req_size, pkt_cmd,
188 0/*flags*/, this->cpu->readCpuId());
189 }
190
191 void
192 CacheUnit::requestAgain(DynInstPtr inst, bool &service_request)
193 {
194 //service_request = false;
195
196 CacheReqPtr cache_req = dynamic_cast<CacheReqPtr>(findRequest(inst));
197 assert(cache_req);
198
199 // Check to see if this instruction is requesting the same command
200 // or a different one
201 if (cache_req->cmd != inst->resSched.top()->cmd) {
202 // If different, then update command in the request
203 cache_req->cmd = inst->resSched.top()->cmd;
204 DPRINTF(InOrderCachePort,
205 "[tid:%i]: [sn:%i]: the command for this instruction\n",
206 inst->readTid(), inst->seqNum);
207
208 service_request = true;
209 } else {
210 // If same command, just check to see if memory access was completed
211 // but dont try to re-execute
212 DPRINTF(InOrderCachePort,
213 "[tid:%i]: [sn:%i]: requesting this resource again\n",
214 inst->readTid(), inst->seqNum);
215
216 service_request = true;
217 }
218 }
219
// Service the request occupying slot_num, dispatching on the scheduled
// command: initiate accesses send packets toward the cache, complete
// commands finish an access whose response has arrived (or record a
// stall if it has not).
void
CacheUnit::execute(int slot_num)
{
    // If the cache is blocked no request can make progress this cycle.
    if (cacheBlocked) {
        DPRINTF(InOrderCachePort, "Cache Blocked. Cannot Access\n");
        return;
    }

    CacheReqPtr cache_req = dynamic_cast<CacheReqPtr>(reqMap[slot_num]);
    assert(cache_req);

    DynInstPtr inst = cache_req->inst;
    int tid;
    tid = inst->readTid();
    int seq_num;
    seq_num = inst->seqNum;
    //int stage_num = cache_req->getStageNum();

    cache_req->fault = NoFault;

    switch (cache_req->cmd)
    {
      case InitiateFetch:
        DPRINTF(InOrderCachePort,
                "[tid:%u]: Initiating fetch access to %s for addr. %08p\n",
                tid, name(), cache_req->inst->getMemAddr());

        DPRINTF(InOrderCachePort,
                "[tid:%u]: Fetching new cache block from addr: %08p\n",
                tid, cache_req->memReq->getVaddr());

        // Remember which slot this access came from so doDataAccess can
        // look the request back up, then send the fetch packet.
        inst->setCurResSlot(slot_num);
        doDataAccess(inst);
        break;

      case CompleteFetch:
        if (cache_req->isMemAccComplete()) {
            DPRINTF(InOrderCachePort,
                    "[tid:%i]: Completing Fetch Access for [sn:%i]\n",
                    tid, inst->seqNum);

            // Pull the raw instruction word out of the response packet.
            MachInst mach_inst = cache_req->dataPkt->get<MachInst>();

            /**
             * @TODO: May Need This Function for Endianness-Compatibility
             *  mach_inst =
             *    gtoh(*reinterpret_cast<MachInst *>(&cacheData[tid][offset]));
             */

            DPRINTF(InOrderCachePort,
                    "[tid:%i]: Fetched instruction is %08p\n",
                    tid, mach_inst);

            // ExtMachInst ext_inst = makeExtMI(mach_inst, cpu->tcBase(tid));

            // Decode-side bookkeeping: install the fetched bits and
            // bind the instruction to its thread.
            inst->setMachInst(mach_inst);
            inst->setASID(tid);
            inst->setThreadState(cpu->thread[tid]);

            DPRINTF(InOrderStage, "[tid:%i]: Instruction [sn:%i] is: %s\n",
                    tid, seq_num, inst->staticInst->disassemble(inst->PC));

            // Set Up More TraceData info
            if (inst->traceData) {
                inst->traceData->setStaticInst(inst->staticInst);
                inst->traceData->setPC(inst->readPC());
            }

            cache_req->done();
        } else {
            // Response not back yet: mark incomplete so the stage stalls
            // and retries this command next cycle.
            DPRINTF(InOrderCachePort,
                    "[tid:%i]: [sn:%i]: Unable to Complete Fetch Access\n",
                    tid, inst->seqNum);
            DPRINTF(InOrderStall,
                    "STALL: [tid:%i]: Fetch miss from %08p\n",
                    tid, cache_req->inst->readPC());
            cache_req->setCompleted(false);
        }
        break;

      case InitiateReadData:
      case InitiateWriteData:
        DPRINTF(InOrderCachePort,
                "[tid:%u]: Initiating data access to %s for addr. %08p\n",
                tid, name(), cache_req->inst->getMemAddr());

        // initiateAcc() runs the instruction's access code, which ends
        // up calling back into this unit to send the actual packet.
        inst->setCurResSlot(slot_num);
        //inst->memAccess();
        inst->initiateAcc();
        break;

      case CompleteReadData:
      case CompleteWriteData:
        DPRINTF(InOrderCachePort,
                "[tid:%i]: [sn:%i]: Trying to Complete Data Access\n",
                tid, inst->seqNum);
        if (cache_req->isMemAccComplete()) {
            cache_req->done();
        } else {
            // Still waiting on the cache; record the stall.
            DPRINTF(InOrderStall, "STALL: [tid:%i]: Data miss from %08p\n",
                    tid, cache_req->inst->getMemAddr());
            cache_req->setCompleted(false);
        }
        break;

      default:
        fatal("Unrecognized command to %s", resName);
    }
}
329
// Build a packet for the instruction's pending cache request and try to
// send it through the cache port.  Handles the store-conditional
// evaluation and the retry bookkeeping when the port is busy.
// Always returns NoFault; faults are not propagated from here.
Fault
CacheUnit::doDataAccess(DynInstPtr inst)
{
    Fault fault = NoFault;
    int tid = 0;

    tid = inst->readTid();

    // The instruction recorded its slot in execute(); recover the
    // cache request from it.
    CacheReqPtr cache_req
        = dynamic_cast<CacheReqPtr>(reqMap[inst->getCurResSlot()]);
    assert(cache_req);

    cache_req->dataPkt = new CacheReqPacket(cache_req, cache_req->pktCmd,
                                            Packet::Broadcast);

    // Reads fill the request's own buffer; writes send the data the
    // instruction staged in storeData.
    if (cache_req->dataPkt->isRead()) {
        cache_req->dataPkt->dataStatic(cache_req->reqData);
    } else if (cache_req->dataPkt->isWrite()) {
        cache_req->dataPkt->dataStatic(&cache_req->inst->storeData);

    }

    cache_req->dataPkt->time = curTick;

    bool do_access = true;  // flag to suppress cache access

    Request *memReq = cache_req->dataPkt->req;

    // Store-conditional: let the ISA decide whether the access should
    // actually reach memory (i.e. whether the lock flag still holds).
    if (cache_req->dataPkt->isWrite() && memReq->isLLSC()) {
        assert(cache_req->inst->isStoreConditional());
        DPRINTF(InOrderCachePort, "Evaluating Store Conditional access\n");
        do_access = TheISA::handleLockedWrite(cpu, memReq);
    }

    DPRINTF(InOrderCachePort,
            "[tid:%i] [sn:%i] attempting to access cache\n",
            tid, inst->seqNum);

    //@TODO: If you want to ignore failed store conditional accesses, then
    //       enable this. However, this might skew memory stats because
    //       the failed store conditional access will get ignored.
    //       - Remove optionality here ...
    // NOTE(review): with the literal 1 here, the two branches below
    // (failed-SC completion and the final else) are unreachable; every
    // access is sent regardless of do_access.  This appears deliberate
    // per the @TODO above — confirm before changing.
    if (1/*do_access*/) {
        if (!cachePort->sendTiming(cache_req->dataPkt)) {
            // Port refused the packet: stash it for resend when the
            // cache calls recvRetry().
            DPRINTF(InOrderCachePort,
                    "[tid:%i] [sn:%i] is waiting to retry request\n",
                    tid, inst->seqNum);

            retrySlot = cache_req->getSlot();
            retryReq = cache_req;
            retryPkt = cache_req->dataPkt;

            cacheStatus = cacheWaitRetry;

            //cacheBlocked = true;

            DPRINTF(InOrderStall, "STALL: \n");

            cache_req->setCompleted(false);
        } else {
            // Packet accepted; mark the access as in flight.
            DPRINTF(InOrderCachePort,
                    "[tid:%i] [sn:%i] is now waiting for cache response\n",
                    tid, inst->seqNum);
            cache_req->setCompleted();
            cache_req->setMemAccPending();
            cacheStatus = cacheWaitResponse;
            cacheBlocked = false;
        }
    } else if (!do_access && memReq->isLLSC()){
        // Store-Conditional instructions complete even if they "failed"
        assert(cache_req->inst->isStoreConditional());
        cache_req->setCompleted(true);

        DPRINTF(LLSC,
                "[tid:%i]: T%i Ignoring Failed Store Conditional Access\n",
                tid, tid);

        // Extra data 0 signals SC failure to the instruction's
        // completion code.
        cache_req->dataPkt->req->setExtraData(0);

        processCacheCompletion(cache_req->dataPkt);

        // Automatically set these since we ignored the memory access
        //cache_req->setMemAccPending(false);
        //cache_req->setMemAccCompleted();
    } else {
        // Make cache request again since access due to
        // inability to access
        DPRINTF(InOrderStall, "STALL: \n");
        cache_req->setCompleted(false);
    }

    return fault;
}
423
// Handle a response packet from the cache: drop it if the originating
// request was squashed, otherwise finish the fetch or load/store access
// it belongs to and wake the pipeline stage that is waiting on it.
void
CacheUnit::processCacheCompletion(PacketPtr pkt)
{
    // Cast to correct packet type
    CacheReqPacket* cache_pkt = dynamic_cast<CacheReqPacket*>(pkt);
    assert(cache_pkt);

    // A response for a squashed request is retired immediately; done()
    // frees the request without touching architectural state.
    if (cache_pkt->cacheReq->isSquashed()) {
        DPRINTF(InOrderCachePort,
                "Ignoring completion of squashed access, [tid:%i] [sn:%i]\n",
                cache_pkt->cacheReq->getInst()->readTid(),
                cache_pkt->cacheReq->getInst()->seqNum);

        cache_pkt->cacheReq->done();
        return;
    }

    DPRINTF(InOrderCachePort,
            "[tid:%u]: [sn:%i]: Waking from cache access to addr. %08p\n",
            cache_pkt->cacheReq->getInst()->readTid(),
            cache_pkt->cacheReq->getInst()->seqNum,
            cache_pkt->cacheReq->getInst()->getMemAddr());

    // Cast to correct request type
    // Look the request up again via the instruction: the live request
    // may differ from the one recorded in the packet.
    CacheRequest *cache_req = dynamic_cast<CacheReqPtr>(
        findRequest(cache_pkt->cacheReq->getInst()));
    assert(cache_req);


    // Get resource request info
    // @todo: SMT needs to figure out where to get thread # from.
    unsigned tid = 0;
    unsigned stage_num = cache_req->getStageNum();
    DynInstPtr inst = cache_req->inst;

    if (!cache_req->isSquashed()) {
        if (inst->resSched.top()->cmd == CompleteFetch) {
            // Fetch completion: nothing to do here beyond logging; the
            // CompleteFetch command in execute() consumes the data.
            DPRINTF(InOrderCachePort,
                    "[tid:%u]: [sn:%i]: Processing fetch access\n",
                    tid, inst->seqNum);
        } else if (inst->staticInst && inst->isMemRef()) {
            DPRINTF(InOrderCachePort,
                    "[tid:%u]: [sn:%i]: Processing cache access\n",
                    tid, inst->seqNum);

            // Let the instruction write its result / finish the store.
            inst->completeAcc(pkt);

            if (inst->isLoad()) {
                assert(cache_pkt->isRead());

                // Load-linked: record the locked address with the ISA.
                if (cache_pkt->req->isLLSC()) {
                    DPRINTF(InOrderCachePort,
                            "[tid:%u]: Handling Load-Linked for [sn:%u]\n",
                            tid, inst->seqNum);
                    TheISA::handleLockedRead(cpu, cache_pkt->req);
                }

                // @TODO: Hardcoded to for load instructions. Assumes that
                // the dest. idx 0 is always where the data is loaded to.
                DPRINTF(InOrderCachePort,
                        "[tid:%u]: [sn:%i]: Data loaded was: %08p\n",
                        tid, inst->seqNum, inst->readIntResult(0));
                DPRINTF(InOrderCachePort,
                        "[tid:%u]: [sn:%i]: FP Data loaded was: %08p\n",
                        tid, inst->seqNum, inst->readFloatResult(0));
            } else if(inst->isStore()) {
                assert(cache_pkt->isWrite());

                DPRINTF(InOrderCachePort,
                        "[tid:%u]: [sn:%i]: Data stored was: %08p\n",
                        tid, inst->seqNum,
                        getMemData(cache_pkt));

            }
        }

        cache_req->setMemAccPending(false);
        cache_req->setMemAccCompleted();

        // Wake up the CPU (if it went to sleep and was waiting on this
        // completion event).
        cpu->wakeCPU();

        DPRINTF(Activity, "[tid:%u] Activating %s due to cache completion\n",
                tid, cpu->pipelineStage[stage_num]->name());

        cpu->switchToActive(stage_num);
    } else {
        // Request became squashed after lookup: just mark the access
        // complete so the pending squash can release the slot.
        DPRINTF(InOrderCachePort,
                "[tid:%u] Miss on block @ %08p completed, but squashed\n",
                tid, cache_req->inst->readPC());
        cache_req->setMemAccCompleted();
    }

    inst->unsetMemAddr();
}
520
// Resend the packet that the cache previously refused.  On another
// refusal, simply wait for the next recvRetry() callback (retryPkt and
// retryReq stay set).
void
CacheUnit::recvRetry()
{
    DPRINTF(InOrderCachePort, "Retrying Request for [tid:%i] [sn:%i]\n",
            retryReq->inst->readTid(), retryReq->inst->seqNum);

    assert(retryPkt != NULL);
    // NOTE(review): doDataAccess never sets cacheBlocked to true (that
    // line is commented out there), so this assert looks like it would
    // fire on any retry path — confirm against the header/initial state.
    assert(cacheBlocked);
    assert(cacheStatus == cacheWaitRetry);

    if (cachePort->sendTiming(retryPkt)) {
        // Accepted this time: back to waiting for the response.
        cacheStatus = cacheWaitResponse;
        retryPkt = NULL;
        cacheBlocked = false;
    } else {
        DPRINTF(InOrderCachePort,
                "Retry Request for [tid:%i] [sn:%i] failed\n",
                retryReq->inst->readTid(), retryReq->inst->seqNum);
    }
}
541
542 void
543 CacheUnit::squash(DynInstPtr inst, int stage_num,
544 InstSeqNum squash_seq_num, unsigned tid)
545 {
546 vector<int> slot_remove_list;
547
548 map<int, ResReqPtr>::iterator map_it = reqMap.begin();
549 map<int, ResReqPtr>::iterator map_end = reqMap.end();
550
551 while (map_it != map_end) {
552 ResReqPtr req_ptr = (*map_it).second;
553
554 if (req_ptr &&
555 req_ptr->getInst()->readTid() == tid &&
556 req_ptr->getInst()->seqNum > squash_seq_num) {
557
558 DPRINTF(InOrderCachePort,
559 "[tid:%i] Squashing request from [sn:%i]\n",
560 req_ptr->getInst()->readTid(), req_ptr->getInst()->seqNum);
561
562 req_ptr->setSquashed();
563
564 req_ptr->getInst()->setSquashed();
565
566 CacheReqPtr cache_req = dynamic_cast<CacheReqPtr>(req_ptr);
567 assert(cache_req);
568
569 if (!cache_req->isMemAccPending()) {
570 // Mark request for later removal
571 cpu->reqRemoveList.push(req_ptr);
572
573 // Mark slot for removal from resource
574 slot_remove_list.push_back(req_ptr->getSlot());
575 }
576 }
577
578 map_it++;
579 }
580
581 // Now Delete Slot Entry from Req. Map
582 for (int i = 0; i < slot_remove_list.size(); i++)
583 freeSlot(slot_remove_list[i]);
584 }
585
586 uint64_t
587 CacheUnit::getMemData(Packet *packet)
588 {
589 switch (packet->getSize())
590 {
591 case 8:
592 return packet->get<uint8_t>();
593
594 case 16:
595 return packet->get<uint16_t>();
596
597 case 32:
598 return packet->get<uint32_t>();
599
600 case 64:
601 return packet->get<uint64_t>();
602
603 default:
604 panic("bad store data size = %d\n", packet->getSize());
605 }
606 }
607