inorder-float: Fix storage of FP results
[gem5.git] / src / cpu / inorder / resources / cache_unit.cc
1 /*
2 * Copyright (c) 2007 MIPS Technologies, Inc.
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 *
28 * Authors: Korey Sewell
29 *
30 */
31
32 #include <vector>
33 #include <list>
34 #include "arch/isa_traits.hh"
35 #include "arch/locked_mem.hh"
36 #include "arch/utility.hh"
37 #include "arch/predecoder.hh"
38 #include "cpu/inorder/resources/cache_unit.hh"
39 #include "cpu/inorder/pipeline_traits.hh"
40 #include "cpu/inorder/cpu.hh"
41 #include "mem/request.hh"
42
43 using namespace std;
44 using namespace TheISA;
45 using namespace ThePipeline;
46
47 Tick
48 CacheUnit::CachePort::recvAtomic(PacketPtr pkt)
49 {
50 panic("DefaultFetch doesn't expect recvAtomic callback!");
51 return curTick;
52 }
53
54 void
55 CacheUnit::CachePort::recvFunctional(PacketPtr pkt)
56 {
57 panic("DefaultFetch doesn't expect recvFunctional callback!");
58 }
59
60 void
61 CacheUnit::CachePort::recvStatusChange(Status status)
62 {
63 if (status == RangeChange)
64 return;
65
66 panic("DefaultFetch doesn't expect recvStatusChange callback!");
67 }
68
69 bool
70 CacheUnit::CachePort::recvTiming(Packet *pkt)
71 {
72 cachePortUnit->processCacheCompletion(pkt);
73 return true;
74 }
75
76 void
77 CacheUnit::CachePort::recvRetry()
78 {
79 cachePortUnit->recvRetry();
80 }
81
// Construct a cache resource: no retry in flight (retryPkt/retrySlot
// cleared), cache not blocked, and a fresh port for memory traffic.
// NOTE(review): 'predecoder' is initialized with NULL here but is used
// as an object later (predecoder.setTC) — presumably its constructor
// takes a (possibly null) owner pointer; confirm against its header.
CacheUnit::CacheUnit(string res_name, int res_id, int res_width,
        int res_latency, InOrderCPU *_cpu, ThePipeline::Params *params)
    : Resource(res_name, res_id, res_width, res_latency, _cpu),
      retryPkt(NULL), retrySlot(-1), cacheBlocked(false),
      predecoder(NULL)
{
    // The port is heap-allocated and owned by this resource.
    cachePort = new CachePort(this);
}
90
91 Port *
92 CacheUnit::getPort(const string &if_name, int idx)
93 {
94 if (if_name == resName)
95 return cachePort;
96 else
97 return NULL;
98 }
99
100 int
101 CacheUnit::getSlot(DynInstPtr inst)
102 {
103 if (!inst->validMemAddr()) {
104 panic("Mem. Addr. must be set before requesting cache access\n");
105 }
106
107 Addr req_addr = inst->getMemAddr();
108
109 if (resName == "icache_port" ||
110 find(addrList.begin(), addrList.end(), req_addr) == addrList.end()) {
111
112 int new_slot = Resource::getSlot(inst);
113
114 if (new_slot == -1)
115 return -1;
116
117 inst->memTime = curTick;
118 addrList.push_back(req_addr);
119 addrMap[req_addr] = inst->seqNum;
120 DPRINTF(InOrderCachePort,
121 "[tid:%i]: [sn:%i]: Address %08p added to dependency list\n",
122 inst->readTid(), inst->seqNum, req_addr);
123 return new_slot;
124 } else {
125 DPRINTF(InOrderCachePort,
126 "Denying request because there is an outstanding"
127 " request to/for addr. %08p. by [sn:%i] @ tick %i\n",
128 req_addr, addrMap[req_addr], inst->memTime);
129 return -1;
130 }
131 }
132
133 void
134 CacheUnit::freeSlot(int slot_num)
135 {
136 vector<Addr>::iterator vect_it = find(addrList.begin(), addrList.end(),
137 reqMap[slot_num]->inst->getMemAddr());
138 assert(vect_it != addrList.end());
139
140 DPRINTF(InOrderCachePort,
141 "[tid:%i]: Address %08p removed from dependency list\n",
142 reqMap[slot_num]->inst->readTid(), (*vect_it));
143
144 addrList.erase(vect_it);
145
146 Resource::freeSlot(slot_num);
147 }
148
149 ResReqPtr
150 CacheUnit::getRequest(DynInstPtr inst, int stage_num, int res_idx,
151 int slot_num, unsigned cmd)
152 {
153 ScheduleEntry* sched_entry = inst->resSched.top();
154
155 if (!inst->validMemAddr()) {
156 panic("Mem. Addr. must be set before requesting cache access\n");
157 }
158
159 int req_size = 0;
160 MemCmd::Command pkt_cmd;
161
162 if (sched_entry->cmd == InitiateReadData) {
163 pkt_cmd = MemCmd::ReadReq;
164 req_size = inst->getMemAccSize();
165
166 DPRINTF(InOrderCachePort,
167 "[tid:%i]: %i byte Read request from [sn:%i] for addr %08p\n",
168 inst->readTid(), req_size, inst->seqNum, inst->getMemAddr());
169 } else if (sched_entry->cmd == InitiateWriteData) {
170 pkt_cmd = MemCmd::WriteReq;
171 req_size = inst->getMemAccSize();
172
173 DPRINTF(InOrderCachePort,
174 "[tid:%i]: %i byte Write request from [sn:%i] for addr %08p\n",
175 inst->readTid(), req_size, inst->seqNum, inst->getMemAddr());
176 } else if (sched_entry->cmd == InitiateFetch){
177 pkt_cmd = MemCmd::ReadReq;
178 req_size = sizeof(MachInst); //@TODO: mips16e
179
180 DPRINTF(InOrderCachePort,
181 "[tid:%i]: %i byte Fetch request from [sn:%i] for addr %08p\n",
182 inst->readTid(), req_size, inst->seqNum, inst->getMemAddr());
183 } else {
184 panic("%i: Unexpected request type (%i) to %s", curTick,
185 sched_entry->cmd, name());
186 }
187
188 return new CacheRequest(this, inst, stage_num, id, slot_num,
189 sched_entry->cmd, req_size, pkt_cmd,
190 0/*flags*/, this->cpu->readCpuId());
191 }
192
193 void
194 CacheUnit::requestAgain(DynInstPtr inst, bool &service_request)
195 {
196 //service_request = false;
197
198 CacheReqPtr cache_req = dynamic_cast<CacheReqPtr>(findRequest(inst));
199 assert(cache_req);
200
201 // Check to see if this instruction is requesting the same command
202 // or a different one
203 if (cache_req->cmd != inst->resSched.top()->cmd) {
204 // If different, then update command in the request
205 cache_req->cmd = inst->resSched.top()->cmd;
206 DPRINTF(InOrderCachePort,
207 "[tid:%i]: [sn:%i]: the command for this instruction\n",
208 inst->readTid(), inst->seqNum);
209
210 service_request = true;
211 } else {
212 // If same command, just check to see if memory access was completed
213 // but dont try to re-execute
214 DPRINTF(InOrderCachePort,
215 "[tid:%i]: [sn:%i]: requesting this resource again\n",
216 inst->readTid(), inst->seqNum);
217
218 service_request = true;
219 }
220 }
221
// Drive one cycle of cache work for the request occupying slot_num.
// Dispatches on the request's scheduled command: initiating an access
// (fetch / read / write) versus completing one whose response has
// arrived. A blocked cache means nothing can be done this cycle.
void
CacheUnit::execute(int slot_num)
{
    if (cacheBlocked) {
        DPRINTF(InOrderCachePort, "Cache Blocked. Cannot Access\n");
        return;
    }

    CacheReqPtr cache_req = dynamic_cast<CacheReqPtr>(reqMap[slot_num]);
    assert(cache_req);

    DynInstPtr inst = cache_req->inst;
    int tid;
    int seq_num;

    tid = inst->readTid();
    seq_num = inst->seqNum;
    // Every execution attempt starts with a clean fault status.
    cache_req->fault = NoFault;

    switch (cache_req->cmd)
    {
      case InitiateFetch:
        DPRINTF(InOrderCachePort,
                "[tid:%u]: Initiating fetch access to %s for addr. %08p\n",
                tid, name(), cache_req->inst->getMemAddr());

        DPRINTF(InOrderCachePort,
                "[tid:%u]: Fetching new cache block from addr: %08p\n",
                tid, cache_req->memReq->getVaddr());

        // Record which slot this instruction occupies so the access
        // path can find its request, then send the packet.
        inst->setCurResSlot(slot_num);
        doDataAccess(inst);
        break;

      case CompleteFetch:
        // @TODO: MOVE Functionality of handling fetched data into 'fetch unit'
        // let cache-unit just be responsible for transferring data.
        if (cache_req->isMemAccComplete()) {
            // The response arrived (processCacheCompletion already
            // decoded the instruction); free the packet and retire
            // the request.
            DPRINTF(InOrderCachePort,
                    "[tid:%i]: Completing Fetch Access for [sn:%i]\n",
                    tid, inst->seqNum);


            DPRINTF(InOrderCachePort, "[tid:%i]: Instruction [sn:%i] is: %s\n",
                    tid, seq_num, inst->staticInst->disassemble(inst->PC));

            delete cache_req->dataPkt;
            cache_req->done();
        } else {
            // Still waiting on the cache: mark incomplete so the
            // stage stalls and retries.
            DPRINTF(InOrderCachePort,
                    "[tid:%i]: [sn:%i]: Unable to Complete Fetch Access\n",
                    tid, inst->seqNum);
            DPRINTF(InOrderStall,
                    "STALL: [tid:%i]: Fetch miss from %08p\n",
                    tid, cache_req->inst->readPC());
            cache_req->setCompleted(false);
        }
        break;

      case InitiateReadData:
      case InitiateWriteData:
        DPRINTF(InOrderCachePort,
                "[tid:%u]: Initiating data access to %s for addr. %08p\n",
                tid, name(), cache_req->inst->getMemAddr());

        inst->setCurResSlot(slot_num);

        // Prefetches take the generic execute() path; real memory
        // references start their access through initiateAcc().
        if (inst->isDataPrefetch() || inst->isInstPrefetch()) {
            inst->execute();
        } else {
            inst->initiateAcc();
        }

        break;

      case CompleteReadData:
      case CompleteWriteData:
        DPRINTF(InOrderCachePort,
                "[tid:%i]: [sn:%i]: Trying to Complete Data Access\n",
                tid, inst->seqNum);

        // Prefetches are treated as complete immediately since their
        // results are never waited on.
        if (cache_req->isMemAccComplete() ||
            inst->isDataPrefetch() ||
            inst->isInstPrefetch()) {
            cache_req->done();
        } else {
            DPRINTF(InOrderStall, "STALL: [tid:%i]: Data miss from %08p\n",
                    tid, cache_req->inst->getMemAddr());
            cache_req->setCompleted(false);
        }
        break;

      default:
        fatal("Unrecognized command to %s", resName);
    }
}
318
319 void
320 CacheUnit::prefetch(DynInstPtr inst)
321 {
322 warn_once("Prefetching currently unimplemented");
323
324 CacheReqPtr cache_req
325 = dynamic_cast<CacheReqPtr>(reqMap[inst->getCurResSlot()]);
326 assert(cache_req);
327
328 // Clean-Up cache resource request so
329 // other memory insts. can use them
330 cache_req->setCompleted();
331 cacheStatus = cacheAccessComplete;
332 cacheBlocked = false;
333 cache_req->setMemAccPending(false);
334 cache_req->setMemAccCompleted();
335 inst->unsetMemAddr();
336 }
337
338
339 void
340 CacheUnit::writeHint(DynInstPtr inst)
341 {
342 warn_once("Write Hints currently unimplemented");
343
344 CacheReqPtr cache_req
345 = dynamic_cast<CacheReqPtr>(reqMap[inst->getCurResSlot()]);
346 assert(cache_req);
347
348 // Clean-Up cache resource request so
349 // other memory insts. can use them
350 cache_req->setCompleted();
351 cacheStatus = cacheAccessComplete;
352 cacheBlocked = false;
353 cache_req->setMemAccPending(false);
354 cache_req->setMemAccCompleted();
355 inst->unsetMemAddr();
356 }
357
// Build the packet for an instruction's pending cache request and try
// to send it over the cache port. Handles store-conditional evaluation
// and the send-failed (retry) path. Always returns NoFault; faults are
// tracked on the request itself.
Fault
CacheUnit::doDataAccess(DynInstPtr inst)
{
    Fault fault = NoFault;
    int tid = 0;

    tid = inst->readTid();

    CacheReqPtr cache_req
        = dynamic_cast<CacheReqPtr>(reqMap[inst->getCurResSlot()]);
    assert(cache_req);

    cache_req->dataPkt = new CacheReqPacket(cache_req, cache_req->pktCmd,
                                            Packet::Broadcast);

    // Reads use the request's own buffer; writes point the packet at
    // the instruction's store data. Both are static (not owned by the
    // packet).
    if (cache_req->dataPkt->isRead()) {
        cache_req->dataPkt->dataStatic(cache_req->reqData);
    } else if (cache_req->dataPkt->isWrite()) {
        cache_req->dataPkt->dataStatic(&cache_req->inst->storeData);
    }

    cache_req->dataPkt->time = curTick;

    bool do_access = true; // flag to suppress cache access

    Request *memReq = cache_req->dataPkt->req;

    // A store-conditional only proceeds if the lock is still held.
    if (cache_req->dataPkt->isWrite() && memReq->isLLSC()) {
        assert(cache_req->inst->isStoreConditional());
        DPRINTF(InOrderCachePort, "Evaluating Store Conditional access\n");
        do_access = TheISA::handleLockedWrite(cpu, memReq);
    }

    DPRINTF(InOrderCachePort,
            "[tid:%i] [sn:%i] attempting to access cache\n",
            tid, inst->seqNum);

    //@TODO: If you want to ignore failed store conditional accesses, then
    //       enable this. However, this might skew memory stats because
    //       the failed store conditional access will get ignored.
    //       - Remove optionality here ...
    // NOTE(review): with the literal '1' below, the access is ALWAYS
    // sent and the failed-SC branch further down is unreachable; this
    // is a deliberate (flagged) choice per the TODO above.
    if (1/*do_access*/) {
        if (!cachePort->sendTiming(cache_req->dataPkt)) {
            // Port refused the packet: stash everything needed to
            // resend it when recvRetry() fires, and mark the request
            // incomplete so the stage stalls.
            DPRINTF(InOrderCachePort,
                    "[tid:%i] [sn:%i] is waiting to retry request\n",
                    tid, inst->seqNum);

            retrySlot = cache_req->getSlot();
            retryReq = cache_req;
            retryPkt = cache_req->dataPkt;

            cacheStatus = cacheWaitRetry;

            //cacheBlocked = true;

            DPRINTF(InOrderStall, "STALL: \n");

            cache_req->setCompleted(false);
        } else {
            // Packet accepted: the access is now pending a response.
            DPRINTF(InOrderCachePort,
                    "[tid:%i] [sn:%i] is now waiting for cache response\n",
                    tid, inst->seqNum);
            cache_req->setCompleted();
            cache_req->setMemAccPending();
            cacheStatus = cacheWaitResponse;
            cacheBlocked = false;
        }
    } else if (!do_access && memReq->isLLSC()){
        // Store-Conditional instructions complete even if they "failed"
        assert(cache_req->inst->isStoreConditional());
        cache_req->setCompleted(true);

        DPRINTF(LLSC,
                "[tid:%i]: T%i Ignoring Failed Store Conditional Access\n",
                tid, tid);

        // Extra data 0 signals SC failure to the instruction.
        cache_req->dataPkt->req->setExtraData(0);

        processCacheCompletion(cache_req->dataPkt);

        // Automatically set these since we ignored the memory access
        //cache_req->setMemAccPending(false);
        //cache_req->setMemAccCompleted();
    } else {
        // Make cache request again since access due to
        // inability to access
        DPRINTF(InOrderStall, "STALL: \n");
        cache_req->setCompleted(false);
    }

    return fault;
}
450
// Handle a response packet from the cache: for fetches, predecode the
// returned bytes into the instruction; for loads/stores, complete the
// memory access (including load-linked bookkeeping). Squashed requests
// are acknowledged and dropped. Finally wakes the CPU/stage waiting on
// this completion.
void
CacheUnit::processCacheCompletion(PacketPtr pkt)
{
    // Cast to correct packet type
    CacheReqPacket* cache_pkt = dynamic_cast<CacheReqPacket*>(pkt);
    assert(cache_pkt);

    // If the request was squashed after the packet went out, just
    // retire it without touching the instruction.
    if (cache_pkt->cacheReq->isSquashed()) {
        DPRINTF(InOrderCachePort,
                "Ignoring completion of squashed access, [tid:%i] [sn:%i]\n",
                cache_pkt->cacheReq->getInst()->readTid(),
                cache_pkt->cacheReq->getInst()->seqNum);

        cache_pkt->cacheReq->done();
        delete cache_pkt;
        return;
    }

    DPRINTF(InOrderCachePort,
            "[tid:%u]: [sn:%i]: Waking from cache access to addr. %08p\n",
            cache_pkt->cacheReq->getInst()->readTid(),
            cache_pkt->cacheReq->getInst()->seqNum,
            cache_pkt->cacheReq->getInst()->getMemAddr());

    // Cast to correct request type
    // NOTE(review): the live request is re-looked-up via findRequest()
    // rather than taken from the packet — presumably because the
    // request object may have been replaced since the packet was sent;
    // confirm against Resource::findRequest().
    CacheRequest *cache_req = dynamic_cast<CacheReqPtr>(
        findRequest(cache_pkt->cacheReq->getInst()));
    assert(cache_req);


    // Get resource request info
    unsigned stage_num = cache_req->getStageNum();
    DynInstPtr inst = cache_req->inst;
    unsigned tid;


    tid = cache_req->inst->readTid();

    if (!cache_req->isSquashed()) {
        if (inst->resSched.top()->cmd == CompleteFetch) {
            DPRINTF(InOrderCachePort,
                    "[tid:%u]: [sn:%i]: Processing fetch access\n",
                    tid, inst->seqNum);

            // NOTE: This is only allowing a thread to fetch one line
            //       at a time. Re-examine when/if prefetching
            //       gets implemented.
            //memcpy(fetchData[tid], cache_pkt->getPtr<uint8_t>(),
            //       cache_pkt->getSize());

            // Get the instruction from the array of the cache line.
            // @todo: update thsi
            ExtMachInst ext_inst;
            StaticInstPtr staticInst = NULL;
            Addr inst_pc = inst->readPC();
            // Fetched bytes are converted from guest to host byte
            // order before predecoding.
            MachInst mach_inst = TheISA::gtoh(*reinterpret_cast<TheISA::MachInst *>
                                              (cache_pkt->getPtr<uint8_t>()));

            // Run the raw machine word through the ISA predecoder to
            // obtain the extended machine instruction.
            predecoder.setTC(cpu->thread[tid]->getTC());
            predecoder.moreBytes(inst_pc, inst_pc, mach_inst);
            ext_inst = predecoder.getExtMachInst();

            inst->setMachInst(ext_inst);

            // Set Up More TraceData info
            if (inst->traceData) {
                inst->traceData->setStaticInst(inst->staticInst);
                inst->traceData->setPC(inst->readPC());
            }

        } else if (inst->staticInst && inst->isMemRef()) {
            DPRINTF(InOrderCachePort,
                    "[tid:%u]: [sn:%i]: Processing cache access\n",
                    tid, inst->seqNum);

            // Let the instruction write back its loaded data / finish
            // its store using the response packet.
            inst->completeAcc(pkt);

            if (inst->isLoad()) {
                assert(cache_pkt->isRead());

                // Load-linked establishes the lock address/flag.
                if (cache_pkt->req->isLLSC()) {
                    DPRINTF(InOrderCachePort,
                            "[tid:%u]: Handling Load-Linked for [sn:%u]\n",
                            tid, inst->seqNum);
                    TheISA::handleLockedRead(cpu, cache_pkt->req);
                }

                // @TODO: Hardcoded to for load instructions. Assumes that
                // the dest. idx 0 is always where the data is loaded to.
                DPRINTF(InOrderCachePort,
                        "[tid:%u]: [sn:%i]: Data loaded was: %08p\n",
                        tid, inst->seqNum, inst->readIntResult(0));
                DPRINTF(InOrderCachePort,
                        "[tid:%u]: [sn:%i]: FP Data loaded was: %08p\n",
                        tid, inst->seqNum, inst->readFloatResult(0));
            } else if(inst->isStore()) {
                assert(cache_pkt->isWrite());

                DPRINTF(InOrderCachePort,
                        "[tid:%u]: [sn:%i]: Data stored was: FIX ME\n",
                        tid, inst->seqNum/*,
                        getMemData(cache_pkt)*/);
            }

            delete cache_pkt;
        }

        cache_req->setMemAccPending(false);
        cache_req->setMemAccCompleted();

        // Wake up the CPU (if it went to sleep and was waiting on this
        // completion event).
        cpu->wakeCPU();

        DPRINTF(Activity, "[tid:%u] Activating %s due to cache completion\n",
                tid, cpu->pipelineStage[stage_num]->name());

        cpu->switchToActive(stage_num);
    } else {
        // Request was squashed between lookup and here: only mark the
        // access complete so the squash path can free the slot.
        DPRINTF(InOrderCachePort,
                "[tid:%u] Miss on block @ %08p completed, but squashed\n",
                tid, cache_req->inst->readPC());
        cache_req->setMemAccCompleted();
    }

    inst->unsetMemAddr();
}
578
579 void
580 CacheUnit::recvRetry()
581 {
582 DPRINTF(InOrderCachePort, "Retrying Request for [tid:%i] [sn:%i]\n",
583 retryReq->inst->readTid(), retryReq->inst->seqNum);
584
585 assert(retryPkt != NULL);
586 assert(cacheBlocked);
587 assert(cacheStatus == cacheWaitRetry);
588
589 if (cachePort->sendTiming(retryPkt)) {
590 cacheStatus = cacheWaitResponse;
591 retryPkt = NULL;
592 cacheBlocked = false;
593 } else {
594 DPRINTF(InOrderCachePort,
595 "Retry Request for [tid:%i] [sn:%i] failed\n",
596 retryReq->inst->readTid(), retryReq->inst->seqNum);
597 }
598 }
599
600 void
601 CacheUnit::squash(DynInstPtr inst, int stage_num,
602 InstSeqNum squash_seq_num, unsigned tid)
603 {
604 vector<int> slot_remove_list;
605
606 map<int, ResReqPtr>::iterator map_it = reqMap.begin();
607 map<int, ResReqPtr>::iterator map_end = reqMap.end();
608
609 while (map_it != map_end) {
610 ResReqPtr req_ptr = (*map_it).second;
611
612 if (req_ptr &&
613 req_ptr->getInst()->readTid() == tid &&
614 req_ptr->getInst()->seqNum > squash_seq_num) {
615
616 DPRINTF(InOrderCachePort,
617 "[tid:%i] Squashing request from [sn:%i]\n",
618 req_ptr->getInst()->readTid(), req_ptr->getInst()->seqNum);
619
620 req_ptr->setSquashed();
621
622 req_ptr->getInst()->setSquashed();
623
624 CacheReqPtr cache_req = dynamic_cast<CacheReqPtr>(req_ptr);
625 assert(cache_req);
626
627 if (!cache_req->isMemAccPending()) {
628 // Mark request for later removal
629 cpu->reqRemoveList.push(req_ptr);
630
631 // Mark slot for removal from resource
632 slot_remove_list.push_back(req_ptr->getSlot());
633 }
634 }
635
636 map_it++;
637 }
638
639 // Now Delete Slot Entry from Req. Map
640 for (int i = 0; i < slot_remove_list.size(); i++)
641 freeSlot(slot_remove_list[i]);
642 }
643
644 uint64_t
645 CacheUnit::getMemData(Packet *packet)
646 {
647 switch (packet->getSize())
648 {
649 case 8:
650 return packet->get<uint8_t>();
651
652 case 16:
653 return packet->get<uint16_t>();
654
655 case 32:
656 return packet->get<uint32_t>();
657
658 case 64:
659 return packet->get<uint64_t>();
660
661 default:
662 panic("bad store data size = %d\n", packet->getSize());
663 }
664 }
665