inorder: fix address list bug
[gem5.git] / src / cpu / inorder / resources / cache_unit.cc
1 /*
2 * Copyright (c) 2007 MIPS Technologies, Inc.
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 *
28 * Authors: Korey Sewell
29 *
30 */
31
32 #include <vector>
33 #include <list>
34
35 #include "arch/isa_traits.hh"
36 #include "arch/locked_mem.hh"
37 #include "arch/utility.hh"
38 #include "arch/predecoder.hh"
39 #include "config/the_isa.hh"
40 #include "cpu/inorder/resources/cache_unit.hh"
41 #include "cpu/inorder/pipeline_traits.hh"
42 #include "cpu/inorder/cpu.hh"
43 #include "cpu/inorder/resource_pool.hh"
44 #include "mem/request.hh"
45
46 using namespace std;
47 using namespace TheISA;
48 using namespace ThePipeline;
49
/**
 * Atomic-mode accesses are not supported by this port; the in-order CPU's
 * cache unit only operates in timing mode, so reaching this callback is a
 * configuration error.
 */
Tick
CacheUnit::CachePort::recvAtomic(PacketPtr pkt)
{
    panic("CacheUnit::CachePort doesn't expect recvAtomic callback!");
    // Unreachable after panic(); satisfies the Tick return type.
    return curTick;
}
56
/**
 * Functional accesses are not expected on this port; treat as a
 * configuration error.
 */
void
CacheUnit::CachePort::recvFunctional(PacketPtr pkt)
{
    panic("CacheUnit::CachePort doesn't expect recvFunctional callback!");
}
62
63 void
64 CacheUnit::CachePort::recvStatusChange(Status status)
65 {
66 if (status == RangeChange)
67 return;
68
69 panic("CacheUnit::CachePort doesn't expect recvStatusChange callback!");
70 }
71
/**
 * Timing response from the memory system: hand the packet back to the
 * owning cache unit for completion processing.  Always accepts the packet.
 */
bool
CacheUnit::CachePort::recvTiming(Packet *pkt)
{
    cachePortUnit->processCacheCompletion(pkt);
    return true;
}
78
/// Retry callback from the memory system; forwarded to the owning unit so
/// it can unblock its port.
void
CacheUnit::CachePort::recvRetry()
{
    cachePortUnit->recvRetry();
}
84
85 CacheUnit::CacheUnit(string res_name, int res_id, int res_width,
86 int res_latency, InOrderCPU *_cpu, ThePipeline::Params *params)
87 : Resource(res_name, res_id, res_width, res_latency, _cpu),
88 cachePortBlocked(false), predecoder(NULL)
89 {
90 cachePort = new CachePort(this);
91
92 // Hard-Code Selection For Now
93 if (res_name == "icache_port")
94 _tlb = params->itb;
95 else if (res_name == "dcache_port")
96 _tlb = params->dtb;
97 else
98 fatal("Unrecognized TLB name passed by user");
99
100 for (int i=0; i < MaxThreads; i++) {
101 tlbBlocked[i] = false;
102 }
103 }
104
105 TheISA::TLB*
106 CacheUnit::tlb()
107 {
108 return _tlb;
109
110 }
111
112 Port *
113 CacheUnit::getPort(const string &if_name, int idx)
114 {
115 if (if_name == resName)
116 return cachePort;
117 else
118 return NULL;
119 }
120
121 void
122 CacheUnit::init()
123 {
124 // Currently Used to Model TLB Latency. Eventually
125 // Switch to Timing TLB translations.
126 resourceEvent = new CacheUnitEvent[width];
127
128 initSlots();
129 }
130
131 int
132 CacheUnit::getSlot(DynInstPtr inst)
133 {
134 ThreadID tid = inst->readTid();
135
136 if (tlbBlocked[inst->threadNumber]) {
137 return -1;
138 }
139
140 // For a Split-Load, the instruction would have processed once already
141 // causing the address to be unset.
142 if (!inst->validMemAddr() && !inst->splitInst) {
143 panic("[tid:%i][sn:%i] Mem. Addr. must be set before requesting cache access\n",
144 inst->readTid(), inst->seqNum);
145 }
146
147 Addr req_addr = inst->getMemAddr();
148
149 if (resName == "icache_port" ||
150 find(addrList[tid].begin(), addrList[tid].end(), req_addr) ==
151 addrList[tid].end()) {
152
153 int new_slot = Resource::getSlot(inst);
154
155 if (new_slot == -1)
156 return -1;
157
158 inst->memTime = curTick;
159 setAddrDependency(inst);
160 return new_slot;
161 } else {
162 // Allow same instruction multiple accesses to same address
163 // should only happen maybe after a squashed inst. needs to replay
164 if (addrMap[tid][req_addr] == inst->seqNum) {
165 int new_slot = Resource::getSlot(inst);
166
167 if (new_slot == -1)
168 return -1;
169
170 return new_slot;
171 } else {
172 DPRINTF(InOrderCachePort,
173 "[tid:%i] Denying request because there is an outstanding"
174 " request to/for addr. %08p. by [sn:%i] @ tick %i\n",
175 inst->readTid(), req_addr, addrMap[tid][req_addr], inst->memTime);
176 return -1;
177 }
178 }
179
180 return -1;
181 }
182
183 void
184 CacheUnit::setAddrDependency(DynInstPtr inst)
185 {
186 Addr req_addr = inst->getMemAddr();
187 ThreadID tid = inst->readTid();
188
189 addrList[tid].push_back(req_addr);
190 addrMap[tid][req_addr] = inst->seqNum;
191
192 DPRINTF(AddrDep,
193 "[tid:%i]: [sn:%i]: Address %08p added to dependency list\n",
194 inst->readTid(), inst->seqNum, req_addr);
195
196 //@NOTE: 10 is an arbitrarily "high" number here, but to be exact
197 // we would need to know the # of outstanding accesses
198 // a priori. Information like fetch width, stage width,
199 // and the branch resolution stage would be useful for the
200 // icache_port (among other things). For the dcache, the #
201 // of outstanding cache accesses might be sufficient.
202 assert(addrList[tid].size() < 10);
203 }
204
205 void
206 CacheUnit::removeAddrDependency(DynInstPtr inst)
207 {
208 ThreadID tid = inst->readTid();
209
210 Addr mem_addr = inst->getMemAddr();
211
212 inst->unsetMemAddr();
213
214 // Erase from Address List
215 vector<Addr>::iterator vect_it = find(addrList[tid].begin(), addrList[tid].end(),
216 mem_addr);
217 assert(vect_it != addrList[tid].end() || inst->splitInst);
218
219 if (vect_it != addrList[tid].end()) {
220 DPRINTF(AddrDep,
221 "[tid:%i]: [sn:%i] Address %08p removed from dependency list\n",
222 inst->readTid(), inst->seqNum, (*vect_it));
223
224 addrList[tid].erase(vect_it);
225
226 // Erase From Address Map (Used for Debugging)
227 addrMap[tid].erase(addrMap[tid].find(mem_addr));
228 }
229
230
231 }
232
233 ResReqPtr
234 CacheUnit::findRequest(DynInstPtr inst)
235 {
236 map<int, ResReqPtr>::iterator map_it = reqMap.begin();
237 map<int, ResReqPtr>::iterator map_end = reqMap.end();
238
239 while (map_it != map_end) {
240 CacheRequest* cache_req = dynamic_cast<CacheRequest*>((*map_it).second);
241 assert(cache_req);
242
243 if (cache_req &&
244 cache_req->getInst() == inst &&
245 cache_req->instIdx == inst->resSched.top()->idx) {
246 return cache_req;
247 }
248 map_it++;
249 }
250
251 return NULL;
252 }
253
254 ResReqPtr
255 CacheUnit::findSplitRequest(DynInstPtr inst, int idx)
256 {
257 map<int, ResReqPtr>::iterator map_it = reqMap.begin();
258 map<int, ResReqPtr>::iterator map_end = reqMap.end();
259
260 while (map_it != map_end) {
261 CacheRequest* cache_req = dynamic_cast<CacheRequest*>((*map_it).second);
262 assert(cache_req);
263
264 if (cache_req &&
265 cache_req->getInst() == inst &&
266 cache_req->instIdx == idx) {
267 return cache_req;
268 }
269 map_it++;
270 }
271
272 return NULL;
273 }
274
275
/**
 * Build a new CacheRequest for the instruction's current schedule entry.
 *
 * The entry's command selects the memory command for the eventual packet
 * (reads/fetches -> ReadReq, writes -> WriteReq); the request itself
 * carries the schedule command so execute() can dispatch on it later.
 * Panics if the instruction has no valid memory address yet, or on an
 * unrecognized schedule command.
 */
ResReqPtr
CacheUnit::getRequest(DynInstPtr inst, int stage_num, int res_idx,
                      int slot_num, unsigned cmd)
{
    ScheduleEntry* sched_entry = inst->resSched.top();

    if (!inst->validMemAddr()) {
        panic("Mem. Addr. must be set before requesting cache access\n");
    }

    MemCmd::Command pkt_cmd;

    switch (sched_entry->cmd)
    {
      case InitSecondSplitRead:
        pkt_cmd = MemCmd::ReadReq;

        DPRINTF(InOrderCachePort,
                "[tid:%i]: Read request from [sn:%i] for addr %08p\n",
                inst->readTid(), inst->seqNum, inst->split2ndAddr);
        break;

      case InitiateReadData:
        pkt_cmd = MemCmd::ReadReq;

        DPRINTF(InOrderCachePort,
                "[tid:%i]: Read request from [sn:%i] for addr %08p\n",
                inst->readTid(), inst->seqNum, inst->getMemAddr());
        break;

      case InitSecondSplitWrite:
        pkt_cmd = MemCmd::WriteReq;

        DPRINTF(InOrderCachePort,
                "[tid:%i]: Write request from [sn:%i] for addr %08p\n",
                inst->readTid(), inst->seqNum, inst->split2ndAddr);
        break;

      case InitiateWriteData:
        pkt_cmd = MemCmd::WriteReq;

        DPRINTF(InOrderCachePort,
                "[tid:%i]: Write request from [sn:%i] for addr %08p\n",
                inst->readTid(), inst->seqNum, inst->getMemAddr());
        break;

      case InitiateFetch:
        pkt_cmd = MemCmd::ReadReq;

        DPRINTF(InOrderCachePort,
                "[tid:%i]: Fetch request from [sn:%i] for addr %08p\n",
                inst->readTid(), inst->seqNum, inst->getMemAddr());
        break;

      default:
        panic("%i: Unexpected request type (%i) to %s", curTick,
              sched_entry->cmd, name());
    }

    // Flags are hard-coded to 0 here; the schedule-entry index is passed
    // through so split halves can be told apart later.
    return new CacheRequest(this, inst, stage_num, id, slot_num,
                            sched_entry->cmd, 0, pkt_cmd,
                            0/*flags*/, this->cpu->readCpuId(),
                            inst->resSched.top()->idx);
}
340
/**
 * Called when an instruction re-requests this resource while it still has
 * an outstanding CacheRequest.  If the current schedule entry carries a
 * different command, the request is updated to it; otherwise the request
 * is simply re-serviced.
 */
void
CacheUnit::requestAgain(DynInstPtr inst, bool &service_request)
{
    CacheReqPtr cache_req = dynamic_cast<CacheReqPtr>(findRequest(inst));
    assert(cache_req);

    // Check to see if this instruction is requesting the same command
    // or a different one
    if (cache_req->cmd != inst->resSched.top()->cmd &&
        cache_req->instIdx == inst->resSched.top()->idx) {
        // If different, then update command in the request
        cache_req->cmd = inst->resSched.top()->cmd;
        DPRINTF(InOrderCachePort,
                "[tid:%i]: [sn:%i]: Updating the command for this "
                "instruction\n ", inst->readTid(), inst->seqNum);

        service_request = true;
    } else if (inst->resSched.top()->idx != CacheUnit::InitSecondSplitRead &&
               inst->resSched.top()->idx != CacheUnit::InitSecondSplitWrite) {
        // NOTE(review): this compares the schedule entry's *idx* field
        // against the command enum values InitSecondSplit{Read,Write};
        // it looks like ->cmd was intended — confirm before changing,
        // since altering it changes which requests get re-serviced.
        // If same command, just check to see if memory access was completed
        // but dont try to re-execute
        DPRINTF(InOrderCachePort,
                "[tid:%i]: [sn:%i]: requesting this resource again\n",
                inst->readTid(), inst->seqNum);

        service_request = true;
    }
}
369
/**
 * Perform an (atomic) TLB translation for a fetch or data access.
 *
 * Builds the appropriate Request object — fetch, first data access, or
 * second half of a split access — stores it on both the instruction and
 * the CacheRequest, then translates it atomically.  On a fault the stage
 * is stalled, the thread's TLB is marked blocked, a wakeup event is
 * scheduled (modeling TLB-miss latency), and the CPU trap is raised.
 *
 * @return the translation fault (NoFault on success), also recorded in
 *         cache_req->fault.
 */
Fault
CacheUnit::doTLBAccess(DynInstPtr inst, CacheReqPtr cache_req, int acc_size,
                       int flags, TheISA::TLB::Mode tlb_mode)
{
    ThreadID tid = inst->readTid();
    Addr aligned_addr = inst->getMemAddr();
    unsigned stage_num = cache_req->getStageNum();
    unsigned slot_idx = cache_req->getSlot();

    if (tlb_mode == TheISA::TLB::Execute) {
        // Instruction fetch: request is keyed on the thread id.
        inst->fetchMemReq = new Request(inst->readTid(), aligned_addr,
                                        acc_size, flags, inst->readPC(),
                                        cpu->readCpuId(), inst->readTid());
        cache_req->memReq = inst->fetchMemReq;
    } else {
        if (!cache_req->is2ndSplit()) {
            // First (or only) data access; keyed on the thread's ASID.
            inst->dataMemReq = new Request(cpu->asid[tid], aligned_addr,
                                           acc_size, flags, inst->readPC(),
                                           cpu->readCpuId(), inst->readTid());
            cache_req->memReq = inst->dataMemReq;
        } else {
            // Second half of a split access: uses the saved split address.
            assert(inst->splitInst);

            inst->splitMemReq = new Request(cpu->asid[tid],
                                            inst->split2ndAddr,
                                            acc_size,
                                            flags,
                                            inst->readPC(),
                                            cpu->readCpuId(),
                                            tid);
            cache_req->memReq = inst->splitMemReq;
        }
    }

    cache_req->fault =
        _tlb->translateAtomic(cache_req->memReq,
                              cpu->thread[tid]->getTC(), tlb_mode);

    if (cache_req->fault != NoFault) {
        DPRINTF(InOrderTLB, "[tid:%i]: %s encountered while translating "
                "addr:%08p for [sn:%i].\n", tid, cache_req->fault->name(),
                cache_req->memReq->getVaddr(), inst->seqNum);

        // Stall the issuing stage until the TLB-miss event wakes it.
        cpu->pipelineStage[stage_num]->setResStall(cache_req, tid);

        tlbBlocked[tid] = true;

        cache_req->tlbStall = true;

        // One-cycle wakeup event models the TLB-miss handling latency.
        scheduleEvent(slot_idx, 1);

        cpu->trap(cache_req->fault, tid);
    } else {
        DPRINTF(InOrderTLB, "[tid:%i]: [sn:%i] virt. addr %08p translated "
                "to phys. addr:%08p.\n", tid, inst->seqNum,
                cache_req->memReq->getVaddr(),
                cache_req->memReq->getPaddr());
    }

    return cache_req->fault;
}
432
/**
 * Initiate a timing read of sizeof(T) bytes at addr for inst.
 *
 * If the access crosses a cache-line boundary it is split in two: the
 * first call handles bytes up to the line boundary and pushes two extra
 * schedule entries (InitSecondSplitRead / CompleteSecondSplitRead) so the
 * second half is issued and completed later; the re-entrant call for the
 * second half is flagged by inst->split2ndAccess.  Translation happens
 * via doTLBAccess(); the cache is only accessed when it returns NoFault.
 *
 * @return the TLB fault for this access (NoFault on success).
 */
template <class T>
Fault
CacheUnit::read(DynInstPtr inst, Addr addr, T &data, unsigned flags)
{
    CacheReqPtr cache_req = dynamic_cast<CacheReqPtr>(findRequest(inst));
    assert(cache_req && "Can't Find Instruction for Read!");

    // The block size of our peer
    unsigned blockSize = this->cachePort->peerBlockSize();

    //The size of the data we're trying to read.
    int dataSize = sizeof(T);

    if (inst->split2ndAccess) {
        // Second half of a split read: size was recorded on the first pass.
        dataSize = inst->split2ndSize;
        cache_req->splitAccess = true;
        cache_req->split2ndAccess = true;

        DPRINTF(InOrderCachePort, "[sn:%i] Split Read Access (2 of 2) for (%#x, %#x).\n", inst->seqNum,
                inst->getMemAddr(), inst->split2ndAddr);
    }

    //The address of the second part of this access if it needs to be split
    //across a cache line boundary.
    Addr secondAddr = roundDown(addr + dataSize - 1, blockSize);

    if (secondAddr > addr && !inst->split2ndAccess) {
        DPRINTF(InOrderCachePort, "%i: sn[%i] Split Read Access (1 of 2) for (%#x, %#x).\n", curTick, inst->seqNum,
                addr, secondAddr);

        // Save All "Total" Split Information
        // ==============================
        inst->splitInst = true;
        inst->splitMemData = new uint8_t[dataSize];
        inst->splitTotalSize = dataSize;

        if (!inst->splitInstSked) {
            // Schedule Split Read/Complete for Instruction
            // ==============================
            int stage_num = cache_req->getStageNum();

            int stage_pri = ThePipeline::getNextPriority(inst, stage_num);

            inst->resSched.push(new ScheduleEntry(stage_num,
                                                  stage_pri,
                                                  cpu->resPool->getResIdx(DCache),
                                                  CacheUnit::InitSecondSplitRead,
                                                  1)
                                );

            inst->resSched.push(new ScheduleEntry(stage_num + 1,
                                                  1/*stage_pri*/,
                                                  cpu->resPool->getResIdx(DCache),
                                                  CacheUnit::CompleteSecondSplitRead,
                                                  1)
                                );
            inst->splitInstSked = true;
        } else {
            // Already scheduled (e.g. this is a replay after a squash).
            DPRINTF(InOrderCachePort, "[tid:%i] [sn:%i] Retrying Split Read Access (1 of 2) for (%#x, %#x).\n",
                    inst->readTid(), inst->seqNum, addr, secondAddr);
        }

        // Split Information for First Access
        // ==============================
        dataSize = secondAddr - addr;
        cache_req->splitAccess = true;

        // Split Information for Second Access
        // ==============================
        inst->split2ndSize = addr + sizeof(T) - secondAddr;
        inst->split2ndAddr = secondAddr;
        // Second half's data lands right after the first half's bytes.
        inst->split2ndDataPtr = inst->splitMemData + dataSize;
        inst->split2ndFlags = flags;
    }

    doTLBAccess(inst, cache_req, dataSize, flags, TheISA::TLB::Read);

    if (cache_req->fault == NoFault) {
        if (!cache_req->splitAccess) {
            cache_req->reqData = new uint8_t[dataSize];
            doCacheAccess(inst, NULL);
        } else {
            // Split halves share the instruction-owned buffers.
            if (!inst->split2ndAccess) {
                cache_req->reqData = inst->splitMemData;
            } else {
                cache_req->reqData = inst->split2ndDataPtr;
            }

            doCacheAccess(inst, NULL, cache_req);
        }
    }

    return cache_req->fault;
}
529
/**
 * Initiate a timing write of sizeof(T) bytes at addr for inst.
 *
 * Mirrors read(): accesses crossing a cache-line boundary are split in
 * two, with InitSecondSplitWrite / CompleteSecondSplitWrite schedule
 * entries pushed for the second half (flagged by inst->split2ndAccess on
 * re-entry).  Translation goes through doTLBAccess(); the cache is only
 * touched when translation succeeds.
 *
 * @param write_res optional store-conditional result pointer, forwarded
 *                  to doCacheAccess().
 * @return the TLB fault for this access (NoFault on success).
 */
template <class T>
Fault
CacheUnit::write(DynInstPtr inst, T data, Addr addr, unsigned flags,
                 uint64_t *write_res)
{
    CacheReqPtr cache_req = dynamic_cast<CacheReqPtr>(findRequest(inst));
    assert(cache_req && "Can't Find Instruction for Write!");

    // The block size of our peer
    unsigned blockSize = this->cachePort->peerBlockSize();

    //The size of the data we're trying to read.
    int dataSize = sizeof(T);

    if (inst->split2ndAccess) {
        // Second half of a split write: size recorded on the first pass.
        dataSize = inst->split2ndSize;
        cache_req->splitAccess = true;
        cache_req->split2ndAccess = true;

        DPRINTF(InOrderCachePort, "[sn:%i] Split Write Access (2 of 2) for (%#x, %#x).\n", inst->seqNum,
                inst->getMemAddr(), inst->split2ndAddr);
    }

    //The address of the second part of this access if it needs to be split
    //across a cache line boundary.
    Addr secondAddr = roundDown(addr + dataSize - 1, blockSize);

    if (secondAddr > addr && !inst->split2ndAccess) {

        DPRINTF(InOrderCachePort, "[sn:%i] Split Write Access (1 of 2) for (%#x, %#x).\n", inst->seqNum,
                addr, secondAddr);

        // Save All "Total" Split Information
        // ==============================
        inst->splitInst = true;
        inst->splitTotalSize = dataSize;

        if (!inst->splitInstSked) {
            // Schedule Split Read/Complete for Instruction
            // ==============================
            int stage_num = cache_req->getStageNum();

            int stage_pri = ThePipeline::getNextPriority(inst, stage_num);

            inst->resSched.push(new ScheduleEntry(stage_num,
                                                  stage_pri,
                                                  cpu->resPool->getResIdx(DCache),
                                                  CacheUnit::InitSecondSplitWrite,
                                                  1)
                                );

            inst->resSched.push(new ScheduleEntry(stage_num + 1,
                                                  1/*stage_pri*/,
                                                  cpu->resPool->getResIdx(DCache),
                                                  CacheUnit::CompleteSecondSplitWrite,
                                                  1)
                                );
            inst->splitInstSked = true;
        } else {
            // Already scheduled (e.g. replay after a squash).
            DPRINTF(InOrderCachePort, "[tid:%i] sn:%i] Retrying Split Read Access (1 of 2) for (%#x, %#x).\n",
                    inst->readTid(), inst->seqNum, addr, secondAddr);
        }

        // Split Information for First Access
        // ==============================
        dataSize = secondAddr - addr;
        cache_req->splitAccess = true;

        // Split Information for Second Access
        // ==============================
        inst->split2ndSize = addr + sizeof(T) - secondAddr;
        inst->split2ndAddr = secondAddr;
        // NOTE(review): this byte-offsets into the instruction's storeData
        // by reinterpreting its address — presumably so the second half
        // writes the upper bytes of the store value; confirm the pointer
        // arithmetic matches storeData's actual layout.
        inst->split2ndStoreDataPtr = &cache_req->inst->storeData;
        inst->split2ndStoreDataPtr += dataSize;
        inst->split2ndFlags = flags;
        // Redundant with the assignment inside the if-branch above (the
        // else-branch is only reachable when it is already true).
        inst->splitInstSked = true;
    }

    doTLBAccess(inst, cache_req, dataSize, flags, TheISA::TLB::Write);

    if (cache_req->fault == NoFault) {
        if (!cache_req->splitAccess) {
            // Remove this line since storeData is saved in INST?
            cache_req->reqData = new uint8_t[dataSize];
            doCacheAccess(inst, write_res);
        } else {
            doCacheAccess(inst, write_res, cache_req);
        }

    }

    return cache_req->fault;
}
625
626
/**
 * Main per-slot dispatch for the cache unit.
 *
 * Decodes the slot's request command and either initiates an access
 * (fetch / read / write, or the second half of a split access) or tries
 * to complete one that is outstanding.  Completion cases either finish
 * the request (done()) or mark it incomplete so the stage retries.
 * Bails out immediately if the cache port is blocked.
 */
void
CacheUnit::execute(int slot_num)
{
    if (cachePortBlocked) {
        DPRINTF(InOrderCachePort, "Cache Port Blocked. Cannot Access\n");
        return;
    }

    CacheReqPtr cache_req = dynamic_cast<CacheReqPtr>(reqMap[slot_num]);
    assert(cache_req);

    DynInstPtr inst = cache_req->inst;
#if TRACING_ON
    // Only used for DPRINTF output below.
    ThreadID tid = inst->readTid();
    int seq_num = inst->seqNum;
    std::string acc_type = "write";

#endif

    cache_req->fault = NoFault;

    switch (cache_req->cmd)
    {
      case InitiateFetch:
        {
            //@TODO: Switch to size of full cache block. Store in fetch buffer
            int acc_size = sizeof(TheISA::MachInst);

            doTLBAccess(inst, cache_req, acc_size, 0, TheISA::TLB::Execute);

            // Only Do Access if no fault from TLB
            if (cache_req->fault == NoFault) {

                DPRINTF(InOrderCachePort,
                        "[tid:%u]: Initiating fetch access to %s for addr. %08p\n",
                        tid, name(), cache_req->inst->getMemAddr());

                cache_req->reqData = new uint8_t[acc_size];

                inst->setCurResSlot(slot_num);

                doCacheAccess(inst);
            }

            break;
        }

      case InitiateReadData:
#if TRACING_ON
        acc_type = "read";
#endif
        // Intentional fallthrough: reads and writes share the
        // initiation path below.
      case InitiateWriteData:

        DPRINTF(InOrderCachePort,
                "[tid:%u]: [sn:%i] Initiating data %s access to %s for addr. %08p\n",
                tid, inst->seqNum, acc_type, name(), cache_req->inst->getMemAddr());

        inst->setCurResSlot(slot_num);

        // Prefetches are "executed" directly; ordinary memory refs start
        // the timing access (eventually reaching read()/write() above).
        if (inst->isDataPrefetch() || inst->isInstPrefetch()) {
            inst->execute();
        } else {
            inst->initiateAcc();
        }

        break;

      case InitSecondSplitRead:
        DPRINTF(InOrderCachePort,
                "[tid:%u]: [sn:%i] Initiating split data read access to %s for addr. %08p\n",
                tid, inst->seqNum, name(), cache_req->inst->split2ndAddr);
        inst->split2ndAccess = true;
        assert(inst->split2ndAddr != 0);
        read(inst, inst->split2ndAddr, inst->split2ndData, inst->split2ndFlags);
        break;

      case InitSecondSplitWrite:
        DPRINTF(InOrderCachePort,
                "[tid:%u]: [sn:%i] Initiating split data write access to %s for addr. %08p\n",
                tid, inst->seqNum, name(), cache_req->inst->getMemAddr());

        inst->split2ndAccess = true;
        assert(inst->split2ndAddr != 0);
        write(inst, inst->split2ndAddr, inst->split2ndData, inst->split2ndFlags, NULL);
        break;

      case CompleteFetch:
        if (cache_req->isMemAccComplete()) {
            DPRINTF(InOrderCachePort,
                    "[tid:%i]: Completing Fetch Access for [sn:%i]\n",
                    tid, inst->seqNum);

            DPRINTF(InOrderCachePort, "[tid:%i]: Instruction [sn:%i] is: %s\n",
                    tid, seq_num, inst->staticInst->disassemble(inst->PC));

            removeAddrDependency(inst);

            delete cache_req->dataPkt;

            // Do not stall and switch threads for fetch... for now..
            // TODO: We need to detect cache misses for latencies > 1
            // cache_req->setMemStall(false);

            cache_req->done();
        } else {
            // Response not back yet; mark incomplete so the stage retries.
            DPRINTF(InOrderCachePort,
                    "[tid:%i]: [sn:%i]: Unable to Complete Fetch Access\n",
                    tid, inst->seqNum);
            DPRINTF(InOrderStall,
                    "STALL: [tid:%i]: Fetch miss from %08p\n",
                    tid, cache_req->inst->readPC());
            cache_req->setCompleted(false);
            //cache_req->setMemStall(true);
        }
        break;

      case CompleteReadData:
      case CompleteWriteData:
        DPRINTF(InOrderCachePort,
                "[tid:%i]: [sn:%i]: Trying to Complete Data Access\n",
                tid, inst->seqNum);

        // Prefetches complete unconditionally; otherwise wait for the
        // memory response.
        if (cache_req->isMemAccComplete() ||
            inst->isDataPrefetch() ||
            inst->isInstPrefetch()) {
            removeAddrDependency(inst);
            cache_req->setMemStall(false);
            cache_req->done();
        } else {
            DPRINTF(InOrderStall, "STALL: [tid:%i]: Data miss from %08p\n",
                    tid, cache_req->inst->getMemAddr());
            cache_req->setCompleted(false);
            cache_req->setMemStall(true);
        }
        break;

      case CompleteSecondSplitRead:
        DPRINTF(InOrderCachePort,
                "[tid:%i]: [sn:%i]: Trying to Complete Split Data Read Access\n",
                tid, inst->seqNum);

        if (cache_req->isMemAccComplete() ||
            inst->isDataPrefetch() ||
            inst->isInstPrefetch()) {
            removeAddrDependency(inst);
            cache_req->setMemStall(false);
            cache_req->done();
        } else {
            DPRINTF(InOrderStall, "STALL: [tid:%i]: Data miss from %08p\n",
                    tid, cache_req->inst->split2ndAddr);
            cache_req->setCompleted(false);
            cache_req->setMemStall(true);
        }
        break;

      case CompleteSecondSplitWrite:
        DPRINTF(InOrderCachePort,
                "[tid:%i]: [sn:%i]: Trying to Complete Split Data Write Access\n",
                tid, inst->seqNum);

        if (cache_req->isMemAccComplete() ||
            inst->isDataPrefetch() ||
            inst->isInstPrefetch()) {
            removeAddrDependency(inst);
            cache_req->setMemStall(false);
            cache_req->done();
        } else {
            DPRINTF(InOrderStall, "STALL: [tid:%i]: Data miss from %08p\n",
                    tid, cache_req->inst->split2ndAddr);
            cache_req->setCompleted(false);
            cache_req->setMemStall(true);
        }
        break;

      default:
        fatal("Unrecognized command to %s", resName);
    }
}
807
808 void
809 CacheUnit::prefetch(DynInstPtr inst)
810 {
811 warn_once("Prefetching currently unimplemented");
812
813 CacheReqPtr cache_req
814 = dynamic_cast<CacheReqPtr>(reqMap[inst->getCurResSlot()]);
815 assert(cache_req);
816
817 // Clean-Up cache resource request so
818 // other memory insts. can use them
819 cache_req->setCompleted();
820 cachePortBlocked = false;
821 cache_req->setMemAccPending(false);
822 cache_req->setMemAccCompleted();
823 inst->unsetMemAddr();
824 }
825
826
827 void
828 CacheUnit::writeHint(DynInstPtr inst)
829 {
830 warn_once("Write Hints currently unimplemented");
831
832 CacheReqPtr cache_req
833 = dynamic_cast<CacheReqPtr>(reqMap[inst->getCurResSlot()]);
834 assert(cache_req);
835
836 // Clean-Up cache resource request so
837 // other memory insts. can use them
838 cache_req->setCompleted();
839 cachePortBlocked = false;
840 cache_req->setMemAccPending(false);
841 cache_req->setMemAccCompleted();
842 inst->unsetMemAddr();
843 }
844
// @TODO: Split into doCacheRead() and doCacheWrite()
/**
 * Build the packet for a (possibly split) cache access and try to send it
 * over the timing port.
 *
 * Rewrites the packet command for LL/SC and swap variants, wires the data
 * pointer (read buffer, store data, or second-split store pointer), and
 * handles the three outcomes: port accepted (mark pending), port blocked
 * (mark incomplete, block the port), or a failed store-conditional (which
 * completes locally without a cache access).
 *
 * @param split_req when non-NULL, use this request (second half of a
 *                  split access) instead of the slot's current request.
 */
Fault
CacheUnit::doCacheAccess(DynInstPtr inst, uint64_t *write_res, CacheReqPtr split_req)
{
    Fault fault = NoFault;
#if TRACING_ON
    ThreadID tid = inst->readTid();
#endif

    CacheReqPtr cache_req;

    if (split_req == NULL) {
        cache_req = dynamic_cast<CacheReqPtr>(reqMap[inst->getCurResSlot()]);
    } else{
        cache_req = split_req;
    }

    assert(cache_req);

    // Check for LL/SC and if so change command
    if (cache_req->memReq->isLLSC() && cache_req->pktCmd == MemCmd::ReadReq) {
        cache_req->pktCmd = MemCmd::LoadLockedReq;
    }

    // Writes may really be swaps or store-conditionals.
    if (cache_req->pktCmd == MemCmd::WriteReq) {
        cache_req->pktCmd =
            cache_req->memReq->isSwap() ? MemCmd::SwapReq :
            (cache_req->memReq->isLLSC() ? MemCmd::StoreCondReq
             : MemCmd::WriteReq);
    }

    cache_req->dataPkt = new CacheReqPacket(cache_req, cache_req->pktCmd,
                                            Packet::Broadcast, cache_req->instIdx);

    if (cache_req->dataPkt->isRead()) {
        cache_req->dataPkt->dataStatic(cache_req->reqData);
    } else if (cache_req->dataPkt->isWrite()) {
        // Writes source their data from the instruction's store buffer
        // (or the second-split pointer into it).
        if (inst->split2ndAccess) {
            cache_req->dataPkt->dataStatic(inst->split2ndStoreDataPtr);
        } else {
            cache_req->dataPkt->dataStatic(&cache_req->inst->storeData);
        }

        if (cache_req->memReq->isCondSwap()) {
            assert(write_res);
            cache_req->memReq->setExtraData(*write_res);
        }
    }

    bool do_access = true; // flag to suppress cache access

    Request *memReq = cache_req->dataPkt->req;

    if (cache_req->dataPkt->isWrite() && cache_req->memReq->isLLSC()) {
        assert(cache_req->inst->isStoreConditional());
        DPRINTF(InOrderCachePort, "Evaluating Store Conditional access\n");
        do_access = TheISA::handleLockedWrite(cpu, memReq);
    }

    DPRINTF(InOrderCachePort,
            "[tid:%i] [sn:%i] attempting to access cache\n",
            tid, inst->seqNum);

    if (do_access) {
        if (!cachePort->sendTiming(cache_req->dataPkt)) {
            // Port rejected the packet: block until recvRetry().
            DPRINTF(InOrderCachePort,
                    "[tid:%i] [sn:%i] cannot access cache, because port "
                    "is blocked. now waiting to retry request\n", tid,
                    inst->seqNum);
            cache_req->setCompleted(false);
            cachePortBlocked = true;
        } else {
            DPRINTF(InOrderCachePort,
                    "[tid:%i] [sn:%i] is now waiting for cache response\n",
                    tid, inst->seqNum);
            cache_req->setCompleted();
            cache_req->setMemAccPending();
            cachePortBlocked = false;
        }
    } else if (!do_access && memReq->isLLSC()){
        // Store-Conditional instructions complete even if they "failed"
        assert(cache_req->inst->isStoreConditional());
        cache_req->setCompleted(true);

        DPRINTF(LLSC,
                "[tid:%i]: T%i Ignoring Failed Store Conditional Access\n",
                tid, tid);

        // Complete locally; no packet was sent to the cache.
        processCacheCompletion(cache_req->dataPkt);
    } else {
        // Make cache request again since access due to
        // inability to access
        DPRINTF(InOrderStall, "STALL: \n");
        cache_req->setCompleted(false);
    }

    return fault;
}
943
/**
 * Handle a response packet from the memory system.
 *
 * Squashed accesses (either the packet's request or the matched slot
 * request) are discarded after bookkeeping.  Otherwise: a fetch response
 * is predecoded into the instruction; a data response completes the
 * access (for splits, only once both halves have returned, via a merged
 * packet over the full original request), handles LL semantics, then
 * marks the request complete and wakes the CPU/stage.
 */
void
CacheUnit::processCacheCompletion(PacketPtr pkt)
{
    // Cast to correct packet type
    CacheReqPacket* cache_pkt = dynamic_cast<CacheReqPacket*>(pkt);

    assert(cache_pkt);

    if (cache_pkt->cacheReq->isSquashed()) {
        // The originating request was squashed: drop the response.
        DPRINTF(InOrderCachePort,
                "Ignoring completion of squashed access, [tid:%i] [sn:%i]\n",
                cache_pkt->cacheReq->getInst()->readTid(),
                cache_pkt->cacheReq->getInst()->seqNum);
        DPRINTF(RefCount,
                "Ignoring completion of squashed access, [tid:%i] [sn:%i]\n",
                cache_pkt->cacheReq->getTid(),
                cache_pkt->cacheReq->seqNum);

        cache_pkt->cacheReq->done();
        delete cache_pkt;

        cpu->wakeCPU();

        return;
    }

    DPRINTF(InOrderCachePort,
            "[tid:%u]: [sn:%i]: Waking from cache access to addr. %08p\n",
            cache_pkt->cacheReq->getInst()->readTid(),
            cache_pkt->cacheReq->getInst()->seqNum,
            cache_pkt->cacheReq->getInst()->getMemAddr());

    // Cast to correct request type
    // Re-find the slot request by instruction + split index, since the
    // packet may belong to either half of a split access.
    CacheRequest *cache_req = dynamic_cast<CacheReqPtr>(
        findSplitRequest(cache_pkt->cacheReq->getInst(), cache_pkt->instIdx));

    if (!cache_req) {
        warn(
            "[tid:%u]: [sn:%i]: Can't find slot for cache access to addr. %08p\n",
            cache_pkt->cacheReq->getInst()->readTid(),
            cache_pkt->cacheReq->getInst()->seqNum,
            cache_pkt->cacheReq->getInst()->getMemAddr());
    }

    assert(cache_req);

    // Get resource request info
    unsigned stage_num = cache_req->getStageNum();
    DynInstPtr inst = cache_req->inst;
    ThreadID tid = cache_req->inst->readTid();

    if (!cache_req->isSquashed()) {
        if (inst->resSched.top()->cmd == CompleteFetch) {
            DPRINTF(InOrderCachePort,
                    "[tid:%u]: [sn:%i]: Processing fetch access\n",
                    tid, inst->seqNum);

            // NOTE: This is only allowing a thread to fetch one line
            //       at a time. Re-examine when/if prefetching
            //       gets implemented.
            //memcpy(fetchData[tid], cache_pkt->getPtr<uint8_t>(),
            //       cache_pkt->getSize());

            // Get the instruction from the array of the cache line.
            // @todo: update thsi
            ExtMachInst ext_inst;
            StaticInstPtr staticInst = NULL;
            Addr inst_pc = inst->readPC();
            MachInst mach_inst =
                TheISA::gtoh(*reinterpret_cast<TheISA::MachInst *>
                             (cache_pkt->getPtr<uint8_t>()));

            // Predecode the raw bits into an ExtMachInst for this inst.
            predecoder.setTC(cpu->thread[tid]->getTC());
            predecoder.moreBytes(inst_pc, inst_pc, mach_inst);
            ext_inst = predecoder.getExtMachInst();

            inst->setMachInst(ext_inst);

            // Set Up More TraceData info
            if (inst->traceData) {
                inst->traceData->setStaticInst(inst->staticInst);
                inst->traceData->setPC(inst->readPC());
            }

        } else if (inst->staticInst && inst->isMemRef()) {
            DPRINTF(InOrderCachePort,
                    "[tid:%u]: [sn:%i]: Processing cache access\n",
                    tid, inst->seqNum);

            if (inst->splitInst) {
                inst->splitFinishCnt++;

                // Only complete once both halves of the split are back;
                // build a merged packet covering the whole access.
                if (inst->splitFinishCnt == 2) {
                    cache_req->memReq->setVirt(0/*inst->tid*/,
                                               inst->getMemAddr(),
                                               inst->splitTotalSize,
                                               0,
                                               0);

                    Packet split_pkt(cache_req->memReq, cache_req->pktCmd,
                                     Packet::Broadcast);

                    if (inst->isLoad()) {
                        split_pkt.dataStatic(inst->splitMemData);
                    } else {
                        split_pkt.dataStatic(&inst->storeData);
                    }

                    inst->completeAcc(&split_pkt);
                }
            } else {
                inst->completeAcc(pkt);
            }

            if (inst->isLoad()) {
                assert(cache_pkt->isRead());

                if (cache_pkt->req->isLLSC()) {
                    DPRINTF(InOrderCachePort,
                            "[tid:%u]: Handling Load-Linked for [sn:%u]\n",
                            tid, inst->seqNum);
                    TheISA::handleLockedRead(cpu, cache_pkt->req);
                }

                // @NOTE: Hardcoded to for load instructions. Assumes that
                // the dest. idx 0 is always where the data is loaded to.
                DPRINTF(InOrderCachePort,
                        "[tid:%u]: [sn:%i]: Data loaded was: %08p\n",
                        tid, inst->seqNum, inst->readIntResult(0));
                DPRINTF(InOrderCachePort,
                        "[tid:%u]: [sn:%i]: FP Data loaded was: %08p\n",
                        tid, inst->seqNum, inst->readFloatResult(0));
            } else if(inst->isStore()) {
                assert(cache_pkt->isWrite());

                DPRINTF(InOrderCachePort,
                        "[tid:%u]: [sn:%i]: Data stored was: FIX ME\n",
                        tid, inst->seqNum/*,
                        getMemData(cache_pkt)*/);
            }

            delete cache_pkt;
        }

        cache_req->setMemAccPending(false);
        cache_req->setMemAccCompleted();

        if (cache_req->isMemStall() &&
            cpu->threadModel == InOrderCPU::SwitchOnCacheMiss) {
            // Thread was switched out on this miss; bring it back.
            DPRINTF(InOrderCachePort, "[tid:%u] Waking up from Cache Miss.\n", tid);

            cpu->activateContext(tid);

            DPRINTF(ThreadModel, "Activating [tid:%i] after return from cache"
                    "miss.\n", tid);
        }

        // Wake up the CPU (if it went to sleep and was waiting on this
        // completion event).
        cpu->wakeCPU();

        DPRINTF(Activity, "[tid:%u] Activating %s due to cache completion\n",
                tid, cpu->pipelineStage[stage_num]->name());

        cpu->switchToActive(stage_num);
    } else {
        DPRINTF(InOrderCachePort,
                "[tid:%u] Miss on block @ %08p completed, but squashed\n",
                tid, cache_req->inst->readPC());
        cache_req->setMemAccCompleted();
    }
}
1118
1119 void
1120 CacheUnit::recvRetry()
1121 {
1122 DPRINTF(InOrderCachePort, "Unblocking Cache Port. \n");
1123
1124 assert(cachePortBlocked);
1125
1126 // Clear the cache port for use again
1127 cachePortBlocked = false;
1128
1129 cpu->wakeCPU();
1130 }
1131
// Default-construct a cache-unit event; all slot/scheduling state is
// handled by the ResourceEvent base class.
CacheUnitEvent::CacheUnitEvent()
    : ResourceEvent()
{ }
1135
1136 void
1137 CacheUnitEvent::process()
1138 {
1139 DynInstPtr inst = resource->reqMap[slotIdx]->inst;
1140 int stage_num = resource->reqMap[slotIdx]->getStageNum();
1141 ThreadID tid = inst->threadNumber;
1142 CacheReqPtr req_ptr = dynamic_cast<CacheReqPtr>(resource->reqMap[slotIdx]);
1143
1144 DPRINTF(InOrderTLB, "Waking up from TLB Miss caused by [sn:%i].\n",
1145 inst->seqNum);
1146
1147 CacheUnit* tlb_res = dynamic_cast<CacheUnit*>(resource);
1148 assert(tlb_res);
1149
1150 tlb_res->tlbBlocked[tid] = false;
1151
1152 tlb_res->cpu->pipelineStage[stage_num]->
1153 unsetResStall(tlb_res->reqMap[slotIdx], tid);
1154
1155 req_ptr->tlbStall = false;
1156
1157 if (req_ptr->isSquashed()) {
1158 req_ptr->done();
1159 }
1160 }
1161
1162 void
1163 CacheUnit::squashDueToMemStall(DynInstPtr inst, int stage_num,
1164 InstSeqNum squash_seq_num, ThreadID tid)
1165 {
1166 // If squashing due to memory stall, then we do NOT want to
1167 // squash the instruction that caused the stall so we
1168 // increment the sequence number here to prevent that.
1169 //
1170 // NOTE: This is only for the SwitchOnCacheMiss Model
1171 // NOTE: If you have multiple outstanding misses from the same
1172 // thread then you need to reevaluate this code
1173 // NOTE: squash should originate from
1174 // pipeline_stage.cc:processInstSchedule
1175 DPRINTF(InOrderCachePort, "Squashing above [sn:%u]\n",
1176 squash_seq_num + 1);
1177
1178 squash(inst, stage_num, squash_seq_num + 1, tid);
1179 }
1180
1181
// Squash every outstanding request in this unit belonging to thread
// 'tid' that is younger than squash_seq_num: mark the request and its
// instruction squashed, release any TLB stall it holds, tear down its
// memory-address dependency entry, and free its slot once it is safe
// to do so (a request with a pending memory access keeps its slot
// until the access completes).
void
CacheUnit::squash(DynInstPtr inst, int stage_num,
                  InstSeqNum squash_seq_num, ThreadID tid)
{
    // Slots cannot be freed while iterating reqMap, so collect them
    // here and release them after the walk.
    vector<int> slot_remove_list;

    map<int, ResReqPtr>::iterator map_it = reqMap.begin();
    map<int, ResReqPtr>::iterator map_end = reqMap.end();

    while (map_it != map_end) {
        ResReqPtr req_ptr = (*map_it).second;

        // Only requests from this thread that are younger than the
        // squash point are affected.
        if (req_ptr &&
            req_ptr->getInst()->readTid() == tid &&
            req_ptr->getInst()->seqNum > squash_seq_num) {

            DPRINTF(InOrderCachePort,
                    "[tid:%i] Squashing request from [sn:%i]\n",
                    req_ptr->getInst()->readTid(), req_ptr->getInst()->seqNum);

            // Already-squashed requests have had this processing done
            // (including address-dependency removal); doing it twice
            // would corrupt the address list, so skip them.
            if (req_ptr->isSquashed()) {
                DPRINTF(AddrDep, "Request for [tid:%i] [sn:%i] already squashed, ignoring squash process.\n",
                        req_ptr->getInst()->readTid(),
                        req_ptr->getInst()->seqNum);
                map_it++;
                continue;
            }

            req_ptr->setSquashed();

            req_ptr->getInst()->setSquashed();

            CacheReqPtr cache_req = dynamic_cast<CacheReqPtr>(req_ptr);
            assert(cache_req);

            int req_slot_num = req_ptr->getSlot();

            // If this request was stalled on a TLB miss, unblock the
            // thread and lift the pipeline-stage stall it imposed.
            if (cache_req->tlbStall) {
                tlbBlocked[tid] = false;

                int stall_stage = reqMap[req_slot_num]->getStageNum();

                cpu->pipelineStage[stall_stage]->
                    unsetResStall(reqMap[req_slot_num], tid);
            }

            // Requests with no outstanding TLB or memory activity can
            // be reclaimed now; otherwise the completion handler will
            // notice the squashed flag and finish the cleanup later.
            if (!cache_req->tlbStall && !cache_req->isMemAccPending()) {
                // Mark request for later removal
                cpu->reqRemoveList.push(req_ptr);

                // Mark slot for removal from resource
                slot_remove_list.push_back(req_ptr->getSlot());
            } else {
                DPRINTF(InOrderCachePort,
                        "[tid:%i] Request from [sn:%i] squashed, but still pending completion.\n",
                        req_ptr->getInst()->readTid(), req_ptr->getInst()->seqNum);
                DPRINTF(RefCount,
                        "[tid:%i] Request from [sn:%i] squashed (split:%i), but still pending completion.\n",
                        req_ptr->getInst()->readTid(), req_ptr->getInst()->seqNum,
                        req_ptr->getInst()->splitInst);
            }

            // Drop the squashed instruction's entry from the memory
            // address dependency list so younger accesses to the same
            // address are no longer ordered behind it.
            if (req_ptr->getInst()->validMemAddr()) {
                DPRINTF(AddrDep, "Squash of [tid:%i] [sn:%i], attempting to remove addr. %08p dependencies.\n",
                        req_ptr->getInst()->readTid(),
                        req_ptr->getInst()->seqNum,
                        req_ptr->getInst()->getMemAddr());

                removeAddrDependency(req_ptr->getInst());
            }
        }

        map_it++;
    }

    // Now Delete Slot Entry from Req. Map
    for (int i = 0; i < slot_remove_list.size(); i++)
        freeSlot(slot_remove_list[i]);
}
1261
1262 uint64_t
1263 CacheUnit::getMemData(Packet *packet)
1264 {
1265 switch (packet->getSize())
1266 {
1267 case 8:
1268 return packet->get<uint8_t>();
1269
1270 case 16:
1271 return packet->get<uint16_t>();
1272
1273 case 32:
1274 return packet->get<uint32_t>();
1275
1276 case 64:
1277 return packet->get<uint64_t>();
1278
1279 default:
1280 panic("bad store data size = %d\n", packet->getSize());
1281 }
1282 }
1283
// Extra Template Definitions
#ifndef DOXYGEN_SHOULD_SKIP_THIS

// Explicit instantiations of CacheUnit::read for each data width the
// ISAs use, so the template definition can stay in this .cc file.
template
Fault
CacheUnit::read(DynInstPtr inst, Addr addr, Twin32_t &data, unsigned flags);

template
Fault
CacheUnit::read(DynInstPtr inst, Addr addr, Twin64_t &data, unsigned flags);

template
Fault
CacheUnit::read(DynInstPtr inst, Addr addr, uint64_t &data, unsigned flags);

template
Fault
CacheUnit::read(DynInstPtr inst, Addr addr, uint32_t &data, unsigned flags);

template
Fault
CacheUnit::read(DynInstPtr inst, Addr addr, uint16_t &data, unsigned flags);

template
Fault
CacheUnit::read(DynInstPtr inst, Addr addr, uint8_t &data, unsigned flags);

#endif //DOXYGEN_SHOULD_SKIP_THIS
1312
1313 template<>
1314 Fault
1315 CacheUnit::read(DynInstPtr inst, Addr addr, double &data, unsigned flags)
1316 {
1317 return read(inst, addr, *(uint64_t*)&data, flags);
1318 }
1319
1320 template<>
1321 Fault
1322 CacheUnit::read(DynInstPtr inst, Addr addr, float &data, unsigned flags)
1323 {
1324 return read(inst, addr, *(uint32_t*)&data, flags);
1325 }
1326
1327
1328 template<>
1329 Fault
1330 CacheUnit::read(DynInstPtr inst, Addr addr, int32_t &data, unsigned flags)
1331 {
1332 return read(inst, addr, (uint32_t&)data, flags);
1333 }
1334
#ifndef DOXYGEN_SHOULD_SKIP_THIS

// Explicit instantiations of CacheUnit::write for each data width the
// ISAs use, so the template definition can stay in this .cc file.
template
Fault
CacheUnit::write(DynInstPtr inst, Twin32_t data, Addr addr,
                 unsigned flags, uint64_t *res);

template
Fault
CacheUnit::write(DynInstPtr inst, Twin64_t data, Addr addr,
                 unsigned flags, uint64_t *res);

template
Fault
CacheUnit::write(DynInstPtr inst, uint64_t data, Addr addr,
                 unsigned flags, uint64_t *res);

template
Fault
CacheUnit::write(DynInstPtr inst, uint32_t data, Addr addr,
                 unsigned flags, uint64_t *res);

template
Fault
CacheUnit::write(DynInstPtr inst, uint16_t data, Addr addr,
                 unsigned flags, uint64_t *res);

template
Fault
CacheUnit::write(DynInstPtr inst, uint8_t data, Addr addr,
                 unsigned flags, uint64_t *res);

#endif //DOXYGEN_SHOULD_SKIP_THIS
1368
1369 template<>
1370 Fault
1371 CacheUnit::write(DynInstPtr inst, double data, Addr addr, unsigned flags,
1372 uint64_t *res)
1373 {
1374 return write(inst, *(uint64_t*)&data, addr, flags, res);
1375 }
1376
1377 template<>
1378 Fault
1379 CacheUnit::write(DynInstPtr inst, float data, Addr addr, unsigned flags,
1380 uint64_t *res)
1381 {
1382 return write(inst, *(uint32_t*)&data, addr, flags, res);
1383 }
1384
1385
1386 template<>
1387 Fault
1388 CacheUnit::write(DynInstPtr inst, int32_t data, Addr addr, unsigned flags,
1389 uint64_t *res)
1390 {
1391 return write(inst, (uint32_t)data, addr, flags, res);
1392 }
1393