// merge
// [gem5.git] / src / cpu / inorder / resources / cache_unit.cc
1 /*
2 * Copyright (c) 2007 MIPS Technologies, Inc.
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 *
28 * Authors: Korey Sewell
29 *
30 */
31
32 #include <vector>
33 #include <list>
34
35 #include "arch/isa_traits.hh"
36 #include "arch/locked_mem.hh"
37 #include "arch/utility.hh"
38 #include "arch/predecoder.hh"
39 #include "config/the_isa.hh"
40 #include "cpu/inorder/resources/cache_unit.hh"
41 #include "cpu/inorder/pipeline_traits.hh"
42 #include "cpu/inorder/cpu.hh"
43 #include "cpu/inorder/resource_pool.hh"
44 #include "mem/request.hh"
45
46 using namespace std;
47 using namespace TheISA;
48 using namespace ThePipeline;
49
Tick
CacheUnit::CachePort::recvAtomic(PacketPtr pkt)
{
    // This port is timing-mode only; an atomic access arriving here
    // indicates a configuration error. The return is never reached.
    panic("CacheUnit::CachePort doesn't expect recvAtomic callback!");
    return curTick;
}
56
void
CacheUnit::CachePort::recvFunctional(PacketPtr pkt)
{
    // Functional accesses are not supported by this resource's port.
    panic("CacheUnit::CachePort doesn't expect recvFunctional callback!");
}
62
63 void
64 CacheUnit::CachePort::recvStatusChange(Status status)
65 {
66 if (status == RangeChange)
67 return;
68
69 panic("CacheUnit::CachePort doesn't expect recvStatusChange callback!");
70 }
71
bool
CacheUnit::CachePort::recvTiming(Packet *pkt)
{
    // Forward the completed cache response to the owning resource and
    // always accept the packet (no nack/retry from this side).
    cachePortUnit->processCacheCompletion(pkt);
    return true;
}
78
void
CacheUnit::CachePort::recvRetry()
{
    // The cache is ready again after a rejected sendTiming; let the
    // resource unblock itself.
    cachePortUnit->recvRetry();
}
84
85 CacheUnit::CacheUnit(string res_name, int res_id, int res_width,
86 int res_latency, InOrderCPU *_cpu, ThePipeline::Params *params)
87 : Resource(res_name, res_id, res_width, res_latency, _cpu),
88 cachePortBlocked(false), predecoder(NULL)
89 {
90 cachePort = new CachePort(this);
91
92 // Hard-Code Selection For Now
93 if (res_name == "icache_port")
94 _tlb = params->itb;
95 else if (res_name == "dcache_port")
96 _tlb = params->dtb;
97 else
98 fatal("Unrecognized TLB name passed by user");
99
100 for (int i=0; i < MaxThreads; i++) {
101 tlbBlocked[i] = false;
102 }
103 }
104
TheISA::TLB*
CacheUnit::tlb()
{
    // Accessor for the TLB chosen in the constructor (I-TLB for the
    // icache port, D-TLB for the dcache port).
    return _tlb;

}
111
112 Port *
113 CacheUnit::getPort(const string &if_name, int idx)
114 {
115 if (if_name == resName)
116 return cachePort;
117 else
118 return NULL;
119 }
120
void
CacheUnit::init()
{
    // One event per slot, used to model TLB miss latency.
    // Currently Used to Model TLB Latency. Eventually
    // Switch to Timing TLB translations.
    resourceEvent = new CacheUnitEvent[width];

    initSlots();
}
130
int
CacheUnit::getSlot(DynInstPtr inst)
{
    ThreadID tid = inst->readTid();

    // A pending TLB miss for this thread blocks all new cache requests.
    // NOTE(review): indexes with threadNumber while the rest of this
    // function uses readTid() -- presumably identical; confirm.
    if (tlbBlocked[inst->threadNumber]) {
        return -1;
    }

    // For a Split-Load, the instruction would have processed once already
    // causing the address to be unset.
    if (!inst->validMemAddr() && !inst->splitInst) {
        panic("[tid:%i][sn:%i] Mem. Addr. must be set before requesting cache access\n",
              inst->readTid(), inst->seqNum);
    }

    Addr req_addr = inst->getMemAddr();

    // Instruction fetches never conflict on address; data accesses only
    // get a slot if no other request is outstanding to the same address.
    if (resName == "icache_port" ||
        find(addrList[tid].begin(), addrList[tid].end(), req_addr) ==
        addrList[tid].end()) {

        int new_slot = Resource::getSlot(inst);

        if (new_slot == -1)
            return -1;

        inst->memTime = curTick;
        // Record the address so later requests to it are serialized.
        setAddrDependency(inst);
        return new_slot;
    } else {
        // Allow same instruction multiple accesses to same address
        // should only happen maybe after a squashed inst. needs to replay
        if (addrMap[tid][req_addr] == inst->seqNum) {
            int new_slot = Resource::getSlot(inst);

            if (new_slot == -1)
                return -1;

            return new_slot;
        } else {
            // A different instruction owns this address: deny the slot.
            DPRINTF(InOrderCachePort,
                "[tid:%i] Denying request because there is an outstanding"
                " request to/for addr. %08p. by [sn:%i] @ tick %i\n",
                inst->readTid(), req_addr, addrMap[tid][req_addr], inst->memTime);
            return -1;
        }
    }

    return -1;
}
182
183 void
184 CacheUnit::setAddrDependency(DynInstPtr inst)
185 {
186 Addr req_addr = inst->getMemAddr();
187 ThreadID tid = inst->readTid();
188
189 addrList[tid].push_back(req_addr);
190 addrMap[tid][req_addr] = inst->seqNum;
191 DPRINTF(InOrderCachePort,
192 "[tid:%i]: [sn:%i]: Address %08p added to dependency list\n",
193 inst->readTid(), inst->seqNum, req_addr);
194 DPRINTF(AddrDep,
195 "[tid:%i]: [sn:%i]: Address %08p added to dependency list\n",
196 inst->readTid(), inst->seqNum, req_addr);
197 }
198
199 void
200 CacheUnit::removeAddrDependency(DynInstPtr inst)
201 {
202 ThreadID tid = inst->readTid();
203
204 Addr mem_addr = inst->getMemAddr();
205
206 // Erase from Address List
207 vector<Addr>::iterator vect_it = find(addrList[tid].begin(), addrList[tid].end(),
208 mem_addr);
209 assert(vect_it != addrList[tid].end() || inst->splitInst);
210
211 if (vect_it != addrList[tid].end()) {
212 DPRINTF(AddrDep,
213 "[tid:%i]: [sn:%i] Address %08p removed from dependency list\n",
214 inst->readTid(), inst->seqNum, (*vect_it));
215
216 addrList[tid].erase(vect_it);
217
218 // Erase From Address Map (Used for Debugging)
219 addrMap[tid].erase(addrMap[tid].find(mem_addr));
220 }
221
222
223 }
224
225 ResReqPtr
226 CacheUnit::findRequest(DynInstPtr inst)
227 {
228 map<int, ResReqPtr>::iterator map_it = reqMap.begin();
229 map<int, ResReqPtr>::iterator map_end = reqMap.end();
230
231 while (map_it != map_end) {
232 CacheRequest* cache_req = dynamic_cast<CacheRequest*>((*map_it).second);
233 assert(cache_req);
234
235 if (cache_req &&
236 cache_req->getInst() == inst &&
237 cache_req->instIdx == inst->resSched.top()->idx) {
238 return cache_req;
239 }
240 map_it++;
241 }
242
243 return NULL;
244 }
245
246 ResReqPtr
247 CacheUnit::findSplitRequest(DynInstPtr inst, int idx)
248 {
249 map<int, ResReqPtr>::iterator map_it = reqMap.begin();
250 map<int, ResReqPtr>::iterator map_end = reqMap.end();
251
252 while (map_it != map_end) {
253 CacheRequest* cache_req = dynamic_cast<CacheRequest*>((*map_it).second);
254 assert(cache_req);
255
256 if (cache_req &&
257 cache_req->getInst() == inst &&
258 cache_req->instIdx == idx) {
259 return cache_req;
260 }
261 map_it++;
262 }
263
264 return NULL;
265 }
266
267
ResReqPtr
CacheUnit::getRequest(DynInstPtr inst, int stage_num, int res_idx,
                      int slot_num, unsigned cmd)
{
    // Build a new CacheRequest for the schedule entry at the top of the
    // instruction's resource schedule, selecting the packet command from
    // the scheduled resource command.
    ScheduleEntry* sched_entry = inst->resSched.top();

    if (!inst->validMemAddr()) {
        panic("Mem. Addr. must be set before requesting cache access\n");
    }

    MemCmd::Command pkt_cmd;

    switch (sched_entry->cmd)
    {
      case InitSecondSplitRead:
        // Second half of a split load.
        pkt_cmd = MemCmd::ReadReq;

        DPRINTF(InOrderCachePort,
                "[tid:%i]: Read request from [sn:%i] for addr %08p\n",
                inst->readTid(), inst->seqNum, inst->split2ndAddr);
        break;

      case InitiateReadData:
        pkt_cmd = MemCmd::ReadReq;

        DPRINTF(InOrderCachePort,
                "[tid:%i]: Read request from [sn:%i] for addr %08p\n",
                inst->readTid(), inst->seqNum, inst->getMemAddr());
        break;

      case InitSecondSplitWrite:
        // Second half of a split store.
        pkt_cmd = MemCmd::WriteReq;

        DPRINTF(InOrderCachePort,
                "[tid:%i]: Write request from [sn:%i] for addr %08p\n",
                inst->readTid(), inst->seqNum, inst->split2ndAddr);
        break;

      case InitiateWriteData:
        pkt_cmd = MemCmd::WriteReq;

        DPRINTF(InOrderCachePort,
                "[tid:%i]: Write request from [sn:%i] for addr %08p\n",
                inst->readTid(), inst->seqNum, inst->getMemAddr());
        break;

      case InitiateFetch:
        pkt_cmd = MemCmd::ReadReq;

        DPRINTF(InOrderCachePort,
                "[tid:%i]: Fetch request from [sn:%i] for addr %08p\n",
                inst->readTid(), inst->seqNum, inst->getMemAddr());
        break;

      default:
        panic("%i: Unexpected request type (%i) to %s", curTick,
              sched_entry->cmd, name());
    }

    return new CacheRequest(this, inst, stage_num, id, slot_num,
                            sched_entry->cmd, 0, pkt_cmd,
                            0/*flags*/, this->cpu->readCpuId(),
                            inst->resSched.top()->idx);
}
332
void
CacheUnit::requestAgain(DynInstPtr inst, bool &service_request)
{
    CacheReqPtr cache_req = dynamic_cast<CacheReqPtr>(findRequest(inst));
    assert(cache_req);

    // Check to see if this instruction is requesting the same command
    // or a different one
    if (cache_req->cmd != inst->resSched.top()->cmd &&
        cache_req->instIdx == inst->resSched.top()->idx) {
        // If different, then update command in the request
        cache_req->cmd = inst->resSched.top()->cmd;
        DPRINTF(InOrderCachePort,
                "[tid:%i]: [sn:%i]: Updating the command for this "
                "instruction\n ", inst->readTid(), inst->seqNum);

        service_request = true;
    } else if (inst->resSched.top()->idx != CacheUnit::InitSecondSplitRead &&
               inst->resSched.top()->idx != CacheUnit::InitSecondSplitWrite) {
        // NOTE(review): this compares the schedule entry's *idx* against
        // command enumerators (InitSecondSplit*); elsewhere idx is a
        // split-access index and cmd holds the command. Looks like it may
        // have been intended as ->cmd -- confirm before changing.
        // If same command, just check to see if memory access was completed
        // but dont try to re-execute
        DPRINTF(InOrderCachePort,
                "[tid:%i]: [sn:%i]: requesting this resource again\n",
                inst->readTid(), inst->seqNum);

        service_request = true;
    }
}
361
Fault
CacheUnit::doTLBAccess(DynInstPtr inst, CacheReqPtr cache_req, int acc_size,
                       int flags, TheISA::TLB::Mode tlb_mode)
{
    // Build the memory Request appropriate to the access type (fetch,
    // first data access, or second half of a split access), translate it
    // atomically, and on a fault stall the requesting stage/thread.
    ThreadID tid = inst->readTid();
    Addr aligned_addr = inst->getMemAddr();
    unsigned stage_num = cache_req->getStageNum();
    unsigned slot_idx = cache_req->getSlot();

    if (tlb_mode == TheISA::TLB::Execute) {
        // Instruction fetch.
        inst->fetchMemReq = new Request(inst->readTid(), aligned_addr,
                                        acc_size, flags, inst->readPC(),
                                        cpu->readCpuId(), inst->readTid());
        cache_req->memReq = inst->fetchMemReq;
    } else {
        if (!cache_req->is2ndSplit()) {
            // First (or only) part of a data access.
            inst->dataMemReq = new Request(cpu->asid[tid], aligned_addr,
                                           acc_size, flags, inst->readPC(),
                                           cpu->readCpuId(), inst->readTid());
            cache_req->memReq = inst->dataMemReq;
        } else {
            // Second half of a split access uses the precomputed address.
            assert(inst->splitInst);

            inst->splitMemReq = new Request(cpu->asid[tid],
                                            inst->split2ndAddr,
                                            acc_size,
                                            flags,
                                            inst->readPC(),
                                            cpu->readCpuId(),
                                            tid);
            cache_req->memReq = inst->splitMemReq;
        }
    }


    cache_req->fault =
        _tlb->translateAtomic(cache_req->memReq,
                              cpu->thread[tid]->getTC(), tlb_mode);

    if (cache_req->fault != NoFault) {
        DPRINTF(InOrderTLB, "[tid:%i]: %s encountered while translating "
                "addr:%08p for [sn:%i].\n", tid, cache_req->fault->name(),
                cache_req->memReq->getVaddr(), inst->seqNum);

        // Stall the stage and mark the thread TLB-blocked; the scheduled
        // CacheUnitEvent will unblock it after the modeled TLB latency.
        cpu->pipelineStage[stage_num]->setResStall(cache_req, tid);

        tlbBlocked[tid] = true;

        cache_req->tlbStall = true;

        scheduleEvent(slot_idx, 1);

        cpu->trap(cache_req->fault, tid);
    } else {
        DPRINTF(InOrderTLB, "[tid:%i]: [sn:%i] virt. addr %08p translated "
                "to phys. addr:%08p.\n", tid, inst->seqNum,
                cache_req->memReq->getVaddr(),
                cache_req->memReq->getPaddr());
    }

    return cache_req->fault;
}
424
template <class T>
Fault
CacheUnit::read(DynInstPtr inst, Addr addr, T &data, unsigned flags)
{
    // Perform (or initiate) a load. Loads that straddle a cache-line
    // boundary are split in two: this call handles the first half and
    // schedules InitSecondSplitRead/CompleteSecondSplitRead entries for
    // the second half.
    CacheReqPtr cache_req = dynamic_cast<CacheReqPtr>(findRequest(inst));
    assert(cache_req && "Can't Find Instruction for Read!");

    // The block size of our peer
    unsigned blockSize = this->cachePort->peerBlockSize();

    //The size of the data we're trying to read.
    int dataSize = sizeof(T);

    if (inst->split2ndAccess) {
        // This invocation is the second half of an earlier split load.
        dataSize = inst->split2ndSize;
        cache_req->splitAccess = true;
        cache_req->split2ndAccess = true;

        DPRINTF(InOrderCachePort, "[sn:%i] Split Read Access (2 of 2) for (%#x, %#x).\n", inst->seqNum,
                inst->getMemAddr(), inst->split2ndAddr);
    }


    //The address of the second part of this access if it needs to be split
    //across a cache line boundary.
    Addr secondAddr = roundDown(addr + dataSize - 1, blockSize);


    if (secondAddr > addr && !inst->split2ndAccess) {
        DPRINTF(InOrderCachePort, "%i: sn[%i] Split Read Access (1 of 2) for (%#x, %#x).\n", curTick, inst->seqNum,
                addr, secondAddr);

        // Save All "Total" Split Information
        // ==============================
        inst->splitInst = true;
        inst->splitMemData = new uint8_t[dataSize];
        inst->splitTotalSize = dataSize;

        if (!inst->splitInstSked) {
            // Schedule Split Read/Complete for Instruction
            // ==============================
            int stage_num = cache_req->getStageNum();

            int stage_pri = ThePipeline::getNextPriority(inst, stage_num);

            inst->resSched.push(new ScheduleEntry(stage_num,
                                                  stage_pri,
                                                  cpu->resPool->getResIdx(DCache),
                                                  CacheUnit::InitSecondSplitRead,
                                                  1)
                                );

            inst->resSched.push(new ScheduleEntry(stage_num + 1,
                                                  1/*stage_pri*/,
                                                  cpu->resPool->getResIdx(DCache),
                                                  CacheUnit::CompleteSecondSplitRead,
                                                  1)
                                );
            inst->splitInstSked = true;
        } else {
            // Already scheduled once; this is a replay of the first half.
            DPRINTF(InOrderCachePort, "[tid:%i] [sn:%i] Retrying Split Read Access (1 of 2) for (%#x, %#x).\n",
                    inst->readTid(), inst->seqNum, addr, secondAddr);
        }

        // Split Information for First Access
        // ==============================
        dataSize = secondAddr - addr;
        cache_req->splitAccess = true;

        // Split Information for Second Access
        // ==============================
        inst->split2ndSize = addr + sizeof(T) - secondAddr;
        inst->split2ndAddr = secondAddr;
        inst->split2ndDataPtr = inst->splitMemData + dataSize;
        inst->split2ndFlags = flags;
    }

    doTLBAccess(inst, cache_req, dataSize, flags, TheISA::TLB::Read);

    if (cache_req->fault == NoFault) {
        if (!cache_req->splitAccess) {
            cache_req->reqData = new uint8_t[dataSize];
            doCacheAccess(inst, NULL);
        } else {
            // Split halves read into the shared splitMemData buffer.
            if (!inst->split2ndAccess) {
                cache_req->reqData = inst->splitMemData;
            } else {
                cache_req->reqData = inst->split2ndDataPtr;
            }

            doCacheAccess(inst, NULL, cache_req);
        }
    }

    return cache_req->fault;
}
521
522 template <class T>
523 Fault
524 CacheUnit::write(DynInstPtr inst, T data, Addr addr, unsigned flags,
525 uint64_t *write_res)
526 {
527 CacheReqPtr cache_req = dynamic_cast<CacheReqPtr>(findRequest(inst));
528 assert(cache_req && "Can't Find Instruction for Write!");
529
530 // The block size of our peer
531 unsigned blockSize = this->cachePort->peerBlockSize();
532
533 //The size of the data we're trying to read.
534 int dataSize = sizeof(T);
535
536 if (inst->split2ndAccess) {
537 dataSize = inst->split2ndSize;
538 cache_req->splitAccess = true;
539 cache_req->split2ndAccess = true;
540
541 DPRINTF(InOrderCachePort, "[sn:%i] Split Write Access (2 of 2) for (%#x, %#x).\n", inst->seqNum,
542 inst->getMemAddr(), inst->split2ndAddr);
543 }
544
545 //The address of the second part of this access if it needs to be split
546 //across a cache line boundary.
547 Addr secondAddr = roundDown(addr + dataSize - 1, blockSize);
548
549 if (secondAddr > addr && !inst->split2ndAccess) {
550
551 DPRINTF(InOrderCachePort, "[sn:%i] Split Write Access (1 of 2) for (%#x, %#x).\n", inst->seqNum,
552 addr, secondAddr);
553
554 // Save All "Total" Split Information
555 // ==============================
556 inst->splitInst = true;
557 inst->splitTotalSize = dataSize;
558
559 if (!inst->splitInstSked) {
560 // Schedule Split Read/Complete for Instruction
561 // ==============================
562 int stage_num = cache_req->getStageNum();
563
564 int stage_pri = ThePipeline::getNextPriority(inst, stage_num);
565
566 inst->resSched.push(new ScheduleEntry(stage_num,
567 stage_pri,
568 cpu->resPool->getResIdx(DCache),
569 CacheUnit::InitSecondSplitWrite,
570 1)
571 );
572
573 inst->resSched.push(new ScheduleEntry(stage_num + 1,
574 1/*stage_pri*/,
575 cpu->resPool->getResIdx(DCache),
576 CacheUnit::CompleteSecondSplitWrite,
577 1)
578 );
579 inst->splitInstSked = true;
580 } else {
581 DPRINTF(InOrderCachePort, "[tid:%i] sn:%i] Retrying Split Read Access (1 of 2) for (%#x, %#x).\n",
582 inst->readTid(), inst->seqNum, addr, secondAddr);
583 }
584
585
586
587 // Split Information for First Access
588 // ==============================
589 dataSize = secondAddr - addr;
590 cache_req->splitAccess = true;
591
592 // Split Information for Second Access
593 // ==============================
594 inst->split2ndSize = addr + sizeof(T) - secondAddr;
595 inst->split2ndAddr = secondAddr;
596 inst->split2ndStoreDataPtr = &cache_req->inst->storeData;
597 inst->split2ndStoreDataPtr += dataSize;
598 inst->split2ndFlags = flags;
599 inst->splitInstSked = true;
600 }
601
602 doTLBAccess(inst, cache_req, dataSize, flags, TheISA::TLB::Write);
603
604 if (cache_req->fault == NoFault) {
605 if (!cache_req->splitAccess) {
606 // Remove this line since storeData is saved in INST?
607 cache_req->reqData = new uint8_t[dataSize];
608 doCacheAccess(inst, write_res);
609 } else {
610 doCacheAccess(inst, write_res, cache_req);
611 }
612
613 }
614
615 return cache_req->fault;
616 }
617
618
void
CacheUnit::execute(int slot_num)
{
    // Dispatch on the request's scheduled command: initiate accesses
    // (fetch / read / write / split halves) or try to complete ones
    // whose memory response has arrived.
    if (cachePortBlocked) {
        // An earlier sendTiming was rejected; wait for recvRetry.
        DPRINTF(InOrderCachePort, "Cache Port Blocked. Cannot Access\n");
        return;
    }

    CacheReqPtr cache_req = dynamic_cast<CacheReqPtr>(reqMap[slot_num]);
    assert(cache_req);

    DynInstPtr inst = cache_req->inst;
#if TRACING_ON
    // Only consumed by DPRINTF, which compiles away without tracing.
    ThreadID tid = inst->readTid();
    int seq_num = inst->seqNum;
    std::string acc_type = "write";

#endif

    cache_req->fault = NoFault;

    switch (cache_req->cmd)
    {
      case InitiateFetch:
        {
            //@TODO: Switch to size of full cache block. Store in fetch buffer
            int acc_size = sizeof(TheISA::MachInst);

            doTLBAccess(inst, cache_req, acc_size, 0, TheISA::TLB::Execute);

            // Only Do Access if no fault from TLB
            if (cache_req->fault == NoFault) {

                DPRINTF(InOrderCachePort,
                    "[tid:%u]: Initiating fetch access to %s for addr. %08p\n",
                    tid, name(), cache_req->inst->getMemAddr());

                cache_req->reqData = new uint8_t[acc_size];

                inst->setCurResSlot(slot_num);

                doCacheAccess(inst);
            }

            break;
        }

      case InitiateReadData:
#if TRACING_ON
        acc_type = "read";
#endif
        // Intentional fall-through: read and write share the code below,
        // differing only in the traced access-type string.
      case InitiateWriteData:

        DPRINTF(InOrderCachePort,
            "[tid:%u]: [sn:%i] Initiating data %s access to %s for addr. %08p\n",
            tid, inst->seqNum, acc_type, name(), cache_req->inst->getMemAddr());

        inst->setCurResSlot(slot_num);

        // Prefetches execute directly; real accesses start the memory
        // transaction via initiateAcc().
        if (inst->isDataPrefetch() || inst->isInstPrefetch()) {
            inst->execute();
        } else {
            inst->initiateAcc();
        }

        break;

      case InitSecondSplitRead:
        DPRINTF(InOrderCachePort,
            "[tid:%u]: [sn:%i] Initiating split data read access to %s for addr. %08p\n",
            tid, inst->seqNum, name(), cache_req->inst->split2ndAddr);
        inst->split2ndAccess = true;
        assert(inst->split2ndAddr != 0);
        read(inst, inst->split2ndAddr, inst->split2ndData, inst->split2ndFlags);
        break;

      case InitSecondSplitWrite:
        DPRINTF(InOrderCachePort,
            "[tid:%u]: [sn:%i] Initiating split data write access to %s for addr. %08p\n",
            tid, inst->seqNum, name(), cache_req->inst->getMemAddr());

        inst->split2ndAccess = true;
        assert(inst->split2ndAddr != 0);
        write(inst, inst->split2ndAddr, inst->split2ndData, inst->split2ndFlags, NULL);
        break;


      case CompleteFetch:
        if (cache_req->isMemAccComplete()) {
            DPRINTF(InOrderCachePort,
                    "[tid:%i]: Completing Fetch Access for [sn:%i]\n",
                    tid, inst->seqNum);


            DPRINTF(InOrderCachePort, "[tid:%i]: Instruction [sn:%i] is: %s\n",
                    tid, seq_num, inst->staticInst->disassemble(inst->PC));

            removeAddrDependency(inst);

            delete cache_req->dataPkt;

            // Do not stall and switch threads for fetch... for now..
            // TODO: We need to detect cache misses for latencies > 1
            // cache_req->setMemStall(false);

            cache_req->done();
        } else {
            // Response not back yet: stall this slot.
            DPRINTF(InOrderCachePort,
                    "[tid:%i]: [sn:%i]: Unable to Complete Fetch Access\n",
                    tid, inst->seqNum);
            DPRINTF(InOrderStall,
                    "STALL: [tid:%i]: Fetch miss from %08p\n",
                    tid, cache_req->inst->readPC());
            cache_req->setCompleted(false);
            //cache_req->setMemStall(true);
        }
        break;

      case CompleteReadData:
      case CompleteWriteData:
        DPRINTF(InOrderCachePort,
                "[tid:%i]: [sn:%i]: Trying to Complete Data Access\n",
                tid, inst->seqNum);

        // Prefetches complete unconditionally (no data expected back).
        if (cache_req->isMemAccComplete() ||
            inst->isDataPrefetch() ||
            inst->isInstPrefetch()) {
            removeAddrDependency(inst);
            cache_req->setMemStall(false);
            cache_req->done();
        } else {
            DPRINTF(InOrderStall, "STALL: [tid:%i]: Data miss from %08p\n",
                    tid, cache_req->inst->getMemAddr());
            cache_req->setCompleted(false);
            cache_req->setMemStall(true);
        }
        break;

      case CompleteSecondSplitRead:
        DPRINTF(InOrderCachePort,
                "[tid:%i]: [sn:%i]: Trying to Complete Split Data Read Access\n",
                tid, inst->seqNum);

        if (cache_req->isMemAccComplete() ||
            inst->isDataPrefetch() ||
            inst->isInstPrefetch()) {
            removeAddrDependency(inst);
            cache_req->setMemStall(false);
            cache_req->done();
        } else {
            DPRINTF(InOrderStall, "STALL: [tid:%i]: Data miss from %08p\n",
                    tid, cache_req->inst->split2ndAddr);
            cache_req->setCompleted(false);
            cache_req->setMemStall(true);
        }
        break;

      case CompleteSecondSplitWrite:
        DPRINTF(InOrderCachePort,
                "[tid:%i]: [sn:%i]: Trying to Complete Split Data Write Access\n",
                tid, inst->seqNum);

        if (cache_req->isMemAccComplete() ||
            inst->isDataPrefetch() ||
            inst->isInstPrefetch()) {
            removeAddrDependency(inst);
            cache_req->setMemStall(false);
            cache_req->done();
        } else {
            DPRINTF(InOrderStall, "STALL: [tid:%i]: Data miss from %08p\n",
                    tid, cache_req->inst->split2ndAddr);
            cache_req->setCompleted(false);
            cache_req->setMemStall(true);
        }
        break;

      default:
        fatal("Unrecognized command to %s", resName);
    }
}
799
void
CacheUnit::prefetch(DynInstPtr inst)
{
    // Prefetches are treated as immediate no-ops: the request is marked
    // fully complete so other memory instructions can reuse the slot.
    warn_once("Prefetching currently unimplemented");

    CacheReqPtr cache_req
        = dynamic_cast<CacheReqPtr>(reqMap[inst->getCurResSlot()]);
    assert(cache_req);

    // Clean-Up cache resource request so
    // other memory insts. can use them
    cache_req->setCompleted();
    cachePortBlocked = false;
    cache_req->setMemAccPending(false);
    cache_req->setMemAccCompleted();
    inst->unsetMemAddr();
}
817
818
void
CacheUnit::writeHint(DynInstPtr inst)
{
    // Write hints are treated as immediate no-ops, mirroring prefetch():
    // mark the request fully complete and free the slot.
    warn_once("Write Hints currently unimplemented");

    CacheReqPtr cache_req
        = dynamic_cast<CacheReqPtr>(reqMap[inst->getCurResSlot()]);
    assert(cache_req);

    // Clean-Up cache resource request so
    // other memory insts. can use them
    cache_req->setCompleted();
    cachePortBlocked = false;
    cache_req->setMemAccPending(false);
    cache_req->setMemAccCompleted();
    inst->unsetMemAddr();
}
836
// @TODO: Split into doCacheRead() and doCacheWrite()
Fault
CacheUnit::doCacheAccess(DynInstPtr inst, uint64_t *write_res, CacheReqPtr split_req)
{
    // Build the packet for an already-translated request and send it to
    // the cache in timing mode, handling LL/SC command rewriting, port
    // rejection (retry), and locally-failed store conditionals.
    Fault fault = NoFault;
#if TRACING_ON
    ThreadID tid = inst->readTid();
#endif

    CacheReqPtr cache_req;

    // Split accesses carry their own request; otherwise look it up by
    // the instruction's current resource slot.
    if (split_req == NULL) {
        cache_req = dynamic_cast<CacheReqPtr>(reqMap[inst->getCurResSlot()]);
    } else{
        cache_req = split_req;
    }

    assert(cache_req);

    // Check for LL/SC and if so change command
    if (cache_req->memReq->isLLSC() && cache_req->pktCmd == MemCmd::ReadReq) {
        cache_req->pktCmd = MemCmd::LoadLockedReq;
    }

    // Writes may actually be swaps or store-conditionals.
    if (cache_req->pktCmd == MemCmd::WriteReq) {
        cache_req->pktCmd =
            cache_req->memReq->isSwap() ? MemCmd::SwapReq :
            (cache_req->memReq->isLLSC() ? MemCmd::StoreCondReq
             : MemCmd::WriteReq);
    }

    cache_req->dataPkt = new CacheReqPacket(cache_req, cache_req->pktCmd,
                                            Packet::Broadcast, cache_req->instIdx);

    if (cache_req->dataPkt->isRead()) {
        cache_req->dataPkt->dataStatic(cache_req->reqData);
    } else if (cache_req->dataPkt->isWrite()) {
        // Second halves of split stores carry their own data pointer.
        if (inst->split2ndAccess) {
            cache_req->dataPkt->dataStatic(inst->split2ndStoreDataPtr);
        } else {
            cache_req->dataPkt->dataStatic(&cache_req->inst->storeData);
        }

        if (cache_req->memReq->isCondSwap()) {
            assert(write_res);
            cache_req->memReq->setExtraData(*write_res);
        }
    }

    bool do_access = true; // flag to suppress cache access

    Request *memReq = cache_req->dataPkt->req;

    // A store conditional whose lock flag was lost never reaches the
    // cache at all.
    if (cache_req->dataPkt->isWrite() && cache_req->memReq->isLLSC()) {
        assert(cache_req->inst->isStoreConditional());
        DPRINTF(InOrderCachePort, "Evaluating Store Conditional access\n");
        do_access = TheISA::handleLockedWrite(cpu, memReq);
    }

    DPRINTF(InOrderCachePort,
            "[tid:%i] [sn:%i] attempting to access cache\n",
            tid, inst->seqNum);

    if (do_access) {
        if (!cachePort->sendTiming(cache_req->dataPkt)) {
            // Port rejected the packet: block until recvRetry fires.
            DPRINTF(InOrderCachePort,
                    "[tid:%i] [sn:%i] cannot access cache, because port "
                    "is blocked. now waiting to retry request\n", tid,
                    inst->seqNum);
            cache_req->setCompleted(false);
            cachePortBlocked = true;
        } else {
            DPRINTF(InOrderCachePort,
                    "[tid:%i] [sn:%i] is now waiting for cache response\n",
                    tid, inst->seqNum);
            cache_req->setCompleted();
            cache_req->setMemAccPending();
            cachePortBlocked = false;
        }
    } else if (!do_access && memReq->isLLSC()){
        // Store-Conditional instructions complete even if they "failed"
        assert(cache_req->inst->isStoreConditional());
        cache_req->setCompleted(true);

        DPRINTF(LLSC,
                "[tid:%i]: T%i Ignoring Failed Store Conditional Access\n",
                tid, tid);

        processCacheCompletion(cache_req->dataPkt);
    } else {
        // Make cache request again since access due to
        // inability to access
        DPRINTF(InOrderStall, "STALL: \n");
        cache_req->setCompleted(false);
    }

    return fault;
}
935
void
CacheUnit::processCacheCompletion(PacketPtr pkt)
{
    // Callback for a returning cache response: complete the access
    // (fetch decode, load/store completion, split-access merging) and
    // wake the CPU/stage that was waiting on it.
    // Cast to correct packet type
    CacheReqPacket* cache_pkt = dynamic_cast<CacheReqPacket*>(pkt);

    assert(cache_pkt);

    // Responses for squashed requests are dropped entirely.
    if (cache_pkt->cacheReq->isSquashed()) {
        DPRINTF(InOrderCachePort,
                "Ignoring completion of squashed access, [tid:%i] [sn:%i]\n",
                cache_pkt->cacheReq->getInst()->readTid(),
                cache_pkt->cacheReq->getInst()->seqNum);
        DPRINTF(RefCount,
                "Ignoring completion of squashed access, [tid:%i] [sn:%i]\n",
                cache_pkt->cacheReq->getTid(),
                cache_pkt->cacheReq->seqNum);

        cache_pkt->cacheReq->done();
        delete cache_pkt;

        cpu->wakeCPU();

        return;
    }

    DPRINTF(InOrderCachePort,
            "[tid:%u]: [sn:%i]: Waking from cache access to addr. %08p\n",
            cache_pkt->cacheReq->getInst()->readTid(),
            cache_pkt->cacheReq->getInst()->seqNum,
            cache_pkt->cacheReq->getInst()->getMemAddr());

    // Cast to correct request type
    CacheRequest *cache_req = dynamic_cast<CacheReqPtr>(
        findSplitRequest(cache_pkt->cacheReq->getInst(), cache_pkt->instIdx));

    if (!cache_req) {
        warn(
            "[tid:%u]: [sn:%i]: Can't find slot for cache access to addr. %08p\n",
            cache_pkt->cacheReq->getInst()->readTid(),
            cache_pkt->cacheReq->getInst()->seqNum,
            cache_pkt->cacheReq->getInst()->getMemAddr());
    }

    assert(cache_req);


    // Get resource request info
    unsigned stage_num = cache_req->getStageNum();
    DynInstPtr inst = cache_req->inst;
    ThreadID tid = cache_req->inst->readTid();

    if (!cache_req->isSquashed()) {
        if (inst->resSched.top()->cmd == CompleteFetch) {
            DPRINTF(InOrderCachePort,
                    "[tid:%u]: [sn:%i]: Processing fetch access\n",
                    tid, inst->seqNum);

            // NOTE: This is only allowing a thread to fetch one line
            //       at a time. Re-examine when/if prefetching
            //       gets implemented.
            //memcpy(fetchData[tid], cache_pkt->getPtr<uint8_t>(),
            //       cache_pkt->getSize());

            // Get the instruction from the array of the cache line.
            // @todo: update thsi
            ExtMachInst ext_inst;
            StaticInstPtr staticInst = NULL;
            Addr inst_pc = inst->readPC();
            MachInst mach_inst =
                TheISA::gtoh(*reinterpret_cast<TheISA::MachInst *>
                             (cache_pkt->getPtr<uint8_t>()));

            // Run the fetched bytes through the predecoder to form the
            // extended machine instruction.
            predecoder.setTC(cpu->thread[tid]->getTC());
            predecoder.moreBytes(inst_pc, inst_pc, mach_inst);
            ext_inst = predecoder.getExtMachInst();

            inst->setMachInst(ext_inst);

            // Set Up More TraceData info
            if (inst->traceData) {
                inst->traceData->setStaticInst(inst->staticInst);
                inst->traceData->setPC(inst->readPC());
            }

        } else if (inst->staticInst && inst->isMemRef()) {
            DPRINTF(InOrderCachePort,
                    "[tid:%u]: [sn:%i]: Processing cache access\n",
                    tid, inst->seqNum);

            if (inst->splitInst) {
                // Both halves of a split access must return before the
                // instruction can complete; merge them through a
                // synthesized packet covering the full access.
                inst->splitFinishCnt++;

                if (inst->splitFinishCnt == 2) {
                    cache_req->memReq->setVirt(0/*inst->tid*/,
                                               inst->getMemAddr(),
                                               inst->splitTotalSize,
                                               0,
                                               0);

                    Packet split_pkt(cache_req->memReq, cache_req->pktCmd,
                                     Packet::Broadcast);


                    if (inst->isLoad()) {
                        split_pkt.dataStatic(inst->splitMemData);
                    } else {
                        split_pkt.dataStatic(&inst->storeData);
                    }

                    inst->completeAcc(&split_pkt);
                }
            } else {
                inst->completeAcc(pkt);
            }

            if (inst->isLoad()) {
                assert(cache_pkt->isRead());

                if (cache_pkt->req->isLLSC()) {
                    DPRINTF(InOrderCachePort,
                            "[tid:%u]: Handling Load-Linked for [sn:%u]\n",
                            tid, inst->seqNum);
                    TheISA::handleLockedRead(cpu, cache_pkt->req);
                }

                // @NOTE: Hardcoded to for load instructions. Assumes that
                // the dest. idx 0 is always where the data is loaded to.
                DPRINTF(InOrderCachePort,
                        "[tid:%u]: [sn:%i]: Data loaded was: %08p\n",
                        tid, inst->seqNum, inst->readIntResult(0));
                DPRINTF(InOrderCachePort,
                        "[tid:%u]: [sn:%i]: FP Data loaded was: %08p\n",
                        tid, inst->seqNum, inst->readFloatResult(0));
            } else if(inst->isStore()) {
                assert(cache_pkt->isWrite());

                DPRINTF(InOrderCachePort,
                        "[tid:%u]: [sn:%i]: Data stored was: FIX ME\n",
                        tid, inst->seqNum/*,
                        getMemData(cache_pkt)*/);
            }

            delete cache_pkt;
        }

        cache_req->setMemAccPending(false);
        cache_req->setMemAccCompleted();

        // Under switch-on-cache-miss threading, the stalled thread can
        // now be reactivated.
        if (cache_req->isMemStall() &&
            cpu->threadModel == InOrderCPU::SwitchOnCacheMiss) {
            DPRINTF(InOrderCachePort, "[tid:%u] Waking up from Cache Miss.\n", tid);

            cpu->activateContext(tid);

            DPRINTF(ThreadModel, "Activating [tid:%i] after return from cache"
                    "miss.\n", tid);
        }

        // Wake up the CPU (if it went to sleep and was waiting on this
        // completion event).
        cpu->wakeCPU();

        DPRINTF(Activity, "[tid:%u] Activating %s due to cache completion\n",
                tid, cpu->pipelineStage[stage_num]->name());

        cpu->switchToActive(stage_num);
    } else {
        DPRINTF(InOrderCachePort,
                "[tid:%u] Miss on block @ %08p completed, but squashed\n",
                tid, cache_req->inst->readPC());
        cache_req->setMemAccCompleted();
    }

    inst->unsetMemAddr();
}
1112
void
CacheUnit::recvRetry()
{
    // Callback from the cache port: a previously rejected access may now
    // be retried, so make the port usable again.
    DPRINTF(InOrderCachePort, "Unblocking Cache Port. \n");

    // A retry should only ever arrive while the port is marked blocked.
    assert(cachePortBlocked);

    // Clear the cache port for use again
    cachePortBlocked = false;

    // Wake the CPU in case it went to sleep waiting on this port.
    cpu->wakeCPU();
}
1125
// Default-construct a cache-unit event; slot/resource association is
// established later through the ResourceEvent init/scheduling interface.
CacheUnitEvent::CacheUnitEvent()
    : ResourceEvent()
{ }
1129
1130 void
1131 CacheUnitEvent::process()
1132 {
1133 DynInstPtr inst = resource->reqMap[slotIdx]->inst;
1134 int stage_num = resource->reqMap[slotIdx]->getStageNum();
1135 ThreadID tid = inst->threadNumber;
1136 CacheReqPtr req_ptr = dynamic_cast<CacheReqPtr>(resource->reqMap[slotIdx]);
1137
1138 DPRINTF(InOrderTLB, "Waking up from TLB Miss caused by [sn:%i].\n",
1139 inst->seqNum);
1140
1141 CacheUnit* tlb_res = dynamic_cast<CacheUnit*>(resource);
1142 assert(tlb_res);
1143
1144 tlb_res->tlbBlocked[tid] = false;
1145
1146 tlb_res->cpu->pipelineStage[stage_num]->
1147 unsetResStall(tlb_res->reqMap[slotIdx], tid);
1148
1149 req_ptr->tlbStall = false;
1150
1151 if (req_ptr->isSquashed()) {
1152 req_ptr->done();
1153 }
1154 }
1155
1156 void
1157 CacheUnit::squashDueToMemStall(DynInstPtr inst, int stage_num,
1158 InstSeqNum squash_seq_num, ThreadID tid)
1159 {
1160 // If squashing due to memory stall, then we do NOT want to
1161 // squash the instruction that caused the stall so we
1162 // increment the sequence number here to prevent that.
1163 //
1164 // NOTE: This is only for the SwitchOnCacheMiss Model
1165 // NOTE: If you have multiple outstanding misses from the same
1166 // thread then you need to reevaluate this code
1167 // NOTE: squash should originate from
1168 // pipeline_stage.cc:processInstSchedule
1169 DPRINTF(InOrderCachePort, "Squashing above [sn:%u]\n",
1170 squash_seq_num + 1);
1171
1172 squash(inst, stage_num, squash_seq_num + 1, tid);
1173 }
1174
1175
void
CacheUnit::squash(DynInstPtr inst, int stage_num,
                  InstSeqNum squash_seq_num, ThreadID tid)
{
    // Squash every outstanding cache request from this thread whose
    // instruction is younger than squash_seq_num.  Idle requests are
    // queued for removal immediately; requests with a pending TLB stall
    // or in-flight memory access are only marked squashed here and are
    // reclaimed when that outstanding activity completes.
    vector<int> slot_remove_list;

    map<int, ResReqPtr>::iterator map_it = reqMap.begin();
    map<int, ResReqPtr>::iterator map_end = reqMap.end();

    while (map_it != map_end) {
        ResReqPtr req_ptr = (*map_it).second;

        // Only requests from this thread that are younger than the
        // squash point are affected.
        if (req_ptr &&
            req_ptr->getInst()->readTid() == tid &&
            req_ptr->getInst()->seqNum > squash_seq_num) {

            DPRINTF(InOrderCachePort,
                    "[tid:%i] Squashing request from [sn:%i]\n",
                    req_ptr->getInst()->readTid(), req_ptr->getInst()->seqNum);

            // Already squashed on an earlier pass; skip all further
            // processing for this request.
            if (req_ptr->isSquashed()) {
                DPRINTF(AddrDep, "Request for [tid:%i] [sn:%i] already squashed, ignoring squash process.\n",
                        req_ptr->getInst()->readTid(),
                        req_ptr->getInst()->seqNum);
                map_it++;
                continue;
            }

            req_ptr->setSquashed();

            req_ptr->getInst()->setSquashed();

            CacheReqPtr cache_req = dynamic_cast<CacheReqPtr>(req_ptr);
            assert(cache_req);

            int req_slot_num = req_ptr->getSlot();

            // A request stalled on a TLB miss is holding both the TLB
            // and a pipeline-stage stall; release both before freeing.
            if (cache_req->tlbStall) {
                tlbBlocked[tid] = false;

                int stall_stage = reqMap[req_slot_num]->getStageNum();

                cpu->pipelineStage[stall_stage]->
                    unsetResStall(reqMap[req_slot_num], tid);
            }

            // Only a request with no outstanding activity can be freed
            // now; a pending one is cleaned up on completion instead.
            if (!cache_req->tlbStall && !cache_req->isMemAccPending()) {
                // Mark request for later removal
                cpu->reqRemoveList.push(req_ptr);

                // Mark slot for removal from resource
                slot_remove_list.push_back(req_ptr->getSlot());

                DPRINTF(InOrderCachePort,
                        "[tid:%i] Squashing request from [sn:%i]\n",
                        req_ptr->getInst()->readTid(), req_ptr->getInst()->seqNum);
            } else {
                DPRINTF(InOrderCachePort,
                        "[tid:%i] Request from [sn:%i] squashed, but still pending completion.\n",
                        req_ptr->getInst()->readTid(), req_ptr->getInst()->seqNum);
                DPRINTF(RefCount,
                        "[tid:%i] Request from [sn:%i] squashed (split:%i), but still pending completion.\n",
                        req_ptr->getInst()->readTid(), req_ptr->getInst()->seqNum,
                        req_ptr->getInst()->splitInst);
            }

            // Drop any address-dependency tracking tied to this
            // instruction's memory address.
            if (req_ptr->getInst()->validMemAddr()) {
                DPRINTF(AddrDep, "Squash of [tid:%i] [sn:%i], attempting to remove addr. %08p dependencies.\n",
                        req_ptr->getInst()->readTid(),
                        req_ptr->getInst()->seqNum,
                        req_ptr->getInst()->getMemAddr());

                removeAddrDependency(req_ptr->getInst());
            }

        }

        map_it++;
    }

    // Now Delete Slot Entry from Req. Map
    for (int i = 0; i < slot_remove_list.size(); i++)
        freeSlot(slot_remove_list[i]);
}
1260
1261 uint64_t
1262 CacheUnit::getMemData(Packet *packet)
1263 {
1264 switch (packet->getSize())
1265 {
1266 case 8:
1267 return packet->get<uint8_t>();
1268
1269 case 16:
1270 return packet->get<uint16_t>();
1271
1272 case 32:
1273 return packet->get<uint32_t>();
1274
1275 case 64:
1276 return packet->get<uint64_t>();
1277
1278 default:
1279 panic("bad store data size = %d\n", packet->getSize());
1280 }
1281 }
1282
// Extra Template Definitions
#ifndef DOXYGEN_SHOULD_SKIP_THIS

// Explicit instantiations of CacheUnit::read for every integer/twin
// access width the ISAs use, so the template definitions are emitted
// from this translation unit.
template
Fault
CacheUnit::read(DynInstPtr inst, Addr addr, Twin32_t &data, unsigned flags);

template
Fault
CacheUnit::read(DynInstPtr inst, Addr addr, Twin64_t &data, unsigned flags);

template
Fault
CacheUnit::read(DynInstPtr inst, Addr addr, uint64_t &data, unsigned flags);

template
Fault
CacheUnit::read(DynInstPtr inst, Addr addr, uint32_t &data, unsigned flags);

template
Fault
CacheUnit::read(DynInstPtr inst, Addr addr, uint16_t &data, unsigned flags);

template
Fault
CacheUnit::read(DynInstPtr inst, Addr addr, uint8_t &data, unsigned flags);

#endif //DOXYGEN_SHOULD_SKIP_THIS
1311
1312 template<>
1313 Fault
1314 CacheUnit::read(DynInstPtr inst, Addr addr, double &data, unsigned flags)
1315 {
1316 return read(inst, addr, *(uint64_t*)&data, flags);
1317 }
1318
1319 template<>
1320 Fault
1321 CacheUnit::read(DynInstPtr inst, Addr addr, float &data, unsigned flags)
1322 {
1323 return read(inst, addr, *(uint32_t*)&data, flags);
1324 }
1325
1326
1327 template<>
1328 Fault
1329 CacheUnit::read(DynInstPtr inst, Addr addr, int32_t &data, unsigned flags)
1330 {
1331 return read(inst, addr, (uint32_t&)data, flags);
1332 }
1333
#ifndef DOXYGEN_SHOULD_SKIP_THIS

// Explicit instantiations of CacheUnit::write for every integer/twin
// access width the ISAs use, mirroring the read instantiations above.
template
Fault
CacheUnit::write(DynInstPtr inst, Twin32_t data, Addr addr,
                 unsigned flags, uint64_t *res);

template
Fault
CacheUnit::write(DynInstPtr inst, Twin64_t data, Addr addr,
                 unsigned flags, uint64_t *res);

template
Fault
CacheUnit::write(DynInstPtr inst, uint64_t data, Addr addr,
                 unsigned flags, uint64_t *res);

template
Fault
CacheUnit::write(DynInstPtr inst, uint32_t data, Addr addr,
                 unsigned flags, uint64_t *res);

template
Fault
CacheUnit::write(DynInstPtr inst, uint16_t data, Addr addr,
                 unsigned flags, uint64_t *res);

template
Fault
CacheUnit::write(DynInstPtr inst, uint8_t data, Addr addr,
                 unsigned flags, uint64_t *res);

#endif //DOXYGEN_SHOULD_SKIP_THIS
1367
1368 template<>
1369 Fault
1370 CacheUnit::write(DynInstPtr inst, double data, Addr addr, unsigned flags,
1371 uint64_t *res)
1372 {
1373 return write(inst, *(uint64_t*)&data, addr, flags, res);
1374 }
1375
1376 template<>
1377 Fault
1378 CacheUnit::write(DynInstPtr inst, float data, Addr addr, unsigned flags,
1379 uint64_t *res)
1380 {
1381 return write(inst, *(uint32_t*)&data, addr, flags, res);
1382 }
1383
1384
1385 template<>
1386 Fault
1387 CacheUnit::write(DynInstPtr inst, int32_t data, Addr addr, unsigned flags,
1388 uint64_t *res)
1389 {
1390 return write(inst, (uint32_t)data, addr, flags, res);
1391 }
1392