Merge zeep.eecs.umich.edu:/home/gblack/m5/newmem
[gem5.git] / src / cpu / o3 / lsq_unit_impl.hh
1 /*
2 * Copyright (c) 2004-2005 The Regents of The University of Michigan
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 *
28 * Authors: Kevin Lim
29 * Korey Sewell
30 */
31
32 #include "config/use_checker.hh"
33
34 #include "cpu/o3/lsq.hh"
35 #include "cpu/o3/lsq_unit.hh"
36 #include "base/str.hh"
37 #include "mem/packet.hh"
38 #include "mem/request.hh"
39
40 #if USE_CHECKER
41 #include "cpu/checker/cpu.hh"
42 #endif
43
template<class Impl>
LSQUnit<Impl>::WritebackEvent::WritebackEvent(DynInstPtr &_inst, PacketPtr _pkt,
                                              LSQUnit *lsq_ptr)
    : Event(&mainEventQueue), inst(_inst), pkt(_pkt), lsqPtr(lsq_ptr)
{
    // The event deletes itself once process() has run, so the
    // scheduler owns its lifetime after construction.
    this->setFlags(Event::AutoDelete);
}
51
52 template<class Impl>
53 void
54 LSQUnit<Impl>::WritebackEvent::process()
55 {
56 if (!lsqPtr->isSwitchedOut()) {
57 lsqPtr->writeback(inst, pkt);
58 }
59 delete pkt;
60 }
61
template<class Impl>
const char *
LSQUnit<Impl>::WritebackEvent::description()
{
    // Human-readable event name used by event-queue debug output.
    return "Store writeback event";
}
68
69 template<class Impl>
70 void
71 LSQUnit<Impl>::completeDataAccess(PacketPtr pkt)
72 {
73 LSQSenderState *state = dynamic_cast<LSQSenderState *>(pkt->senderState);
74 DynInstPtr inst = state->inst;
75 DPRINTF(IEW, "Writeback event [sn:%lli]\n", inst->seqNum);
76 DPRINTF(Activity, "Activity: Writeback event [sn:%lli]\n", inst->seqNum);
77
78 //iewStage->ldstQueue.removeMSHR(inst->threadNumber,inst->seqNum);
79
80 if (isSwitchedOut() || inst->isSquashed()) {
81 iewStage->decrWb(inst->seqNum);
82 delete state;
83 delete pkt;
84 return;
85 } else {
86 if (!state->noWB) {
87 writeback(inst, pkt);
88 }
89
90 if (inst->isStore()) {
91 completeStore(state->idx);
92 }
93 }
94
95 delete state;
96 delete pkt;
97 }
98
template <class Impl>
LSQUnit<Impl>::LSQUnit()
    : loads(0), stores(0), storesToWB(0), stalled(false),
      isStoreBlocked(false), isLoadBlocked(false),
      loadBlockedHandled(false)
{
    // Counters and blocking flags only; the queues and pointers are
    // set up later in init().
}
106
107 template<class Impl>
108 void
109 LSQUnit<Impl>::init(Params *params, LSQ *lsq_ptr, unsigned maxLQEntries,
110 unsigned maxSQEntries, unsigned id)
111 {
112 DPRINTF(LSQUnit, "Creating LSQUnit%i object.\n",id);
113
114 switchedOut = false;
115
116 lsq = lsq_ptr;
117
118 lsqID = id;
119
120 // Add 1 for the sentinel entry (they are circular queues).
121 LQEntries = maxLQEntries + 1;
122 SQEntries = maxSQEntries + 1;
123
124 loadQueue.resize(LQEntries);
125 storeQueue.resize(SQEntries);
126
127 loadHead = loadTail = 0;
128
129 storeHead = storeWBIdx = storeTail = 0;
130
131 usedPorts = 0;
132 cachePorts = params->cachePorts;
133
134 retryPkt = NULL;
135 memDepViolator = NULL;
136
137 blockedLoadSeqNum = 0;
138 }
139
template<class Impl>
void
LSQUnit<Impl>::setCPU(O3CPU *cpu_ptr)
{
    cpu = cpu_ptr;

    // If a checker CPU is attached, give it our data-cache port so it
    // can verify memory accesses.
#if USE_CHECKER
    if (cpu->checker) {
        cpu->checker->setDcachePort(dcachePort);
    }
#endif
}
152
153 template<class Impl>
154 std::string
155 LSQUnit<Impl>::name() const
156 {
157 if (Impl::MaxThreads == 1) {
158 return iewStage->name() + ".lsq";
159 } else {
160 return iewStage->name() + ".lsq.thread." + to_string(lsqID);
161 }
162 }
163
template<class Impl>
void
LSQUnit<Impl>::regStats()
{
    // Register this unit's statistics with the stats package.  Names
    // are scoped under name(), so per-thread units get distinct stats.
    lsqForwLoads
        .name(name() + ".forwLoads")
        .desc("Number of loads that had data forwarded from stores");

    invAddrLoads
        .name(name() + ".invAddrLoads")
        .desc("Number of loads ignored due to an invalid address");

    lsqSquashedLoads
        .name(name() + ".squashedLoads")
        .desc("Number of loads squashed");

    lsqIgnoredResponses
        .name(name() + ".ignoredResponses")
        .desc("Number of memory responses ignored because the instruction is squashed");

    lsqMemOrderViolation
        .name(name() + ".memOrderViolation")
        .desc("Number of memory ordering violations");

    lsqSquashedStores
        .name(name() + ".squashedStores")
        .desc("Number of stores squashed");

    invAddrSwpfs
        .name(name() + ".invAddrSwpfs")
        .desc("Number of software prefetches ignored due to an invalid address");

    lsqBlockedLoads
        .name(name() + ".blockedLoads")
        .desc("Number of blocked loads due to partial load-store forwarding");

    lsqRescheduledLoads
        .name(name() + ".rescheduledLoads")
        .desc("Number of loads that were rescheduled");

    lsqCacheBlocked
        .name(name() + ".cacheBlocked")
        .desc("Number of times an access to memory failed due to the cache being blocked");
}
208
template<class Impl>
void
LSQUnit<Impl>::clearLQ()
{
    // Drop every load-queue entry (releases the smart pointers).
    loadQueue.clear();
}
215
template<class Impl>
void
LSQUnit<Impl>::clearSQ()
{
    // Drop every store-queue entry.
    storeQueue.clear();
}
222
template<class Impl>
void
LSQUnit<Impl>::switchOut()
{
    switchedOut = true;
    // The load queue should already be drained at switch-out; the
    // assert verifies that, and the assignment (redundant when the
    // assert holds) makes sure no smart pointer lingers.
    for (int i = 0; i < loadQueue.size(); ++i) {
        assert(!loadQueue[i]);
        loadQueue[i] = NULL;
    }

    // All committed stores must have written back before switching.
    assert(storesToWB == 0);
}
235
236 template<class Impl>
237 void
238 LSQUnit<Impl>::takeOverFrom()
239 {
240 switchedOut = false;
241 loads = stores = storesToWB = 0;
242
243 loadHead = loadTail = 0;
244
245 storeHead = storeWBIdx = storeTail = 0;
246
247 usedPorts = 0;
248
249 memDepViolator = NULL;
250
251 blockedLoadSeqNum = 0;
252
253 stalled = false;
254 isLoadBlocked = false;
255 loadBlockedHandled = false;
256 }
257
template<class Impl>
void
LSQUnit<Impl>::resizeLQ(unsigned size)
{
    // "size" excludes the sentinel slot that circular queues need.
    unsigned size_plus_sentinel = size + 1;
    // Shrinking below the current logical size is not supported here
    // (unlike resizeSQ, which has no such assert).
    assert(size_plus_sentinel >= LQEntries);

    if (size_plus_sentinel > LQEntries) {
        // Grow the backing vector one slot at a time, keeping
        // LQEntries in sync with the physical size.
        while (size_plus_sentinel > loadQueue.size()) {
            DynInstPtr dummy;
            loadQueue.push_back(dummy);
            LQEntries++;
        }
    } else {
        // Logical shrink: the vector keeps its slots, only the
        // effective entry count drops.
        LQEntries = size_plus_sentinel;
    }

}
276
template<class Impl>
void
LSQUnit<Impl>::resizeSQ(unsigned size)
{
    // "size" excludes the sentinel slot that circular queues need.
    unsigned size_plus_sentinel = size + 1;
    if (size_plus_sentinel > SQEntries) {
        // Grow the backing vector, keeping SQEntries in sync.
        while (size_plus_sentinel > storeQueue.size()) {
            SQEntry dummy;
            storeQueue.push_back(dummy);
            SQEntries++;
        }
    } else {
        // Logical shrink: only the effective entry count changes.
        SQEntries = size_plus_sentinel;
    }
}
292
293 template <class Impl>
294 void
295 LSQUnit<Impl>::insert(DynInstPtr &inst)
296 {
297 assert(inst->isMemRef());
298
299 assert(inst->isLoad() || inst->isStore());
300
301 if (inst->isLoad()) {
302 insertLoad(inst);
303 } else {
304 insertStore(inst);
305 }
306
307 inst->setInLSQ();
308 }
309
310 template <class Impl>
311 void
312 LSQUnit<Impl>::insertLoad(DynInstPtr &load_inst)
313 {
314 assert((loadTail + 1) % LQEntries != loadHead);
315 assert(loads < LQEntries);
316
317 DPRINTF(LSQUnit, "Inserting load PC %#x, idx:%i [sn:%lli]\n",
318 load_inst->readPC(), loadTail, load_inst->seqNum);
319
320 load_inst->lqIdx = loadTail;
321
322 if (stores == 0) {
323 load_inst->sqIdx = -1;
324 } else {
325 load_inst->sqIdx = storeTail;
326 }
327
328 loadQueue[loadTail] = load_inst;
329
330 incrLdIdx(loadTail);
331
332 ++loads;
333 }
334
335 template <class Impl>
336 void
337 LSQUnit<Impl>::insertStore(DynInstPtr &store_inst)
338 {
339 // Make sure it is not full before inserting an instruction.
340 assert((storeTail + 1) % SQEntries != storeHead);
341 assert(stores < SQEntries);
342
343 DPRINTF(LSQUnit, "Inserting store PC %#x, idx:%i [sn:%lli]\n",
344 store_inst->readPC(), storeTail, store_inst->seqNum);
345
346 store_inst->sqIdx = storeTail;
347 store_inst->lqIdx = loadTail;
348
349 storeQueue[storeTail] = SQEntry(store_inst);
350
351 incrStIdx(storeTail);
352
353 ++stores;
354 }
355
356 template <class Impl>
357 typename Impl::DynInstPtr
358 LSQUnit<Impl>::getMemDepViolator()
359 {
360 DynInstPtr temp = memDepViolator;
361
362 memDepViolator = NULL;
363
364 return temp;
365 }
366
367 template <class Impl>
368 unsigned
369 LSQUnit<Impl>::numFreeEntries()
370 {
371 unsigned free_lq_entries = LQEntries - loads;
372 unsigned free_sq_entries = SQEntries - stores;
373
374 // Both the LQ and SQ entries have an extra dummy entry to differentiate
375 // empty/full conditions. Subtract 1 from the free entries.
376 if (free_lq_entries < free_sq_entries) {
377 return free_lq_entries - 1;
378 } else {
379 return free_sq_entries - 1;
380 }
381 }
382
383 template <class Impl>
384 int
385 LSQUnit<Impl>::numLoadsReady()
386 {
387 int load_idx = loadHead;
388 int retval = 0;
389
390 while (load_idx != loadTail) {
391 assert(loadQueue[load_idx]);
392
393 if (loadQueue[load_idx]->readyToIssue()) {
394 ++retval;
395 }
396 }
397
398 return retval;
399 }
400
template <class Impl>
Fault
LSQUnit<Impl>::executeLoad(DynInstPtr &inst)
{
    // Execute a specific load.
    Fault load_fault = NoFault;

    DPRINTF(LSQUnit, "Executing load PC %#x, [sn:%lli]\n",
            inst->readPC(),inst->seqNum);

    // Start the memory access; for a timing access the data comes back
    // later via completeDataAccess().
    load_fault = inst->initiateAcc();

    // If the instruction faulted, then we need to send it along to commit
    // without the instruction completing.
    if (load_fault != NoFault) {
        // Send this instruction to commit, also make sure iew stage
        // realizes there is activity.
        // Mark it as executed unless it is an uncached load that
        // needs to hit the head of commit.
        if (!(inst->req->isUncacheable()) || inst->isAtCommit()) {
            inst->setExecuted();
        }
        iewStage->instToCommit(inst);
        iewStage->activityThisCycle();
    }

    return load_fault;
}
429
template <class Impl>
Fault
LSQUnit<Impl>::executeStore(DynInstPtr &store_inst)
{
    using namespace TheISA;
    // Make sure that a store exists.
    assert(stores != 0);

    int store_idx = store_inst->sqIdx;

    DPRINTF(LSQUnit, "Executing store PC %#x [sn:%lli]\n",
            store_inst->readPC(), store_inst->seqNum);

    // Check the recently completed loads to see if any match this store's
    // address. If so, then we have a memory ordering violation.
    int load_idx = store_inst->lqIdx;

    Fault store_fault = store_inst->initiateAcc();

    // A zero-size store wrote no data (e.g. a faulting access); just
    // report whatever initiateAcc() produced.
    if (storeQueue[store_idx].size == 0) {
        DPRINTF(LSQUnit,"Fault on Store PC %#x, [sn:%lli],Size = 0\n",
                store_inst->readPC(),store_inst->seqNum);

        return store_fault;
    }

    assert(store_fault == NoFault);

    if (store_inst->isStoreConditional()) {
        // Store conditionals need to set themselves as able to
        // writeback if we haven't had a fault by here.
        storeQueue[store_idx].canWB = true;

        ++storesToWB;
    }

    // Scan younger loads (from this store's lqIdx forward) for an
    // address overlap; only the first violation is recorded.
    if (!memDepViolator) {
        while (load_idx != loadTail) {
            // Really only need to check loads that have actually executed
            // It's safe to check all loads because effAddr is set to
            // InvalAddr when the dyn inst is created.

            // @todo: For now this is extra conservative, detecting a
            // violation if the addresses match assuming all accesses
            // are quad word accesses.

            // @todo: Fix this, magic number being used here
            if ((loadQueue[load_idx]->effAddr >> 8) ==
                (store_inst->effAddr >> 8)) {
                // A load incorrectly passed this store. Squash and refetch.
                // For now return a fault to show that it was unsuccessful.
                memDepViolator = loadQueue[load_idx];
                ++lsqMemOrderViolation;

                return genMachineCheckFault();
            }

            incrLdIdx(load_idx);
        }

        // If we've reached this point, there was no violation.
        memDepViolator = NULL;
    }

    return store_fault;
}
496
497 template <class Impl>
498 void
499 LSQUnit<Impl>::commitLoad()
500 {
501 assert(loadQueue[loadHead]);
502
503 DPRINTF(LSQUnit, "Committing head load instruction, PC %#x\n",
504 loadQueue[loadHead]->readPC());
505
506 loadQueue[loadHead] = NULL;
507
508 incrLdIdx(loadHead);
509
510 --loads;
511 }
512
template <class Impl>
void
LSQUnit<Impl>::commitLoads(InstSeqNum &youngest_inst)
{
    assert(loads == 0 || loadQueue[loadHead]);

    // Retire, in order, every load at or before the youngest committed
    // sequence number.
    while (loads != 0 && loadQueue[loadHead]->seqNum <= youngest_inst) {
        commitLoad();
    }
}
523
template <class Impl>
void
LSQUnit<Impl>::commitStores(InstSeqNum &youngest_inst)
{
    assert(stores == 0 || storeQueue[storeHead].inst);

    int store_idx = storeHead;

    // Walk the queue in order; stores stay in the SQ after commit and
    // are only marked eligible for writeback here.
    while (store_idx != storeTail) {
        assert(storeQueue[store_idx].inst);
        // Mark any stores that are now committed and have not yet
        // been marked as able to write back.
        if (!storeQueue[store_idx].canWB) {
            // Stores younger than the commit point are not ready yet,
            // and neither is anything behind them.
            if (storeQueue[store_idx].inst->seqNum > youngest_inst) {
                break;
            }
            DPRINTF(LSQUnit, "Marking store as able to write back, PC "
                    "%#x [sn:%lli]\n",
                    storeQueue[store_idx].inst->readPC(),
                    storeQueue[store_idx].inst->seqNum);

            storeQueue[store_idx].canWB = true;

            ++storesToWB;
        }

        incrStIdx(store_idx);
    }
}
553
554 template <class Impl>
555 void
556 LSQUnit<Impl>::writebackStores()
557 {
558 while (storesToWB > 0 &&
559 storeWBIdx != storeTail &&
560 storeQueue[storeWBIdx].inst &&
561 storeQueue[storeWBIdx].canWB &&
562 usedPorts < cachePorts) {
563
564 if (isStoreBlocked || lsq->cacheBlocked()) {
565 DPRINTF(LSQUnit, "Unable to write back any more stores, cache"
566 " is blocked!\n");
567 break;
568 }
569
570 // Store didn't write any data so no need to write it back to
571 // memory.
572 if (storeQueue[storeWBIdx].size == 0) {
573 completeStore(storeWBIdx);
574
575 incrStIdx(storeWBIdx);
576
577 continue;
578 }
579
580 ++usedPorts;
581
582 if (storeQueue[storeWBIdx].inst->isDataPrefetch()) {
583 incrStIdx(storeWBIdx);
584
585 continue;
586 }
587
588 assert(storeQueue[storeWBIdx].req);
589 assert(!storeQueue[storeWBIdx].committed);
590
591 DynInstPtr inst = storeQueue[storeWBIdx].inst;
592
593 Request *req = storeQueue[storeWBIdx].req;
594 storeQueue[storeWBIdx].committed = true;
595
596 assert(!inst->memData);
597 inst->memData = new uint8_t[64];
598 memcpy(inst->memData, (uint8_t *)&storeQueue[storeWBIdx].data,
599 req->getSize());
600
601 PacketPtr data_pkt = new Packet(req, Packet::WriteReq, Packet::Broadcast);
602 data_pkt->dataStatic(inst->memData);
603
604 LSQSenderState *state = new LSQSenderState;
605 state->isLoad = false;
606 state->idx = storeWBIdx;
607 state->inst = inst;
608 data_pkt->senderState = state;
609
610 DPRINTF(LSQUnit, "D-Cache: Writing back store idx:%i PC:%#x "
611 "to Addr:%#x, data:%#x [sn:%lli]\n",
612 storeWBIdx, inst->readPC(),
613 req->getPaddr(), *(inst->memData),
614 inst->seqNum);
615
616 // @todo: Remove this SC hack once the memory system handles it.
617 if (req->isLocked()) {
618 if (req->isUncacheable()) {
619 req->setScResult(2);
620 } else {
621 if (cpu->lockFlag) {
622 req->setScResult(1);
623 DPRINTF(LSQUnit, "Store conditional [sn:%lli] succeeded.",
624 inst->seqNum);
625 } else {
626 req->setScResult(0);
627 // Hack: Instantly complete this store.
628 // completeDataAccess(data_pkt);
629 DPRINTF(LSQUnit, "Store conditional [sn:%lli] failed. "
630 "Instantly completing it.\n",
631 inst->seqNum);
632 WritebackEvent *wb = new WritebackEvent(inst, data_pkt, this);
633 wb->schedule(curTick + 1);
634 delete state;
635 completeStore(storeWBIdx);
636 incrStIdx(storeWBIdx);
637 continue;
638 }
639 }
640 } else {
641 // Non-store conditionals do not need a writeback.
642 state->noWB = true;
643 }
644
645 if (!dcachePort->sendTiming(data_pkt)) {
646 if (data_pkt->result == Packet::BadAddress) {
647 panic("LSQ sent out a bad address for a completed store!");
648 }
649 // Need to handle becoming blocked on a store.
650 DPRINTF(IEW, "D-Cache became blcoked when writing [sn:%lli], will"
651 "retry later\n",
652 inst->seqNum);
653 isStoreBlocked = true;
654 ++lsqCacheBlocked;
655 assert(retryPkt == NULL);
656 retryPkt = data_pkt;
657 lsq->setRetryTid(lsqID);
658 } else {
659 storePostSend(data_pkt);
660 }
661 }
662
663 // Not sure this should set it to 0.
664 usedPorts = 0;
665
666 assert(stores >= 0 && storesToWB >= 0);
667 }
668
669 /*template <class Impl>
670 void
671 LSQUnit<Impl>::removeMSHR(InstSeqNum seqNum)
672 {
673 list<InstSeqNum>::iterator mshr_it = find(mshrSeqNums.begin(),
674 mshrSeqNums.end(),
675 seqNum);
676
677 if (mshr_it != mshrSeqNums.end()) {
678 mshrSeqNums.erase(mshr_it);
679 DPRINTF(LSQUnit, "Removing MSHR. count = %i\n",mshrSeqNums.size());
680 }
681 }*/
682
template <class Impl>
void
LSQUnit<Impl>::squash(const InstSeqNum &squashed_num)
{
    DPRINTF(LSQUnit, "Squashing until [sn:%lli]!"
            "(Loads:%i Stores:%i)\n", squashed_num, loads, stores);

    // Walk the load queue backwards from the tail, removing every load
    // younger than squashed_num.
    int load_idx = loadTail;
    decrLdIdx(load_idx);

    while (loads != 0 && loadQueue[load_idx]->seqNum > squashed_num) {
        DPRINTF(LSQUnit,"Load Instruction PC %#x squashed, "
                "[sn:%lli]\n",
                loadQueue[load_idx]->readPC(),
                loadQueue[load_idx]->seqNum);

        // If the stalling load itself is squashed, clear the stall.
        if (isStalled() && load_idx == stallingLoadIdx) {
            stalled = false;
            stallingStoreIsn = 0;
            stallingLoadIdx = 0;
        }

        // Clear the smart pointer to make sure it is decremented.
        loadQueue[load_idx]->setSquashed();
        loadQueue[load_idx] = NULL;
        --loads;

        // Inefficient!
        loadTail = load_idx;

        decrLdIdx(load_idx);
        ++lsqSquashedLoads;
    }

    // A pending blocked load that is younger than the squash point no
    // longer needs special handling.
    if (isLoadBlocked) {
        if (squashed_num < blockedLoadSeqNum) {
            isLoadBlocked = false;
            loadBlockedHandled = false;
            blockedLoadSeqNum = 0;
        }
    }

    // Walk the store queue backwards from the tail, removing every
    // uncommitted store younger than squashed_num.
    int store_idx = storeTail;
    decrStIdx(store_idx);

    while (stores != 0 &&
           storeQueue[store_idx].inst->seqNum > squashed_num) {
        // Instructions marked as can WB are already committed.
        if (storeQueue[store_idx].canWB) {
            break;
        }

        DPRINTF(LSQUnit,"Store Instruction PC %#x squashed, "
                "idx:%i [sn:%lli]\n",
                storeQueue[store_idx].inst->readPC(),
                store_idx, storeQueue[store_idx].inst->seqNum);

        // I don't think this can happen. It should have been cleared
        // by the stalling load.
        if (isStalled() &&
            storeQueue[store_idx].inst->seqNum == stallingStoreIsn) {
            panic("Is stalled should have been cleared by stalling load!\n");
            stalled = false;
            stallingStoreIsn = 0;
        }

        // Clear the smart pointer to make sure it is decremented.
        storeQueue[store_idx].inst->setSquashed();
        storeQueue[store_idx].inst = NULL;
        storeQueue[store_idx].canWB = 0;

        storeQueue[store_idx].req = NULL;
        --stores;

        // Inefficient!
        storeTail = store_idx;

        decrStIdx(store_idx);
        ++lsqSquashedStores;
    }
}
764
template <class Impl>
void
LSQUnit<Impl>::storePostSend(PacketPtr pkt)
{
    // If a load was stalled waiting on this store, wake it up now that
    // the store has been sent to memory.
    if (isStalled() &&
        storeQueue[storeWBIdx].inst->seqNum == stallingStoreIsn) {
        DPRINTF(LSQUnit, "Unstalling, stalling store [sn:%lli] "
                "load idx:%i\n",
                stallingStoreIsn, stallingLoadIdx);
        stalled = false;
        stallingStoreIsn = 0;
        iewStage->replayMemInst(loadQueue[stallingLoadIdx]);
    }

    if (!storeQueue[storeWBIdx].inst->isStoreConditional()) {
        // The store is basically completed at this time. This
        // only works so long as the checker doesn't try to
        // verify the value in memory for stores.
        storeQueue[storeWBIdx].inst->setCompleted();
#if USE_CHECKER
        if (cpu->checker) {
            cpu->checker->verify(storeQueue[storeWBIdx].inst);
        }
#endif
    }

    // Debug-trace the hit/miss outcome of the send.
    if (pkt->result != Packet::Success) {
        DPRINTF(LSQUnit,"D-Cache Write Miss on idx:%i!\n",
                storeWBIdx);

        DPRINTF(Activity, "Active st accessing mem miss [sn:%lli]\n",
                storeQueue[storeWBIdx].inst->seqNum);

        //mshrSeqNums.push_back(storeQueue[storeWBIdx].inst->seqNum);

        //DPRINTF(LSQUnit, "Added MSHR. count = %i\n",mshrSeqNums.size());

        // @todo: Increment stat here.
    } else {
        DPRINTF(LSQUnit,"D-Cache: Write Hit on idx:%i !\n",
                storeWBIdx);

        DPRINTF(Activity, "Active st accessing mem hit [sn:%lli]\n",
                storeQueue[storeWBIdx].inst->seqNum);
    }

    // Advance past the entry that was just sent.
    incrStIdx(storeWBIdx);
}
813
template <class Impl>
void
LSQUnit<Impl>::writeback(DynInstPtr &inst, PacketPtr pkt)
{
    iewStage->wakeCPU();

    // Squashed instructions do not need to complete their access.
    if (inst->isSquashed()) {
        iewStage->decrWb(inst->seqNum);
        // Stores handle squashing before writeback, so only loads
        // should ever reach this path.
        assert(!inst->isStore());
        ++lsqIgnoredResponses;
        return;
    }

    if (!inst->isExecuted()) {
        inst->setExecuted();

        // Complete access to copy data to proper place.
        inst->completeAcc(pkt);
    }

    // Need to insert instruction into queue to commit
    iewStage->instToCommit(inst);

    iewStage->activityThisCycle();
}
840
template <class Impl>
void
LSQUnit<Impl>::completeStore(int store_idx)
{
    assert(storeQueue[store_idx].inst);
    storeQueue[store_idx].completed = true;
    --storesToWB;
    // A bit conservative because a store completion may not free up entries,
    // but hopefully avoids two store completions in one cycle from making
    // the CPU tick twice.
    cpu->wakeCPU();
    cpu->activityThisCycle();

    // Stores may complete out of order; only pop from the head once the
    // head entry (and any run of completed entries behind it) is done.
    if (store_idx == storeHead) {
        do {
            incrStIdx(storeHead);

            --stores;
        } while (storeQueue[storeHead].completed &&
                 storeHead != storeTail);

        iewStage->updateLSQNextCycle = true;
    }

    DPRINTF(LSQUnit, "Completing store [sn:%lli], idx:%i, store head "
            "idx:%i\n",
            storeQueue[store_idx].inst->seqNum, store_idx, storeHead);

    // Wake a load that was stalled on this store, if any.
    if (isStalled() &&
        storeQueue[store_idx].inst->seqNum == stallingStoreIsn) {
        DPRINTF(LSQUnit, "Unstalling, stalling store [sn:%lli] "
                "load idx:%i\n",
                stallingStoreIsn, stallingLoadIdx);
        stalled = false;
        stallingStoreIsn = 0;
        iewStage->replayMemInst(loadQueue[stallingLoadIdx]);
    }

    storeQueue[store_idx].inst->setCompleted();

    // Tell the checker we've completed this instruction. Some stores
    // may get reported twice to the checker, but the checker can
    // handle that case.
#if USE_CHECKER
    if (cpu->checker) {
        cpu->checker->verify(storeQueue[store_idx].inst);
    }
#endif
}
890
template <class Impl>
void
LSQUnit<Impl>::recvRetry()
{
    // The cache is telling us it can accept another request; resend
    // the store packet we were blocked on, if any.
    if (isStoreBlocked) {
        assert(retryPkt != NULL);

        if (dcachePort->sendTiming(retryPkt)) {
            if (retryPkt->result == Packet::BadAddress) {
                panic("LSQ sent out a bad address for a completed store!");
            }
            storePostSend(retryPkt);
            retryPkt = NULL;
            isStoreBlocked = false;
            // -1 clears the retry thread ID in the owning LSQ.
            lsq->setRetryTid(-1);
        } else {
            // Still blocked!
            ++lsqCacheBlocked;
            lsq->setRetryTid(lsqID);
        }
    } else if (isLoadBlocked) {
        DPRINTF(LSQUnit, "Loads squash themselves and all younger insts, "
                "no need to resend packet.\n");
    } else {
        DPRINTF(LSQUnit, "Retry received but LSQ is no longer blocked.\n");
    }
}
918
919 template <class Impl>
920 inline void
921 LSQUnit<Impl>::incrStIdx(int &store_idx)
922 {
923 if (++store_idx >= SQEntries)
924 store_idx = 0;
925 }
926
927 template <class Impl>
928 inline void
929 LSQUnit<Impl>::decrStIdx(int &store_idx)
930 {
931 if (--store_idx < 0)
932 store_idx += SQEntries;
933 }
934
935 template <class Impl>
936 inline void
937 LSQUnit<Impl>::incrLdIdx(int &load_idx)
938 {
939 if (++load_idx >= LQEntries)
940 load_idx = 0;
941 }
942
943 template <class Impl>
944 inline void
945 LSQUnit<Impl>::decrLdIdx(int &load_idx)
946 {
947 if (--load_idx < 0)
948 load_idx += LQEntries;
949 }
950
951 template <class Impl>
952 void
953 LSQUnit<Impl>::dumpInsts()
954 {
955 cprintf("Load store queue: Dumping instructions.\n");
956 cprintf("Load queue size: %i\n", loads);
957 cprintf("Load queue: ");
958
959 int load_idx = loadHead;
960
961 while (load_idx != loadTail && loadQueue[load_idx]) {
962 cprintf("%#x ", loadQueue[load_idx]->readPC());
963
964 incrLdIdx(load_idx);
965 }
966
967 cprintf("Store queue size: %i\n", stores);
968 cprintf("Store queue: ");
969
970 int store_idx = storeHead;
971
972 while (store_idx != storeTail && storeQueue[store_idx].inst) {
973 cprintf("%#x ", storeQueue[store_idx].inst->readPC());
974
975 incrStIdx(store_idx);
976 }
977
978 cprintf("\n");
979 }