2 * Copyright (c) 2004-2005 The Regents of The University of Michigan
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 #include "config/use_checker.hh"
34 #include "cpu/o3/lsq.hh"
35 #include "cpu/o3/lsq_unit.hh"
36 #include "base/str.hh"
37 #include "mem/packet.hh"
38 #include "mem/request.hh"
41 #include "cpu/checker/cpu.hh"
// WritebackEvent: deferred event used to complete a memory instruction's
// writeback at a later tick on the main event queue.
// NOTE(review): interior lines are missing from this excerpt.
45 LSQUnit<Impl>::WritebackEvent::WritebackEvent(DynInstPtr &_inst, PacketPtr _pkt,
47 : Event(&mainEventQueue), inst(_inst), pkt(_pkt), lsqPtr(lsq_ptr)
// AutoDelete: the event queue frees this event after it is processed.
49 this->setFlags(Event::AutoDelete);
// Fire the deferred writeback.  If the LSQ unit has been switched out in the
// meantime (e.g. CPU model switch), the writeback is silently dropped.
54 LSQUnit<Impl>::WritebackEvent::process()
56 if (!lsqPtr->isSwitchedOut()) {
57 lsqPtr->writeback(inst, pkt);
// Human-readable event name used by event-queue debug output.
64 LSQUnit<Impl>::WritebackEvent::description()
66 return "Store writeback event";
// D-cache response handler: recover the LSQ bookkeeping that was stashed in
// the packet's senderState and finish the memory access.
71 LSQUnit<Impl>::completeDataAccess(PacketPtr pkt)
73 LSQSenderState *state = dynamic_cast<LSQSenderState *>(pkt->senderState);
74 DynInstPtr inst = state->inst;
75 DPRINTF(IEW, "Writeback event [sn:%lli]\n", inst->seqNum);
76 DPRINTF(Activity, "Activity: Writeback event [sn:%lli]\n", inst->seqNum);
78 //iewStage->ldstQueue.removeMSHR(inst->threadNumber,inst->seqNum);
// Squashed instructions and switched-out units ignore the response; only
// the IEW writeback-slot count is released.
80 if (isSwitchedOut() || inst->isSquashed()) {
81 iewStage->decrWb(inst->seqNum);
// Stores complete through their store-queue entry (index saved in state).
90 if (inst->isStore()) {
91 completeStore(state->idx);
// Default constructor: all occupancy counters start at zero and no
// stall/blocked conditions are active.  Real setup happens in init().
100 LSQUnit<Impl>::LSQUnit()
101 : loads(0), stores(0), storesToWB(0), stalled(false),
102 isStoreBlocked(false), isLoadBlocked(false),
103 loadBlockedHandled(false)
// Initialize this LSQ unit: size the circular load/store queues (one extra
// sentinel slot each so head==tail unambiguously means empty), reset all
// head/tail indices, and record the cache-port budget from the params.
109 LSQUnit<Impl>::init(Params *params, LSQ *lsq_ptr, unsigned maxLQEntries,
110 unsigned maxSQEntries, unsigned id)
112 DPRINTF(LSQUnit, "Creating LSQUnit%i object.\n",id);
120 // Add 1 for the sentinel entry (they are circular queues).
121 LQEntries = maxLQEntries + 1;
122 SQEntries = maxSQEntries + 1;
124 loadQueue.resize(LQEntries);
125 storeQueue.resize(SQEntries);
127 loadHead = loadTail = 0;
129 storeHead = storeWBIdx = storeTail = 0;
// Number of stores that may be written back to the cache per cycle.
132 cachePorts = params->cachePorts;
134 memDepViolator = NULL;
136 blockedLoadSeqNum = 0;
// Attach the owning CPU.  The visible line hands the d-cache port to the
// checker CPU; presumably this runs only when a checker is configured
// (USE_CHECKER guard not visible in this excerpt -- confirm in full source).
141 LSQUnit<Impl>::setCPU(O3CPU *cpu_ptr)
147 cpu->checker->setDcachePort(dcachePort);
// Stat/debug name for this unit: a bare ".lsq" suffix for single-threaded
// configs, otherwise per-thread (".lsq.thread.<id>").
154 LSQUnit<Impl>::name() const
156 if (Impl::MaxThreads == 1) {
157 return iewStage->name() + ".lsq";
159 return iewStage->name() + ".lsq.thread." + to_string(lsqID);
// Register this unit's statistics with the stats framework.  Each stat is
// given a name scoped under this unit's name() and a description.
165 LSQUnit<Impl>::regStats()
168 .name(name() + ".forwLoads")
169 .desc("Number of loads that had data forwarded from stores");
172 .name(name() + ".invAddrLoads")
173 .desc("Number of loads ignored due to an invalid address");
176 .name(name() + ".squashedLoads")
177 .desc("Number of loads squashed");
180 .name(name() + ".ignoredResponses")
181 .desc("Number of memory responses ignored because the instruction is squashed");
184 .name(name() + ".squashedStores")
185 .desc("Number of stores squashed");
188 .name(name() + ".invAddrSwpfs")
189 .desc("Number of software prefetches ignored due to an invalid address");
192 .name(name() + ".blockedLoads")
193 .desc("Number of blocked loads due to partial load-store forwarding");
196 .name(name() + ".rescheduledLoads")
197 .desc("Number of loads that were rescheduled");
200 .name(name() + ".cacheBlocked")
201 .desc("Number of times an access to memory failed due to the cache being blocked");
// Clear the load queue (body not visible in this excerpt -- presumably a
// loadQueue.clear(); confirm against the full source).
206 LSQUnit<Impl>::clearLQ()
// Clear the store queue (body not visible in this excerpt -- presumably a
// storeQueue.clear(); confirm against the full source).
213 LSQUnit<Impl>::clearSQ()
// Prepare for a CPU model switch: walk the load queue (per-entry action not
// visible here) and require that every store has already been written back.
220 LSQUnit<Impl>::switchOut()
223 for (int i = 0; i < loadQueue.size(); ++i)
// All committed stores must have drained to memory before switching out.
226 assert(storesToWB == 0);
// Reinitialize state after taking over from another CPU model: empty
// queues, reset indices, and clear all blocked/violation tracking.
231 LSQUnit<Impl>::takeOverFrom()
234 loads = stores = storesToWB = 0;
236 loadHead = loadTail = 0;
238 storeHead = storeWBIdx = storeTail = 0;
242 memDepViolator = NULL;
244 blockedLoadSeqNum = 0;
247 isLoadBlocked = false;
248 loadBlockedHandled = false;
// Grow the load queue to hold 'size' real entries (+1 sentinel).  The
// assert forbids shrinking; growth appends default entries until the
// backing vector reaches the new capacity.
253 LSQUnit<Impl>::resizeLQ(unsigned size)
255 unsigned size_plus_sentinel = size + 1;
256 assert(size_plus_sentinel >= LQEntries);
258 if (size_plus_sentinel > LQEntries) {
259 while (size_plus_sentinel > loadQueue.size()) {
261 loadQueue.push_back(dummy);
265 LQEntries = size_plus_sentinel;
// Grow the store queue to hold 'size' real entries (+1 sentinel), mirroring
// resizeLQ.  NOTE(review): no shrink-guard assert is visible here, unlike
// resizeLQ -- it may exist on a line missing from this excerpt; confirm.
272 LSQUnit<Impl>::resizeSQ(unsigned size)
274 unsigned size_plus_sentinel = size + 1;
275 if (size_plus_sentinel > SQEntries) {
276 while (size_plus_sentinel > storeQueue.size()) {
278 storeQueue.push_back(dummy);
282 SQEntries = size_plus_sentinel;
286 template <class Impl>
// Dispatch-time insertion: route a memory-reference instruction to the load
// queue (store path not visible in this excerpt).
288 LSQUnit<Impl>::insert(DynInstPtr &inst)
// Only genuine memory references may enter the LSQ.
290 assert(inst->isMemRef());
292 assert(inst->isLoad() || inst->isStore());
294 if (inst->isLoad()) {
303 template <class Impl>
// Insert a load at the LQ tail.  Records the instruction's LQ index and the
// SQ tail at insertion time (sqIdx) so later ordering checks know which
// stores were older; -1 apparently marks "no older stores" (confirm --
// surrounding condition is missing from this excerpt).
305 LSQUnit<Impl>::insertLoad(DynInstPtr &load_inst)
// Circular-queue full check: tail may not advance onto head.
307 assert((loadTail + 1) % LQEntries != loadHead);
308 assert(loads < LQEntries);
310 DPRINTF(LSQUnit, "Inserting load PC %#x, idx:%i [sn:%lli]\n",
311 load_inst->readPC(), loadTail, load_inst->seqNum);
313 load_inst->lqIdx = loadTail;
316 load_inst->sqIdx = -1;
318 load_inst->sqIdx = storeTail;
321 loadQueue[loadTail] = load_inst;
328 template <class Impl>
// Insert a store at the SQ tail and advance the tail.  The store also
// records the current LQ tail (lqIdx) so executeStore() can scan only the
// loads that are younger than it.
330 LSQUnit<Impl>::insertStore(DynInstPtr &store_inst)
332 // Make sure it is not full before inserting an instruction.
333 assert((storeTail + 1) % SQEntries != storeHead);
334 assert(stores < SQEntries);
336 DPRINTF(LSQUnit, "Inserting store PC %#x, idx:%i [sn:%lli]\n",
337 store_inst->readPC(), storeTail, store_inst->seqNum);
339 store_inst->sqIdx = storeTail;
340 store_inst->lqIdx = loadTail;
// A fresh SQEntry wraps the instruction with its canWB/committed/completed
// bookkeeping flags.
342 storeQueue[storeTail] = SQEntry(store_inst);
344 incrStIdx(storeTail);
349 template <class Impl>
350 typename Impl::DynInstPtr
// Return the recorded memory-order violator and clear it (read-once
// semantics): the caller consumes the violation.
351 LSQUnit<Impl>::getMemDepViolator()
353 DynInstPtr temp = memDepViolator;
355 memDepViolator = NULL;
360 template <class Impl>
// Report free LSQ capacity as the smaller of the LQ and SQ headroom, minus
// one to account for the sentinel slot that must never be occupied.
362 LSQUnit<Impl>::numFreeEntries()
364 unsigned free_lq_entries = LQEntries - loads;
365 unsigned free_sq_entries = SQEntries - stores;
367 // Both the LQ and SQ entries have an extra dummy entry to differentiate
368 // empty/full conditions. Subtract 1 from the free entries.
369 if (free_lq_entries < free_sq_entries) {
370 return free_lq_entries - 1;
372 return free_sq_entries - 1;
376 template <class Impl>
// Count loads between head and tail that report readyToIssue().
// (Counter increment/return lines are missing from this excerpt.)
378 LSQUnit<Impl>::numLoadsReady()
380 int load_idx = loadHead;
383 while (load_idx != loadTail) {
// Every occupied LQ slot must hold a valid instruction.
384 assert(loadQueue[load_idx]);
386 if (loadQueue[load_idx]->readyToIssue()) {
394 template <class Impl>
// Execute a load: kick off its memory access via initiateAcc().  On a
// fault, the instruction is forwarded to commit without completing so the
// fault can be handled at retirement.
396 LSQUnit<Impl>::executeLoad(DynInstPtr &inst)
398 // Execute a specific load.
399 Fault load_fault = NoFault;
401 DPRINTF(LSQUnit, "Executing load PC %#x, [sn:%lli]\n",
402 inst->readPC(),inst->seqNum);
404 load_fault = inst->initiateAcc();
406 // If the instruction faulted, then we need to send it along to commit
407 // without the instruction completing.
408 if (load_fault != NoFault) {
409 // Send this instruction to commit, also make sure iew stage
410 // realizes there is activity.
411 iewStage->instToCommit(inst);
412 iewStage->activityThisCycle();
418 template <class Impl>
// Execute a store: initiate its access, then scan younger loads (from the
// store's recorded lqIdx to the LQ tail) for a memory-order violation --
// a load that already executed to an overlapping address.
420 LSQUnit<Impl>::executeStore(DynInstPtr &store_inst)
422 using namespace TheISA;
423 // Make sure that a store exists.
426 int store_idx = store_inst->sqIdx;
428 DPRINTF(LSQUnit, "Executing store PC %#x [sn:%lli]\n",
429 store_inst->readPC(), store_inst->seqNum);
431 // Check the recently completed loads to see if any match this store's
432 // address. If so, then we have a memory ordering violation.
433 int load_idx = store_inst->lqIdx;
435 Fault store_fault = store_inst->initiateAcc();
// A zero-size store wrote no data; treated as a special (fault-logged) case.
437 if (storeQueue[store_idx].size == 0) {
438 DPRINTF(LSQUnit,"Fault on Store PC %#x, [sn:%lli],Size = 0\n",
439 store_inst->readPC(),store_inst->seqNum);
444 assert(store_fault == NoFault);
446 if (store_inst->isStoreConditional()) {
447 // Store conditionals need to set themselves as able to
448 // writeback if we haven't had a fault by here.
449 storeQueue[store_idx].canWB = true;
// Only the first (oldest) violation is tracked; skip the scan if one is
// already pending.
454 if (!memDepViolator) {
455 while (load_idx != loadTail) {
456 // Really only need to check loads that have actually executed
457 // It's safe to check all loads because effAddr is set to
458 // InvalAddr when the dyn inst is created.
460 // @todo: For now this is extra conservative, detecting a
461 // violation if the addresses match assuming all accesses
462 // are quad word accesses.
464 // @todo: Fix this, magic number being used here
// NOTE(review): >>8 compares 256-byte blocks -- coarser than the
// "quad word" granularity the comment above claims.
465 if ((loadQueue[load_idx]->effAddr >> 8) ==
466 (store_inst->effAddr >> 8)) {
467 // A load incorrectly passed this store. Squash and refetch.
468 // For now return a fault to show that it was unsuccessful.
469 memDepViolator = loadQueue[load_idx];
471 return genMachineCheckFault();
477 // If we've reached this point, there was no violation.
478 memDepViolator = NULL;
484 template <class Impl>
// Commit the load at the LQ head: release the entry (clearing the smart
// pointer drops the ref count).  Head/counter updates are on lines missing
// from this excerpt.
486 LSQUnit<Impl>::commitLoad()
488 assert(loadQueue[loadHead]);
490 DPRINTF(LSQUnit, "Committing head load instruction, PC %#x\n",
491 loadQueue[loadHead]->readPC());
493 loadQueue[loadHead] = NULL;
500 template <class Impl>
// Commit every load at or older than youngest_inst, walking from the LQ
// head while loads remain.
502 LSQUnit<Impl>::commitLoads(InstSeqNum &youngest_inst)
504 assert(loads == 0 || loadQueue[loadHead]);
506 while (loads != 0 && loadQueue[loadHead]->seqNum <= youngest_inst) {
513 template <class Impl>
// Walk the store queue head-to-tail marking committed stores (seqNum <=
// youngest_inst) as able to write back.  Stores already marked canWB are
// skipped; the walk's early-exit on a too-young store is on a missing line.
515 LSQUnit<Impl>::commitStores(InstSeqNum &youngest_inst)
517 assert(stores == 0 || storeQueue[storeHead].inst);
519 int store_idx = storeHead;
521 while (store_idx != storeTail) {
522 assert(storeQueue[store_idx].inst);
523 // Mark any stores that are now committed and have not yet
524 // been marked as able to write back.
525 if (!storeQueue[store_idx].canWB) {
526 if (storeQueue[store_idx].inst->seqNum > youngest_inst) {
529 DPRINTF(LSQUnit, "Marking store as able to write back, PC "
531 storeQueue[store_idx].inst->readPC(),
532 storeQueue[store_idx].inst->seqNum);
534 storeQueue[store_idx].canWB = true;
539 incrStIdx(store_idx);
543 template <class Impl>
// Drain committed (canWB) stores from storeWBIdx toward the tail, sending
// each to the d-cache, limited by the per-cycle cache-port budget and
// stopped by any cache-blocked condition.  Several branch bodies are on
// lines missing from this excerpt.
545 LSQUnit<Impl>::writebackStores()
547 while (storesToWB > 0 &&
548 storeWBIdx != storeTail &&
549 storeQueue[storeWBIdx].inst &&
550 storeQueue[storeWBIdx].canWB &&
551 usedPorts < cachePorts) {
// Stop immediately if a previous store is awaiting retry or the cache
// is blocked at the LSQ level.
553 if (isStoreBlocked || lsq->cacheBlocked()) {
554 DPRINTF(LSQUnit, "Unable to write back any more stores, cache"
559 // Store didn't write any data so no need to write it back to
561 if (storeQueue[storeWBIdx].size == 0) {
562 completeStore(storeWBIdx);
564 incrStIdx(storeWBIdx);
// Data prefetches are skipped without a cache access.
571 if (storeQueue[storeWBIdx].inst->isDataPrefetch()) {
572 incrStIdx(storeWBIdx);
577 assert(storeQueue[storeWBIdx].req);
578 assert(!storeQueue[storeWBIdx].committed);
580 DynInstPtr inst = storeQueue[storeWBIdx].inst;
582 Request *req = storeQueue[storeWBIdx].req;
583 storeQueue[storeWBIdx].committed = true;
// Copy the store's data into a packet-owned buffer.  NOTE(review): a
// fixed 64-byte buffer is allocated regardless of the store's size --
// confirm the memcpy length (on the missing line) bounds this safely.
585 assert(!inst->memData);
586 inst->memData = new uint8_t[64];
587 memcpy(inst->memData, (uint8_t *)&storeQueue[storeWBIdx].data,
590 PacketPtr data_pkt = new Packet(req, Packet::WriteReq, Packet::Broadcast);
591 data_pkt->dataStatic(inst->memData);
// Sender state lets completeDataAccess() find this SQ entry on response.
593 LSQSenderState *state = new LSQSenderState;
594 state->isLoad = false;
595 state->idx = storeWBIdx;
597 data_pkt->senderState = state;
599 DPRINTF(LSQUnit, "D-Cache: Writing back store idx:%i PC:%#x "
600 "to Addr:%#x, data:%#x [sn:%lli]\n",
601 storeWBIdx, storeQueue[storeWBIdx].inst->readPC(),
602 req->getPaddr(), *(inst->memData),
603 storeQueue[storeWBIdx].inst->seqNum);
605 // @todo: Remove this SC hack once the memory system handles it.
606 if (req->getFlags() & LOCKED) {
607 if (req->getFlags() & UNCACHEABLE) {
// Locked (store-conditional) hack path: complete without a real
// cache round trip.
614 // Hack: Instantly complete this store.
615 completeDataAccess(data_pkt);
616 incrStIdx(storeWBIdx);
621 // Non-store conditionals do not need a writeback.
// A failed timing send blocks the store until recvRetry().
625 if (!dcachePort->sendTiming(data_pkt)) {
626 // Need to handle becoming blocked on a store.
627 isStoreBlocked = true;
629 assert(retryPkt == NULL);
632 storePostSend(data_pkt);
636 // Not sure this should set it to 0.
639 assert(stores >= 0 && storesToWB >= 0);
// Dead code: removeMSHR is entirely commented out in the original source
// (MSHR tracking was retired); kept for historical reference.
640 /*template <class Impl>
642 LSQUnit<Impl>::removeMSHR(InstSeqNum seqNum)
644 list<InstSeqNum>::iterator mshr_it = find(mshrSeqNums.begin(),
648 if (mshr_it != mshrSeqNums.end()) {
649 mshrSeqNums.erase(mshr_it);
650 DPRINTF(LSQUnit, "Removing MSHR. count = %i\n",mshrSeqNums.size());
654 template <class Impl>
// Squash all LSQ instructions younger than squashed_num: walk both queues
// from the tail toward the head, clearing entries, releasing smart-pointer
// references, and undoing any stall/blocked state those entries caused.
656 LSQUnit<Impl>::squash(const InstSeqNum &squashed_num)
658 DPRINTF(LSQUnit, "Squashing until [sn:%lli]!"
659 "(Loads:%i Stores:%i)\n", squashed_num, loads, stores);
// --- Load queue: squash from the tail ---
661 int load_idx = loadTail;
664 while (loads != 0 && loadQueue[load_idx]->seqNum > squashed_num) {
665 DPRINTF(LSQUnit,"Load Instruction PC %#x squashed, "
667 loadQueue[load_idx]->readPC(),
668 loadQueue[load_idx]->seqNum);
// If the stalling load itself is squashed, clear the stall state.
670 if (isStalled() && load_idx == stallingLoadIdx) {
672 stallingStoreIsn = 0;
676 // Clear the smart pointer to make sure it is decremented.
677 loadQueue[load_idx]->setSquashed();
678 loadQueue[load_idx] = NULL;
// A blocked load younger than the squash point is no longer blocked.
689 if (squashed_num < blockedLoadSeqNum) {
690 isLoadBlocked = false;
691 loadBlockedHandled = false;
692 blockedLoadSeqNum = 0;
// --- Store queue: squash from the tail ---
696 int store_idx = storeTail;
697 decrStIdx(store_idx);
699 while (stores != 0 &&
700 storeQueue[store_idx].inst->seqNum > squashed_num) {
701 // Instructions marked as can WB are already committed.
702 if (storeQueue[store_idx].canWB) {
706 DPRINTF(LSQUnit,"Store Instruction PC %#x squashed, "
707 "idx:%i [sn:%lli]\n",
708 storeQueue[store_idx].inst->readPC(),
709 store_idx, storeQueue[store_idx].inst->seqNum);
711 // I don't think this can happen. It should have been cleared
712 // by the stalling load.
714 storeQueue[store_idx].inst->seqNum == stallingStoreIsn) {
715 panic("Is stalled should have been cleared by stalling load!\n");
717 stallingStoreIsn = 0;
720 // Clear the smart pointer to make sure it is decremented.
721 storeQueue[store_idx].inst->setSquashed();
722 storeQueue[store_idx].inst = NULL;
723 storeQueue[store_idx].canWB = 0;
725 storeQueue[store_idx].req = NULL;
// Pull the tail back past the squashed entry.
729 storeTail = store_idx;
731 decrStIdx(store_idx);
736 template <class Impl>
// Bookkeeping after a store packet was handed to the d-cache: release any
// load stalled behind this store, early-complete non-store-conditionals
// (their result is already determined), notify the checker, log hit/miss,
// and advance the writeback index.
738 LSQUnit<Impl>::storePostSend(Packet *pkt)
// If this was the store a load was stalled on, replay that load.
741 storeQueue[storeWBIdx].inst->seqNum == stallingStoreIsn) {
742 DPRINTF(LSQUnit, "Unstalling, stalling store [sn:%lli] "
744 stallingStoreIsn, stallingLoadIdx);
746 stallingStoreIsn = 0;
747 iewStage->replayMemInst(loadQueue[stallingLoadIdx]);
750 if (!storeQueue[storeWBIdx].inst->isStoreConditional()) {
751 // The store is basically completed at this time. This
752 // only works so long as the checker doesn't try to
753 // verify the value in memory for stores.
754 storeQueue[storeWBIdx].inst->setCompleted();
757 cpu->checker->verify(storeQueue[storeWBIdx].inst);
// Log whether the write hit or missed (pkt->result from the send).
762 if (pkt->result != Packet::Success) {
763 DPRINTF(LSQUnit,"D-Cache Write Miss on idx:%i!\n",
766 DPRINTF(Activity, "Active st accessing mem miss [sn:%lli]\n",
767 storeQueue[storeWBIdx].inst->seqNum);
769 //mshrSeqNums.push_back(storeQueue[storeWBIdx].inst->seqNum);
771 //DPRINTF(LSQUnit, "Added MSHR. count = %i\n",mshrSeqNums.size());
773 // @todo: Increment stat here.
775 DPRINTF(LSQUnit,"D-Cache: Write Hit on idx:%i !\n",
778 DPRINTF(Activity, "Active st accessing mem hit [sn:%lli]\n",
779 storeQueue[storeWBIdx].inst->seqNum);
782 incrStIdx(storeWBIdx);
785 template <class Impl>
// Finish a memory instruction's writeback: squashed instructions release
// their writeback slot and are counted as ignored responses; otherwise the
// access is completed (data copied into place) and the instruction is
// forwarded to commit.
787 LSQUnit<Impl>::writeback(DynInstPtr &inst, PacketPtr pkt)
791 // Squashed instructions do not need to complete their access.
792 if (inst->isSquashed()) {
793 iewStage->decrWb(inst->seqNum);
// Stores are not expected on this squashed path.
794 assert(!inst->isStore());
795 ++lsqIgnoredResponses;
// Only instructions not yet executed need completeAcc() to deposit data.
799 if (!inst->isExecuted()) {
802 // Complete access to copy data to proper place.
803 inst->completeAcc(pkt);
806 // Need to insert instruction into queue to commit
807 iewStage->instToCommit(inst);
809 iewStage->activityThisCycle();
812 template <class Impl>
// Mark a store-queue entry completed.  If it is the head, retire it (and
// any contiguous completed successors) from the queue, un-stall any load
// waiting on it, mark the instruction completed, and notify the checker.
814 LSQUnit<Impl>::completeStore(int store_idx)
816 assert(storeQueue[store_idx].inst);
817 storeQueue[store_idx].completed = true;
819 // A bit conservative because a store completion may not free up entries,
820 // but hopefully avoids two store completions in one cycle from making
821 // the CPU tick twice.
822 cpu->activityThisCycle();
// Retire from the head: stores may complete out of order, so drain every
// leading entry already marked completed.
824 if (store_idx == storeHead) {
826 incrStIdx(storeHead);
829 } while (storeQueue[storeHead].completed &&
830 storeHead != storeTail);
832 iewStage->updateLSQNextCycle = true;
835 DPRINTF(LSQUnit, "Completing store [sn:%lli], idx:%i, store head "
837 storeQueue[store_idx].inst->seqNum, store_idx, storeHead);
// Replay the load that was stalled behind this store, if any.
840 storeQueue[store_idx].inst->seqNum == stallingStoreIsn) {
841 DPRINTF(LSQUnit, "Unstalling, stalling store [sn:%lli] "
843 stallingStoreIsn, stallingLoadIdx);
845 stallingStoreIsn = 0;
846 iewStage->replayMemInst(loadQueue[stallingLoadIdx]);
849 storeQueue[store_idx].inst->setCompleted();
851 // Tell the checker we've completed this instruction. Some stores
852 // may get reported twice to the checker, but the checker can
856 cpu->checker->verify(storeQueue[store_idx].inst);
861 template <class Impl>
// D-cache retry callback.  If a store was blocked, resend its saved packet;
// on success run the normal post-send path and clear the block, otherwise
// re-register this thread for another retry.  Blocked loads need no resend
// because a blocked load squashes itself and all younger instructions.
863 LSQUnit<Impl>::recvRetry()
865 if (isStoreBlocked) {
866 assert(retryPkt != NULL);
868 if (dcachePort->sendTiming(retryPkt)) {
869 storePostSend(retryPkt);
871 isStoreBlocked = false;
// Still blocked: ask the LSQ to retry this thread again later.
875 lsq->setRetryTid(lsqID);
877 } else if (isLoadBlocked) {
878 DPRINTF(LSQUnit, "Loads squash themselves and all younger insts, "
879 "no need to resend packet.\n");
881 DPRINTF(LSQUnit, "Retry received but LSQ is no longer blocked.\n");
885 template <class Impl>
// Advance a store-queue index with wrap-around (reset line not visible).
887 LSQUnit<Impl>::incrStIdx(int &store_idx)
889 if (++store_idx >= SQEntries)
893 template <class Impl>
// Decrement a store-queue index with wrap-around: SQEntries is added back
// when the index underflows (the guarding decrement is on a missing line).
895 LSQUnit<Impl>::decrStIdx(int &store_idx)
898 store_idx += SQEntries;
901 template <class Impl>
// Advance a load-queue index with wrap-around (reset line not visible).
903 LSQUnit<Impl>::incrLdIdx(int &load_idx)
905 if (++load_idx >= LQEntries)
909 template <class Impl>
// Decrement a load-queue index with wrap-around: LQEntries is added back
// when the index underflows (the guarding decrement is on a missing line).
911 LSQUnit<Impl>::decrLdIdx(int &load_idx)
914 load_idx += LQEntries;
917 template <class Impl>
// Debug helper: print the PC of every instruction currently in the load
// and store queues, head to tail.
919 LSQUnit<Impl>::dumpInsts()
921 cprintf("Load store queue: Dumping instructions.\n");
922 cprintf("Load queue size: %i\n", loads);
923 cprintf("Load queue: ");
925 int load_idx = loadHead;
927 while (load_idx != loadTail && loadQueue[load_idx]) {
928 cprintf("%#x ", loadQueue[load_idx]->readPC());
933 cprintf("Store queue size: %i\n", stores);
934 cprintf("Store queue: ");
936 int store_idx = storeHead;
938 while (store_idx != storeTail && storeQueue[store_idx].inst) {
939 cprintf("%#x ", storeQueue[store_idx].inst->readPC());
941 incrStIdx(store_idx);