cpu-o3: Add cache read ports limit to LSQ
[gem5.git] / src / cpu / o3 / lsq_unit_impl.hh
/*
 * Copyright (c) 2010-2014, 2017-2018 ARM Limited
 * Copyright (c) 2013 Advanced Micro Devices, Inc.
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2004-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Kevin Lim
 *          Korey Sewell
 */

#ifndef __CPU_O3_LSQ_UNIT_IMPL_HH__
#define __CPU_O3_LSQ_UNIT_IMPL_HH__

#include "arch/generic/debugfaults.hh"
#include "arch/locked_mem.hh"
#include "base/str.hh"
#include "config/the_isa.hh"
#include "cpu/checker/cpu.hh"
#include "cpu/o3/lsq.hh"
#include "cpu/o3/lsq_unit.hh"
#include "debug/Activity.hh"
#include "debug/IEW.hh"
#include "debug/LSQUnit.hh"
#include "debug/O3PipeView.hh"
#include "mem/packet.hh"
#include "mem/request.hh"

template<class Impl>
LSQUnit<Impl>::WritebackEvent::WritebackEvent(const DynInstPtr &_inst,
        PacketPtr _pkt, LSQUnit *lsq_ptr)
    : Event(Default_Pri, AutoDelete),
      inst(_inst), pkt(_pkt), lsqPtr(lsq_ptr)
{
    assert(_inst->savedReq);
    _inst->savedReq->writebackScheduled();
}

template<class Impl>
void
LSQUnit<Impl>::WritebackEvent::process()
{
    assert(!lsqPtr->cpu->switchedOut());

    lsqPtr->writeback(inst, pkt);

    assert(inst->savedReq);
    inst->savedReq->writebackDone();
    delete pkt;
}

template<class Impl>
const char *
LSQUnit<Impl>::WritebackEvent::description() const
{
    return "Store writeback";
}

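/**
 * Handle a timing-mode response from the D-cache: hand the packet back to
 * the LSQRequest that issued it, or, if the request has since been squashed
 * and is no longer alive, just decrement its outstanding packet count.
 */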
template <class Impl>
bool
LSQUnit<Impl>::recvTimingResp(PacketPtr pkt)
{
    auto senderState = dynamic_cast<LSQSenderState*>(pkt->senderState);
    LSQRequest* req = senderState->request();
    assert(req != nullptr);
    bool ret = true;
    /* Check that the request is still alive before any further action. */
    if (senderState->alive()) {
        ret = req->recvTimingResp(pkt);
    } else {
        senderState->outstanding--;
    }
    return ret;
}

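/**
 * Called when a memory access completes. Loads, store conditionals and
 * atomics write their result back to the register file here; plain stores
 * only need to be marked complete in the store queue.
 */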
template<class Impl>
void
LSQUnit<Impl>::completeDataAccess(PacketPtr pkt)
{
    LSQSenderState *state = dynamic_cast<LSQSenderState *>(pkt->senderState);
    DynInstPtr inst = state->inst;

    cpu->ppDataAccessComplete->notify(std::make_pair(inst, pkt));

    /* Notify the sender state that the access is complete (for ownership
     * tracking). */
    state->complete();

    assert(!cpu->switchedOut());
    if (!inst->isSquashed()) {
        if (state->needWB) {
            // Only loads, store conditionals and atomics perform the
            // writeback after receiving the response from memory.
            assert(inst->isLoad() || inst->isStoreConditional() ||
                   inst->isAtomic());
            writeback(inst, state->request()->mainPacket());
            if (inst->isStore() || inst->isAtomic()) {
                auto ss = dynamic_cast<SQSenderState*>(state);
                ss->writebackDone();
                completeStore(ss->idx);
            }
        } else if (inst->isStore()) {
            // This is a regular store (i.e., not a store conditional or
            // atomic), so it can complete without writing back.
            completeStore(dynamic_cast<SQSenderState*>(state)->idx);
        }
    }
}

template <class Impl>
LSQUnit<Impl>::LSQUnit(uint32_t lqEntries, uint32_t sqEntries)
    : lsqID(-1), storeQueue(sqEntries+1), loadQueue(lqEntries+1),
      loads(0), stores(0), storesToWB(0), cacheBlockMask(0), stalled(false),
      isStoreBlocked(false), storeInFlight(false), hasPendingRequest(false),
      pendingRequest(nullptr)
{
}

template<class Impl>
void
LSQUnit<Impl>::init(O3CPU *cpu_ptr, IEW *iew_ptr, DerivO3CPUParams *params,
        LSQ *lsq_ptr, unsigned id)
{
    lsqID = id;

    cpu = cpu_ptr;
    iewStage = iew_ptr;

    lsq = lsq_ptr;

    DPRINTF(LSQUnit, "Creating LSQUnit%i object.\n", lsqID);

    depCheckShift = params->LSQDepCheckShift;
    checkLoads = params->LSQCheckLoads;
    needsTSO = params->needsTSO;

    resetState();
}

template<class Impl>
void
LSQUnit<Impl>::resetState()
{
    loads = stores = storesToWB = 0;

    storeWBIt = storeQueue.begin();

    retryPkt = NULL;
    memDepViolator = NULL;

    stalled = false;

    cacheBlockMask = ~(cpu->cacheLineSize() - 1);
}

template<class Impl>
std::string
LSQUnit<Impl>::name() const
{
    if (Impl::MaxThreads == 1) {
        return iewStage->name() + ".lsq";
    } else {
        return iewStage->name() + ".lsq.thread" + std::to_string(lsqID);
    }
}

template<class Impl>
void
LSQUnit<Impl>::regStats()
{
    lsqForwLoads
        .name(name() + ".forwLoads")
        .desc("Number of loads that had data forwarded from stores");

    invAddrLoads
        .name(name() + ".invAddrLoads")
        .desc("Number of loads ignored due to an invalid address");

    lsqSquashedLoads
        .name(name() + ".squashedLoads")
        .desc("Number of loads squashed");

    lsqIgnoredResponses
        .name(name() + ".ignoredResponses")
        .desc("Number of memory responses ignored because the instruction is squashed");

    lsqMemOrderViolation
        .name(name() + ".memOrderViolation")
        .desc("Number of memory ordering violations");

    lsqSquashedStores
        .name(name() + ".squashedStores")
        .desc("Number of stores squashed");

    invAddrSwpfs
        .name(name() + ".invAddrSwpfs")
        .desc("Number of software prefetches ignored due to an invalid address");

    lsqBlockedLoads
        .name(name() + ".blockedLoads")
        .desc("Number of blocked loads due to partial load-store forwarding");

    lsqRescheduledLoads
        .name(name() + ".rescheduledLoads")
        .desc("Number of loads that were rescheduled");

    lsqCacheBlocked
        .name(name() + ".cacheBlocked")
        .desc("Number of times an access to memory failed due to the cache being blocked");
}

template<class Impl>
void
LSQUnit<Impl>::setDcachePort(MasterPort *dcache_port)
{
    dcachePort = dcache_port;
}

template<class Impl>
void
LSQUnit<Impl>::drainSanityCheck() const
{
    for (int i = 0; i < loadQueue.capacity(); ++i)
        assert(!loadQueue[i].valid());

    assert(storesToWB == 0);
    assert(!retryPkt);
}

template<class Impl>
void
LSQUnit<Impl>::takeOverFrom()
{
    resetState();
}

template <class Impl>
void
LSQUnit<Impl>::insert(const DynInstPtr &inst)
{
    assert(inst->isMemRef());

    assert(inst->isLoad() || inst->isStore() || inst->isAtomic());

    if (inst->isLoad()) {
        insertLoad(inst);
    } else {
        insertStore(inst);
    }

    inst->setInLSQ();
}

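/**
 * Insert a load at the tail of the load queue. The queue is a circular
 * buffer with one extra (dummy) slot so that full and empty states can be
 * distinguished.
 */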
template <class Impl>
void
LSQUnit<Impl>::insertLoad(const DynInstPtr &load_inst)
{
    assert(!loadQueue.full());
    assert(loads < loadQueue.capacity());

    DPRINTF(LSQUnit, "Inserting load PC %s, idx:%i [sn:%lli]\n",
            load_inst->pcState(), loadQueue.tail(), load_inst->seqNum);

    /* Grow the queue. */
    loadQueue.advance_tail();

    load_inst->sqIt = storeQueue.end();

    assert(!loadQueue.back().valid());
    loadQueue.back().set(load_inst);
    load_inst->lqIdx = loadQueue.tail();
    load_inst->lqIt = loadQueue.getIterator(load_inst->lqIdx);

    ++loads;
}

template <class Impl>
void
LSQUnit<Impl>::insertStore(const DynInstPtr& store_inst)
{
    // Make sure it is not full before inserting an instruction.
    assert(!storeQueue.full());
    assert(stores < storeQueue.capacity());

    DPRINTF(LSQUnit, "Inserting store PC %s, idx:%i [sn:%lli]\n",
            store_inst->pcState(), storeQueue.tail(), store_inst->seqNum);
    storeQueue.advance_tail();

    store_inst->sqIdx = storeQueue.tail();
    store_inst->lqIdx = loadQueue.moduloAdd(loadQueue.tail(), 1);
    store_inst->lqIt = loadQueue.end();

    storeQueue.back().set(store_inst);

    ++stores;
}

template <class Impl>
typename Impl::DynInstPtr
LSQUnit<Impl>::getMemDepViolator()
{
    DynInstPtr temp = memDepViolator;

    memDepViolator = NULL;

    return temp;
}

template <class Impl>
unsigned
LSQUnit<Impl>::numFreeLoadEntries()
{
    // LQ has an extra dummy entry to differentiate
    // empty/full conditions. Subtract 1 from the free entries.
    DPRINTF(LSQUnit, "LQ size: %d, #loads occupied: %d\n",
            1 + loadQueue.capacity(), loads);
    return loadQueue.capacity() - loads;
}

template <class Impl>
unsigned
LSQUnit<Impl>::numFreeStoreEntries()
{
    // SQ has an extra dummy entry to differentiate
    // empty/full conditions. Subtract 1 from the free entries.
    DPRINTF(LSQUnit, "SQ size: %d, #stores occupied: %d\n",
            1 + storeQueue.capacity(), stores);
    return storeQueue.capacity() - stores;
}

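/**
 * Examine an incoming invalidation snoop. The snoop may clear LL/SC lock
 * flags and, for loads in the queue that touch the invalidated cache block,
 * either mark them for re-execution or record that an external snoop hit
 * them so a later violation check can squash if needed.
 */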
template <class Impl>
void
LSQUnit<Impl>::checkSnoop(PacketPtr pkt)
{
    // Should only ever get invalidations in here
    assert(pkt->isInvalidate());

    DPRINTF(LSQUnit, "Got snoop for address %#x\n", pkt->getAddr());

    for (int x = 0; x < cpu->numContexts(); x++) {
        ThreadContext *tc = cpu->getContext(x);
        bool no_squash = cpu->thread[x]->noSquashFromTC;
        cpu->thread[x]->noSquashFromTC = true;
        TheISA::handleLockedSnoop(tc, pkt, cacheBlockMask);
        cpu->thread[x]->noSquashFromTC = no_squash;
    }

    if (loadQueue.empty())
        return;

    auto iter = loadQueue.begin();

    Addr invalidate_addr = pkt->getAddr() & cacheBlockMask;

    DynInstPtr ld_inst = iter->instruction();
    assert(ld_inst);
    LSQRequest *req = iter->request();

    // Check that this snoop didn't just invalidate our lock flag
    if (ld_inst->effAddrValid() &&
        req->isCacheBlockHit(invalidate_addr, cacheBlockMask) &&
        ld_inst->memReqFlags & Request::LLSC)
        TheISA::handleLockedSnoopHit(ld_inst.get());

    bool force_squash = false;

    while (++iter != loadQueue.end()) {
        ld_inst = iter->instruction();
        assert(ld_inst);
        req = iter->request();
        if (!ld_inst->effAddrValid() || ld_inst->strictlyOrdered())
            continue;

        DPRINTF(LSQUnit, "-- inst [sn:%lli] to pktAddr:%#x\n",
                ld_inst->seqNum, invalidate_addr);

        if (force_squash ||
            req->isCacheBlockHit(invalidate_addr, cacheBlockMask)) {
            if (needsTSO) {
                // If we have a TSO system, as all loads must be ordered with
                // all other loads, this load as well as *all* subsequent
                // loads need to be squashed to prevent possible load
                // reordering.
                force_squash = true;
            }
            if (ld_inst->possibleLoadViolation() || force_squash) {
                DPRINTF(LSQUnit, "Conflicting load at addr %#x [sn:%lli]\n",
                        pkt->getAddr(), ld_inst->seqNum);

                // Mark the load for re-execution
                ld_inst->fault = std::make_shared<ReExec>();
            } else {
                DPRINTF(LSQUnit, "HitExternal Snoop for addr %#x [sn:%lli]\n",
                        pkt->getAddr(), ld_inst->seqNum);

                // Make sure that we don't lose a snoop hitting a LOCKED
                // address since the LOCK* flags don't get updated until
                // commit.
                if (ld_inst->memReqFlags & Request::LLSC)
                    TheISA::handleLockedSnoopHit(ld_inst.get());

                // If an older load checks this flag and it is set, we might
                // have missed the snoop, in which case we need to invalidate
                // to be sure.
                ld_inst->hitExternalSnoop(true);
            }
        }
    }
    return;
}

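/**
 * Scan the load queue from loadIt onwards for loads whose effective address
 * range overlaps that of inst (at depCheckShift granularity). Returns a
 * fault to force a squash when a memory ordering violation is detected;
 * otherwise possible violations are recorded for later snoops.
 */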
template <class Impl>
Fault
LSQUnit<Impl>::checkViolations(typename LoadQueue::iterator& loadIt,
        const DynInstPtr& inst)
{
    Addr inst_eff_addr1 = inst->effAddr >> depCheckShift;
    Addr inst_eff_addr2 = (inst->effAddr + inst->effSize - 1) >> depCheckShift;

    /** @todo In theory you only need to check an instruction that has
     * executed; however, there isn't a good way in the pipeline at the
     * moment to check all instructions that will execute before the store
     * writes back. Thus, like the implementation that came before it, we're
     * overly conservative.
     */
    while (loadIt != loadQueue.end()) {
        DynInstPtr ld_inst = loadIt->instruction();
        if (!ld_inst->effAddrValid() || ld_inst->strictlyOrdered()) {
            ++loadIt;
            continue;
        }

        Addr ld_eff_addr1 = ld_inst->effAddr >> depCheckShift;
        Addr ld_eff_addr2 =
            (ld_inst->effAddr + ld_inst->effSize - 1) >> depCheckShift;

        if (inst_eff_addr2 >= ld_eff_addr1 && inst_eff_addr1 <= ld_eff_addr2) {
            if (inst->isLoad()) {
                // If this load is to the same block as an external snoop
                // invalidate that we've observed then the load needs to be
                // squashed as it could have newer data
                if (ld_inst->hitExternalSnoop()) {
                    if (!memDepViolator ||
                        ld_inst->seqNum < memDepViolator->seqNum) {
                        DPRINTF(LSQUnit, "Detected fault with inst [sn:%lli] "
                                "and [sn:%lli] at address %#x\n",
                                inst->seqNum, ld_inst->seqNum, ld_eff_addr1);
                        memDepViolator = ld_inst;

                        ++lsqMemOrderViolation;

                        return std::make_shared<GenericISA::M5PanicFault>(
                            "Detected fault with inst [sn:%lli] and "
                            "[sn:%lli] at address %#x\n",
                            inst->seqNum, ld_inst->seqNum, ld_eff_addr1);
                    }
                }

                // Otherwise, mark the load as a possible load violation, and
                // if we see a snoop before it's committed, we need to squash
                // it.
                ld_inst->possibleLoadViolation(true);
                DPRINTF(LSQUnit, "Found possible load violation at addr: %#x"
                        " between instructions [sn:%lli] and [sn:%lli]\n",
                        inst_eff_addr1, inst->seqNum, ld_inst->seqNum);
            } else {
                // A load/store incorrectly passed this store. If we already
                // have an older violator, keep it; otherwise record this one,
                // then squash and refetch.
                if (memDepViolator && ld_inst->seqNum > memDepViolator->seqNum)
                    break;

                DPRINTF(LSQUnit, "Detected fault with inst [sn:%lli] and "
                        "[sn:%lli] at address %#x\n",
                        inst->seqNum, ld_inst->seqNum, ld_eff_addr1);
                memDepViolator = ld_inst;

                ++lsqMemOrderViolation;

                return std::make_shared<GenericISA::M5PanicFault>(
                    "Detected fault with "
                    "inst [sn:%lli] and [sn:%lli] at address %#x\n",
                    inst->seqNum, ld_inst->seqNum, ld_eff_addr1);
            }
        }

        ++loadIt;
    }
    return NoFault;
}

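/**
 * Execute a load: initiate its memory access (translation and, if ready,
 * the cache request). Faulted or false-predicated loads are sent straight
 * to commit; otherwise younger loads are checked for ordering violations.
 */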
template <class Impl>
Fault
LSQUnit<Impl>::executeLoad(const DynInstPtr &inst)
{
    using namespace TheISA;
    // Execute a specific load.
    Fault load_fault = NoFault;

    DPRINTF(LSQUnit, "Executing load PC %s, [sn:%lli]\n",
            inst->pcState(), inst->seqNum);

    assert(!inst->isSquashed());

    load_fault = inst->initiateAcc();

    if (inst->isTranslationDelayed() && load_fault == NoFault)
        return load_fault;

    // If the instruction faulted or predicated false, then we need to send it
    // along to commit without the instruction completing.
    if (load_fault != NoFault || !inst->readPredicate()) {
        // Send this instruction to commit, also make sure iew stage
        // realizes there is activity. Mark it as executed unless it
        // is a strictly ordered load that needs to hit the head of
        // commit.
        if (!inst->readPredicate())
            inst->forwardOldRegs();
        DPRINTF(LSQUnit, "Load [sn:%lli] not executed from %s\n",
                inst->seqNum,
                (load_fault != NoFault ? "fault" : "predication"));
        if (!(inst->hasRequest() && inst->strictlyOrdered()) ||
            inst->isAtCommit()) {
            inst->setExecuted();
        }
        iewStage->instToCommit(inst);
        iewStage->activityThisCycle();
    } else {
        if (inst->effAddrValid()) {
            auto it = inst->lqIt;
            ++it;

            if (checkLoads)
                return checkViolations(it, inst);
        }
    }

    return load_fault;
}

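/**
 * Execute a store: initiate its access (address translation), then check
 * loads younger than the store for memory ordering violations. Store
 * conditionals and atomics are marked ready to write back here; regular
 * stores wait until they commit.
 */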
template <class Impl>
Fault
LSQUnit<Impl>::executeStore(const DynInstPtr &store_inst)
{
    using namespace TheISA;
    // Make sure that a store exists.
    assert(stores != 0);

    int store_idx = store_inst->sqIdx;

    DPRINTF(LSQUnit, "Executing store PC %s [sn:%lli]\n",
            store_inst->pcState(), store_inst->seqNum);

    assert(!store_inst->isSquashed());

    // Check the recently completed loads to see if any match this store's
    // address. If so, then we have a memory ordering violation.
    typename LoadQueue::iterator loadIt = store_inst->lqIt;

    Fault store_fault = store_inst->initiateAcc();

    if (store_inst->isTranslationDelayed() &&
        store_fault == NoFault)
        return store_fault;

    if (!store_inst->readPredicate()) {
        DPRINTF(LSQUnit, "Store [sn:%lli] not executed from predication\n",
                store_inst->seqNum);
        store_inst->forwardOldRegs();
        return store_fault;
    }

    if (storeQueue[store_idx].size() == 0) {
        DPRINTF(LSQUnit, "Fault on Store PC %s, [sn:%lli], Size = 0\n",
                store_inst->pcState(), store_inst->seqNum);

        return store_fault;
    }

    assert(store_fault == NoFault);

    if (store_inst->isStoreConditional() || store_inst->isAtomic()) {
        // Store conditionals and atomics need to set themselves as able to
        // writeback if we haven't had a fault by here.
        storeQueue[store_idx].canWB() = true;

        ++storesToWB;
    }

    return checkViolations(loadIt, store_inst);
}

template <class Impl>
void
LSQUnit<Impl>::commitLoad()
{
    assert(loadQueue.front().valid());

    DPRINTF(LSQUnit, "Committing head load instruction, PC %s\n",
            loadQueue.front().instruction()->pcState());

    loadQueue.front().clear();
    loadQueue.pop_front();

    --loads;
}

template <class Impl>
void
LSQUnit<Impl>::commitLoads(InstSeqNum &youngest_inst)
{
    assert(loads == 0 || loadQueue.front().valid());

    while (loads != 0 && loadQueue.front().instruction()->seqNum
           <= youngest_inst) {
        commitLoad();
    }
}

template <class Impl>
void
LSQUnit<Impl>::commitStores(InstSeqNum &youngest_inst)
{
    assert(stores == 0 || storeQueue.front().valid());

    /* Forward iterate the store queue (age order). */
    for (auto& x : storeQueue) {
        assert(x.valid());
        // Mark any stores that are now committed and have not yet
        // been marked as able to write back.
        if (!x.canWB()) {
            if (x.instruction()->seqNum > youngest_inst) {
                break;
            }
            DPRINTF(LSQUnit, "Marking store as able to write back, PC "
                    "%s [sn:%lli]\n",
                    x.instruction()->pcState(),
                    x.instruction()->seqNum);

            x.canWB() = true;

            ++storesToWB;
        }
    }
}

template <class Impl>
void
LSQUnit<Impl>::writebackBlockedStore()
{
    assert(isStoreBlocked);
    storeWBIt->request()->sendPacketToCache();
    if (storeWBIt->request()->isSent()) {
        storePostSend();
    }
}

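/**
 * Write back committed stores to the memory system. Each store consumes a
 * cache store port for the cycle, so the loop only continues while the
 * shared LSQ reports a port available (lsq->cachePortAvailable(false));
 * it also stops if the cache is blocked or, under TSO, while another store
 * is still in flight.
 */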
template <class Impl>
void
LSQUnit<Impl>::writebackStores()
{
    if (isStoreBlocked) {
        DPRINTF(LSQUnit, "Writing back blocked store\n");
        writebackBlockedStore();
    }

    while (storesToWB > 0 &&
           storeWBIt.dereferenceable() &&
           storeWBIt->valid() &&
           storeWBIt->canWB() &&
           ((!needsTSO) || (!storeInFlight)) &&
           lsq->cachePortAvailable(false)) {

        if (isStoreBlocked) {
            DPRINTF(LSQUnit, "Unable to write back any more stores, cache"
                    " is blocked!\n");
            break;
        }

        // Store didn't write any data so no need to write it back to
        // memory.
        if (storeWBIt->size() == 0) {
            /* It is important that the increment happens at (or before)
             * the call, as the code of completeStore checks storeWBIt. */
            completeStore(storeWBIt++);
            continue;
        }

        if (storeWBIt->instruction()->isDataPrefetch()) {
            storeWBIt++;
            continue;
        }

        assert(storeWBIt->hasRequest());
        assert(!storeWBIt->committed());

        DynInstPtr inst = storeWBIt->instruction();
        LSQRequest* req = storeWBIt->request();
        storeWBIt->committed() = true;

        assert(!inst->memData);
        inst->memData = new uint8_t[req->_size];

        if (storeWBIt->isAllZeros())
            memset(inst->memData, 0, req->_size);
        else
            memcpy(inst->memData, storeWBIt->data(), req->_size);

        if (req->senderState() == nullptr) {
            SQSenderState *state = new SQSenderState(storeWBIt);
            state->isLoad = false;
            state->needWB = false;
            state->inst = inst;

            req->senderState(state);
            if (inst->isStoreConditional() || inst->isAtomic()) {
                /* Only store conditionals and atomics need a writeback. */
                state->needWB = true;
            }
        }
        req->buildPackets();

        DPRINTF(LSQUnit, "D-Cache: Writing back store idx:%i PC:%s "
                "to Addr:%#x, data:%#x [sn:%lli]\n",
                storeWBIt.idx(), inst->pcState(),
                req->request()->getPaddr(), (int)*(inst->memData),
                inst->seqNum);

        // @todo: Remove this SC hack once the memory system handles it.
        if (inst->isStoreConditional()) {
            // Disable recording the result temporarily. Writing to
            // misc regs normally updates the result, but this is not
            // the desired behavior when handling store conditionals.
            inst->recordResult(false);
            bool success = TheISA::handleLockedWrite(inst.get(),
                    req->request(), cacheBlockMask);
            inst->recordResult(true);
            req->packetSent();

            if (!success) {
                req->complete();
                // Instantly complete this store.
                DPRINTF(LSQUnit, "Store conditional [sn:%lli] failed. "
                        "Instantly completing it.\n",
                        inst->seqNum);
                PacketPtr new_pkt = new Packet(*req->packet());
                WritebackEvent *wb = new WritebackEvent(inst,
                        new_pkt, this);
                cpu->schedule(wb, curTick() + 1);
                completeStore(storeWBIt);
                if (!storeQueue.empty())
                    storeWBIt++;
                else
                    storeWBIt = storeQueue.end();
                continue;
            }
        }

        if (req->request()->isMmappedIpr()) {
            assert(!inst->isStoreConditional());
            ThreadContext *thread = cpu->tcBase(lsqID);
            PacketPtr main_pkt = new Packet(req->mainRequest(),
                    MemCmd::WriteReq);
            main_pkt->dataStatic(inst->memData);
            req->handleIprWrite(thread, main_pkt);
            delete main_pkt;
            completeStore(storeWBIt);
            storeWBIt++;
            continue;
        }
        /* Send to cache */
        req->sendPacketToCache();

        /* If successful, do the post send */
        if (req->isSent()) {
            storePostSend();
        } else {
            DPRINTF(LSQUnit, "D-Cache became blocked when writing [sn:%lli], "
                    "will retry later\n",
                    inst->seqNum);
        }
    }
    assert(stores >= 0 && storesToWB >= 0);
}

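/**
 * Squash all loads and stores younger than squashed_num. Stores that have
 * already been marked able to write back are considered committed and are
 * left alone.
 */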
template <class Impl>
void
LSQUnit<Impl>::squash(const InstSeqNum &squashed_num)
{
    DPRINTF(LSQUnit, "Squashing until [sn:%lli]! "
            "(Loads:%i Stores:%i)\n", squashed_num, loads, stores);

    while (loads != 0 &&
           loadQueue.back().instruction()->seqNum > squashed_num) {
        DPRINTF(LSQUnit, "Load Instruction PC %s squashed, "
                "[sn:%lli]\n",
                loadQueue.back().instruction()->pcState(),
                loadQueue.back().instruction()->seqNum);

        if (isStalled() && loadQueue.tail() == stallingLoadIdx) {
            stalled = false;
            stallingStoreIsn = 0;
            stallingLoadIdx = 0;
        }

        // Clear the smart pointer to make sure it is decremented.
        loadQueue.back().instruction()->setSquashed();
        loadQueue.back().clear();

        --loads;

        loadQueue.pop_back();
        ++lsqSquashedLoads;
    }

    if (memDepViolator && squashed_num < memDepViolator->seqNum) {
        memDepViolator = NULL;
    }

    while (stores != 0 &&
           storeQueue.back().instruction()->seqNum > squashed_num) {
        // Instructions marked as can WB are already committed.
        if (storeQueue.back().canWB()) {
            break;
        }

        DPRINTF(LSQUnit, "Store Instruction PC %s squashed, "
                "idx:%i [sn:%lli]\n",
                storeQueue.back().instruction()->pcState(),
                storeQueue.tail(), storeQueue.back().instruction()->seqNum);

        // I don't think this can happen. It should have been cleared
        // by the stalling load.
        if (isStalled() &&
            storeQueue.back().instruction()->seqNum == stallingStoreIsn) {
            panic("Is stalled should have been cleared by stalling load!\n");
            stalled = false;
            stallingStoreIsn = 0;
        }

        // Clear the smart pointer to make sure it is decremented.
        storeQueue.back().instruction()->setSquashed();

        // Must delete request now that it wasn't handed off to
        // memory. This is quite ugly. @todo: Figure out the proper
        // place to really handle request deletes.
        storeQueue.back().clear();
        --stores;

        storeQueue.pop_back();
        ++lsqSquashedStores;
    }
}

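/**
 * Bookkeeping after a store's packet has been sent to the cache: clear any
 * load stall caused by this store, mark non-conditional stores completed
 * (subject to checker verification), note an in-flight store for TSO, and
 * advance the writeback iterator.
 */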
template <class Impl>
void
LSQUnit<Impl>::storePostSend()
{
    if (isStalled() &&
        storeWBIt->instruction()->seqNum == stallingStoreIsn) {
        DPRINTF(LSQUnit, "Unstalling, stalling store [sn:%lli] "
                "load idx:%i\n",
                stallingStoreIsn, stallingLoadIdx);
        stalled = false;
        stallingStoreIsn = 0;
        iewStage->replayMemInst(loadQueue[stallingLoadIdx].instruction());
    }

    if (!storeWBIt->instruction()->isStoreConditional()) {
        // The store is basically completed at this time. This
        // only works so long as the checker doesn't try to
        // verify the value in memory for stores.
        storeWBIt->instruction()->setCompleted();

        if (cpu->checker) {
            cpu->checker->verify(storeWBIt->instruction());
        }
    }

    if (needsTSO) {
        storeInFlight = true;
    }

    storeWBIt++;
}

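/**
 * Write back the result of a load (or store conditional/atomic) once its
 * data has returned: complete the access unless the instruction carries a
 * pending ReExec fault, then hand it to commit.
 */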
template <class Impl>
void
LSQUnit<Impl>::writeback(const DynInstPtr &inst, PacketPtr pkt)
{
    iewStage->wakeCPU();

    // Squashed instructions do not need to complete their access.
    if (inst->isSquashed()) {
        assert(!inst->isStore());
        ++lsqIgnoredResponses;
        return;
    }

    if (!inst->isExecuted()) {
        inst->setExecuted();

        if (inst->fault == NoFault) {
            // Complete access to copy data to proper place.
            inst->completeAcc(pkt);
        } else {
            // If the instruction has an outstanding fault, we cannot
            // complete the access as this discards the current fault.

            // If we have an outstanding fault, the fault should only be of
            // type ReExec.
            assert(dynamic_cast<ReExec*>(inst->fault.get()) != nullptr);

            DPRINTF(LSQUnit, "Not completing instruction [sn:%lli] access "
                    "due to pending fault.\n", inst->seqNum);
        }
    }

    // Need to insert instruction into queue to commit
    iewStage->instToCommit(inst);

    iewStage->activityThisCycle();

    // see if this load changed the PC
    iewStage->checkMisprediction(inst);
}

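/**
 * Mark a store queue entry as completed. If the entry is at the head of the
 * store queue, it is retired together with any contiguous already-completed
 * entries following it; stalls and TSO in-flight state are cleared as
 * needed.
 */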
template <class Impl>
void
LSQUnit<Impl>::completeStore(typename StoreQueue::iterator store_idx)
{
    assert(store_idx->valid());
    store_idx->completed() = true;
    --storesToWB;
    // A bit conservative because a store completion may not free up entries,
    // but hopefully avoids two store completions in one cycle from making
    // the CPU tick twice.
    cpu->wakeCPU();
    cpu->activityThisCycle();

    /* We 'need' a copy here because we may clear the entry from the
     * store queue. */
    DynInstPtr store_inst = store_idx->instruction();
    if (store_idx == storeQueue.begin()) {
        do {
            storeQueue.front().clear();
            storeQueue.pop_front();
            --stores;
        } while (!storeQueue.empty() &&
                 storeQueue.front().completed());

        iewStage->updateLSQNextCycle = true;
    }

    DPRINTF(LSQUnit, "Completing store [sn:%lli], idx:%i, store head "
            "idx:%i\n",
            store_inst->seqNum, store_idx.idx() - 1, storeQueue.head() - 1);

#if TRACING_ON
    if (DTRACE(O3PipeView)) {
        store_idx->instruction()->storeTick =
            curTick() - store_idx->instruction()->fetchTick;
    }
#endif

    if (isStalled() &&
        store_inst->seqNum == stallingStoreIsn) {
        DPRINTF(LSQUnit, "Unstalling, stalling store [sn:%lli] "
                "load idx:%i\n",
                stallingStoreIsn, stallingLoadIdx);
        stalled = false;
        stallingStoreIsn = 0;
        iewStage->replayMemInst(loadQueue[stallingLoadIdx].instruction());
    }

    store_inst->setCompleted();

    if (needsTSO) {
        storeInFlight = false;
    }

    // Tell the checker we've completed this instruction. Some stores
    // may get reported twice to the checker, but the checker can
    // handle that case.
    // Store conditionals cannot be sent to the checker yet; they have
    // to update the misc registers first, which should take place
    // when they commit.
    if (cpu->checker && !store_inst->isStoreConditional()) {
        cpu->checker->verify(store_inst);
    }
}

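/**
 * Try to issue a packet to the D-cache. The packet is only sent if the
 * cache is not blocked and the shared LSQ still has a cache port of the
 * right kind (load/read vs. store/write) available this cycle. On success
 * the port is marked busy for the cycle; on failure the request is flagged
 * to be retried, and the cache is marked blocked if it rejected the packet.
 */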
template <class Impl>
bool
LSQUnit<Impl>::trySendPacket(bool isLoad, PacketPtr data_pkt)
{
    bool ret = true;
    bool cache_got_blocked = false;

    auto state = dynamic_cast<LSQSenderState*>(data_pkt->senderState);

    if (!lsq->cacheBlocked() &&
        lsq->cachePortAvailable(isLoad)) {
        if (!dcachePort->sendTimingReq(data_pkt)) {
            ret = false;
            cache_got_blocked = true;
        }
    } else {
        ret = false;
    }

    if (ret) {
        if (!isLoad) {
            isStoreBlocked = false;
        }
        lsq->cachePortBusy(isLoad);
        state->outstanding++;
        state->request()->packetSent();
    } else {
        if (cache_got_blocked) {
            lsq->cacheBlocked(true);
            ++lsqCacheBlocked;
        }
        if (!isLoad) {
            assert(state->request() == storeWBIt->request());
            isStoreBlocked = true;
        }
        state->request()->packetNotSent();
    }
    return ret;
}

template <class Impl>
void
LSQUnit<Impl>::recvRetry()
{
    if (isStoreBlocked) {
        DPRINTF(LSQUnit, "Receiving retry: blocked store\n");
        writebackBlockedStore();
    }
}

template <class Impl>
void
LSQUnit<Impl>::dumpInsts() const
{
    cprintf("Load store queue: Dumping instructions.\n");
    cprintf("Load queue size: %i\n", loads);
    cprintf("Load queue: ");

    for (const auto& e: loadQueue) {
        const DynInstPtr &inst(e.instruction());
        cprintf("%s.[sn:%i] ", inst->pcState(), inst->seqNum);
    }
    cprintf("\n");

    cprintf("Store queue size: %i\n", stores);
    cprintf("Store queue: ");

    for (const auto& e: storeQueue) {
        const DynInstPtr &inst(e.instruction());
        cprintf("%s.[sn:%i] ", inst->pcState(), inst->seqNum);
    }

    cprintf("\n");
}

template <class Impl>
unsigned int
LSQUnit<Impl>::cacheLineSize()
{
    return cpu->cacheLineSize();
}

#endif // __CPU_O3_LSQ_UNIT_IMPL_HH__