/*
 * Copyright (c) 2010-2020 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2013 Amin Farmahini-Farahani
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "mem/mem_ctrl.hh"

#include "base/trace.hh"
#include "debug/DRAM.hh"
#include "debug/Drain.hh"
#include "debug/MemCtrl.hh"
#include "debug/NVM.hh"
#include "debug/QOS.hh"
#include "mem/mem_interface.hh"
#include "sim/system.hh"
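
// MemCtrl is a queue-based memory controller that fronts up to two media
// interfaces on one channel: a DRAM interface (dram) and/or an NVM
// interface (nvm). Requests arriving on the single response port are
// split into media bursts, queued per QoS priority, scheduled onto the
// shared command/data bus, and returned after a configurable frontend
// and backend latency.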
MemCtrl::MemCtrl(const MemCtrlParams &p) :
    QoS::MemCtrl(p),
    port(name() + ".port", *this), isTimingMode(false),
    retryRdReq(false), retryWrReq(false),
    nextReqEvent([this]{ processNextReqEvent(); }, name()),
    respondEvent([this]{ processRespondEvent(); }, name()),
    dram(p.dram), nvm(p.nvm),
    readBufferSize((dram ? dram->readBufferSize : 0) +
                   (nvm ? nvm->readBufferSize : 0)),
    writeBufferSize((dram ? dram->writeBufferSize : 0) +
                    (nvm ? nvm->writeBufferSize : 0)),
    writeHighThreshold(writeBufferSize * p.write_high_thresh_perc / 100.0),
    writeLowThreshold(writeBufferSize * p.write_low_thresh_perc / 100.0),
    minWritesPerSwitch(p.min_writes_per_switch),
    writesThisTime(0), readsThisTime(0),
    memSchedPolicy(p.mem_sched_policy),
    frontendLatency(p.static_frontend_latency),
    backendLatency(p.static_backend_latency),
    commandWindow(p.command_window),
    nextBurstAt(0), prevArrival(0),
    nextReqTime(0),
    stats(*this)
{
    DPRINTF(MemCtrl, "Setting up controller\n");
    readQueue.resize(p.qos_priorities);
    writeQueue.resize(p.qos_priorities);

    // Hook up interfaces to the controller
    if (dram)
        dram->setCtrl(this, commandWindow);
    if (nvm)
        nvm->setCtrl(this, commandWindow);

    fatal_if(!dram && !nvm, "Memory controller must have an interface");

    // perform a basic check of the write thresholds
    if (p.write_low_thresh_perc >= p.write_high_thresh_perc)
        fatal("Write buffer low threshold %d must be smaller than the "
              "high threshold %d\n", p.write_low_thresh_perc,
              p.write_high_thresh_perc);
}

void
MemCtrl::init()
{
   if (!port.isConnected()) {
        fatal("MemCtrl %s is unconnected!\n", name());
    } else {
        port.sendRangeChange();
    }
}

void
MemCtrl::startup()
{
    // remember the memory system mode of operation
    isTimingMode = system()->isTimingMode();

    if (isTimingMode) {
        // shift the bus busy time sufficiently far ahead that we never
        // have to worry about negative values when computing the time for
        // the next request, this will add an insignificant bubble at the
        // start of simulation
        nextBurstAt = curTick() + (dram ? dram->commandOffset() :
                                          nvm->commandOffset());
    }
}

Tick
MemCtrl::recvAtomic(PacketPtr pkt)
{
    DPRINTF(MemCtrl, "recvAtomic: %s 0x%x\n",
                     pkt->cmdString(), pkt->getAddr());

    panic_if(pkt->cacheResponding(), "Should not see packets where cache "
             "is responding");

    Tick latency = 0;
    // do the actual memory access and turn the packet into a response
    if (dram && dram->getAddrRange().contains(pkt->getAddr())) {
        dram->access(pkt);

        if (pkt->hasData()) {
            // this value is not supposed to be accurate, just enough to
            // keep things going, mimic a closed page
            latency = dram->accessLatency();
        }
    } else if (nvm && nvm->getAddrRange().contains(pkt->getAddr())) {
        nvm->access(pkt);

        if (pkt->hasData()) {
            // this value is not supposed to be accurate, just enough to
            // keep things going, mimic a closed page
            latency = nvm->accessLatency();
        }
    } else {
        panic("Can't handle address range for packet %s\n",
              pkt->print());
    }

    return latency;
}

Tick
MemCtrl::recvAtomicBackdoor(PacketPtr pkt, MemBackdoorPtr &backdoor)
{
    Tick latency = recvAtomic(pkt);
    if (dram) {
        dram->getBackdoor(backdoor);
    } else if (nvm) {
        nvm->getBackdoor(backdoor);
    }
    return latency;
}

bool
MemCtrl::readQueueFull(unsigned int neededEntries) const
{
    DPRINTF(MemCtrl,
            "Read queue limit %d, current size %d, entries needed %d\n",
            readBufferSize, totalReadQueueSize + respQueue.size(),
            neededEntries);

    auto rdsize_new = totalReadQueueSize + respQueue.size() + neededEntries;
    return rdsize_new > readBufferSize;
}

bool
MemCtrl::writeQueueFull(unsigned int neededEntries) const
{
    DPRINTF(MemCtrl,
            "Write queue limit %d, current size %d, entries needed %d\n",
            writeBufferSize, totalWriteQueueSize, neededEntries);

    auto wrsize_new = (totalWriteQueueSize + neededEntries);
    return wrsize_new > writeBufferSize;
}

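// Burst-splitting arithmetic used below: (addr | (burst_size - 1)) + 1
// rounds addr up to the next burst boundary (burst_size is a power of
// two). For example, with addr = 0x1234 and a 64-byte burst,
// 0x1234 | 0x3F = 0x123F, so the next boundary is 0x1240 and the first
// burst of the packet covers only the 12 bytes from 0x1234 to 0x123F.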
void
MemCtrl::addToReadQueue(PacketPtr pkt, unsigned int pkt_count, bool is_dram)
{
    // only add to the read queue here. whenever the request is
    // eventually done, set the readyTime, and call schedule()
    assert(!pkt->isWrite());

    assert(pkt_count != 0);

    // if the request size is larger than burst size, the pkt is split into
    // multiple packets
    // Note if the pkt starting address is not aligned to burst size, the
    // address of the first packet is kept unaligned. Subsequent packets
    // are aligned to burst size boundaries. This is to ensure we accurately
    // check read packets against packets in the write queue.
    const Addr base_addr = pkt->getAddr();
    Addr addr = base_addr;
    unsigned pktsServicedByWrQ = 0;
    BurstHelper* burst_helper = NULL;

    uint32_t burst_size = is_dram ? dram->bytesPerBurst() :
                                    nvm->bytesPerBurst();
    for (int cnt = 0; cnt < pkt_count; ++cnt) {
        unsigned size = std::min((addr | (burst_size - 1)) + 1,
                        base_addr + pkt->getSize()) - addr;
        stats.readPktSize[ceilLog2(size)]++;
        stats.readBursts++;
        stats.requestorReadAccesses[pkt->requestorId()]++;

        // First check write buffer to see if the data is already at
        // the controller
        bool foundInWrQ = false;
        Addr burst_addr = burstAlign(addr, is_dram);
        // if the burst address is not present then there is no need
        // looking any further
        if (isInWriteQueue.find(burst_addr) != isInWriteQueue.end()) {
            for (const auto& vec : writeQueue) {
                for (const auto& p : vec) {
                    // check if the read is subsumed in the write queue
                    // packet we are looking at
                    if (p->addr <= addr &&
                        ((addr + size) <= (p->addr + p->size))) {

                        foundInWrQ = true;
                        stats.servicedByWrQ++;
                        pktsServicedByWrQ++;
                        DPRINTF(MemCtrl,
                                "Read to addr %lld with size %d serviced by "
                                "write queue\n",
                                addr, size);
                        stats.bytesReadWrQ += burst_size;
                        break;
                    }
                }
            }
        }

        // If not found in the write q, make a memory packet and
        // push it onto the read queue
        if (!foundInWrQ) {

            // Make the burst helper for split packets
            if (pkt_count > 1 && burst_helper == NULL) {
                DPRINTF(MemCtrl, "Read to addr %lld translates to %d "
                        "memory requests\n", pkt->getAddr(), pkt_count);
                burst_helper = new BurstHelper(pkt_count);
            }

            MemPacket* mem_pkt;
            if (is_dram) {
                mem_pkt = dram->decodePacket(pkt, addr, size, true, true);
                // increment read entries of the rank
                dram->setupRank(mem_pkt->rank, true);
            } else {
                mem_pkt = nvm->decodePacket(pkt, addr, size, true, false);
                // Increment count to trigger issue of non-deterministic read
                nvm->setupRank(mem_pkt->rank, true);
                // Default readyTime to Max; will be reset once read is issued
                mem_pkt->readyTime = MaxTick;
            }
            mem_pkt->burstHelper = burst_helper;

            assert(!readQueueFull(1));
            stats.rdQLenPdf[totalReadQueueSize + respQueue.size()]++;

            DPRINTF(MemCtrl, "Adding to read queue\n");

            readQueue[mem_pkt->qosValue()].push_back(mem_pkt);

            // log packet
            logRequest(MemCtrl::READ, pkt->requestorId(), pkt->qosValue(),
                       mem_pkt->addr, 1);

            // Update stats
            stats.avgRdQLen = totalReadQueueSize + respQueue.size();
        }

        // Starting address of next memory pkt (aligned to burst boundary)
        addr = (addr | (burst_size - 1)) + 1;
    }

    // If all packets are serviced by write queue, we send the response back
    if (pktsServicedByWrQ == pkt_count) {
        accessAndRespond(pkt, frontendLatency);
        return;
    }

    // Update how many split packets are serviced by write queue
    if (burst_helper != NULL)
        burst_helper->burstsServiced = pktsServicedByWrQ;

    // If we are not already scheduled to get a request out of the
    // queue, do so now
    if (!nextReqEvent.scheduled()) {
        DPRINTF(MemCtrl, "Request scheduled immediately\n");
        schedule(nextReqEvent, curTick());
    }
}

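// Writes are acknowledged as soon as they are enqueued; the controller
// then owns consistency. isInWriteQueue holds the burst-aligned address
// of every queued write so that addToReadQueue() can cheaply detect
// reads that are fully subsumed by a pending write, and so that a new
// write to an already-queued burst can be merged instead of enqueued.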
void
MemCtrl::addToWriteQueue(PacketPtr pkt, unsigned int pkt_count, bool is_dram)
{
    // only add to the write queue here. whenever the request is
    // eventually done, set the readyTime, and call schedule()
    assert(pkt->isWrite());

    // if the request size is larger than burst size, the pkt is split into
    // multiple packets
    const Addr base_addr = pkt->getAddr();
    Addr addr = base_addr;
    uint32_t burst_size = is_dram ? dram->bytesPerBurst() :
                                    nvm->bytesPerBurst();
    for (int cnt = 0; cnt < pkt_count; ++cnt) {
        unsigned size = std::min((addr | (burst_size - 1)) + 1,
                        base_addr + pkt->getSize()) - addr;
        stats.writePktSize[ceilLog2(size)]++;
        stats.writeBursts++;
        stats.requestorWriteAccesses[pkt->requestorId()]++;

        // see if we can merge with an existing item in the write
        // queue and keep track of whether we have merged or not
        bool merged = isInWriteQueue.find(burstAlign(addr, is_dram)) !=
            isInWriteQueue.end();

        // if the item was not merged we need to create a new write
        // and enqueue it
        if (!merged) {
            MemPacket* mem_pkt;
            if (is_dram) {
                mem_pkt = dram->decodePacket(pkt, addr, size, false, true);
                dram->setupRank(mem_pkt->rank, false);
            } else {
                mem_pkt = nvm->decodePacket(pkt, addr, size, false, false);
                nvm->setupRank(mem_pkt->rank, false);
            }
            assert(totalWriteQueueSize < writeBufferSize);
            stats.wrQLenPdf[totalWriteQueueSize]++;

            DPRINTF(MemCtrl, "Adding to write queue\n");

            writeQueue[mem_pkt->qosValue()].push_back(mem_pkt);
            isInWriteQueue.insert(burstAlign(addr, is_dram));

            // log packet
            logRequest(MemCtrl::WRITE, pkt->requestorId(), pkt->qosValue(),
                       mem_pkt->addr, 1);

            assert(totalWriteQueueSize == isInWriteQueue.size());

            // Update stats
            stats.avgWrQLen = totalWriteQueueSize;

        } else {
            DPRINTF(MemCtrl,
                    "Merging write burst with existing queue entry\n");

            // keep track of the fact that this burst effectively
            // disappeared as it was merged with an existing one
            stats.mergedWrBursts++;
        }

        // Starting address of next memory pkt (aligned to burst_size boundary)
        addr = (addr | (burst_size - 1)) + 1;
    }

    // we do not wait for the writes to be sent to the actual memory,
    // but instead take responsibility for the consistency here and
    // snoop the write queue for any upcoming reads
    // @todo, if a pkt size is larger than burst size, we might need a
    // different front end latency
    accessAndRespond(pkt, frontendLatency);

    // If we are not already scheduled to get a request out of the
    // queue, do so now
    if (!nextReqEvent.scheduled()) {
        DPRINTF(MemCtrl, "Request scheduled immediately\n");
        schedule(nextReqEvent, curTick());
    }
}

void
MemCtrl::printQs() const
{
#if TRACING_ON
    DPRINTF(MemCtrl, "===READ QUEUE===\n\n");
    for (const auto& queue : readQueue) {
        for (const auto& packet : queue) {
            DPRINTF(MemCtrl, "Read %lu\n", packet->addr);
        }
    }

    DPRINTF(MemCtrl, "\n===RESP QUEUE===\n\n");
    for (const auto& packet : respQueue) {
        DPRINTF(MemCtrl, "Response %lu\n", packet->addr);
    }

    DPRINTF(MemCtrl, "\n===WRITE QUEUE===\n\n");
    for (const auto& queue : writeQueue) {
        for (const auto& packet : queue) {
            DPRINTF(MemCtrl, "Write %lu\n", packet->addr);
        }
    }
#endif // TRACING_ON
}

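// A packet larger than one burst is split: pkt_count below is
// divCeil(offset + size, burst_size). For example, a 128-byte request
// at address 0x1230 with 64-byte bursts has offset 0x30, giving
// divCeil(176, 64) = 3 bursts covering 16, 64 and 48 bytes respectively.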
bool
MemCtrl::recvTimingReq(PacketPtr pkt)
{
    // This is where we enter from the outside world
    DPRINTF(MemCtrl, "recvTimingReq: request %s addr %lld size %d\n",
            pkt->cmdString(), pkt->getAddr(), pkt->getSize());

    panic_if(pkt->cacheResponding(), "Should not see packets where cache "
             "is responding");

    panic_if(!(pkt->isRead() || pkt->isWrite()),
             "Should only see read and writes at memory controller\n");

    // Calc avg gap between requests
    if (prevArrival != 0) {
        stats.totGap += curTick() - prevArrival;
    }
    prevArrival = curTick();

    // What type of media does this packet access?
    bool is_dram;
    if (dram && dram->getAddrRange().contains(pkt->getAddr())) {
        is_dram = true;
    } else if (nvm && nvm->getAddrRange().contains(pkt->getAddr())) {
        is_dram = false;
    } else {
        panic("Can't handle address range for packet %s\n",
              pkt->print());
    }

    // Find out how many memory packets a pkt translates to
    // If the burst size is equal or larger than the pkt size, then a pkt
    // translates to only one memory packet. Otherwise, a pkt translates to
    // multiple memory packets
    unsigned size = pkt->getSize();
    uint32_t burst_size = is_dram ? dram->bytesPerBurst() :
                                    nvm->bytesPerBurst();
    unsigned offset = pkt->getAddr() & (burst_size - 1);
    unsigned int pkt_count = divCeil(offset + size, burst_size);

    // run the QoS scheduler and assign a QoS priority value to the packet
    qosSchedule( { &readQueue, &writeQueue }, burst_size, pkt);

    // check local buffers and do not accept if full
    if (pkt->isWrite()) {
        assert(size != 0);
        if (writeQueueFull(pkt_count)) {
            DPRINTF(MemCtrl, "Write queue full, not accepting\n");
            // remember that we have to retry this port
            retryWrReq = true;
            stats.numWrRetry++;
            return false;
        } else {
            addToWriteQueue(pkt, pkt_count, is_dram);
            stats.writeReqs++;
            stats.bytesWrittenSys += size;
        }
    } else {
        assert(pkt->isRead());
        assert(size != 0);
        if (readQueueFull(pkt_count)) {
            DPRINTF(MemCtrl, "Read queue full, not accepting\n");
            // remember that we have to retry this port
            retryRdReq = true;
            stats.numRdRetry++;
            return false;
        } else {
            addToReadQueue(pkt, pkt_count, is_dram);
            stats.readReqs++;
            stats.bytesReadSys += size;
        }
    }

    return true;
}

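// Responses complete strictly in order: bursts occupy the shared data
// bus in issue order, so respQueue stays sorted by readyTime and only
// its head ever needs a scheduled event (the push-back path in
// processNextReqEvent() asserts this monotonicity).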
void
MemCtrl::processRespondEvent()
{
    DPRINTF(MemCtrl,
            "processRespondEvent(): Some req has reached its readyTime\n");

    MemPacket* mem_pkt = respQueue.front();

    if (mem_pkt->isDram()) {
        // media specific checks and functions when read response is complete
        dram->respondEvent(mem_pkt->rank);
    }

    if (mem_pkt->burstHelper) {
        // it is a split packet
        mem_pkt->burstHelper->burstsServiced++;
        if (mem_pkt->burstHelper->burstsServiced ==
            mem_pkt->burstHelper->burstCount) {
            // we have now serviced all children packets of a system packet
            // so we can now respond to the requestor
            // @todo we probably want to have a different front end and back
            // end latency for split packets
            accessAndRespond(mem_pkt->pkt, frontendLatency + backendLatency);
            delete mem_pkt->burstHelper;
            mem_pkt->burstHelper = NULL;
        }
    } else {
        // it is not a split packet
        accessAndRespond(mem_pkt->pkt, frontendLatency + backendLatency);
    }

    delete respQueue.front();
    respQueue.pop_front();

    if (!respQueue.empty()) {
        assert(respQueue.front()->readyTime >= curTick());
        assert(!respondEvent.scheduled());
        schedule(respondEvent, respQueue.front()->readyTime);
    } else {
        // if there is nothing left in any queue, signal a drain
        if (drainState() == DrainState::Draining &&
            !totalWriteQueueSize && !totalReadQueueSize &&
            allIntfDrained()) {

            DPRINTF(Drain, "Controller done draining\n");
            signalDrainDone();
        } else if (mem_pkt->isDram()) {
            // check the refresh state and kick the refresh event loop
            // into action again if banks already closed and just waiting
            // for read to complete
            dram->checkRefreshState(mem_pkt->rank);
        }
    }

    // We have made a location in the queue available at this point,
    // so if there is a read that was forced to wait, retry now
    if (retryRdReq) {
        retryRdReq = false;
        port.sendRetryReq();
    }
}

MemPacketQueue::iterator
MemCtrl::chooseNext(MemPacketQueue& queue, Tick extra_col_delay)
{
    // This method does the arbitration between requests.

    MemPacketQueue::iterator ret = queue.end();

    if (!queue.empty()) {
        if (queue.size() == 1) {
            // available rank corresponds to state refresh idle
            MemPacket* mem_pkt = *(queue.begin());
            if (packetReady(mem_pkt)) {
                ret = queue.begin();
                DPRINTF(MemCtrl, "Single request, going to a free rank\n");
            } else {
                DPRINTF(MemCtrl, "Single request, going to a busy rank\n");
            }
        } else if (memSchedPolicy == Enums::fcfs) {
            // check if there is a packet going to a free rank
            for (auto i = queue.begin(); i != queue.end(); ++i) {
                MemPacket* mem_pkt = *i;
                if (packetReady(mem_pkt)) {
                    ret = i;
                    break;
                }
            }
        } else if (memSchedPolicy == Enums::frfcfs) {
            ret = chooseNextFRFCFS(queue, extra_col_delay);
        } else {
            panic("No scheduling policy chosen\n");
        }
    }
    return ret;
}

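// FRFCFS (first-ready, first-come-first-served): each interface scans
// the queue for the oldest packet that can issue its column command the
// soonest, so row hits bypass older row misses. When both DRAM and NVM
// are present, the two candidates are compared and the one that can
// issue its burst earlier wins, with DRAM taking priority on a tie.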
MemPacketQueue::iterator
MemCtrl::chooseNextFRFCFS(MemPacketQueue& queue, Tick extra_col_delay)
{
    auto selected_pkt_it = queue.end();
    Tick col_allowed_at = MaxTick;

    // time we need to issue a column command to be seamless
    const Tick min_col_at = std::max(nextBurstAt + extra_col_delay, curTick());

    // find optimal packet for each interface
    if (dram && nvm) {
        // create 2nd set of parameters for NVM
        auto nvm_pkt_it = queue.end();
        Tick nvm_col_at = MaxTick;

        // Select packet by default to give priority if both
        // can issue at the same time or seamlessly
        std::tie(selected_pkt_it, col_allowed_at) =
                 dram->chooseNextFRFCFS(queue, min_col_at);
        std::tie(nvm_pkt_it, nvm_col_at) =
                 nvm->chooseNextFRFCFS(queue, min_col_at);

        // Compare DRAM and NVM and select NVM if it can issue
        // earlier than the DRAM packet
        if (col_allowed_at > nvm_col_at) {
            selected_pkt_it = nvm_pkt_it;
        }
    } else if (dram) {
        std::tie(selected_pkt_it, col_allowed_at) =
                 dram->chooseNextFRFCFS(queue, min_col_at);
    } else if (nvm) {
        std::tie(selected_pkt_it, col_allowed_at) =
                 nvm->chooseNextFRFCFS(queue, min_col_at);
    }

    if (selected_pkt_it == queue.end()) {
        DPRINTF(MemCtrl, "%s no available packets found\n", __func__);
    }

    return selected_pkt_it;
}

void
MemCtrl::accessAndRespond(PacketPtr pkt, Tick static_latency)
{
    DPRINTF(MemCtrl, "Responding to Address %lld.. \n", pkt->getAddr());

    bool needsResponse = pkt->needsResponse();
    // do the actual memory access which also turns the packet into a
    // response
    if (dram && dram->getAddrRange().contains(pkt->getAddr())) {
        dram->access(pkt);
    } else if (nvm && nvm->getAddrRange().contains(pkt->getAddr())) {
        nvm->access(pkt);
    } else {
        panic("Can't handle address range for packet %s\n",
              pkt->print());
    }

    // turn packet around to go back to requestor if response expected
    if (needsResponse) {
        // access already turned the packet into a response
        assert(pkt->isResponse());
        // response_time consumes the static latency and is charged also
        // with headerDelay that takes into account the delay provided by
        // the xbar and also the payloadDelay that takes into account the
        // number of data beats.
        Tick response_time = curTick() + static_latency + pkt->headerDelay +
                             pkt->payloadDelay;
        // Here we reset the timing of the packet before sending it out.
        pkt->headerDelay = pkt->payloadDelay = 0;

        // queue the packet in the response queue to be sent out after
        // the static latency has passed
        port.schedTimingResp(pkt, response_time);
    } else {
        // @todo the packet is going to be deleted, and the MemPacket
        // is still having a pointer to it
        pendingDelete.reset(pkt);
    }

    DPRINTF(MemCtrl, "Done\n");

    return;
}

void
MemCtrl::pruneBurstTick()
{
    auto it = burstTicks.begin();
    while (it != burstTicks.end()) {
        auto current_it = it++;
        if (curTick() > *current_it) {
            DPRINTF(MemCtrl, "Removing burstTick for %d\n", *current_it);
            burstTicks.erase(current_it);
        }
    }
}

Tick
MemCtrl::getBurstWindow(Tick cmd_tick)
{
    // get tick aligned to burst window
    Tick burst_offset = cmd_tick % commandWindow;
    return (cmd_tick - burst_offset);
}

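// Command-bus bandwidth model: burstTicks is a multiset of
// window-aligned ticks, one entry per command already scheduled in that
// window. A new command fits if fewer than max_cmds_per_burst entries
// exist for its window; otherwise it slips to a later window.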
Tick
MemCtrl::verifySingleCmd(Tick cmd_tick, Tick max_cmds_per_burst)
{
    // start with assumption that there is no contention on command bus
    Tick cmd_at = cmd_tick;

    // get tick aligned to burst window
    Tick burst_tick = getBurstWindow(cmd_tick);

    // verify that we have command bandwidth to issue the command
    // if not, iterate over next window(s) until slot found
    while (burstTicks.count(burst_tick) >= max_cmds_per_burst) {
        DPRINTF(MemCtrl, "Contention found on command bus at %d\n",
                burst_tick);
        burst_tick += commandWindow;
        cmd_at = burst_tick;
    }

    // add command into burst window and return corresponding Tick
    burstTicks.insert(burst_tick);
    return cmd_at;
}

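// Some media commands occupy two command-bus slots (this path likely
// targets multi-cycle commands such as those in LPDDR5). Both slots
// must be found, no more than max_multi_cmd_split ticks apart, before
// the pair can issue.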
Tick
MemCtrl::verifyMultiCmd(Tick cmd_tick, Tick max_cmds_per_burst,
                        Tick max_multi_cmd_split)
{
    // start with assumption that there is no contention on command bus
    Tick cmd_at = cmd_tick;

    // get tick aligned to burst window
    Tick burst_tick = getBurstWindow(cmd_tick);

    // Command timing requirements are from 2nd command
    // Start with assumption that 2nd command will issue at cmd_at and
    // find prior slot for 1st command to issue
    // Given a maximum latency of max_multi_cmd_split between the commands,
    // find the burst at the maximum latency prior to cmd_at
    Tick burst_offset = 0;
    Tick first_cmd_offset = cmd_tick % commandWindow;
    while (max_multi_cmd_split > (first_cmd_offset + burst_offset)) {
        burst_offset += commandWindow;
    }
    // get the earliest burst aligned address for first command
    // ensure that the time does not go negative
    Tick first_cmd_tick = burst_tick - std::min(burst_offset, burst_tick);

    // Can required commands issue?
    bool first_can_issue = false;
    bool second_can_issue = false;
    // verify that we have command bandwidth to issue the command(s)
    while (!first_can_issue || !second_can_issue) {
        bool same_burst = (burst_tick == first_cmd_tick);
        auto first_cmd_count = burstTicks.count(first_cmd_tick);
        auto second_cmd_count = same_burst ? first_cmd_count + 1 :
                                   burstTicks.count(burst_tick);

        first_can_issue = first_cmd_count < max_cmds_per_burst;
        second_can_issue = second_cmd_count < max_cmds_per_burst;

        if (!second_can_issue) {
            DPRINTF(MemCtrl, "Contention (cmd2) found on command bus at %d\n",
                    burst_tick);
            burst_tick += commandWindow;
            cmd_at = burst_tick;
        }

        // Verify max_multi_cmd_split isn't violated when command 2 is shifted
        // If commands initially were issued in same burst, they are
        // now in consecutive bursts and can still issue B2B
        bool gap_violated = !same_burst &&
             ((burst_tick - first_cmd_tick) > max_multi_cmd_split);

        if (!first_can_issue || (!second_can_issue && gap_violated)) {
            DPRINTF(MemCtrl, "Contention (cmd1) found on command bus at %d\n",
                    first_cmd_tick);
            first_cmd_tick += commandWindow;
        }
    }

    // Add command to burstTicks
    burstTicks.insert(burst_tick);
    burstTicks.insert(first_cmd_tick);

    return cmd_at;
}

bool
MemCtrl::inReadBusState(bool next_state) const
{
    // check the bus state
    if (next_state) {
        // use busStateNext to get the state that will be used
        // for the next burst
        return (busStateNext == MemCtrl::READ);
    } else {
        return (busState == MemCtrl::READ);
    }
}

bool
MemCtrl::inWriteBusState(bool next_state) const
{
    // check the bus state
    if (next_state) {
        // use busStateNext to get the state that will be used
        // for the next burst
        return (busStateNext == MemCtrl::WRITE);
    } else {
        return (busState == MemCtrl::WRITE);
    }
}

void
MemCtrl::doBurstAccess(MemPacket* mem_pkt)
{
    // first clean up the burstTick set, removing old entries
    // before adding new entries for next burst
    pruneBurstTick();

    // When was command issued?
    Tick cmd_at;

    // Issue the next burst and update bus state to reflect
    // when previous command was issued
    if (mem_pkt->isDram()) {
        std::vector<MemPacketQueue>& queue = selQueue(mem_pkt->isRead());
        std::tie(cmd_at, nextBurstAt) =
                 dram->doBurstAccess(mem_pkt, nextBurstAt, queue);

        // Update timing for NVM ranks if NVM is configured on this channel
        if (nvm)
            nvm->addRankToRankDelay(cmd_at);

    } else {
        std::tie(cmd_at, nextBurstAt) =
                 nvm->doBurstAccess(mem_pkt, nextBurstAt);

        // Update timing for DRAM ranks if DRAM is configured on this channel
        if (dram)
            dram->addRankToRankDelay(cmd_at);
    }

    DPRINTF(MemCtrl, "Access to %lld, ready at %lld next burst at %lld.\n",
            mem_pkt->addr, mem_pkt->readyTime, nextBurstAt);

    // Update the minimum timing between the requests, this is a
    // conservative estimate of when we have to schedule the next
    // request to not introduce any unnecessary bubbles. In most cases
    // we will wake up sooner than we have to.
    nextReqTime = nextBurstAt - (dram ? dram->commandOffset() :
                                        nvm->commandOffset());

    // Update the common bus stats
    if (mem_pkt->isRead()) {
        ++readsThisTime;
        // Update latency stats
        stats.requestorReadTotalLat[mem_pkt->requestorId()] +=
            mem_pkt->readyTime - mem_pkt->entryTime;
        stats.requestorReadBytes[mem_pkt->requestorId()] += mem_pkt->size;
    } else {
        ++writesThisTime;
        stats.requestorWriteBytes[mem_pkt->requestorId()] += mem_pkt->size;
        stats.requestorWriteTotalLat[mem_pkt->requestorId()] +=
            mem_pkt->readyTime - mem_pkt->entryTime;
    }
}

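// Top-level scheduling state machine. The bus alternates between READ
// and WRITE turns: reads run until the write queue crosses
// writeHighThreshold (or the read queue empties with writes above
// writeLowThreshold, or while draining), and writes run until the write
// queue drains below the low-threshold hysteresis or minWritesPerSwitch
// writes have issued while reads are waiting. Each invocation issues at
// most one burst and then reschedules itself for nextReqTime.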
void
MemCtrl::processNextReqEvent()
{
    // transition is handled by QoS algorithm if enabled
    if (turnPolicy) {
        // select bus state - only done if QoS algorithms are in use
        busStateNext = selectNextBusState();
    }

    // detect bus state change
    bool switched_cmd_type = (busState != busStateNext);
    // record stats
    recordTurnaroundStats();

    DPRINTF(MemCtrl, "QoS Turnarounds selected state %s %s\n",
            (busState==MemCtrl::READ)?"READ":"WRITE",
            switched_cmd_type?"[turnaround triggered]":"");

    if (switched_cmd_type) {
        if (busState == MemCtrl::READ) {
            DPRINTF(MemCtrl,
                    "Switching to writes after %d reads with %d reads "
                    "waiting\n", readsThisTime, totalReadQueueSize);
            stats.rdPerTurnAround.sample(readsThisTime);
            readsThisTime = 0;
        } else {
            DPRINTF(MemCtrl,
                    "Switching to reads after %d writes with %d writes "
                    "waiting\n", writesThisTime, totalWriteQueueSize);
            stats.wrPerTurnAround.sample(writesThisTime);
            writesThisTime = 0;
        }
    }

    // updates current state
    busState = busStateNext;

    if (nvm) {
        for (auto queue = readQueue.rbegin();
             queue != readQueue.rend(); ++queue) {
            // select non-deterministic NVM read to issue
            // assume that we have the command bandwidth to issue this along
            // with additional RD/WR burst with needed bank operations
            if (nvm->readsWaitingToIssue()) {
                // select non-deterministic NVM read to issue
                nvm->chooseRead(*queue);
            }
        }
    }

    // check ranks for refresh/wakeup - uses busStateNext, so done after
    // turnaround decisions
    // Default to busy status and update based on interface specifics
    bool dram_busy = dram ? dram->isBusy() : true;
    bool nvm_busy = true;
    bool all_writes_nvm = false;
    if (nvm) {
        all_writes_nvm = nvm->numWritesQueued == totalWriteQueueSize;
        bool read_queue_empty = totalReadQueueSize == 0;
        nvm_busy = nvm->isBusy(read_queue_empty, all_writes_nvm);
    }
    // Default state of unused interface is 'true'
    // Simply AND the busy signals to determine if system is busy
    if (dram_busy && nvm_busy) {
        // if all ranks are refreshing wait for them to finish
        // and stall this state machine without taking any further
        // action, and do not schedule a new nextReqEvent
        return;
    }

    // when we get here it is either a read or a write
    if (busState == READ) {

        // track if we should switch or not
        bool switch_to_writes = false;

        if (totalReadQueueSize == 0) {
            // In the case there is no read request to go next,
            // trigger writes if we have passed the low threshold (or
            // if we are draining)
            if (!(totalWriteQueueSize == 0) &&
                (drainState() == DrainState::Draining ||
                 totalWriteQueueSize > writeLowThreshold)) {

                DPRINTF(MemCtrl,
                        "Switching to writes due to read queue empty\n");
                switch_to_writes = true;
            } else {
                // check if we are drained
                // not done draining until in PWR_IDLE state
                // ensuring all banks are closed and
                // have exited low power states
                if (drainState() == DrainState::Draining &&
                    respQueue.empty() && allIntfDrained()) {

                    DPRINTF(Drain, "MemCtrl controller done draining\n");
                    signalDrainDone();
                }

                // nothing to do, not even any point in scheduling an
                // event for the next request
                return;
            }
        } else {

            bool read_found = false;
            MemPacketQueue::iterator to_read;
            uint8_t prio = numPriorities();

            for (auto queue = readQueue.rbegin();
                 queue != readQueue.rend(); ++queue) {

                prio--;

                DPRINTF(QOS,
                        "Checking READ queue [%d] priority [%d elements]\n",
                        prio, queue->size());

                // Figure out which read request goes next
                // If we are changing command type, incorporate the minimum
                // bus turnaround delay which will be rank to rank delay
                to_read = chooseNext((*queue), switched_cmd_type ?
                                     minWriteToReadDataGap() : 0);

                if (to_read != queue->end()) {
                    // candidate read found
                    read_found = true;
                    break;
                }
            }

            // if no read to an available rank is found then return
            // at this point. There could be writes to the available ranks
            // which are above the required threshold. However, to
            // avoid adding more complexity to the code, return and wait
            // for a refresh event to kick things into action again.
            if (!read_found) {
                DPRINTF(MemCtrl, "No Reads Found - exiting\n");
                return;
            }

            auto mem_pkt = *to_read;

            doBurstAccess(mem_pkt);

            // sanity check
            assert(mem_pkt->size <= (mem_pkt->isDram() ?
                                     dram->bytesPerBurst() :
                                     nvm->bytesPerBurst()) );
            assert(mem_pkt->readyTime >= curTick());

            // log the response
            logResponse(MemCtrl::READ, (*to_read)->requestorId(),
                        mem_pkt->qosValue(), mem_pkt->getAddr(), 1,
                        mem_pkt->readyTime - mem_pkt->entryTime);

            // Insert into response queue. It will be sent back to the
            // requestor at its readyTime
            if (respQueue.empty()) {
                assert(!respondEvent.scheduled());
                schedule(respondEvent, mem_pkt->readyTime);
            } else {
                assert(respQueue.back()->readyTime <= mem_pkt->readyTime);
                assert(respondEvent.scheduled());
            }

            respQueue.push_back(mem_pkt);

            // we have so many writes that we have to transition
            // don't transition if the writeRespQueue is full and
            // there are no other writes that can issue
            if ((totalWriteQueueSize > writeHighThreshold) &&
                !(nvm && all_writes_nvm && nvm->writeRespQueueFull())) {
                switch_to_writes = true;
            }

            // remove the request from the queue
            // the iterator is no longer valid .
            readQueue[mem_pkt->qosValue()].erase(to_read);
        }

        // switching to writes, either because the read queue is empty
        // and the writes have passed the low threshold (or we are
        // draining), or because the writes hit the high threshold
        if (switch_to_writes) {
            // transition to writing
            busStateNext = WRITE;
        }
    } else {

        bool write_found = false;
        MemPacketQueue::iterator to_write;
        uint8_t prio = numPriorities();

        for (auto queue = writeQueue.rbegin();
             queue != writeQueue.rend(); ++queue) {

            prio--;

            DPRINTF(QOS,
                    "Checking WRITE queue [%d] priority [%d elements]\n",
                    prio, queue->size());

            // If we are changing command type, incorporate the minimum
            // bus turnaround delay
            to_write = chooseNext((*queue),
                                  switched_cmd_type ?
                                  minReadToWriteDataGap() : 0);

            if (to_write != queue->end()) {
                write_found = true;
                break;
            }
        }

        // if there are no writes to a rank that is available to service
        // requests (i.e. rank is in refresh idle state) are found then
        // return. There could be reads to the available ranks. However, to
        // avoid adding more complexity to the code, return at this point and
        // wait for a refresh event to kick things into action again.
        if (!write_found) {
            DPRINTF(MemCtrl, "No Writes Found - exiting\n");
            return;
        }

        auto mem_pkt = *to_write;

        // sanity check
        assert(mem_pkt->size <= (mem_pkt->isDram() ?
                                 dram->bytesPerBurst() :
                                 nvm->bytesPerBurst()) );

        doBurstAccess(mem_pkt);

        isInWriteQueue.erase(burstAlign(mem_pkt->addr, mem_pkt->isDram()));

        // log the response
        logResponse(MemCtrl::WRITE, mem_pkt->requestorId(),
                    mem_pkt->qosValue(), mem_pkt->getAddr(), 1,
                    mem_pkt->readyTime - mem_pkt->entryTime);

        // remove the request from the queue - the iterator is no longer valid
        writeQueue[mem_pkt->qosValue()].erase(to_write);

        delete mem_pkt;

        // If we emptied the write queue, or got sufficiently below the
        // threshold (using the minWritesPerSwitch as the hysteresis) and
        // are not draining, or we have reads waiting and have done enough
        // writes, then switch to reads.
        // If we are interfacing to NVM and have filled the writeRespQueue,
        // with only NVM writes in Q, then switch to reads
        bool below_threshold =
            totalWriteQueueSize + minWritesPerSwitch < writeLowThreshold;

        if (totalWriteQueueSize == 0 ||
            (below_threshold && drainState() != DrainState::Draining) ||
            (totalReadQueueSize && writesThisTime >= minWritesPerSwitch) ||
            (totalReadQueueSize && nvm && nvm->writeRespQueueFull() &&
             all_writes_nvm)) {

            // turn the bus back around for reads again
            busStateNext = MemCtrl::READ;

            // note that we switch back to reads also in the idle
            // case, which eventually will check for any draining and
            // also pause any further scheduling if there is really
            // nothing to do
        }
    }
    // It is possible that a refresh to another rank kicks things back into
    // action before reaching this point.
    if (!nextReqEvent.scheduled())
        schedule(nextReqEvent, std::max(nextReqTime, curTick()));

    // If there is space available and we have writes waiting then let
    // them retry. This is done here to ensure that the retry does not
    // cause a nextReqEvent to be scheduled before we do so as part of
    // the next request processing
    if (retryWrReq && totalWriteQueueSize < writeBufferSize) {
        retryWrReq = false;
        port.sendRetryReq();
    }
}

bool
MemCtrl::packetReady(MemPacket* pkt)
{
    return (pkt->isDram() ?
            dram->burstReady(pkt) : nvm->burstReady(pkt));
}

Tick
MemCtrl::minReadToWriteDataGap()
{
    Tick dram_min = dram ? dram->minReadToWriteDataGap() : MaxTick;
    Tick nvm_min = nvm ? nvm->minReadToWriteDataGap() : MaxTick;
    return std::min(dram_min, nvm_min);
}

Tick
MemCtrl::minWriteToReadDataGap()
{
    Tick dram_min = dram ? dram->minWriteToReadDataGap() : MaxTick;
    Tick nvm_min = nvm ? nvm->minWriteToReadDataGap() : MaxTick;
    return std::min(dram_min, nvm_min);
}

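// burstAlign() masks off the low-order bits of the address, e.g. with a
// 64-byte burst, 0x1234 & ~0x3F = 0x1200.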
Addr
MemCtrl::burstAlign(Addr addr, bool is_dram) const
{
    if (is_dram)
        return (addr & ~(Addr(dram->bytesPerBurst() - 1)));
    else
        return (addr & ~(Addr(nvm->bytesPerBurst() - 1)));
}

MemCtrl::CtrlStats::CtrlStats(MemCtrl &_ctrl)
    : Stats::Group(&_ctrl),
    ctrl(_ctrl),

    ADD_STAT(readReqs, UNIT_COUNT, "Number of read requests accepted"),
    ADD_STAT(writeReqs, UNIT_COUNT, "Number of write requests accepted"),

    ADD_STAT(readBursts, UNIT_COUNT,
             "Number of controller read bursts, including those serviced by "
             "the write queue"),
    ADD_STAT(writeBursts, UNIT_COUNT,
             "Number of controller write bursts, including those merged in "
             "the write queue"),
    ADD_STAT(servicedByWrQ, UNIT_COUNT,
             "Number of controller read bursts serviced by the write queue"),
    ADD_STAT(mergedWrBursts, UNIT_COUNT,
             "Number of controller write bursts merged with an existing one"),

    ADD_STAT(neitherReadNorWriteReqs, UNIT_COUNT,
             "Number of requests that are neither read nor write"),

    ADD_STAT(avgRdQLen,
             UNIT_RATE(Stats::Units::Count, Stats::Units::Tick),
             "Average read queue length when enqueuing"),
    ADD_STAT(avgWrQLen,
             UNIT_RATE(Stats::Units::Count, Stats::Units::Tick),
             "Average write queue length when enqueuing"),

    ADD_STAT(numRdRetry, UNIT_COUNT,
             "Number of times read queue was full causing retry"),
    ADD_STAT(numWrRetry, UNIT_COUNT,
             "Number of times write queue was full causing retry"),

    ADD_STAT(readPktSize, UNIT_COUNT, "Read request sizes (log2)"),
    ADD_STAT(writePktSize, UNIT_COUNT, "Write request sizes (log2)"),

    ADD_STAT(rdQLenPdf, UNIT_COUNT,
             "What read queue length does an incoming req see"),
    ADD_STAT(wrQLenPdf, UNIT_COUNT,
             "What write queue length does an incoming req see"),

    ADD_STAT(rdPerTurnAround, UNIT_COUNT,
             "Reads before turning the bus around for writes"),
    ADD_STAT(wrPerTurnAround, UNIT_COUNT,
             "Writes before turning the bus around for reads"),

    ADD_STAT(bytesReadWrQ, UNIT_BYTE,
             "Total number of bytes read from write queue"),
    ADD_STAT(bytesReadSys, UNIT_BYTE,
             "Total read bytes from the system interface side"),
    ADD_STAT(bytesWrittenSys, UNIT_BYTE,
             "Total written bytes from the system interface side"),

    ADD_STAT(avgRdBWSys, UNIT_RATE(Stats::Units::Byte, Stats::Units::Second),
             "Average system read bandwidth in Byte/s"),
    ADD_STAT(avgWrBWSys, UNIT_RATE(Stats::Units::Byte, Stats::Units::Second),
             "Average system write bandwidth in Byte/s"),

    ADD_STAT(totGap, UNIT_TICK, "Total gap between requests"),
    ADD_STAT(avgGap, UNIT_RATE(Stats::Units::Tick, Stats::Units::Count),
             "Average gap between requests"),

    ADD_STAT(requestorReadBytes, UNIT_BYTE,
             "Per-requestor bytes read from memory"),
    ADD_STAT(requestorWriteBytes, UNIT_BYTE,
             "Per-requestor bytes write to memory"),
    ADD_STAT(requestorReadRate,
             UNIT_RATE(Stats::Units::Byte, Stats::Units::Second),
             "Per-requestor bytes read from memory rate"),
    ADD_STAT(requestorWriteRate,
             UNIT_RATE(Stats::Units::Byte, Stats::Units::Second),
             "Per-requestor bytes write to memory rate"),
    ADD_STAT(requestorReadAccesses, UNIT_COUNT,
             "Per-requestor read serviced memory accesses"),
    ADD_STAT(requestorWriteAccesses, UNIT_COUNT,
             "Per-requestor write serviced memory accesses"),
    ADD_STAT(requestorReadTotalLat, UNIT_TICK,
             "Per-requestor read total memory access latency"),
    ADD_STAT(requestorWriteTotalLat, UNIT_TICK,
             "Per-requestor write total memory access latency"),
    ADD_STAT(requestorReadAvgLat,
             UNIT_RATE(Stats::Units::Tick, Stats::Units::Count),
             "Per-requestor read average memory access latency"),
    ADD_STAT(requestorWriteAvgLat,
             UNIT_RATE(Stats::Units::Tick, Stats::Units::Count),
             "Per-requestor write average memory access latency")
{
}

void
MemCtrl::CtrlStats::regStats()
{
    using namespace Stats;

    assert(ctrl.system());
    const auto max_requestors = ctrl.system()->maxRequestors();

    avgRdQLen.precision(2);
    avgWrQLen.precision(2);

    readPktSize.init(ceilLog2(ctrl.system()->cacheLineSize()) + 1);
    writePktSize.init(ceilLog2(ctrl.system()->cacheLineSize()) + 1);

    rdQLenPdf.init(ctrl.readBufferSize);
    wrQLenPdf.init(ctrl.writeBufferSize);

    rdPerTurnAround
        .init(ctrl.readBufferSize)
        .flags(nozero);
    wrPerTurnAround
        .init(ctrl.writeBufferSize)
        .flags(nozero);

    avgRdBWSys.precision(8);
    avgWrBWSys.precision(8);
    avgGap.precision(2);

    // per-requestor bytes read and written to memory
    requestorReadBytes
        .init(max_requestors)
        .flags(nozero | nonan);

    requestorWriteBytes
        .init(max_requestors)
        .flags(nozero | nonan);

    // per-requestor bytes read and written to memory rate
    requestorReadRate
        .flags(nozero | nonan)
        .precision(12);

    requestorReadAccesses
        .init(max_requestors)
        .flags(nozero);

    requestorWriteAccesses
        .init(max_requestors)
        .flags(nozero);

    requestorReadTotalLat
        .init(max_requestors)
        .flags(nozero | nonan);

    requestorReadAvgLat
        .flags(nonan)
        .precision(2);

    requestorWriteRate
        .flags(nozero | nonan)
        .precision(12);

    requestorWriteTotalLat
        .init(max_requestors)
        .flags(nozero | nonan);

    requestorWriteAvgLat
        .flags(nonan)
        .precision(2);

    for (int i = 0; i < max_requestors; i++) {
        const std::string requestor = ctrl.system()->getRequestorName(i);
        requestorReadBytes.subname(i, requestor);
        requestorReadRate.subname(i, requestor);
        requestorWriteBytes.subname(i, requestor);
        requestorWriteRate.subname(i, requestor);
        requestorReadAccesses.subname(i, requestor);
        requestorWriteAccesses.subname(i, requestor);
        requestorReadTotalLat.subname(i, requestor);
        requestorReadAvgLat.subname(i, requestor);
        requestorWriteTotalLat.subname(i, requestor);
        requestorWriteAvgLat.subname(i, requestor);
    }

    // Formula stats
    avgRdBWSys = (bytesReadSys) / simSeconds;
    avgWrBWSys = (bytesWrittenSys) / simSeconds;

    avgGap = totGap / (readReqs + writeReqs);

    requestorReadRate = requestorReadBytes / simSeconds;
    requestorWriteRate = requestorWriteBytes / simSeconds;
    requestorReadAvgLat = requestorReadTotalLat / requestorReadAccesses;
    requestorWriteAvgLat = requestorWriteTotalLat / requestorWriteAccesses;
}

void
MemCtrl::recvFunctional(PacketPtr pkt)
{
    if (dram && dram->getAddrRange().contains(pkt->getAddr())) {
        // rely on the abstract memory
        dram->functionalAccess(pkt);
    } else if (nvm && nvm->getAddrRange().contains(pkt->getAddr())) {
        // rely on the abstract memory
        nvm->functionalAccess(pkt);
    } else {
        panic("Can't handle address range for packet %s\n",
              pkt->print());
    }
}

Port &
MemCtrl::getPort(const std::string &if_name, PortID idx)
{
    if (if_name != "port") {
        return QoS::MemCtrl::getPort(if_name, idx);
    } else {
        return port;
    }
}

bool
MemCtrl::allIntfDrained() const
{
    // ensure dram is in power down and refresh IDLE states
    bool dram_drained = !dram || dram->allRanksDrained();
    // No outstanding NVM writes
    // All other queues verified as needed with calling logic
    bool nvm_drained = !nvm || nvm->allRanksDrained();
    return (dram_drained && nvm_drained);
}

DrainState
MemCtrl::drain()
{
    // if there is anything in any of our internal queues, keep track
    // of that as well
    if (!(!totalWriteQueueSize && !totalReadQueueSize && respQueue.empty() &&
          allIntfDrained())) {

        DPRINTF(Drain, "Memory controller not drained, write: %d, read: %d,"
                " resp: %d\n", totalWriteQueueSize, totalReadQueueSize,
                respQueue.size());

        // the only queue that is not drained automatically over time
        // is the write queue, thus kick things into action if needed
        if (!totalWriteQueueSize && !nextReqEvent.scheduled()) {
            schedule(nextReqEvent, curTick());
        }

        if (dram)
            dram->drainRanks();

        return DrainState::Draining;
    } else {
        return DrainState::Drained;
    }
}

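// Switching between KVM/atomic and timing modes looks like a drain
// followed by a resume: on resume the controller re-runs its timing
// startup when entering timing mode, and suspends DRAM refresh when
// leaving it so that stale refresh events cannot fire under KVM.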
void
MemCtrl::drainResume()
{
    if (!isTimingMode && system()->isTimingMode()) {
        // if we switched to timing mode, kick things into action,
        // and behave as if we restored from a checkpoint
        startup();
        dram->startup();
    } else if (isTimingMode && !system()->isTimingMode()) {
        // if we switch from timing mode, stop the refresh events to
        // not cause issues with KVM
        if (dram)
            dram->suspend();
    }

    // update the mode
    isTimingMode = system()->isTimingMode();
}

MemCtrl::MemoryPort::MemoryPort(const std::string& name, MemCtrl& _ctrl)
    : QueuedResponsePort(name, &_ctrl, queue), queue(_ctrl, *this, true),
      ctrl(_ctrl)
{ }

AddrRangeList
MemCtrl::MemoryPort::getAddrRanges() const
{
    AddrRangeList ranges;
    if (ctrl.dram) {
        DPRINTF(DRAM, "Pushing DRAM ranges to port\n");
        ranges.push_back(ctrl.dram->getAddrRange());
    }
    if (ctrl.nvm) {
        DPRINTF(NVM, "Pushing NVM ranges to port\n");
        ranges.push_back(ctrl.nvm->getAddrRange());
    }
    return ranges;
}

void
MemCtrl::MemoryPort::recvFunctional(PacketPtr pkt)
{
    pkt->pushLabel(ctrl.name());

    if (!queue.trySatisfyFunctional(pkt)) {
        // Default implementation of SimpleTimingPort::recvFunctional()
        // calls recvAtomic() and throws away the latency; we can save a
        // little here by just not calculating the latency.
        ctrl.recvFunctional(pkt);
    }

    pkt->popLabel();
}

Tick
MemCtrl::MemoryPort::recvAtomic(PacketPtr pkt)
{
    return ctrl.recvAtomic(pkt);
}

Tick
MemCtrl::MemoryPort::recvAtomicBackdoor(
        PacketPtr pkt, MemBackdoorPtr &backdoor)
{
    return ctrl.recvAtomicBackdoor(pkt, backdoor);
}

bool
MemCtrl::MemoryPort::recvTimingReq(PacketPtr pkt)
{
    // pass it to the memory controller
    return ctrl.recvTimingReq(pkt);
}