/*
 * Copyright (c) 2010-2020 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2013 Amin Farmahini-Farahani
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "mem/mem_ctrl.hh"

#include "base/trace.hh"
#include "debug/DRAM.hh"
#include "debug/Drain.hh"
#include "debug/MemCtrl.hh"
#include "debug/NVM.hh"
#include "debug/QOS.hh"
#include "mem/mem_interface.hh"
#include "sim/system.hh"
MemCtrl::MemCtrl(const MemCtrlParams* p) :
    QoS::MemCtrl(p),
    port(name() + ".port", *this), isTimingMode(false),
    retryRdReq(false), retryWrReq(false),
    nextReqEvent([this]{ processNextReqEvent(); }, name()),
    respondEvent([this]{ processRespondEvent(); }, name()),
    dram(p->dram), nvm(p->nvm),
    readBufferSize((dram ? dram->readBufferSize : 0) +
                   (nvm ? nvm->readBufferSize : 0)),
    writeBufferSize((dram ? dram->writeBufferSize : 0) +
                    (nvm ? nvm->writeBufferSize : 0)),
    writeHighThreshold(writeBufferSize * p->write_high_thresh_perc / 100.0),
    writeLowThreshold(writeBufferSize * p->write_low_thresh_perc / 100.0),
    minWritesPerSwitch(p->min_writes_per_switch),
    writesThisTime(0), readsThisTime(0),
    memSchedPolicy(p->mem_sched_policy),
    frontendLatency(p->static_frontend_latency),
    backendLatency(p->static_backend_latency),
    commandWindow(p->command_window),
    nextBurstAt(0), prevArrival(0),
    nextReqTime(0), stats(*this)
{
    DPRINTF(MemCtrl, "Setting up controller\n");
    readQueue.resize(p->qos_priorities);
    writeQueue.resize(p->qos_priorities);
    // Hook up interfaces to the controller
    if (dram)
        dram->setCtrl(this, commandWindow);
    if (nvm)
        nvm->setCtrl(this, commandWindow);

    fatal_if(!dram && !nvm, "Memory controller must have an interface");

    // perform a basic check of the write thresholds
    if (p->write_low_thresh_perc >= p->write_high_thresh_perc)
        fatal("Write buffer low threshold %d must be smaller than the "
              "high threshold %d\n", p->write_low_thresh_perc,
              p->write_high_thresh_perc);
}
void
MemCtrl::init()
{
    if (!port.isConnected()) {
        fatal("MemCtrl %s is unconnected!\n", name());
    } else {
        port.sendRangeChange();
    }
}

void
MemCtrl::startup()
{
    // remember the memory system mode of operation
    isTimingMode = system()->isTimingMode();

    if (isTimingMode) {
        // shift the bus busy time sufficiently far ahead that we never
        // have to worry about negative values when computing the time for
        // the next request, this will add an insignificant bubble at the
        // start of simulation
        nextBurstAt = curTick() + (dram ? dram->commandOffset() :
                                          nvm->commandOffset());
    }
}
Tick
MemCtrl::recvAtomic(PacketPtr pkt)
{
    DPRINTF(MemCtrl, "recvAtomic: %s 0x%x\n",
                     pkt->cmdString(), pkt->getAddr());

    panic_if(pkt->cacheResponding(), "Should not see packets where cache "
             "is responding");

    Tick latency = 0;
    // do the actual memory access and turn the packet into a response
    if (dram && dram->getAddrRange().contains(pkt->getAddr())) {
        dram->access(pkt);

        if (pkt->hasData()) {
            // this value is not supposed to be accurate, just enough to
            // keep things going, mimic a closed page
            latency = dram->accessLatency();
        }
    } else if (nvm && nvm->getAddrRange().contains(pkt->getAddr())) {
        nvm->access(pkt);

        if (pkt->hasData()) {
            // this value is not supposed to be accurate, just enough to
            // keep things going, mimic a closed page
            latency = nvm->accessLatency();
        }
    } else {
        panic("Can't handle address range for packet %s\n",
              pkt->print());
    }

    return latency;
}
bool
MemCtrl::readQueueFull(unsigned int neededEntries) const
{
    DPRINTF(MemCtrl,
            "Read queue limit %d, current size %d, entries needed %d\n",
            readBufferSize, totalReadQueueSize + respQueue.size(),
            neededEntries);

    auto rdsize_new = totalReadQueueSize + respQueue.size() + neededEntries;
    return rdsize_new > readBufferSize;
}

bool
MemCtrl::writeQueueFull(unsigned int neededEntries) const
{
    DPRINTF(MemCtrl,
            "Write queue limit %d, current size %d, entries needed %d\n",
            writeBufferSize, totalWriteQueueSize, neededEntries);

    auto wrsize_new = (totalWriteQueueSize + neededEntries);
    return wrsize_new > writeBufferSize;
}
void
MemCtrl::addToReadQueue(PacketPtr pkt, unsigned int pkt_count, bool is_dram)
{
    // only add to the read queue here. whenever the request is
    // eventually done, set the readyTime, and call schedule()
    assert(!pkt->isWrite());

    assert(pkt_count != 0);

    // if the request size is larger than burst size, the pkt is split into
    // multiple packets
    // Note if the pkt starting address is not aligned to burst size, the
    // address of the first packet is kept unaligned. Subsequent packets
    // are aligned to burst size boundaries. This is to ensure we accurately
    // check read packets against packets in the write queue.
    const Addr base_addr = pkt->getAddr();
    Addr addr = base_addr;
    unsigned pktsServicedByWrQ = 0;
    BurstHelper* burst_helper = NULL;

    uint32_t burst_size = is_dram ? dram->bytesPerBurst() :
                                    nvm->bytesPerBurst();
    for (int cnt = 0; cnt < pkt_count; ++cnt) {
        unsigned size = std::min((addr | (burst_size - 1)) + 1,
                                 base_addr + pkt->getSize()) - addr;
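        // For example, with burst_size = 64 and an unaligned pkt at
        // addr = 0x70 with getSize() = 32, the first chunk is capped at
        // the burst boundary (0x70-0x7F, size 16) and the second chunk
        // is capped by the request end (0x80-0x8F, size 16).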
        stats.readPktSize[ceilLog2(size)]++;
        stats.readBursts++;
        stats.requestorReadAccesses[pkt->requestorId()]++;

        // First check write buffer to see if the data is already at
        // the controller
        bool foundInWrQ = false;
        Addr burst_addr = burstAlign(addr, is_dram);
        // if the burst address is not present then there is no need
        // looking any further
        if (isInWriteQueue.find(burst_addr) != isInWriteQueue.end()) {
            for (const auto& vec : writeQueue) {
                for (const auto& p : vec) {
                    // check if the read is subsumed in the write queue
                    // packet we are looking at
                    if (p->addr <= addr &&
                        ((addr + size) <= (p->addr + p->size))) {

                        foundInWrQ = true;
                        stats.servicedByWrQ++;
                        pktsServicedByWrQ++;
                        DPRINTF(MemCtrl,
                                "Read to addr %lld with size %d serviced by "
                                "write queue\n",
                                addr, size);
                        stats.bytesReadWrQ += burst_size;
                        break;
                    }
                }
            }
        }

        // If not found in the write q, make a memory packet and
        // push it onto the read queue
        if (!foundInWrQ) {

            // Make the burst helper for split packets
            if (pkt_count > 1 && burst_helper == NULL) {
                DPRINTF(MemCtrl, "Read to addr %lld translates to %d "
                        "memory requests\n", pkt->getAddr(), pkt_count);
                burst_helper = new BurstHelper(pkt_count);
            }

            MemPacket* mem_pkt;
            if (is_dram) {
                mem_pkt = dram->decodePacket(pkt, addr, size, true, true);
                // increment read entries of the rank
                dram->setupRank(mem_pkt->rank, true);
            } else {
                mem_pkt = nvm->decodePacket(pkt, addr, size, true, false);
                // Increment count to trigger issue of non-deterministic read
                nvm->setupRank(mem_pkt->rank, true);
                // Default readyTime to Max; will be reset once read is issued
                mem_pkt->readyTime = MaxTick;
            }
            mem_pkt->burstHelper = burst_helper;

            assert(!readQueueFull(1));
            stats.rdQLenPdf[totalReadQueueSize + respQueue.size()]++;

            DPRINTF(MemCtrl, "Adding to read queue\n");

            readQueue[mem_pkt->qosValue()].push_back(mem_pkt);

            // log packet
            logRequest(MemCtrl::READ, pkt->requestorId(), pkt->qosValue(),
                       mem_pkt->addr, 1);

            // Update stats
            stats.avgRdQLen = totalReadQueueSize + respQueue.size();
        }

        // Starting address of next memory pkt (aligned to burst boundary)
        addr = (addr | (burst_size - 1)) + 1;
    }

    // If all packets are serviced by write queue, we send the response back
    if (pktsServicedByWrQ == pkt_count) {
        accessAndRespond(pkt, frontendLatency);
        return;
    }

    // Update how many split packets are serviced by write queue
    if (burst_helper != NULL)
        burst_helper->burstsServiced = pktsServicedByWrQ;

    // If we are not already scheduled to get a request out of the
    // queue, do so now
    if (!nextReqEvent.scheduled()) {
        DPRINTF(MemCtrl, "Request scheduled immediately\n");
        schedule(nextReqEvent, curTick());
    }
}
void
MemCtrl::addToWriteQueue(PacketPtr pkt, unsigned int pkt_count, bool is_dram)
{
    // only add to the write queue here. whenever the request is
    // eventually done, set the readyTime, and call schedule()
    assert(pkt->isWrite());

    // if the request size is larger than burst size, the pkt is split into
    // multiple packets
    const Addr base_addr = pkt->getAddr();
    Addr addr = base_addr;
    uint32_t burst_size = is_dram ? dram->bytesPerBurst() :
                                    nvm->bytesPerBurst();
    for (int cnt = 0; cnt < pkt_count; ++cnt) {
        unsigned size = std::min((addr | (burst_size - 1)) + 1,
                                 base_addr + pkt->getSize()) - addr;
        stats.writePktSize[ceilLog2(size)]++;
        stats.writeBursts++;
        stats.requestorWriteAccesses[pkt->requestorId()]++;

        // see if we can merge with an existing item in the write
        // queue and keep track of whether we have merged or not
        bool merged = isInWriteQueue.find(burstAlign(addr, is_dram)) !=
            isInWriteQueue.end();

        // if the item was not merged we need to create a new write
        // and enqueue it
        if (!merged) {
            MemPacket* mem_pkt;
            if (is_dram) {
                mem_pkt = dram->decodePacket(pkt, addr, size, false, true);
                dram->setupRank(mem_pkt->rank, false);
            } else {
                mem_pkt = nvm->decodePacket(pkt, addr, size, false, false);
                nvm->setupRank(mem_pkt->rank, false);
            }
            assert(totalWriteQueueSize < writeBufferSize);
            stats.wrQLenPdf[totalWriteQueueSize]++;

            DPRINTF(MemCtrl, "Adding to write queue\n");

            writeQueue[mem_pkt->qosValue()].push_back(mem_pkt);
            isInWriteQueue.insert(burstAlign(addr, is_dram));

            // log packet
            logRequest(MemCtrl::WRITE, pkt->requestorId(), pkt->qosValue(),
                       mem_pkt->addr, 1);

            assert(totalWriteQueueSize == isInWriteQueue.size());

            // Update stats
            stats.avgWrQLen = totalWriteQueueSize;
        } else {
            DPRINTF(MemCtrl,
                    "Merging write burst with existing queue entry\n");

            // keep track of the fact that this burst effectively
            // disappeared as it was merged with an existing one
            stats.mergedWrBursts++;
        }

        // Starting address of next memory pkt (aligned to burst_size boundary)
        addr = (addr | (burst_size - 1)) + 1;
    }

    // we do not wait for the writes to be sent to the actual memory,
    // but instead take responsibility for the consistency here and
    // snoop the write queue for any upcoming reads
    // @todo, if a pkt size is larger than burst size, we might need a
    // different front end latency
    accessAndRespond(pkt, frontendLatency);

    // If we are not already scheduled to get a request out of the
    // queue, do so now
    if (!nextReqEvent.scheduled()) {
        DPRINTF(MemCtrl, "Request scheduled immediately\n");
        schedule(nextReqEvent, curTick());
    }
}
void
MemCtrl::printQs() const
{
#if TRACING_ON
    DPRINTF(MemCtrl, "===READ QUEUE===\n\n");
    for (const auto& queue : readQueue) {
        for (const auto& packet : queue) {
            DPRINTF(MemCtrl, "Read %lu\n", packet->addr);
        }
    }

    DPRINTF(MemCtrl, "\n===RESP QUEUE===\n\n");
    for (const auto& packet : respQueue) {
        DPRINTF(MemCtrl, "Response %lu\n", packet->addr);
    }

    DPRINTF(MemCtrl, "\n===WRITE QUEUE===\n\n");
    for (const auto& queue : writeQueue) {
        for (const auto& packet : queue) {
            DPRINTF(MemCtrl, "Write %lu\n", packet->addr);
        }
    }
#endif // TRACING_ON
}
bool
MemCtrl::recvTimingReq(PacketPtr pkt)
{
    // This is where we enter from the outside world
    DPRINTF(MemCtrl, "recvTimingReq: request %s addr %lld size %d\n",
            pkt->cmdString(), pkt->getAddr(), pkt->getSize());

    panic_if(pkt->cacheResponding(), "Should not see packets where cache "
             "is responding");

    panic_if(!(pkt->isRead() || pkt->isWrite()),
             "Should only see read and writes at memory controller\n");

    // Calc avg gap between requests
    if (prevArrival != 0) {
        stats.totGap += curTick() - prevArrival;
    }
    prevArrival = curTick();

    // What type of media does this packet access?
    bool is_dram;
    if (dram && dram->getAddrRange().contains(pkt->getAddr())) {
        is_dram = true;
    } else if (nvm && nvm->getAddrRange().contains(pkt->getAddr())) {
        is_dram = false;
    } else {
        panic("Can't handle address range for packet %s\n",
              pkt->print());
    }

    // Find out how many memory packets a pkt translates to
    // If the burst size is equal or larger than the pkt size, then a pkt
    // translates to only one memory packet. Otherwise, a pkt translates to
    // multiple memory packets
    unsigned size = pkt->getSize();
    uint32_t burst_size = is_dram ? dram->bytesPerBurst() :
                                    nvm->bytesPerBurst();
    unsigned offset = pkt->getAddr() & (burst_size - 1);
    unsigned int pkt_count = divCeil(offset + size, burst_size);
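    // For example, a 32-byte request at address 0x70 with a 64-byte burst
    // size has offset = 0x30, so pkt_count = divCeil(0x30 + 32, 64) = 2:
    // the request straddles a burst boundary and becomes two bursts.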
    // run the QoS scheduler and assign a QoS priority value to the packet
    qosSchedule( { &readQueue, &writeQueue }, burst_size, pkt);

    // check local buffers and do not accept if full
    if (pkt->isWrite()) {
        assert(size != 0);
        if (writeQueueFull(pkt_count)) {
            DPRINTF(MemCtrl, "Write queue full, not accepting\n");
            // remember that we have to retry this port
            retryWrReq = true;
            stats.numWrRetry++;
            return false;
        } else {
            addToWriteQueue(pkt, pkt_count, is_dram);
            stats.writeReqs++;
            stats.bytesWrittenSys += size;
        }
    } else {
        assert(pkt->isRead());
        assert(size != 0);
        if (readQueueFull(pkt_count)) {
            DPRINTF(MemCtrl, "Read queue full, not accepting\n");
            // remember that we have to retry this port
            retryRdReq = true;
            stats.numRdRetry++;
            return false;
        } else {
            addToReadQueue(pkt, pkt_count, is_dram);
            stats.readReqs++;
            stats.bytesReadSys += size;
        }
    }

    return true;
}
void
MemCtrl::processRespondEvent()
{
    DPRINTF(MemCtrl,
            "processRespondEvent(): Some req has reached its readyTime\n");

    MemPacket* mem_pkt = respQueue.front();

    if (mem_pkt->isDram()) {
        // media specific checks and functions when read response is complete
        dram->respondEvent(mem_pkt->rank);
    }

    if (mem_pkt->burstHelper) {
        // it is a split packet
        mem_pkt->burstHelper->burstsServiced++;
        if (mem_pkt->burstHelper->burstsServiced ==
            mem_pkt->burstHelper->burstCount) {
            // we have now serviced all children packets of a system packet
            // so we can now respond to the requestor
            // @todo we probably want to have a different front end and back
            // end latency for split packets
            accessAndRespond(mem_pkt->pkt, frontendLatency + backendLatency);
            delete mem_pkt->burstHelper;
            mem_pkt->burstHelper = NULL;
        }
    } else {
        // it is not a split packet
        accessAndRespond(mem_pkt->pkt, frontendLatency + backendLatency);
    }

    // remove the packet from the response queue, but defer deleting it
    // until after its last use below
    respQueue.pop_front();

    if (!respQueue.empty()) {
        assert(respQueue.front()->readyTime >= curTick());
        assert(!respondEvent.scheduled());
        schedule(respondEvent, respQueue.front()->readyTime);
    } else {
        // if there is nothing left in any queue, signal a drain
        if (drainState() == DrainState::Draining &&
            !totalWriteQueueSize && !totalReadQueueSize &&
            allIntfDrained()) {

            DPRINTF(Drain, "Controller done draining\n");
            signalDrainDone();
        } else if (mem_pkt->isDram()) {
            // check the refresh state and kick the refresh event loop
            // into action again if banks already closed and just waiting
            // for read to complete
            dram->checkRefreshState(mem_pkt->rank);
        }
    }

    delete mem_pkt;

    // We have made a location in the queue available at this point,
    // so if there is a read that was forced to wait, retry now
    if (retryRdReq) {
        retryRdReq = false;
        port.sendRetryReq();
    }
}
MemPacketQueue::iterator
MemCtrl::chooseNext(MemPacketQueue& queue, Tick extra_col_delay)
{
    // This method does the arbitration between requests.

    MemPacketQueue::iterator ret = queue.end();

    if (!queue.empty()) {
        if (queue.size() == 1) {
            // available rank corresponds to state refresh idle
            MemPacket* mem_pkt = *(queue.begin());
            if (packetReady(mem_pkt)) {
                ret = queue.begin();
                DPRINTF(MemCtrl, "Single request, going to a free rank\n");
            } else {
                DPRINTF(MemCtrl, "Single request, going to a busy rank\n");
            }
        } else if (memSchedPolicy == Enums::fcfs) {
            // check if there is a packet going to a free rank
            for (auto i = queue.begin(); i != queue.end(); ++i) {
                MemPacket* mem_pkt = *i;
                if (packetReady(mem_pkt)) {
                    ret = i;
                    break;
                }
            }
        } else if (memSchedPolicy == Enums::frfcfs) {
            ret = chooseNextFRFCFS(queue, extra_col_delay);
        } else {
            panic("No scheduling policy chosen\n");
        }
    }
    return ret;
}
MemPacketQueue::iterator
MemCtrl::chooseNextFRFCFS(MemPacketQueue& queue, Tick extra_col_delay)
{
    auto selected_pkt_it = queue.end();
    Tick col_allowed_at = MaxTick;

    // time we need to issue a column command to be seamless
    const Tick min_col_at = std::max(nextBurstAt + extra_col_delay, curTick());

    // find optimal packet for each interface
    if (dram && nvm) {
        // create 2nd set of parameters for NVM
        auto nvm_pkt_it = queue.end();
        Tick nvm_col_at = MaxTick;

        // Select packet by default to give priority if both
        // can issue at the same time or seamlessly
        std::tie(selected_pkt_it, col_allowed_at) =
                 dram->chooseNextFRFCFS(queue, min_col_at);
        std::tie(nvm_pkt_it, nvm_col_at) =
                 nvm->chooseNextFRFCFS(queue, min_col_at);

        // Compare DRAM and NVM and select NVM if it can issue
        // earlier than the DRAM packet
        if (col_allowed_at > nvm_col_at) {
            selected_pkt_it = nvm_pkt_it;
        }
    } else if (dram) {
        std::tie(selected_pkt_it, col_allowed_at) =
                 dram->chooseNextFRFCFS(queue, min_col_at);
    } else if (nvm) {
        std::tie(selected_pkt_it, col_allowed_at) =
                 nvm->chooseNextFRFCFS(queue, min_col_at);
    }

    if (selected_pkt_it == queue.end()) {
        DPRINTF(MemCtrl, "%s no available packets found\n", __func__);
    }

    return selected_pkt_it;
}
void
MemCtrl::accessAndRespond(PacketPtr pkt, Tick static_latency)
{
    DPRINTF(MemCtrl, "Responding to Address %lld.. \n", pkt->getAddr());

    bool needsResponse = pkt->needsResponse();
    // do the actual memory access which also turns the packet into a
    // response
    if (dram && dram->getAddrRange().contains(pkt->getAddr())) {
        dram->access(pkt);
    } else if (nvm && nvm->getAddrRange().contains(pkt->getAddr())) {
        nvm->access(pkt);
    } else {
        panic("Can't handle address range for packet %s\n",
              pkt->print());
    }

    // turn packet around to go back to requestor if response expected
    if (needsResponse) {
        // access already turned the packet into a response
        assert(pkt->isResponse());
        // response_time consumes the static latency and is charged also
        // with headerDelay that takes into account the delay provided by
        // the xbar and also the payloadDelay that takes into account the
        // number of data beats.
        Tick response_time = curTick() + static_latency + pkt->headerDelay +
                             pkt->payloadDelay;
        // Here we reset the timing of the packet before sending it out.
        pkt->headerDelay = pkt->payloadDelay = 0;

        // queue the packet in the response queue to be sent out after
        // the static latency has passed
        port.schedTimingResp(pkt, response_time);
    } else {
        // @todo the packet is going to be deleted, and the MemPacket
        // is still having a pointer to it
        pendingDelete.reset(pkt);
    }

    DPRINTF(MemCtrl, "Done\n");

    return;
}
void
MemCtrl::pruneBurstTick()
{
    auto it = burstTicks.begin();
    while (it != burstTicks.end()) {
        auto current_it = it++;
        if (curTick() > *current_it) {
            DPRINTF(MemCtrl, "Removing burstTick for %d\n", *current_it);
            burstTicks.erase(current_it);
        }
    }
}
Tick
MemCtrl::getBurstWindow(Tick cmd_tick)
{
    // get tick aligned to burst window
    Tick burst_offset = cmd_tick % commandWindow;
    return (cmd_tick - burst_offset);
}
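// For example, with commandWindow = 5000 ticks, a command at tick 17500
// has burst_offset = 2500 and maps to the window starting at tick 15000.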
Tick
MemCtrl::verifySingleCmd(Tick cmd_tick, Tick max_cmds_per_burst)
{
    // start with assumption that there is no contention on command bus
    Tick cmd_at = cmd_tick;

    // get tick aligned to burst window
    Tick burst_tick = getBurstWindow(cmd_tick);

    // verify that we have command bandwidth to issue the command
    // if not, iterate over next window(s) until slot found
    while (burstTicks.count(burst_tick) >= max_cmds_per_burst) {
        DPRINTF(MemCtrl, "Contention found on command bus at %d\n",
                burst_tick);
        burst_tick += commandWindow;
        cmd_at = burst_tick;
    }

    // add command into burst window and return corresponding Tick
    burstTicks.insert(burst_tick);
    return cmd_at;
}
Tick
MemCtrl::verifyMultiCmd(Tick cmd_tick, Tick max_cmds_per_burst,
                        Tick max_multi_cmd_split)
{
    // start with assumption that there is no contention on command bus
    Tick cmd_at = cmd_tick;

    // get tick aligned to burst window
    Tick burst_tick = getBurstWindow(cmd_tick);

    // Command timing requirements are from 2nd command
    // Start with assumption that 2nd command will issue at cmd_at and
    // find prior slot for 1st command to issue
    // Given a maximum latency of max_multi_cmd_split between the commands,
    // find the burst at the maximum latency prior to cmd_at
    Tick burst_offset = 0;
    Tick first_cmd_offset = cmd_tick % commandWindow;
    while (max_multi_cmd_split > (first_cmd_offset + burst_offset)) {
        burst_offset += commandWindow;
    }
    // get the earliest burst aligned address for first command
    // ensure that the time does not go negative
    Tick first_cmd_tick = burst_tick - std::min(burst_offset, burst_tick);
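    // For example, with commandWindow = 5000, cmd_tick = 17500 and
    // max_multi_cmd_split = 6000: first_cmd_offset = 2500, the loop stops
    // at burst_offset = 5000 (2500 < 6000 but 7500 >= 6000), and
    // first_cmd_tick = 15000 - 5000 = 10000, i.e. the first command is
    // tried one window ahead of the second.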
    // Can required commands issue?
    bool first_can_issue = false;
    bool second_can_issue = false;
    // verify that we have command bandwidth to issue the command(s)
    while (!first_can_issue || !second_can_issue) {
        bool same_burst = (burst_tick == first_cmd_tick);
        auto first_cmd_count = burstTicks.count(first_cmd_tick);
        auto second_cmd_count = same_burst ? first_cmd_count + 1 :
                                burstTicks.count(burst_tick);

        first_can_issue = first_cmd_count < max_cmds_per_burst;
        second_can_issue = second_cmd_count < max_cmds_per_burst;

        if (!second_can_issue) {
            DPRINTF(MemCtrl, "Contention (cmd2) found on command bus at %d\n",
                    burst_tick);
            burst_tick += commandWindow;
            cmd_at = burst_tick;
        }

        // Verify max_multi_cmd_split isn't violated when command 2 is shifted
        // If commands initially were issued in same burst, they are
        // now in consecutive bursts and can still issue B2B
        bool gap_violated = !same_burst &&
            ((burst_tick - first_cmd_tick) > max_multi_cmd_split);

        if (!first_can_issue || (!second_can_issue && gap_violated)) {
            DPRINTF(MemCtrl, "Contention (cmd1) found on command bus at %d\n",
                    first_cmd_tick);
            first_cmd_tick += commandWindow;
        }
    }

    // Add command to burstTicks
    burstTicks.insert(burst_tick);
    burstTicks.insert(first_cmd_tick);

    return cmd_at;
}
bool
MemCtrl::inReadBusState(bool next_state) const
{
    // check the bus state
    if (next_state) {
        // use busStateNext to get the state that will be used
        // for the next burst
        return (busStateNext == MemCtrl::READ);
    } else {
        return (busState == MemCtrl::READ);
    }
}

bool
MemCtrl::inWriteBusState(bool next_state) const
{
    // check the bus state
    if (next_state) {
        // use busStateNext to get the state that will be used
        // for the next burst
        return (busStateNext == MemCtrl::WRITE);
    } else {
        return (busState == MemCtrl::WRITE);
    }
}
void
MemCtrl::doBurstAccess(MemPacket* mem_pkt)
{
    // first clean up the burstTick set, removing old entries
    // before adding new entries for next burst
    pruneBurstTick();

    // When was command issued?
    Tick cmd_at;

    // Issue the next burst and update bus state to reflect
    // when previous command was issued
    if (mem_pkt->isDram()) {
        std::vector<MemPacketQueue>& queue = selQueue(mem_pkt->isRead());
        std::tie(cmd_at, nextBurstAt) =
                 dram->doBurstAccess(mem_pkt, nextBurstAt, queue);

        // Update timing for NVM ranks if NVM is configured on this channel
        if (nvm)
            nvm->addRankToRankDelay(cmd_at);
    } else {
        std::tie(cmd_at, nextBurstAt) =
                 nvm->doBurstAccess(mem_pkt, nextBurstAt);

        // Update timing for DRAM ranks if DRAM is configured on this channel
        if (dram)
            dram->addRankToRankDelay(cmd_at);
    }

    DPRINTF(MemCtrl, "Access to %lld, ready at %lld next burst at %lld.\n",
            mem_pkt->addr, mem_pkt->readyTime, nextBurstAt);

    // Update the minimum timing between the requests, this is a
    // conservative estimate of when we have to schedule the next
    // request to not introduce any unnecessary bubbles. In most cases
    // we will wake up sooner than we have to.
    nextReqTime = nextBurstAt - (dram ? dram->commandOffset() :
                                        nvm->commandOffset());

    // Update the common bus stats
    if (mem_pkt->isRead()) {
        ++readsThisTime;
        // Update latency stats
        stats.requestorReadTotalLat[mem_pkt->requestorId()] +=
            mem_pkt->readyTime - mem_pkt->entryTime;
        stats.requestorReadBytes[mem_pkt->requestorId()] += mem_pkt->size;
    } else {
        ++writesThisTime;
        stats.requestorWriteBytes[mem_pkt->requestorId()] += mem_pkt->size;
        stats.requestorWriteTotalLat[mem_pkt->requestorId()] +=
            mem_pkt->readyTime - mem_pkt->entryTime;
    }
}
void
MemCtrl::processNextReqEvent()
{
    // transition is handled by QoS algorithm if enabled
    if (turnPolicy) {
        // select bus state - only done if QoS algorithms are in use
        busStateNext = selectNextBusState();
    }

    // detect bus state change
    bool switched_cmd_type = (busState != busStateNext);

    recordTurnaroundStats();

    DPRINTF(MemCtrl, "QoS Turnarounds selected state %s %s\n",
            (busState==MemCtrl::READ)?"READ":"WRITE",
            switched_cmd_type?"[turnaround triggered]":"");

    if (switched_cmd_type) {
        if (busState == MemCtrl::READ) {
            DPRINTF(MemCtrl,
                    "Switching to writes after %d reads with %d reads "
                    "waiting\n", readsThisTime, totalReadQueueSize);
            stats.rdPerTurnAround.sample(readsThisTime);
            readsThisTime = 0;
        } else {
            DPRINTF(MemCtrl,
                    "Switching to reads after %d writes with %d writes "
                    "waiting\n", writesThisTime, totalWriteQueueSize);
            stats.wrPerTurnAround.sample(writesThisTime);
            writesThisTime = 0;
        }
    }

    // updates current state
    busState = busStateNext;

    if (nvm) {
        for (auto queue = readQueue.rbegin();
             queue != readQueue.rend(); ++queue) {
            // select non-deterministic NVM read to issue
            // assume that we have the command bandwidth to issue this along
            // with additional RD/WR burst with needed bank operations
            if (nvm->readsWaitingToIssue()) {
                // select non-deterministic NVM read to issue
                nvm->chooseRead(*queue);
            }
        }
    }

    // check ranks for refresh/wakeup - uses busStateNext, so done after
    // turnaround decisions
    // Default to busy status and update based on interface specifics
    bool dram_busy = dram ? dram->isBusy() : true;
    bool nvm_busy = true;
    bool all_writes_nvm = false;
    if (nvm) {
        all_writes_nvm = nvm->numWritesQueued == totalWriteQueueSize;
        bool read_queue_empty = totalReadQueueSize == 0;
        nvm_busy = nvm->isBusy(read_queue_empty, all_writes_nvm);
    }
    // Default state of unused interface is 'true'
    // Simply AND the busy signals to determine if system is busy
    if (dram_busy && nvm_busy) {
        // if all ranks are refreshing wait for them to finish
        // and stall this state machine without taking any further
        // action, and do not schedule a new nextReqEvent
        return;
    }

    // when we get here it is either a read or a write
    if (busState == READ) {
        // track if we should switch or not
        bool switch_to_writes = false;

        if (totalReadQueueSize == 0) {
            // In the case there is no read request to go next,
            // trigger writes if we have passed the low threshold (or
            // if we are draining)
            if (!(totalWriteQueueSize == 0) &&
                (drainState() == DrainState::Draining ||
                 totalWriteQueueSize > writeLowThreshold)) {

                DPRINTF(MemCtrl,
                        "Switching to writes due to read queue empty\n");
                switch_to_writes = true;
            } else {
                // check if we are drained
                // not done draining until in PWR_IDLE state
                // ensuring all banks are closed and
                // have exited low power states
                if (drainState() == DrainState::Draining &&
                    respQueue.empty() && allIntfDrained()) {

                    DPRINTF(Drain, "MemCtrl controller done draining\n");
                    signalDrainDone();
                }

                // nothing to do, not even any point in scheduling an
                // event for the next request
                return;
            }
        } else {
            bool read_found = false;
            MemPacketQueue::iterator to_read;
            uint8_t prio = numPriorities();

            for (auto queue = readQueue.rbegin();
                 queue != readQueue.rend(); ++queue) {

                prio--;

                DPRINTF(QOS,
                        "Checking READ queue [%d] priority [%d elements]\n",
                        prio, queue->size());

                // Figure out which read request goes next
                // If we are changing command type, incorporate the minimum
                // bus turnaround delay which will be rank to rank delay
                to_read = chooseNext((*queue), switched_cmd_type ?
                                     minWriteToReadDataGap() : 0);

                if (to_read != queue->end()) {
                    // candidate read found
                    read_found = true;
                    break;
                }
            }

            // if no read to an available rank is found then return
            // at this point. There could be writes to the available ranks
            // which are above the required threshold. However, to
            // avoid adding more complexity to the code, return and wait
            // for a refresh event to kick things into action again.
            if (!read_found) {
                DPRINTF(MemCtrl, "No Reads Found - exiting\n");
                return;
            }

            auto mem_pkt = *to_read;

            doBurstAccess(mem_pkt);

            // sanity check
            assert(mem_pkt->size <= (mem_pkt->isDram() ?
                                     dram->bytesPerBurst() :
                                     nvm->bytesPerBurst()));
            assert(mem_pkt->readyTime >= curTick());

            // log the response
            logResponse(MemCtrl::READ, (*to_read)->requestorId(),
                        mem_pkt->qosValue(), mem_pkt->getAddr(), 1,
                        mem_pkt->readyTime - mem_pkt->entryTime);

            // Insert into response queue. It will be sent back to the
            // requestor at its readyTime
            if (respQueue.empty()) {
                assert(!respondEvent.scheduled());
                schedule(respondEvent, mem_pkt->readyTime);
            } else {
                assert(respQueue.back()->readyTime <= mem_pkt->readyTime);
                assert(respondEvent.scheduled());
            }

            respQueue.push_back(mem_pkt);

            // we have so many writes that we have to transition
            // don't transition if the writeRespQueue is full and
            // there are no other writes that can issue
            if ((totalWriteQueueSize > writeHighThreshold) &&
                !(nvm && all_writes_nvm && nvm->writeRespQueueFull())) {
                switch_to_writes = true;
            }

            // remove the request from the queue
            // the iterator is no longer valid
            readQueue[mem_pkt->qosValue()].erase(to_read);
        }

        // switching to writes, either because the read queue is empty
        // and the writes have passed the low threshold (or we are
        // draining), or because the writes hit the high threshold
        if (switch_to_writes) {
            // transition to writing
            busStateNext = WRITE;
        }
    } else {
        bool write_found = false;
        MemPacketQueue::iterator to_write;
        uint8_t prio = numPriorities();

        for (auto queue = writeQueue.rbegin();
             queue != writeQueue.rend(); ++queue) {

            prio--;

            DPRINTF(QOS,
                    "Checking WRITE queue [%d] priority [%d elements]\n",
                    prio, queue->size());

            // If we are changing command type, incorporate the minimum
            // bus turnaround delay
            to_write = chooseNext((*queue),
                                  switched_cmd_type ?
                                  minReadToWriteDataGap() : 0);

            if (to_write != queue->end()) {
                // candidate write found
                write_found = true;
                break;
            }
        }

        // if no writes to a rank that is available to service requests
        // (i.e. the rank is in refresh idle state) are found, then return.
        // There could be reads to the available ranks. However, to
        // avoid adding more complexity to the code, return at this point and
        // wait for a refresh event to kick things into action again.
        if (!write_found) {
            DPRINTF(MemCtrl, "No Writes Found - exiting\n");
            return;
        }

        auto mem_pkt = *to_write;

        // sanity check
        assert(mem_pkt->size <= (mem_pkt->isDram() ?
                                 dram->bytesPerBurst() :
                                 nvm->bytesPerBurst()));

        doBurstAccess(mem_pkt);

        isInWriteQueue.erase(burstAlign(mem_pkt->addr, mem_pkt->isDram()));

        // log the response
        logResponse(MemCtrl::WRITE, mem_pkt->requestorId(),
                    mem_pkt->qosValue(), mem_pkt->getAddr(), 1,
                    mem_pkt->readyTime - mem_pkt->entryTime);

        // remove the request from the queue - the iterator is no longer valid
        writeQueue[mem_pkt->qosValue()].erase(to_write);

        delete mem_pkt;

        // If we emptied the write queue, or got sufficiently below the
        // threshold (using the minWritesPerSwitch as the hysteresis) and
        // are not draining, or we have reads waiting and have done enough
        // writes, then switch to reads.
        // If we are interfacing to NVM and have filled the writeRespQueue,
        // with only NVM writes in Q, then switch to reads
        bool below_threshold =
            totalWriteQueueSize + minWritesPerSwitch < writeLowThreshold;
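        // Illustrative numbers only: assuming a 64-entry write buffer
        // with a 50% low threshold (writeLowThreshold = 32) and
        // minWritesPerSwitch = 16, below_threshold only becomes true
        // once fewer than 16 writes remain queued.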
        if (totalWriteQueueSize == 0 ||
            (below_threshold && drainState() != DrainState::Draining) ||
            (totalReadQueueSize && writesThisTime >= minWritesPerSwitch) ||
            (totalReadQueueSize && nvm && nvm->writeRespQueueFull() &&
             all_writes_nvm)) {

            // turn the bus back around for reads again
            busStateNext = MemCtrl::READ;

            // note that we switch back to reads also in the idle
            // case, which eventually will check for any draining and
            // also pause any further scheduling if there is really
            // nothing to do
        }
    }
    // It is possible that a refresh to another rank kicks things back into
    // action before reaching this point.
    if (!nextReqEvent.scheduled())
        schedule(nextReqEvent, std::max(nextReqTime, curTick()));

    // If there is space available and we have writes waiting then let
    // them retry. This is done here to ensure that the retry does not
    // cause a nextReqEvent to be scheduled before we do so as part of
    // the next request processing
    if (retryWrReq && totalWriteQueueSize < writeBufferSize) {
        retryWrReq = false;
        port.sendRetryReq();
    }
}
bool
MemCtrl::packetReady(MemPacket* pkt)
{
    return (pkt->isDram() ?
            dram->burstReady(pkt) : nvm->burstReady(pkt));
}
Tick
MemCtrl::minReadToWriteDataGap()
{
    Tick dram_min = dram ? dram->minReadToWriteDataGap() : MaxTick;
    Tick nvm_min = nvm ? nvm->minReadToWriteDataGap() : MaxTick;
    return std::min(dram_min, nvm_min);
}

Tick
MemCtrl::minWriteToReadDataGap()
{
    Tick dram_min = dram ? dram->minWriteToReadDataGap() : MaxTick;
    Tick nvm_min = nvm ? nvm->minWriteToReadDataGap() : MaxTick;
    return std::min(dram_min, nvm_min);
}
Addr
MemCtrl::burstAlign(Addr addr, bool is_dram) const
{
    if (is_dram)
        return (addr & ~(Addr(dram->bytesPerBurst() - 1)));
    else
        return (addr & ~(Addr(nvm->bytesPerBurst() - 1)));
}
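// For example, with a 64-byte burst, burstAlign(0x12345, is_dram) clears
// the low six address bits and returns 0x12340, the start of the
// containing burst.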
MemCtrl::CtrlStats::CtrlStats(MemCtrl &_ctrl)
    : Stats::Group(&_ctrl),
    ctrl(_ctrl),

    ADD_STAT(readReqs, "Number of read requests accepted"),
    ADD_STAT(writeReqs, "Number of write requests accepted"),

    ADD_STAT(readBursts,
             "Number of controller read bursts, "
             "including those serviced by the write queue"),
    ADD_STAT(writeBursts,
             "Number of controller write bursts, "
             "including those merged in the write queue"),
    ADD_STAT(servicedByWrQ,
             "Number of controller read bursts serviced by the write queue"),
    ADD_STAT(mergedWrBursts,
             "Number of controller write bursts merged with an existing one"),

    ADD_STAT(neitherReadNorWriteReqs,
             "Number of requests that are neither read nor write"),

    ADD_STAT(avgRdQLen, "Average read queue length when enqueuing"),
    ADD_STAT(avgWrQLen, "Average write queue length when enqueuing"),

    ADD_STAT(numRdRetry, "Number of times read queue was full causing retry"),
    ADD_STAT(numWrRetry, "Number of times write queue was full causing retry"),

    ADD_STAT(readPktSize, "Read request sizes (log2)"),
    ADD_STAT(writePktSize, "Write request sizes (log2)"),

    ADD_STAT(rdQLenPdf, "What read queue length does an incoming req see"),
    ADD_STAT(wrQLenPdf, "What write queue length does an incoming req see"),

    ADD_STAT(rdPerTurnAround,
             "Reads before turning the bus around for writes"),
    ADD_STAT(wrPerTurnAround,
             "Writes before turning the bus around for reads"),

    ADD_STAT(bytesReadWrQ, "Total number of bytes read from write queue"),
    ADD_STAT(bytesReadSys, "Total read bytes from the system interface side"),
    ADD_STAT(bytesWrittenSys,
             "Total written bytes from the system interface side"),

    ADD_STAT(avgRdBWSys, "Average system read bandwidth in MiByte/s"),
    ADD_STAT(avgWrBWSys, "Average system write bandwidth in MiByte/s"),

    ADD_STAT(totGap, "Total gap between requests"),
    ADD_STAT(avgGap, "Average gap between requests"),

    ADD_STAT(requestorReadBytes, "Per-requestor bytes read from memory"),
    ADD_STAT(requestorWriteBytes, "Per-requestor bytes written to memory"),
    ADD_STAT(requestorReadRate,
             "Per-requestor bytes read from memory rate (Bytes/sec)"),
    ADD_STAT(requestorWriteRate,
             "Per-requestor bytes written to memory rate (Bytes/sec)"),
    ADD_STAT(requestorReadAccesses,
             "Per-requestor read serviced memory accesses"),
    ADD_STAT(requestorWriteAccesses,
             "Per-requestor write serviced memory accesses"),
    ADD_STAT(requestorReadTotalLat,
             "Per-requestor read total memory access latency"),
    ADD_STAT(requestorWriteTotalLat,
             "Per-requestor write total memory access latency"),
    ADD_STAT(requestorReadAvgLat,
             "Per-requestor read average memory access latency"),
    ADD_STAT(requestorWriteAvgLat,
             "Per-requestor write average memory access latency")
{
}
void
MemCtrl::CtrlStats::regStats()
{
    using namespace Stats;

    assert(ctrl.system());
    const auto max_requestors = ctrl.system()->maxRequestors();

    avgRdQLen.precision(2);
    avgWrQLen.precision(2);

    readPktSize.init(ceilLog2(ctrl.system()->cacheLineSize()) + 1);
    writePktSize.init(ceilLog2(ctrl.system()->cacheLineSize()) + 1);
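    // For example, with a 64-byte cache line, ceilLog2(64) + 1 = 7 buckets
    // cover the possible request sizes 1, 2, 4, ..., 64 bytes.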
    rdQLenPdf.init(ctrl.readBufferSize);
    wrQLenPdf.init(ctrl.writeBufferSize);

    rdPerTurnAround
        .init(ctrl.readBufferSize)
        .flags(nozero);
    wrPerTurnAround
        .init(ctrl.writeBufferSize)
        .flags(nozero);

    avgRdBWSys.precision(2);
    avgWrBWSys.precision(2);
    avgGap.precision(2);

    // per-requestor bytes read and written to memory
    requestorReadBytes
        .init(max_requestors)
        .flags(nozero | nonan);

    requestorWriteBytes
        .init(max_requestors)
        .flags(nozero | nonan);

    // per-requestor bytes read and written to memory rate
    requestorReadRate
        .flags(nozero | nonan)
        .precision(12);

    requestorReadAccesses
        .init(max_requestors)
        .flags(nozero);

    requestorWriteAccesses
        .init(max_requestors)
        .flags(nozero);

    requestorReadTotalLat
        .init(max_requestors)
        .flags(nozero | nonan);

    requestorReadAvgLat
        .flags(nonan)
        .precision(2);

    requestorWriteRate
        .flags(nozero | nonan)
        .precision(12);

    requestorWriteTotalLat
        .init(max_requestors)
        .flags(nozero | nonan);

    requestorWriteAvgLat
        .flags(nonan)
        .precision(2);

    for (int i = 0; i < max_requestors; i++) {
        const std::string requestor = ctrl.system()->getRequestorName(i);
        requestorReadBytes.subname(i, requestor);
        requestorReadRate.subname(i, requestor);
        requestorWriteBytes.subname(i, requestor);
        requestorWriteRate.subname(i, requestor);
        requestorReadAccesses.subname(i, requestor);
        requestorWriteAccesses.subname(i, requestor);
        requestorReadTotalLat.subname(i, requestor);
        requestorReadAvgLat.subname(i, requestor);
        requestorWriteTotalLat.subname(i, requestor);
        requestorWriteAvgLat.subname(i, requestor);
    }

    avgRdBWSys = (bytesReadSys / 1000000) / simSeconds;
    avgWrBWSys = (bytesWrittenSys / 1000000) / simSeconds;

    avgGap = totGap / (readReqs + writeReqs);

    requestorReadRate = requestorReadBytes / simSeconds;
    requestorWriteRate = requestorWriteBytes / simSeconds;
    requestorReadAvgLat = requestorReadTotalLat / requestorReadAccesses;
    requestorWriteAvgLat = requestorWriteTotalLat / requestorWriteAccesses;
}
void
MemCtrl::recvFunctional(PacketPtr pkt)
{
    if (dram && dram->getAddrRange().contains(pkt->getAddr())) {
        // rely on the abstract memory
        dram->functionalAccess(pkt);
    } else if (nvm && nvm->getAddrRange().contains(pkt->getAddr())) {
        // rely on the abstract memory
        nvm->functionalAccess(pkt);
    } else {
        panic("Can't handle address range for packet %s\n",
              pkt->print());
    }
}
Port &
MemCtrl::getPort(const std::string &if_name, PortID idx)
{
    if (if_name != "port") {
        return QoS::MemCtrl::getPort(if_name, idx);
    } else {
        return port;
    }
}
bool
MemCtrl::allIntfDrained() const
{
    // ensure dram is in power down and refresh IDLE states
    bool dram_drained = !dram || dram->allRanksDrained();
    // No outstanding NVM writes
    // All other queues verified as needed with calling logic
    bool nvm_drained = !nvm || nvm->allRanksDrained();
    return (dram_drained && nvm_drained);
}
DrainState
MemCtrl::drain()
{
    // if there is anything in any of our internal queues, keep track
    // of that as well
    if (!(!totalWriteQueueSize && !totalReadQueueSize && respQueue.empty() &&
          allIntfDrained())) {

        DPRINTF(Drain, "Memory controller not drained, write: %d, read: %d,"
                " resp: %d\n", totalWriteQueueSize, totalReadQueueSize,
                respQueue.size());

        // the only queue that is not drained automatically over time
        // is the write queue, thus kick things into action if needed
        if (!totalWriteQueueSize && !nextReqEvent.scheduled()) {
            schedule(nextReqEvent, curTick());
        }

        return DrainState::Draining;
    } else {
        return DrainState::Drained;
    }
}
void
MemCtrl::drainResume()
{
    if (!isTimingMode && system()->isTimingMode()) {
        // if we switched to timing mode, kick things into action,
        // and behave as if we restored from a checkpoint
        startup();
        if (dram)
            dram->startup();
    } else if (isTimingMode && !system()->isTimingMode()) {
        // if we switch from timing mode, stop the refresh events to
        // not cause issues with KVM
        if (dram)
            dram->suspend();
    }

    // update the mode
    isTimingMode = system()->isTimingMode();
}
MemCtrl::MemoryPort::MemoryPort(const std::string& name, MemCtrl& _ctrl)
    : QueuedResponsePort(name, &_ctrl, queue), queue(_ctrl, *this, true),
      ctrl(_ctrl)
{ }
AddrRangeList
MemCtrl::MemoryPort::getAddrRanges() const
{
    AddrRangeList ranges;
    if (ctrl.dram) {
        DPRINTF(DRAM, "Pushing DRAM ranges to port\n");
        ranges.push_back(ctrl.dram->getAddrRange());
    }
    if (ctrl.nvm) {
        DPRINTF(NVM, "Pushing NVM ranges to port\n");
        ranges.push_back(ctrl.nvm->getAddrRange());
    }
    return ranges;
}
void
MemCtrl::MemoryPort::recvFunctional(PacketPtr pkt)
{
    pkt->pushLabel(ctrl.name());

    if (!queue.trySatisfyFunctional(pkt)) {
        // Default implementation of SimpleTimingPort::recvFunctional()
        // calls recvAtomic() and throws away the latency; we can save a
        // little here by just not calculating the latency.
        ctrl.recvFunctional(pkt);
    }

    pkt->popLabel();
}
Tick
MemCtrl::MemoryPort::recvAtomic(PacketPtr pkt)
{
    return ctrl.recvAtomic(pkt);
}

bool
MemCtrl::MemoryPort::recvTimingReq(PacketPtr pkt)
{
    // pass it to the memory controller
    return ctrl.recvTimingReq(pkt);
}
MemCtrl*
MemCtrlParams::create()
{
    return new MemCtrl(this);
}