/*
 * Copyright (c) 2010-2020 ARM Limited
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2013 Amin Farmahini-Farahani
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "mem/dram_ctrl.hh"

#include "base/bitfield.hh"
#include "base/trace.hh"
#include "debug/DRAM.hh"
#include "debug/DRAMPower.hh"
#include "debug/DRAMState.hh"
#include "debug/Drain.hh"
#include "debug/QOS.hh"
#include "params/DRAMInterface.hh"
#include "sim/system.hh"
DRAMCtrl::DRAMCtrl(const DRAMCtrlParams* p) :
    port(name() + ".port", *this), isTimingMode(false),
    retryRdReq(false), retryWrReq(false),
    nextReqEvent([this]{ processNextReqEvent(); }, name()),
    respondEvent([this]{ processRespondEvent(); }, name()),
    readBufferSize(dram->readBufferSize),
    writeBufferSize(dram->writeBufferSize),
    writeHighThreshold(writeBufferSize * p->write_high_thresh_perc / 100.0),
    writeLowThreshold(writeBufferSize * p->write_low_thresh_perc / 100.0),
    minWritesPerSwitch(p->min_writes_per_switch),
    writesThisTime(0), readsThisTime(0),
    memSchedPolicy(p->mem_sched_policy),
    frontendLatency(p->static_frontend_latency),
    backendLatency(p->static_backend_latency),
    nextBurstAt(0), prevArrival(0),

    readQueue.resize(p->qos_priorities);
    writeQueue.resize(p->qos_priorities);

    // perform a basic check of the write thresholds
    if (p->write_low_thresh_perc >= p->write_high_thresh_perc)
        fatal("Write buffer low threshold %d must be smaller than the "
              "high threshold %d\n", p->write_low_thresh_perc,
              p->write_high_thresh_perc);
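    // Illustrative: with a 64-entry write buffer,
    // write_high_thresh_perc = 85 and write_low_thresh_perc = 50 give
    // writeHighThreshold = 54 and writeLowThreshold = 32 entries; the
    // controller switches to writes above the high mark and drains
    // back down toward the low mark.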
    if (!port.isConnected()) {
        fatal("DRAMCtrl %s is unconnected!\n", name());
    } else {
        port.sendRangeChange();
    }

    // remember the memory system mode of operation
    isTimingMode = system()->isTimingMode();

    // shift the bus busy time sufficiently far ahead that we never
    // have to worry about negative values when computing the time for
    // the next request; this adds an insignificant bubble at the
    // start of simulation
    nextBurstAt = curTick() + dram->tRC();
DRAMCtrl::recvAtomic(PacketPtr pkt)

    DPRINTF(DRAM, "recvAtomic: %s 0x%x\n", pkt->cmdString(), pkt->getAddr());

    panic_if(pkt->cacheResponding(), "Should not see packets where cache "
             "is responding");

    // do the actual memory access and turn the packet into a response

    if (pkt->hasData()) {
        // this value is not supposed to be accurate, just enough to
        // keep things going, mimic a closed page
        latency = dram->accessLatency();
    }
DRAMCtrl::readQueueFull(unsigned int neededEntries) const

    DPRINTF(DRAM,
            "Read queue limit %d, current size %d, entries needed %d\n",
            readBufferSize, totalReadQueueSize + respQueue.size(),
            neededEntries);

    auto rdsize_new = totalReadQueueSize + respQueue.size() + neededEntries;
    return rdsize_new > readBufferSize;
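    // Illustrative: with readBufferSize = 32, 28 queued reads, 3
    // responses still in respQueue and neededEntries = 2, rdsize_new
    // = 33 > 32 and the packet is rejected; note that in-flight
    // responses count against the read buffer capacity.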
DRAMCtrl::writeQueueFull(unsigned int neededEntries) const

    DPRINTF(DRAM,
            "Write queue limit %d, current size %d, entries needed %d\n",
            writeBufferSize, totalWriteQueueSize, neededEntries);

    auto wrsize_new = (totalWriteQueueSize + neededEntries);
    return wrsize_new > writeBufferSize;
DRAMInterface::decodePacket(const PacketPtr pkt, Addr dramPktAddr,
                            unsigned size, bool isRead) const

    // decode the address based on the address mapping scheme, with
    // Ro, Ra, Co, Ba and Ch denoting row, rank, column, bank and
    // channel, respectively

    // use a 64-bit unsigned during the computations as the row is
    // always the top bits, and check before creating the DRAMPacket

    // truncate the address to a DRAM burst, which makes it unique to
    // a specific column, row, bank, rank and channel
    Addr addr = dramPktAddr / burstSize;

    // we have removed the lowest order address bits that denote the
    // position within the column
    if (addrMapping == Enums::RoRaBaChCo || addrMapping == Enums::RoRaBaCoCh) {
        // the lowest order bits denote the column to ensure that
        // sequential cache lines occupy the same row
        addr = addr / columnsPerRowBuffer;

        // after the channel bits, get the bank bits to interleave
        bank = addr % banksPerRank;
        addr = addr / banksPerRank;

        // after the bank, we get the rank bits which thus interleaves
        rank = addr % ranksPerChannel;
        addr = addr / ranksPerChannel;

        // lastly, get the row bits, no need to remove them from addr
        row = addr % rowsPerBank;
    } else if (addrMapping == Enums::RoCoRaBaCh) {
        // optimise for closed page mode and utilise maximum
        // parallelism of the DRAM (at the cost of power)

        // take out the lower-order column bits
        addr = addr / columnsPerStripe;

        // start with the bank bits, as this provides the maximum
        // opportunity for parallelism between requests
        bank = addr % banksPerRank;
        addr = addr / banksPerRank;

        // next get the rank bits
        rank = addr % ranksPerChannel;
        addr = addr / ranksPerChannel;

        // next, the higher-order column bits
        addr = addr / (columnsPerRowBuffer / columnsPerStripe);

        // lastly, get the row bits, no need to remove them from addr
        row = addr % rowsPerBank;
    } else
        panic("Unknown address mapping policy chosen!");

    assert(rank < ranksPerChannel);
    assert(bank < banksPerRank);
    assert(row < rowsPerBank);
    assert(row < Bank::NO_ROW);

    DPRINTF(DRAM, "Address: %lld Rank %d Bank %d Row %d\n",
            dramPktAddr, rank, bank, row);
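    // Worked example (illustrative, single channel): with a 64-byte
    // burst, 128 columns per row buffer, 8 banks per rank and 2 ranks,
    // RoRaBaChCo decodes byte address 0x200040 as:
    //   addr = 0x200040 / 64  = 0x8001  (burst number)
    //   addr = 0x8001 / 128   = 0x100   (column bits dropped)
    //   bank = 0x100 % 8      = 0;  addr = 0x100 / 8 = 0x20
    //   rank = 0x20 % 2       = 0;  addr = 0x20 / 2  = 0x10
    //   row  = 0x10 % rowsPerBank = 16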
    if (isRead) {
        // increment read entries of the rank
        ++ranks[rank]->readEntries;
    } else {
        // increment write entries of the rank
        ++ranks[rank]->writeEntries;
    }

    // create the corresponding DRAM packet with the entry time and
    // ready time set to the current tick, the latter will be updated
    uint16_t bank_id = banksPerRank * rank + bank;

    return new DRAMPacket(pkt, isRead, rank, bank, row, bank_id, dramPktAddr,
                          size, ranks[rank]->banks[bank]);
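// Note on bank_id: banks are numbered flat across the channel, so with
// 8 banks per rank, rank 1 bank 3 gets bank_id = 8 * 1 + 3 = 11, which
// is used to index the per-bank burst statistics.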
DRAMCtrl::addToReadQueue(PacketPtr pkt, unsigned int pktCount)

    // only add to the read queue here. whenever the request is
    // eventually done, set the readyTime, and call schedule()
    assert(!pkt->isWrite());

    assert(pktCount != 0);

    // if the request size is larger than burst size, the pkt is split into
    // multiple DRAM packets
    // Note if the pkt starting address is not aligned to burst size, the
    // address of first DRAM packet is kept unaligned. Subsequent DRAM packets
    // are aligned to burst size boundaries. This is to ensure we accurately
    // check read packets against packets in write queue.
    const Addr base_addr = dram->getCtrlAddr(pkt->getAddr());
    Addr addr = base_addr;
    unsigned pktsServicedByWrQ = 0;
    BurstHelper* burst_helper = NULL;
    uint32_t burstSize = dram->bytesPerBurst();
    for (int cnt = 0; cnt < pktCount; ++cnt) {
        unsigned size = std::min((addr | (burstSize - 1)) + 1,
                                 base_addr + pkt->getSize()) - addr;
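        // Worked example (illustrative): a 96-byte packet starting at
        // addr 0x70 with a 64-byte burst gives, on the first iteration,
        // size = min((0x70 | 63) + 1, 0x70 + 96) - 0x70
        //      = min(0x80, 0xD0) - 0x70 = 16 bytes (unaligned head);
        // the next iterations then cover 0x80-0xBF (64 bytes) and
        // 0xC0-0xCF (16 bytes), each starting on a burst boundary.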
        stats.readPktSize[ceilLog2(size)]++;

        stats.masterReadAccesses[pkt->masterId()]++;

        // First check write buffer to see if the data is already at
        // the controller
        bool foundInWrQ = false;
        Addr burst_addr = burstAlign(addr);
        // if the burst address is not present then there is no need
        // to look any further
        if (isInWriteQueue.find(burst_addr) != isInWriteQueue.end()) {
            for (const auto& vec : writeQueue) {
                for (const auto& p : vec) {
                    // check if the read is subsumed in the write queue
                    // packet we are looking at
                    if (p->addr <= addr &&
                        ((addr + size) <= (p->addr + p->size))) {

                        foundInWrQ = true;
                        stats.servicedByWrQ++;
                        pktsServicedByWrQ++;
                        DPRINTF(DRAM,
                                "Read to addr %lld with size %d serviced by "
                                "write queue\n", addr, size);
                        stats.bytesReadWrQ += burstSize;
                        break;
                    }
                }
            }
        }
        // If not found in the write q, make a DRAM packet and
        // push it onto the read queue
        if (!foundInWrQ) {

            // Make the burst helper for split packets
            if (pktCount > 1 && burst_helper == NULL) {
                DPRINTF(DRAM, "Read to addr %lld translates to %d "
                        "dram requests\n", pkt->getAddr(), pktCount);
                burst_helper = new BurstHelper(pktCount);
            }

            DRAMPacket* dram_pkt = dram->decodePacket(pkt, addr, size, true);
            dram_pkt->burstHelper = burst_helper;

            assert(!readQueueFull(1));
            stats.rdQLenPdf[totalReadQueueSize + respQueue.size()]++;

            DPRINTF(DRAM, "Adding to read queue\n");

            readQueue[dram_pkt->qosValue()].push_back(dram_pkt);

            logRequest(MemCtrl::READ, pkt->masterId(), pkt->qosValue(),
                       dram_pkt->addr, 1);

            stats.avgRdQLen = totalReadQueueSize + respQueue.size();
        }

        // Starting address of next dram pkt (aligned to burst boundary)
        addr = (addr | (burstSize - 1)) + 1;
    }
    // If all packets are serviced by write queue, we send the response back
    if (pktsServicedByWrQ == pktCount) {
        accessAndRespond(pkt, frontendLatency);
        return;
    }

    // Update how many split packets are serviced by write queue
    if (burst_helper != NULL)
        burst_helper->burstsServiced = pktsServicedByWrQ;

    // If we are not already scheduled to get a request out of the
    // queue, do so now
    if (!nextReqEvent.scheduled()) {
        DPRINTF(DRAM, "Request scheduled immediately\n");
        schedule(nextReqEvent, curTick());
    }
DRAMCtrl::addToWriteQueue(PacketPtr pkt, unsigned int pktCount)

    // only add to the write queue here. whenever the request is
    // eventually done, set the readyTime, and call schedule()
    assert(pkt->isWrite());

    // if the request size is larger than burst size, the pkt is split into
    // multiple DRAM packets
    const Addr base_addr = dram->getCtrlAddr(pkt->getAddr());
    Addr addr = base_addr;
    uint32_t burstSize = dram->bytesPerBurst();
    for (int cnt = 0; cnt < pktCount; ++cnt) {
        unsigned size = std::min((addr | (burstSize - 1)) + 1,
                                 base_addr + pkt->getSize()) - addr;
        stats.writePktSize[ceilLog2(size)]++;

        stats.masterWriteAccesses[pkt->masterId()]++;

        // see if we can merge with an existing item in the write
        // queue and keep track of whether we have merged or not
        bool merged = isInWriteQueue.find(burstAlign(addr)) !=
            isInWriteQueue.end();
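        // Illustrative: with 64-byte bursts, a write covering 0x1020
        // arrives while a write burst for 0x1000 is queued; both
        // burst-align to 0x1000, so the new write merges into the
        // existing entry and mergedWrBursts is incremented below.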
        // if the item was not merged we need to create a new write
        // and enqueue it
        if (!merged) {
            DRAMPacket* dram_pkt = dram->decodePacket(pkt, addr, size, false);

            assert(totalWriteQueueSize < writeBufferSize);
            stats.wrQLenPdf[totalWriteQueueSize]++;

            DPRINTF(DRAM, "Adding to write queue\n");

            writeQueue[dram_pkt->qosValue()].push_back(dram_pkt);
            isInWriteQueue.insert(burstAlign(addr));

            logRequest(MemCtrl::WRITE, pkt->masterId(), pkt->qosValue(),
                       dram_pkt->addr, 1);

            assert(totalWriteQueueSize == isInWriteQueue.size());

            stats.avgWrQLen = totalWriteQueueSize;
        } else {
            DPRINTF(DRAM, "Merging write burst with existing queue entry\n");

            // keep track of the fact that this burst effectively
            // disappeared as it was merged with an existing one
            stats.mergedWrBursts++;
        }

        // Starting address of next dram pkt (aligned to burstSize boundary)
        addr = (addr | (burstSize - 1)) + 1;
    }
    // we do not wait for the writes to be sent to the actual memory,
    // but instead take responsibility for the consistency here and
    // snoop the write queue for any upcoming reads
    // @todo, if a pkt size is larger than burst size, we might need a
    // different front end latency
    accessAndRespond(pkt, frontendLatency);

    // If we are not already scheduled to get a request out of the
    // queue, do so now
    if (!nextReqEvent.scheduled()) {
        DPRINTF(DRAM, "Request scheduled immediately\n");
        schedule(nextReqEvent, curTick());
    }
DRAMCtrl::printQs() const

    DPRINTF(DRAM, "===READ QUEUE===\n\n");
    for (const auto& queue : readQueue) {
        for (const auto& packet : queue) {
            DPRINTF(DRAM, "Read %lu\n", packet->addr);
        }
    }

    DPRINTF(DRAM, "\n===RESP QUEUE===\n\n");
    for (const auto& packet : respQueue) {
        DPRINTF(DRAM, "Response %lu\n", packet->addr);
    }

    DPRINTF(DRAM, "\n===WRITE QUEUE===\n\n");
    for (const auto& queue : writeQueue) {
        for (const auto& packet : queue) {
            DPRINTF(DRAM, "Write %lu\n", packet->addr);
        }
    }
DRAMCtrl::recvTimingReq(PacketPtr pkt)

    // This is where we enter from the outside world
    DPRINTF(DRAM, "recvTimingReq: request %s addr %lld size %d\n",
            pkt->cmdString(), pkt->getAddr(), pkt->getSize());

    panic_if(pkt->cacheResponding(), "Should not see packets where cache "
             "is responding");

    panic_if(!(pkt->isRead() || pkt->isWrite()),
             "Should only see reads and writes at memory controller\n");

    // Calc avg gap between requests
    if (prevArrival != 0) {
        stats.totGap += curTick() - prevArrival;
    }
    prevArrival = curTick();

    // Find out how many dram packets a pkt translates to
    // If the burst size is equal or larger than the pkt size, then a pkt
    // translates to only one dram packet. Otherwise, a pkt translates to
    // multiple dram packets
    unsigned size = pkt->getSize();
    uint32_t burstSize = dram->bytesPerBurst();
    unsigned offset = pkt->getAddr() & (burstSize - 1);
    unsigned int dram_pkt_count = divCeil(offset + size, burstSize);
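    // Illustrative: a 128-byte packet at address 0x1030 with 64-byte
    // bursts has offset = 0x30 = 48, so dram_pkt_count =
    // divCeil(48 + 128, 64) = 3 bursts (16 + 64 + 48 bytes).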
    // run the QoS scheduler and assign a QoS priority value to the packet
    qosSchedule( { &readQueue, &writeQueue }, burstSize, pkt);

    // check local buffers and do not accept if full
    if (pkt->isWrite()) {
        if (writeQueueFull(dram_pkt_count)) {
            DPRINTF(DRAM, "Write queue full, not accepting\n");
            // remember that we have to retry this port
            retryWrReq = true;
            stats.numWrRetry++;
            return false;
        } else {
            addToWriteQueue(pkt, dram_pkt_count);
            stats.bytesWrittenSys += size;
        }
    } else {
        assert(pkt->isRead());
        if (readQueueFull(dram_pkt_count)) {
            DPRINTF(DRAM, "Read queue full, not accepting\n");
            // remember that we have to retry this port
            retryRdReq = true;
            stats.numRdRetry++;
            return false;
        } else {
            addToReadQueue(pkt, dram_pkt_count);
            stats.bytesReadSys += size;
        }
    }

    return true;
DRAMCtrl::processRespondEvent()

    DPRINTF(DRAM,
            "processRespondEvent(): Some req has reached its readyTime\n");

    DRAMPacket* dram_pkt = respQueue.front();

    // media specific checks and functions when read response is complete
    dram->respondEvent(dram_pkt->rank);

    if (dram_pkt->burstHelper) {
        // it is a split packet
        dram_pkt->burstHelper->burstsServiced++;
        if (dram_pkt->burstHelper->burstsServiced ==
            dram_pkt->burstHelper->burstCount) {
            // we have now serviced all children packets of a system packet
            // so we can now respond to the requester
            // @todo we probably want to have a different front end and back
            // end latency for split packets
            accessAndRespond(dram_pkt->pkt, frontendLatency + backendLatency);
            delete dram_pkt->burstHelper;
            dram_pkt->burstHelper = NULL;
        }
    } else {
        // it is not a split packet
        accessAndRespond(dram_pkt->pkt, frontendLatency + backendLatency);
    }
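    // Illustrative: a 256-byte system packet split into four 64-byte
    // bursts carries a BurstHelper with burstCount = 4; the requester
    // sees a single response only when the fourth burst completes.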
    assert(respQueue.front() == dram_pkt);
    respQueue.pop_front();

    if (!respQueue.empty()) {
        assert(respQueue.front()->readyTime >= curTick());
        assert(!respondEvent.scheduled());
        schedule(respondEvent, respQueue.front()->readyTime);
    } else {
        // if there is nothing left in any queue, signal a drain
        if (drainState() == DrainState::Draining &&
            !totalWriteQueueSize && !totalReadQueueSize &&
            dram->allRanksDrained()) {

            DPRINTF(Drain, "DRAM controller done draining\n");
            signalDrainDone();
        } else {
            // check the refresh state and kick the refresh event loop
            // into action again if banks already closed and just waiting
            // for read to complete
            dram->checkRefreshState(dram_pkt->rank);
        }
    }

    // We have made a location in the queue available at this point,
    // so if there is a read that was forced to wait, retry now
    if (retryRdReq) {
        retryRdReq = false;
        port.sendRetryReq();
    }
DRAMPacketQueue::iterator
DRAMCtrl::chooseNext(DRAMPacketQueue& queue, Tick extra_col_delay)

    // This method does the arbitration between requests.

    DRAMPacketQueue::iterator ret = queue.end();

    if (!queue.empty()) {
        if (queue.size() == 1) {
            // available rank corresponds to state refresh idle
            DRAMPacket* dram_pkt = *(queue.begin());
            if (dram->burstReady(dram_pkt->rank)) {
                ret = queue.begin();
                DPRINTF(DRAM, "Single request, going to a free rank\n");
            } else {
                DPRINTF(DRAM, "Single request, going to a busy rank\n");
            }
        } else if (memSchedPolicy == Enums::fcfs) {
            // check if there is a packet going to a free rank
            for (auto i = queue.begin(); i != queue.end(); ++i) {
                DRAMPacket* dram_pkt = *i;
                if (dram->burstReady(dram_pkt->rank)) {
                    ret = i;
                    break;
                }
            }
        } else if (memSchedPolicy == Enums::frfcfs) {
            ret = chooseNextFRFCFS(queue, extra_col_delay);
        } else {
            panic("No scheduling policy chosen\n");
        }
    }
    return ret;
DRAMPacketQueue::iterator
DRAMCtrl::chooseNextFRFCFS(DRAMPacketQueue& queue, Tick extra_col_delay)

    // Only determine this if needed
    vector<uint32_t> earliest_banks(dram->numRanks(), 0);

    // Has minBankPrep been called to populate earliest_banks?
    bool filled_earliest_banks = false;
    // can the PRE/ACT sequence be done without impacting utilization?
    bool hidden_bank_prep = false;

    // search for seamless row hits first, if no seamless row hit is
    // found then determine if there are other packets that can be issued
    // without incurring additional bus delay due to bank timing
    // Will select closed rows first to enable more open row possibilities
    // in future selections
    bool found_hidden_bank = false;

    // remember if we found a row hit, not seamless, but bank prepped
    bool found_prepped_pkt = false;

    // if we have no row hit, prepped or not, and no seamless packet,
    // just go for the earliest possible
    bool found_earliest_pkt = false;

    auto selected_pkt_it = queue.end();

    // time we need to issue a column command to be seamless
    const Tick min_col_at = std::max(nextBurstAt + extra_col_delay, curTick());
    for (auto i = queue.begin(); i != queue.end() ; ++i) {
        DRAMPacket* dram_pkt = *i;
        const Bank& bank = dram_pkt->bankRef;
        const Tick col_allowed_at = dram_pkt->isRead() ? bank.rdAllowedAt :
                                                         bank.wrAllowedAt;

        DPRINTF(DRAM, "%s checking packet in bank %d, row %d\n",
                __func__, dram_pkt->bankRef.bank, dram_pkt->row);

        // check if rank is not doing a refresh and thus is available, if not,
        // jump to the next packet
        if (dram->burstReady(dram_pkt->rank)) {

            DPRINTF(DRAM,
                    "%s bank %d - Rank %d available\n", __func__,
                    dram_pkt->bank, dram_pkt->rank);

            // check if it is a row hit
            if (bank.openRow == dram_pkt->row) {
                // no additional rank-to-rank or same bank-group
                // delays, or we switched read/write and might as well
                // go for the row hit
                if (col_allowed_at <= min_col_at) {
                    // FCFS within the hits, giving priority to
                    // commands that can issue seamlessly, without
                    // additional delay, such as same rank accesses
                    // and/or different bank-group accesses
                    DPRINTF(DRAM, "%s Seamless row buffer hit\n", __func__);
                    selected_pkt_it = i;
                    // no need to look through the remaining queue entries
                    break;
                } else if (!found_hidden_bank && !found_prepped_pkt) {
                    // if we did not find a packet to a closed row that can
                    // issue the bank commands without incurring delay, and
                    // did not yet find a packet to a prepped row, remember
                    // the current one
                    selected_pkt_it = i;
                    found_prepped_pkt = true;
                    DPRINTF(DRAM, "%s Prepped row buffer hit\n", __func__);
                }
            } else if (!found_earliest_pkt) {
                // if we have not initialised the bank status, do it
                // now, and only once per scheduling decision
                if (!filled_earliest_banks) {
                    // determine entries with earliest bank delay
                    std::tie(earliest_banks, hidden_bank_prep) =
                        dram->minBankPrep(queue, min_col_at);
                    filled_earliest_banks = true;
                }

                // bank is amongst first available banks
                // minBankPrep will give priority to packets that can
                // issue seamlessly
                if (bits(earliest_banks[dram_pkt->rank],
                         dram_pkt->bank, dram_pkt->bank)) {
                    found_earliest_pkt = true;
                    found_hidden_bank = hidden_bank_prep;

                    // give priority to packets that can issue
                    // bank commands 'behind the scenes'
                    // any additional delay if any will be due to
                    // col-to-col command requirements
                    if (hidden_bank_prep || !found_prepped_pkt)
                        selected_pkt_it = i;
                }
            }
        } else {
            DPRINTF(DRAM, "%s bank %d - Rank %d not available\n", __func__,
                    dram_pkt->bank, dram_pkt->rank);
        }
    }

    if (selected_pkt_it == queue.end()) {
        DPRINTF(DRAM, "%s no available ranks found\n", __func__);
    }

    return selected_pkt_it;
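// To summarise the priority order implemented above: (1) a seamless
// row hit wins outright, (2) otherwise a prepped row hit is kept as
// the candidate, (3) otherwise the packet whose bank can be readied
// earliest is chosen, preferring banks whose PRE/ACT can be hidden
// behind other traffic; ties within each class resolve first-come
// first-served.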
DRAMCtrl::accessAndRespond(PacketPtr pkt, Tick static_latency)

    DPRINTF(DRAM, "Responding to Address %lld.. \n", pkt->getAddr());

    bool needsResponse = pkt->needsResponse();
    // do the actual memory access which also turns the packet into a
    // response

    // turn packet around to go back to requester if response expected
    if (needsResponse) {
        // access already turned the packet into a response
        assert(pkt->isResponse());
        // response_time consumes the static latency and is charged also
        // with headerDelay that takes into account the delay provided by
        // the xbar and also the payloadDelay that takes into account the
        // number of data beats.
        Tick response_time = curTick() + static_latency + pkt->headerDelay +
                             pkt->payloadDelay;
        // Here we reset the timing of the packet before sending it out.
        pkt->headerDelay = pkt->payloadDelay = 0;

        // queue the packet in the response queue to be sent out after
        // the static latency has passed
        port.schedTimingResp(pkt, response_time);
    } else {
        // @todo the packet is going to be deleted, and the DRAMPacket
        // still has a pointer to it
        pendingDelete.reset(pkt);
    }

    DPRINTF(DRAM, "Done\n");
DRAMCtrl::pruneBurstTick()

    auto it = burstTicks.begin();
    while (it != burstTicks.end()) {
        auto current_it = it++;
        if (curTick() > *current_it) {
            DPRINTF(DRAM, "Removing burstTick for %d\n", *current_it);
            burstTicks.erase(current_it);
        }
    }

DRAMCtrl::getBurstWindow(Tick cmd_tick)

    // get tick aligned to burst window
    Tick burst_offset = cmd_tick % dram->burstDataDelay();
    return (cmd_tick - burst_offset);
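// Illustrative: with burstDataDelay() = 4 ticks, command ticks 13, 14
// and 15 all fall in the window starting at tick 12, so they compete
// for the same per-window command-bus budget.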
DRAMCtrl::verifySingleCmd(Tick cmd_tick)

    // start with assumption that there is no contention on command bus
    Tick cmd_at = cmd_tick;

    // get tick aligned to burst window
    Tick burst_tick = getBurstWindow(cmd_tick);

    // verify that we have command bandwidth to issue the command
    // if not, iterate over next window(s) until slot found
    while (burstTicks.count(burst_tick) >= dram->maxCmdsPerBst()) {
        DPRINTF(DRAM, "Contention found on command bus at %d\n", burst_tick);
        burst_tick += dram->burstDataDelay();
        cmd_at = burst_tick;
    }

    // add command into burst window and return corresponding Tick
    burstTicks.insert(burst_tick);
    return cmd_at;
DRAMCtrl::verifyMultiCmd(Tick cmd_tick, Tick max_multi_cmd_split)

    // start with assumption that there is no contention on command bus
    Tick cmd_at = cmd_tick;

    // get tick aligned to burst window
    Tick burst_tick = getBurstWindow(cmd_tick);

    // Command timing requirements are from 2nd command
    // Start with assumption that 2nd command will issue at cmd_at and
    // find prior slot for 1st command to issue
    // Given a maximum latency of max_multi_cmd_split between the commands,
    // find the burst at the maximum latency prior to cmd_at
    Tick burst_offset = 0;
    Tick first_cmd_offset = cmd_tick % dram->burstDataDelay();
    while (max_multi_cmd_split > (first_cmd_offset + burst_offset)) {
        burst_offset += dram->burstDataDelay();
    }
    // get the earliest burst aligned address for first command
    // ensure that the time does not go negative
    Tick first_cmd_tick = burst_tick - std::min(burst_offset, burst_tick);

    // Can required commands issue?
    bool first_can_issue = false;
    bool second_can_issue = false;
    // verify that we have command bandwidth to issue the command(s)
    while (!first_can_issue || !second_can_issue) {
        bool same_burst = (burst_tick == first_cmd_tick);
        auto first_cmd_count = burstTicks.count(first_cmd_tick);
        auto second_cmd_count = same_burst ? first_cmd_count + 1 :
                                             burstTicks.count(burst_tick);

        first_can_issue = first_cmd_count < dram->maxCmdsPerBst();
        second_can_issue = second_cmd_count < dram->maxCmdsPerBst();

        if (!second_can_issue) {
            DPRINTF(DRAM, "Contention (cmd2) found on command bus at %d\n",
                    burst_tick);
            burst_tick += dram->burstDataDelay();
            cmd_at = burst_tick;
        }

        // Verify max_multi_cmd_split isn't violated when command 2 is shifted
        // If commands initially were issued in same burst, they are
        // now in consecutive bursts and can still issue B2B
        bool gap_violated = !same_burst &&
            ((burst_tick - first_cmd_tick) > max_multi_cmd_split);

        if (!first_can_issue || (!second_can_issue && gap_violated)) {
            DPRINTF(DRAM, "Contention (cmd1) found on command bus at %d\n",
                    first_cmd_tick);
            first_cmd_tick += dram->burstDataDelay();
        }
    }

    // Add command to burstTicks
    burstTicks.insert(burst_tick);
    burstTicks.insert(first_cmd_tick);

    return cmd_at;
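// Illustrative: for a two-cycle command pair split by at most
// max_multi_cmd_split, the first command is searched for up to that
// far before the second; whenever either window is already full
// (maxCmdsPerBst() entries in burstTicks), the offending command
// slides forward one burst window and the pair is re-checked.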
DRAMInterface::activateBank(Rank& rank_ref, Bank& bank_ref,
                            Tick act_tick, uint32_t row)

    assert(rank_ref.actTicks.size() == activationLimit);

    // verify that we have command bandwidth to issue the activate
    // if not, shift to next burst window
    Tick act_at;
    if (twoCycleActivate)
        act_at = ctrl->verifyMultiCmd(act_tick, tAAD);
    else
        act_at = ctrl->verifySingleCmd(act_tick);

    DPRINTF(DRAM, "Activate at tick %d\n", act_at);

    // update the open row
    assert(bank_ref.openRow == Bank::NO_ROW);
    bank_ref.openRow = row;

    // start counting anew, this covers both the case when we
    // auto-precharged, and when this access is forced to
    // precharge
    bank_ref.bytesAccessed = 0;
    bank_ref.rowAccesses = 0;

    ++rank_ref.numBanksActive;
    assert(rank_ref.numBanksActive <= banksPerRank);

    DPRINTF(DRAM, "Activate bank %d, rank %d at tick %lld, now got "
            "%d active\n", bank_ref.bank, rank_ref.rank, act_at,
            ranks[rank_ref.rank]->numBanksActive);

    rank_ref.cmdList.push_back(Command(MemCommand::ACT, bank_ref.bank,
                               act_at));

    DPRINTF(DRAMPower, "%llu,ACT,%d,%d\n", divCeil(act_at, tCK) -
            timeStampOffset, bank_ref.bank, rank_ref.rank);

    // The next access has to respect tRAS for this bank
    bank_ref.preAllowedAt = act_at + tRAS;

    // Respect the row-to-column command delay for both read and write cmds
    bank_ref.rdAllowedAt = std::max(act_at + tRCD, bank_ref.rdAllowedAt);
    bank_ref.wrAllowedAt = std::max(act_at + tRCD, bank_ref.wrAllowedAt);
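    // Illustrative: for an ACT issued at tick T with tRCD = 15ns and
    // tRAS = 35ns, column commands to this bank may issue from
    // T + 15ns and a precharge no earlier than T + 35ns.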
    // start by enforcing tRRD
    for (int i = 0; i < banksPerRank; i++) {
        // next activate to any bank in this rank must not happen
        // before tRRD
        if (bankGroupArch && (bank_ref.bankgr == rank_ref.banks[i].bankgr)) {
            // bank group architecture requires longer delays between
            // ACT commands within the same bank group. Use tRRD_L
            // in this case
            rank_ref.banks[i].actAllowedAt = std::max(act_at + tRRD_L,
                                             rank_ref.banks[i].actAllowedAt);
        } else {
            // use shorter tRRD value when either
            // 1) bank group architecture is not supported
            // 2) bank is in a different bank group
            rank_ref.banks[i].actAllowedAt = std::max(act_at + tRRD,
                                             rank_ref.banks[i].actAllowedAt);
        }
    }
    // next, we deal with tXAW, if the activation limit is disabled
    // then we directly schedule an activate power event
    if (!rank_ref.actTicks.empty()) {
        // sanity check
        if (rank_ref.actTicks.back() &&
            (act_at - rank_ref.actTicks.back()) < tXAW) {
            panic("Got %d activates in window %d (%llu - %llu) which "
                  "is smaller than %llu\n", activationLimit, act_at -
                  rank_ref.actTicks.back(), act_at,
                  rank_ref.actTicks.back(), tXAW);
        }

        // shift the times used for the book keeping, the last element
        // (highest index) is the oldest one and hence the lowest value
        rank_ref.actTicks.pop_back();

        // record a new activation (in the future)
        rank_ref.actTicks.push_front(act_at);

        // cannot activate more than X times in time window tXAW, push the
        // next one (the X + 1'st activate) to be tXAW away from the
        // oldest in our window of X
        if (rank_ref.actTicks.back() &&
            (act_at - rank_ref.actTicks.back()) < tXAW) {
            DPRINTF(DRAM, "Enforcing tXAW with X = %d, next activate "
                    "no earlier than %llu\n", activationLimit,
                    rank_ref.actTicks.back() + tXAW);
            for (int j = 0; j < banksPerRank; j++)
                // next activate must not happen before end of window
                rank_ref.banks[j].actAllowedAt =
                    std::max(rank_ref.actTicks.back() + tXAW,
                             rank_ref.banks[j].actAllowedAt);
        }
    }

    // at the point when this activate takes place, make sure we
    // transition to the active power state
    if (!rank_ref.activateEvent.scheduled())
        schedule(rank_ref.activateEvent, act_at);
    else if (rank_ref.activateEvent.when() > act_at)
        // move it sooner in time
        reschedule(rank_ref.activateEvent, act_at);
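// Illustrative tXAW bookkeeping: with activationLimit X = 4, the deque
// holds the ticks of the last four ACTs; if the oldest of them is less
// than tXAW before this ACT, every bank's next ACT is pushed out to
// actTicks.back() + tXAW, i.e. at most four activates may fall in any
// rolling tXAW window.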
DRAMInterface::prechargeBank(Rank& rank_ref, Bank& bank, Tick pre_tick,
                             bool auto_or_preall, bool trace)

    // make sure the bank has an open row
    assert(bank.openRow != Bank::NO_ROW);

    // sample the bytes per activate here since we are closing
    // the page
    stats.bytesPerActivate.sample(bank.bytesAccessed);

    bank.openRow = Bank::NO_ROW;

    Tick pre_at = pre_tick;
    if (auto_or_preall) {
        // no precharge allowed before this one
        bank.preAllowedAt = pre_at;
    } else {
        // Issuing an explicit PRE command
        // Verify that we have command bandwidth to issue the precharge
        // if not, shift to next burst window
        pre_at = ctrl->verifySingleCmd(pre_tick);
        for (int i = 0; i < banksPerRank; i++) {
            rank_ref.banks[i].preAllowedAt = std::max(pre_at + tPPD,
                                             rank_ref.banks[i].preAllowedAt);
        }
    }

    Tick pre_done_at = pre_at + tRP;

    bank.actAllowedAt = std::max(bank.actAllowedAt, pre_done_at);
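    // Illustrative: a PRE at pre_at with tRP = 15ns completes at
    // pre_done_at = pre_at + 15ns, which is the earliest the next ACT
    // can reopen a row in this bank.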
    assert(rank_ref.numBanksActive != 0);
    --rank_ref.numBanksActive;

    DPRINTF(DRAM, "Precharging bank %d, rank %d at tick %lld, now got "
            "%d active\n", bank.bank, rank_ref.rank, pre_at,
            rank_ref.numBanksActive);

    if (trace) {
        rank_ref.cmdList.push_back(Command(MemCommand::PRE, bank.bank,
                                   pre_at));

        DPRINTF(DRAMPower, "%llu,PRE,%d,%d\n", divCeil(pre_at, tCK) -
                timeStampOffset, bank.bank, rank_ref.rank);
    }

    // if we look at the current number of active banks we might be
    // tempted to think the DRAM is now idle, however this can be
    // undone by an activate that is scheduled to happen before we
    // would have reached the idle state, so schedule an event and
    // rather check once we actually make it to the point in time when
    // the (last) precharge takes place
    if (!rank_ref.prechargeEvent.scheduled()) {
        schedule(rank_ref.prechargeEvent, pre_done_at);
        // New event, increment count
        ++rank_ref.outstandingEvents;
    } else if (rank_ref.prechargeEvent.when() < pre_done_at) {
        reschedule(rank_ref.prechargeEvent, pre_done_at);
    }
DRAMInterface::doBurstAccess(DRAMPacket* dram_pkt, Tick next_burst_at,
                             const std::vector<DRAMPacketQueue>& queue)

    DPRINTF(DRAM, "Timing access to addr %lld, rank/bank/row %d %d %d\n",
            dram_pkt->addr, dram_pkt->rank, dram_pkt->bank, dram_pkt->row);

    Rank& rank_ref = *ranks[dram_pkt->rank];

    assert(rank_ref.inRefIdleState());

    // are we in or transitioning to a low-power state and have not scheduled
    // a power-up event?
    // if so, wake up from power down to issue RD/WR burst
    if (rank_ref.inLowPowerState) {
        assert(rank_ref.pwrState != PWR_SREF);
        rank_ref.scheduleWakeUpEvent(tXP);
    }

    Bank& bank_ref = dram_pkt->bankRef;

    // for the state we need to track if it is a row hit or not
    bool row_hit = true;

    // Determine the access latency and update the bank state
    if (bank_ref.openRow == dram_pkt->row) {
        // nothing to do
    } else {
        row_hit = false;

        // If there is a page open, precharge it.
        if (bank_ref.openRow != Bank::NO_ROW) {
            prechargeBank(rank_ref, bank_ref, std::max(bank_ref.preAllowedAt,
                                                       curTick()));
        }

        // next we need to account for the delay in activating the page
        Tick act_tick = std::max(bank_ref.actAllowedAt, curTick());

        // Record the activation and deal with all the global timing
        // constraints caused by a new activation (tRRD and tXAW)
        activateBank(rank_ref, bank_ref, act_tick, dram_pkt->row);
    }
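    // Illustrative row-miss cost: closing an old row and opening the
    // new one serialises PRE (tRP) and ACT (tRCD) ahead of the column
    // command, so a miss to an open bank costs roughly tRP + tRCD more
    // than a row hit before data can move.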
    // respect any constraints on the command (e.g. tRCD or tCCD)
    const Tick col_allowed_at = dram_pkt->isRead() ?
                                bank_ref.rdAllowedAt : bank_ref.wrAllowedAt;

    // we need to wait until the bus is available before we can issue
    // the command; need to ensure minimum bus delay requirement is met
    Tick cmd_at = std::max({col_allowed_at, next_burst_at, curTick()});

    // verify that we have command bandwidth to issue the burst
    // if not, shift to next burst window
    if (dataClockSync && ((cmd_at - rank_ref.lastBurstTick) > clkResyncDelay))
        cmd_at = ctrl->verifyMultiCmd(cmd_at, tCK);
    else
        cmd_at = ctrl->verifySingleCmd(cmd_at);

    // if we are interleaving bursts, ensure that
    // 1) we don't double interleave on next burst issue
    // 2) we are at an interleave boundary; if not, shift to next boundary
    Tick burst_gap = tBURST_MIN;
    if (burstInterleave) {
        if (cmd_at == (rank_ref.lastBurstTick + tBURST_MIN)) {
            // already interleaving, push next command to end of full burst
            burst_gap = tBURST;
        } else if (cmd_at < (rank_ref.lastBurstTick + tBURST)) {
            // not at an interleave boundary after bandwidth check
            // Shift command to tBURST boundary to avoid data contention
            // Command will remain in the same burst window given that
            // tBURST is less than tBURST_MAX
            cmd_at = rank_ref.lastBurstTick + tBURST;
        }
    }
    DPRINTF(DRAM, "Schedule RD/WR burst at tick %d\n", cmd_at);

    // update the packet ready time
    dram_pkt->readyTime = cmd_at + tCL + tBURST;

    rank_ref.lastBurstTick = cmd_at;
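    // Illustrative: with tCL = 15ns and tBURST = 5ns, a burst whose
    // column command issues at cmd_at has its data transferred (and
    // the packet marked ready) at cmd_at + 20ns.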
    // update the time for the next read/write burst for each
    // bank (add a max with tCCD/tCCD_L/tCCD_L_WR here)
    Tick dly_to_rd_cmd;
    Tick dly_to_wr_cmd;
    for (int j = 0; j < ranksPerChannel; j++) {
        for (int i = 0; i < banksPerRank; i++) {
            if (dram_pkt->rank == j) {
                if (bankGroupArch &&
                    (bank_ref.bankgr == ranks[j]->banks[i].bankgr)) {
                    // bank group architecture requires longer delays between
                    // RD/WR burst commands to the same bank group.
                    // tCCD_L is default requirement for same BG timing
                    // tCCD_L_WR is required for write-to-write
                    // Need to also take bus turnaround delays into account
                    dly_to_rd_cmd = dram_pkt->isRead() ?
                                    tCCD_L : std::max(tCCD_L, wrToRdDlySameBG);
                    dly_to_wr_cmd = dram_pkt->isRead() ?
                                    std::max(tCCD_L, rdToWrDlySameBG) :
                                    tCCD_L_WR;
                } else {
                    // tBURST is default requirement for diff BG timing
                    // Need to also take bus turnaround delays into account
                    dly_to_rd_cmd = dram_pkt->isRead() ? burst_gap : wrToRdDly;
                    dly_to_wr_cmd = dram_pkt->isRead() ? rdToWrDly : burst_gap;
                }
            } else {
                // different rank is by default in a different bank group and
                // doesn't require longer tCCD or additional RTW, WTR delays
                // Need to account for rank-to-rank switching
                dly_to_wr_cmd = rankToRankDly;
                dly_to_rd_cmd = rankToRankDly;
            }
            ranks[j]->banks[i].rdAllowedAt = std::max(cmd_at + dly_to_rd_cmd,
                                             ranks[j]->banks[i].rdAllowedAt);
            ranks[j]->banks[i].wrAllowedAt = std::max(cmd_at + dly_to_wr_cmd,
                                             ranks[j]->banks[i].wrAllowedAt);
        }
    }
    // Save rank of current access
    activeRank = dram_pkt->rank;

    // If this is a write, we also need to respect the write recovery
    // time before a precharge, in the case of a read, respect the
    // read to precharge constraint
    bank_ref.preAllowedAt = std::max(bank_ref.preAllowedAt,
                                     dram_pkt->isRead() ? cmd_at + tRTP :
                                     dram_pkt->readyTime + tWR);

    // increment the bytes accessed and the accesses per row
    bank_ref.bytesAccessed += burstSize;
    ++bank_ref.rowAccesses;

    // if we reached the max, then issue with an auto-precharge
    bool auto_precharge = pageMgmt == Enums::close ||
                          bank_ref.rowAccesses == maxAccessesPerRow;

    // if we did not hit the limit, we might still want to
    // auto-precharge
    if (!auto_precharge &&
        (pageMgmt == Enums::open_adaptive ||
         pageMgmt == Enums::close_adaptive)) {
        // a twist on the open and close page policies:
        // 1) open_adaptive page policy does not blindly keep the
        // page open, but closes it if there are no row hits, and there
        // are bank conflicts in the queue
        // 2) close_adaptive page policy does not blindly close the
        // page, but closes it only if there are no row hits in the queue.
        // In this case, only force an auto precharge when there
        // are no same page hits in the queue
        bool got_more_hits = false;
        bool got_bank_conflict = false;

        for (uint8_t i = 0; i < ctrl->numPriorities(); ++i) {
            auto p = queue[i].begin();
            // keep on looking until we find a hit or reach the end of the
            // queue
            // 1) if a hit is found, then both open and close adaptive
            // policies keep the page open
            // 2) if no hit is found, got_bank_conflict is set to true if a
            // bank conflict request is waiting in the queue
            // 3) make sure we are not considering the packet that we are
            // currently dealing with
            while (!got_more_hits && p != queue[i].end()) {
                if (dram_pkt != (*p)) {
                    bool same_rank_bank = (dram_pkt->rank == (*p)->rank) &&
                                          (dram_pkt->bank == (*p)->bank);

                    bool same_row = dram_pkt->row == (*p)->row;
                    got_more_hits |= same_rank_bank && same_row;
                    got_bank_conflict |= same_rank_bank && !same_row;
                }
                ++p;
            }
        }

        // auto pre-charge when either
        // 1) open_adaptive policy, we have not got any more hits, and
        // have a bank conflict
        // 2) close_adaptive policy and we have not got any more hits
        auto_precharge = !got_more_hits &&
            (got_bank_conflict || pageMgmt == Enums::close_adaptive);
    }
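    // Illustrative: under open_adaptive, if the queues hold no further
    // request to this row but do hold one to a different row of the
    // same bank, this burst issues with auto-precharge so the
    // conflicting request can activate its row sooner; close_adaptive
    // precharges whenever no further row hit is queued.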
    // DRAMPower trace command to be written
    std::string mem_cmd = dram_pkt->isRead() ? "RD" : "WR";

    // MemCommand required for DRAMPower library
    MemCommand::cmds command = (mem_cmd == "RD") ? MemCommand::RD :
                                                   MemCommand::WR;

    rank_ref.cmdList.push_back(Command(command, dram_pkt->bank, cmd_at));

    DPRINTF(DRAMPower, "%llu,%s,%d,%d\n", divCeil(cmd_at, tCK) -
            timeStampOffset, mem_cmd, dram_pkt->bank, dram_pkt->rank);

    // if this access should use auto-precharge, then we are
    // closing the row after the read/write burst
    if (auto_precharge) {
        // if auto-precharge push a PRE command at the correct tick to the
        // list used by DRAMPower library to calculate power
        prechargeBank(rank_ref, bank_ref, std::max(curTick(),
                      bank_ref.preAllowedAt), true);

        DPRINTF(DRAM, "Auto-precharged bank: %d\n", dram_pkt->bankId);
    }

    // Update the stats and schedule the next request
    if (dram_pkt->isRead()) {
        // Every respQueue which will generate an event, increment count
        ++rank_ref.outstandingEvents;

        if (row_hit)
            stats.readRowHits++;
        stats.bytesRead += burstSize;
        stats.perBankRdBursts[dram_pkt->bankId]++;

        // Update latency stats
        stats.totMemAccLat += dram_pkt->readyTime - dram_pkt->entryTime;
        stats.totQLat += cmd_at - dram_pkt->entryTime;
        stats.totBusLat += tBURST;
    } else {
        // Schedule write done event to decrement event count
        // after the readyTime has been reached
        // Only schedule latest write event to minimize events
        // required; only need to ensure that final event scheduled covers
        // the time that writes are outstanding and bus is active
        // to hold off power-down entry events
        if (!rank_ref.writeDoneEvent.scheduled()) {
            schedule(rank_ref.writeDoneEvent, dram_pkt->readyTime);
            // New event, increment count
            ++rank_ref.outstandingEvents;
        } else if (rank_ref.writeDoneEvent.when() < dram_pkt->readyTime) {
            reschedule(rank_ref.writeDoneEvent, dram_pkt->readyTime);
        }
        // will remove write from queue when returned to parent function
        // decrement count for DRAM rank
        --rank_ref.writeEntries;

        stats.writeBursts++;
        if (row_hit)
            stats.writeRowHits++;
        stats.bytesWritten += burstSize;
        stats.perBankWrBursts[dram_pkt->bankId]++;
    }
    // Update bus state to reflect when previous command was issued
    return (cmd_at + burst_gap);
DRAMCtrl::inReadBusState(bool next_state) const

    // check the bus state
    if (next_state) {
        // use busStateNext to get the state that will be used
        // for the next burst
        return (busStateNext == DRAMCtrl::READ);
    } else {
        return (busState == DRAMCtrl::READ);
    }

DRAMCtrl::inWriteBusState(bool next_state) const

    // check the bus state
    if (next_state) {
        // use busStateNext to get the state that will be used
        // for the next burst
        return (busStateNext == DRAMCtrl::WRITE);
    } else {
        return (busState == DRAMCtrl::WRITE);
    }
DRAMCtrl::doBurstAccess(DRAMPacket* dram_pkt)

    // first clean up the burstTick set, removing old entries
    // before adding new entries for next burst
    pruneBurstTick();

    // Update bus state to reflect when previous command was issued
    std::vector<DRAMPacketQueue>& queue = selQueue(dram_pkt->isRead());
    nextBurstAt = dram->doBurstAccess(dram_pkt, nextBurstAt, queue);

    DPRINTF(DRAM, "Access to %lld, ready at %lld next burst at %lld.\n",
            dram_pkt->addr, dram_pkt->readyTime, nextBurstAt);

    // Update the minimum timing between the requests, this is a
    // conservative estimate of when we have to schedule the next
    // request to not introduce any unnecessary bubbles. In most cases
    // we will wake up sooner than we have to.
    nextReqTime = nextBurstAt - dram->tRC();

    // Update the common bus stats
    if (dram_pkt->isRead()) {
        ++readsThisTime;
        // Update latency stats
        stats.masterReadTotalLat[dram_pkt->masterId()] +=
            dram_pkt->readyTime - dram_pkt->entryTime;
        stats.masterReadBytes[dram_pkt->masterId()] += dram_pkt->size;
    } else {
        ++writesThisTime;
        stats.masterWriteBytes[dram_pkt->masterId()] += dram_pkt->size;
        stats.masterWriteTotalLat[dram_pkt->masterId()] +=
            dram_pkt->readyTime - dram_pkt->entryTime;
    }
DRAMCtrl::processNextReqEvent()

    // transition is handled by QoS algorithm if enabled
    // select bus state - only done if QoS algorithms are in use
    busStateNext = selectNextBusState();

    // detect bus state change
    bool switched_cmd_type = (busState != busStateNext);
    recordTurnaroundStats();

    DPRINTF(DRAM, "QoS Turnarounds selected state %s %s\n",
            (busState==MemCtrl::READ)?"READ":"WRITE",
            switched_cmd_type?"[turnaround triggered]":"");

    if (switched_cmd_type) {
        if (busState == MemCtrl::READ) {
            DPRINTF(DRAM,
                    "Switching to writes after %d reads with %d reads "
                    "waiting\n", readsThisTime, totalReadQueueSize);
            stats.rdPerTurnAround.sample(readsThisTime);
            readsThisTime = 0;
        } else {
            DPRINTF(DRAM,
                    "Switching to reads after %d writes with %d writes "
                    "waiting\n", writesThisTime, totalWriteQueueSize);
            stats.wrPerTurnAround.sample(writesThisTime);
            writesThisTime = 0;
        }
    }

    // updates current state
    busState = busStateNext;

    // check ranks for refresh/wakeup - uses busStateNext, so done after
    // turnaround
    if (dram->isBusy()) {
        // if all ranks are refreshing wait for them to finish
        // and stall this state machine without taking any further
        // action, and do not schedule a new nextReqEvent
        return;
    }
    // when we get here it is either a read or a write
    if (busState == READ) {

        // track if we should switch or not
        bool switch_to_writes = false;

        if (totalReadQueueSize == 0) {
            // In the case there is no read request to go next,
            // trigger writes if we have passed the low threshold (or
            // if we are draining)
            if (!(totalWriteQueueSize == 0) &&
                (drainState() == DrainState::Draining ||
                 totalWriteQueueSize > writeLowThreshold)) {

                DPRINTF(DRAM, "Switching to writes due to read queue empty\n");
                switch_to_writes = true;
            } else {
                // check if we are drained
                // not done draining until in PWR_IDLE state
                // ensuring all banks are closed and
                // have exited low power states
                if (drainState() == DrainState::Draining &&
                    respQueue.empty() && dram->allRanksDrained()) {

                    DPRINTF(Drain, "DRAM controller done draining\n");
                    signalDrainDone();
                }

                // nothing to do, not even any point in scheduling an
                // event for the next request
                return;
            }
        } else {
            bool read_found = false;
            DRAMPacketQueue::iterator to_read;
            uint8_t prio = numPriorities();

            for (auto queue = readQueue.rbegin();
                 queue != readQueue.rend(); ++queue) {

                prio--;

                DPRINTF(QOS,
                        "DRAM controller checking READ queue [%d] "
                        "priority [%d elements]\n",
                        prio, queue->size());

                // Figure out which read request goes next
                // If we are changing command type, incorporate the minimum
                // bus turnaround delay which will be rank to rank delay
                to_read = chooseNext((*queue), switched_cmd_type ?
                                     dram->rankDelay() : 0);

                if (to_read != queue->end()) {
                    // candidate read found
                    read_found = true;
                    break;
                }
            }

            // if no read to an available rank is found then return
            // at this point. There could be writes to the available ranks
            // which are above the required threshold. However, to
            // avoid adding more complexity to the code, return and wait
            // for a refresh event to kick things into action again.
            if (!read_found) {
                DPRINTF(DRAM, "No Reads Found - exiting\n");
                return;
            }

            auto dram_pkt = *to_read;

            doBurstAccess(dram_pkt);

            // sanity check
            assert(dram_pkt->size <= dram->bytesPerBurst());
            assert(dram_pkt->readyTime >= curTick());

            // log the response
            logResponse(MemCtrl::READ, (*to_read)->masterId(),
                        dram_pkt->qosValue(), dram_pkt->getAddr(), 1,
                        dram_pkt->readyTime - dram_pkt->entryTime);

            // Insert into response queue. It will be sent back to the
            // requester at its readyTime
            if (respQueue.empty()) {
                assert(!respondEvent.scheduled());
                schedule(respondEvent, dram_pkt->readyTime);
            } else {
                assert(respQueue.back()->readyTime <= dram_pkt->readyTime);
                assert(respondEvent.scheduled());
            }

            respQueue.push_back(dram_pkt);

            // we have so many writes that we have to transition
            if (totalWriteQueueSize > writeHighThreshold) {
                switch_to_writes = true;
            }

            // remove the request from the queue - the iterator is no longer
            // valid
            readQueue[dram_pkt->qosValue()].erase(to_read);
        }
        // switching to writes, either because the read queue is empty
        // and the writes have passed the low threshold (or we are
        // draining), or because the writes hit the high threshold
        if (switch_to_writes) {
            // transition to writing
            busStateNext = WRITE;
        }
    } else {

        bool write_found = false;
        DRAMPacketQueue::iterator to_write;
        uint8_t prio = numPriorities();
        for (auto queue = writeQueue.rbegin();
             queue != writeQueue.rend(); ++queue) {

            prio--;

            DPRINTF(QOS,
                    "DRAM controller checking WRITE queue [%d] "
                    "priority [%d elements]\n",
                    prio, queue->size());

            // If we are changing command type, incorporate the minimum
            // bus turnaround delay
            to_write = chooseNext((*queue),
                                  switched_cmd_type ? std::min(dram->minRdToWr(),
                                  dram->rankDelay()) : 0);

            if (to_write != queue->end()) {
                write_found = true;
                break;
            }
        }

        // if no writes to a rank that is available to service
        // requests (i.e. a rank in refresh idle state) are found, then
        // return. There could be reads to the available ranks. However, to
        // avoid adding more complexity to the code, return at this point and
        // wait for a refresh event to kick things into action again.
        if (!write_found) {
            DPRINTF(DRAM, "No Writes Found - exiting\n");
            return;
        }

        auto dram_pkt = *to_write;

        // sanity check
        assert(dram_pkt->size <= dram->bytesPerBurst());

        doBurstAccess(dram_pkt);

        isInWriteQueue.erase(burstAlign(dram_pkt->addr));

        // log the response
        logResponse(MemCtrl::WRITE, dram_pkt->masterId(),
                    dram_pkt->qosValue(), dram_pkt->getAddr(), 1,
                    dram_pkt->readyTime - dram_pkt->entryTime);

        // remove the request from the queue - the iterator is no longer valid
        writeQueue[dram_pkt->qosValue()].erase(to_write);

        // If we emptied the write queue, or got sufficiently below the
        // threshold (using the minWritesPerSwitch as the hysteresis) and
        // are not draining, or we have reads waiting and have done enough
        // writes, then switch to reads.
        bool below_threshold =
            totalWriteQueueSize + minWritesPerSwitch < writeLowThreshold;
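        // Illustrative: with writeLowThreshold = 32 and
        // minWritesPerSwitch = 16, the bus keeps servicing writes
        // until fewer than 16 remain queued (32 - 16), so each write
        // phase drains a meaningful batch instead of ping-ponging
        // between read and write modes.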
        if (totalWriteQueueSize == 0 ||
            (below_threshold && drainState() != DrainState::Draining) ||
            (totalReadQueueSize && writesThisTime >= minWritesPerSwitch)) {

            // turn the bus back around for reads again
            busStateNext = MemCtrl::READ;

            // note that we switch back to reads also in the idle
            // case, which eventually will check for any draining and
            // also pause any further scheduling if there is really
            // nothing to do
        }
    }
    // It is possible that a refresh to another rank kicks things back into
    // action before reaching this point.
    if (!nextReqEvent.scheduled())
        schedule(nextReqEvent, std::max(nextReqTime, curTick()));

    // If there is space available and we have writes waiting then let
    // them retry. This is done here to ensure that the retry does not
    // cause a nextReqEvent to be scheduled before we do so as part of
    // the next request processing
    if (retryWrReq && totalWriteQueueSize < writeBufferSize) {
        retryWrReq = false;
        port.sendRetryReq();
    }
DRAMInterface::DRAMInterface(const DRAMInterfaceParams* _p)
    : AbstractMemory(_p),
      addrMapping(_p->addr_mapping),
      burstSize((_p->devices_per_rank * _p->burst_length *
                 _p->device_bus_width) / 8),
      deviceSize(_p->device_size),
      deviceRowBufferSize(_p->device_rowbuffer_size),
      devicesPerRank(_p->devices_per_rank),
      rowBufferSize(devicesPerRank * deviceRowBufferSize),
      columnsPerRowBuffer(rowBufferSize / burstSize),
      columnsPerStripe(range.interleaved() ?
                       range.granularity() / burstSize : 1),
      ranksPerChannel(_p->ranks_per_channel),
      bankGroupsPerRank(_p->bank_groups_per_rank),
      bankGroupArch(_p->bank_groups_per_rank > 0),
      banksPerRank(_p->banks_per_rank), rowsPerBank(0),
      tCK(_p->tCK), tCS(_p->tCS), tCL(_p->tCL), tBURST(_p->tBURST),
      tBURST_MIN(_p->tBURST_MIN), tBURST_MAX(_p->tBURST_MAX), tRTW(_p->tRTW),
      tCCD_L_WR(_p->tCCD_L_WR), tCCD_L(_p->tCCD_L), tRCD(_p->tRCD),
      tRP(_p->tRP), tRAS(_p->tRAS), tWR(_p->tWR), tRTP(_p->tRTP),
      tRFC(_p->tRFC), tREFI(_p->tREFI), tRRD(_p->tRRD), tRRD_L(_p->tRRD_L),
      tPPD(_p->tPPD), tAAD(_p->tAAD),
      tXAW(_p->tXAW), tXP(_p->tXP), tXS(_p->tXS),
      clkResyncDelay(tCL + _p->tBURST_MAX),
      maxCommandsPerBurst(_p->burst_length / _p->beats_per_clock),
      dataClockSync(_p->data_clock_sync),
      burstInterleave(tBURST != tBURST_MIN),
      twoCycleActivate(_p->two_cycle_activate),
      activationLimit(_p->activation_limit),
      wrToRdDly(tCL + tBURST + _p->tWTR), rdToWrDly(tBURST + tRTW),
      wrToRdDlySameBG(tCL + _p->tBURST_MAX + _p->tWTR_L),
      rdToWrDlySameBG(tRTW + _p->tBURST_MAX),
      rankToRankDly(tCS + tBURST),
      pageMgmt(_p->page_policy),
      maxAccessesPerRow(_p->max_accesses_per_row),
      timeStampOffset(0), activeRank(0),
      enableDRAMPowerdown(_p->enable_dram_powerdown),
      lastStatsResetTick(0),
      readBufferSize(_p->read_buffer_size),
      writeBufferSize(_p->write_buffer_size)

    fatal_if(!isPowerOf2(burstSize), "DRAM burst size %d is not allowed, "
             "must be a power of two\n", burstSize);
    // sanity check the ranks since we rely on bit slicing for the
    // address decoding
    fatal_if(!isPowerOf2(ranksPerChannel), "DRAM rank count of %d is "
             "not allowed, must be a power of two\n", ranksPerChannel);

    for (int i = 0; i < ranksPerChannel; i++) {
        DPRINTF(DRAM, "Creating DRAM rank %d \n", i);
        Rank* rank = new Rank(_p, i, *this);
        ranks.push_back(rank);
    }

    // determine the dram actual capacity from the DRAM config in Mbytes
    uint64_t deviceCapacity = deviceSize / (1024 * 1024) * devicesPerRank *
                              ranksPerChannel;

    uint64_t capacity = ULL(1) << ceilLog2(AbstractMemory::size());

    DPRINTF(DRAM, "Memory capacity %lld (%lld) bytes\n", capacity,
            AbstractMemory::size());

    // if the actual DRAM size does not match the memory capacity in the
    // system, warn
    if (deviceCapacity != capacity / (1024 * 1024))
        warn("DRAM device capacity (%d Mbytes) does not match the "
             "address range assigned (%d Mbytes)\n", deviceCapacity,
             capacity / (1024 * 1024));

    DPRINTF(DRAM, "Row buffer size %d bytes with %d columns per row buffer\n",
            rowBufferSize, columnsPerRowBuffer);

    rowsPerBank = capacity / (rowBufferSize * banksPerRank * ranksPerChannel);
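    // Illustrative capacity walk-through: eight x8 devices per rank
    // with burst length 8 give burstSize = (8 * 8 * 8) / 8 = 64 bytes;
    // a 1 kB device row buffer then gives rowBufferSize = 8 kB and
    // columnsPerRowBuffer = 8192 / 64 = 128. For a 4 GB channel with
    // 16 banks per rank and 2 ranks, rowsPerBank =
    // 4 GB / (8 kB * 16 * 2) = 16384 rows.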
    // some basic sanity checks
    if (tREFI <= tRP || tREFI <= tRFC) {
        fatal("tREFI (%d) must be larger than tRP (%d) and tRFC (%d)\n",
              tREFI, tRP, tRFC);
    }

    // basic bank group architecture checks ->
    if (bankGroupArch) {
        // must have at least one bank per bank group
        if (bankGroupsPerRank > banksPerRank) {
            fatal("banks per rank (%d) must be equal to or larger than "
                  "bank groups per rank (%d)\n",
                  banksPerRank, bankGroupsPerRank);
        }
        // must have same number of banks in each bank group
        if ((banksPerRank % bankGroupsPerRank) != 0) {
            fatal("Banks per rank (%d) must be evenly divisible by bank "
                  "groups per rank (%d) for equal banks per bank group\n",
                  banksPerRank, bankGroupsPerRank);
        }
        // tCCD_L should be greater than minimal, back-to-back burst delay
        if (tCCD_L <= tBURST) {
            fatal("tCCD_L (%d) should be larger than the minimum bus delay "
                  "(%d) when bank groups per rank (%d) is greater than 1\n",
                  tCCD_L, tBURST, bankGroupsPerRank);
        }
        // tCCD_L_WR should be greater than minimal, back-to-back burst delay
        if (tCCD_L_WR <= tBURST) {
            fatal("tCCD_L_WR (%d) should be larger than the minimum bus delay "
                  "(%d) when bank groups per rank (%d) is greater than 1\n",
                  tCCD_L_WR, tBURST, bankGroupsPerRank);
        }
        // tRRD_L is greater than minimal, same bank group ACT-to-ACT delay
        // some datasheets might specify it equal to tRRD
        if (tRRD_L < tRRD) {
            fatal("tRRD_L (%d) should be larger than tRRD (%d) when "
                  "bank groups per rank (%d) is greater than 1\n",
                  tRRD_L, tRRD, bankGroupsPerRank);
        }
    }
DRAMInterface::init()

    AbstractMemory::init();

    // a few sanity checks on the interleaving; save them for here to
    // ensure that the system pointer is initialised
    if (range.interleaved()) {
        if (addrMapping == Enums::RoRaBaChCo) {
            if (rowBufferSize != range.granularity()) {
                fatal("Channel interleaving of %s doesn't match RoRaBaChCo "
                      "address map\n", name());
            }
        } else if (addrMapping == Enums::RoRaBaCoCh ||
                   addrMapping == Enums::RoCoRaBaCh) {
            // for the interleavings with channel bits in the bottom,
            // if the system uses a channel striping granularity that
            // is larger than the DRAM burst size, then map the
            // sequential accesses within a stripe to a number of
            // columns in the DRAM, effectively placing some of the
            // lower-order column bits as the least-significant bits
            // of the address (above the ones denoting the burst size)
            assert(columnsPerStripe >= 1);

            // channel striping has to be done at a granularity that
            // is equal or larger to a cache line
            if (system()->cacheLineSize() > range.granularity()) {
                fatal("Channel interleaving of %s must be at least as large "
                      "as the cache line size\n", name());
            }

            // ...and equal or smaller than the row-buffer size
            if (rowBufferSize < range.granularity()) {
                fatal("Channel interleaving of %s must be at most as large "
                      "as the row-buffer size\n", name());
            }
            // this is essentially the check above, so just to be sure
            assert(columnsPerStripe <= columnsPerRowBuffer);
        }
    }
void
DRAMInterface::startup()
{
    if (system()->isTimingMode()) {
        // timestamp offset should be in clock cycles for DRAMPower
        timeStampOffset = divCeil(curTick(), tCK);

        for (auto r : ranks) {
            r->startup(curTick() + tREFI - tRP);
        }
    }
}
bool
DRAMInterface::isBusy()
{
    int busy_ranks = 0;
    for (auto r : ranks) {
        if (!r->inRefIdleState()) {
            if (r->pwrState != PWR_SREF) {
                // rank is busy refreshing
                DPRINTF(DRAMState, "Rank %d is not available\n", r->rank);
                busy_ranks++;

                // let the rank know that if it was waiting to drain, it
                // is now done and ready to proceed
                r->checkDrainDone();
            }

            // check if we were in self-refresh and haven't started
            // to transition out
            if ((r->pwrState == PWR_SREF) && r->inLowPowerState) {
                DPRINTF(DRAMState, "Rank %d is in self-refresh\n", r->rank);
                // if we have commands queued to this rank and we don't have
                // a minimum number of active commands enqueued,
                // exit self-refresh
                if (r->forceSelfRefreshExit()) {
                    DPRINTF(DRAMState, "rank %d was in self refresh and"
                            " should wake up\n", r->rank);
                    // wake up from self-refresh
                    r->scheduleWakeUpEvent(tXS);
                    // things are brought back into action once a refresh is
                    // performed after self-refresh
                    // continue with selection for other ranks
                }
            }
        }
    }
    return (busy_ranks == ranksPerChannel);
}
void
DRAMInterface::respondEvent(uint8_t rank)
{
    Rank& rank_ref = *ranks[rank];

    // if a read has reached its ready-time, decrement the number of reads
    // At this point the packet has been handled and there is a possibility
    // to switch to low-power mode if no other packet is available
    --rank_ref.readEntries;
    DPRINTF(DRAM, "number of read entries for rank %d is %d\n",
            rank, rank_ref.readEntries);

    // counter should at least indicate one outstanding request
    // for this read
    assert(rank_ref.outstandingEvents > 0);
    // read response received, decrement count
    --rank_ref.outstandingEvents;

    // at this moment should not have transitioned to a low-power state
    assert((rank_ref.pwrState != PWR_SREF) &&
           (rank_ref.pwrState != PWR_PRE_PDN) &&
           (rank_ref.pwrState != PWR_ACT_PDN));

    // track if this is the last packet before idling
    // and that there are no outstanding commands to this rank
    if (rank_ref.isQueueEmpty() && rank_ref.outstandingEvents == 0 &&
        rank_ref.inRefIdleState() && enableDRAMPowerdown) {
        // verify that there are no events scheduled
        assert(!rank_ref.activateEvent.scheduled());
        assert(!rank_ref.prechargeEvent.scheduled());

        // if coming from active state, schedule power event to
        // active power-down else go to precharge power-down
        DPRINTF(DRAMState, "Rank %d sleep at tick %d; current power state is "
                "%d\n", rank, curTick(), rank_ref.pwrState);

        // default to ACT power-down unless already in IDLE state
        // could be in IDLE if PRE issued before data returned
        PowerState next_pwr_state = PWR_ACT_PDN;
        if (rank_ref.pwrState == PWR_IDLE) {
            next_pwr_state = PWR_PRE_PDN;
        }

        rank_ref.powerDownSleep(next_pwr_state, curTick());
    }
}
void
DRAMInterface::checkRefreshState(uint8_t rank)
{
    Rank& rank_ref = *ranks[rank];

    if ((rank_ref.refreshState == REF_PRE) &&
        !rank_ref.prechargeEvent.scheduled()) {
        // kick the refresh event loop into action again if banks already
        // closed and just waiting for read to complete
        schedule(rank_ref.refreshEvent, curTick());
    }
}
void
DRAMInterface::drainRanks()
{
    // also need to kick off events to exit self-refresh
    for (auto r : ranks) {
        // force self-refresh exit, which in turn will issue auto-refresh
        if (r->pwrState == PWR_SREF) {
            DPRINTF(DRAM, "Rank%d: Forcing self-refresh wakeup in drain\n",
                    r->rank);
            r->scheduleWakeUpEvent(tXS);
        }
    }
}
bool
DRAMInterface::allRanksDrained() const
{
    // true until proven false
    bool all_ranks_drained = true;
    for (auto r : ranks) {
        // then verify that the power state is IDLE ensuring all banks are
        // closed and rank is not in a low power state. Also verify that rank
        // is idle from a refresh point of view.
        all_ranks_drained = r->inPwrIdleState() && r->inRefIdleState() &&
                            all_ranks_drained;
    }
    return all_ranks_drained;
}
void
DRAMInterface::suspend()
{
    for (auto r : ranks) {
        r->suspend();
    }
}
pair<vector<uint32_t>, bool>
DRAMInterface::minBankPrep(const DRAMPacketQueue& queue,
                           Tick min_col_at) const
{
    Tick min_act_at = MaxTick;
    vector<uint32_t> bank_mask(ranksPerChannel, 0);

    // latest Tick for which ACT can occur without incurring additional
    // delay on the data bus
    const Tick hidden_act_max = std::max(min_col_at - tRCD, curTick());

    // Flag condition when burst can issue back-to-back with previous burst
    bool found_seamless_bank = false;

    // Flag condition when bank can be opened without incurring additional
    // delay on the data bus
    bool hidden_bank_prep = false;

    // determine if we have queued transactions targeting the
    // bank in question
    vector<bool> got_waiting(ranksPerChannel * banksPerRank, false);
    for (const auto& p : queue) {
        if (ranks[p->rank]->inRefIdleState())
            got_waiting[p->bankId] = true;
    }

    // Find command with optimal bank timing
    // Will prioritize commands that can issue seamlessly.
    for (int i = 0; i < ranksPerChannel; i++) {
        for (int j = 0; j < banksPerRank; j++) {
            uint16_t bank_id = i * banksPerRank + j;

            // if we have waiting requests for the bank, and it is
            // amongst the first available, update the mask
            if (got_waiting[bank_id]) {
                // make sure this rank is not currently refreshing.
                assert(ranks[i]->inRefIdleState());
                // simplistic approximation of when the bank can issue
                // an activate, ignoring any rank-to-rank switching
                // cost in this calculation
                Tick act_at = ranks[i]->banks[j].openRow == Bank::NO_ROW ?
                    std::max(ranks[i]->banks[j].actAllowedAt, curTick()) :
                    std::max(ranks[i]->banks[j].preAllowedAt, curTick()) + tRP;

                // When is the earliest the R/W burst can issue?
                const Tick col_allowed_at = ctrl->inReadBusState(false) ?
                                            ranks[i]->banks[j].rdAllowedAt :
                                            ranks[i]->banks[j].wrAllowedAt;
                Tick col_at = std::max(col_allowed_at, act_at + tRCD);

                // bank can issue burst back-to-back (seamlessly) with
                // previous burst
                bool new_seamless_bank = col_at <= min_col_at;

                // if we found a new seamless bank or we have no
                // seamless banks, and got a bank with an earlier
                // activate time, it should be added to the bit mask
                if (new_seamless_bank ||
                    (!found_seamless_bank && act_at <= min_act_at)) {
                    // if we did not have a seamless bank before, and
                    // we do now, reset the bank mask, also reset it
                    // if we have not yet found a seamless bank and
                    // the activate time is smaller than what we have
                    // seen so far
                    if (!found_seamless_bank &&
                        (new_seamless_bank || act_at < min_act_at)) {
                        std::fill(bank_mask.begin(), bank_mask.end(), 0);
                    }

                    found_seamless_bank |= new_seamless_bank;

                    // ACT can occur 'behind the scenes'
                    hidden_bank_prep = act_at <= hidden_act_max;

                    // set the bit corresponding to the available bank
                    replaceBits(bank_mask[i], j, j, 1);
                    min_act_at = act_at;
                }
            }
        }
    }
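
    // Illustrative reading of the result (values assumed): with 8 banks per
    // rank, bank_mask[0] == 0x05 means banks 0 and 2 of rank 0 tie for the
    // earliest activate time found, and hidden_bank_prep tells the caller
    // whether that ACT completes behind the in-flight burst.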
    return make_pair(bank_mask, hidden_bank_prep);
}
DRAMInterface*
DRAMInterfaceParams::create()
{
    return new DRAMInterface(this);
}
DRAMInterface::Rank::Rank(const DRAMInterfaceParams* _p,
                          int _rank, DRAMInterface& _dram)
    : EventManager(&_dram), dram(_dram),
      pwrStateTrans(PWR_IDLE), pwrStatePostRefresh(PWR_IDLE),
      pwrStateTick(0), refreshDueAt(0), pwrState(PWR_IDLE),
      refreshState(REF_IDLE), inLowPowerState(false), rank(_rank),
      readEntries(0), writeEntries(0), outstandingEvents(0),
      wakeUpAllowedAt(0), power(_p, false), banks(_p->banks_per_rank),
      numBanksActive(0), actTicks(_p->activation_limit, 0), lastBurstTick(0),
      writeDoneEvent([this]{ processWriteDoneEvent(); }, name()),
      activateEvent([this]{ processActivateEvent(); }, name()),
      prechargeEvent([this]{ processPrechargeEvent(); }, name()),
      refreshEvent([this]{ processRefreshEvent(); }, name()),
      powerEvent([this]{ processPowerEvent(); }, name()),
      wakeUpEvent([this]{ processWakeUpEvent(); }, name()),
      stats(_dram, *this)
{
    for (int b = 0; b < _p->banks_per_rank; b++) {
        banks[b].bank = b;
        // GDDR addressing of banks to BG is linear.
        // Here we assume that all DRAM generations address bank groups as
        // follows:
        if (_p->bank_groups_per_rank > 0) {
            // Simply assign lower bits to bank group in order to
            // rotate across bank groups as banks are incremented
            // e.g. with 4 banks per bank group and 16 banks total:
            //    banks 0,4,8,12  are in bank group 0
            //    banks 1,5,9,13  are in bank group 1
            //    banks 2,6,10,14 are in bank group 2
            //    banks 3,7,11,15 are in bank group 3
            banks[b].bankgr = b % _p->bank_groups_per_rank;
        } else {
            // No bank groups; simply assign to bank number
            banks[b].bankgr = b;
        }
    }
}
void
DRAMInterface::Rank::startup(Tick ref_tick)
{
    assert(ref_tick > curTick());

    pwrStateTick = curTick();

    // kick off the refresh, and give ourselves enough time to
    // precharge
    schedule(refreshEvent, ref_tick);
}
void
DRAMInterface::Rank::suspend()
{
    deschedule(refreshEvent);

    // Update the stats
    updatePowerStats();

    // don't automatically transition back to LP state after next REF
    pwrStatePostRefresh = PWR_IDLE;
}
bool
DRAMInterface::Rank::isQueueEmpty() const
{
    // check commands in Q based on current bus direction
    bool no_queued_cmds = (dram.ctrl->inReadBusState(true) &&
                           (readEntries == 0))
                          || (dram.ctrl->inWriteBusState(true) &&
                              (writeEntries == 0));
    return no_queued_cmds;
}
void
DRAMInterface::Rank::checkDrainDone()
{
    // if this rank was waiting to drain it is now able to proceed to
    // precharge
    if (refreshState == REF_DRAIN) {
        DPRINTF(DRAM, "Refresh drain done, now precharging\n");

        refreshState = REF_PD_EXIT;

        // hand control back to the refresh event loop
        schedule(refreshEvent, curTick());
    }
}
void
DRAMInterface::Rank::flushCmdList()
{
    // at the moment sort the list of commands and update the counters
    // for DRAMPower library when doing a refresh
    sort(cmdList.begin(), cmdList.end(), DRAMInterface::sortTime);

    auto next_iter = cmdList.begin();
    // push the commands to DRAMPower
    for ( ; next_iter != cmdList.end() ; ++next_iter) {
        Command cmd = *next_iter;
        if (cmd.timeStamp <= curTick()) {
            // Move all commands at or before curTick to DRAMPower
            power.powerlib.doCommand(cmd.type, cmd.bank,
                                     divCeil(cmd.timeStamp, dram.tCK) -
                                     dram.timeStampOffset);
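
            // Tick-to-cycle conversion example (illustrative numbers): with
            // tCK = 1250 ticks and a command stamped at tick 10000,
            // divCeil(10000, 1250) = 8, so DRAMPower sees clock cycle 8
            // relative to the timestamp offset.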
        } else {
            // done - found all commands at or before curTick()
            // next_iter references the 1st command after curTick
            break;
        }
    }
    // reset cmdList to only contain commands after curTick
    // if there are no commands after curTick, updated cmdList will be empty
    // in this case, next_iter is cmdList.end()
    cmdList.assign(next_iter, cmdList.end());
}
void
DRAMInterface::Rank::processActivateEvent()
{
    // we should transition to the active state as soon as any bank is active
    if (pwrState != PWR_ACT)
        // note that at this point numBanksActive could be back at
        // zero again due to a precharge scheduled in the future
        schedulePowerEvent(PWR_ACT, curTick());
}
void
DRAMInterface::Rank::processPrechargeEvent()
{
    // counter should at least indicate one outstanding request
    // for this precharge
    assert(outstandingEvents > 0);
    // precharge complete, decrement count
    --outstandingEvents;

    // if we reached zero, then special conditions apply as we track
    // if all banks are precharged for the power models
    if (numBanksActive == 0) {
        // no reads to this rank in the Q and no pending
        // RD/WR or refresh commands
        if (isQueueEmpty() && outstandingEvents == 0 &&
            dram.enableDRAMPowerdown) {
            // should still be in ACT state since bank still open
            assert(pwrState == PWR_ACT);

            // All banks closed - switch to precharge power down state.
            DPRINTF(DRAMState, "Rank %d sleep at tick %d\n",
                    rank, curTick());
            powerDownSleep(PWR_PRE_PDN, curTick());
        } else {
            // we should transition to the idle state when the last bank
            // is precharged
            schedulePowerEvent(PWR_IDLE, curTick());
        }
    }
}
void
DRAMInterface::Rank::processWriteDoneEvent()
{
    // counter should at least indicate one outstanding request
    // for this write
    assert(outstandingEvents > 0);
    // Write transfer on bus has completed
    // decrement per rank counter
    --outstandingEvents;
}
void
DRAMInterface::Rank::processRefreshEvent()
{
    // when first preparing the refresh, remember when it was due
    if ((refreshState == REF_IDLE) || (refreshState == REF_SREF_EXIT)) {
        // remember when the refresh is due
        refreshDueAt = curTick();

        // proceed to drain
        refreshState = REF_DRAIN;

        // make nonzero while refresh is pending to ensure
        // power down and self-refresh are not entered
        ++outstandingEvents;

        DPRINTF(DRAM, "Refresh due\n");
    }
    // let any scheduled read or write to the same rank go ahead,
    // after which it will hand control back to this event loop
    if (refreshState == REF_DRAIN) {
        // if a request is at the moment being handled and this request is
        // accessing the current rank then wait for it to finish
        if ((rank == dram.activeRank)
            && (dram.ctrl->requestEventScheduled())) {
            // hand control over to the request loop until it is
            // dealt with
            DPRINTF(DRAM, "Refresh awaiting draining\n");
            return;
        } else {
            refreshState = REF_PD_EXIT;
        }
    }
    // at this point, ensure that rank is not in a power-down state
    if (refreshState == REF_PD_EXIT) {
        // if rank was sleeping and we haven't started exit process,
        // wake-up for refresh
        if (inLowPowerState) {
            DPRINTF(DRAM, "Wake Up for refresh\n");
            // save state and return after refresh completes
            scheduleWakeUpEvent(dram.tXP);
            return;
        } else {
            refreshState = REF_PRE;
        }
    }
    // at this point, ensure that all banks are precharged
    if (refreshState == REF_PRE) {
        // precharge any active bank
        if (numBanksActive != 0) {
            // at the moment, we use a precharge all even if there is
            // only a single bank open
            DPRINTF(DRAM, "Precharging all\n");

            // first determine when we can precharge
            Tick pre_at = curTick();

            for (auto &b : banks) {
                // respect both causality and any existing bank
                // constraints, some banks could already have a
                // (auto) precharge scheduled
                pre_at = std::max(b.preAllowedAt, pre_at);
            }

            // make sure all banks per rank are precharged, and for those that
            // already are, update their availability
            Tick act_allowed_at = pre_at + dram.tRP;

            for (auto &b : banks) {
                if (b.openRow != Bank::NO_ROW) {
                    dram.prechargeBank(*this, b, pre_at, true, false);
                } else {
                    b.actAllowedAt = std::max(b.actAllowedAt, act_allowed_at);
                    b.preAllowedAt = std::max(b.preAllowedAt, pre_at);
                }
            }

            // precharge all banks in rank
            cmdList.push_back(Command(MemCommand::PREA, 0, pre_at));

            DPRINTF(DRAMPower, "%llu,PREA,0,%d\n",
                    divCeil(pre_at, dram.tCK) -
                    dram.timeStampOffset, rank);
        } else if ((pwrState == PWR_IDLE) && (outstandingEvents == 1)) {
            // Banks are closed, have transitioned to IDLE state, and
            // no outstanding ACT,RD/WR,Auto-PRE sequence scheduled
            DPRINTF(DRAM, "All banks already precharged, starting refresh\n");

            // go ahead and kick the power state machine into gear since
            // we are already idle
            schedulePowerEvent(PWR_REF, curTick());
        } else {
            // banks state is closed but haven't transitioned pwrState to IDLE
            // or have outstanding ACT,RD/WR,Auto-PRE sequence scheduled
            // should have outstanding precharge or read response event
            assert(prechargeEvent.scheduled() ||
                   dram.ctrl->respondEventScheduled());
            // will start refresh when pwrState transitions to IDLE
        }

        assert(numBanksActive == 0);

        // wait for all banks to be precharged or read to complete
        // When precharge commands are done, power state machine will
        // transition to the idle state, and automatically move to a
        // refresh, at that point it will also call this method to get
        // the refresh event loop going again
        // Similarly, when read response completes, if all banks are
        // precharged, will call this method to get loop re-started
        return;
    }
    // last but not least we perform the actual refresh
    if (refreshState == REF_START) {
        // should never get here with any banks active
        assert(numBanksActive == 0);
        assert(pwrState == PWR_REF);

        Tick ref_done_at = curTick() + dram.tRFC;

        for (auto &b : banks) {
            b.actAllowedAt = ref_done_at;
        }

        // at the moment this affects all ranks
        cmdList.push_back(Command(MemCommand::REF, 0, curTick()));

        // Update the stats
        updatePowerStats();

        DPRINTF(DRAMPower, "%llu,REF,0,%d\n", divCeil(curTick(), dram.tCK) -
                dram.timeStampOffset, rank);

        // Update for next refresh
        refreshDueAt += dram.tREFI;

        // make sure we did not wait so long that we cannot make up
        // for it
        if (refreshDueAt < ref_done_at) {
            fatal("Refresh was delayed so long we cannot catch up\n");
        }

        // Run the refresh and schedule event to transition power states
        // when refresh completes
        refreshState = REF_RUN;
        schedule(refreshEvent, ref_done_at);
        return;
    }
    if (refreshState == REF_RUN) {
        // should never get here with any banks active
        assert(numBanksActive == 0);
        assert(pwrState == PWR_REF);

        assert(!powerEvent.scheduled());

        if ((dram.ctrl->drainState() == DrainState::Draining) ||
            (dram.ctrl->drainState() == DrainState::Drained)) {
            // if draining, do not re-enter low-power mode.
            // simply go to IDLE and wait
            schedulePowerEvent(PWR_IDLE, curTick());
        } else {
            // At the moment, we sleep when the refresh ends and wait to be
            // woken up again if previously in a low-power state.
            if (pwrStatePostRefresh != PWR_IDLE) {
                // power State should be power Refresh
                assert(pwrState == PWR_REF);
                DPRINTF(DRAMState, "Rank %d sleeping after refresh and was in "
                        "power state %d before refreshing\n", rank,
                        pwrStatePostRefresh);
                powerDownSleep(pwrState, curTick());

            // Force PRE power-down if there are no outstanding commands
            // in Q after refresh.
            } else if (isQueueEmpty() && dram.enableDRAMPowerdown) {
                // still have refresh event outstanding but there should
                // be no other events outstanding
                assert(outstandingEvents == 1);
                DPRINTF(DRAMState, "Rank %d sleeping after refresh but was NOT"
                        " in a low power state before refreshing\n", rank);
                powerDownSleep(PWR_PRE_PDN, curTick());

            } else {
                // move to the idle power state once the refresh is done, this
                // will also move the refresh state machine to the refresh
                // idle state
                schedulePowerEvent(PWR_IDLE, curTick());
            }
        }

        // At this point, we have completed the current refresh.
        // In the SREF bypass case, we do not get to this state in the
        // refresh STM and therefore can always schedule next event.
        // Compensate for the delay in actually performing the refresh
        // when scheduling the next one
        schedule(refreshEvent, refreshDueAt - dram.tRP);

        DPRINTF(DRAMState, "Refresh done at %llu and next refresh"
                " at %llu\n", curTick(), refreshDueAt);
    }
}
void
DRAMInterface::Rank::schedulePowerEvent(PowerState pwr_state, Tick tick)
{
    // respect causality
    assert(tick >= curTick());

    if (!powerEvent.scheduled()) {
        DPRINTF(DRAMState, "Scheduling power event at %llu to state %d\n",
                tick, pwr_state);

        // insert the new transition
        pwrStateTrans = pwr_state;

        schedule(powerEvent, tick);
    } else {
        panic("Scheduled power event at %llu to state %d, "
              "with scheduled event at %llu to %d\n", tick, pwr_state,
              powerEvent.when(), pwrStateTrans);
    }
}
void
DRAMInterface::Rank::powerDownSleep(PowerState pwr_state, Tick tick)
{
    // if low power state is active low, schedule to active low power state.
    // in reality tCKE is needed to enter active low power. This is neglected
    // here and could be added in the future.
    if (pwr_state == PWR_ACT_PDN) {
        schedulePowerEvent(pwr_state, tick);
        // push command to DRAMPower
        cmdList.push_back(Command(MemCommand::PDN_F_ACT, 0, tick));
        DPRINTF(DRAMPower, "%llu,PDN_F_ACT,0,%d\n", divCeil(tick,
                dram.tCK) - dram.timeStampOffset, rank);
    } else if (pwr_state == PWR_PRE_PDN) {
        // if low power state is precharge low, schedule to precharge low
        // power state. In reality tCKE is needed to enter active low power.
        // This is neglected here.
        schedulePowerEvent(pwr_state, tick);
        // push Command to DRAMPower
        cmdList.push_back(Command(MemCommand::PDN_F_PRE, 0, tick));
        DPRINTF(DRAMPower, "%llu,PDN_F_PRE,0,%d\n", divCeil(tick,
                dram.tCK) - dram.timeStampOffset, rank);
    } else if (pwr_state == PWR_REF) {
        // if a refresh just occurred
        // transition to PRE_PDN now that all banks are closed
        // precharge power down requires tCKE to enter. For simplicity
        // this is not considered.
        schedulePowerEvent(PWR_PRE_PDN, tick);
        // push Command to DRAMPower
        cmdList.push_back(Command(MemCommand::PDN_F_PRE, 0, tick));
        DPRINTF(DRAMPower, "%llu,PDN_F_PRE,0,%d\n", divCeil(tick,
                dram.tCK) - dram.timeStampOffset, rank);
    } else if (pwr_state == PWR_SREF) {
        // should only enter SREF after PRE-PD wakeup to do a refresh
        assert(pwrStatePostRefresh == PWR_PRE_PDN);
        // self refresh requires time tCKESR to enter. For simplicity,
        // this is not considered.
        schedulePowerEvent(PWR_SREF, tick);
        // push Command to DRAMPower
        cmdList.push_back(Command(MemCommand::SREN, 0, tick));
        DPRINTF(DRAMPower, "%llu,SREN,0,%d\n", divCeil(tick,
                dram.tCK) - dram.timeStampOffset, rank);
    }
    // Ensure that we don't power-down and back up in same tick
    // Once we commit to PD entry, do it and wait for at least 1tCK
    // This could be replaced with tCKE if/when that is added to the model
    wakeUpAllowedAt = tick + dram.tCK;

    // Transitioning to a low power state, set flag
    inLowPowerState = true;
}
void
DRAMInterface::Rank::scheduleWakeUpEvent(Tick exit_delay)
{
    Tick wake_up_tick = std::max(curTick(), wakeUpAllowedAt);

    DPRINTF(DRAMState, "Scheduling wake-up for rank %d at tick %d\n",
            rank, wake_up_tick);

    // if waking for refresh, hold previous state
    // else reset state back to IDLE
    if (refreshState == REF_PD_EXIT) {
        pwrStatePostRefresh = pwrState;
    } else {
        // don't automatically transition back to LP state after next REF
        pwrStatePostRefresh = PWR_IDLE;
    }

    // schedule wake-up with event to ensure entry has completed before
    // we try to wake-up
    schedule(wakeUpEvent, wake_up_tick);

    for (auto &b : banks) {
        // respect both causality and any existing bank
        // constraints, some banks could already have a
        // (auto) precharge scheduled
        b.wrAllowedAt = std::max(wake_up_tick + exit_delay, b.wrAllowedAt);
        b.rdAllowedAt = std::max(wake_up_tick + exit_delay, b.rdAllowedAt);
        b.preAllowedAt = std::max(wake_up_tick + exit_delay, b.preAllowedAt);
        b.actAllowedAt = std::max(wake_up_tick + exit_delay, b.actAllowedAt);
    }
    // Transitioning out of low power state, clear flag
    inLowPowerState = false;

    // push to DRAMPower
    // use pwrStateTrans for cases where we have a power event scheduled
    // to enter low power that has not yet been processed
    if (pwrStateTrans == PWR_ACT_PDN) {
        cmdList.push_back(Command(MemCommand::PUP_ACT, 0, wake_up_tick));
        DPRINTF(DRAMPower, "%llu,PUP_ACT,0,%d\n", divCeil(wake_up_tick,
                dram.tCK) - dram.timeStampOffset, rank);
    } else if (pwrStateTrans == PWR_PRE_PDN) {
        cmdList.push_back(Command(MemCommand::PUP_PRE, 0, wake_up_tick));
        DPRINTF(DRAMPower, "%llu,PUP_PRE,0,%d\n", divCeil(wake_up_tick,
                dram.tCK) - dram.timeStampOffset, rank);
    } else if (pwrStateTrans == PWR_SREF) {
        cmdList.push_back(Command(MemCommand::SREX, 0, wake_up_tick));
        DPRINTF(DRAMPower, "%llu,SREX,0,%d\n", divCeil(wake_up_tick,
                dram.tCK) - dram.timeStampOffset, rank);
    }
}
void
DRAMInterface::Rank::processWakeUpEvent()
{
    // Should be in a power-down or self-refresh state
    assert((pwrState == PWR_ACT_PDN) || (pwrState == PWR_PRE_PDN) ||
           (pwrState == PWR_SREF));

    // Check current state to determine transition state
    if (pwrState == PWR_ACT_PDN) {
        // banks still open, transition to PWR_ACT
        schedulePowerEvent(PWR_ACT, curTick());
    } else {
        // transitioning from a precharge power-down or self-refresh state
        // banks are closed - transition to PWR_IDLE
        schedulePowerEvent(PWR_IDLE, curTick());
    }
}
void
DRAMInterface::Rank::processPowerEvent()
{
    assert(curTick() >= pwrStateTick);
    // remember where we were, and for how long
    Tick duration = curTick() - pwrStateTick;
    PowerState prev_state = pwrState;

    // update the accounting
    stats.pwrStateTime[prev_state] += duration;

    // track to total idle time
    if ((prev_state == PWR_PRE_PDN) || (prev_state == PWR_ACT_PDN) ||
        (prev_state == PWR_SREF)) {
        stats.totalIdleTime += duration;
    }

    pwrState = pwrStateTrans;
    pwrStateTick = curTick();

    // if rank was refreshing, make sure to start scheduling requests again
    if (prev_state == PWR_REF) {
        // bus IDLED prior to REF
        // counter should be one for refresh command only
        assert(outstandingEvents == 1);
        // REF complete, decrement count and go back to IDLE
        --outstandingEvents;
        refreshState = REF_IDLE;

        DPRINTF(DRAMState, "Was refreshing for %llu ticks\n", duration);
        // if moving back to power-down after refresh
        if (pwrState != PWR_IDLE) {
            assert(pwrState == PWR_PRE_PDN);
            DPRINTF(DRAMState, "Switching to power down state after refreshing"
                    " rank %d at %llu tick\n", rank, curTick());
        }

        // completed refresh event, ensure next request is scheduled
        if (!dram.ctrl->requestEventScheduled()) {
            DPRINTF(DRAM, "Scheduling next request after refreshing"
                    " rank %d\n", rank);
            dram.ctrl->restartScheduler(curTick());
        }
    }

    if ((pwrState == PWR_ACT) && (refreshState == REF_PD_EXIT)) {
        // have exited ACT PD
        assert(prev_state == PWR_ACT_PDN);

        // go back to REF event and close banks
        refreshState = REF_PRE;
        schedule(refreshEvent, curTick());
    } else if (pwrState == PWR_IDLE) {
        DPRINTF(DRAMState, "All banks precharged\n");
        if (prev_state == PWR_SREF) {
            // set refresh state to REF_SREF_EXIT, ensuring inRefIdleState
            // continues to return false during tXS after SREF exit
            // Schedule a refresh which kicks things back into action
            // when it finishes
            refreshState = REF_SREF_EXIT;
            schedule(refreshEvent, curTick() + dram.tXS);
        } else {
            // if we have a pending refresh, and are now moving to
            // the idle state, directly transition to, or schedule refresh
            if ((refreshState == REF_PRE) || (refreshState == REF_PD_EXIT)) {
                // ensure refresh is restarted only after final PRE command.
                // do not restart refresh if controller is in an intermediate
                // state, after PRE_PDN exit, when banks are IDLE but an
                // ACT is scheduled.
                if (!activateEvent.scheduled()) {
                    // there should be nothing waiting at this point
                    assert(!powerEvent.scheduled());
                    if (refreshState == REF_PD_EXIT) {
                        // exiting PRE PD, will be in IDLE until tXP expires
                        // and then should transition to PWR_REF state
                        assert(prev_state == PWR_PRE_PDN);
                        schedulePowerEvent(PWR_REF, curTick() + dram.tXP);
                    } else if (refreshState == REF_PRE) {
                        // can directly move to PWR_REF state and proceed below
                        pwrState = PWR_REF;
                    }
                } else {
                    // must have PRE scheduled to transition back to IDLE
                    // and re-kick off refresh
                    assert(prechargeEvent.scheduled());
                }
            }
        }
    }

    // transition to the refresh state and re-start refresh process
    // refresh state machine will schedule the next power state transition
    if (pwrState == PWR_REF) {
        // completed final PRE for refresh or exiting power-down
        assert(refreshState == REF_PRE || refreshState == REF_PD_EXIT);

        // exited PRE PD for refresh, with no pending commands
        // bypass auto-refresh and go straight to SREF, where memory
        // will issue refresh immediately upon entry
        if (pwrStatePostRefresh == PWR_PRE_PDN && isQueueEmpty() &&
            (dram.ctrl->drainState() != DrainState::Draining) &&
            (dram.ctrl->drainState() != DrainState::Drained) &&
            dram.enableDRAMPowerdown) {
            DPRINTF(DRAMState, "Rank %d bypassing refresh and transitioning "
                    "to self refresh at %llu tick\n", rank, curTick());
            powerDownSleep(PWR_SREF, curTick());

            // Since refresh was bypassed, remove event by decrementing count
            assert(outstandingEvents == 1);
            --outstandingEvents;

            // reset state back to IDLE temporarily until SREF is entered
            pwrState = PWR_IDLE;

        // Not bypassing refresh for SREF entry
        } else {
            DPRINTF(DRAMState, "Refreshing\n");

            // there should be nothing waiting at this point
            assert(!powerEvent.scheduled());

            // kick the refresh event loop into action again, and that
            // in turn will schedule a transition to the idle power
            // state once the refresh is done
            schedule(refreshEvent, curTick());

            // Banks transitioned to IDLE, start REF
            refreshState = REF_START;
        }
    }
}
void
DRAMInterface::Rank::updatePowerStats()
{
    // All commands up to refresh have completed
    // flush cmdList to DRAMPower
    flushCmdList();

    // Call the function that calculates window energy at intermediate update
    // events like at refresh, stats dump as well as at simulation exit.
    // Window starts at the last time the calcWindowEnergy function was called
    // and is up to current time.
    power.powerlib.calcWindowEnergy(divCeil(curTick(), dram.tCK) -
                                    dram.timeStampOffset);

    // Get the energy from DRAMPower
    Data::MemoryPowerModel::Energy energy = power.powerlib.getEnergy();

    // The energy components inside the power lib are calculated over
    // the window so accumulate into the corresponding gem5 stat
    stats.actEnergy += energy.act_energy * dram.devicesPerRank;
    stats.preEnergy += energy.pre_energy * dram.devicesPerRank;
    stats.readEnergy += energy.read_energy * dram.devicesPerRank;
    stats.writeEnergy += energy.write_energy * dram.devicesPerRank;
    stats.refreshEnergy += energy.ref_energy * dram.devicesPerRank;
    stats.actBackEnergy += energy.act_stdby_energy * dram.devicesPerRank;
    stats.preBackEnergy += energy.pre_stdby_energy * dram.devicesPerRank;
    stats.actPowerDownEnergy += energy.f_act_pd_energy * dram.devicesPerRank;
    stats.prePowerDownEnergy += energy.f_pre_pd_energy * dram.devicesPerRank;
    stats.selfRefreshEnergy += energy.sref_energy * dram.devicesPerRank;

    // Accumulate window energy into the total energy.
    stats.totalEnergy += energy.window_energy * dram.devicesPerRank;
    // Average power must not be accumulated but calculated over the time
    // since last stats reset. SimClock::Frequency is tick period not tick
    // frequency.
    //              energy (pJ)     1e-9
    // power (mW) = ----------- * ----------
    //              time (tick)   tick_frequency
    stats.averagePower = (stats.totalEnergy.value() /
                          (curTick() - dram.lastStatsResetTick)) *
                         (SimClock::Frequency / 1000000000.0);
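
    // Worked example (illustrative numbers, assuming 1 ps ticks so
    // SimClock::Frequency == 1e12): 2e9 pJ accumulated over 1e10 ticks gives
    // (2e9 / 1e10) * (1e12 / 1e9) = 0.2 * 1000 = 200 mW.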
}

void
DRAMInterface::Rank::computeStats()
{
    DPRINTF(DRAM, "Computing stats due to a dump callback\n");

    // Update the stats
    updatePowerStats();

    // final update of power state times
    stats.pwrStateTime[pwrState] += (curTick() - pwrStateTick);
    pwrStateTick = curTick();
}
void
DRAMInterface::Rank::resetStats() {
    // The only way to clear the counters in DRAMPower is to call the
    // calcWindowEnergy function as that then calls clearCounters. The
    // clearCounters method itself is private.
    power.powerlib.calcWindowEnergy(divCeil(curTick(), dram.tCK) -
                                    dram.timeStampOffset);
}
bool
DRAMInterface::Rank::forceSelfRefreshExit() const {
    return (readEntries != 0) ||
           (dram.ctrl->inWriteBusState(true) && (writeEntries != 0));
}
DRAMCtrl::CtrlStats::CtrlStats(DRAMCtrl &_ctrl)
    : Stats::Group(&_ctrl),
    ctrl(_ctrl),

    ADD_STAT(readReqs, "Number of read requests accepted"),
    ADD_STAT(writeReqs, "Number of write requests accepted"),

    ADD_STAT(readBursts,
             "Number of controller read bursts, "
             "including those serviced by the write queue"),
    ADD_STAT(writeBursts,
             "Number of controller write bursts, "
             "including those merged in the write queue"),
    ADD_STAT(servicedByWrQ,
             "Number of controller read bursts serviced by the write queue"),
    ADD_STAT(mergedWrBursts,
             "Number of controller write bursts merged with an existing one"),

    ADD_STAT(neitherReadNorWriteReqs,
             "Number of requests that are neither read nor write"),

    ADD_STAT(avgRdQLen, "Average read queue length when enqueuing"),
    ADD_STAT(avgWrQLen, "Average write queue length when enqueuing"),

    ADD_STAT(numRdRetry, "Number of times read queue was full causing retry"),
    ADD_STAT(numWrRetry, "Number of times write queue was full causing retry"),

    ADD_STAT(readPktSize, "Read request sizes (log2)"),
    ADD_STAT(writePktSize, "Write request sizes (log2)"),

    ADD_STAT(rdQLenPdf, "What read queue length does an incoming req see"),
    ADD_STAT(wrQLenPdf, "What write queue length does an incoming req see"),

    ADD_STAT(rdPerTurnAround,
             "Reads before turning the bus around for writes"),
    ADD_STAT(wrPerTurnAround,
             "Writes before turning the bus around for reads"),

    ADD_STAT(bytesReadWrQ, "Total number of bytes read from write queue"),
    ADD_STAT(bytesReadSys, "Total read bytes from the system interface side"),
    ADD_STAT(bytesWrittenSys,
             "Total written bytes from the system interface side"),

    ADD_STAT(avgRdBWSys, "Average system read bandwidth in MiByte/s"),
    ADD_STAT(avgWrBWSys, "Average system write bandwidth in MiByte/s"),

    ADD_STAT(totGap, "Total gap between requests"),
    ADD_STAT(avgGap, "Average gap between requests"),

    ADD_STAT(masterReadBytes, "Per-master bytes read from memory"),
    ADD_STAT(masterWriteBytes, "Per-master bytes written to memory"),
    ADD_STAT(masterReadRate,
             "Per-master bytes read from memory rate (Bytes/sec)"),
    ADD_STAT(masterWriteRate,
             "Per-master bytes written to memory rate (Bytes/sec)"),
    ADD_STAT(masterReadAccesses,
             "Per-master read serviced memory accesses"),
    ADD_STAT(masterWriteAccesses,
             "Per-master write serviced memory accesses"),
    ADD_STAT(masterReadTotalLat,
             "Per-master read total memory access latency"),
    ADD_STAT(masterWriteTotalLat,
             "Per-master write total memory access latency"),
    ADD_STAT(masterReadAvgLat,
             "Per-master read average memory access latency"),
    ADD_STAT(masterWriteAvgLat,
             "Per-master write average memory access latency")
{
}
void
DRAMCtrl::CtrlStats::regStats()
{
    using namespace Stats;

    assert(ctrl.system());
    const auto max_masters = ctrl.system()->maxMasters();

    avgRdQLen.precision(2);
    avgWrQLen.precision(2);

    readPktSize.init(ceilLog2(ctrl.dram->bytesPerBurst()) + 1);
    writePktSize.init(ceilLog2(ctrl.dram->bytesPerBurst()) + 1);

    rdQLenPdf.init(ctrl.readBufferSize);
    wrQLenPdf.init(ctrl.writeBufferSize);

    rdPerTurnAround
        .init(ctrl.readBufferSize)
        .flags(nozero);
    wrPerTurnAround
        .init(ctrl.writeBufferSize)
        .flags(nozero);

    avgRdBWSys.precision(2);
    avgWrBWSys.precision(2);
    avgGap.precision(2);
    // per-master bytes read and written to memory
    masterReadBytes
        .init(max_masters)
        .flags(nozero | nonan);

    masterWriteBytes
        .init(max_masters)
        .flags(nozero | nonan);

    // per-master bytes read and written to memory rate
    masterReadRate
        .flags(nozero | nonan)
        .precision(12);

    masterWriteRate
        .flags(nozero | nonan)
        .precision(12);

    masterReadAccesses
        .init(max_masters)
        .flags(nozero | nonan);

    masterWriteAccesses
        .init(max_masters)
        .flags(nozero | nonan);

    masterReadTotalLat
        .init(max_masters)
        .flags(nozero | nonan);

    masterWriteTotalLat
        .init(max_masters)
        .flags(nozero | nonan);

    masterReadAvgLat
        .flags(nozero | nonan)
        .precision(2);

    masterWriteAvgLat
        .flags(nozero | nonan)
        .precision(2);
    for (int i = 0; i < max_masters; i++) {
        const std::string master = ctrl.system()->getMasterName(i);
        masterReadBytes.subname(i, master);
        masterReadRate.subname(i, master);
        masterWriteBytes.subname(i, master);
        masterWriteRate.subname(i, master);
        masterReadAccesses.subname(i, master);
        masterWriteAccesses.subname(i, master);
        masterReadTotalLat.subname(i, master);
        masterReadAvgLat.subname(i, master);
        masterWriteTotalLat.subname(i, master);
        masterWriteAvgLat.subname(i, master);
    }
    avgRdBWSys = (bytesReadSys / 1000000) / simSeconds;
    avgWrBWSys = (bytesWrittenSys / 1000000) / simSeconds;
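
    // Worked example (illustrative numbers): 2e9 bytes read over 0.5
    // simulated seconds gives avgRdBWSys = (2e9 / 1e6) / 0.5 = 4000, i.e.
    // roughly 4000 MiByte/s (the 1e6 divisor approximates a MiByte).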
    avgGap = totGap / (readReqs + writeReqs);

    masterReadRate = masterReadBytes / simSeconds;
    masterWriteRate = masterWriteBytes / simSeconds;
    masterReadAvgLat = masterReadTotalLat / masterReadAccesses;
    masterWriteAvgLat = masterWriteTotalLat / masterWriteAccesses;
}
void
DRAMInterface::DRAMStats::resetStats()
{
    dram.lastStatsResetTick = curTick();
}
DRAMInterface::DRAMStats::DRAMStats(DRAMInterface &_dram)
    : Stats::Group(&_dram),
    dram(_dram),

    ADD_STAT(readBursts, "Number of DRAM read bursts"),
    ADD_STAT(writeBursts, "Number of DRAM write bursts"),

    ADD_STAT(perBankRdBursts, "Per bank read bursts"),
    ADD_STAT(perBankWrBursts, "Per bank write bursts"),

    ADD_STAT(totQLat, "Total ticks spent queuing"),
    ADD_STAT(totBusLat, "Total ticks spent in databus transfers"),
    ADD_STAT(totMemAccLat,
             "Total ticks spent from burst creation until serviced "
             "by the DRAM"),

    ADD_STAT(avgQLat, "Average queueing delay per DRAM burst"),
    ADD_STAT(avgBusLat, "Average bus latency per DRAM burst"),
    ADD_STAT(avgMemAccLat, "Average memory access latency per DRAM burst"),

    ADD_STAT(readRowHits, "Number of row buffer hits during reads"),
    ADD_STAT(writeRowHits, "Number of row buffer hits during writes"),
    ADD_STAT(readRowHitRate, "Row buffer hit rate for reads"),
    ADD_STAT(writeRowHitRate, "Row buffer hit rate for writes"),

    ADD_STAT(bytesPerActivate, "Bytes accessed per row activation"),
    ADD_STAT(bytesRead, "Total number of bytes read from DRAM"),
    ADD_STAT(bytesWritten, "Total number of bytes written to DRAM"),
    ADD_STAT(avgRdBW, "Average DRAM read bandwidth in MiBytes/s"),
    ADD_STAT(avgWrBW, "Average DRAM write bandwidth in MiBytes/s"),
    ADD_STAT(peakBW, "Theoretical peak bandwidth in MiByte/s"),

    ADD_STAT(busUtil, "Data bus utilization in percentage"),
    ADD_STAT(busUtilRead, "Data bus utilization in percentage for reads"),
    ADD_STAT(busUtilWrite, "Data bus utilization in percentage for writes"),

    ADD_STAT(pageHitRate, "Row buffer hit rate, read and write combined")
{
}
void
DRAMInterface::DRAMStats::regStats()
{
    using namespace Stats;

    avgQLat.precision(2);
    avgBusLat.precision(2);
    avgMemAccLat.precision(2);

    readRowHitRate.precision(2);
    writeRowHitRate.precision(2);

    perBankRdBursts.init(dram.banksPerRank * dram.ranksPerChannel);
    perBankWrBursts.init(dram.banksPerRank * dram.ranksPerChannel);

    bytesPerActivate
        .init(dram.maxAccessesPerRow ?
              dram.maxAccessesPerRow : dram.rowBufferSize)
        .flags(nozero);

    peakBW.precision(2);
    busUtil.precision(2);
    busUtilWrite.precision(2);
    busUtilRead.precision(2);

    pageHitRate.precision(2);

    avgQLat = totQLat / readBursts;
    avgBusLat = totBusLat / readBursts;
    avgMemAccLat = totMemAccLat / readBursts;

    readRowHitRate = (readRowHits / readBursts) * 100;
    writeRowHitRate = (writeRowHits / writeBursts) * 100;

    avgRdBW = (bytesRead / 1000000) / simSeconds;
    avgWrBW = (bytesWritten / 1000000) / simSeconds;
    peakBW = (SimClock::Frequency / dram.burstDataDelay()) *
             dram.bytesPerBurst() / 1000000;
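
    // Worked example (illustrative numbers): a 64-byte burst every 3333
    // ticks with a 1 THz tick frequency gives peakBW =
    // (1e12 / 3333) * 64 / 1e6 ~= 19200 MiByte/s.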
    busUtil = (avgRdBW + avgWrBW) / peakBW * 100;
    busUtilRead = avgRdBW / peakBW * 100;
    busUtilWrite = avgWrBW / peakBW * 100;

    pageHitRate = (writeRowHits + readRowHits) /
                  (writeBursts + readBursts) * 100;
}
DRAMInterface::RankStats::RankStats(DRAMInterface &_dram, Rank &_rank)
    : Stats::Group(&_dram, csprintf("rank%d", _rank.rank).c_str()),
    rank(_rank),

    ADD_STAT(actEnergy, "Energy for activate commands per rank (pJ)"),
    ADD_STAT(preEnergy, "Energy for precharge commands per rank (pJ)"),
    ADD_STAT(readEnergy, "Energy for read commands per rank (pJ)"),
    ADD_STAT(writeEnergy, "Energy for write commands per rank (pJ)"),
    ADD_STAT(refreshEnergy, "Energy for refresh commands per rank (pJ)"),
    ADD_STAT(actBackEnergy, "Energy for active background per rank (pJ)"),
    ADD_STAT(preBackEnergy, "Energy for precharge background per rank (pJ)"),
    ADD_STAT(actPowerDownEnergy,
             "Energy for active power-down per rank (pJ)"),
    ADD_STAT(prePowerDownEnergy,
             "Energy for precharge power-down per rank (pJ)"),
    ADD_STAT(selfRefreshEnergy, "Energy for self refresh per rank (pJ)"),

    ADD_STAT(totalEnergy, "Total energy per rank (pJ)"),
    ADD_STAT(averagePower, "Core power per rank (mW)"),

    ADD_STAT(totalIdleTime, "Total Idle time Per DRAM Rank"),
    ADD_STAT(pwrStateTime, "Time in different power states")
{
}
void
DRAMInterface::RankStats::regStats()
{
    Stats::Group::regStats();

    pwrStateTime
        .init(6)
        .subname(0, "IDLE")
        .subname(1, "REF")
        .subname(2, "SREF")
        .subname(3, "PRE_PDN")
        .subname(4, "ACT")
        .subname(5, "ACT_PDN");
}
void
DRAMInterface::RankStats::resetStats()
{
    Stats::Group::resetStats();

    rank.resetStats();
}
void
DRAMInterface::RankStats::preDumpStats()
{
    Stats::Group::preDumpStats();

    rank.computeStats();
}
void
DRAMCtrl::recvFunctional(PacketPtr pkt)
{
    // rely on the abstract memory
    dram->functionalAccess(pkt);
}
Port &
DRAMCtrl::getPort(const string &if_name, PortID idx)
{
    if (if_name != "port") {
        return QoS::MemCtrl::getPort(if_name, idx);
    } else {
        return port;
    }
}
DrainState
DRAMCtrl::drain()
{
    // if there is anything in any of our internal queues, keep track
    // of that as well
    if (!(!totalWriteQueueSize && !totalReadQueueSize && respQueue.empty() &&
          dram->allRanksDrained())) {

        DPRINTF(Drain, "DRAM controller not drained, write: %d, read: %d,"
                " resp: %d\n", totalWriteQueueSize, totalReadQueueSize,
                respQueue.size());

        // the only queue that is not drained automatically over time
        // is the write queue, thus kick things into action if needed
        if (!totalWriteQueueSize && !nextReqEvent.scheduled()) {
            schedule(nextReqEvent, curTick());
        }

        dram->drainRanks();

        return DrainState::Draining;
    } else {
        return DrainState::Drained;
    }
}
void
DRAMCtrl::drainResume()
{
    if (!isTimingMode && system()->isTimingMode()) {
        // if we switched to timing mode, kick things into action,
        // and behave as if we restored from a checkpoint
        startup();
        dram->startup();
    } else if (isTimingMode && !system()->isTimingMode()) {
        // if we switch from timing mode, stop the refresh events to
        // not cause issues with KVM
        dram->suspend();
    }

    // update the mode
    isTimingMode = system()->isTimingMode();
}
DRAMCtrl::MemoryPort::MemoryPort(const std::string& name, DRAMCtrl& _ctrl)
    : QueuedSlavePort(name, &_ctrl, queue), queue(_ctrl, *this, true),
      ctrl(_ctrl)
{
}
AddrRangeList
DRAMCtrl::MemoryPort::getAddrRanges() const
{
    AddrRangeList ranges;
    ranges.push_back(ctrl.dram->getAddrRange());
    return ranges;
}
void
DRAMCtrl::MemoryPort::recvFunctional(PacketPtr pkt)
{
    pkt->pushLabel(ctrl.name());

    if (!queue.trySatisfyFunctional(pkt)) {
        // Default implementation of SimpleTimingPort::recvFunctional()
        // calls recvAtomic() and throws away the latency; we can save a
        // little here by just not calculating the latency.
        ctrl.recvFunctional(pkt);
    }

    pkt->popLabel();
}
Tick
DRAMCtrl::MemoryPort::recvAtomic(PacketPtr pkt)
{
    return ctrl.recvAtomic(pkt);
}
bool
DRAMCtrl::MemoryPort::recvTimingReq(PacketPtr pkt)
{
    // pass it to the memory controller
    return ctrl.recvTimingReq(pkt);
}
DRAMCtrl*
DRAMCtrlParams::create()
{
    return new DRAMCtrl(this);
}