/*
 * Copyright (c) 2010-2019 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2013 Amin Farmahini-Farahani
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Andreas Hansson
 */
#include "mem/dram_ctrl.hh"

#include "base/bitfield.hh"
#include "base/trace.hh"
#include "debug/DRAM.hh"
#include "debug/DRAMPower.hh"
#include "debug/DRAMState.hh"
#include "debug/Drain.hh"
#include "debug/QOS.hh"
#include "sim/system.hh"

using namespace std;
using namespace Data;
DRAMCtrl::DRAMCtrl(const DRAMCtrlParams* p) :
    QoS::MemCtrl(p),
    port(name() + ".port", *this), isTimingMode(false),
    retryRdReq(false), retryWrReq(false),
    nextReqEvent([this]{ processNextReqEvent(); }, name()),
    respondEvent([this]{ processRespondEvent(); }, name()),
    deviceSize(p->device_size),
    deviceBusWidth(p->device_bus_width), burstLength(p->burst_length),
    deviceRowBufferSize(p->device_rowbuffer_size),
    devicesPerRank(p->devices_per_rank),
    burstSize((devicesPerRank * burstLength * deviceBusWidth) / 8),
    rowBufferSize(devicesPerRank * deviceRowBufferSize),
    columnsPerRowBuffer(rowBufferSize / burstSize),
    columnsPerStripe(range.interleaved() ?
                     range.granularity() / burstSize : 1),
    ranksPerChannel(p->ranks_per_channel),
    bankGroupsPerRank(p->bank_groups_per_rank),
    bankGroupArch(p->bank_groups_per_rank > 0),
    banksPerRank(p->banks_per_rank), channels(p->channels), rowsPerBank(0),
    readBufferSize(p->read_buffer_size),
    writeBufferSize(p->write_buffer_size),
    writeHighThreshold(writeBufferSize * p->write_high_thresh_perc / 100.0),
    writeLowThreshold(writeBufferSize * p->write_low_thresh_perc / 100.0),
    minWritesPerSwitch(p->min_writes_per_switch),
    writesThisTime(0), readsThisTime(0),
    tCK(p->tCK), tRTW(p->tRTW), tCS(p->tCS), tBURST(p->tBURST),
    tCCD_L_WR(p->tCCD_L_WR),
    tCCD_L(p->tCCD_L), tRCD(p->tRCD), tCL(p->tCL), tRP(p->tRP), tRAS(p->tRAS),
    tWR(p->tWR), tRTP(p->tRTP), tRFC(p->tRFC), tREFI(p->tREFI), tRRD(p->tRRD),
    tRRD_L(p->tRRD_L), tXAW(p->tXAW), tXP(p->tXP), tXS(p->tXS),
    activationLimit(p->activation_limit), rankToRankDly(tCS + tBURST),
    wrToRdDly(tCL + tBURST + p->tWTR), rdToWrDly(tRTW + tBURST),
    memSchedPolicy(p->mem_sched_policy), addrMapping(p->addr_mapping),
    pageMgmt(p->page_policy),
    maxAccessesPerRow(p->max_accesses_per_row),
    frontendLatency(p->static_frontend_latency),
    backendLatency(p->static_backend_latency),
    nextBurstAt(0), prevArrival(0),
    nextReqTime(0),
    stats(*this),
    activeRank(0), timeStampOffset(0),
    lastStatsResetTick(0), enableDRAMPowerdown(p->enable_dram_powerdown)
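// Illustrative sizing note (example values, not the defaults of any
// particular config): with devicesPerRank = 8, burstLength = 8 and
// deviceBusWidth = 8, burstSize = (8 * 8 * 8) / 8 = 64 bytes, i.e. one
// burst carries a cache line; with deviceRowBufferSize = 1024 bytes the
// rowBufferSize is 8192 bytes and columnsPerRowBuffer = 8192 / 64 = 128.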
{
    // sanity check the ranks since we rely on bit slicing for the
    // address decoding
    fatal_if(!isPowerOf2(ranksPerChannel), "DRAM rank count of %d is not "
             "allowed, must be a power of two\n", ranksPerChannel);

    fatal_if(!isPowerOf2(burstSize), "DRAM burst size %d is not allowed, "
             "must be a power of two\n", burstSize);

    readQueue.resize(p->qos_priorities);
    writeQueue.resize(p->qos_priorities);

    for (int i = 0; i < ranksPerChannel; i++) {
        Rank* rank = new Rank(*this, p, i);
        ranks.push_back(rank);
    }

    // perform a basic check of the write thresholds
    if (p->write_low_thresh_perc >= p->write_high_thresh_perc)
        fatal("Write buffer low threshold %d must be smaller than the "
              "high threshold %d\n", p->write_low_thresh_perc,
              p->write_high_thresh_perc);

    // determine the rows per bank by looking at the total capacity
    uint64_t capacity = ULL(1) << ceilLog2(AbstractMemory::size());

    // determine the dram actual capacity from the DRAM config in Mbytes
    uint64_t deviceCapacity = deviceSize / (1024 * 1024) * devicesPerRank *
        ranksPerChannel;

    // if actual DRAM size does not match memory capacity in system warn!
    if (deviceCapacity != capacity / (1024 * 1024))
        warn("DRAM device capacity (%d Mbytes) does not match the "
             "address range assigned (%d Mbytes)\n", deviceCapacity,
             capacity / (1024 * 1024));

    DPRINTF(DRAM, "Memory capacity %lld (%lld) bytes\n", capacity,
            AbstractMemory::size());

    DPRINTF(DRAM, "Row buffer size %d bytes with %d columns per row buffer\n",
            rowBufferSize, columnsPerRowBuffer);

    rowsPerBank = capacity / (rowBufferSize * banksPerRank * ranksPerChannel);
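    // Worked example with illustrative values: a 1 GiB channel with an
    // 8192-byte row buffer, 16 banks per rank and 2 ranks gives
    // rowsPerBank = 2^30 / (8192 * 16 * 2) = 4096 rows per bank.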
    // some basic sanity checks
    if (tREFI <= tRP || tREFI <= tRFC) {
        fatal("tREFI (%d) must be larger than tRP (%d) and tRFC (%d)\n",
              tREFI, tRP, tRFC);
    }

    // basic bank group architecture checks ->
    if (bankGroupArch) {
        // must have at least one bank per bank group
        if (bankGroupsPerRank > banksPerRank) {
            fatal("banks per rank (%d) must be equal to or larger than "
                  "bank groups per rank (%d)\n",
                  banksPerRank, bankGroupsPerRank);
        }
        // must have same number of banks in each bank group
        if ((banksPerRank % bankGroupsPerRank) != 0) {
            fatal("Banks per rank (%d) must be evenly divisible by bank "
                  "groups per rank (%d) for equal banks per bank group\n",
                  banksPerRank, bankGroupsPerRank);
        }
        // tCCD_L should be greater than minimal, back-to-back burst delay
        if (tCCD_L <= tBURST) {
            fatal("tCCD_L (%d) should be larger than tBURST (%d) when "
                  "bank groups per rank (%d) is greater than 1\n",
                  tCCD_L, tBURST, bankGroupsPerRank);
        }
        // tCCD_L_WR should be greater than minimal, back-to-back burst delay
        if (tCCD_L_WR <= tBURST) {
            fatal("tCCD_L_WR (%d) should be larger than tBURST (%d) when "
                  "bank groups per rank (%d) is greater than 1\n",
                  tCCD_L_WR, tBURST, bankGroupsPerRank);
        }
        // tRRD_L is greater than minimal, same bank group ACT-to-ACT delay
        // some datasheets might specify it equal to tRRD
        if (tRRD_L < tRRD) {
            fatal("tRRD_L (%d) should be larger than tRRD (%d) when "
                  "bank groups per rank (%d) is greater than 1\n",
                  tRRD_L, tRRD, bankGroupsPerRank);
        }
    }
}
void
DRAMCtrl::init()
{
    if (!port.isConnected()) {
        fatal("DRAMCtrl %s is unconnected!\n", name());
    } else {
        port.sendRangeChange();
    }

    // a bit of sanity checks on the interleaving, save it for here to
    // ensure that the system pointer is initialised
    if (range.interleaved()) {
        if (channels != range.stripes())
            fatal("%s has %d interleaved address stripes but %d channel(s)\n",
                  name(), range.stripes(), channels);

        if (addrMapping == Enums::RoRaBaChCo) {
            if (rowBufferSize != range.granularity()) {
                fatal("Channel interleaving of %s doesn't match RoRaBaChCo "
                      "address map\n", name());
            }
        } else if (addrMapping == Enums::RoRaBaCoCh ||
                   addrMapping == Enums::RoCoRaBaCh) {
            // for the interleavings with channel bits in the bottom,
            // if the system uses a channel striping granularity that
            // is larger than the DRAM burst size, then map the
            // sequential accesses within a stripe to a number of
            // columns in the DRAM, effectively placing some of the
            // lower-order column bits as the least-significant bits
            // of the address (above the ones denoting the burst size)
            assert(columnsPerStripe >= 1);

            // channel striping has to be done at a granularity that
            // is equal or larger to a cache line
            if (system()->cacheLineSize() > range.granularity()) {
                fatal("Channel interleaving of %s must be at least as large "
                      "as the cache line size\n", name());
            }

            // ...and equal or smaller than the row-buffer size
            if (rowBufferSize < range.granularity()) {
                fatal("Channel interleaving of %s must be at most as large "
                      "as the row-buffer size\n", name());
            }
            // this is essentially the check above, so just to be sure
            assert(columnsPerStripe <= columnsPerRowBuffer);
        }
    }
}
void
DRAMCtrl::startup()
{
    // remember the memory system mode of operation
    isTimingMode = system()->isTimingMode();

    if (isTimingMode) {
        // timestamp offset should be in clock cycles for DRAMPower
        timeStampOffset = divCeil(curTick(), tCK);

        // update the start tick for the precharge accounting to the
        // current tick
        for (auto r : ranks) {
            r->startup(curTick() + tREFI - tRP);
        }

        // shift the bus busy time sufficiently far ahead that we never
        // have to worry about negative values when computing the time for
        // the next request, this will add an insignificant bubble at the
        // start of simulation
        nextBurstAt = curTick() + tRP + tRCD;
    }
}
Tick
DRAMCtrl::recvAtomic(PacketPtr pkt)
{
    DPRINTF(DRAM, "recvAtomic: %s 0x%x\n", pkt->cmdString(), pkt->getAddr());

    panic_if(pkt->cacheResponding(), "Should not see packets where cache "
             "is responding");

    // do the actual memory access and turn the packet into a response
    access(pkt);

    Tick latency = 0;
    if (pkt->hasData()) {
        // this value is not supposed to be accurate, just enough to
        // keep things going, mimic a closed page
        latency = tRP + tRCD + tCL;
    }
    return latency;
}
bool
DRAMCtrl::readQueueFull(unsigned int neededEntries) const
{
    DPRINTF(DRAM, "Read queue limit %d, current size %d, entries needed %d\n",
            readBufferSize, totalReadQueueSize + respQueue.size(),
            neededEntries);

    auto rdsize_new = totalReadQueueSize + respQueue.size() + neededEntries;
    return rdsize_new > readBufferSize;
}
bool
DRAMCtrl::writeQueueFull(unsigned int neededEntries) const
{
    DPRINTF(DRAM, "Write queue limit %d, current size %d, entries needed %d\n",
            writeBufferSize, totalWriteQueueSize, neededEntries);

    auto wrsize_new = (totalWriteQueueSize + neededEntries);
    return wrsize_new > writeBufferSize;
}
DRAMCtrl::DRAMPacket*
DRAMCtrl::decodeAddr(const PacketPtr pkt, Addr dramPktAddr, unsigned size,
                     bool isRead) const
{
    // decode the address based on the address mapping scheme, with
    // Ro, Ra, Co, Ba and Ch denoting row, rank, column, bank and
    // channel, respectively
    uint8_t rank;
    uint8_t bank;
    // use a 64-bit unsigned during the computations as the row is
    // always the top bits, and check before creating the DRAMPacket
    uint64_t row;

    // truncate the address to a DRAM burst, which makes it unique to
    // a specific column, row, bank, rank and channel
    Addr addr = dramPktAddr / burstSize;
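    // For example, with a 64-byte burst size (illustrative), dramPktAddr
    // 0x1040 becomes burst number 0x41; the low six offset bits carry no
    // decode information, and the divide/modulo steps below peel off
    // column, channel, bank, rank and row fields in the order dictated by
    // the chosen mapping.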
    // we have removed the lowest order address bits that denote the
    // position within the column
    if (addrMapping == Enums::RoRaBaChCo) {
        // the lowest order bits denote the column to ensure that
        // sequential cache lines occupy the same row
        addr = addr / columnsPerRowBuffer;

        // take out the channel part of the address
        addr = addr / channels;

        // after the channel bits, get the bank bits to interleave
        // over the banks
        bank = addr % banksPerRank;
        addr = addr / banksPerRank;

        // after the bank, we get the rank bits which thus interleaves
        // over the ranks
        rank = addr % ranksPerChannel;
        addr = addr / ranksPerChannel;

        // lastly, get the row bits, no need to remove them from addr
        row = addr % rowsPerBank;
    } else if (addrMapping == Enums::RoRaBaCoCh) {
        // take out the lower-order column bits
        addr = addr / columnsPerStripe;

        // take out the channel part of the address
        addr = addr / channels;

        // next, the higher-order column bits
        addr = addr / (columnsPerRowBuffer / columnsPerStripe);

        // after the column bits, we get the bank bits to interleave
        // over the banks
        bank = addr % banksPerRank;
        addr = addr / banksPerRank;

        // after the bank, we get the rank bits which thus interleaves
        // over the ranks
        rank = addr % ranksPerChannel;
        addr = addr / ranksPerChannel;

        // lastly, get the row bits, no need to remove them from addr
        row = addr % rowsPerBank;
    } else if (addrMapping == Enums::RoCoRaBaCh) {
        // optimise for closed page mode and utilise maximum
        // parallelism of the DRAM (at the cost of power)

        // take out the lower-order column bits
        addr = addr / columnsPerStripe;

        // take out the channel part of the address, note that this has
        // to match with how accesses are interleaved between the
        // controllers in the address mapping
        addr = addr / channels;

        // start with the bank bits, as this provides the maximum
        // opportunity for parallelism between requests
        bank = addr % banksPerRank;
        addr = addr / banksPerRank;

        // next get the rank bits
        rank = addr % ranksPerChannel;
        addr = addr / ranksPerChannel;

        // next, the higher-order column bits
        addr = addr / (columnsPerRowBuffer / columnsPerStripe);

        // lastly, get the row bits, no need to remove them from addr
        row = addr % rowsPerBank;
    } else {
        panic("Unknown address mapping policy chosen!");
    }

    assert(rank < ranksPerChannel);
    assert(bank < banksPerRank);
    assert(row < rowsPerBank);
    assert(row < Bank::NO_ROW);

    DPRINTF(DRAM, "Address: %lld Rank %d Bank %d Row %d\n",
            dramPktAddr, rank, bank, row);

    // create the corresponding DRAM packet with the entry time and
    // ready time set to the current tick, the latter will be updated
    // later
    uint16_t bank_id = banksPerRank * rank + bank;
    return new DRAMPacket(pkt, isRead, rank, bank, row, bank_id, dramPktAddr,
                          size, ranks[rank]->banks[bank], *ranks[rank]);
}
void
DRAMCtrl::addToReadQueue(PacketPtr pkt, unsigned int pktCount)
{
    // only add to the read queue here. whenever the request is
    // eventually done, set the readyTime, and call schedule()
    assert(!pkt->isWrite());

    assert(pktCount != 0);

    // if the request size is larger than burst size, the pkt is split into
    // multiple DRAM packets
    // Note if the pkt starting address is not aligned to burst size, the
    // address of first DRAM packet is kept unaligned. Subsequent DRAM packets
    // are aligned to burst size boundaries. This is to ensure we accurately
    // check read packets against packets in write queue.
    Addr addr = pkt->getAddr();
    unsigned pktsServicedByWrQ = 0;
    BurstHelper* burst_helper = NULL;
    for (int cnt = 0; cnt < pktCount; ++cnt) {
        unsigned size = std::min((addr | (burstSize - 1)) + 1,
                        pkt->getAddr() + pkt->getSize()) - addr;
        stats.readPktSize[ceilLog2(size)]++;
        stats.readBursts++;
        stats.masterReadAccesses[pkt->masterId()]++;

        // First check write buffer to see if the data is already at
        // the controller
        bool foundInWrQ = false;
        Addr burst_addr = burstAlign(addr);
        // if the burst address is not present then there is no need
        // looking any further
        if (isInWriteQueue.find(burst_addr) != isInWriteQueue.end()) {
            for (const auto& vec : writeQueue) {
                for (const auto& p : vec) {
                    // check if the read is subsumed in the write queue
                    // packet we are looking at
                    if (p->addr <= addr &&
                       ((addr + size) <= (p->addr + p->size))) {

                        foundInWrQ = true;
                        stats.servicedByWrQ++;
                        pktsServicedByWrQ++;
                        DPRINTF(DRAM,
                                "Read to addr %lld with size %d serviced by "
                                "write queue\n",
                                addr, size);
                        stats.bytesReadWrQ += burstSize;
                        break;
                    }
                }
            }
        }

        // If not found in the write q, make a DRAM packet and
        // push it onto the read queue
        if (!foundInWrQ) {

            // Make the burst helper for split packets
            if (pktCount > 1 && burst_helper == NULL) {
                DPRINTF(DRAM, "Read to addr %lld translates to %d "
                        "dram requests\n", pkt->getAddr(), pktCount);
                burst_helper = new BurstHelper(pktCount);
            }

            DRAMPacket* dram_pkt = decodeAddr(pkt, addr, size, true);
            dram_pkt->burstHelper = burst_helper;

            assert(!readQueueFull(1));
            stats.rdQLenPdf[totalReadQueueSize + respQueue.size()]++;

            DPRINTF(DRAM, "Adding to read queue\n");

            readQueue[dram_pkt->qosValue()].push_back(dram_pkt);

            // increment read entries of the rank
            ++dram_pkt->rankRef.readEntries;

            // log packet
            logRequest(MemCtrl::READ, pkt->masterId(), pkt->qosValue(),
                       dram_pkt->addr, 1);

            // Update stats
            stats.avgRdQLen = totalReadQueueSize + respQueue.size();
        }

        // Starting address of next dram pkt (aligned to burstSize boundary)
        addr = (addr | (burstSize - 1)) + 1;
    }
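    // Example of the alignment step in the loop above (illustrative
    // values): with a 64-byte burst size, an unaligned start address 0x47
    // advances to (0x47 | 0x3f) + 1 = 0x80, so every DRAM packet after
    // the first starts on a burst boundary.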
    // If all packets are serviced by write queue, we send the response back
    if (pktsServicedByWrQ == pktCount) {
        accessAndRespond(pkt, frontendLatency);
        return;
    }

    // Update how many split packets are serviced by write queue
    if (burst_helper != NULL)
        burst_helper->burstsServiced = pktsServicedByWrQ;

    // If we are not already scheduled to get a request out of the
    // queue, do so now
    if (!nextReqEvent.scheduled()) {
        DPRINTF(DRAM, "Request scheduled immediately\n");
        schedule(nextReqEvent, curTick());
    }
}
void
DRAMCtrl::addToWriteQueue(PacketPtr pkt, unsigned int pktCount)
{
    // only add to the write queue here. whenever the request is
    // eventually done, set the readyTime, and call schedule()
    assert(pkt->isWrite());

    // if the request size is larger than burst size, the pkt is split into
    // multiple DRAM packets
    Addr addr = pkt->getAddr();
    for (int cnt = 0; cnt < pktCount; ++cnt) {
        unsigned size = std::min((addr | (burstSize - 1)) + 1,
                        pkt->getAddr() + pkt->getSize()) - addr;
        stats.writePktSize[ceilLog2(size)]++;
        stats.writeBursts++;
        stats.masterWriteAccesses[pkt->masterId()]++;

        // see if we can merge with an existing item in the write
        // queue and keep track of whether we have merged or not
        bool merged = isInWriteQueue.find(burstAlign(addr)) !=
            isInWriteQueue.end();

        // if the item was not merged we need to create a new write
        // and enqueue it
        if (!merged) {
            DRAMPacket* dram_pkt = decodeAddr(pkt, addr, size, false);

            assert(totalWriteQueueSize < writeBufferSize);
            stats.wrQLenPdf[totalWriteQueueSize]++;

            DPRINTF(DRAM, "Adding to write queue\n");

            writeQueue[dram_pkt->qosValue()].push_back(dram_pkt);
            isInWriteQueue.insert(burstAlign(addr));

            // log packet
            logRequest(MemCtrl::WRITE, pkt->masterId(), pkt->qosValue(),
                       dram_pkt->addr, 1);

            assert(totalWriteQueueSize == isInWriteQueue.size());

            // Update stats
            stats.avgWrQLen = totalWriteQueueSize;

            // increment write entries of the rank
            ++dram_pkt->rankRef.writeEntries;
        } else {
            DPRINTF(DRAM, "Merging write burst with existing queue entry\n");

            // keep track of the fact that this burst effectively
            // disappeared as it was merged with an existing one
            stats.mergedWrBursts++;
        }

        // Starting address of next dram pkt (aligned to burstSize boundary)
        addr = (addr | (burstSize - 1)) + 1;
    }

    // we do not wait for the writes to be sent to the actual memory,
    // but instead take responsibility for the consistency here and
    // snoop the write queue for any upcoming reads
    // @todo, if a pkt size is larger than burst size, we might need a
    // different front end latency
    accessAndRespond(pkt, frontendLatency);

    // If we are not already scheduled to get a request out of the
    // queue, do so now
    if (!nextReqEvent.scheduled()) {
        DPRINTF(DRAM, "Request scheduled immediately\n");
        schedule(nextReqEvent, curTick());
    }
}
void
DRAMCtrl::printQs() const
{
#if TRACING_ON
    DPRINTF(DRAM, "===READ QUEUE===\n\n");
    for (const auto& queue : readQueue) {
        for (const auto& packet : queue) {
            DPRINTF(DRAM, "Read %lu\n", packet->addr);
        }
    }

    DPRINTF(DRAM, "\n===RESP QUEUE===\n\n");
    for (const auto& packet : respQueue) {
        DPRINTF(DRAM, "Response %lu\n", packet->addr);
    }

    DPRINTF(DRAM, "\n===WRITE QUEUE===\n\n");
    for (const auto& queue : writeQueue) {
        for (const auto& packet : queue) {
            DPRINTF(DRAM, "Write %lu\n", packet->addr);
        }
    }
#endif // TRACING_ON
}
bool
DRAMCtrl::recvTimingReq(PacketPtr pkt)
{
    // This is where we enter from the outside world
    DPRINTF(DRAM, "recvTimingReq: request %s addr %lld size %d\n",
            pkt->cmdString(), pkt->getAddr(), pkt->getSize());

    panic_if(pkt->cacheResponding(), "Should not see packets where cache "
             "is responding");

    panic_if(!(pkt->isRead() || pkt->isWrite()),
             "Should only see read and writes at memory controller\n");

    // Calc avg gap between requests
    if (prevArrival != 0) {
        stats.totGap += curTick() - prevArrival;
    }
    prevArrival = curTick();

    // Find out how many dram packets a pkt translates to
    // If the burst size is equal or larger than the pkt size, then a pkt
    // translates to only one dram packet. Otherwise, a pkt translates to
    // multiple dram packets
    unsigned size = pkt->getSize();
    unsigned offset = pkt->getAddr() & (burstSize - 1);
    unsigned int dram_pkt_count = divCeil(offset + size, burstSize);
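    // e.g. a 128-byte request starting 16 bytes into a 64-byte burst
    // spans divCeil(16 + 128, 64) = 3 bursts and hence becomes three DRAM
    // packets (illustrative values)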
    // run the QoS scheduler and assign a QoS priority value to the packet
    qosSchedule( { &readQueue, &writeQueue }, burstSize, pkt);

    // check local buffers and do not accept if full
    if (pkt->isWrite()) {
        assert(size != 0);
        if (writeQueueFull(dram_pkt_count)) {
            DPRINTF(DRAM, "Write queue full, not accepting\n");
            // remember that we have to retry this port
            retryWrReq = true;
            stats.numWrRetry++;
            return false;
        } else {
            addToWriteQueue(pkt, dram_pkt_count);
            stats.writeReqs++;
            stats.bytesWrittenSys += size;
        }
    } else {
        assert(pkt->isRead());
        assert(size != 0);
        if (readQueueFull(dram_pkt_count)) {
            DPRINTF(DRAM, "Read queue full, not accepting\n");
            // remember that we have to retry this port
            retryRdReq = true;
            stats.numRdRetry++;
            return false;
        } else {
            addToReadQueue(pkt, dram_pkt_count);
            stats.readReqs++;
            stats.bytesReadSys += size;
        }
    }

    return true;
}
void
DRAMCtrl::processRespondEvent()
{
    DPRINTF(DRAM,
            "processRespondEvent(): Some req has reached its readyTime\n");

    DRAMPacket* dram_pkt = respQueue.front();

    // if a read has reached its ready-time, decrement the number of reads
    // At this point the packet has been handled and there is a possibility
    // to switch to low-power mode if no other packet is available
    --dram_pkt->rankRef.readEntries;
    DPRINTF(DRAM, "number of read entries for rank %d is %d\n",
            dram_pkt->rank, dram_pkt->rankRef.readEntries);

    // counter should at least indicate one outstanding request
    // for this read
    assert(dram_pkt->rankRef.outstandingEvents > 0);
    // read response received, decrement count
    --dram_pkt->rankRef.outstandingEvents;

    // at this moment should not have transitioned to a low-power state
    assert((dram_pkt->rankRef.pwrState != PWR_SREF) &&
           (dram_pkt->rankRef.pwrState != PWR_PRE_PDN) &&
           (dram_pkt->rankRef.pwrState != PWR_ACT_PDN));

    // track if this is the last packet before idling
    // and that there are no outstanding commands to this rank
    if (dram_pkt->rankRef.isQueueEmpty() &&
        dram_pkt->rankRef.outstandingEvents == 0 && enableDRAMPowerdown) {
        // verify that there are no events scheduled
        assert(!dram_pkt->rankRef.activateEvent.scheduled());
        assert(!dram_pkt->rankRef.prechargeEvent.scheduled());

        // if coming from active state, schedule power event to
        // active power-down else go to precharge power-down
        DPRINTF(DRAMState, "Rank %d sleep at tick %d; current power state is "
                "%d\n", dram_pkt->rank, curTick(), dram_pkt->rankRef.pwrState);

        // default to ACT power-down unless already in IDLE state
        // could be in IDLE if PRE issued before data returned
        PowerState next_pwr_state = PWR_ACT_PDN;
        if (dram_pkt->rankRef.pwrState == PWR_IDLE) {
            next_pwr_state = PWR_PRE_PDN;
        }

        dram_pkt->rankRef.powerDownSleep(next_pwr_state, curTick());
    }

    if (dram_pkt->burstHelper) {
        // it is a split packet
        dram_pkt->burstHelper->burstsServiced++;
        if (dram_pkt->burstHelper->burstsServiced ==
            dram_pkt->burstHelper->burstCount) {
            // we have now serviced all children packets of a system packet
            // so we can now respond to the requester
            // @todo we probably want to have a different front end and back
            // end latency for split packets
            accessAndRespond(dram_pkt->pkt, frontendLatency + backendLatency);
            delete dram_pkt->burstHelper;
            dram_pkt->burstHelper = NULL;
        }
    } else {
        // it is not a split packet
        accessAndRespond(dram_pkt->pkt, frontendLatency + backendLatency);
    }

    delete respQueue.front();
    respQueue.pop_front();

    if (!respQueue.empty()) {
        assert(respQueue.front()->readyTime >= curTick());
        assert(!respondEvent.scheduled());
        schedule(respondEvent, respQueue.front()->readyTime);
    } else {
        // if there is nothing left in any queue, signal a drain
        if (drainState() == DrainState::Draining &&
            !totalWriteQueueSize && !totalReadQueueSize && allRanksDrained()) {

            DPRINTF(Drain, "DRAM controller done draining\n");
            signalDrainDone();
        }
    }

    // We have made a location in the queue available at this point,
    // so if there is a read that was forced to wait, retry now
    if (retryRdReq) {
        retryRdReq = false;
        port.sendRetryReq();
    }
}
DRAMCtrl::DRAMPacketQueue::iterator
DRAMCtrl::chooseNext(DRAMPacketQueue& queue, Tick extra_col_delay)
{
    // This method does the arbitration between requests.

    DRAMCtrl::DRAMPacketQueue::iterator ret = queue.end();

    if (!queue.empty()) {
        if (queue.size() == 1) {
            // available rank corresponds to state refresh idle
            DRAMPacket* dram_pkt = *(queue.begin());
            if (ranks[dram_pkt->rank]->inRefIdleState()) {
                ret = queue.begin();
                DPRINTF(DRAM, "Single request, going to a free rank\n");
            } else {
                DPRINTF(DRAM, "Single request, going to a busy rank\n");
            }
        } else if (memSchedPolicy == Enums::fcfs) {
            // check if there is a packet going to a free rank
            for (auto i = queue.begin(); i != queue.end(); ++i) {
                DRAMPacket* dram_pkt = *i;
                if (ranks[dram_pkt->rank]->inRefIdleState()) {
                    ret = i;
                    break;
                }
            }
        } else if (memSchedPolicy == Enums::frfcfs) {
            ret = chooseNextFRFCFS(queue, extra_col_delay);
        } else {
            panic("No scheduling policy chosen\n");
        }
    }
    return ret;
}
DRAMCtrl::DRAMPacketQueue::iterator
DRAMCtrl::chooseNextFRFCFS(DRAMPacketQueue& queue, Tick extra_col_delay)
{
    // Only determine this if needed
    vector<uint32_t> earliest_banks(ranksPerChannel, 0);

    // Has minBankPrep been called to populate earliest_banks?
    bool filled_earliest_banks = false;
    // can the PRE/ACT sequence be done without impacting utilization?
    bool hidden_bank_prep = false;

    // search for seamless row hits first, if no seamless row hit is
    // found then determine if there are other packets that can be issued
    // without incurring additional bus delay due to bank timing
    // Will select closed rows first to enable more open row possibilities
    // in future selections
    bool found_hidden_bank = false;

    // remember if we found a row hit, not seamless, but bank prepped
    bool found_prepped_pkt = false;

    // if we have no row hit, prepped or not, and no seamless packet,
    // just go for the earliest possible
    bool found_earliest_pkt = false;

    auto selected_pkt_it = queue.end();

    // time we need to issue a column command to be seamless
    const Tick min_col_at = std::max(nextBurstAt + extra_col_delay, curTick());

    for (auto i = queue.begin(); i != queue.end() ; ++i) {
        DRAMPacket* dram_pkt = *i;
        const Bank& bank = dram_pkt->bankRef;
        const Tick col_allowed_at = dram_pkt->isRead() ? bank.rdAllowedAt :
                                                         bank.wrAllowedAt;

        DPRINTF(DRAM, "%s checking packet in bank %d\n",
                __func__, dram_pkt->bankRef.bank);

        // check if rank is not doing a refresh and thus is available, if not,
        // jump to the next packet
        if (dram_pkt->rankRef.inRefIdleState()) {

            DPRINTF(DRAM,
                    "%s bank %d - Rank %d available\n", __func__,
                    dram_pkt->bankRef.bank, dram_pkt->rankRef.rank);

            // check if it is a row hit
            if (bank.openRow == dram_pkt->row) {
                // no additional rank-to-rank or same bank-group
                // delays, or we switched read/write and might as well
                // go for the row hit
                if (col_allowed_at <= min_col_at) {
                    // FCFS within the hits, giving priority to
                    // commands that can issue seamlessly, without
                    // additional delay, such as same rank accesses
                    // and/or different bank-group accesses
                    DPRINTF(DRAM, "%s Seamless row buffer hit\n", __func__);
                    selected_pkt_it = i;
                    // no need to look through the remaining queue entries
                    break;
                } else if (!found_hidden_bank && !found_prepped_pkt) {
                    // if we did not find a packet to a closed row that can
                    // issue the bank commands without incurring delay, and
                    // did not yet find a packet to a prepped row, remember
                    // the current one
                    selected_pkt_it = i;
                    found_prepped_pkt = true;
                    DPRINTF(DRAM, "%s Prepped row buffer hit\n", __func__);
                }
            } else if (!found_earliest_pkt) {
                // if we have not initialised the bank status, do it
                // now, and only once per scheduling decisions
                if (!filled_earliest_banks) {
                    // determine entries with earliest bank delay
                    std::tie(earliest_banks, hidden_bank_prep) =
                        minBankPrep(queue, min_col_at);
                    filled_earliest_banks = true;
                }

                // bank is amongst first available banks
                // minBankPrep will give priority to packets that can
                // issue seamlessly
                if (bits(earliest_banks[dram_pkt->rank],
                         dram_pkt->bank, dram_pkt->bank)) {
                    found_earliest_pkt = true;
                    found_hidden_bank = hidden_bank_prep;

                    // give priority to packets that can issue
                    // bank commands 'behind the scenes'
                    // any additional delay if any will be due to
                    // col-to-col command requirements
                    if (hidden_bank_prep || !found_prepped_pkt)
                        selected_pkt_it = i;
                }
            }
        } else {
            DPRINTF(DRAM, "%s bank %d - Rank %d not available\n", __func__,
                    dram_pkt->bankRef.bank, dram_pkt->rankRef.rank);
        }
    }

    if (selected_pkt_it == queue.end()) {
        DPRINTF(DRAM, "%s no available ranks found\n", __func__);
    }

    return selected_pkt_it;
}
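// In summary, the FR-FCFS selection above prefers, in order: (1) the first
// seamless row-buffer hit, whose column command can issue by min_col_at,
// (2) the first row hit to an already-prepped bank, and (3) the oldest
// packet among the banks that can be activated earliest, favouring those
// whose PRE/ACT can be hidden behind bursts already in flight.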
void
DRAMCtrl::accessAndRespond(PacketPtr pkt, Tick static_latency)
{
    DPRINTF(DRAM, "Responding to Address %lld.. ", pkt->getAddr());

    bool needsResponse = pkt->needsResponse();
    // do the actual memory access which also turns the packet into a
    // response
    access(pkt);

    // turn packet around to go back to requester if response expected
    if (needsResponse) {
        // access already turned the packet into a response
        assert(pkt->isResponse());
        // response_time consumes the static latency and is charged also
        // with headerDelay that takes into account the delay provided by
        // the xbar and also the payloadDelay that takes into account the
        // number of data beats.
        Tick response_time = curTick() + static_latency + pkt->headerDelay +
                             pkt->payloadDelay;
        // Here we reset the timing of the packet before sending it out.
        pkt->headerDelay = pkt->payloadDelay = 0;

        // queue the packet in the response queue to be sent out after
        // the static latency has passed
        port.schedTimingResp(pkt, response_time);
    } else {
        // @todo the packet is going to be deleted, and the DRAMPacket
        // is still having a pointer to it
        pendingDelete.reset(pkt);
    }

    DPRINTF(DRAM, "Done\n");

    return;
}
void
DRAMCtrl::activateBank(Rank& rank_ref, Bank& bank_ref,
                       Tick act_tick, uint32_t row)
{
    assert(rank_ref.actTicks.size() == activationLimit);

    DPRINTF(DRAM, "Activate at tick %d\n", act_tick);

    // update the open row
    assert(bank_ref.openRow == Bank::NO_ROW);
    bank_ref.openRow = row;

    // start counting anew, this covers both the case when we
    // auto-precharged, and when this access is forced to
    // precharge
    bank_ref.bytesAccessed = 0;
    bank_ref.rowAccesses = 0;

    ++rank_ref.numBanksActive;
    assert(rank_ref.numBanksActive <= banksPerRank);

    DPRINTF(DRAM, "Activate bank %d, rank %d at tick %lld, now got %d active\n",
            bank_ref.bank, rank_ref.rank, act_tick,
            ranks[rank_ref.rank]->numBanksActive);

    rank_ref.cmdList.push_back(Command(MemCommand::ACT, bank_ref.bank,
                               act_tick));

    DPRINTF(DRAMPower, "%llu,ACT,%d,%d\n", divCeil(act_tick, tCK) -
            timeStampOffset, bank_ref.bank, rank_ref.rank);

    // The next access has to respect tRAS for this bank
    bank_ref.preAllowedAt = act_tick + tRAS;

    // Respect the row-to-column command delay for both read and write cmds
    bank_ref.rdAllowedAt = std::max(act_tick + tRCD, bank_ref.rdAllowedAt);
    bank_ref.wrAllowedAt = std::max(act_tick + tRCD, bank_ref.wrAllowedAt);

    // start by enforcing tRRD
    for (int i = 0; i < banksPerRank; i++) {
        // next activate to any bank in this rank must not happen
        // before tRRD
        if (bankGroupArch && (bank_ref.bankgr == rank_ref.banks[i].bankgr)) {
            // bank group architecture requires longer delays between
            // ACT commands within the same bank group. Use tRRD_L
            // in this case
            rank_ref.banks[i].actAllowedAt = std::max(act_tick + tRRD_L,
                                             rank_ref.banks[i].actAllowedAt);
        } else {
            // use shorter tRRD value when either
            // 1) bank group architecture is not supported
            // 2) bank is in a different bank group
            rank_ref.banks[i].actAllowedAt = std::max(act_tick + tRRD,
                                             rank_ref.banks[i].actAllowedAt);
        }
    }

    // next, we deal with tXAW, if the activation limit is disabled
    // then we directly schedule an activate power event
    if (!rank_ref.actTicks.empty()) {
        // sanity check
        if (rank_ref.actTicks.back() &&
           (act_tick - rank_ref.actTicks.back()) < tXAW) {
            panic("Got %d activates in window %d (%llu - %llu) which "
                  "is smaller than %llu\n", activationLimit, act_tick -
                  rank_ref.actTicks.back(), act_tick,
                  rank_ref.actTicks.back(), tXAW);
        }

        // shift the times used for the book keeping, the last element
        // (highest index) is the oldest one and hence the lowest value
        rank_ref.actTicks.pop_back();

        // record a new activation (in the future)
        rank_ref.actTicks.push_front(act_tick);

        // cannot activate more than X times in time window tXAW, push the
        // next one (the X + 1'st activate) to be tXAW away from the
        // oldest in our window of X
        if (rank_ref.actTicks.back() &&
           (act_tick - rank_ref.actTicks.back()) < tXAW) {
            DPRINTF(DRAM, "Enforcing tXAW with X = %d, next activate "
                    "no earlier than %llu\n", activationLimit,
                    rank_ref.actTicks.back() + tXAW);
            for (int j = 0; j < banksPerRank; j++)
                // next activate must not happen before end of window
                rank_ref.banks[j].actAllowedAt =
                    std::max(rank_ref.actTicks.back() + tXAW,
                             rank_ref.banks[j].actAllowedAt);
        }
    }
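    // Rolling-window example for the tXAW bookkeeping above (illustrative):
    // with activationLimit = 4, actTicks always holds the last four ACT
    // times with the oldest at the back, so a fifth ACT inside the window
    // pushes every bank's actAllowedAt to at least tXAW past that oldest
    // activate (the classic tFAW four-activate-window rule).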
    // at the point when this activate takes place, make sure we
    // transition to the active power state
    if (!rank_ref.activateEvent.scheduled())
        schedule(rank_ref.activateEvent, act_tick);
    else if (rank_ref.activateEvent.when() > act_tick)
        // move it sooner in time
        reschedule(rank_ref.activateEvent, act_tick);
}
void
DRAMCtrl::prechargeBank(Rank& rank_ref, Bank& bank, Tick pre_at, bool trace)
{
    // make sure the bank has an open row
    assert(bank.openRow != Bank::NO_ROW);

    // sample the bytes per activate here since we are closing
    // the page
    stats.bytesPerActivate.sample(bank.bytesAccessed);

    bank.openRow = Bank::NO_ROW;

    // no precharge allowed before this one
    bank.preAllowedAt = pre_at;

    Tick pre_done_at = pre_at + tRP;

    bank.actAllowedAt = std::max(bank.actAllowedAt, pre_done_at);

    assert(rank_ref.numBanksActive != 0);
    --rank_ref.numBanksActive;

    DPRINTF(DRAM, "Precharging bank %d, rank %d at tick %lld, now got "
            "%d active\n", bank.bank, rank_ref.rank, pre_at,
            rank_ref.numBanksActive);

    if (trace) {
        rank_ref.cmdList.push_back(Command(MemCommand::PRE, bank.bank,
                                   pre_at));

        DPRINTF(DRAMPower, "%llu,PRE,%d,%d\n", divCeil(pre_at, tCK) -
                timeStampOffset, bank.bank, rank_ref.rank);
    }

    // if we look at the current number of active banks we might be
    // tempted to think the DRAM is now idle, however this can be
    // undone by an activate that is scheduled to happen before we
    // would have reached the idle state, so schedule an event and
    // rather check once we actually make it to the point in time when
    // the (last) precharge takes place
    if (!rank_ref.prechargeEvent.scheduled()) {
        schedule(rank_ref.prechargeEvent, pre_done_at);
        // New event, increment count
        ++rank_ref.outstandingEvents;
    } else if (rank_ref.prechargeEvent.when() < pre_done_at) {
        reschedule(rank_ref.prechargeEvent, pre_done_at);
    }
}
void
DRAMCtrl::doDRAMAccess(DRAMPacket* dram_pkt)
{
    DPRINTF(DRAM, "Timing access to addr %lld, rank/bank/row %d %d %d\n",
            dram_pkt->addr, dram_pkt->rank, dram_pkt->bank, dram_pkt->row);

    // get the rank
    Rank& rank = dram_pkt->rankRef;

    // are we in or transitioning to a low-power state and have not scheduled
    // a power-up event?
    // if so, wake up from power down to issue RD/WR burst
    if (rank.inLowPowerState) {
        assert(rank.pwrState != PWR_SREF);
        rank.scheduleWakeUpEvent(tXP);
    }

    // get the bank
    Bank& bank = dram_pkt->bankRef;

    // for the state we need to track if it is a row hit or not
    bool row_hit = true;

    // Determine the access latency and update the bank state
    if (bank.openRow == dram_pkt->row) {
        // nothing to do
    } else {
        row_hit = false;

        // If there is a page open, precharge it.
        if (bank.openRow != Bank::NO_ROW) {
            prechargeBank(rank, bank, std::max(bank.preAllowedAt, curTick()));
        }

        // next we need to account for the delay in activating the
        // page
        Tick act_tick = std::max(bank.actAllowedAt, curTick());

        // Record the activation and deal with all the global timing
        // constraints caused by a new activation (tRRD and tXAW)
        activateBank(rank, bank, act_tick, dram_pkt->row);
    }

    // respect any constraints on the command (e.g. tRCD or tCCD)
    const Tick col_allowed_at = dram_pkt->isRead() ?
                                bank.rdAllowedAt : bank.wrAllowedAt;

    // we need to wait until the bus is available before we can issue
    // the command; need minimum of tBURST between commands
    Tick cmd_at = std::max({col_allowed_at, nextBurstAt, curTick()});

    // update the packet ready time
    dram_pkt->readyTime = cmd_at + tCL + tBURST;

    // update the time for the next read/write burst for each
    // bank (add a max with tCCD/tCCD_L/tCCD_L_WR here)
    Tick dly_to_rd_cmd;
    Tick dly_to_wr_cmd;
    for (int j = 0; j < ranksPerChannel; j++) {
        for (int i = 0; i < banksPerRank; i++) {
            // next burst to same bank group in this rank must not happen
            // before tCCD_L. Different bank group timing requirement is
            // tBURST; Add tCS for different ranks
            if (dram_pkt->rank == j) {
                if (bankGroupArch &&
                   (bank.bankgr == ranks[j]->banks[i].bankgr)) {
                    // bank group architecture requires longer delays between
                    // RD/WR burst commands to the same bank group.
                    // tCCD_L is default requirement for same BG timing
                    // tCCD_L_WR is required for write-to-write
                    // Need to also take bus turnaround delays into account
                    dly_to_rd_cmd = dram_pkt->isRead() ?
                                    tCCD_L : std::max(tCCD_L, wrToRdDly);
                    dly_to_wr_cmd = dram_pkt->isRead() ?
                                    std::max(tCCD_L, rdToWrDly) : tCCD_L_WR;
                } else {
                    // tBURST is default requirement for diff BG timing
                    // Need to also take bus turnaround delays into account
                    dly_to_rd_cmd = dram_pkt->isRead() ? tBURST : wrToRdDly;
                    dly_to_wr_cmd = dram_pkt->isRead() ? rdToWrDly : tBURST;
                }
            } else {
                // different rank is by default in a different bank group and
                // doesn't require longer tCCD or additional RTW, WTR delays
                // Need to account for rank-to-rank switching with tCS
                dly_to_wr_cmd = rankToRankDly;
                dly_to_rd_cmd = rankToRankDly;
            }
            ranks[j]->banks[i].rdAllowedAt = std::max(cmd_at + dly_to_rd_cmd,
                                             ranks[j]->banks[i].rdAllowedAt);
            ranks[j]->banks[i].wrAllowedAt = std::max(cmd_at + dly_to_wr_cmd,
                                             ranks[j]->banks[i].wrAllowedAt);
        }
    }

    // Save rank of current access
    activeRank = dram_pkt->rank;

    // If this is a write, we also need to respect the write recovery
    // time before a precharge, in the case of a read, respect the
    // read to precharge constraint
    bank.preAllowedAt = std::max(bank.preAllowedAt,
                        dram_pkt->isRead() ? cmd_at + tRTP :
                        dram_pkt->readyTime + tWR);

    // increment the bytes accessed and the accesses per row
    bank.bytesAccessed += burstSize;
    ++bank.rowAccesses;

    // if we reached the max, then issue with an auto-precharge
    bool auto_precharge = pageMgmt == Enums::close ||
                          bank.rowAccesses == maxAccessesPerRow;

    // if we did not hit the limit, we might still want to
    // auto-precharge
    if (!auto_precharge &&
        (pageMgmt == Enums::open_adaptive ||
         pageMgmt == Enums::close_adaptive)) {
        // a twist on the open and close page policies:
        // 1) open_adaptive page policy does not blindly keep the
        // page open, but close it if there are no row hits, and there
        // are bank conflicts in the queue
        // 2) close_adaptive page policy does not blindly close the
        // page, but closes it only if there are no row hits in the queue.
        // In this case, only force an auto precharge when there
        // are no same page hits in the queue
        bool got_more_hits = false;
        bool got_bank_conflict = false;

        // either look at the read queue or write queue
        const std::vector<DRAMPacketQueue>& queue =
            dram_pkt->isRead() ? readQueue : writeQueue;

        for (uint8_t i = 0; i < numPriorities(); ++i) {
            auto p = queue[i].begin();
            // keep on looking until we find a hit or reach the end of the queue
            // 1) if a hit is found, then both open and close adaptive policies keep
            // the page open
            // 2) if no hit is found, got_bank_conflict is set to true if a bank
            // conflict request is waiting in the queue
            // 3) make sure we are not considering the packet that we are
            // currently dealing with
            while (!got_more_hits && p != queue[i].end()) {
                if (dram_pkt != (*p)) {
                    bool same_rank_bank = (dram_pkt->rank == (*p)->rank) &&
                                          (dram_pkt->bank == (*p)->bank);

                    bool same_row = dram_pkt->row == (*p)->row;
                    got_more_hits |= same_rank_bank && same_row;
                    got_bank_conflict |= same_rank_bank && !same_row;
                }
                ++p;
            }

            if (got_more_hits)
                break;
        }

        // auto pre-charge when either
        // 1) open_adaptive policy, we have not got any more hits, and
        // have a bank conflict
        // 2) close_adaptive policy and we have not got any more hits
        auto_precharge = !got_more_hits &&
            (got_bank_conflict || pageMgmt == Enums::close_adaptive);
    }

    // DRAMPower trace command to be written
    std::string mem_cmd = dram_pkt->isRead() ? "RD" : "WR";

    // MemCommand required for DRAMPower library
    MemCommand::cmds command = (mem_cmd == "RD") ? MemCommand::RD :
                                                   MemCommand::WR;

    // Update bus state to reflect when previous command was issued
    nextBurstAt = cmd_at + tBURST;

    DPRINTF(DRAM, "Access to %lld, ready at %lld next burst at %lld.\n",
            dram_pkt->addr, dram_pkt->readyTime, nextBurstAt);

    dram_pkt->rankRef.cmdList.push_back(Command(command, dram_pkt->bank,
                                        cmd_at));

    DPRINTF(DRAMPower, "%llu,%s,%d,%d\n", divCeil(cmd_at, tCK) -
            timeStampOffset, mem_cmd, dram_pkt->bank, dram_pkt->rank);

    // if this access should use auto-precharge, then we are
    // closing the row after the read/write burst
    if (auto_precharge) {
        // if auto-precharge push a PRE command at the correct tick to the
        // list used by DRAMPower library to calculate power
        prechargeBank(rank, bank, std::max(curTick(), bank.preAllowedAt));

        DPRINTF(DRAM, "Auto-precharged bank: %d\n", dram_pkt->bankId);
    }

    // Update the minimum timing between the requests, this is a
    // conservative estimate of when we have to schedule the next
    // request to not introduce any unnecessary bubbles. In most cases
    // we will wake up sooner than we have to.
    nextReqTime = nextBurstAt - (tRP + tRCD);
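    // The tRP + tRCD headstart above covers the worst case, where the next
    // packet misses the open row and needs a precharge and an activate
    // before its column command; a row hit simply finds the controller
    // awake early.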
    // Update the stats and schedule the next request
    if (dram_pkt->isRead()) {
        ++readsThisTime;
        if (row_hit)
            stats.readRowHits++;
        stats.bytesReadDRAM += burstSize;
        stats.perBankRdBursts[dram_pkt->bankId]++;

        // Update latency stats
        stats.totMemAccLat += dram_pkt->readyTime - dram_pkt->entryTime;
        stats.masterReadTotalLat[dram_pkt->masterId()] +=
            dram_pkt->readyTime - dram_pkt->entryTime;

        stats.totBusLat += tBURST;
        stats.totQLat += cmd_at - dram_pkt->entryTime;
        stats.masterReadBytes[dram_pkt->masterId()] += dram_pkt->size;
    } else {
        ++writesThisTime;
        if (row_hit)
            stats.writeRowHits++;
        stats.bytesWritten += burstSize;
        stats.perBankWrBursts[dram_pkt->bankId]++;
        stats.masterWriteBytes[dram_pkt->masterId()] += dram_pkt->size;
        stats.masterWriteTotalLat[dram_pkt->masterId()] +=
            dram_pkt->readyTime - dram_pkt->entryTime;
    }
}
void
DRAMCtrl::processNextReqEvent()
{
    // transition is handled by QoS algorithm if enabled
    if (turnPolicy) {
        // select bus state - only done if QoS algorithms are in use
        busStateNext = selectNextBusState();
    }

    // detect bus state change
    bool switched_cmd_type = (busState != busStateNext);
    // record stats
    recordTurnaroundStats();

    DPRINTF(DRAM, "QoS Turnarounds selected state %s %s\n",
            (busState==MemCtrl::READ)?"READ":"WRITE",
            switched_cmd_type?"[turnaround triggered]":"");

    if (switched_cmd_type) {
        if (busState == READ) {
            DPRINTF(DRAM,
                    "Switching to writes after %d reads with %d reads "
                    "waiting\n", readsThisTime, totalReadQueueSize);
            stats.rdPerTurnAround.sample(readsThisTime);
            readsThisTime = 0;
        } else {
            DPRINTF(DRAM,
                    "Switching to reads after %d writes with %d writes "
                    "waiting\n", writesThisTime, totalWriteQueueSize);
            stats.wrPerTurnAround.sample(writesThisTime);
            writesThisTime = 0;
        }
    }

    // updates current state
    busState = busStateNext;

    // check ranks for refresh/wakeup - uses busStateNext, so done after
    // turnaround decision
    int busyRanks = 0;
    for (auto r : ranks) {
        if (!r->inRefIdleState()) {
            if (r->pwrState != PWR_SREF) {
                // rank is busy refreshing
                DPRINTF(DRAMState, "Rank %d is not available\n", r->rank);
                busyRanks++;

                // let the rank know that if it was waiting to drain, it
                // is now done and ready to proceed
                r->checkDrainDone();
            }

            // check if we were in self-refresh and haven't started
            // to transition out
            if ((r->pwrState == PWR_SREF) && r->inLowPowerState) {
                DPRINTF(DRAMState, "Rank %d is in self-refresh\n", r->rank);
                // if we have commands queued to this rank and we don't have
                // a minimum number of active commands enqueued,
                // exit self-refresh
                if (r->forceSelfRefreshExit()) {
                    DPRINTF(DRAMState, "rank %d was in self refresh and"
                            " should wake up\n", r->rank);
                    // wake up from self-refresh
                    r->scheduleWakeUpEvent(tXS);
                    // things are brought back into action once a refresh is
                    // performed after self-refresh
                    // continue with selection for other ranks
                }
            }
        }
    }

    if (busyRanks == ranksPerChannel) {
        // if all ranks are refreshing wait for them to finish
        // and stall this state machine without taking any further
        // action, and do not schedule a new nextReqEvent
        return;
    }

    // when we get here it is either a read or a write
    if (busState == READ) {

        // track if we should switch or not
        bool switch_to_writes = false;

        if (totalReadQueueSize == 0) {
            // In the case there is no read request to go next,
            // trigger writes if we have passed the low threshold (or
            // if we are draining)
            if (!(totalWriteQueueSize == 0) &&
                (drainState() == DrainState::Draining ||
                 totalWriteQueueSize > writeLowThreshold)) {

                DPRINTF(DRAM, "Switching to writes due to read queue empty\n");
                switch_to_writes = true;
            } else {
                // check if we are drained
                // not done draining until in PWR_IDLE state
                // ensuring all banks are closed and
                // have exited low power states
                if (drainState() == DrainState::Draining &&
                    respQueue.empty() && allRanksDrained()) {

                    DPRINTF(Drain, "DRAM controller done draining\n");
                    signalDrainDone();
                }

                // nothing to do, not even any point in scheduling an
                // event for the next request
                return;
            }
        } else {

            bool read_found = false;
            DRAMPacketQueue::iterator to_read;
            uint8_t prio = numPriorities();

            for (auto queue = readQueue.rbegin();
                 queue != readQueue.rend(); ++queue) {

                prio--;

                DPRINTF(QOS,
                        "DRAM controller checking READ queue [%d] priority [%d elements]\n",
                        prio, queue->size());

                // Figure out which read request goes next
                // If we are changing command type, incorporate the minimum
                // bus turnaround delay which will be tCS (different rank) case
                to_read = chooseNext((*queue), switched_cmd_type ? tCS : 0);

                if (to_read != queue->end()) {
                    // candidate read found
                    read_found = true;
                    break;
                }
            }

            // if no read to an available rank is found then return
            // at this point. There could be writes to the available ranks
            // which are above the required threshold. However, to
            // avoid adding more complexity to the code, return and wait
            // for a refresh event to kick things into action again.
            if (!read_found) {
                DPRINTF(DRAM, "No Reads Found - exiting\n");
                return;
            }

            auto dram_pkt = *to_read;

            assert(dram_pkt->rankRef.inRefIdleState());

            doDRAMAccess(dram_pkt);

            // Every respQueue which will generate an event, increment count
            ++dram_pkt->rankRef.outstandingEvents;

            // sanity check
            assert(dram_pkt->size <= burstSize);
            assert(dram_pkt->readyTime >= curTick());

            // log the response
            logResponse(MemCtrl::READ, (*to_read)->masterId(),
                        dram_pkt->qosValue(), dram_pkt->getAddr(), 1,
                        dram_pkt->readyTime - dram_pkt->entryTime);

            // Insert into response queue. It will be sent back to the
            // requester at its readyTime
            if (respQueue.empty()) {
                assert(!respondEvent.scheduled());
                schedule(respondEvent, dram_pkt->readyTime);
            } else {
                assert(respQueue.back()->readyTime <= dram_pkt->readyTime);
                assert(respondEvent.scheduled());
            }

            respQueue.push_back(dram_pkt);

            // we have so many writes that we have to transition
            if (totalWriteQueueSize > writeHighThreshold) {
                switch_to_writes = true;
            }

            // remove the request from the queue - the iterator is no longer valid
            readQueue[dram_pkt->qosValue()].erase(to_read);
        }

        // switching to writes, either because the read queue is empty
        // and the writes have passed the low threshold (or we are
        // draining), or because the writes hit the high threshold
        if (switch_to_writes) {
            // transition to writing
            busStateNext = WRITE;
        }
    } else {

        bool write_found = false;
        DRAMPacketQueue::iterator to_write;
        uint8_t prio = numPriorities();

        for (auto queue = writeQueue.rbegin();
             queue != writeQueue.rend(); ++queue) {

            prio--;

            DPRINTF(QOS,
                    "DRAM controller checking WRITE queue [%d] priority [%d elements]\n",
                    prio, queue->size());

            // If we are changing command type, incorporate the minimum
            // bus turnaround delay
            to_write = chooseNext((*queue),
                                  switched_cmd_type ? std::min(tRTW, tCS) : 0);

            if (to_write != queue->end()) {
                write_found = true;
                break;
            }
        }

        // if there are no writes to a rank that is available to service
        // requests (i.e. rank is in refresh idle state) are found then
        // return. There could be reads to the available ranks. However, to
        // avoid adding more complexity to the code, return at this point and
        // wait for a refresh event to kick things into action again.
        if (!write_found) {
            DPRINTF(DRAM, "No Writes Found - exiting\n");
            return;
        }

        auto dram_pkt = *to_write;

        assert(dram_pkt->rankRef.inRefIdleState());
        // sanity check
        assert(dram_pkt->size <= burstSize);

        doDRAMAccess(dram_pkt);

        // removed write from queue, decrement count
        --dram_pkt->rankRef.writeEntries;

        // Schedule write done event to decrement event count
        // after the readyTime has been reached
        // Only schedule latest write event to minimize events
        // required; only need to ensure that final event scheduled covers
        // the time that writes are outstanding and bus is active
        // to holdoff power-down entry events
        if (!dram_pkt->rankRef.writeDoneEvent.scheduled()) {
            schedule(dram_pkt->rankRef.writeDoneEvent, dram_pkt->readyTime);
            // New event, increment count
            ++dram_pkt->rankRef.outstandingEvents;

        } else if (dram_pkt->rankRef.writeDoneEvent.when() <
                   dram_pkt->readyTime) {

            reschedule(dram_pkt->rankRef.writeDoneEvent, dram_pkt->readyTime);
        }

        isInWriteQueue.erase(burstAlign(dram_pkt->addr));

        // log the response
        logResponse(MemCtrl::WRITE, dram_pkt->masterId(),
                    dram_pkt->qosValue(), dram_pkt->getAddr(), 1,
                    dram_pkt->readyTime - dram_pkt->entryTime);

        // remove the request from the queue - the iterator is no longer valid
        writeQueue[dram_pkt->qosValue()].erase(to_write);

        delete dram_pkt;

        // If we emptied the write queue, or got sufficiently below the
        // threshold (using the minWritesPerSwitch as the hysteresis) and
        // are not draining, or we have reads waiting and have done enough
        // writes, then switch to reads.
        bool below_threshold =
            totalWriteQueueSize + minWritesPerSwitch < writeLowThreshold;
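        // e.g. with a 64-entry write buffer, write_low_thresh_perc = 50
        // and minWritesPerSwitch = 16 (illustrative values, the actual
        // defaults live in the Python config), writes keep draining until
        // fewer than 32 - 16 = 16 remain queued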
        if (totalWriteQueueSize == 0 ||
            (below_threshold && drainState() != DrainState::Draining) ||
            (totalReadQueueSize && writesThisTime >= minWritesPerSwitch)) {

            // turn the bus back around for reads again
            busStateNext = READ;

            // note that we switch back to reads also in the idle
            // case, which eventually will check for any draining and
            // also pause any further scheduling if there is really
            // nothing to do
        }
    }
    // It is possible that a refresh to another rank kicks things back into
    // action before reaching this point.
    if (!nextReqEvent.scheduled())
        schedule(nextReqEvent, std::max(nextReqTime, curTick()));

    // If there is space available and we have writes waiting then let
    // them retry. This is done here to ensure that the retry does not
    // cause a nextReqEvent to be scheduled before we do so as part of
    // the next request processing
    if (retryWrReq && totalWriteQueueSize < writeBufferSize) {
        retryWrReq = false;
        port.sendRetryReq();
    }
}
1639 pair
<vector
<uint32_t>, bool>
1640 DRAMCtrl::minBankPrep(const DRAMPacketQueue
& queue
,
1641 Tick min_col_at
) const
1643 Tick min_act_at
= MaxTick
;
1644 vector
<uint32_t> bank_mask(ranksPerChannel
, 0);
1646 // latest Tick for which ACT can occur without incurring additoinal
1647 // delay on the data bus
1648 const Tick hidden_act_max
= std::max(min_col_at
- tRCD
, curTick());
1650 // Flag condition when burst can issue back-to-back with previous burst
1651 bool found_seamless_bank
= false;
1653 // Flag condition when bank can be opened without incurring additional
1654 // delay on the data bus
1655 bool hidden_bank_prep
= false;
1657 // determine if we have queued transactions targetting the
1659 vector
<bool> got_waiting(ranksPerChannel
* banksPerRank
, false);
1660 for (const auto& p
: queue
) {
1661 if (p
->rankRef
.inRefIdleState())
1662 got_waiting
[p
->bankId
] = true;
1665 // Find command with optimal bank timing
1666 // Will prioritize commands that can issue seamlessly.
1667 for (int i
= 0; i
< ranksPerChannel
; i
++) {
1668 for (int j
= 0; j
< banksPerRank
; j
++) {
1669 uint16_t bank_id
= i
* banksPerRank
+ j
;
1671 // if we have waiting requests for the bank, and it is
1672 // amongst the first available, update the mask
1673 if (got_waiting
[bank_id
]) {
1674 // make sure this rank is not currently refreshing.
1675 assert(ranks
[i
]->inRefIdleState());
1676 // simplistic approximation of when the bank can issue
1677 // an activate, ignoring any rank-to-rank switching
1678 // cost in this calculation
1679 Tick act_at
= ranks
[i
]->banks
[j
].openRow
== Bank::NO_ROW
?
1680 std::max(ranks
[i
]->banks
[j
].actAllowedAt
, curTick()) :
1681 std::max(ranks
[i
]->banks
[j
].preAllowedAt
, curTick()) + tRP
;

                // When is the earliest the R/W burst can issue?
                const Tick col_allowed_at = (busState == READ) ?
                                             ranks[i]->banks[j].rdAllowedAt :
                                             ranks[i]->banks[j].wrAllowedAt;
                Tick col_at = std::max(col_allowed_at, act_at + tRCD);

                // bank can issue burst back-to-back (seamlessly) with
                // other commands
                bool new_seamless_bank = col_at <= min_col_at;

                // if we found a new seamless bank or we have no
                // seamless banks, and got a bank with an earlier
                // activate time, it should be added to the bit mask
                if (new_seamless_bank ||
                    (!found_seamless_bank && act_at <= min_act_at)) {
                    // if we did not have a seamless bank before, and
                    // we do now, reset the bank mask, also reset it
                    // if we have not yet found a seamless bank and
                    // the activate time is smaller than what we have
                    // seen so far
                    if (!found_seamless_bank &&
                        (new_seamless_bank || act_at < min_act_at)) {
                        std::fill(bank_mask.begin(), bank_mask.end(), 0);
                    }

                    found_seamless_bank |= new_seamless_bank;

                    // ACT can occur 'behind the scenes'
                    hidden_bank_prep = act_at <= hidden_act_max;

                    // set the bit corresponding to the available bank
                    replaceBits(bank_mask[i], j, j, 1);
                    min_act_at = act_at;
                }
            }
        }
    }

    return make_pair(bank_mask, hidden_bank_prep);
}
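
// Note on the encoding returned by minBankPrep: the vector holds one
// uint32_t per rank, with bit j of entry i set when bank j of rank i is
// among the best candidates. As an illustrative example (assuming two
// ranks with eight banks each), a returned mask of {0x0, 0x5} selects
// banks 0 and 2 of rank 1.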

DRAMCtrl::Rank::Rank(DRAMCtrl& _memory, const DRAMCtrlParams* _p, int rank)
    : EventManager(&_memory), memory(_memory),
      pwrStateTrans(PWR_IDLE), pwrStatePostRefresh(PWR_IDLE),
      pwrStateTick(0), refreshDueAt(0), pwrState(PWR_IDLE),
      refreshState(REF_IDLE), inLowPowerState(false), rank(rank),
      readEntries(0), writeEntries(0), outstandingEvents(0),
      wakeUpAllowedAt(0), power(_p, false), banks(_p->banks_per_rank),
      numBanksActive(0), actTicks(_p->activation_limit, 0),
      writeDoneEvent([this]{ processWriteDoneEvent(); }, name()),
      activateEvent([this]{ processActivateEvent(); }, name()),
      prechargeEvent([this]{ processPrechargeEvent(); }, name()),
      refreshEvent([this]{ processRefreshEvent(); }, name()),
      powerEvent([this]{ processPowerEvent(); }, name()),
      wakeUpEvent([this]{ processWakeUpEvent(); }, name()),
      stats(_memory, *this)
{
    for (int b = 0; b < _p->banks_per_rank; b++) {
        banks[b].bank = b;
        // GDDR addressing of banks to BG is linear.
        // Here we assume that all DRAM generations address bank groups as
        // follows:
        if (_p->bank_groups_per_rank > 0) {
            // Simply assign lower bits to bank group in order to
            // rotate across bank groups as banks are incremented
            // e.g. with 4 banks per bank group and 16 banks total:
            //    banks 0,4,8,12  are in bank group 0
            //    banks 1,5,9,13  are in bank group 1
            //    banks 2,6,10,14 are in bank group 2
            //    banks 3,7,11,15 are in bank group 3
            banks[b].bankgr = b % _p->bank_groups_per_rank;
        } else {
            // No bank groups; simply assign to bank number
            banks[b].bankgr = b;
        }
    }
}

void
DRAMCtrl::Rank::startup(Tick ref_tick)
{
    assert(ref_tick > curTick());

    pwrStateTick = curTick();

    // kick off the refresh, and give ourselves enough time to
    // precharge
    schedule(refreshEvent, ref_tick);
}

void
DRAMCtrl::Rank::suspend()
{
    deschedule(refreshEvent);

    // Update the stats
    updatePowerStats();

    // don't automatically transition back to LP state after next REF
    pwrStatePostRefresh = PWR_IDLE;
}

bool
DRAMCtrl::Rank::isQueueEmpty() const
{
    // check commands in Q based on current bus direction
    bool no_queued_cmds = ((memory.busStateNext == READ) && (readEntries == 0))
                          || ((memory.busStateNext == WRITE) &&
                              (writeEntries == 0));
    return no_queued_cmds;
}

void
DRAMCtrl::Rank::checkDrainDone()
{
    // if this rank was waiting to drain it is now able to proceed to
    // precharge
    if (refreshState == REF_DRAIN) {
        DPRINTF(DRAM, "Refresh drain done, now precharging\n");

        refreshState = REF_PD_EXIT;

        // hand control back to the refresh event loop
        schedule(refreshEvent, curTick());
    }
}

void
DRAMCtrl::Rank::flushCmdList()
{
    // at the moment sort the list of commands and update the counters
    // for the DRAMPower library when doing a refresh
    sort(cmdList.begin(), cmdList.end(), DRAMCtrl::sortTime);

    auto next_iter = cmdList.begin();
    // push the commands to DRAMPower
    for ( ; next_iter != cmdList.end() ; ++next_iter) {
        Command cmd = *next_iter;
        if (cmd.timeStamp <= curTick()) {
            // Move all commands at or before curTick to DRAMPower
            power.powerlib.doCommand(cmd.type, cmd.bank,
                                     divCeil(cmd.timeStamp, memory.tCK) -
                                     memory.timeStampOffset);
        } else {
            // done - found all commands at or before curTick()
            // next_iter references the 1st command after curTick
            break;
        }
    }
    // reset cmdList to only contain commands after curTick
    // if there are no commands after curTick, updated cmdList will be empty
    // in this case, next_iter is cmdList.end()
    cmdList.assign(next_iter, cmdList.end());
}
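
// A worked example of the tick-to-cycle conversion used in flushCmdList
// (the numbers are illustrative only, not from this file): with
// tCK = 1250 ticks and timeStampOffset = 10, a command stamped at tick
// 20000 is handed to DRAMPower at cycle divCeil(20000, 1250) - 10 = 6.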

void
DRAMCtrl::Rank::processActivateEvent()
{
    // we should transition to the active state as soon as any bank is active
    if (pwrState != PWR_ACT)
        // note that at this point numBanksActive could be back at
        // zero again due to a precharge scheduled in the future
        schedulePowerEvent(PWR_ACT, curTick());
}

void
DRAMCtrl::Rank::processPrechargeEvent()
{
    // counter should at least indicate one outstanding request
    // for this precharge
    assert(outstandingEvents > 0);
    // precharge complete, decrement count
    --outstandingEvents;

    // if we reached zero, then special conditions apply as we track
    // if all banks are precharged for the power models
    if (numBanksActive == 0) {
        // no reads to this rank in the Q and no pending
        // RD/WR or refresh commands
        if (isQueueEmpty() && outstandingEvents == 0 &&
            memory.enableDRAMPowerdown) {
            // should still be in ACT state since bank still open
            assert(pwrState == PWR_ACT);

            // All banks closed - switch to precharge power down state.
            DPRINTF(DRAMState, "Rank %d sleep at tick %d\n",
                    rank, curTick());
            powerDownSleep(PWR_PRE_PDN, curTick());
        } else {
            // we should transition to the idle state when the last bank
            // is precharged
            schedulePowerEvent(PWR_IDLE, curTick());
        }
    }
}

void
DRAMCtrl::Rank::processWriteDoneEvent()
{
    // counter should at least indicate one outstanding request
    // for this write
    assert(outstandingEvents > 0);
    // Write transfer on bus has completed
    // decrement per rank counter
    --outstandingEvents;
}

void
DRAMCtrl::Rank::processRefreshEvent()
{
    // when first preparing the refresh, remember when it was due
    if ((refreshState == REF_IDLE) || (refreshState == REF_SREF_EXIT)) {
        // remember when the refresh is due
        refreshDueAt = curTick();

        // proceed to drain
        refreshState = REF_DRAIN;

        // make nonzero while refresh is pending to ensure
        // power down and self-refresh are not entered
        ++outstandingEvents;

        DPRINTF(DRAM, "Refresh due\n");
    }

    // let any scheduled read or write to the same rank go ahead,
    // after which it will hand control back to this event loop
    if (refreshState == REF_DRAIN) {
        // if a request is at the moment being handled and this request is
        // accessing the current rank then wait for it to finish
        if ((rank == memory.activeRank)
            && (memory.nextReqEvent.scheduled())) {
            // hand control over to the request loop until it is
            // evaluated next
            DPRINTF(DRAM, "Refresh awaiting draining\n");

            return;
        } else {
            refreshState = REF_PD_EXIT;
        }
    }

    // at this point, ensure that rank is not in a power-down state
    if (refreshState == REF_PD_EXIT) {
        // if rank was sleeping and we haven't started exit process,
        // wake-up for refresh
        if (inLowPowerState) {
            DPRINTF(DRAM, "Wake Up for refresh\n");
            // save state and return after refresh completes
            scheduleWakeUpEvent(memory.tXP);
            return;
        } else {
            refreshState = REF_PRE;
        }
    }

    // at this point, ensure that all banks are precharged
    if (refreshState == REF_PRE) {
        // precharge any active bank
        if (numBanksActive != 0) {
            // at the moment, we use a precharge all even if there is
            // only a single bank open
            DPRINTF(DRAM, "Precharging all\n");

            // first determine when we can precharge
            Tick pre_at = curTick();

            for (auto &b : banks) {
                // respect both causality and any existing bank
                // constraints, some banks could already have a
                // (auto) precharge scheduled
                pre_at = std::max(b.preAllowedAt, pre_at);
            }

            // make sure all banks per rank are precharged, and for those that
            // already are, update their availability
            Tick act_allowed_at = pre_at + memory.tRP;

            for (auto &b : banks) {
                if (b.openRow != Bank::NO_ROW) {
                    memory.prechargeBank(*this, b, pre_at, false);
                } else {
                    b.actAllowedAt = std::max(b.actAllowedAt, act_allowed_at);
                    b.preAllowedAt = std::max(b.preAllowedAt, pre_at);
                }
            }

            // precharge all banks in rank
            cmdList.push_back(Command(MemCommand::PREA, 0, pre_at));

            DPRINTF(DRAMPower, "%llu,PREA,0,%d\n",
                    divCeil(pre_at, memory.tCK) -
                    memory.timeStampOffset, rank);
        } else if ((pwrState == PWR_IDLE) && (outstandingEvents == 1)) {
            // Banks are closed, have transitioned to IDLE state, and
            // no outstanding ACT,RD/WR,Auto-PRE sequence scheduled
            DPRINTF(DRAM, "All banks already precharged, starting refresh\n");

            // go ahead and kick the power state machine into gear since
            // we are already idle
            schedulePowerEvent(PWR_REF, curTick());
        } else {
            // banks are closed but the power state has not yet transitioned
            // to IDLE, or an ACT,RD/WR,Auto-PRE sequence is outstanding;
            // there should be a pending precharge event in this case
            assert(prechargeEvent.scheduled());
            // will start refresh when pwrState transitions to IDLE
        }

        assert(numBanksActive == 0);

        // wait for all banks to be precharged, at which point the
        // power state machine will transition to the idle state, and
        // automatically move to a refresh, at that point it will also
        // call this method to get the refresh event loop going again
        return;
    }

    // last but not least we perform the actual refresh
    if (refreshState == REF_START) {
        // should never get here with any banks active
        assert(numBanksActive == 0);
        assert(pwrState == PWR_REF);

        Tick ref_done_at = curTick() + memory.tRFC;

        for (auto &b : banks) {
            b.actAllowedAt = ref_done_at;
        }

        // at the moment this affects all ranks
        cmdList.push_back(Command(MemCommand::REF, 0, curTick()));

        // Update the stats
        updatePowerStats();

        DPRINTF(DRAMPower, "%llu,REF,0,%d\n", divCeil(curTick(), memory.tCK) -
                memory.timeStampOffset, rank);

        // Update for next refresh
        refreshDueAt += memory.tREFI;

        // make sure we did not wait so long that we cannot make up
        // for it
        if (refreshDueAt < ref_done_at) {
            fatal("Refresh was delayed so long we cannot catch up\n");
        }

        // Run the refresh and schedule event to transition power states
        // when refresh completes
        refreshState = REF_RUN;
        schedule(refreshEvent, ref_done_at);
        return;
    }

    if (refreshState == REF_RUN) {
        // should never get here with any banks active
        assert(numBanksActive == 0);
        assert(pwrState == PWR_REF);

        assert(!powerEvent.scheduled());

        if ((memory.drainState() == DrainState::Draining) ||
            (memory.drainState() == DrainState::Drained)) {
            // if draining, do not re-enter low-power mode.
            // simply go to IDLE and wait
            schedulePowerEvent(PWR_IDLE, curTick());
        } else {
            // At the moment, we sleep when the refresh ends and wait to be
            // woken up again if previously in a low-power state.
            if (pwrStatePostRefresh != PWR_IDLE) {
                // power State should be power Refresh
                assert(pwrState == PWR_REF);
                DPRINTF(DRAMState, "Rank %d sleeping after refresh and was in "
                        "power state %d before refreshing\n", rank,
                        pwrStatePostRefresh);
                powerDownSleep(pwrState, curTick());

            // Force PRE power-down if there are no outstanding commands
            // in Q after refresh.
            } else if (isQueueEmpty() && memory.enableDRAMPowerdown) {
                // still have refresh event outstanding but there should
                // be no other events outstanding
                assert(outstandingEvents == 1);
                DPRINTF(DRAMState, "Rank %d sleeping after refresh but was NOT"
                        " in a low power state before refreshing\n", rank);
                powerDownSleep(PWR_PRE_PDN, curTick());

            } else {
                // move to the idle power state once the refresh is done, this
                // will also move the refresh state machine to the refresh
                // state
                schedulePowerEvent(PWR_IDLE, curTick());
            }
        }

        // At this point, we have completed the current refresh.
        // In the SREF bypass case, we do not get to this state in the
        // refresh STM and therefore can always schedule next event.
        // Compensate for the delay in actually performing the refresh
        // when scheduling the next one
        schedule(refreshEvent, refreshDueAt - memory.tRP);

        DPRINTF(DRAMState, "Refresh done at %llu and next refresh"
                " at %llu\n", curTick(), refreshDueAt);
    }
}
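
// Illustrative timing for the refresh loop above (typical DDR4-style
// values, not taken from this file): with tREFI = 7.8us and tRFC = 350ns,
// the refresh itself occupies roughly 4.5% of each refresh interval. The
// next refreshEvent fires tRP early (refreshDueAt - memory.tRP) so the
// precharge-all can complete before the REF command itself is due.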

void
DRAMCtrl::Rank::schedulePowerEvent(PowerState pwr_state, Tick tick)
{
    // respect causality
    assert(tick >= curTick());

    if (!powerEvent.scheduled()) {
        DPRINTF(DRAMState, "Scheduling power event at %llu to state %d\n",
                tick, pwr_state);

        // insert the new transition
        pwrStateTrans = pwr_state;

        schedule(powerEvent, tick);
    } else {
        panic("Scheduled power event at %llu to state %d, "
              "with scheduled event at %llu to %d\n", tick, pwr_state,
              powerEvent.when(), pwrStateTrans);
    }
}

void
DRAMCtrl::Rank::powerDownSleep(PowerState pwr_state, Tick tick)
{
    // if low power state is active power-down, schedule to active
    // power-down state. In reality tCKE is needed to enter active
    // power-down. This is neglected here and could be added in the future.
    if (pwr_state == PWR_ACT_PDN) {
        schedulePowerEvent(pwr_state, tick);
        // push command to DRAMPower
        cmdList.push_back(Command(MemCommand::PDN_F_ACT, 0, tick));
        DPRINTF(DRAMPower, "%llu,PDN_F_ACT,0,%d\n", divCeil(tick,
                memory.tCK) - memory.timeStampOffset, rank);
    } else if (pwr_state == PWR_PRE_PDN) {
        // if low power state is precharge power-down, schedule to precharge
        // power-down state. In reality tCKE is needed to enter precharge
        // power-down. This is neglected here.
        schedulePowerEvent(pwr_state, tick);
        // push command to DRAMPower
        cmdList.push_back(Command(MemCommand::PDN_F_PRE, 0, tick));
        DPRINTF(DRAMPower, "%llu,PDN_F_PRE,0,%d\n", divCeil(tick,
                memory.tCK) - memory.timeStampOffset, rank);
    } else if (pwr_state == PWR_REF) {
        // if a refresh just occurred
        // transition to PRE_PDN now that all banks are closed
        // precharge power down requires tCKE to enter. For simplicity
        // this is not considered.
        schedulePowerEvent(PWR_PRE_PDN, tick);
        // push command to DRAMPower
        cmdList.push_back(Command(MemCommand::PDN_F_PRE, 0, tick));
        DPRINTF(DRAMPower, "%llu,PDN_F_PRE,0,%d\n", divCeil(tick,
                memory.tCK) - memory.timeStampOffset, rank);
    } else if (pwr_state == PWR_SREF) {
        // should only enter SREF after PRE-PD wakeup to do a refresh
        assert(pwrStatePostRefresh == PWR_PRE_PDN);
        // self refresh requires time tCKESR to enter. For simplicity,
        // this is not considered.
        schedulePowerEvent(PWR_SREF, tick);
        // push command to DRAMPower
        cmdList.push_back(Command(MemCommand::SREN, 0, tick));
        DPRINTF(DRAMPower, "%llu,SREN,0,%d\n", divCeil(tick,
                memory.tCK) - memory.timeStampOffset, rank);
    }
    // Ensure that we don't power-down and back up in same tick
    // Once we commit to PD entry, do it and wait for at least 1tCK
    // This could be replaced with tCKE if/when that is added to the model
    wakeUpAllowedAt = tick + memory.tCK;

    // Transitioning to a low power state, set flag
    inLowPowerState = true;
}
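
// Summary of the power-state-to-DRAMPower-command mapping implemented in
// powerDownSleep above:
//   PWR_ACT_PDN -> PDN_F_ACT  (active power-down)
//   PWR_PRE_PDN -> PDN_F_PRE  (precharge power-down)
//   PWR_REF     -> PDN_F_PRE  (power down once the refresh completes)
//   PWR_SREF    -> SREN       (self-refresh entry)
// Reading PDN_F_* as "fast-exit" power-down follows DRAMPower's naming
// convention and is an assumption here, not something this file states.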

void
DRAMCtrl::Rank::scheduleWakeUpEvent(Tick exit_delay)
{
    Tick wake_up_tick = std::max(curTick(), wakeUpAllowedAt);

    DPRINTF(DRAMState, "Scheduling wake-up for rank %d at tick %d\n",
            rank, wake_up_tick);

    // if waking for refresh, hold previous state
    // else reset state back to IDLE
    if (refreshState == REF_PD_EXIT) {
        pwrStatePostRefresh = pwrState;
    } else {
        // don't automatically transition back to LP state after next REF
        pwrStatePostRefresh = PWR_IDLE;
    }

    // schedule wake-up with event to ensure entry has completed before
    // we try to wake-up
    schedule(wakeUpEvent, wake_up_tick);

    for (auto &b : banks) {
        // respect both causality and any existing bank
        // constraints, some banks could already have a
        // (auto) precharge scheduled
        b.wrAllowedAt = std::max(wake_up_tick + exit_delay, b.wrAllowedAt);
        b.rdAllowedAt = std::max(wake_up_tick + exit_delay, b.rdAllowedAt);
        b.preAllowedAt = std::max(wake_up_tick + exit_delay, b.preAllowedAt);
        b.actAllowedAt = std::max(wake_up_tick + exit_delay, b.actAllowedAt);
    }
    // Transitioning out of low power state, clear flag
    inLowPowerState = false;

    // push to DRAMPower
    // use pwrStateTrans for cases where we have a power event scheduled
    // to enter low power that has not yet been processed
    if (pwrStateTrans == PWR_ACT_PDN) {
        cmdList.push_back(Command(MemCommand::PUP_ACT, 0, wake_up_tick));
        DPRINTF(DRAMPower, "%llu,PUP_ACT,0,%d\n", divCeil(wake_up_tick,
                memory.tCK) - memory.timeStampOffset, rank);

    } else if (pwrStateTrans == PWR_PRE_PDN) {
        cmdList.push_back(Command(MemCommand::PUP_PRE, 0, wake_up_tick));
        DPRINTF(DRAMPower, "%llu,PUP_PRE,0,%d\n", divCeil(wake_up_tick,
                memory.tCK) - memory.timeStampOffset, rank);
    } else if (pwrStateTrans == PWR_SREF) {
        cmdList.push_back(Command(MemCommand::SREX, 0, wake_up_tick));
        DPRINTF(DRAMPower, "%llu,SREX,0,%d\n", divCeil(wake_up_tick,
                memory.tCK) - memory.timeStampOffset, rank);
    }
}

void
DRAMCtrl::Rank::processWakeUpEvent()
{
    // Should be in a power-down or self-refresh state
    assert((pwrState == PWR_ACT_PDN) || (pwrState == PWR_PRE_PDN) ||
           (pwrState == PWR_SREF));

    // Check current state to determine transition state
    if (pwrState == PWR_ACT_PDN) {
        // banks still open, transition to PWR_ACT
        schedulePowerEvent(PWR_ACT, curTick());
    } else {
        // transitioning from a precharge power-down or self-refresh state
        // banks are closed - transition to PWR_IDLE
        schedulePowerEvent(PWR_IDLE, curTick());
    }
}

void
DRAMCtrl::Rank::processPowerEvent()
{
    assert(curTick() >= pwrStateTick);
    // remember where we were, and for how long
    Tick duration = curTick() - pwrStateTick;
    PowerState prev_state = pwrState;

    // update the accounting
    stats.memoryStateTime[prev_state] += duration;

    // track the total idle time
    if ((prev_state == PWR_PRE_PDN) || (prev_state == PWR_ACT_PDN) ||
        (prev_state == PWR_SREF)) {
        stats.totalIdleTime += duration;
    }

    pwrState = pwrStateTrans;
    pwrStateTick = curTick();

    // if rank was refreshing, make sure to start scheduling requests again
    if (prev_state == PWR_REF) {
        // bus IDLED prior to REF
        // counter should be one for refresh command only
        assert(outstandingEvents == 1);
        // REF complete, decrement count and go back to IDLE
        --outstandingEvents;
        refreshState = REF_IDLE;

        DPRINTF(DRAMState, "Was refreshing for %llu ticks\n", duration);
        // if moving back to power-down after refresh
        if (pwrState != PWR_IDLE) {
            assert(pwrState == PWR_PRE_PDN);
            DPRINTF(DRAMState, "Switching to power down state after refreshing"
                    " rank %d at %llu tick\n", rank, curTick());
        }

        // completed refresh event, ensure next request is scheduled
        if (!memory.nextReqEvent.scheduled()) {
            DPRINTF(DRAM, "Scheduling next request after refreshing"
                    " rank %d\n", rank);
            schedule(memory.nextReqEvent, curTick());
        }
    }

    if ((pwrState == PWR_ACT) && (refreshState == REF_PD_EXIT)) {
        // have exited ACT PD
        assert(prev_state == PWR_ACT_PDN);

        // go back to REF event and close banks
        refreshState = REF_PRE;
        schedule(refreshEvent, curTick());
    } else if (pwrState == PWR_IDLE) {
        DPRINTF(DRAMState, "All banks precharged\n");
        if (prev_state == PWR_SREF) {
            // set refresh state to REF_SREF_EXIT, ensuring inRefIdleState
            // continues to return false during tXS after SREF exit
            // Schedule a refresh which kicks things back into action
            // when it finishes
            refreshState = REF_SREF_EXIT;
            schedule(refreshEvent, curTick() + memory.tXS);
        } else {
            // if we have a pending refresh, and are now moving to
            // the idle state, directly transition to, or schedule refresh
            if ((refreshState == REF_PRE) || (refreshState == REF_PD_EXIT)) {
                // ensure refresh is restarted only after final PRE command.
                // do not restart refresh if controller is in an intermediate
                // state, after PRE_PDN exit, when banks are IDLE but an
                // ACT is scheduled.
                if (!activateEvent.scheduled()) {
                    // there should be nothing waiting at this point
                    assert(!powerEvent.scheduled());
                    if (refreshState == REF_PD_EXIT) {
                        // exiting PRE PD, will be in IDLE until tXP expires
                        // and then should transition to PWR_REF state
                        assert(prev_state == PWR_PRE_PDN);
                        schedulePowerEvent(PWR_REF, curTick() + memory.tXP);
                    } else if (refreshState == REF_PRE) {
                        // can directly move to PWR_REF state and proceed below
                        pwrState = PWR_REF;
                    }
                } else {
                    // must have PRE scheduled to transition back to IDLE
                    // and re-kick off refresh
                    assert(prechargeEvent.scheduled());
                }
            }
        }
    }

    // transition to the refresh state and re-start refresh process
    // refresh state machine will schedule the next power state transition
    if (pwrState == PWR_REF) {
        // completed final PRE for refresh or exiting power-down
        assert(refreshState == REF_PRE || refreshState == REF_PD_EXIT);

        // exited PRE PD for refresh, with no pending commands
        // bypass auto-refresh and go straight to SREF, where memory
        // will issue refresh immediately upon entry
        if (pwrStatePostRefresh == PWR_PRE_PDN && isQueueEmpty() &&
            (memory.drainState() != DrainState::Draining) &&
            (memory.drainState() != DrainState::Drained) &&
            memory.enableDRAMPowerdown) {
            DPRINTF(DRAMState, "Rank %d bypassing refresh and transitioning "
                    "to self refresh at %llu tick\n", rank, curTick());
            powerDownSleep(PWR_SREF, curTick());

            // Since refresh was bypassed, remove event by decrementing count
            assert(outstandingEvents == 1);
            --outstandingEvents;

            // reset state back to IDLE temporarily until SREF is entered
            pwrState = PWR_IDLE;

        // Not bypassing refresh for SREF entry
        } else {
            DPRINTF(DRAMState, "Refreshing\n");

            // there should be nothing waiting at this point
            assert(!powerEvent.scheduled());

            // kick the refresh event loop into action again, and that
            // in turn will schedule a transition to the idle power
            // state once the refresh is done
            schedule(refreshEvent, curTick());

            // Banks transitioned to IDLE, start REF
            refreshState = REF_START;
        }
    }
}

void
DRAMCtrl::Rank::updatePowerStats()
{
    // All commands up to refresh have completed
    // flush cmdList to DRAMPower
    flushCmdList();

    // Call the function that calculates window energy at intermediate update
    // events like at refresh, stats dump as well as at simulation exit.
    // Window starts at the last time the calcWindowEnergy function was
    // called and is up to current time.
    power.powerlib.calcWindowEnergy(divCeil(curTick(), memory.tCK) -
                                    memory.timeStampOffset);

    // Get the energy from DRAMPower
    Data::MemoryPowerModel::Energy energy = power.powerlib.getEnergy();

    // The energy components inside the power lib are calculated over
    // the window so accumulate into the corresponding gem5 stat
    stats.actEnergy += energy.act_energy * memory.devicesPerRank;
    stats.preEnergy += energy.pre_energy * memory.devicesPerRank;
    stats.readEnergy += energy.read_energy * memory.devicesPerRank;
    stats.writeEnergy += energy.write_energy * memory.devicesPerRank;
    stats.refreshEnergy += energy.ref_energy * memory.devicesPerRank;
    stats.actBackEnergy += energy.act_stdby_energy * memory.devicesPerRank;
    stats.preBackEnergy += energy.pre_stdby_energy * memory.devicesPerRank;
    stats.actPowerDownEnergy += energy.f_act_pd_energy *
                                memory.devicesPerRank;
    stats.prePowerDownEnergy += energy.f_pre_pd_energy *
                                memory.devicesPerRank;
    stats.selfRefreshEnergy += energy.sref_energy * memory.devicesPerRank;

    // Accumulate window energy into the total energy.
    stats.totalEnergy += energy.window_energy * memory.devicesPerRank;
    // Average power must not be accumulated but calculated over the time
    // since last stats reset. SimClock::Frequency is tick period not tick
    // frequency.
    //              energy (pJ)     1e-9
    // power (mW) = ----------- * ----------
    //              time (tick)   tick_frequency
    stats.averagePower = (stats.totalEnergy.value() /
                          (curTick() - memory.lastStatsResetTick)) *
                         (SimClock::Frequency / 1000000000.0);
}
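
// Worked example for the average power calculation above (illustrative
// numbers only): with totalEnergy = 2e9 pJ accumulated over 1e9 ticks and
// SimClock::Frequency = 1e12 ticks/s, averagePower = (2e9 / 1e9) *
// (1e12 / 1e9) = 2 * 1000 = 2000 mW, i.e. 2 pJ per tick at 1 ps per tick
// is 2 W.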

void
DRAMCtrl::Rank::computeStats()
{
    DPRINTF(DRAM, "Computing stats due to a dump callback\n");

    // Update the stats
    updatePowerStats();

    // final update of power state times
    stats.memoryStateTime[pwrState] += (curTick() - pwrStateTick);
    pwrStateTick = curTick();
}

void
DRAMCtrl::Rank::resetStats()
{
    // The only way to clear the counters in DRAMPower is to call the
    // calcWindowEnergy function as that then calls clearCounters. The
    // clearCounters method itself is private.
    power.powerlib.calcWindowEnergy(divCeil(curTick(), memory.tCK) -
                                    memory.timeStampOffset);
}

DRAMCtrl::DRAMStats::DRAMStats(DRAMCtrl &_dram)
    : Stats::Group(&_dram),
    dram(_dram),

    ADD_STAT(readReqs, "Number of read requests accepted"),
    ADD_STAT(writeReqs, "Number of write requests accepted"),

    ADD_STAT(readBursts,
             "Number of DRAM read bursts, "
             "including those serviced by the write queue"),
    ADD_STAT(writeBursts,
             "Number of DRAM write bursts, "
             "including those merged in the write queue"),
    ADD_STAT(servicedByWrQ,
             "Number of DRAM read bursts serviced by the write queue"),
    ADD_STAT(mergedWrBursts,
             "Number of DRAM write bursts merged with an existing one"),

    ADD_STAT(neitherReadNorWriteReqs,
             "Number of requests that are neither read nor write"),

    ADD_STAT(perBankRdBursts, "Per bank read bursts"),
    ADD_STAT(perBankWrBursts, "Per bank write bursts"),

    ADD_STAT(avgRdQLen, "Average read queue length when enqueuing"),
    ADD_STAT(avgWrQLen, "Average write queue length when enqueuing"),

    ADD_STAT(totQLat, "Total ticks spent queuing"),
    ADD_STAT(totBusLat, "Total ticks spent in databus transfers"),
    ADD_STAT(totMemAccLat,
             "Total ticks spent from burst creation until serviced "
             "by the memory"),
    ADD_STAT(avgQLat, "Average queueing delay per DRAM burst"),
    ADD_STAT(avgBusLat, "Average bus latency per DRAM burst"),
    ADD_STAT(avgMemAccLat, "Average memory access latency per DRAM burst"),

    ADD_STAT(numRdRetry, "Number of times read queue was full causing retry"),
    ADD_STAT(numWrRetry, "Number of times write queue was full causing retry"),

    ADD_STAT(readRowHits, "Number of row buffer hits during reads"),
    ADD_STAT(writeRowHits, "Number of row buffer hits during writes"),
    ADD_STAT(readRowHitRate, "Row buffer hit rate for reads"),
    ADD_STAT(writeRowHitRate, "Row buffer hit rate for writes"),

    ADD_STAT(readPktSize, "Read request sizes (log2)"),
    ADD_STAT(writePktSize, "Write request sizes (log2)"),

    ADD_STAT(rdQLenPdf, "What read queue length does an incoming req see"),
    ADD_STAT(wrQLenPdf, "What write queue length does an incoming req see"),

    ADD_STAT(bytesPerActivate, "Bytes accessed per row activation"),

    ADD_STAT(rdPerTurnAround,
             "Reads before turning the bus around for writes"),
    ADD_STAT(wrPerTurnAround,
             "Writes before turning the bus around for reads"),

    ADD_STAT(bytesReadDRAM, "Total number of bytes read from DRAM"),
    ADD_STAT(bytesReadWrQ, "Total number of bytes read from write queue"),
    ADD_STAT(bytesWritten, "Total number of bytes written to DRAM"),
    ADD_STAT(bytesReadSys, "Total read bytes from the system interface side"),
    ADD_STAT(bytesWrittenSys,
             "Total written bytes from the system interface side"),

    ADD_STAT(avgRdBW, "Average DRAM read bandwidth in MiByte/s"),
    ADD_STAT(avgWrBW, "Average achieved write bandwidth in MiByte/s"),
    ADD_STAT(avgRdBWSys, "Average system read bandwidth in MiByte/s"),
    ADD_STAT(avgWrBWSys, "Average system write bandwidth in MiByte/s"),
    ADD_STAT(peakBW, "Theoretical peak bandwidth in MiByte/s"),

    ADD_STAT(busUtil, "Data bus utilization in percentage"),
    ADD_STAT(busUtilRead, "Data bus utilization in percentage for reads"),
    ADD_STAT(busUtilWrite, "Data bus utilization in percentage for writes"),

    ADD_STAT(totGap, "Total gap between requests"),
    ADD_STAT(avgGap, "Average gap between requests"),

    ADD_STAT(masterReadBytes, "Per-master bytes read from memory"),
    ADD_STAT(masterWriteBytes, "Per-master bytes written to memory"),
    ADD_STAT(masterReadRate,
             "Per-master bytes read from memory rate (Bytes/sec)"),
    ADD_STAT(masterWriteRate,
             "Per-master bytes written to memory rate (Bytes/sec)"),
    ADD_STAT(masterReadAccesses,
             "Per-master read serviced memory accesses"),
    ADD_STAT(masterWriteAccesses,
             "Per-master write serviced memory accesses"),
    ADD_STAT(masterReadTotalLat,
             "Per-master read total memory access latency"),
    ADD_STAT(masterWriteTotalLat,
             "Per-master write total memory access latency"),
    ADD_STAT(masterReadAvgLat,
             "Per-master read average memory access latency"),
    ADD_STAT(masterWriteAvgLat,
             "Per-master write average memory access latency"),

    ADD_STAT(pageHitRate, "Row buffer hit rate, read and write combined")
{
}

void
DRAMCtrl::DRAMStats::regStats()
{
    using namespace Stats;

    assert(dram._system);
    const auto max_masters = dram._system->maxMasters();

    perBankRdBursts.init(dram.banksPerRank * dram.ranksPerChannel);
    perBankWrBursts.init(dram.banksPerRank * dram.ranksPerChannel);

    avgRdQLen.precision(2);
    avgWrQLen.precision(2);
    avgQLat.precision(2);
    avgBusLat.precision(2);
    avgMemAccLat.precision(2);

    readRowHitRate.precision(2);
    writeRowHitRate.precision(2);

    readPktSize.init(ceilLog2(dram.burstSize) + 1);
    writePktSize.init(ceilLog2(dram.burstSize) + 1);

    rdQLenPdf.init(dram.readBufferSize);
    wrQLenPdf.init(dram.writeBufferSize);

    bytesPerActivate
        .init(dram.maxAccessesPerRow ?
              dram.maxAccessesPerRow : dram.rowBufferSize)
        .flags(nozero);

    rdPerTurnAround
        .init(dram.readBufferSize)
        .flags(nozero);

    wrPerTurnAround
        .init(dram.writeBufferSize)
        .flags(nozero);

    avgRdBW.precision(2);
    avgWrBW.precision(2);
    avgRdBWSys.precision(2);
    avgWrBWSys.precision(2);
    peakBW.precision(2);

    busUtil.precision(2);
    avgGap.precision(2);
    busUtilWrite.precision(2);
    busUtilRead.precision(2);
    pageHitRate.precision(2);

    // per-master bytes read and written to memory
    masterReadBytes
        .init(max_masters)
        .flags(nozero | nonan);

    masterWriteBytes
        .init(max_masters)
        .flags(nozero | nonan);

    // per-master bytes read and written to memory rate
    masterReadRate
        .flags(nozero | nonan)
        .precision(12);

    masterWriteRate
        .flags(nozero | nonan)
        .precision(12);

    masterReadAccesses
        .init(max_masters)
        .flags(nozero);

    masterWriteAccesses
        .init(max_masters)
        .flags(nozero);

    masterReadTotalLat
        .init(max_masters)
        .flags(nozero | nonan);

    masterReadAvgLat
        .flags(nonan)
        .precision(2);

    masterWriteTotalLat
        .init(max_masters)
        .flags(nozero | nonan);

    masterWriteAvgLat
        .flags(nonan)
        .precision(2);

    for (int i = 0; i < max_masters; i++) {
        const std::string master = dram._system->getMasterName(i);
        masterReadBytes.subname(i, master);
        masterReadRate.subname(i, master);
        masterWriteBytes.subname(i, master);
        masterWriteRate.subname(i, master);
        masterReadAccesses.subname(i, master);
        masterWriteAccesses.subname(i, master);
        masterReadTotalLat.subname(i, master);
        masterReadAvgLat.subname(i, master);
        masterWriteTotalLat.subname(i, master);
        masterWriteAvgLat.subname(i, master);
    }

    // Formula stats
    avgQLat = totQLat / (readBursts - servicedByWrQ);
    avgBusLat = totBusLat / (readBursts - servicedByWrQ);
    avgMemAccLat = totMemAccLat / (readBursts - servicedByWrQ);

    readRowHitRate = (readRowHits / (readBursts - servicedByWrQ)) * 100;
    writeRowHitRate = (writeRowHits / (writeBursts - mergedWrBursts)) * 100;

    avgRdBW = (bytesReadDRAM / 1000000) / simSeconds;
    avgWrBW = (bytesWritten / 1000000) / simSeconds;
    avgRdBWSys = (bytesReadSys / 1000000) / simSeconds;
    avgWrBWSys = (bytesWrittenSys / 1000000) / simSeconds;
    peakBW = (SimClock::Frequency / dram.tBURST) * dram.burstSize / 1000000;

    busUtil = (avgRdBW + avgWrBW) / peakBW * 100;

    avgGap = totGap / (readReqs + writeReqs);

    busUtilRead = avgRdBW / peakBW * 100;
    busUtilWrite = avgWrBW / peakBW * 100;

    pageHitRate = (writeRowHits + readRowHits) /
        (writeBursts - mergedWrBursts + readBursts - servicedByWrQ) * 100;

    masterReadRate = masterReadBytes / simSeconds;
    masterWriteRate = masterWriteBytes / simSeconds;
    masterReadAvgLat = masterReadTotalLat / masterReadAccesses;
    masterWriteAvgLat = masterWriteTotalLat / masterWriteAccesses;
}
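
// Worked example for the peakBW formula above (illustrative DDR3-1600 x64
// numbers): with SimClock::Frequency = 1e12 ticks/s, tBURST = 5000 ticks
// (5 ns) and burstSize = 64 bytes, peakBW = (1e12 / 5000) * 64 / 1000000
// = 12800, matching the expected 12.8 GB/s channel bandwidth. Note the
// division by 1e6 yields decimal megabytes despite the "MiByte/s" label
// in the stat descriptions.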

void
DRAMCtrl::DRAMStats::resetStats()
{
    dram.lastStatsResetTick = curTick();
}

DRAMCtrl::RankStats::RankStats(DRAMCtrl &_memory, Rank &_rank)
    : Stats::Group(&_memory, csprintf("rank%d", _rank.rank).c_str()),
    rank(_rank),

    ADD_STAT(actEnergy, "Energy for activate commands per rank (pJ)"),
    ADD_STAT(preEnergy, "Energy for precharge commands per rank (pJ)"),
    ADD_STAT(readEnergy, "Energy for read commands per rank (pJ)"),
    ADD_STAT(writeEnergy, "Energy for write commands per rank (pJ)"),
    ADD_STAT(refreshEnergy, "Energy for refresh commands per rank (pJ)"),
    ADD_STAT(actBackEnergy, "Energy for active background per rank (pJ)"),
    ADD_STAT(preBackEnergy, "Energy for precharge background per rank (pJ)"),
    ADD_STAT(actPowerDownEnergy,
             "Energy for active power-down per rank (pJ)"),
    ADD_STAT(prePowerDownEnergy,
             "Energy for precharge power-down per rank (pJ)"),
    ADD_STAT(selfRefreshEnergy, "Energy for self refresh per rank (pJ)"),

    ADD_STAT(totalEnergy, "Total energy per rank (pJ)"),
    ADD_STAT(averagePower, "Core power per rank (mW)"),

    ADD_STAT(totalIdleTime, "Total Idle time Per DRAM Rank"),
    ADD_STAT(memoryStateTime, "Time in different power states")
{
}

void
DRAMCtrl::RankStats::regStats()
{
    Stats::Group::regStats();

    memoryStateTime.init(6);
    memoryStateTime.subname(0, "IDLE");
    memoryStateTime.subname(1, "REF");
    memoryStateTime.subname(2, "SREF");
    memoryStateTime.subname(3, "PRE_PDN");
    memoryStateTime.subname(4, "ACT");
    memoryStateTime.subname(5, "ACT_PDN");
}
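
// The subname indices above assume the PowerState enum is laid out as
// PWR_IDLE, PWR_REF, PWR_SREF, PWR_PRE_PDN, PWR_ACT, PWR_ACT_PDN, since
// memoryStateTime is indexed directly with the power state elsewhere in
// this file (e.g. in processPowerEvent).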

void
DRAMCtrl::RankStats::resetStats()
{
    Stats::Group::resetStats();

    rank.resetStats();
}

void
DRAMCtrl::RankStats::preDumpStats()
{
    Stats::Group::preDumpStats();

    rank.computeStats();
}

void
DRAMCtrl::recvFunctional(PacketPtr pkt)
{
    // rely on the abstract memory
    functionalAccess(pkt);
}

Port &
DRAMCtrl::getPort(const string &if_name, PortID idx)
{
    if (if_name != "port") {
        return QoS::MemCtrl::getPort(if_name, idx);
    } else {
        return port;
    }
}

DrainState
DRAMCtrl::drain()
{
    // if there is anything in any of our internal queues, keep track
    // of that as well
    if (!(!totalWriteQueueSize && !totalReadQueueSize && respQueue.empty() &&
          allRanksDrained())) {

        DPRINTF(Drain, "DRAM controller not drained, write: %d, read: %d,"
                " resp: %d\n", totalWriteQueueSize, totalReadQueueSize,
                respQueue.size());

        // the only queue that is not drained automatically over time
        // is the write queue, thus kick things into action if needed
        if (!totalWriteQueueSize && !nextReqEvent.scheduled()) {
            schedule(nextReqEvent, curTick());
        }

        // also need to kick off events to exit self-refresh
        for (auto r : ranks) {
            // force self-refresh exit, which in turn will issue auto-refresh
            if (r->pwrState == PWR_SREF) {
                DPRINTF(DRAM, "Rank%d: Forcing self-refresh wakeup in drain\n",
                        r->rank);
                r->scheduleWakeUpEvent(tXS);
            }
        }

        return DrainState::Draining;
    } else {
        return DrainState::Drained;
    }
}

bool
DRAMCtrl::allRanksDrained() const
{
    // true until proven false
    bool all_ranks_drained = true;
    for (auto r : ranks) {
        // then verify that the power state is IDLE ensuring all banks are
        // closed and rank is not in a low power state. Also verify that rank
        // is idle from a refresh point of view.
        all_ranks_drained = r->inPwrIdleState() && r->inRefIdleState() &&
                            all_ranks_drained;
    }
    return all_ranks_drained;
}

void
DRAMCtrl::drainResume()
{
    if (!isTimingMode && system()->isTimingMode()) {
        // if we switched to timing mode, kick things into action,
        // and behave as if we restored from a checkpoint
        startup();
    } else if (isTimingMode && !system()->isTimingMode()) {
        // if we switch from timing mode, stop the refresh events to
        // not cause issues with KVM
        for (auto r : ranks) {
            r->suspend();
        }
    }

    // update the mode
    isTimingMode = system()->isTimingMode();
}

DRAMCtrl::MemoryPort::MemoryPort(const std::string& name, DRAMCtrl& _memory)
    : QueuedSlavePort(name, &_memory, queue), queue(_memory, *this, true),
      memory(_memory)
{ }

AddrRangeList
DRAMCtrl::MemoryPort::getAddrRanges() const
{
    AddrRangeList ranges;
    ranges.push_back(memory.getAddrRange());
    return ranges;
}

void
DRAMCtrl::MemoryPort::recvFunctional(PacketPtr pkt)
{
    pkt->pushLabel(memory.name());

    if (!queue.trySatisfyFunctional(pkt)) {
        // Default implementation of SimpleTimingPort::recvFunctional()
        // calls recvAtomic() and throws away the latency; we can save a
        // little here by just not calculating the latency.
        memory.recvFunctional(pkt);
    }

    pkt->popLabel();
}

Tick
DRAMCtrl::MemoryPort::recvAtomic(PacketPtr pkt)
{
    return memory.recvAtomic(pkt);
}

bool
DRAMCtrl::MemoryPort::recvTimingReq(PacketPtr pkt)
{
    // pass it to the memory controller
    return memory.recvTimingReq(pkt);
}

DRAMCtrl*
DRAMCtrlParams::create()
{
    return new DRAMCtrl(this);
}