/*
 * Copyright (c) 2010-2017 ARM Limited
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2013 Amin Farmahini-Farahani
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Andreas Hansson
 */

#include "mem/dram_ctrl.hh"

#include "base/bitfield.hh"
#include "base/trace.hh"
#include "debug/DRAM.hh"
#include "debug/DRAMPower.hh"
#include "debug/DRAMState.hh"
#include "sim/system.hh"

using namespace std;

DRAMCtrl::DRAMCtrl(const DRAMCtrlParams* p) :
    AbstractMemory(p),
    port(name() + ".port", *this), isTimingMode(false),
    retryRdReq(false), retryWrReq(false),
    busState(READ),
    busStateNext(READ),
    nextReqEvent(this), respondEvent(this),
    deviceSize(p->device_size),
    deviceBusWidth(p->device_bus_width), burstLength(p->burst_length),
    deviceRowBufferSize(p->device_rowbuffer_size),
    devicesPerRank(p->devices_per_rank),
    burstSize((devicesPerRank * burstLength * deviceBusWidth) / 8),
    rowBufferSize(devicesPerRank * deviceRowBufferSize),
    columnsPerRowBuffer(rowBufferSize / burstSize),
    columnsPerStripe(range.interleaved() ?
                     range.granularity() / burstSize : 1),
    ranksPerChannel(p->ranks_per_channel),
    bankGroupsPerRank(p->bank_groups_per_rank),
    bankGroupArch(p->bank_groups_per_rank > 0),
    banksPerRank(p->banks_per_rank), channels(p->channels), rowsPerBank(0),
    readBufferSize(p->read_buffer_size),
    writeBufferSize(p->write_buffer_size),
    writeHighThreshold(writeBufferSize * p->write_high_thresh_perc / 100.0),
    writeLowThreshold(writeBufferSize * p->write_low_thresh_perc / 100.0),
    minWritesPerSwitch(p->min_writes_per_switch),
    writesThisTime(0), readsThisTime(0),
    tCK(p->tCK), tWTR(p->tWTR), tRTW(p->tRTW), tCS(p->tCS), tBURST(p->tBURST),
    tCCD_L(p->tCCD_L), tRCD(p->tRCD), tCL(p->tCL), tRP(p->tRP), tRAS(p->tRAS),
    tWR(p->tWR), tRTP(p->tRTP), tRFC(p->tRFC), tREFI(p->tREFI), tRRD(p->tRRD),
    tRRD_L(p->tRRD_L), tXAW(p->tXAW), tXP(p->tXP), tXS(p->tXS),
    activationLimit(p->activation_limit),
    memSchedPolicy(p->mem_sched_policy), addrMapping(p->addr_mapping),
    pageMgmt(p->page_policy),
    maxAccessesPerRow(p->max_accesses_per_row),
    frontendLatency(p->static_frontend_latency),
    backendLatency(p->static_backend_latency),
    busBusyUntil(0), prevArrival(0),
    nextReqTime(0), activeRank(0), timeStampOffset(0)
{
    // sanity check the ranks since we rely on bit slicing for the
    // address decoding
    fatal_if(!isPowerOf2(ranksPerChannel), "DRAM rank count of %d is not "
             "allowed, must be a power of two\n", ranksPerChannel);

    fatal_if(!isPowerOf2(burstSize), "DRAM burst size %d is not allowed, "
             "must be a power of two\n", burstSize);
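
    // Illustrative sanity check of the geometry above (example values,
    // not taken from any particular config): with 8 devices per rank,
    // an 8-bit device interface and a burst length of 8, burstSize is
    // (8 * 8 * 8) / 8 = 64 bytes, i.e. one 64-byte cache line per
    // burst, and a 1 kbyte device row buffer gives a rowBufferSize of
    // 8 * 1024 = 8192 bytes with 8192 / 64 = 128 columns per row buffer.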

    for (int i = 0; i < ranksPerChannel; i++) {
        Rank* rank = new Rank(*this, p);
        ranks.push_back(rank);

        rank->actTicks.resize(activationLimit, 0);
        rank->banks.resize(banksPerRank);
        rank->rank = i;

        for (int b = 0; b < banksPerRank; b++) {
            rank->banks[b].bank = b;
            // GDDR addressing of banks to BG is linear.
            // Here we assume that all DRAM generations address bank groups as
            // follows:
            if (bankGroupArch) {
                // Simply assign lower bits to bank group in order to
                // rotate across bank groups as banks are incremented
                // e.g. with 4 banks per bank group and 16 banks total:
                //    banks 0,4,8,12  are in bank group 0
                //    banks 1,5,9,13  are in bank group 1
                //    banks 2,6,10,14 are in bank group 2
                //    banks 3,7,11,15 are in bank group 3
                rank->banks[b].bankgr = b % bankGroupsPerRank;
            } else {
                // No bank groups; simply assign to bank number
                rank->banks[b].bankgr = b;
            }
        }
    }

    // perform a basic check of the write thresholds
    if (p->write_low_thresh_perc >= p->write_high_thresh_perc)
        fatal("Write buffer low threshold %d must be smaller than the "
              "high threshold %d\n", p->write_low_thresh_perc,
              p->write_high_thresh_perc);

    // determine the rows per bank by looking at the total capacity
    uint64_t capacity = ULL(1) << ceilLog2(AbstractMemory::size());

    // determine the dram actual capacity from the DRAM config in Mbytes
    uint64_t deviceCapacity = deviceSize / (1024 * 1024) * devicesPerRank *
        ranksPerChannel;

    // if actual DRAM size does not match memory capacity in system warn!
    if (deviceCapacity != capacity / (1024 * 1024))
        warn("DRAM device capacity (%d Mbytes) does not match the "
             "address range assigned (%d Mbytes)\n", deviceCapacity,
             capacity / (1024 * 1024));
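
    // Worked example of the check above (illustrative numbers): eight
    // 64 MByte devices per rank and two ranks give a deviceCapacity of
    // 64 * 8 * 2 = 1024 Mbytes, which should equal the address range
    // assigned to this controller; a mismatch only warns, it does not
    // abort the simulation.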

    DPRINTF(DRAM, "Memory capacity %lld (%lld) bytes\n", capacity,
            AbstractMemory::size());

    DPRINTF(DRAM, "Row buffer size %d bytes with %d columns per row buffer\n",
            rowBufferSize, columnsPerRowBuffer);

    rowsPerBank = capacity / (rowBufferSize * banksPerRank * ranksPerChannel);
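
    // Example of the rowsPerBank calculation (illustrative values): a
    // 1 GByte channel with an 8 kbyte row buffer, 8 banks per rank and
    // 2 ranks yields 2^30 / (8192 * 8 * 2) = 8192 rows per bank.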

    // some basic sanity checks
    if (tREFI <= tRP || tREFI <= tRFC) {
        fatal("tREFI (%d) must be larger than tRP (%d) and tRFC (%d)\n",
              tREFI, tRP, tRFC);
    }

    // basic bank group architecture checks ->
    if (bankGroupArch) {
        // must have at least one bank per bank group
        if (bankGroupsPerRank > banksPerRank) {
            fatal("banks per rank (%d) must be equal to or larger than "
                  "bank groups per rank (%d)\n",
                  banksPerRank, bankGroupsPerRank);
        }
        // must have same number of banks in each bank group
        if ((banksPerRank % bankGroupsPerRank) != 0) {
            fatal("Banks per rank (%d) must be evenly divisible by bank "
                  "groups per rank (%d) for equal banks per bank group\n",
                  banksPerRank, bankGroupsPerRank);
        }
        // tCCD_L should be greater than minimal, back-to-back burst delay
        if (tCCD_L <= tBURST) {
            fatal("tCCD_L (%d) should be larger than tBURST (%d) when "
                  "bank groups per rank (%d) is greater than 1\n",
                  tCCD_L, tBURST, bankGroupsPerRank);
        }
        // tRRD_L is greater than minimal, same bank group ACT-to-ACT delay
        // some datasheets might specify it equal to tRRD
        if (tRRD_L < tRRD) {
            fatal("tRRD_L (%d) should be larger than tRRD (%d) when "
                  "bank groups per rank (%d) is greater than 1\n",
                  tRRD_L, tRRD, bankGroupsPerRank);
        }
    }
}

void
DRAMCtrl::init()
{
    AbstractMemory::init();

    if (!port.isConnected()) {
        fatal("DRAMCtrl %s is unconnected!\n", name());
    } else {
        port.sendRangeChange();
    }

    // a bit of sanity checks on the interleaving, save it for here to
    // ensure that the system pointer is initialised
    if (range.interleaved()) {
        if (channels != range.stripes())
            fatal("%s has %d interleaved address stripes but %d channel(s)\n",
                  name(), range.stripes(), channels);

        if (addrMapping == Enums::RoRaBaChCo) {
            if (rowBufferSize != range.granularity()) {
                fatal("Channel interleaving of %s doesn't match RoRaBaChCo "
                      "address map\n", name());
            }
        } else if (addrMapping == Enums::RoRaBaCoCh ||
                   addrMapping == Enums::RoCoRaBaCh) {
            // for the interleavings with channel bits in the bottom,
            // if the system uses a channel striping granularity that
            // is larger than the DRAM burst size, then map the
            // sequential accesses within a stripe to a number of
            // columns in the DRAM, effectively placing some of the
            // lower-order column bits as the least-significant bits
            // of the address (above the ones denoting the burst size)
            assert(columnsPerStripe >= 1);

            // channel striping has to be done at a granularity that
            // is equal or larger to a cache line
            if (system()->cacheLineSize() > range.granularity()) {
                fatal("Channel interleaving of %s must be at least as large "
                      "as the cache line size\n", name());
            }

            // ...and equal or smaller than the row-buffer size
            if (rowBufferSize < range.granularity()) {
                fatal("Channel interleaving of %s must be at most as large "
                      "as the row-buffer size\n", name());
            }
            // this is essentially the check above, so just to be sure
            assert(columnsPerStripe <= columnsPerRowBuffer);
        }
    }
}
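
// A short numeric illustration of the stripe/column relationship checked
// above (example values, not defaults): with a 64-byte burst and a
// 128-byte channel-interleaving granularity, columnsPerStripe is
// 128 / 64 = 2, so two consecutive DRAM columns are mapped below the
// channel bits and each stripe spans two bursts.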

void
DRAMCtrl::startup()
{
    // remember the memory system mode of operation
    isTimingMode = system()->isTimingMode();

    if (isTimingMode) {
        // timestamp offset should be in clock cycles for DRAMPower
        timeStampOffset = divCeil(curTick(), tCK);

        // update the start tick for the precharge accounting to the
        // current tick
        for (auto r : ranks) {
            r->startup(curTick() + tREFI - tRP);
        }

        // shift the bus busy time sufficiently far ahead that we never
        // have to worry about negative values when computing the time for
        // the next request, this will add an insignificant bubble at the
        // start of simulation
        busBusyUntil = curTick() + tRP + tRCD + tCL;
    }
}

Tick
DRAMCtrl::recvAtomic(PacketPtr pkt)
{
    DPRINTF(DRAM, "recvAtomic: %s 0x%x\n", pkt->cmdString(), pkt->getAddr());

    panic_if(pkt->cacheResponding(), "Should not see packets where cache "
             "is responding");

    // do the actual memory access and turn the packet into a response
    access(pkt);

    Tick latency = 0;
    if (pkt->hasData()) {
        // this value is not supposed to be accurate, just enough to
        // keep things going, mimic a closed page
        latency = tRP + tRCD + tCL;
    }
    return latency;
}

bool
DRAMCtrl::readQueueFull(unsigned int neededEntries) const
{
    DPRINTF(DRAM, "Read queue limit %d, current size %d, entries needed %d\n",
            readBufferSize, readQueue.size() + respQueue.size(),
            neededEntries);

    return
        (readQueue.size() + respQueue.size() + neededEntries) > readBufferSize;
}

bool
DRAMCtrl::writeQueueFull(unsigned int neededEntries) const
{
    DPRINTF(DRAM, "Write queue limit %d, current size %d, entries needed %d\n",
            writeBufferSize, writeQueue.size(), neededEntries);
    return (writeQueue.size() + neededEntries) > writeBufferSize;
}

DRAMCtrl::DRAMPacket*
DRAMCtrl::decodeAddr(PacketPtr pkt, Addr dramPktAddr, unsigned size,
                     bool isRead)
{
    // decode the address based on the address mapping scheme, with
    // Ro, Ra, Co, Ba and Ch denoting row, rank, column, bank and
    // channel, respectively
    uint8_t rank;
    uint8_t bank;
    // use a 64-bit unsigned during the computations as the row is
    // always the top bits, and check before creating the DRAMPacket
    uint64_t row;

    // truncate the address to a DRAM burst, which makes it unique to
    // a specific column, row, bank, rank and channel
    Addr addr = dramPktAddr / burstSize;

    // we have removed the lowest order address bits that denote the
    // position within the column
    if (addrMapping == Enums::RoRaBaChCo) {
        // the lowest order bits denote the column to ensure that
        // sequential cache lines occupy the same row
        addr = addr / columnsPerRowBuffer;

        // take out the channel part of the address
        addr = addr / channels;

        // after the channel bits, get the bank bits to interleave
        // over the banks
        bank = addr % banksPerRank;
        addr = addr / banksPerRank;

        // after the bank, we get the rank bits which thus interleave
        // over the ranks
        rank = addr % ranksPerChannel;
        addr = addr / ranksPerChannel;

        // lastly, get the row bits, no need to remove them from addr
        row = addr % rowsPerBank;
    } else if (addrMapping == Enums::RoRaBaCoCh) {
        // take out the lower-order column bits
        addr = addr / columnsPerStripe;

        // take out the channel part of the address
        addr = addr / channels;

        // next, the higher-order column bits
        addr = addr / (columnsPerRowBuffer / columnsPerStripe);

        // after the column bits, we get the bank bits to interleave
        // over the banks
        bank = addr % banksPerRank;
        addr = addr / banksPerRank;

        // after the bank, we get the rank bits which thus interleave
        // over the ranks
        rank = addr % ranksPerChannel;
        addr = addr / ranksPerChannel;

        // lastly, get the row bits, no need to remove them from addr
        row = addr % rowsPerBank;
    } else if (addrMapping == Enums::RoCoRaBaCh) {
        // optimise for closed page mode and utilise maximum
        // parallelism of the DRAM (at the cost of power)

        // take out the lower-order column bits
        addr = addr / columnsPerStripe;

        // take out the channel part of the address, note that this has
        // to match with how accesses are interleaved between the
        // controllers in the address mapping
        addr = addr / channels;

        // start with the bank bits, as this provides the maximum
        // opportunity for parallelism between requests
        bank = addr % banksPerRank;
        addr = addr / banksPerRank;

        // next get the rank bits
        rank = addr % ranksPerChannel;
        addr = addr / ranksPerChannel;

        // next, the higher-order column bits
        addr = addr / (columnsPerRowBuffer / columnsPerStripe);

        // lastly, get the row bits, no need to remove them from addr
        row = addr % rowsPerBank;
    } else
        panic("Unknown address mapping policy chosen!");

    assert(rank < ranksPerChannel);
    assert(bank < banksPerRank);
    assert(row < rowsPerBank);
    assert(row < Bank::NO_ROW);

    DPRINTF(DRAM, "Address: %lld Rank %d Bank %d Row %d\n",
            dramPktAddr, rank, bank, row);

    // create the corresponding DRAM packet with the entry time and
    // ready time set to the current tick, the latter will be updated
    // later
    uint16_t bank_id = banksPerRank * rank + bank;
    return new DRAMPacket(pkt, isRead, rank, bank, row, bank_id, dramPktAddr,
                          size, ranks[rank]->banks[bank], *ranks[rank]);
}
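
// An illustrative decode under RoRaBaChCo (example geometry, not a
// default config): burstSize 64, columnsPerRowBuffer 128, 1 channel,
// 8 banks per rank, 2 ranks. Byte address 0x01200040 first becomes
// burst number 0x01200040 / 64 = 0x48001; dividing by 128 columns
// leaves 0x900, so bank = 0x900 % 8 = 0, rank = (0x900 / 8) % 2 = 0
// and row = 0x900 / 8 / 2 = 0x90.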

void
DRAMCtrl::addToReadQueue(PacketPtr pkt, unsigned int pktCount)
{
    // only add to the read queue here. whenever the request is
    // eventually done, set the readyTime, and call schedule()
    assert(!pkt->isWrite());

    assert(pktCount != 0);

    // if the request size is larger than burst size, the pkt is split into
    // multiple DRAM packets
    // Note if the pkt starting address is not aligned to burst size, the
    // address of first DRAM packet is kept unaligned. Subsequent DRAM packets
    // are aligned to burst size boundaries. This is to ensure we accurately
    // check read packets against packets in write queue.
    Addr addr = pkt->getAddr();
    unsigned pktsServicedByWrQ = 0;
    BurstHelper* burst_helper = NULL;
    for (int cnt = 0; cnt < pktCount; ++cnt) {
        unsigned size = std::min((addr | (burstSize - 1)) + 1,
                                 pkt->getAddr() + pkt->getSize()) - addr;
        readPktSize[ceilLog2(size)]++;

        // First check write buffer to see if the data is already at
        // the controller
        bool foundInWrQ = false;
        Addr burst_addr = burstAlign(addr);
        // if the burst address is not present then there is no need
        // looking any further
        if (isInWriteQueue.find(burst_addr) != isInWriteQueue.end()) {
            for (const auto& p : writeQueue) {
                // check if the read is subsumed in the write queue
                // packet we are looking at
                if (p->addr <= addr && (addr + size) <= (p->addr + p->size)) {
                    foundInWrQ = true;
                    pktsServicedByWrQ++;
                    DPRINTF(DRAM, "Read to addr %lld with size %d serviced by "
                            "write queue\n", addr, size);
                    bytesReadWrQ += burstSize;
                    break;
                }
            }
        }

        // If not found in the write q, make a DRAM packet and
        // push it onto the read queue
        if (!foundInWrQ) {

            // Make the burst helper for split packets
            if (pktCount > 1 && burst_helper == NULL) {
                DPRINTF(DRAM, "Read to addr %lld translates to %d "
                        "dram requests\n", pkt->getAddr(), pktCount);
                burst_helper = new BurstHelper(pktCount);
            }

            DRAMPacket* dram_pkt = decodeAddr(pkt, addr, size, true);
            dram_pkt->burstHelper = burst_helper;

            assert(!readQueueFull(1));
            rdQLenPdf[readQueue.size() + respQueue.size()]++;

            DPRINTF(DRAM, "Adding to read queue\n");

            readQueue.push_back(dram_pkt);

            // increment read entries of the rank
            ++dram_pkt->rankRef.readEntries;

            // Update stats
            avgRdQLen = readQueue.size() + respQueue.size();
        }

        // Starting address of next dram pkt (aligned to burstSize boundary)
        addr = (addr | (burstSize - 1)) + 1;
    }

    // If all packets are serviced by write queue, we send the response back
    if (pktsServicedByWrQ == pktCount) {
        accessAndRespond(pkt, frontendLatency);
        return;
    }

    // Update how many split packets are serviced by write queue
    if (burst_helper != NULL)
        burst_helper->burstsServiced = pktsServicedByWrQ;

    // If we are not already scheduled to get a request out of the
    // queue, do so now
    if (!nextReqEvent.scheduled()) {
        DPRINTF(DRAM, "Request scheduled immediately\n");
        schedule(nextReqEvent, curTick());
    }
}
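
// Worked example of the split/alignment logic above (illustrative
// numbers): with a 64-byte burstSize, a read of 100 bytes starting at
// byte address 100 becomes three DRAM packets. The first keeps the
// unaligned address 100 with size (100 | 63) + 1 - 100 = 28 bytes, the
// second covers [128, 192) and the third [192, 200), 8 bytes; each
// iteration advances addr with addr = (addr | (burstSize - 1)) + 1.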

void
DRAMCtrl::addToWriteQueue(PacketPtr pkt, unsigned int pktCount)
{
    // only add to the write queue here. whenever the request is
    // eventually done, set the readyTime, and call schedule()
    assert(pkt->isWrite());

    // if the request size is larger than burst size, the pkt is split into
    // multiple DRAM packets
    Addr addr = pkt->getAddr();
    for (int cnt = 0; cnt < pktCount; ++cnt) {
        unsigned size = std::min((addr | (burstSize - 1)) + 1,
                                 pkt->getAddr() + pkt->getSize()) - addr;
        writePktSize[ceilLog2(size)]++;

        // see if we can merge with an existing item in the write
        // queue and keep track of whether we have merged or not
        bool merged = isInWriteQueue.find(burstAlign(addr)) !=
            isInWriteQueue.end();

        // if the item was not merged we need to create a new write
        // and enqueue it
        if (!merged) {
            DRAMPacket* dram_pkt = decodeAddr(pkt, addr, size, false);

            assert(writeQueue.size() < writeBufferSize);
            wrQLenPdf[writeQueue.size()]++;

            DPRINTF(DRAM, "Adding to write queue\n");

            writeQueue.push_back(dram_pkt);
            isInWriteQueue.insert(burstAlign(addr));
            assert(writeQueue.size() == isInWriteQueue.size());

            // Update stats
            avgWrQLen = writeQueue.size();

            // increment write entries of the rank
            ++dram_pkt->rankRef.writeEntries;
        } else {
            DPRINTF(DRAM, "Merging write burst with existing queue entry\n");

            // keep track of the fact that this burst effectively
            // disappeared as it was merged with an existing one
            mergedWrBursts++;
        }

        // Starting address of next dram pkt (aligned to burstSize boundary)
        addr = (addr | (burstSize - 1)) + 1;
    }

    // we do not wait for the writes to be sent to the actual memory,
    // but instead take responsibility for the consistency here and
    // snoop the write queue for any upcoming reads
    // @todo, if a pkt size is larger than burst size, we might need a
    // different front end latency
    accessAndRespond(pkt, frontendLatency);

    // If we are not already scheduled to get a request out of the
    // queue, do so now
    if (!nextReqEvent.scheduled()) {
        DPRINTF(DRAM, "Request scheduled immediately\n");
        schedule(nextReqEvent, curTick());
    }
}

void
DRAMCtrl::printQs() const {
    DPRINTF(DRAM, "===READ QUEUE===\n\n");
    for (auto i = readQueue.begin() ; i != readQueue.end() ; ++i) {
        DPRINTF(DRAM, "Read %lu\n", (*i)->addr);
    }
    DPRINTF(DRAM, "\n===RESP QUEUE===\n\n");
    for (auto i = respQueue.begin() ; i != respQueue.end() ; ++i) {
        DPRINTF(DRAM, "Response %lu\n", (*i)->addr);
    }
    DPRINTF(DRAM, "\n===WRITE QUEUE===\n\n");
    for (auto i = writeQueue.begin() ; i != writeQueue.end() ; ++i) {
        DPRINTF(DRAM, "Write %lu\n", (*i)->addr);
    }
}

bool
DRAMCtrl::recvTimingReq(PacketPtr pkt)
{
    // This is where we enter from the outside world
    DPRINTF(DRAM, "recvTimingReq: request %s addr %lld size %d\n",
            pkt->cmdString(), pkt->getAddr(), pkt->getSize());

    panic_if(pkt->cacheResponding(), "Should not see packets where cache "
             "is responding");

    panic_if(!(pkt->isRead() || pkt->isWrite()),
             "Should only see read and writes at memory controller\n");

    // Calc avg gap between requests
    if (prevArrival != 0) {
        totGap += curTick() - prevArrival;
    }
    prevArrival = curTick();

    // Find out how many dram packets a pkt translates to
    // If the burst size is equal or larger than the pkt size, then a pkt
    // translates to only one dram packet. Otherwise, a pkt translates to
    // multiple dram packets
    unsigned size = pkt->getSize();
    unsigned offset = pkt->getAddr() & (burstSize - 1);
    unsigned int dram_pkt_count = divCeil(offset + size, burstSize);

    // check local buffers and do not accept if full
    if (pkt->isRead()) {
        if (readQueueFull(dram_pkt_count)) {
            DPRINTF(DRAM, "Read queue full, not accepting\n");
            // remember that we have to retry this port
            retryRdReq = true;
            return false;
        } else {
            addToReadQueue(pkt, dram_pkt_count);
            bytesReadSys += size;
        }
    } else {
        assert(pkt->isWrite());
        if (writeQueueFull(dram_pkt_count)) {
            DPRINTF(DRAM, "Write queue full, not accepting\n");
            // remember that we have to retry this port
            retryWrReq = true;
            return false;
        } else {
            addToWriteQueue(pkt, dram_pkt_count);
            bytesWrittenSys += size;
        }
    }

    return true;
}
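
// Example of the packet-to-burst conversion above (illustrative
// values): with a 64-byte burstSize, a 100-byte request starting at
// address 100 has offset 100 & 63 = 36, so dram_pkt_count =
// divCeil(36 + 100, 64) = 3 DRAM bursts.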

void
DRAMCtrl::processRespondEvent()
{
    DPRINTF(DRAM,
            "processRespondEvent(): Some req has reached its readyTime\n");

    DRAMPacket* dram_pkt = respQueue.front();

    // if a read has reached its ready-time, decrement the number of reads
    // At this point the packet has been handled and there is a possibility
    // to switch to low-power mode if no other packet is available
    --dram_pkt->rankRef.readEntries;
    DPRINTF(DRAM, "number of read entries for rank %d is %d\n",
            dram_pkt->rank, dram_pkt->rankRef.readEntries);

    // counter should at least indicate one outstanding request
    // for this read
    assert(dram_pkt->rankRef.outstandingEvents > 0);
    // read response received, decrement count
    --dram_pkt->rankRef.outstandingEvents;

    // at this moment should not have transitioned to a low-power state
    assert((dram_pkt->rankRef.pwrState != PWR_SREF) &&
           (dram_pkt->rankRef.pwrState != PWR_PRE_PDN) &&
           (dram_pkt->rankRef.pwrState != PWR_ACT_PDN));

    // track if this is the last packet before idling
    // and that there are no outstanding commands to this rank
    // if REF in progress, transition to LP state should not occur
    // until REF completes
    if ((dram_pkt->rankRef.refreshState == REF_IDLE) &&
        (dram_pkt->rankRef.lowPowerEntryReady())) {
        // verify that there are no events scheduled
        assert(!dram_pkt->rankRef.activateEvent.scheduled());
        assert(!dram_pkt->rankRef.prechargeEvent.scheduled());

        // if coming from active state, schedule power event to
        // active power-down else go to precharge power-down
        DPRINTF(DRAMState, "Rank %d sleep at tick %d; current power state is "
                "%d\n", dram_pkt->rank, curTick(), dram_pkt->rankRef.pwrState);

        // default to ACT power-down unless already in IDLE state
        // could be in IDLE if PRE issued before data returned
        PowerState next_pwr_state = PWR_ACT_PDN;
        if (dram_pkt->rankRef.pwrState == PWR_IDLE) {
            next_pwr_state = PWR_PRE_PDN;
        }

        dram_pkt->rankRef.powerDownSleep(next_pwr_state, curTick());
    }

    if (dram_pkt->burstHelper) {
        // it is a split packet
        dram_pkt->burstHelper->burstsServiced++;
        if (dram_pkt->burstHelper->burstsServiced ==
            dram_pkt->burstHelper->burstCount) {
            // we have now serviced all children packets of a system packet
            // so we can now respond to the requester
            // @todo we probably want to have a different front end and back
            // end latency for split packets
            accessAndRespond(dram_pkt->pkt, frontendLatency + backendLatency);
            delete dram_pkt->burstHelper;
            dram_pkt->burstHelper = NULL;
        }
    } else {
        // it is not a split packet
        accessAndRespond(dram_pkt->pkt, frontendLatency + backendLatency);
    }

    delete respQueue.front();
    respQueue.pop_front();

    if (!respQueue.empty()) {
        assert(respQueue.front()->readyTime >= curTick());
        assert(!respondEvent.scheduled());
        schedule(respondEvent, respQueue.front()->readyTime);
    } else {
        // if there is nothing left in any queue, signal a drain
        if (drainState() == DrainState::Draining &&
            writeQueue.empty() && readQueue.empty() && allRanksDrained()) {

            DPRINTF(Drain, "DRAM controller done draining\n");
            signalDrainDone();
        }
    }

    // We have made a location in the queue available at this point,
    // so if there is a read that was forced to wait, retry now
    if (retryRdReq) {
        retryRdReq = false;
        port.sendRetryReq();
    }
}

bool
DRAMCtrl::chooseNext(std::deque<DRAMPacket*>& queue, Tick extra_col_delay)
{
    // This method does the arbitration between requests. The chosen
    // packet is simply moved to the head of the queue. The other
    // methods know that this is the place to look. For example, with
    // FCFS, this method does nothing
    assert(!queue.empty());

    // bool to indicate if a packet to an available rank is found
    bool found_packet = false;
    if (queue.size() == 1) {
        DRAMPacket* dram_pkt = queue.front();
        // available rank corresponds to state refresh idle
        if (ranks[dram_pkt->rank]->isAvailable()) {
            found_packet = true;
            DPRINTF(DRAM, "Single request, going to a free rank\n");
        } else {
            DPRINTF(DRAM, "Single request, going to a busy rank\n");
        }
        return found_packet;
    }

    if (memSchedPolicy == Enums::fcfs) {
        // check if there is a packet going to a free rank
        for (auto i = queue.begin(); i != queue.end() ; ++i) {
            DRAMPacket* dram_pkt = *i;
            if (ranks[dram_pkt->rank]->isAvailable()) {
                queue.erase(i);
                queue.push_front(dram_pkt);
                found_packet = true;
                break;
            }
        }
    } else if (memSchedPolicy == Enums::frfcfs) {
        found_packet = reorderQueue(queue, extra_col_delay);
    } else
        panic("No scheduling policy chosen\n");
    return found_packet;
}

bool
DRAMCtrl::reorderQueue(std::deque<DRAMPacket*>& queue, Tick extra_col_delay)
{
    // Only determine this if needed
    uint64_t earliest_banks = 0;
    bool hidden_bank_prep = false;

    // search for seamless row hits first, if no seamless row hit is
    // found then determine if there are other packets that can be issued
    // without incurring additional bus delay due to bank timing
    // Will select closed rows first to enable more open row possibilities
    // in future selections
    bool found_hidden_bank = false;

    // remember if we found a row hit, not seamless, but bank prepped
    // and ready
    bool found_prepped_pkt = false;

    // if we have no row hit, prepped or not, and no seamless packet,
    // just go for the earliest possible
    bool found_earliest_pkt = false;

    auto selected_pkt_it = queue.end();

    // time we need to issue a column command to be seamless
    const Tick min_col_at = std::max(busBusyUntil - tCL + extra_col_delay,
                                     curTick());

    for (auto i = queue.begin(); i != queue.end() ; ++i) {
        DRAMPacket* dram_pkt = *i;
        const Bank& bank = dram_pkt->bankRef;

        // check if rank is available, if not, jump to the next packet
        if (dram_pkt->rankRef.isAvailable()) {
            // check if it is a row hit
            if (bank.openRow == dram_pkt->row) {
                // no additional rank-to-rank or same bank-group
                // delays, or we switched read/write and might as well
                // go for the row hit
                if (bank.colAllowedAt <= min_col_at) {
                    // FCFS within the hits, giving priority to
                    // commands that can issue seamlessly, without
                    // additional delay, such as same rank accesses
                    // and/or different bank-group accesses
                    DPRINTF(DRAM, "Seamless row buffer hit\n");
                    selected_pkt_it = i;
                    // no need to look through the remaining queue entries
                    break;
                } else if (!found_hidden_bank && !found_prepped_pkt) {
                    // if we did not find a packet to a closed row that can
                    // issue the bank commands without incurring delay, and
                    // did not yet find a packet to a prepped row, remember
                    // the current one
                    selected_pkt_it = i;
                    found_prepped_pkt = true;
                    DPRINTF(DRAM, "Prepped row buffer hit\n");
                }
            } else if (!found_earliest_pkt) {
                // if we have not initialised the bank status, do it
                // now, and only once per scheduling decisions
                if (earliest_banks == 0) {
                    // determine entries with earliest bank delay
                    pair<uint64_t, bool> bankStatus =
                        minBankPrep(queue, min_col_at);
                    earliest_banks = bankStatus.first;
                    hidden_bank_prep = bankStatus.second;
                }

                // bank is amongst first available banks
                // minBankPrep will give priority to packets that can
                // issue seamlessly
                if (bits(earliest_banks, dram_pkt->bankId, dram_pkt->bankId)) {
                    found_earliest_pkt = true;
                    found_hidden_bank = hidden_bank_prep;

                    // give priority to packets that can issue
                    // bank commands 'behind the scenes'
                    // any additional delay if any will be due to
                    // col-to-col command requirements
                    if (hidden_bank_prep || !found_prepped_pkt)
                        selected_pkt_it = i;
                }
            }
        }
    }

    if (selected_pkt_it != queue.end()) {
        DRAMPacket* selected_pkt = *selected_pkt_it;
        queue.erase(selected_pkt_it);
        queue.push_front(selected_pkt);
        return true;
    }

    return false;
}
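
// To summarise the FR-FCFS priority order implemented above: (1) the
// first row hit that can issue its column command seamlessly wins
// outright, (2) otherwise the oldest row hit to an already-prepped
// bank is remembered, and (3) failing both, the oldest packet to one
// of the banks that minBankPrep flags as ready earliest is chosen,
// preferring banks whose ACT can be hidden behind the current burst.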

void
DRAMCtrl::accessAndRespond(PacketPtr pkt, Tick static_latency)
{
    DPRINTF(DRAM, "Responding to Address %lld.. ", pkt->getAddr());

    bool needsResponse = pkt->needsResponse();
    // do the actual memory access which also turns the packet into a
    // response
    access(pkt);

    // turn packet around to go back to requester if response expected
    if (needsResponse) {
        // access already turned the packet into a response
        assert(pkt->isResponse());
        // response_time consumes the static latency and is charged also
        // with headerDelay that takes into account the delay provided by
        // the xbar and also the payloadDelay that takes into account the
        // number of data beats.
        Tick response_time = curTick() + static_latency + pkt->headerDelay +
                             pkt->payloadDelay;
        // Here we reset the timing of the packet before sending it out.
        pkt->headerDelay = pkt->payloadDelay = 0;

        // queue the packet in the response queue to be sent out after
        // the static latency has passed
        port.schedTimingResp(pkt, response_time, true);
    } else {
        // @todo the packet is going to be deleted, and the DRAMPacket
        // still has a pointer to it
        pendingDelete.reset(pkt);
    }

    DPRINTF(DRAM, "Done\n");
}

void
DRAMCtrl::activateBank(Rank& rank_ref, Bank& bank_ref,
                       Tick act_tick, uint32_t row)
{
    assert(rank_ref.actTicks.size() == activationLimit);

    DPRINTF(DRAM, "Activate at tick %d\n", act_tick);

    // update the open row
    assert(bank_ref.openRow == Bank::NO_ROW);
    bank_ref.openRow = row;

    // start counting anew, this covers both the case when we
    // auto-precharged, and when this access is forced to
    // precharge
    bank_ref.bytesAccessed = 0;
    bank_ref.rowAccesses = 0;

    ++rank_ref.numBanksActive;
    assert(rank_ref.numBanksActive <= banksPerRank);

    DPRINTF(DRAM, "Activate bank %d, rank %d at tick %lld, now got %d active\n",
            bank_ref.bank, rank_ref.rank, act_tick,
            ranks[rank_ref.rank]->numBanksActive);

    rank_ref.cmdList.push_back(Command(MemCommand::ACT, bank_ref.bank,
                               act_tick));

    DPRINTF(DRAMPower, "%llu,ACT,%d,%d\n", divCeil(act_tick, tCK) -
            timeStampOffset, bank_ref.bank, rank_ref.rank);

    // The next access has to respect tRAS for this bank
    bank_ref.preAllowedAt = act_tick + tRAS;

    // Respect the row-to-column command delay
    bank_ref.colAllowedAt = std::max(act_tick + tRCD, bank_ref.colAllowedAt);

    // start by enforcing tRRD
    for (int i = 0; i < banksPerRank; i++) {
        // next activate to any bank in this rank must not happen
        // before tRRD
        if (bankGroupArch && (bank_ref.bankgr == rank_ref.banks[i].bankgr)) {
            // bank group architecture requires longer delays between
            // ACT commands within the same bank group. Use tRRD_L
            // in this case
            rank_ref.banks[i].actAllowedAt = std::max(act_tick + tRRD_L,
                                             rank_ref.banks[i].actAllowedAt);
        } else {
            // use shorter tRRD value when either
            // 1) bank group architecture is not supported
            // 2) bank is in a different bank group
            rank_ref.banks[i].actAllowedAt = std::max(act_tick + tRRD,
                                             rank_ref.banks[i].actAllowedAt);
        }
    }

    // next, we deal with tXAW, if the activation limit is disabled
    // then we directly schedule an activate power event
    if (!rank_ref.actTicks.empty()) {
        // sanity check
        if (rank_ref.actTicks.back() &&
            (act_tick - rank_ref.actTicks.back()) < tXAW) {
            panic("Got %d activates in window %d (%llu - %llu) which "
                  "is smaller than %llu\n", activationLimit, act_tick -
                  rank_ref.actTicks.back(), act_tick,
                  rank_ref.actTicks.back(), tXAW);
        }

        // shift the times used for the book keeping, the last element
        // (highest index) is the oldest one and hence the lowest value
        rank_ref.actTicks.pop_back();

        // record a new activation (in the future)
        rank_ref.actTicks.push_front(act_tick);

        // cannot activate more than X times in time window tXAW, push the
        // next one (the X + 1'st activate) to be tXAW away from the
        // oldest in our window of X
        if (rank_ref.actTicks.back() &&
            (act_tick - rank_ref.actTicks.back()) < tXAW) {
            DPRINTF(DRAM, "Enforcing tXAW with X = %d, next activate "
                    "no earlier than %llu\n", activationLimit,
                    rank_ref.actTicks.back() + tXAW);
            for (int j = 0; j < banksPerRank; j++)
                // next activate must not happen before end of window
                rank_ref.banks[j].actAllowedAt =
                    std::max(rank_ref.actTicks.back() + tXAW,
                             rank_ref.banks[j].actAllowedAt);
        }
    }

    // at the point when this activate takes place, make sure we
    // transition to the active power state
    if (!rank_ref.activateEvent.scheduled())
        schedule(rank_ref.activateEvent, act_tick);
    else if (rank_ref.activateEvent.when() > act_tick)
        // move it sooner in time
        reschedule(rank_ref.activateEvent, act_tick);
}
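
// A short illustration of the tXAW bookkeeping above (example values):
// with activationLimit X = 4, actTicks holds the ticks of the last four
// ACTs, newest at the front. After recording a new ACT, if the oldest
// entry (actTicks.back()) is less than tXAW in the past, a fifth ACT
// would violate the four-activation window, so every bank's
// actAllowedAt is pushed out to actTicks.back() + tXAW.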

void
DRAMCtrl::prechargeBank(Rank& rank_ref, Bank& bank, Tick pre_at, bool trace)
{
    // make sure the bank has an open row
    assert(bank.openRow != Bank::NO_ROW);

    // sample the bytes per activate here since we are closing
    // the row
    bytesPerActivate.sample(bank.bytesAccessed);

    bank.openRow = Bank::NO_ROW;

    // no precharge allowed before this one
    bank.preAllowedAt = pre_at;

    Tick pre_done_at = pre_at + tRP;

    bank.actAllowedAt = std::max(bank.actAllowedAt, pre_done_at);

    assert(rank_ref.numBanksActive != 0);
    --rank_ref.numBanksActive;

    DPRINTF(DRAM, "Precharging bank %d, rank %d at tick %lld, now got "
            "%d active\n", bank.bank, rank_ref.rank, pre_at,
            rank_ref.numBanksActive);

    if (trace) {
        rank_ref.cmdList.push_back(Command(MemCommand::PRE, bank.bank,
                                   pre_at));

        DPRINTF(DRAMPower, "%llu,PRE,%d,%d\n", divCeil(pre_at, tCK) -
                timeStampOffset, bank.bank, rank_ref.rank);
    }

    // if we look at the current number of active banks we might be
    // tempted to think the DRAM is now idle, however this can be
    // undone by an activate that is scheduled to happen before we
    // would have reached the idle state, so schedule an event and
    // rather check once we actually make it to the point in time when
    // the (last) precharge takes place
    if (!rank_ref.prechargeEvent.scheduled()) {
        schedule(rank_ref.prechargeEvent, pre_done_at);
        // New event, increment count
        ++rank_ref.outstandingEvents;
    } else if (rank_ref.prechargeEvent.when() < pre_done_at) {
        reschedule(rank_ref.prechargeEvent, pre_done_at);
    }
}

void
DRAMCtrl::doDRAMAccess(DRAMPacket* dram_pkt)
{
    DPRINTF(DRAM, "Timing access to addr %lld, rank/bank/row %d %d %d\n",
            dram_pkt->addr, dram_pkt->rank, dram_pkt->bank, dram_pkt->row);

    // get the rank
    Rank& rank = dram_pkt->rankRef;

    // are we in or transitioning to a low-power state and have not scheduled
    // a power-up event?
    // if so, wake up from power down to issue RD/WR burst
    if (rank.inLowPowerState) {
        assert(rank.pwrState != PWR_SREF);
        rank.scheduleWakeUpEvent(tXP);
    }

    // get the bank
    Bank& bank = dram_pkt->bankRef;

    // for the state we need to track if it is a row hit or not
    bool row_hit = true;

    // respect any constraints on the command (e.g. tRCD or tCCD)
    Tick cmd_at = std::max(bank.colAllowedAt, curTick());

    // Determine the access latency and update the bank state
    if (bank.openRow == dram_pkt->row) {
        // nothing to do
    } else {
        row_hit = false;

        // If there is a page open, precharge it.
        if (bank.openRow != Bank::NO_ROW) {
            prechargeBank(rank, bank, std::max(bank.preAllowedAt, curTick()));
        }

        // next we need to account for the delay in activating the
        // page
        Tick act_tick = std::max(bank.actAllowedAt, curTick());

        // Record the activation and deal with all the global timing
        // constraints caused by a new activation (tRRD and tXAW)
        activateBank(rank, bank, act_tick, dram_pkt->row);

        // issue the command as early as possible
        cmd_at = bank.colAllowedAt;
    }

    // we need to wait until the bus is available before we can issue
    // the command
    cmd_at = std::max(cmd_at, busBusyUntil - tCL);

    // update the packet ready time
    dram_pkt->readyTime = cmd_at + tCL + tBURST;

    // only one burst can use the bus at any one point in time
    assert(dram_pkt->readyTime - busBusyUntil >= tBURST);

    // update the time for the next read/write burst for each
    // bank (add a max with tCCD/tCCD_L here)
    Tick cmd_dly;
    for (int j = 0; j < ranksPerChannel; j++) {
        for (int i = 0; i < banksPerRank; i++) {
            // next burst to same bank group in this rank must not happen
            // before tCCD_L.  Different bank group timing requirement is
            // tBURST; Add tCS for different ranks
            if (dram_pkt->rank == j) {
                if (bankGroupArch &&
                    (bank.bankgr == ranks[j]->banks[i].bankgr)) {
                    // bank group architecture requires longer delays between
                    // RD/WR burst commands to the same bank group.
                    // Use tCCD_L in this case
                    cmd_dly = tCCD_L;
                } else {
                    // use tBURST (equivalent to tCCD_S), the shorter
                    // cas-to-cas delay value, when either:
                    // 1) bank group architecture is not supported
                    // 2) bank is in a different bank group
                    cmd_dly = tBURST;
                }
            } else {
                // different rank is by default in a different bank group
                // use tBURST (equivalent to tCCD_S), which is the shorter
                // cas-to-cas delay in this case
                // Add tCS to account for rank-to-rank bus delay requirements
                cmd_dly = tBURST + tCS;
            }
            ranks[j]->banks[i].colAllowedAt = std::max(cmd_at + cmd_dly,
                                              ranks[j]->banks[i].colAllowedAt);
        }
    }

    // Save rank of current access
    activeRank = dram_pkt->rank;

    // If this is a write, we also need to respect the write recovery
    // time before a precharge, in the case of a read, respect the
    // read to precharge constraint
    bank.preAllowedAt = std::max(bank.preAllowedAt,
                                 dram_pkt->isRead ? cmd_at + tRTP :
                                 dram_pkt->readyTime + tWR);

    // increment the bytes accessed and the accesses per row
    bank.bytesAccessed += burstSize;
    ++bank.rowAccesses;

    // if we reached the max, then issue with an auto-precharge
    bool auto_precharge = pageMgmt == Enums::close ||
        bank.rowAccesses == maxAccessesPerRow;

    // if we did not hit the limit, we might still want to
    // auto-precharge
    if (!auto_precharge &&
        (pageMgmt == Enums::open_adaptive ||
         pageMgmt == Enums::close_adaptive)) {
        // a twist on the open and close page policies:
        // 1) open_adaptive page policy does not blindly keep the
        // page open, but close it if there are no row hits, and there
        // are bank conflicts in the queue
        // 2) close_adaptive page policy does not blindly close the
        // page, but closes it only if there are no row hits in the queue.
        // In this case, only force an auto precharge when there
        // are no same page hits in the queue
        bool got_more_hits = false;
        bool got_bank_conflict = false;

        // either look at the read queue or write queue
        const deque<DRAMPacket*>& queue = dram_pkt->isRead ? readQueue :
            writeQueue;
        auto p = queue.begin();
        // make sure we are not considering the packet that we are
        // currently dealing with (which is the head of the queue)
        ++p;

        // keep on looking until we find a hit or reach the end of the queue
        // 1) if a hit is found, then both open and close adaptive policies keep
        // the page open
        // 2) if no hit is found, got_bank_conflict is set to true if a bank
        // conflict request is waiting in the queue
        while (!got_more_hits && p != queue.end()) {
            bool same_rank_bank = (dram_pkt->rank == (*p)->rank) &&
                (dram_pkt->bank == (*p)->bank);
            bool same_row = dram_pkt->row == (*p)->row;
            got_more_hits |= same_rank_bank && same_row;
            got_bank_conflict |= same_rank_bank && !same_row;
            ++p;
        }

        // auto pre-charge when either
        // 1) open_adaptive policy, we have not got any more hits, and
        // have a bank conflict
        // 2) close_adaptive policy and we have not got any more hits
        auto_precharge = !got_more_hits &&
            (got_bank_conflict || pageMgmt == Enums::close_adaptive);
    }

    // DRAMPower trace command to be written
    std::string mem_cmd = dram_pkt->isRead ? "RD" : "WR";

    // MemCommand required for DRAMPower library
    MemCommand::cmds command = (mem_cmd == "RD") ? MemCommand::RD :
        MemCommand::WR;

    // update the bus busy time
    busBusyUntil = dram_pkt->readyTime;

    DPRINTF(DRAM, "Access to %lld, ready at %lld bus busy until %lld.\n",
            dram_pkt->addr, dram_pkt->readyTime, busBusyUntil);

    dram_pkt->rankRef.cmdList.push_back(Command(command, dram_pkt->bank,
                                        cmd_at));

    DPRINTF(DRAMPower, "%llu,%s,%d,%d\n", divCeil(cmd_at, tCK) -
            timeStampOffset, mem_cmd, dram_pkt->bank, dram_pkt->rank);

    // if this access should use auto-precharge, then we are
    // closing the row after the read/write burst
    if (auto_precharge) {
        // if auto-precharge push a PRE command at the correct tick to the
        // list used by DRAMPower library to calculate power
        prechargeBank(rank, bank, std::max(curTick(), bank.preAllowedAt));

        DPRINTF(DRAM, "Auto-precharged bank: %d\n", dram_pkt->bankId);
    }

    // Update the minimum timing between the requests, this is a
    // conservative estimate of when we have to schedule the next
    // request to not introduce any unnecessary bubbles. In most cases
    // we will wake up sooner than we have to.
    nextReqTime = busBusyUntil - (tRP + tRCD + tCL);

    // Update the stats and schedule the next request
    if (dram_pkt->isRead) {
        ++readsThisTime;
        if (row_hit)
            readRowHits++;
        bytesReadDRAM += burstSize;
        perBankRdBursts[dram_pkt->bankId]++;

        // Update latency stats
        totMemAccLat += dram_pkt->readyTime - dram_pkt->entryTime;
        totBusLat += tBURST;
        totQLat += cmd_at - dram_pkt->entryTime;
    } else {
        ++writesThisTime;
        if (row_hit)
            writeRowHits++;
        bytesWritten += burstSize;
        perBankWrBursts[dram_pkt->bankId]++;
    }
}
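
// Putting the timing above together with illustrative numbers: for a
// row hit the burst completes at cmd_at + tCL + tBURST (e.g. with
// DDR3-1600-like parameters, roughly 13.75 ns of CAS latency plus 5 ns
// of data transfer), while a row miss to a bank with another row open
// first pays tRP for the precharge and tRCD for the activate before
// the column command can even be scheduled.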

void
DRAMCtrl::processNextReqEvent()
{
    int busyRanks = 0;
    for (auto r : ranks) {
        if (!r->isAvailable()) {
            if (r->pwrState != PWR_SREF) {
                // rank is busy refreshing
                DPRINTF(DRAMState, "Rank %d is not available\n", r->rank);
                busyRanks++;

                // let the rank know that if it was waiting to drain, it
                // is now done and ready to proceed
                r->checkDrainDone();
            }

            // check if we were in self-refresh and haven't started
            // to transition out
            if ((r->pwrState == PWR_SREF) && r->inLowPowerState) {
                DPRINTF(DRAMState, "Rank %d is in self-refresh\n", r->rank);
                // if we have commands queued to this rank and we don't have
                // a minimum number of active commands enqueued,
                // exit self-refresh
                if (r->forceSelfRefreshExit()) {
                    DPRINTF(DRAMState, "rank %d was in self refresh and"
                            " should wake up\n", r->rank);
                    //wake up from self-refresh
                    r->scheduleWakeUpEvent(tXS);
                    // things are brought back into action once a refresh is
                    // performed after self-refresh
                    // continue with selection for other ranks
                }
            }
        }
    }

    if (busyRanks == ranksPerChannel) {
        // if all ranks are refreshing wait for them to finish
        // and stall this state machine without taking any further
        // action, and do not schedule a new nextReqEvent
        return;
    }

    // pre-emptively set to false.  Overwrite if in transitioning to
    // a new state
    bool switched_cmd_type = false;
    if (busState != busStateNext) {
        if (busState == READ) {
            DPRINTF(DRAM, "Switching to writes after %d reads with %d reads "
                    "waiting\n", readsThisTime, readQueue.size());

            // sample and reset the read-related stats as we are now
            // transitioning to writes, and all reads are done
            rdPerTurnAround.sample(readsThisTime);
            readsThisTime = 0;

            // now proceed to do the actual writes
            switched_cmd_type = true;
        } else {
            DPRINTF(DRAM, "Switching to reads after %d writes with %d writes "
                    "waiting\n", writesThisTime, writeQueue.size());

            wrPerTurnAround.sample(writesThisTime);
            writesThisTime = 0;

            switched_cmd_type = true;
        }
        // update busState to match next state until next transition
        busState = busStateNext;
    }

    // when we get here it is either a read or a write
    if (busState == READ) {

        // track if we should switch or not
        bool switch_to_writes = false;

        if (readQueue.empty()) {
            // In the case there is no read request to go next,
            // trigger writes if we have passed the low threshold (or
            // if we are draining)
            if (!writeQueue.empty() &&
                (drainState() == DrainState::Draining ||
                 writeQueue.size() > writeLowThreshold)) {

                switch_to_writes = true;
            } else {
                // check if we are drained
                // not done draining until in PWR_IDLE state
                // ensuring all banks are closed and
                // have exited low power states
                if (drainState() == DrainState::Draining &&
                    respQueue.empty() && allRanksDrained()) {

                    DPRINTF(Drain, "DRAM controller done draining\n");
                    signalDrainDone();
                }

                // nothing to do, not even any point in scheduling an
                // event for the next request
                return;
            }
        } else {
            // bool to check if there is a read to a free rank
            bool found_read = false;

            // Figure out which read request goes next, and move it to the
            // front of the read queue
            // If we are changing command type, incorporate the minimum
            // bus turnaround delay which will be tCS (different rank) case
            found_read = chooseNext(readQueue,
                                    switched_cmd_type ? tCS : 0);

            // if no read to an available rank is found then return
            // at this point. There could be writes to the available ranks
            // which are above the required threshold. However, to
            // avoid adding more complexity to the code, return and wait
            // for a refresh event to kick things into action again.
            if (!found_read)
                return;

            DRAMPacket* dram_pkt = readQueue.front();
            assert(dram_pkt->rankRef.isAvailable());

            // here we get a bit creative and shift the bus busy time not
            // just the tWTR, but also a CAS latency to capture the fact
            // that we are allowed to prepare a new bank, but not issue a
            // read command until after tWTR, in essence we capture a
            // bubble on the data bus that is tWTR + tCL
            if (switched_cmd_type && dram_pkt->rank == activeRank) {
                busBusyUntil += tWTR + tCL;
            }

            doDRAMAccess(dram_pkt);

            // At this point we're done dealing with the request
            readQueue.pop_front();

            // Every respQueue which will generate an event, increment count
            ++dram_pkt->rankRef.outstandingEvents;

            // sanity check
            assert(dram_pkt->size <= burstSize);
            assert(dram_pkt->readyTime >= curTick());

            // Insert into response queue. It will be sent back to the
            // requestor at its readyTime
            if (respQueue.empty()) {
                assert(!respondEvent.scheduled());
                schedule(respondEvent, dram_pkt->readyTime);
            } else {
                assert(respQueue.back()->readyTime <= dram_pkt->readyTime);
                assert(respondEvent.scheduled());
            }

            respQueue.push_back(dram_pkt);

            // we have so many writes that we have to transition
            if (writeQueue.size() > writeHighThreshold) {
                switch_to_writes = true;
            }
        }

        // switching to writes, either because the read queue is empty
        // and the writes have passed the low threshold (or we are
        // draining), or because the writes hit the high threshold
        if (switch_to_writes) {
            // transition to writing
            busStateNext = WRITE;
        }
    } else {
        // bool to check if write to free rank is found
        bool found_write = false;

        // If we are changing command type, incorporate the minimum
        // bus turnaround delay
        found_write = chooseNext(writeQueue,
                                 switched_cmd_type ? std::min(tRTW, tCS) : 0);

        // if no writes to an available rank are found then return.
        // There could be reads to the available ranks. However, to avoid
        // adding more complexity to the code, return at this point and wait
        // for a refresh event to kick things into action again.
        if (!found_write)
            return;

        DRAMPacket* dram_pkt = writeQueue.front();
        assert(dram_pkt->rankRef.isAvailable());
        // sanity check
        assert(dram_pkt->size <= burstSize);

        // add a bubble to the data bus, as defined by the
        // tRTW when access is to the same rank as previous burst
        // Different rank timing is handled with tCS, which is
        // applied to colAllowedAt
        if (switched_cmd_type && dram_pkt->rank == activeRank) {
            busBusyUntil += tRTW;
        }

        doDRAMAccess(dram_pkt);

        writeQueue.pop_front();

        // removed write from queue, decrement count
        --dram_pkt->rankRef.writeEntries;

        // Schedule write done event to decrement event count
        // after the readyTime has been reached
        // Only schedule latest write event to minimize events
        // required; only need to ensure that final event scheduled covers
        // the time that writes are outstanding and bus is active
        // to holdoff power-down entry events
        if (!dram_pkt->rankRef.writeDoneEvent.scheduled()) {
            schedule(dram_pkt->rankRef.writeDoneEvent, dram_pkt->readyTime);
            // New event, increment count
            ++dram_pkt->rankRef.outstandingEvents;
        } else if (dram_pkt->rankRef.writeDoneEvent.when() <
                   dram_pkt->readyTime) {
            reschedule(dram_pkt->rankRef.writeDoneEvent, dram_pkt->readyTime);
        }

        isInWriteQueue.erase(burstAlign(dram_pkt->addr));
        delete dram_pkt;

        // If we emptied the write queue, or got sufficiently below the
        // threshold (using the minWritesPerSwitch as the hysteresis) and
        // are not draining, or we have reads waiting and have done enough
        // writes, then switch to reads.
        if (writeQueue.empty() ||
            (writeQueue.size() + minWritesPerSwitch < writeLowThreshold &&
             drainState() != DrainState::Draining) ||
            (!readQueue.empty() && writesThisTime >= minWritesPerSwitch)) {
            // turn the bus back around for reads again
            busStateNext = READ;

            // note that we switch back to reads also in the idle
            // case, which eventually will check for any draining and
            // also pause any further scheduling if there is really
            // nothing to do
        }
    }
    // It is possible that a refresh to another rank kicks things back into
    // action before reaching this point.
    if (!nextReqEvent.scheduled())
        schedule(nextReqEvent, std::max(nextReqTime, curTick()));

    // If there is space available and we have writes waiting then let
    // them retry. This is done here to ensure that the retry does not
    // cause a nextReqEvent to be scheduled before we do so as part of
    // the next request processing
    if (retryWrReq && writeQueue.size() < writeBufferSize) {
        retryWrReq = false;
        port.sendRetryReq();
    }
}
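
// Example of the read/write turnaround hysteresis above (illustrative
// numbers): with a 64-entry write buffer, write_high_thresh_perc 85
// and write_low_thresh_perc 50, writes start draining once more than
// 54 writes are queued and, with minWritesPerSwitch 16, the bus turns
// back to reads once the queue drops below 32 - 16 = 16 entries or 16
// writes have been issued while reads are waiting.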

pair<uint64_t, bool>
DRAMCtrl::minBankPrep(const deque<DRAMPacket*>& queue,
                      Tick min_col_at) const
{
    uint64_t bank_mask = 0;
    Tick min_act_at = MaxTick;

    // latest Tick for which ACT can occur without incurring additional
    // delay on the data bus
    const Tick hidden_act_max = std::max(min_col_at - tRCD, curTick());

    // Flag condition when burst can issue back-to-back with previous burst
    bool found_seamless_bank = false;

    // Flag condition when bank can be opened without incurring additional
    // delay on the data bus
    bool hidden_bank_prep = false;

    // determine if we have queued transactions targeting the
    // banks in question
    vector<bool> got_waiting(ranksPerChannel * banksPerRank, false);
    for (const auto& p : queue) {
        if (p->rankRef.isAvailable())
            got_waiting[p->bankId] = true;
    }

    // Find command with optimal bank timing
    // Will prioritize commands that can issue seamlessly.
    for (int i = 0; i < ranksPerChannel; i++) {
        for (int j = 0; j < banksPerRank; j++) {
            uint16_t bank_id = i * banksPerRank + j;

            // if we have waiting requests for the bank, and it is
            // amongst the first available, update the mask
            if (got_waiting[bank_id]) {
                // make sure this rank is not currently refreshing.
                assert(ranks[i]->isAvailable());
                // simplistic approximation of when the bank can issue
                // an activate, ignoring any rank-to-rank switching
                // cost in this calculation
                Tick act_at = ranks[i]->banks[j].openRow == Bank::NO_ROW ?
                    std::max(ranks[i]->banks[j].actAllowedAt, curTick()) :
                    std::max(ranks[i]->banks[j].preAllowedAt, curTick()) + tRP;

                // When is the earliest the R/W burst can issue?
                Tick col_at = std::max(ranks[i]->banks[j].colAllowedAt,
                                       act_at + tRCD);

                // bank can issue burst back-to-back (seamlessly) with
                // previous burst
                bool new_seamless_bank = col_at <= min_col_at;

                // if we found a new seamless bank or we have no
                // seamless banks, and got a bank with an earlier
                // activate time, it should be added to the bit mask
                if (new_seamless_bank ||
                    (!found_seamless_bank && act_at <= min_act_at)) {
                    // if we did not have a seamless bank before, and
                    // we do now, reset the bank mask, also reset it
                    // if we have not yet found a seamless bank and
                    // the activate time is smaller than what we have
                    // found so far
                    if (!found_seamless_bank &&
                        (new_seamless_bank || act_at < min_act_at)) {
                        bank_mask = 0;
                    }

                    found_seamless_bank |= new_seamless_bank;

                    // ACT can occur 'behind the scenes'
                    hidden_bank_prep = act_at <= hidden_act_max;

                    // set the bit corresponding to the available bank
                    replaceBits(bank_mask, bank_id, bank_id, 1);
                    min_act_at = act_at;
                }
            }
        }
    }

    return make_pair(bank_mask, hidden_bank_prep);
}

DRAMCtrl::Rank::Rank(DRAMCtrl& _memory, const DRAMCtrlParams* _p)
    : EventManager(&_memory), memory(_memory),
      pwrStateTrans(PWR_IDLE), pwrStatePostRefresh(PWR_IDLE),
      pwrStateTick(0), refreshDueAt(0), pwrState(PWR_IDLE),
      refreshState(REF_IDLE), inLowPowerState(false), rank(0),
      readEntries(0), writeEntries(0), outstandingEvents(0),
      wakeUpAllowedAt(0), power(_p, false), numBanksActive(0),
      writeDoneEvent(*this), activateEvent(*this), prechargeEvent(*this),
      refreshEvent(*this), powerEvent(*this), wakeUpEvent(*this)
{ }

void
DRAMCtrl::Rank::startup(Tick ref_tick)
{
    assert(ref_tick > curTick());

    pwrStateTick = curTick();

    // kick off the refresh, and give ourselves enough time to
    // precharge
    schedule(refreshEvent, ref_tick);
}

void
DRAMCtrl::Rank::suspend()
{
    deschedule(refreshEvent);

    // don't automatically transition back to LP state after next REF
    pwrStatePostRefresh = PWR_IDLE;
}

bool
DRAMCtrl::Rank::lowPowerEntryReady() const
{
    bool no_queued_cmds = ((memory.busStateNext == READ) && (readEntries == 0))
                          || ((memory.busStateNext == WRITE) &&
                              (writeEntries == 0));

    if (refreshState == REF_RUN) {
        // have not decremented outstandingEvents for refresh command
        // still check if there are no commands queued to force PD
        // entry after refresh completes
        return no_queued_cmds;
    } else {
        // ensure no commands in Q and no commands scheduled
        return (no_queued_cmds && (outstandingEvents == 0));
    }
}

void
DRAMCtrl::Rank::checkDrainDone()
{
    // if this rank was waiting to drain it is now able to proceed to
    // precharge
    if (refreshState == REF_DRAIN) {
        DPRINTF(DRAM, "Refresh drain done, now precharging\n");

        refreshState = REF_PD_EXIT;

        // hand control back to the refresh event loop
        schedule(refreshEvent, curTick());
    }
}
void
DRAMCtrl::Rank::flushCmdList()
{
    // at the moment sort the list of commands and update the counters
    // for the DRAMPower library when doing a refresh
    sort(cmdList.begin(), cmdList.end(), DRAMCtrl::sortTime);

    auto next_iter = cmdList.begin();
    // push the commands to DRAMPower
    for ( ; next_iter != cmdList.end() ; ++next_iter) {
        Command cmd = *next_iter;
        if (cmd.timeStamp <= curTick()) {
            // Move all commands at or before curTick to DRAMPower
            power.powerlib.doCommand(cmd.type, cmd.bank,
                                     divCeil(cmd.timeStamp, memory.tCK) -
                                     memory.timeStampOffset);
        } else {
            // done - found all commands at or before curTick()
            // next_iter references the 1st command after curTick
            break;
        }
    }
    // reset cmdList to only contain commands after curTick
    // if there are no commands after curTick, updated cmdList will be empty
    // in this case, next_iter is cmdList.end()
    cmdList.assign(next_iter, cmdList.end());
}
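
// Worked example of the cycle conversion above (illustrative numbers): with
// gem5 ticks of 1 ps and tCK = 1250 ticks (DDR3-1600), a command stamped at
// tick 2500000 maps to divCeil(2500000, 1250) = 2000 DRAMPower clock cycles.
// Subtracting timeStampOffset rebases the cycle count so the trace handed to
// DRAMPower starts near zero even when timing simulation begins late (e.g.
// after fast-forwarding); this rebasing rationale is inferred from how
// timeStampOffset is applied throughout this file.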
void
DRAMCtrl::Rank::processActivateEvent()
{
    // we should transition to the active state as soon as any bank is active
    if (pwrState != PWR_ACT)
        // note that at this point numBanksActive could be back at
        // zero again due to a precharge scheduled in the future
        schedulePowerEvent(PWR_ACT, curTick());
}
void
DRAMCtrl::Rank::processPrechargeEvent()
{
    // counter should at least indicate one outstanding request
    // for this precharge
    assert(outstandingEvents > 0);
    // precharge complete, decrement count
    --outstandingEvents;

    // if we reached zero, then special conditions apply as we track
    // if all banks are precharged for the power models
    if (numBanksActive == 0) {
        // no reads to this rank in the Q and no pending
        // RD/WR or refresh commands
        if (lowPowerEntryReady()) {
            // should still be in ACT state since bank still open
            assert(pwrState == PWR_ACT);

            // All banks closed - switch to precharge power down state.
            DPRINTF(DRAMState, "Rank %d sleep at tick %d\n",
                    rank, curTick());
            powerDownSleep(PWR_PRE_PDN, curTick());
        } else {
            // we should transition to the idle state when the last bank
            // is precharged
            schedulePowerEvent(PWR_IDLE, curTick());
        }
    }
}
void
DRAMCtrl::Rank::processWriteDoneEvent()
{
    // counter should at least indicate one outstanding request
    // for this write
    assert(outstandingEvents > 0);
    // Write transfer on bus has completed
    // decrement per rank counter
    --outstandingEvents;
}
void
DRAMCtrl::Rank::processRefreshEvent()
{
    // when first preparing the refresh, remember when it was due
    if ((refreshState == REF_IDLE) || (refreshState == REF_SREF_EXIT)) {
        // remember when the refresh is due
        refreshDueAt = curTick();

        // proceed to drain
        refreshState = REF_DRAIN;

        // make nonzero while refresh is pending to ensure
        // power down and self-refresh are not entered
        ++outstandingEvents;

        DPRINTF(DRAM, "Refresh due\n");
    }

    // let any scheduled read or write to the same rank go ahead, after
    // which it will hand control back to this event loop
    if (refreshState == REF_DRAIN) {
        // if a request is at the moment being handled and this request is
        // accessing the current rank then wait for it to finish
        if ((rank == memory.activeRank)
            && (memory.nextReqEvent.scheduled())) {
            // hand control over to the request loop until it is
            // evaluated next
            DPRINTF(DRAM, "Refresh awaiting draining\n");

            return;
        } else {
            refreshState = REF_PD_EXIT;
        }
    }

    // at this point, ensure that rank is not in a power-down state
    if (refreshState == REF_PD_EXIT) {
        // if rank was sleeping and we haven't started the exit process,
        // wake-up for refresh
        if (inLowPowerState) {
            DPRINTF(DRAM, "Wake Up for refresh\n");
            // save state and return after refresh completes
            scheduleWakeUpEvent(memory.tXP);
            return;
        } else {
            refreshState = REF_PRE;
        }
    }

    // at this point, ensure that all banks are precharged
    if (refreshState == REF_PRE) {
        // precharge any active bank
        if (numBanksActive != 0) {
            // at the moment, we use a precharge all even if there is
            // only a single bank open
            DPRINTF(DRAM, "Precharging all\n");

            // first determine when we can precharge
            Tick pre_at = curTick();

            for (auto &b : banks) {
                // respect both causality and any existing bank
                // constraints, some banks could already have a
                // (auto) precharge scheduled
                pre_at = std::max(b.preAllowedAt, pre_at);
            }

            // make sure all banks per rank are precharged, and for those that
            // already are, update their availability
            Tick act_allowed_at = pre_at + memory.tRP;

            for (auto &b : banks) {
                if (b.openRow != Bank::NO_ROW) {
                    memory.prechargeBank(*this, b, pre_at, false);
                } else {
                    b.actAllowedAt = std::max(b.actAllowedAt, act_allowed_at);
                    b.preAllowedAt = std::max(b.preAllowedAt, pre_at);
                }
            }

            // precharge all banks in rank
            cmdList.push_back(Command(MemCommand::PREA, 0, pre_at));

            DPRINTF(DRAMPower, "%llu,PREA,0,%d\n",
                    divCeil(pre_at, memory.tCK) -
                    memory.timeStampOffset, rank);
        } else if ((pwrState == PWR_IDLE) && (outstandingEvents == 1)) {
            // Banks are closed, have transitioned to IDLE state, and
            // no outstanding ACT,RD/WR,Auto-PRE sequence scheduled
            DPRINTF(DRAM, "All banks already precharged, starting refresh\n");

            // go ahead and kick the power state machine into gear since
            // we are already idle
            schedulePowerEvent(PWR_REF, curTick());
        } else {
            // banks are closed, but we haven't transitioned pwrState to
            // IDLE yet, or there is an outstanding ACT,RD/WR,Auto-PRE
            // sequence scheduled; we should have an outstanding precharge
            // event in this case
            assert(prechargeEvent.scheduled());
            // will start refresh when pwrState transitions to IDLE
        }

        assert(numBanksActive == 0);

        // wait for all banks to be precharged, at which point the
        // power state machine will transition to the idle state, and
        // automatically move to a refresh, at that point it will also
        // call this method to get the refresh event loop going again

        return;
    }

    // last but not least we perform the actual refresh
    if (refreshState == REF_START) {
        // should never get here with any banks active
        assert(numBanksActive == 0);
        assert(pwrState == PWR_REF);

        Tick ref_done_at = curTick() + memory.tRFC;

        for (auto &b : banks) {
            b.actAllowedAt = ref_done_at;
        }

        // at the moment this affects all ranks
        cmdList.push_back(Command(MemCommand::REF, 0, curTick()));

        // Update the stats
        updatePowerStats();

        DPRINTF(DRAMPower, "%llu,REF,0,%d\n", divCeil(curTick(), memory.tCK) -
                memory.timeStampOffset, rank);

        // Update for next refresh
        refreshDueAt += memory.tREFI;

        // make sure we did not wait so long that we cannot make up
        // for it
        if (refreshDueAt < ref_done_at) {
            fatal("Refresh was delayed so long we cannot catch up\n");
        }

        // Run the refresh and schedule event to transition power states
        // when refresh completes
        refreshState = REF_RUN;
        schedule(refreshEvent, ref_done_at);
        return;
    }

    if (refreshState == REF_RUN) {
        // should never get here with any banks active
        assert(numBanksActive == 0);
        assert(pwrState == PWR_REF);

        assert(!powerEvent.scheduled());

        if ((memory.drainState() == DrainState::Draining) ||
            (memory.drainState() == DrainState::Drained)) {
            // if draining, do not re-enter low-power mode.
            // simply go to IDLE and wait
            schedulePowerEvent(PWR_IDLE, curTick());
        } else {
            // At the moment, we sleep when the refresh ends and wait to be
            // woken up again if previously in a low-power state.
            if (pwrStatePostRefresh != PWR_IDLE) {
                // power state should be power refresh
                assert(pwrState == PWR_REF);
                DPRINTF(DRAMState, "Rank %d sleeping after refresh and was in "
                        "power state %d before refreshing\n", rank,
                        pwrStatePostRefresh);
                powerDownSleep(pwrState, curTick());

            // Force PRE power-down if there are no outstanding commands
            // in Q after refresh.
            } else if (lowPowerEntryReady()) {
                DPRINTF(DRAMState, "Rank %d sleeping after refresh but was NOT"
                        " in a low power state before refreshing\n", rank);
                powerDownSleep(PWR_PRE_PDN, curTick());

            } else {
                // move to the idle power state once the refresh is done, this
                // will also move the refresh state machine to the refresh
                // idle state
                schedulePowerEvent(PWR_IDLE, curTick());
            }
        }

        // if transitioning to self refresh do not schedule a new refresh;
        // when waking from self refresh, a refresh is scheduled again.
        if (pwrStateTrans != PWR_SREF) {
            // compensate for the delay in actually performing the refresh
            // when scheduling the next one
            schedule(refreshEvent, refreshDueAt - memory.tRP);

            DPRINTF(DRAMState, "Refresh done at %llu and next refresh"
                    " at %llu\n", curTick(), refreshDueAt);
        }
    }
}
void
DRAMCtrl::Rank::schedulePowerEvent(PowerState pwr_state, Tick tick)
{
    // respect causality
    assert(tick >= curTick());

    if (!powerEvent.scheduled()) {
        DPRINTF(DRAMState, "Scheduling power event at %llu to state %d\n",
                tick, pwr_state);

        // insert the new transition
        pwrStateTrans = pwr_state;

        schedule(powerEvent, tick);
    } else {
        panic("Scheduled power event at %llu to state %d, "
              "with scheduled event at %llu to %d\n", tick, pwr_state,
              powerEvent.when(), pwrStateTrans);
    }
}
void
DRAMCtrl::Rank::powerDownSleep(PowerState pwr_state, Tick tick)
{
    // if the low power state is active power-down, schedule the transition.
    // in reality tCKE is needed to enter active power-down; this is neglected
    // here and could be added in the future.
    if (pwr_state == PWR_ACT_PDN) {
        schedulePowerEvent(pwr_state, tick);
        // push command to DRAMPower
        cmdList.push_back(Command(MemCommand::PDN_F_ACT, 0, tick));
        DPRINTF(DRAMPower, "%llu,PDN_F_ACT,0,%d\n", divCeil(tick,
                memory.tCK) - memory.timeStampOffset, rank);
    } else if (pwr_state == PWR_PRE_PDN) {
        // if the low power state is precharge power-down, schedule the
        // transition. In reality tCKE is needed to enter precharge
        // power-down; this is neglected here.
        schedulePowerEvent(pwr_state, tick);
        // push command to DRAMPower
        cmdList.push_back(Command(MemCommand::PDN_F_PRE, 0, tick));
        DPRINTF(DRAMPower, "%llu,PDN_F_PRE,0,%d\n", divCeil(tick,
                memory.tCK) - memory.timeStampOffset, rank);
    } else if (pwr_state == PWR_REF) {
        // if a refresh just occurred,
        // transition to PRE_PDN now that all banks are closed
        // do not transition to SREF if commands are in Q; stay in PRE_PDN
        if (pwrStatePostRefresh == PWR_ACT_PDN || !lowPowerEntryReady()) {
            // precharge power-down requires tCKE to enter. For simplicity
            // this is not considered.
            schedulePowerEvent(PWR_PRE_PDN, tick);
            // push command to DRAMPower
            cmdList.push_back(Command(MemCommand::PDN_F_PRE, 0, tick));
            DPRINTF(DRAMPower, "%llu,PDN_F_PRE,0,%d\n", divCeil(tick,
                    memory.tCK) - memory.timeStampOffset, rank);
        } else {
            // last low power state was precharge power-down
            assert(pwrStatePostRefresh == PWR_PRE_PDN);
            // self refresh requires time tCKESR to enter. For simplicity,
            // this is not considered.
            schedulePowerEvent(PWR_SREF, tick);
            // push command to DRAMPower
            cmdList.push_back(Command(MemCommand::SREN, 0, tick));
            DPRINTF(DRAMPower, "%llu,SREN,0,%d\n", divCeil(tick,
                    memory.tCK) - memory.timeStampOffset, rank);
        }
    }
    // Ensure that we don't power-down and back up in same tick
    // Once we commit to PD entry, do it and wait for at least 1tCK
    // This could be replaced with tCKE if/when that is added to the model
    wakeUpAllowedAt = tick + memory.tCK;

    // Transitioning to a low power state, set flag
    inLowPowerState = true;
}
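
// The mapping used above between controller power states and DRAMPower trace
// commands: PWR_ACT_PDN -> PDN_F_ACT, PWR_PRE_PDN -> PDN_F_PRE, and PWR_SREF
// -> SREN (self-refresh entry). Reading PDN_F_* as the fast-exit power-down
// variants is an assumption based on DRAMPower's command naming.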
void
DRAMCtrl::Rank::scheduleWakeUpEvent(Tick exit_delay)
{
    Tick wake_up_tick = std::max(curTick(), wakeUpAllowedAt);

    DPRINTF(DRAMState, "Scheduling wake-up for rank %d at tick %d\n",
            rank, wake_up_tick);

    // if waking for refresh, hold previous state
    // else reset state back to IDLE
    if (refreshState == REF_PD_EXIT) {
        pwrStatePostRefresh = pwrState;
    } else {
        // don't automatically transition back to LP state after next REF
        pwrStatePostRefresh = PWR_IDLE;
    }

    // schedule wake-up with event to ensure entry has completed before
    // we try to wake-up
    schedule(wakeUpEvent, wake_up_tick);

    for (auto &b : banks) {
        // respect both causality and any existing bank
        // constraints, some banks could already have a
        // (auto) precharge scheduled
        b.colAllowedAt = std::max(wake_up_tick + exit_delay, b.colAllowedAt);
        b.preAllowedAt = std::max(wake_up_tick + exit_delay, b.preAllowedAt);
        b.actAllowedAt = std::max(wake_up_tick + exit_delay, b.actAllowedAt);
    }
    // Transitioning out of low power state, clear flag
    inLowPowerState = false;

    // push to DRAMPower
    // use pwrStateTrans for cases where we have a power event scheduled
    // to enter low power that has not yet been processed
    if (pwrStateTrans == PWR_ACT_PDN) {
        cmdList.push_back(Command(MemCommand::PUP_ACT, 0, wake_up_tick));
        DPRINTF(DRAMPower, "%llu,PUP_ACT,0,%d\n", divCeil(wake_up_tick,
                memory.tCK) - memory.timeStampOffset, rank);
    } else if (pwrStateTrans == PWR_PRE_PDN) {
        cmdList.push_back(Command(MemCommand::PUP_PRE, 0, wake_up_tick));
        DPRINTF(DRAMPower, "%llu,PUP_PRE,0,%d\n", divCeil(wake_up_tick,
                memory.tCK) - memory.timeStampOffset, rank);
    } else if (pwrStateTrans == PWR_SREF) {
        cmdList.push_back(Command(MemCommand::SREX, 0, wake_up_tick));
        DPRINTF(DRAMPower, "%llu,SREX,0,%d\n", divCeil(wake_up_tick,
                memory.tCK) - memory.timeStampOffset, rank);
    }
}
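
// Usage note: callers pass the exit latency matching the state being left;
// processRefreshEvent() above uses memory.tXP for power-down exit and
// drain() below uses tXS for self-refresh exit. All three bank timestamps
// are pushed out to wake_up_tick + exit_delay so no column, precharge or
// activate command can issue before the exit completes.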
void
DRAMCtrl::Rank::processWakeUpEvent()
{
    // Should be in a power-down or self-refresh state
    assert((pwrState == PWR_ACT_PDN) || (pwrState == PWR_PRE_PDN) ||
           (pwrState == PWR_SREF));

    // Check current state to determine transition state
    if (pwrState == PWR_ACT_PDN) {
        // banks still open, transition to PWR_ACT
        schedulePowerEvent(PWR_ACT, curTick());
    } else {
        // transitioning from a precharge power-down or self-refresh state
        // banks are closed - transition to PWR_IDLE
        schedulePowerEvent(PWR_IDLE, curTick());
    }
}
void
DRAMCtrl::Rank::processPowerEvent()
{
    assert(curTick() >= pwrStateTick);
    // remember where we were, and for how long
    Tick duration = curTick() - pwrStateTick;
    PowerState prev_state = pwrState;

    // update the accounting
    pwrStateTime[prev_state] += duration;

    // track the total idle time
    if ((prev_state == PWR_PRE_PDN) || (prev_state == PWR_ACT_PDN) ||
        (prev_state == PWR_SREF)) {
        totalIdleTime += duration;
    }

    pwrState = pwrStateTrans;
    pwrStateTick = curTick();

    // if rank was refreshing, make sure to start scheduling requests again
    if (prev_state == PWR_REF) {
        // bus idled prior to REF
        // counter should be one for refresh command only
        assert(outstandingEvents == 1);
        // REF complete, decrement count
        --outstandingEvents;

        DPRINTF(DRAMState, "Was refreshing for %llu ticks\n", duration);
        // if sleeping after refresh
        if (pwrState != PWR_IDLE) {
            assert((pwrState == PWR_PRE_PDN) || (pwrState == PWR_SREF));
            DPRINTF(DRAMState, "Switching to power down state after refreshing"
                    " rank %d at %llu tick\n", rank, curTick());
        }
        if (pwrState != PWR_SREF) {
            // rank is not available in SREF
            // don't transition to IDLE in this case
            refreshState = REF_IDLE;
        }
        // a request event could be already scheduled by the state
        // machine of the other rank
        if (!memory.nextReqEvent.scheduled()) {
            DPRINTF(DRAM, "Scheduling next request after refreshing rank %d\n",
                    rank);
            schedule(memory.nextReqEvent, curTick());
        }
    } else if (pwrState == PWR_ACT) {
        if (refreshState == REF_PD_EXIT) {
            // kick the refresh event loop into action again
            assert(prev_state == PWR_ACT_PDN);

            // go back to REF event and close banks
            refreshState = REF_PRE;
            schedule(refreshEvent, curTick());
        }
    } else if (pwrState == PWR_IDLE) {
        DPRINTF(DRAMState, "All banks precharged\n");
        if (prev_state == PWR_SREF) {
            // set refresh state to REF_SREF_EXIT, ensuring isAvailable
            // continues to return false during tXS after SREF exit
            // Schedule a refresh which kicks things back into action
            // when it finishes
            refreshState = REF_SREF_EXIT;
            schedule(refreshEvent, curTick() + memory.tXS);
        } else {
            // if we have a pending refresh, and are now moving to
            // the idle state, directly transition to a refresh
            if ((refreshState == REF_PRE) || (refreshState == REF_PD_EXIT)) {
                // ensure refresh is restarted only after final PRE command.
                // do not restart refresh if controller is in an intermediate
                // state, after PRE_PDN exit, when banks are IDLE but an
                // ACT is scheduled.
                if (!activateEvent.scheduled()) {
                    // there should be nothing waiting at this point
                    assert(!powerEvent.scheduled());
                    // update the state in zero time and proceed below
                    pwrState = PWR_REF;
                } else {
                    // must have PRE scheduled to transition back to IDLE
                    // and re-kick off refresh
                    assert(prechargeEvent.scheduled());
                }
            }
        }
    }

    // we transition to the refresh state, let the refresh state
    // machine know of this state update and let it deal with the
    // scheduling of the next power state transition as well as the
    // following refresh
    if (pwrState == PWR_REF) {
        assert(refreshState == REF_PRE || refreshState == REF_PD_EXIT);
        DPRINTF(DRAMState, "Refreshing\n");

        // kick the refresh event loop into action again, and that
        // in turn will schedule a transition to the idle power
        // state once the refresh is done
        if (refreshState == REF_PD_EXIT) {
            // Wait for PD exit timing to complete before issuing REF
            schedule(refreshEvent, curTick() + memory.tXP);
        } else {
            schedule(refreshEvent, curTick());
        }
        // Banks transitioned to IDLE, start REF
        refreshState = REF_START;
    }
}
void
DRAMCtrl::Rank::updatePowerStats()
{
    // All commands up to refresh have completed
    // flush cmdList to DRAMPower
    flushCmdList();

    // update the counters for DRAMPower, passing false to
    // indicate that this is not the last command in the
    // list. DRAMPower requires this information for the
    // correct calculation of the background energy at the end
    // of the simulation. Ideally we would want to call this
    // function with true once at the end of the
    // simulation. However, the discarded energy is extremely
    // small and does not affect the final results.
    power.powerlib.updateCounters(false);

    // call the energy function
    power.powerlib.calcEnergy();

    // Get the energy and power from DRAMPower
    Data::MemoryPowerModel::Energy energy =
        power.powerlib.getEnergy();
    Data::MemoryPowerModel::Power rank_power =
        power.powerlib.getPower();

    actEnergy = energy.act_energy * memory.devicesPerRank;
    preEnergy = energy.pre_energy * memory.devicesPerRank;
    readEnergy = energy.read_energy * memory.devicesPerRank;
    writeEnergy = energy.write_energy * memory.devicesPerRank;
    refreshEnergy = energy.ref_energy * memory.devicesPerRank;
    actBackEnergy = energy.act_stdby_energy * memory.devicesPerRank;
    preBackEnergy = energy.pre_stdby_energy * memory.devicesPerRank;
    actPowerDownEnergy = energy.f_act_pd_energy * memory.devicesPerRank;
    prePowerDownEnergy = energy.f_pre_pd_energy * memory.devicesPerRank;
    selfRefreshEnergy = energy.sref_energy * memory.devicesPerRank;
    totalEnergy = energy.total_energy * memory.devicesPerRank;
    averagePower = rank_power.average_power * memory.devicesPerRank;
}
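
// Worked example of the scaling above (illustrative configuration): DRAMPower
// models a single device, so with eight x8 devices forming a 64-bit rank
// (devicesPerRank = 8), each per-device energy value is multiplied by 8 to
// obtain the per-rank energies reported by these stats.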
void
DRAMCtrl::Rank::computeStats()
{
    DPRINTF(DRAM, "Computing final stats\n");

    // Force DRAM power to update counters based on time spent in
    // current state up to curTick()
    cmdList.push_back(Command(MemCommand::NOP, 0, curTick()));

    // Update the stats
    updatePowerStats();

    // final update of power state times
    pwrStateTime[pwrState] += (curTick() - pwrStateTick);
    pwrStateTick = curTick();
}
void
DRAMCtrl::Rank::regStats()
{
    using namespace Stats;

    pwrStateTime
        .init(6)
        .name(name() + ".memoryStateTime")
        .desc("Time in different power states");
    pwrStateTime.subname(0, "IDLE");
    pwrStateTime.subname(1, "REF");
    pwrStateTime.subname(2, "SREF");
    pwrStateTime.subname(3, "PRE_PDN");
    pwrStateTime.subname(4, "ACT");
    pwrStateTime.subname(5, "ACT_PDN");

    actEnergy
        .name(name() + ".actEnergy")
        .desc("Energy for activate commands per rank (pJ)");

    preEnergy
        .name(name() + ".preEnergy")
        .desc("Energy for precharge commands per rank (pJ)");

    readEnergy
        .name(name() + ".readEnergy")
        .desc("Energy for read commands per rank (pJ)");

    writeEnergy
        .name(name() + ".writeEnergy")
        .desc("Energy for write commands per rank (pJ)");

    refreshEnergy
        .name(name() + ".refreshEnergy")
        .desc("Energy for refresh commands per rank (pJ)");

    actBackEnergy
        .name(name() + ".actBackEnergy")
        .desc("Energy for active background per rank (pJ)");

    preBackEnergy
        .name(name() + ".preBackEnergy")
        .desc("Energy for precharge background per rank (pJ)");

    actPowerDownEnergy
        .name(name() + ".actPowerDownEnergy")
        .desc("Energy for active power-down per rank (pJ)");

    prePowerDownEnergy
        .name(name() + ".prePowerDownEnergy")
        .desc("Energy for precharge power-down per rank (pJ)");

    selfRefreshEnergy
        .name(name() + ".selfRefreshEnergy")
        .desc("Energy for self refresh per rank (pJ)");

    totalEnergy
        .name(name() + ".totalEnergy")
        .desc("Total energy per rank (pJ)");

    averagePower
        .name(name() + ".averagePower")
        .desc("Core power per rank (mW)");

    totalIdleTime
        .name(name() + ".totalIdleTime")
        .desc("Total idle time per DRAM rank");

    registerDumpCallback(new RankDumpCallback(this));
}
void
DRAMCtrl::regStats()
{
    using namespace Stats;

    AbstractMemory::regStats();

    for (auto r : ranks) {
        r->regStats();
    }

    readReqs
        .name(name() + ".readReqs")
        .desc("Number of read requests accepted");

    writeReqs
        .name(name() + ".writeReqs")
        .desc("Number of write requests accepted");

    readBursts
        .name(name() + ".readBursts")
        .desc("Number of DRAM read bursts, "
              "including those serviced by the write queue");

    writeBursts
        .name(name() + ".writeBursts")
        .desc("Number of DRAM write bursts, "
              "including those merged in the write queue");

    servicedByWrQ
        .name(name() + ".servicedByWrQ")
        .desc("Number of DRAM read bursts serviced by the write queue");

    mergedWrBursts
        .name(name() + ".mergedWrBursts")
        .desc("Number of DRAM write bursts merged with an existing one");

    neitherReadNorWrite
        .name(name() + ".neitherReadNorWriteReqs")
        .desc("Number of requests that are neither read nor write");

    perBankRdBursts
        .init(banksPerRank * ranksPerChannel)
        .name(name() + ".perBankRdBursts")
        .desc("Per bank read bursts");

    perBankWrBursts
        .init(banksPerRank * ranksPerChannel)
        .name(name() + ".perBankWrBursts")
        .desc("Per bank write bursts");

    avgRdQLen
        .name(name() + ".avgRdQLen")
        .desc("Average read queue length when enqueuing")
        .precision(2);

    avgWrQLen
        .name(name() + ".avgWrQLen")
        .desc("Average write queue length when enqueuing")
        .precision(2);

    totQLat
        .name(name() + ".totQLat")
        .desc("Total ticks spent queuing");

    totBusLat
        .name(name() + ".totBusLat")
        .desc("Total ticks spent in databus transfers");

    totMemAccLat
        .name(name() + ".totMemAccLat")
        .desc("Total ticks spent from burst creation until serviced "
              "by the DRAM");

    avgQLat
        .name(name() + ".avgQLat")
        .desc("Average queueing delay per DRAM burst")
        .precision(2);

    avgQLat = totQLat / (readBursts - servicedByWrQ);

    avgBusLat
        .name(name() + ".avgBusLat")
        .desc("Average bus latency per DRAM burst")
        .precision(2);

    avgBusLat = totBusLat / (readBursts - servicedByWrQ);

    avgMemAccLat
        .name(name() + ".avgMemAccLat")
        .desc("Average memory access latency per DRAM burst")
        .precision(2);

    avgMemAccLat = totMemAccLat / (readBursts - servicedByWrQ);

    numRdRetry
        .name(name() + ".numRdRetry")
        .desc("Number of times read queue was full causing retry");

    numWrRetry
        .name(name() + ".numWrRetry")
        .desc("Number of times write queue was full causing retry");

    readRowHits
        .name(name() + ".readRowHits")
        .desc("Number of row buffer hits during reads");

    writeRowHits
        .name(name() + ".writeRowHits")
        .desc("Number of row buffer hits during writes");

    readRowHitRate
        .name(name() + ".readRowHitRate")
        .desc("Row buffer hit rate for reads")
        .precision(2);

    readRowHitRate = (readRowHits / (readBursts - servicedByWrQ)) * 100;

    writeRowHitRate
        .name(name() + ".writeRowHitRate")
        .desc("Row buffer hit rate for writes")
        .precision(2);

    writeRowHitRate = (writeRowHits / (writeBursts - mergedWrBursts)) * 100;

    readPktSize
        .init(ceilLog2(burstSize) + 1)
        .name(name() + ".readPktSize")
        .desc("Read request sizes (log2)");

    writePktSize
        .init(ceilLog2(burstSize) + 1)
        .name(name() + ".writePktSize")
        .desc("Write request sizes (log2)");

    rdQLenPdf
        .init(readBufferSize)
        .name(name() + ".rdQLenPdf")
        .desc("What read queue length does an incoming req see");

    wrQLenPdf
        .init(writeBufferSize)
        .name(name() + ".wrQLenPdf")
        .desc("What write queue length does an incoming req see");

    bytesPerActivate
        .init(maxAccessesPerRow)
        .name(name() + ".bytesPerActivate")
        .desc("Bytes accessed per row activation")
        .flags(nozero);

    rdPerTurnAround
        .init(readBufferSize)
        .name(name() + ".rdPerTurnAround")
        .desc("Reads before turning the bus around for writes")
        .flags(nozero);

    wrPerTurnAround
        .init(writeBufferSize)
        .name(name() + ".wrPerTurnAround")
        .desc("Writes before turning the bus around for reads")
        .flags(nozero);

    bytesReadDRAM
        .name(name() + ".bytesReadDRAM")
        .desc("Total number of bytes read from DRAM");

    bytesReadWrQ
        .name(name() + ".bytesReadWrQ")
        .desc("Total number of bytes read from write queue");

    bytesWritten
        .name(name() + ".bytesWritten")
        .desc("Total number of bytes written to DRAM");

    bytesReadSys
        .name(name() + ".bytesReadSys")
        .desc("Total read bytes from the system interface side");

    bytesWrittenSys
        .name(name() + ".bytesWrittenSys")
        .desc("Total written bytes from the system interface side");

    avgRdBW
        .name(name() + ".avgRdBW")
        .desc("Average DRAM read bandwidth in MiByte/s")
        .precision(2);

    avgRdBW = (bytesReadDRAM / 1000000) / simSeconds;

    avgWrBW
        .name(name() + ".avgWrBW")
        .desc("Average achieved write bandwidth in MiByte/s")
        .precision(2);

    avgWrBW = (bytesWritten / 1000000) / simSeconds;

    avgRdBWSys
        .name(name() + ".avgRdBWSys")
        .desc("Average system read bandwidth in MiByte/s")
        .precision(2);

    avgRdBWSys = (bytesReadSys / 1000000) / simSeconds;

    avgWrBWSys
        .name(name() + ".avgWrBWSys")
        .desc("Average system write bandwidth in MiByte/s")
        .precision(2);

    avgWrBWSys = (bytesWrittenSys / 1000000) / simSeconds;

    peakBW
        .name(name() + ".peakBW")
        .desc("Theoretical peak bandwidth in MiByte/s")
        .precision(2);

    peakBW = (SimClock::Frequency / tBURST) * burstSize / 1000000;

    busUtil
        .name(name() + ".busUtil")
        .desc("Data bus utilization in percentage")
        .precision(2);
    busUtil = (avgRdBW + avgWrBW) / peakBW * 100;

    totGap
        .name(name() + ".totGap")
        .desc("Total gap between requests");

    avgGap
        .name(name() + ".avgGap")
        .desc("Average gap between requests")
        .precision(2);

    avgGap = totGap / (readReqs + writeReqs);

    // Stats for DRAM Power calculation based on Micron datasheet
    busUtilRead
        .name(name() + ".busUtilRead")
        .desc("Data bus utilization in percentage for reads")
        .precision(2);

    busUtilRead = avgRdBW / peakBW * 100;

    busUtilWrite
        .name(name() + ".busUtilWrite")
        .desc("Data bus utilization in percentage for writes")
        .precision(2);

    busUtilWrite = avgWrBW / peakBW * 100;

    pageHitRate
        .name(name() + ".pageHitRate")
        .desc("Row buffer hit rate, read and write combined")
        .precision(2);

    pageHitRate = (writeRowHits + readRowHits) /
        (writeBursts - mergedWrBursts + readBursts - servicedByWrQ) * 100;
}
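
// Worked example for peakBW above (illustrative DDR3-1600-like numbers): with
// SimClock::Frequency = 1e12 ticks/s, tBURST = 5000 ticks (5 ns) and
// burstSize = 64 bytes, peakBW = (1e12 / 5000) * 64 / 1000000 = 12800, and
// busUtil then reports the achieved read plus write bandwidth as a
// percentage of that peak. Note the 10^6 divisor makes these strictly MB/s
// despite the MiByte/s wording in the descriptions.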
void
DRAMCtrl::recvFunctional(PacketPtr pkt)
{
    // rely on the abstract memory
    functionalAccess(pkt);
}
BaseSlavePort&
DRAMCtrl::getSlavePort(const string &if_name, PortID idx)
{
    if (if_name != "port") {
        return MemObject::getSlavePort(if_name, idx);
    } else {
        return port;
    }
}
DrainState
DRAMCtrl::drain()
{
    // if there is anything in any of our internal queues, keep track
    // of that as well
    if (!(writeQueue.empty() && readQueue.empty() && respQueue.empty() &&
          allRanksDrained())) {

        DPRINTF(Drain, "DRAM controller not drained, write: %d, read: %d,"
                " resp: %d\n", writeQueue.size(), readQueue.size(),
                respQueue.size());

        // the only queue that is not drained automatically over time
        // is the write queue, thus kick things into action if needed
        if (!writeQueue.empty() && !nextReqEvent.scheduled()) {
            schedule(nextReqEvent, curTick());
        }

        // also need to kick off events to exit self-refresh
        for (auto r : ranks) {
            // force self-refresh exit, which in turn will issue auto-refresh
            if (r->pwrState == PWR_SREF) {
                DPRINTF(DRAM, "Rank%d: Forcing self-refresh wakeup in drain\n",
                        r->rank);
                r->scheduleWakeUpEvent(tXS);
            }
        }

        return DrainState::Draining;
    } else {
        return DrainState::Drained;
    }
}
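
// Note on the drain protocol (standard gem5 behaviour): returning
// DrainState::Draining obliges this object to call signalDrainDone() later,
// once the queues have emptied and all ranks are idle; the call site for
// that completion is elsewhere in this file, so only the obligation is
// summarized here.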
bool
DRAMCtrl::allRanksDrained() const
{
    // true until proven false
    bool all_ranks_drained = true;
    for (auto r : ranks) {
        // then verify that the power state is IDLE
        // ensuring all banks are closed and rank is not in a low power state
        all_ranks_drained = r->inPwrIdleState() && all_ranks_drained;
    }
    return all_ranks_drained;
}
void
DRAMCtrl::drainResume()
{
    if (!isTimingMode && system()->isTimingMode()) {
        // if we switched to timing mode, kick things into action,
        // and behave as if we restored from a checkpoint
        startup();
    } else if (isTimingMode && !system()->isTimingMode()) {
        // if we switch from timing mode, stop the refresh events to
        // not cause issues with KVM
        for (auto r : ranks) {
            r->suspend();
        }
    }

    // update the mode
    isTimingMode = system()->isTimingMode();
}
DRAMCtrl::MemoryPort::MemoryPort(const std::string& name, DRAMCtrl& _memory)
    : QueuedSlavePort(name, &_memory, queue), queue(_memory, *this),
      memory(_memory)
{ }
AddrRangeList
DRAMCtrl::MemoryPort::getAddrRanges() const
{
    AddrRangeList ranges;
    ranges.push_back(memory.getAddrRange());
    return ranges;
}
void
DRAMCtrl::MemoryPort::recvFunctional(PacketPtr pkt)
{
    pkt->pushLabel(memory.name());

    if (!queue.checkFunctional(pkt)) {
        // Default implementation of SimpleTimingPort::recvFunctional()
        // calls recvAtomic() and throws away the latency; we can save a
        // little here by just not calculating the latency.
        memory.recvFunctional(pkt);
    }

    pkt->popLabel();
}
Tick
DRAMCtrl::MemoryPort::recvAtomic(PacketPtr pkt)
{
    return memory.recvAtomic(pkt);
}
bool
DRAMCtrl::MemoryPort::recvTimingReq(PacketPtr pkt)
{
    // pass it to the memory controller
    return memory.recvTimingReq(pkt);
}
DRAMCtrl*
DRAMCtrlParams::create()
{
    return new DRAMCtrl(this);
}