/*
 * Copyright (c) 2010-2014 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2013 Amin Farmahini-Farahani
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Andreas Hansson
 */
#include "base/bitfield.hh"
#include "base/trace.hh"
#include "debug/DRAM.hh"
#include "debug/Drain.hh"
#include "mem/dram_ctrl.hh"
#include "sim/system.hh"

using namespace std;
DRAMCtrl::DRAMCtrl(const DRAMCtrlParams* p) :
    AbstractMemory(p),
    port(name() + ".port", *this),
    retryRdReq(false), retryWrReq(false),
    rowHitFlag(false), busState(READ),
    respondEvent(this), refreshEvent(this),
    nextReqEvent(this), drainManager(NULL),
    deviceBusWidth(p->device_bus_width), burstLength(p->burst_length),
    deviceRowBufferSize(p->device_rowbuffer_size),
    devicesPerRank(p->devices_per_rank),
    burstSize((devicesPerRank * burstLength * deviceBusWidth) / 8),
    rowBufferSize(devicesPerRank * deviceRowBufferSize),
    columnsPerRowBuffer(rowBufferSize / burstSize),
    ranksPerChannel(p->ranks_per_channel),
    banksPerRank(p->banks_per_rank), channels(p->channels), rowsPerBank(0),
    readBufferSize(p->read_buffer_size),
    writeBufferSize(p->write_buffer_size),
    writeHighThreshold(writeBufferSize * p->write_high_thresh_perc / 100.0),
    writeLowThreshold(writeBufferSize * p->write_low_thresh_perc / 100.0),
    minWritesPerSwitch(p->min_writes_per_switch),
    writesThisTime(0), readsThisTime(0),
    tWTR(p->tWTR), tRTW(p->tRTW), tBURST(p->tBURST),
    tRCD(p->tRCD), tCL(p->tCL), tRP(p->tRP), tRAS(p->tRAS),
    tRFC(p->tRFC), tREFI(p->tREFI), tRRD(p->tRRD),
    tXAW(p->tXAW), activationLimit(p->activation_limit),
    memSchedPolicy(p->mem_sched_policy), addrMapping(p->addr_mapping),
    pageMgmt(p->page_policy),
    maxAccessesPerRow(p->max_accesses_per_row),
    frontendLatency(p->static_frontend_latency),
    backendLatency(p->static_backend_latency),
    busBusyUntil(0), refreshDueAt(0), refreshState(REF_IDLE), prevArrival(0),
    nextReqTime(0), idleStartTick(0), numBanksActive(0)
{
    // create the bank states based on the dimensions of the ranks and
    // banks
    banks.resize(ranksPerChannel);
    actTicks.resize(ranksPerChannel);
    for (size_t c = 0; c < ranksPerChannel; ++c) {
        banks[c].resize(banksPerRank);
        actTicks[c].resize(activationLimit, 0);
    }
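
    // Worked example of the derived geometry above (illustrative values,
    // not taken from any particular configuration): with 8 devices per
    // rank, a burst length of 8 and an 8-bit device bus,
    // burstSize = (8 * 8 * 8) / 8 = 64 bytes, i.e. one typical cache line
    // per burst. With a 1kB row buffer per device, rowBufferSize = 8kB
    // and columnsPerRowBuffer = 8192 / 64 = 128.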
    // perform a basic check of the write thresholds
    if (p->write_low_thresh_perc >= p->write_high_thresh_perc)
        fatal("Write buffer low threshold %d must be smaller than the "
              "high threshold %d\n", p->write_low_thresh_perc,
              p->write_high_thresh_perc);
    // determine the rows per bank by looking at the total capacity
    uint64_t capacity = ULL(1) << ceilLog2(AbstractMemory::size());

    DPRINTF(DRAM, "Memory capacity %lld (%lld) bytes\n", capacity,
            AbstractMemory::size());

    DPRINTF(DRAM, "Row buffer size %d bytes with %d columns per row buffer\n",
            rowBufferSize, columnsPerRowBuffer);

    rowsPerBank = capacity / (rowBufferSize * banksPerRank * ranksPerChannel);
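
    // For example (values assumed for illustration only): a 4GB channel
    // with an 8kB row buffer, 8 banks per rank and 2 ranks gives
    // rowsPerBank = 2^32 / (8192 * 8 * 2) = 32768 rows per bank.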
    if (range.interleaved()) {
        if (channels != range.stripes())
            fatal("%s has %d interleaved address stripes but %d channel(s)\n",
                  name(), range.stripes(), channels);

        if (addrMapping == Enums::RoRaBaChCo) {
            if (rowBufferSize != range.granularity()) {
                fatal("Interleaving of %s doesn't match RoRaBaChCo "
                      "address map\n", name());
            }
        } else if (addrMapping == Enums::RoRaBaCoCh) {
            if (system()->cacheLineSize() != range.granularity()) {
                fatal("Interleaving of %s doesn't match RoRaBaCoCh "
                      "address map\n", name());
            }
        } else if (addrMapping == Enums::RoCoRaBaCh) {
            if (system()->cacheLineSize() != range.granularity())
                fatal("Interleaving of %s doesn't match RoCoRaBaCh "
                      "address map\n", name());
        }
    }
    // some basic sanity checks
    if (tREFI <= tRP || tREFI <= tRFC) {
        fatal("tREFI (%d) must be larger than tRP (%d) and tRFC (%d)\n",
              tREFI, tRP, tRFC);
    }
}

void
DRAMCtrl::init()
{
    if (!port.isConnected()) {
        fatal("DRAMCtrl %s is unconnected!\n", name());
    } else {
        port.sendRangeChange();
    }
}
void
DRAMCtrl::startup()
{
    // update the start tick for the precharge accounting to the
    // current tick
    idleStartTick = curTick();

    // shift the bus busy time sufficiently far ahead that we never
    // have to worry about negative values when computing the time for
    // the next request, this will add an insignificant bubble at the
    // start of simulation
    busBusyUntil = curTick() + tRP + tRCD + tCL;

    // print the configuration of the controller
    printParams();

    // kick off the refresh, and give ourselves enough time to
    // precharge
    schedule(refreshEvent, curTick() + tREFI - tRP);
}
Tick
DRAMCtrl::recvAtomic(PacketPtr pkt)
{
    DPRINTF(DRAM, "recvAtomic: %s 0x%x\n", pkt->cmdString(), pkt->getAddr());

    // do the actual memory access and turn the packet into a response
    access(pkt);

    Tick latency = 0;
    if (!pkt->memInhibitAsserted() && pkt->hasData()) {
        // this value is not supposed to be accurate, just enough to
        // keep things going, mimic a closed page
        latency = tRP + tRCD + tCL;
    }
    return latency;
}
bool
DRAMCtrl::readQueueFull(unsigned int neededEntries) const
{
    DPRINTF(DRAM, "Read queue limit %d, current size %d, entries needed %d\n",
            readBufferSize, readQueue.size() + respQueue.size(),
            neededEntries);

    return
        (readQueue.size() + respQueue.size() + neededEntries) > readBufferSize;
}
bool
DRAMCtrl::writeQueueFull(unsigned int neededEntries) const
{
    DPRINTF(DRAM, "Write queue limit %d, current size %d, entries needed %d\n",
            writeBufferSize, writeQueue.size(), neededEntries);
    return (writeQueue.size() + neededEntries) > writeBufferSize;
}
DRAMCtrl::DRAMPacket*
DRAMCtrl::decodeAddr(PacketPtr pkt, Addr dramPktAddr, unsigned size,
                     bool isRead)
{
    // decode the address based on the address mapping scheme, with
    // Ro, Ra, Co, Ba and Ch denoting row, rank, column, bank and
    // channel, respectively
    uint8_t rank;
    uint8_t bank;
    uint16_t row;

    // truncate the address to the access granularity
    Addr addr = dramPktAddr / burstSize;

    // we have removed the lowest order address bits that denote the
    // position within the column
    if (addrMapping == Enums::RoRaBaChCo) {
        // the lowest order bits denote the column to ensure that
        // sequential cache lines occupy the same row
        addr = addr / columnsPerRowBuffer;

        // take out the channel part of the address
        addr = addr / channels;

        // after the channel bits, get the bank bits to interleave
        // over the banks
        bank = addr % banksPerRank;
        addr = addr / banksPerRank;

        // after the bank, we get the rank bits which thus interleaves
        // over the ranks
        rank = addr % ranksPerChannel;
        addr = addr / ranksPerChannel;

        // lastly, get the row bits
        row = addr % rowsPerBank;
        addr = addr / rowsPerBank;
    } else if (addrMapping == Enums::RoRaBaCoCh) {
        // take out the channel part of the address
        addr = addr / channels;

        // next, the column
        addr = addr / columnsPerRowBuffer;

        // after the column bits, we get the bank bits to interleave
        // over the banks
        bank = addr % banksPerRank;
        addr = addr / banksPerRank;

        // after the bank, we get the rank bits which thus interleaves
        // over the ranks
        rank = addr % ranksPerChannel;
        addr = addr / ranksPerChannel;

        // lastly, get the row bits
        row = addr % rowsPerBank;
        addr = addr / rowsPerBank;
    } else if (addrMapping == Enums::RoCoRaBaCh) {
        // optimise for closed page mode and utilise maximum
        // parallelism of the DRAM (at the cost of power)

        // take out the channel part of the address, note that this has
        // to match with how accesses are interleaved between the
        // controllers in the address mapping
        addr = addr / channels;

        // start with the bank bits, as this provides the maximum
        // opportunity for parallelism between requests
        bank = addr % banksPerRank;
        addr = addr / banksPerRank;

        // next get the rank bits
        rank = addr % ranksPerChannel;
        addr = addr / ranksPerChannel;

        // next the column bits which we do not need to keep track of
        // and simply skip past
        addr = addr / columnsPerRowBuffer;

        // lastly, get the row bits
        row = addr % rowsPerBank;
        addr = addr / rowsPerBank;
    } else
        panic("Unknown address mapping policy chosen!");

    assert(rank < ranksPerChannel);
    assert(bank < banksPerRank);
    assert(row < rowsPerBank);

    DPRINTF(DRAM, "Address: %lld Rank %d Bank %d Row %d\n",
            dramPktAddr, rank, bank, row);

    // create the corresponding DRAM packet with the entry time and
    // ready time set to the current tick, the latter will be updated
    // later
    uint16_t bank_id = banksPerRank * rank + bank;
    return new DRAMPacket(pkt, isRead, rank, bank, row, bank_id, dramPktAddr,
                          size, banks[rank][bank]);
}
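
// Illustrative walk-through of decodeAddr() (all values assumed, not from
// any particular configuration): with RoRaBaChCo, burstSize = 64,
// columnsPerRowBuffer = 128, channels = 1, banksPerRank = 8 and
// ranksPerChannel = 2, a packet at dramPktAddr = 0x200040 first becomes
// addr = 0x200040 / 64 = 32769; dividing out the 128 columns and the
// single channel leaves 256, so bank = 256 % 8 = 0, then rank = 32 % 2 = 0,
// and finally row = 16.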
void
DRAMCtrl::addToReadQueue(PacketPtr pkt, unsigned int pktCount)
{
    // only add to the read queue here. whenever the request is
    // eventually done, set the readyTime, and call schedule()
    assert(!pkt->isWrite());

    assert(pktCount != 0);

    // if the request size is larger than burst size, the pkt is split into
    // multiple DRAM packets
    // Note if the pkt starting address is not aligned to burst size, the
    // address of first DRAM packet is kept unaligned. Subsequent DRAM packets
    // are aligned to burst size boundaries. This is to ensure we accurately
    // check read packets against packets in write queue.
    Addr addr = pkt->getAddr();
    unsigned pktsServicedByWrQ = 0;
    BurstHelper* burst_helper = NULL;
    for (int cnt = 0; cnt < pktCount; ++cnt) {
        unsigned size = std::min((addr | (burstSize - 1)) + 1,
                                 pkt->getAddr() + pkt->getSize()) - addr;
        readPktSize[ceilLog2(size)]++;
        readBursts++;

        // First check write buffer to see if the data is already at
        // the controller
        bool foundInWrQ = false;
        for (auto i = writeQueue.begin(); i != writeQueue.end(); ++i) {
            // check if the read is subsumed in the write entry we are
            // looking at
            if ((*i)->addr <= addr &&
                (addr + size) <= ((*i)->addr + (*i)->size)) {
                foundInWrQ = true;
                servicedByWrQ++;
                pktsServicedByWrQ++;
                DPRINTF(DRAM, "Read to addr %lld with size %d serviced by "
                        "write queue\n", addr, size);
                bytesReadWrQ += burstSize;
                break;
            }
        }

        // If not found in the write q, make a DRAM packet and
        // push it onto the read queue
        if (!foundInWrQ) {

            // Make the burst helper for split packets
            if (pktCount > 1 && burst_helper == NULL) {
                DPRINTF(DRAM, "Read to addr %lld translates to %d "
                        "dram requests\n", pkt->getAddr(), pktCount);
                burst_helper = new BurstHelper(pktCount);
            }

            DRAMPacket* dram_pkt = decodeAddr(pkt, addr, size, true);
            dram_pkt->burstHelper = burst_helper;

            assert(!readQueueFull(1));
            rdQLenPdf[readQueue.size() + respQueue.size()]++;

            DPRINTF(DRAM, "Adding to read queue\n");

            readQueue.push_back(dram_pkt);

            // Update stats
            avgRdQLen = readQueue.size() + respQueue.size();
        }

        // Starting address of next dram pkt (aligned to burstSize boundary)
        addr = (addr | (burstSize - 1)) + 1;
    }

    // If all packets are serviced by write queue, we send the response back
    if (pktsServicedByWrQ == pktCount) {
        accessAndRespond(pkt, frontendLatency);
        return;
    }

    // Update how many split packets are serviced by write queue
    if (burst_helper != NULL)
        burst_helper->burstsServiced = pktsServicedByWrQ;

    // If we are not already scheduled to get a request out of the
    // queue, do so now
    if (!nextReqEvent.scheduled()) {
        DPRINTF(DRAM, "Request scheduled immediately\n");
        schedule(nextReqEvent, curTick());
    }
}
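
// Example of the burst-splitting arithmetic above (assumed values): with
// burstSize = 64, a 64-byte read starting at address 0x70 spans two bursts.
// The first DRAM packet keeps the unaligned address 0x70 with
// size = min((0x70 | 63) + 1, 0x70 + 64) - 0x70 = 16 bytes, and the next
// packet starts at the aligned address (0x70 | 63) + 1 = 0x80.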
void
DRAMCtrl::addToWriteQueue(PacketPtr pkt, unsigned int pktCount)
{
    // only add to the write queue here. whenever the request is
    // eventually done, set the readyTime, and call schedule()
    assert(pkt->isWrite());

    // if the request size is larger than burst size, the pkt is split into
    // multiple DRAM packets
    Addr addr = pkt->getAddr();
    for (int cnt = 0; cnt < pktCount; ++cnt) {
        unsigned size = std::min((addr | (burstSize - 1)) + 1,
                                 pkt->getAddr() + pkt->getSize()) - addr;
        writePktSize[ceilLog2(size)]++;
        writeBursts++;

        // see if we can merge with an existing item in the write
        // queue and keep track of whether we have merged or not so we
        // can stop at that point and also avoid enqueueing a new
        // request
        bool merged = false;
        auto w = writeQueue.begin();

        while(!merged && w != writeQueue.end()) {
            // either of the two could be first, if they are the same
            // it does not matter which way we go
            if ((*w)->addr >= addr) {
                // the existing one starts after the new one, figure
                // out where the new one ends with respect to the
                // existing one
                if ((addr + size) >= ((*w)->addr + (*w)->size)) {
                    // check if the existing one is completely
                    // subsumed in the new one
                    DPRINTF(DRAM, "Merging write covering existing burst\n");
                    merged = true;
                    // update both the address and the size
                    (*w)->addr = addr;
                    (*w)->size = size;
                } else if ((addr + size) >= (*w)->addr &&
                           ((*w)->addr + (*w)->size - addr) <= burstSize) {
                    // the new one is just before or partially
                    // overlapping with the existing one, and together
                    // they fit within a burst
                    DPRINTF(DRAM, "Merging write before existing burst\n");
                    merged = true;
                    // the existing queue item needs to be adjusted with
                    // respect to both address and size
                    (*w)->size = (*w)->addr + (*w)->size - addr;
                    (*w)->addr = addr;
                }
            } else {
                // the new one starts after the current one, figure
                // out where the existing one ends with respect to the
                // new one
                if (((*w)->addr + (*w)->size) >= (addr + size)) {
                    // check if the new one is completely subsumed in the
                    // existing one
                    DPRINTF(DRAM, "Merging write into existing burst\n");
                    merged = true;
                    // no adjustments necessary
                } else if (((*w)->addr + (*w)->size) >= addr &&
                           (addr + size - (*w)->addr) <= burstSize) {
                    // the existing one is just before or partially
                    // overlapping with the new one, and together
                    // they fit within a burst
                    DPRINTF(DRAM, "Merging write after existing burst\n");
                    merged = true;
                    // the address is right, and only the size has
                    // to be adjusted
                    (*w)->size = addr + size - (*w)->addr;
                }
            }
            ++w;
        }

        // if the item was not merged we need to create a new write
        // and enqueue it
        if (!merged) {
            DRAMPacket* dram_pkt = decodeAddr(pkt, addr, size, false);

            assert(writeQueue.size() < writeBufferSize);
            wrQLenPdf[writeQueue.size()]++;

            DPRINTF(DRAM, "Adding to write queue\n");

            writeQueue.push_back(dram_pkt);

            // Update stats
            avgWrQLen = writeQueue.size();
        } else {
            // keep track of the fact that this burst effectively
            // disappeared as it was merged with an existing one
            mergedWrBursts++;
        }

        // Starting address of next dram pkt (aligned to burstSize boundary)
        addr = (addr | (burstSize - 1)) + 1;
    }

    // we do not wait for the writes to be sent to the actual memory,
    // but instead take responsibility for the consistency here and
    // snoop the write queue for any upcoming reads
    // @todo, if a pkt size is larger than burst size, we might need a
    // different front end latency
    accessAndRespond(pkt, frontendLatency);

    // If we are not already scheduled to get a request out of the
    // queue, do so now
    if (!nextReqEvent.scheduled()) {
        DPRINTF(DRAM, "Request scheduled immediately\n");
        schedule(nextReqEvent, curTick());
    }
}
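
// Example of the merge cases above (assumed values, burstSize = 64): an
// existing queued write covering [0x40, 0x60) and a new write covering
// [0x50, 0x70) overlap, and together span 0x70 - 0x40 = 48 <= 64 bytes,
// so the "merging write after existing burst" case grows the existing
// entry to [0x40, 0x70) instead of enqueuing a second burst.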
void
DRAMCtrl::printParams() const
{
    // Sanity check print of important parameters
    DPRINTF(DRAM,
            "Memory controller %s physical organization\n"      \
            "Number of devices per rank   %d\n"                 \
            "Device bus width (in bits)   %d\n"                 \
            "DRAM data bus burst (bytes)  %d\n"                 \
            "Row buffer size (bytes)      %d\n"                 \
            "Columns per row buffer       %d\n"                 \
            "Rows per bank                %d\n"                 \
            "Banks per rank               %d\n"                 \
            "Ranks per channel            %d\n"                 \
            "Total mem capacity (bytes)   %u\n",
            name(), devicesPerRank, deviceBusWidth, burstSize, rowBufferSize,
            columnsPerRowBuffer, rowsPerBank, banksPerRank, ranksPerChannel,
            rowBufferSize * rowsPerBank * banksPerRank * ranksPerChannel);

    string scheduler = memSchedPolicy == Enums::fcfs ? "FCFS" : "FR-FCFS";
    string address_mapping = addrMapping == Enums::RoRaBaChCo ? "RoRaBaChCo" :
        (addrMapping == Enums::RoRaBaCoCh ? "RoRaBaCoCh" : "RoCoRaBaCh");
    string page_policy = pageMgmt == Enums::open ? "OPEN" :
        (pageMgmt == Enums::open_adaptive ? "OPEN (adaptive)" :
        (pageMgmt == Enums::close_adaptive ? "CLOSE (adaptive)" : "CLOSE"));

    DPRINTF(DRAM,
            "Memory controller %s characteristics\n"    \
            "Read buffer size     %d\n"                 \
            "Write buffer size    %d\n"                 \
            "Write high thresh    %d\n"                 \
            "Write low thresh     %d\n"                 \
            "Scheduler            %s\n"                 \
            "Address mapping      %s\n"                 \
            "Page policy          %s\n",
            name(), readBufferSize, writeBufferSize, writeHighThreshold,
            writeLowThreshold, scheduler, address_mapping, page_policy);

    DPRINTF(DRAM, "Memory controller %s timing specs\n" \
            "tRCD      %d ticks\n"                      \
            "tCL       %d ticks\n"                      \
            "tRP       %d ticks\n"                      \
            "tBURST    %d ticks\n"                      \
            "tRFC      %d ticks\n"                      \
            "tREFI     %d ticks\n"                      \
            "tWTR      %d ticks\n"                      \
            "tRTW      %d ticks\n"                      \
            "tXAW (%d) %d ticks\n",
            name(), tRCD, tCL, tRP, tBURST, tRFC, tREFI, tWTR,
            tRTW, activationLimit, tXAW);
}
void
DRAMCtrl::printQs() const {
    DPRINTF(DRAM, "===READ QUEUE===\n\n");
    for (auto i = readQueue.begin() ;  i != readQueue.end() ; ++i) {
        DPRINTF(DRAM, "Read %lu\n", (*i)->addr);
    }
    DPRINTF(DRAM, "\n===RESP QUEUE===\n\n");
    for (auto i = respQueue.begin() ;  i != respQueue.end() ; ++i) {
        DPRINTF(DRAM, "Response %lu\n", (*i)->addr);
    }
    DPRINTF(DRAM, "\n===WRITE QUEUE===\n\n");
    for (auto i = writeQueue.begin() ;  i != writeQueue.end() ; ++i) {
        DPRINTF(DRAM, "Write %lu\n", (*i)->addr);
    }
}
bool
DRAMCtrl::recvTimingReq(PacketPtr pkt)
{
    /// @todo temporary hack to deal with memory corruption issues until
    /// 4-phase transactions are complete
    for (int x = 0; x < pendingDelete.size(); x++)
        delete pendingDelete[x];
    pendingDelete.clear();

    // This is where we enter from the outside world
    DPRINTF(DRAM, "recvTimingReq: request %s addr %lld size %d\n",
            pkt->cmdString(), pkt->getAddr(), pkt->getSize());

    // simply drop inhibited packets for now
    if (pkt->memInhibitAsserted()) {
        DPRINTF(DRAM, "Inhibited packet -- Dropping it now\n");
        pendingDelete.push_back(pkt);
        return true;
    }

    // Calc avg gap between requests
    if (prevArrival != 0) {
        totGap += curTick() - prevArrival;
    }
    prevArrival = curTick();

    // Find out how many dram packets a pkt translates to
    // If the burst size is equal or larger than the pkt size, then a pkt
    // translates to only one dram packet. Otherwise, a pkt translates to
    // multiple dram packets
    unsigned size = pkt->getSize();
    unsigned offset = pkt->getAddr() & (burstSize - 1);
    unsigned int dram_pkt_count = divCeil(offset + size, burstSize);

    // check local buffers and do not accept if full
    if (pkt->isRead()) {
        assert(size != 0);
        if (readQueueFull(dram_pkt_count)) {
            DPRINTF(DRAM, "Read queue full, not accepting\n");
            // remember that we have to retry this port
            retryRdReq = true;
            numRdRetry++;
            return false;
        } else {
            addToReadQueue(pkt, dram_pkt_count);
            readReqs++;
            bytesReadSys += size;
        }
    } else if (pkt->isWrite()) {
        assert(size != 0);
        if (writeQueueFull(dram_pkt_count)) {
            DPRINTF(DRAM, "Write queue full, not accepting\n");
            // remember that we have to retry this port
            retryWrReq = true;
            numWrRetry++;
            return false;
        } else {
            addToWriteQueue(pkt, dram_pkt_count);
            writeReqs++;
            bytesWrittenSys += size;
        }
    } else {
        DPRINTF(DRAM,"Neither read nor write, ignore timing\n");
        neitherReadNorWrite++;
        accessAndRespond(pkt, 1);
    }

    return true;
}
void
DRAMCtrl::processRespondEvent()
{
    DPRINTF(DRAM,
            "processRespondEvent(): Some req has reached its readyTime\n");

    DRAMPacket* dram_pkt = respQueue.front();

    if (dram_pkt->burstHelper) {
        // it is a split packet
        dram_pkt->burstHelper->burstsServiced++;
        if (dram_pkt->burstHelper->burstsServiced ==
            dram_pkt->burstHelper->burstCount) {
            // we have now serviced all children packets of a system packet
            // so we can now respond to the requester
            // @todo we probably want to have a different front end and back
            // end latency for split packets
            accessAndRespond(dram_pkt->pkt, frontendLatency + backendLatency);
            delete dram_pkt->burstHelper;
            dram_pkt->burstHelper = NULL;
        }
    } else {
        // it is not a split packet
        accessAndRespond(dram_pkt->pkt, frontendLatency + backendLatency);
    }

    delete respQueue.front();
    respQueue.pop_front();

    if (!respQueue.empty()) {
        assert(respQueue.front()->readyTime >= curTick());
        assert(!respondEvent.scheduled());
        schedule(respondEvent, respQueue.front()->readyTime);
    } else {
        // if there is nothing left in any queue, signal a drain
        if (writeQueue.empty() && readQueue.empty() &&
            drainManager) {
            drainManager->signalDrainDone();
            drainManager = NULL;
        }
    }

    // We have made a location in the queue available at this point,
    // so if there is a read that was forced to wait, retry now
    if (retryRdReq) {
        retryRdReq = false;
        port.sendRetry();
    }
}
void
DRAMCtrl::chooseNext(std::deque<DRAMPacket*>& queue)
{
    // This method does the arbitration between requests. The chosen
    // packet is simply moved to the head of the queue. The other
    // methods know that this is the place to look. For example, with
    // FCFS, this method does nothing
    assert(!queue.empty());

    if (queue.size() == 1) {
        DPRINTF(DRAM, "Single request, nothing to do\n");
        return;
    }

    if (memSchedPolicy == Enums::fcfs) {
        // Do nothing, since the correct request is already head
    } else if (memSchedPolicy == Enums::frfcfs) {
        reorderQueue(queue);
    } else
        panic("No scheduling policy chosen\n");
}
void
DRAMCtrl::reorderQueue(std::deque<DRAMPacket*>& queue)
{
    // Only determine this when needed
    uint64_t earliest_banks = 0;

    // Search for row hits first, if no row hit is found then schedule the
    // packet to one of the earliest banks available
    bool found_earliest_pkt = false;
    auto selected_pkt_it = queue.begin();

    for (auto i = queue.begin(); i != queue.end() ; ++i) {
        DRAMPacket* dram_pkt = *i;
        const Bank& bank = dram_pkt->bankRef;
        // Check if it is a row hit
        if (bank.openRow == dram_pkt->row) {
            DPRINTF(DRAM, "Row buffer hit\n");
            selected_pkt_it = i;
            break;
        } else if (!found_earliest_pkt) {
            // No row hit, go for first ready
            if (earliest_banks == 0)
                earliest_banks = minBankFreeAt(queue);

            // Bank is ready or is the first available bank
            if (bank.freeAt <= curTick() ||
                bits(earliest_banks, dram_pkt->bankId, dram_pkt->bankId)) {
                // Remember the packet to be scheduled to one of the earliest
                // banks available
                selected_pkt_it = i;
                found_earliest_pkt = true;
            }
        }
    }

    DRAMPacket* selected_pkt = *selected_pkt_it;
    queue.erase(selected_pkt_it);
    queue.push_front(selected_pkt);
}
void
DRAMCtrl::accessAndRespond(PacketPtr pkt, Tick static_latency)
{
    DPRINTF(DRAM, "Responding to Address %lld.. ", pkt->getAddr());

    bool needsResponse = pkt->needsResponse();
    // do the actual memory access which also turns the packet into a
    // response
    access(pkt);

    // turn packet around to go back to requester if response expected
    if (needsResponse) {
        // access already turned the packet into a response
        assert(pkt->isResponse());

        // @todo someone should pay for this
        pkt->busFirstWordDelay = pkt->busLastWordDelay = 0;

        // queue the packet in the response queue to be sent out after
        // the static latency has passed
        port.schedTimingResp(pkt, curTick() + static_latency);
    } else {
        // @todo the packet is going to be deleted, and the DRAMPacket
        // still has a pointer to it
        pendingDelete.push_back(pkt);
    }

    DPRINTF(DRAM, "Done\n");

    return;
}
pair<Tick, Tick>
DRAMCtrl::estimateLatency(DRAMPacket* dram_pkt, Tick inTime)
{
    // If a request reaches a bank at tick 'inTime', how much time
    // *after* that does it take to finish the request, depending
    // on bank status and page open policy. Note that this method
    // considers only the time taken for the actual read or write
    // to complete, NOT any additional time thereafter for tRAS or
    // tRP.
    Tick accLat = 0;
    Tick bankLat = 0;
    rowHitFlag = false;
    Tick potentialActTick;

    const Bank& bank = dram_pkt->bankRef;
    // open-page policy or close_adaptive policy
    if (pageMgmt == Enums::open || pageMgmt == Enums::open_adaptive ||
        pageMgmt == Enums::close_adaptive) {
        if (bank.openRow == dram_pkt->row) {
            // When we have a row-buffer hit,
            // we don't care about tRAS having expired or not,
            // but do care about bank being free for access
            rowHitFlag = true;

            // When a series of requests arrive to the same row,
            // DDR systems are capable of streaming data continuously
            // at maximum bandwidth (subject to tCCD). Here, we approximate
            // this condition, and assume that whenever a bank is already
            // busy and a new request comes in, it can be completed with no
            // penalty beyond waiting for the existing read to complete.
            if (bank.freeAt > inTime) {
                accLat += bank.freeAt - inTime;
                bankLat += 0;
            } else {
                // CAS latency only
                accLat += tCL;
                bankLat += tCL;
            }
        } else {
            // Row-buffer miss, need to close existing row
            // once tRAS has expired, then open the new one,
            // then add cas latency.
            Tick freeTime = std::max(bank.tRASDoneAt, bank.freeAt);

            if (freeTime > inTime)
                accLat += freeTime - inTime;

            // If there is no open row (open adaptive), then there
            // is no precharge delay, otherwise go with tRP
            Tick precharge_delay = bank.openRow == Bank::NO_ROW ? 0 : tRP;

            //The bank is free, and you may be able to activate
            potentialActTick = inTime + accLat + precharge_delay;
            if (potentialActTick < bank.actAllowedAt)
                accLat += bank.actAllowedAt - potentialActTick;

            accLat += precharge_delay + tRCD + tCL;
            bankLat += precharge_delay + tRCD + tCL;
        }
    } else if (pageMgmt == Enums::close) {
        // With a close page policy, no notion of
        // bank.tRASDoneAt
        if (bank.freeAt > inTime)
            accLat += bank.freeAt - inTime;

        //The bank is free, and you may be able to activate
        potentialActTick = inTime + accLat;
        if (potentialActTick < bank.actAllowedAt)
            accLat += bank.actAllowedAt - potentialActTick;

        // page already closed, simply open the row, and
        // add cas latency
        accLat += tRCD + tCL;
        bankLat += tRCD + tCL;
    } else
        panic("No page management policy chosen\n");

    DPRINTF(DRAM, "Returning < %lld, %lld > from estimateLatency()\n",
            bankLat, accLat);

    return make_pair(bankLat, accLat);
}
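
// Illustrative outcomes of estimateLatency() under the open-page policy
// (assumed scenario): a row-buffer hit on an idle bank costs just tCL; a
// row miss that must precharge the open row and activate the new one
// costs tRP + tRCD + tCL, plus any time until the bank is actually free.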
void
DRAMCtrl::recordActivate(Tick act_tick, uint8_t rank, uint8_t bank,
                         uint16_t row)
{
    assert(0 <= rank && rank < ranksPerChannel);
    assert(actTicks[rank].size() == activationLimit);

    DPRINTF(DRAM, "Activate at tick %d\n", act_tick);

    // idleStartTick is the tick when all the banks were
    // precharged. Thus, the difference between act_tick and
    // idleStartTick gives the time for which the DRAM is in an idle
    // state with all banks precharged. Note that we may end up
    // "changing history" by scheduling an activation before an
    // already scheduled precharge, effectively canceling it out.
    if (numBanksActive == 0 && act_tick > idleStartTick) {
        prechargeAllTime += act_tick - idleStartTick;
    }

    // update the open row
    assert(banks[rank][bank].openRow == Bank::NO_ROW);
    banks[rank][bank].openRow = row;

    // start counting anew, this covers both the case when we
    // auto-precharged, and when this access is forced to
    // precharge
    banks[rank][bank].bytesAccessed = 0;
    banks[rank][bank].rowAccesses = 0;

    ++numBanksActive;
    assert(numBanksActive <= banksPerRank * ranksPerChannel);

    DPRINTF(DRAM, "Activate bank at tick %lld, now got %d active\n",
            act_tick, numBanksActive);

    // start by enforcing tRRD
    for(int i = 0; i < banksPerRank; i++) {
        // next activate must not happen before tRRD
        banks[rank][i].actAllowedAt = act_tick + tRRD;
    }
    // tRC should be added to activation tick of the bank currently accessed,
    // where tRC = tRAS + tRP, this is just for a check as actAllowedAt for
    // the same bank is already captured by bank.freeAt and bank.tRASDoneAt
    banks[rank][bank].actAllowedAt = act_tick + tRAS + tRP;

    // next, we deal with tXAW, if the activation limit is disabled
    // then we are done
    if (actTicks[rank].empty())
        return;

    // sanity check
    if (actTicks[rank].back() && (act_tick - actTicks[rank].back()) < tXAW) {
        // @todo For now, stick with a warning
        warn("Got %d activates in window %d (%d - %d) which is smaller "
             "than %d\n", activationLimit, act_tick - actTicks[rank].back(),
             act_tick, actTicks[rank].back(), tXAW);
    }

    // shift the times used for the book keeping, the last element
    // (highest index) is the oldest one and hence the lowest value
    actTicks[rank].pop_back();

    // record a new activation (in the future)
    actTicks[rank].push_front(act_tick);

    // cannot activate more than X times in time window tXAW, push the
    // next one (the X + 1'st activate) to be tXAW away from the
    // oldest in our window of X
    if (actTicks[rank].back() && (act_tick - actTicks[rank].back()) < tXAW) {
        DPRINTF(DRAM, "Enforcing tXAW with X = %d, next activate no earlier "
                "than %d\n", activationLimit, actTicks[rank].back() + tXAW);
        for(int j = 0; j < banksPerRank; j++)
            // next activate must not happen before end of window
            banks[rank][j].actAllowedAt = actTicks[rank].back() + tXAW;
    }
}
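
// Example of the tXAW bookkeeping above (assumed activationLimit = 4 and
// tXAW = 100 ticks): after activates at ticks 10, 20, 30 and 40 the deque
// holds [40, 30, 20, 10]; since 40 - 10 < 100, the fifth activate in this
// rank is pushed out to no earlier than 10 + 100 = 110.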
void
DRAMCtrl::prechargeBank(Bank& bank, Tick free_at)
{
    // make sure the bank has an open row
    assert(bank.openRow != Bank::NO_ROW);

    // sample the bytes per activate here since we are closing
    // the page
    bytesPerActivate.sample(bank.bytesAccessed);

    bank.openRow = Bank::NO_ROW;

    bank.freeAt = free_at;

    assert(numBanksActive != 0);
    --numBanksActive;

    DPRINTF(DRAM, "Precharged bank, done at tick %lld, now got %d active\n",
            bank.freeAt, numBanksActive);

    // if we reached zero, then special conditions apply as we track
    // if all banks are precharged for the power models
    if (numBanksActive == 0) {
        idleStartTick = std::max(idleStartTick, bank.freeAt);
        DPRINTF(DRAM, "All banks precharged at tick: %ld\n",
                idleStartTick);
    }
}
void
DRAMCtrl::doDRAMAccess(DRAMPacket* dram_pkt)
{
    DPRINTF(DRAM, "Timing access to addr %lld, rank/bank/row %d %d %d\n",
            dram_pkt->addr, dram_pkt->rank, dram_pkt->bank, dram_pkt->row);

    // estimate the bank and access latency
    pair<Tick, Tick> lat = estimateLatency(dram_pkt, curTick());
    Tick bankLat = lat.first;
    Tick accessLat = lat.second;
    Tick actTick;

    // This request was woken up at this time based on a prior call
    // to estimateLatency(). However, between then and now, both the
    // accessLatency and/or busBusyUntil may have changed. We need
    // to correct for that.
    Tick addDelay = (curTick() + accessLat < busBusyUntil) ?
        busBusyUntil - (curTick() + accessLat) : 0;

    Bank& bank = dram_pkt->bankRef;

    // Update bank state
    if (pageMgmt == Enums::open || pageMgmt == Enums::open_adaptive ||
        pageMgmt == Enums::close_adaptive) {

        if (rowHitFlag) {
            bank.freeAt = curTick() + addDelay + accessLat;
        } else {
            // If there is a page open, precharge it.
            if (bank.openRow != Bank::NO_ROW) {
                prechargeBank(bank, std::max(std::max(bank.freeAt,
                                                      bank.tRASDoneAt),
                                             curTick()) + tRP);
            }

            // Any precharge is already part of the latency
            // estimation, so update the bank free time
            bank.freeAt = curTick() + addDelay + accessLat;

            // any waiting for banks is accounted for in freeAt
            actTick = bank.freeAt - tCL - tRCD;

            // If you activated a new row due to this access, the next access
            // will have to respect tRAS for this bank
            bank.tRASDoneAt = actTick + tRAS;

            recordActivate(actTick, dram_pkt->rank, dram_pkt->bank,
                           dram_pkt->row);
        }

        // increment the bytes accessed and the accesses per row
        bank.bytesAccessed += burstSize;
        ++bank.rowAccesses;

        // if we reached the max, then issue with an auto-precharge
        bool auto_precharge = bank.rowAccesses == maxAccessesPerRow;

        // if we did not hit the limit, we might still want to
        // auto-precharge
        if (!auto_precharge &&
            (pageMgmt == Enums::open_adaptive ||
             pageMgmt == Enums::close_adaptive)) {
            // a twist on the open and close page policies:
            // 1) open_adaptive page policy does not blindly keep the
            // page open, but closes it if there are no row hits, and there
            // are bank conflicts in the queue
            // 2) close_adaptive page policy does not blindly close the
            // page, but closes it only if there are no row hits in the queue.
            // In this case, only force an auto precharge when there
            // are no same page hits in the queue
            bool got_more_hits = false;
            bool got_bank_conflict = false;

            // either look at the read queue or write queue
            const deque<DRAMPacket*>& queue = dram_pkt->isRead ? readQueue :
                writeQueue;
            auto p = queue.begin();
            // make sure we are not considering the packet that we are
            // currently dealing with (which is the head of the queue)
            ++p;

            // keep on looking until we have found required condition or
            // reached the end
            while (!(got_more_hits &&
                     (got_bank_conflict ||
                      pageMgmt == Enums::close_adaptive)) &&
                   p != queue.end()) {
                bool same_rank_bank = (dram_pkt->rank == (*p)->rank) &&
                    (dram_pkt->bank == (*p)->bank);
                bool same_row = dram_pkt->row == (*p)->row;
                got_more_hits |= same_rank_bank && same_row;
                got_bank_conflict |= same_rank_bank && !same_row;
                ++p;
            }

            // auto pre-charge when either
            // 1) open_adaptive policy, we have not got any more hits, and
            // have a bank conflict
            // 2) close_adaptive policy and we have not got any more hits
            auto_precharge = !got_more_hits &&
                (got_bank_conflict || pageMgmt == Enums::close_adaptive);
        }

        // if this access should use auto-precharge, then we are
        // closing the row
        if (auto_precharge) {
            prechargeBank(bank, std::max(bank.freeAt, bank.tRASDoneAt) + tRP);

            DPRINTF(DRAM, "Auto-precharged bank: %d\n", dram_pkt->bankId);
        }

        DPRINTF(DRAM, "doDRAMAccess::bank.freeAt is %lld\n", bank.freeAt);
    } else if (pageMgmt == Enums::close) {
        actTick = curTick() + addDelay + accessLat - tRCD - tCL;
        recordActivate(actTick, dram_pkt->rank, dram_pkt->bank, dram_pkt->row);

        bank.freeAt = actTick + tRCD + tCL;
        bank.tRASDoneAt = actTick + tRAS;

        // sample the relevant values when precharging
        bank.bytesAccessed = burstSize;
        bank.rowAccesses = 1;

        prechargeBank(bank, std::max(bank.freeAt, bank.tRASDoneAt) + tRP);
        DPRINTF(DRAM, "doDRAMAccess::bank.freeAt is %lld\n", bank.freeAt);
    } else
        panic("No page management policy chosen\n");

    // Update request parameters
    dram_pkt->readyTime = curTick() + addDelay + accessLat + tBURST;

    DPRINTF(DRAM, "Req %lld: curtick is %lld accessLat is %d "  \
            "readytime is %lld busbusyuntil is %lld. "          \
            "Scheduling at readyTime\n", dram_pkt->addr,
            curTick(), accessLat, dram_pkt->readyTime, busBusyUntil);

    // Make sure requests are not overlapping on the databus
    assert(dram_pkt->readyTime - busBusyUntil >= tBURST);

    // Update bus state
    busBusyUntil = dram_pkt->readyTime;

    DPRINTF(DRAM,"Access time is %lld\n",
            dram_pkt->readyTime - dram_pkt->entryTime);

    // Update the minimum timing between the requests, this is a
    // conservative estimate of when we have to schedule the next
    // request to not introduce any unnecessary bubbles. In most cases
    // we will wake up sooner than we have to.
    nextReqTime = busBusyUntil - (tRP + tRCD + tCL);

    // Update the stats and schedule the next request
    if (dram_pkt->isRead) {
        ++readsThisTime;
        if (rowHitFlag)
            readRowHits++;
        bytesReadDRAM += burstSize;
        perBankRdBursts[dram_pkt->bankId]++;

        // Update latency stats
        totMemAccLat += dram_pkt->readyTime - dram_pkt->entryTime;
        totBankLat += bankLat;
        totBusLat += tBURST;
        totQLat += dram_pkt->readyTime - dram_pkt->entryTime - bankLat -
            tBURST;
    } else {
        ++writesThisTime;
        if (rowHitFlag)
            writeRowHits++;
        bytesWritten += burstSize;
        perBankWrBursts[dram_pkt->bankId]++;
    }
}
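
// Note on nextReqTime above: waking up tRP + tRCD + tCL before the bus is
// free gives the controller just enough lead time to precharge, activate
// and issue a CAS for the next access without adding bus bubbles. For
// example (assumed values), with busBusyUntil = 10000 and
// tRP = tRCD = tCL = 1000 ticks, the next request is considered at tick
// 7000.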
void
DRAMCtrl::moveToRespQ()
{
    // Remove from read queue
    DRAMPacket* dram_pkt = readQueue.front();
    readQueue.pop_front();

    // sanity check
    assert(dram_pkt->size <= burstSize);

    // Insert into response queue sorted by readyTime
    // It will be sent back to the requestor at its
    // readyTime
    if (respQueue.empty()) {
        respQueue.push_front(dram_pkt);
        assert(!respondEvent.scheduled());
        assert(dram_pkt->readyTime >= curTick());
        schedule(respondEvent, dram_pkt->readyTime);
    } else {
        bool done = false;
        auto i = respQueue.begin();
        while (!done && i != respQueue.end()) {
            if ((*i)->readyTime > dram_pkt->readyTime) {
                respQueue.insert(i, dram_pkt);
                done = true;
            }
            ++i;
        }

        if (!done)
            respQueue.push_back(dram_pkt);

        assert(respondEvent.scheduled());

        if (respQueue.front()->readyTime < respondEvent.when()) {
            assert(respQueue.front()->readyTime >= curTick());
            reschedule(respondEvent, respQueue.front()->readyTime);
        }
    }
}
void
DRAMCtrl::processNextReqEvent()
{
    if (busState == READ_TO_WRITE) {
        DPRINTF(DRAM, "Switching to writes after %d reads with %d reads "
                "waiting\n", readsThisTime, readQueue.size());

        // sample and reset the read-related stats as we are now
        // transitioning to writes, and all reads are done
        rdPerTurnAround.sample(readsThisTime);
        readsThisTime = 0;

        // now proceed to do the actual writes
        busState = WRITE;
    } else if (busState == WRITE_TO_READ) {
        DPRINTF(DRAM, "Switching to reads after %d writes with %d writes "
                "waiting\n", writesThisTime, writeQueue.size());

        wrPerTurnAround.sample(writesThisTime);
        writesThisTime = 0;

        busState = READ;
    }

    if (refreshState != REF_IDLE) {
        // if a refresh waiting for this event loop to finish, then hand
        // over now, and do not schedule a new nextReqEvent
        if (refreshState == REF_DRAIN) {
            DPRINTF(DRAM, "Refresh drain done, now precharging\n");

            refreshState = REF_PRE;

            // hand control back to the refresh event loop
            schedule(refreshEvent, curTick());
        }

        // let the refresh finish before issuing any further requests
        return;
    }

    // when we get here it is either a read or a write
    if (busState == READ) {

        // track if we should switch or not
        bool switch_to_writes = false;

        if (readQueue.empty()) {
            // In the case there is no read request to go next,
            // trigger writes if we have passed the low threshold (or
            // if we are draining)
            if (!writeQueue.empty() &&
                (drainManager || writeQueue.size() > writeLowThreshold)) {

                switch_to_writes = true;
            } else {
                // check if we are drained
                if (respQueue.empty() && drainManager) {
                    drainManager->signalDrainDone();
                    drainManager = NULL;
                }

                // nothing to do, not even any point in scheduling an
                // event for the next request
                return;
            }
        } else {
            // Figure out which read request goes next, and move it to the
            // front of the read queue
            chooseNext(readQueue);

            doDRAMAccess(readQueue.front());

            // At this point we're done dealing with the request
            // It will be moved to a separate response queue with a
            // correct readyTime, and eventually be sent back at that
            // time
            moveToRespQ();

            // we have so many writes that we have to transition
            if (writeQueue.size() > writeHighThreshold) {
                switch_to_writes = true;
            }
        }

        // switching to writes, either because the read queue is empty
        // and the writes have passed the low threshold (or we are
        // draining), or because the writes hit the high threshold
        if (switch_to_writes) {
            // transition to writing
            busState = READ_TO_WRITE;

            // add a bubble to the data bus, as defined by the
            // bus turnaround time tRTW
            busBusyUntil += tRTW;

            // update the minimum timing between the requests,
            // this shifts us back in time far enough to do any
            // bank preparation
            nextReqTime = busBusyUntil - (tRP + tRCD + tCL);
        }
    } else {
        chooseNext(writeQueue);
        DRAMPacket* dram_pkt = writeQueue.front();
        // sanity check
        assert(dram_pkt->size <= burstSize);
        doDRAMAccess(dram_pkt);

        writeQueue.pop_front();
        delete dram_pkt;

        // If we emptied the write queue, or got sufficiently below the
        // threshold (using the minWritesPerSwitch as the hysteresis) and
        // are not draining, or we have reads waiting and have done enough
        // writes, then switch to reads.
        if (writeQueue.empty() ||
            (writeQueue.size() + minWritesPerSwitch < writeLowThreshold &&
             !drainManager) ||
            (!readQueue.empty() && writesThisTime >= minWritesPerSwitch)) {
            // turn the bus back around for reads again
            busState = WRITE_TO_READ;

            // note that we switch back to reads also in the idle
            // case, which eventually will check for any draining and
            // also pause any further scheduling if there is really
            // nothing to do

            // here we get a bit creative and shift the bus busy time not
            // just the tWTR, but also a CAS latency to capture the fact
            // that we are allowed to prepare a new bank, but not issue a
            // read command until after tWTR, in essence we capture a
            // bubble on the data bus that is tWTR + tCL
            busBusyUntil += tWTR + tCL;

            // update the minimum timing between the requests, this shifts
            // us back in time far enough to do any bank preparation
            nextReqTime = busBusyUntil - (tRP + tRCD + tCL);
        }
    }

    schedule(nextReqEvent, std::max(nextReqTime, curTick()));

    // If there is space available and we have writes waiting then let
    // them retry. This is done here to ensure that the retry does not
    // cause a nextReqEvent to be scheduled before we do so as part of
    // the next request processing
    if (retryWrReq && writeQueue.size() < writeBufferSize) {
        retryWrReq = false;
        port.sendRetry();
    }
}
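
// Example of the read/write switching hysteresis (assumed values): with
// writeBufferSize = 64, write_high_thresh_perc = 85 and
// write_low_thresh_perc = 50, writes start once the queue exceeds 54
// entries and, with minWritesPerSwitch = 16, continue until the queue is
// empty, drops below 32 - 16 = 16 entries, or 16 writes have been done
// while reads are waiting.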
uint64_t
DRAMCtrl::minBankFreeAt(const deque<DRAMPacket*>& queue) const
{
    uint64_t bank_mask = 0;
    Tick freeAt = MaxTick;

    // determine if we have queued transactions targeting the
    // banks in question
    vector<bool> got_waiting(ranksPerChannel * banksPerRank, false);
    for (auto p = queue.begin(); p != queue.end(); ++p) {
        got_waiting[(*p)->bankId] = true;
    }

    for (int i = 0; i < ranksPerChannel; i++) {
        for (int j = 0; j < banksPerRank; j++) {
            // if we have waiting requests for the bank, and it is
            // amongst the first available, update the mask
            if (got_waiting[i * banksPerRank + j] &&
                banks[i][j].freeAt <= freeAt) {
                // reset bank mask if new minimum is found
                if (banks[i][j].freeAt < freeAt)
                    bank_mask = 0;
                // set the bit corresponding to the available bank,
                // indexed consistently with bankId = rank * banksPerRank
                // + bank
                uint8_t bit_index = i * banksPerRank + j;
                replaceBits(bank_mask, bit_index, bit_index, 1);
                freeAt = banks[i][j].freeAt;
            }
        }
    }
    return bank_mask;
}
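
// Example of the bank mask (assumed banksPerRank = 8): if banks (rank 0,
// bank 2) and (rank 1, bank 3) share the earliest freeAt, bits 2 and 11
// of the returned mask are set, and reorderQueue() tests membership with
// bits(earliest_banks, bankId, bankId).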
void
DRAMCtrl::processRefreshEvent()
{
    // when first preparing the refresh, remember when it was due
    if (refreshState == REF_IDLE) {
        // remember when the refresh is due
        refreshDueAt = curTick();

        // proceed to drain
        refreshState = REF_DRAIN;

        DPRINTF(DRAM, "Refresh due\n");
    }

    // let any scheduled read or write go ahead, after which it will
    // hand control back to this event loop
    if (refreshState == REF_DRAIN) {
        if (nextReqEvent.scheduled()) {
            // hand control over to the request loop until it is
            // done
            DPRINTF(DRAM, "Refresh awaiting draining\n");
            return;
        } else {
            refreshState = REF_PRE;
        }
    }

    // at this point, ensure that all banks are precharged
    if (refreshState == REF_PRE) {
        DPRINTF(DRAM, "Precharging all\n");

        // precharge any active bank
        for (int i = 0; i < ranksPerChannel; i++) {
            for (int j = 0; j < banksPerRank; j++) {
                if (banks[i][j].openRow != Bank::NO_ROW) {
                    // respect both causality and any existing bank
                    // constraints
                    Tick free_at = std::max(std::max(banks[i][j].freeAt,
                                                     banks[i][j].tRASDoneAt),
                                            curTick()) + tRP;

                    prechargeBank(banks[i][j], free_at);
                }
            }
        }

        if (numBanksActive != 0)
            panic("Refresh scheduled with %d active banks\n", numBanksActive);

        // advance the state
        refreshState = REF_RUN;

        // call ourselves in the future
        schedule(refreshEvent, std::max(curTick(), idleStartTick));
        return;
    }

    // last but not least we perform the actual refresh
    if (refreshState == REF_RUN) {
        // should never get here with any banks active
        assert(numBanksActive == 0);

        Tick banksFree = curTick() + tRFC;

        for (int i = 0; i < ranksPerChannel; i++) {
            for (int j = 0; j < banksPerRank; j++) {
                banks[i][j].freeAt = banksFree;
            }
        }

        // make sure we did not wait so long that we cannot make up
        // for it
        if (refreshDueAt + tREFI < banksFree) {
            fatal("Refresh was delayed so long we cannot catch up\n");
        }

        // compensate for the delay in actually performing the refresh
        // when scheduling the next one
        schedule(refreshEvent, refreshDueAt + tREFI - tRP);

        // back to business as usual
        refreshState = REF_IDLE;

        // we are now refreshing until tRFC is done
        idleStartTick = banksFree;

        // kick the normal request processing loop into action again
        // as early as possible, i.e. when the request is done, the
        // scheduling of this event also prevents any new requests
        // from going ahead before the scheduled point in time
        nextReqTime = banksFree;
        schedule(nextReqEvent, nextReqTime);
    }
}
void
DRAMCtrl::regStats()
{
    using namespace Stats;

    AbstractMemory::regStats();

    readReqs
        .name(name() + ".readReqs")
        .desc("Number of read requests accepted");

    writeReqs
        .name(name() + ".writeReqs")
        .desc("Number of write requests accepted");

    readBursts
        .name(name() + ".readBursts")
        .desc("Number of DRAM read bursts, "
              "including those serviced by the write queue");

    writeBursts
        .name(name() + ".writeBursts")
        .desc("Number of DRAM write bursts, "
              "including those merged in the write queue");

    servicedByWrQ
        .name(name() + ".servicedByWrQ")
        .desc("Number of DRAM read bursts serviced by the write queue");

    mergedWrBursts
        .name(name() + ".mergedWrBursts")
        .desc("Number of DRAM write bursts merged with an existing one");

    neitherReadNorWrite
        .name(name() + ".neitherReadNorWriteReqs")
        .desc("Number of requests that are neither read nor write");

    perBankRdBursts
        .init(banksPerRank * ranksPerChannel)
        .name(name() + ".perBankRdBursts")
        .desc("Per bank read bursts");

    perBankWrBursts
        .init(banksPerRank * ranksPerChannel)
        .name(name() + ".perBankWrBursts")
        .desc("Per bank write bursts");

    avgRdQLen
        .name(name() + ".avgRdQLen")
        .desc("Average read queue length when enqueuing")
        .precision(2);

    avgWrQLen
        .name(name() + ".avgWrQLen")
        .desc("Average write queue length when enqueuing")
        .precision(2);

    totQLat
        .name(name() + ".totQLat")
        .desc("Total ticks spent queuing");

    totBankLat
        .name(name() + ".totBankLat")
        .desc("Total ticks spent accessing banks");

    totBusLat
        .name(name() + ".totBusLat")
        .desc("Total ticks spent in databus transfers");

    totMemAccLat
        .name(name() + ".totMemAccLat")
        .desc("Total ticks spent from burst creation until serviced "
              "by the DRAM");

    avgQLat
        .name(name() + ".avgQLat")
        .desc("Average queueing delay per DRAM burst")
        .precision(2);

    avgQLat = totQLat / (readBursts - servicedByWrQ);

    avgBankLat
        .name(name() + ".avgBankLat")
        .desc("Average bank access latency per DRAM burst")
        .precision(2);

    avgBankLat = totBankLat / (readBursts - servicedByWrQ);

    avgBusLat
        .name(name() + ".avgBusLat")
        .desc("Average bus latency per DRAM burst")
        .precision(2);

    avgBusLat = totBusLat / (readBursts - servicedByWrQ);

    avgMemAccLat
        .name(name() + ".avgMemAccLat")
        .desc("Average memory access latency per DRAM burst")
        .precision(2);

    avgMemAccLat = totMemAccLat / (readBursts - servicedByWrQ);

    numRdRetry
        .name(name() + ".numRdRetry")
        .desc("Number of times read queue was full causing retry");

    numWrRetry
        .name(name() + ".numWrRetry")
        .desc("Number of times write queue was full causing retry");

    readRowHits
        .name(name() + ".readRowHits")
        .desc("Number of row buffer hits during reads");

    writeRowHits
        .name(name() + ".writeRowHits")
        .desc("Number of row buffer hits during writes");

    readRowHitRate
        .name(name() + ".readRowHitRate")
        .desc("Row buffer hit rate for reads")
        .precision(2);

    readRowHitRate = (readRowHits / (readBursts - servicedByWrQ)) * 100;

    writeRowHitRate
        .name(name() + ".writeRowHitRate")
        .desc("Row buffer hit rate for writes")
        .precision(2);

    writeRowHitRate = (writeRowHits / (writeBursts - mergedWrBursts)) * 100;

    readPktSize
        .init(ceilLog2(burstSize) + 1)
        .name(name() + ".readPktSize")
        .desc("Read request sizes (log2)");

    writePktSize
        .init(ceilLog2(burstSize) + 1)
        .name(name() + ".writePktSize")
        .desc("Write request sizes (log2)");

    rdQLenPdf
        .init(readBufferSize)
        .name(name() + ".rdQLenPdf")
        .desc("What read queue length does an incoming req see");

    wrQLenPdf
        .init(writeBufferSize)
        .name(name() + ".wrQLenPdf")
        .desc("What write queue length does an incoming req see");

    bytesPerActivate
        .init(maxAccessesPerRow)
        .name(name() + ".bytesPerActivate")
        .desc("Bytes accessed per row activation")
        .flags(nozero);

    rdPerTurnAround
        .init(readBufferSize)
        .name(name() + ".rdPerTurnAround")
        .desc("Reads before turning the bus around for writes")
        .flags(nozero);

    wrPerTurnAround
        .init(writeBufferSize)
        .name(name() + ".wrPerTurnAround")
        .desc("Writes before turning the bus around for reads")
        .flags(nozero);

    bytesReadDRAM
        .name(name() + ".bytesReadDRAM")
        .desc("Total number of bytes read from DRAM");

    bytesReadWrQ
        .name(name() + ".bytesReadWrQ")
        .desc("Total number of bytes read from write queue");

    bytesWritten
        .name(name() + ".bytesWritten")
        .desc("Total number of bytes written to DRAM");

    bytesReadSys
        .name(name() + ".bytesReadSys")
        .desc("Total read bytes from the system interface side");

    bytesWrittenSys
        .name(name() + ".bytesWrittenSys")
        .desc("Total written bytes from the system interface side");

    avgRdBW
        .name(name() + ".avgRdBW")
        .desc("Average DRAM read bandwidth in MiByte/s")
        .precision(2);

    avgRdBW = (bytesReadDRAM / 1000000) / simSeconds;

    avgWrBW
        .name(name() + ".avgWrBW")
        .desc("Average achieved write bandwidth in MiByte/s")
        .precision(2);

    avgWrBW = (bytesWritten / 1000000) / simSeconds;

    avgRdBWSys
        .name(name() + ".avgRdBWSys")
        .desc("Average system read bandwidth in MiByte/s")
        .precision(2);

    avgRdBWSys = (bytesReadSys / 1000000) / simSeconds;

    avgWrBWSys
        .name(name() + ".avgWrBWSys")
        .desc("Average system write bandwidth in MiByte/s")
        .precision(2);

    avgWrBWSys = (bytesWrittenSys / 1000000) / simSeconds;

    peakBW
        .name(name() + ".peakBW")
        .desc("Theoretical peak bandwidth in MiByte/s")
        .precision(2);

    peakBW = (SimClock::Frequency / tBURST) * burstSize / 1000000;

    busUtil
        .name(name() + ".busUtil")
        .desc("Data bus utilization in percentage")
        .precision(2);

    busUtil = (avgRdBW + avgWrBW) / peakBW * 100;

    totGap
        .name(name() + ".totGap")
        .desc("Total gap between requests");

    avgGap
        .name(name() + ".avgGap")
        .desc("Average gap between requests")
        .precision(2);

    avgGap = totGap / (readReqs + writeReqs);

    // Stats for DRAM Power calculation based on Micron datasheet
    busUtilRead
        .name(name() + ".busUtilRead")
        .desc("Data bus utilization in percentage for reads")
        .precision(2);

    busUtilRead = avgRdBW / peakBW * 100;

    busUtilWrite
        .name(name() + ".busUtilWrite")
        .desc("Data bus utilization in percentage for writes")
        .precision(2);

    busUtilWrite = avgWrBW / peakBW * 100;

    pageHitRate
        .name(name() + ".pageHitRate")
        .desc("Row buffer hit rate, read and write combined")
        .precision(2);

    pageHitRate = (writeRowHits + readRowHits) /
        (writeBursts - mergedWrBursts + readBursts - servicedByWrQ) * 100;

    prechargeAllPercent
        .name(name() + ".prechargeAllPercent")
        .desc("Percentage of time for which DRAM has all the banks in "
              "precharge state")
        .precision(2);

    prechargeAllPercent = prechargeAllTime / simTicks * 100;
}
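
// Worked example of the bandwidth stats (assumed values): with the default
// 1ps tick resolution, tBURST = 5ns (5000 ticks) and burstSize = 64,
// peakBW = (1e12 / 5000) * 64 / 1000000 = 12800 MiByte/s, and busUtil then
// reports the achieved read plus write bandwidth as a percentage of that
// peak.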
void
DRAMCtrl::recvFunctional(PacketPtr pkt)
{
    // rely on the abstract memory
    functionalAccess(pkt);
}
BaseSlavePort&
DRAMCtrl::getSlavePort(const string &if_name, PortID idx)
{
    if (if_name != "port") {
        return MemObject::getSlavePort(if_name, idx);
    } else {
        return port;
    }
}
unsigned int
DRAMCtrl::drain(DrainManager *dm)
{
    unsigned int count = port.drain(dm);

    // if there is anything in any of our internal queues, keep track
    // of that as well
    if (!(writeQueue.empty() && readQueue.empty() &&
          respQueue.empty())) {
        DPRINTF(Drain, "DRAM controller not drained, write: %d, read: %d,"
                " resp: %d\n", writeQueue.size(), readQueue.size(),
                respQueue.size());
        ++count;
        drainManager = dm;

        // the only part that is not drained automatically over time
        // is the write queue, thus kick things into action if needed
        if (!writeQueue.empty() && !nextReqEvent.scheduled()) {
            schedule(nextReqEvent, curTick());
        }
    }

    if (count)
        setDrainState(Drainable::Draining);
    else
        setDrainState(Drainable::Drained);

    return count;
}
DRAMCtrl::MemoryPort::MemoryPort(const std::string& name, DRAMCtrl& _memory)
    : QueuedSlavePort(name, &_memory, queue), queue(_memory, *this),
      memory(_memory)
{ }

AddrRangeList
DRAMCtrl::MemoryPort::getAddrRanges() const
{
    AddrRangeList ranges;
    ranges.push_back(memory.getAddrRange());
    return ranges;
}

void
DRAMCtrl::MemoryPort::recvFunctional(PacketPtr pkt)
{
    pkt->pushLabel(memory.name());

    if (!queue.checkFunctional(pkt)) {
        // Default implementation of SimpleTimingPort::recvFunctional()
        // calls recvAtomic() and throws away the latency; we can save a
        // little here by just not calculating the latency.
        memory.recvFunctional(pkt);
    }

    pkt->popLabel();
}

Tick
DRAMCtrl::MemoryPort::recvAtomic(PacketPtr pkt)
{
    return memory.recvAtomic(pkt);
}

bool
DRAMCtrl::MemoryPort::recvTimingReq(PacketPtr pkt)
{
    // pass it to the memory controller
    return memory.recvTimingReq(pkt);
}

DRAMCtrl*
DRAMCtrlParams::create()
{
    return new DRAMCtrl(this);
}