/*
 * Copyright (c) 2010-2013 ARM Limited
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2013 Amin Farmahini-Farahani
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Andreas Hansson
 */
#include "base/bitfield.hh"
#include "base/trace.hh"
#include "debug/DRAM.hh"
#include "debug/Drain.hh"
#include "mem/dram_ctrl.hh"
#include "sim/system.hh"

using namespace std;
DRAMCtrl::DRAMCtrl(const DRAMCtrlParams* p) :
    AbstractMemory(p),
    port(name() + ".port", *this),
    retryRdReq(false), retryWrReq(false),
    rowHitFlag(false), stopReads(false),
    writeEvent(this), respondEvent(this),
    refreshEvent(this), nextReqEvent(this), drainManager(NULL),
    deviceBusWidth(p->device_bus_width), burstLength(p->burst_length),
    deviceRowBufferSize(p->device_rowbuffer_size),
    devicesPerRank(p->devices_per_rank),
    burstSize((devicesPerRank * burstLength * deviceBusWidth) / 8),
    rowBufferSize(devicesPerRank * deviceRowBufferSize),
    columnsPerRowBuffer(rowBufferSize / burstSize),
    ranksPerChannel(p->ranks_per_channel),
    banksPerRank(p->banks_per_rank), channels(p->channels), rowsPerBank(0),
    readBufferSize(p->read_buffer_size),
    writeBufferSize(p->write_buffer_size),
    writeHighThreshold(writeBufferSize * p->write_high_thresh_perc / 100.0),
    writeLowThreshold(writeBufferSize * p->write_low_thresh_perc / 100.0),
    minWritesPerSwitch(p->min_writes_per_switch), writesThisTime(0),
    tWTR(p->tWTR), tBURST(p->tBURST),
    tRCD(p->tRCD), tCL(p->tCL), tRP(p->tRP), tRAS(p->tRAS),
    tRFC(p->tRFC), tREFI(p->tREFI), tRRD(p->tRRD),
    tXAW(p->tXAW), activationLimit(p->activation_limit),
    memSchedPolicy(p->mem_sched_policy), addrMapping(p->addr_mapping),
    pageMgmt(p->page_policy),
    maxAccessesPerRow(p->max_accesses_per_row),
    frontendLatency(p->static_frontend_latency),
    backendLatency(p->static_backend_latency),
    busBusyUntil(0), prevArrival(0),
    newTime(0), startTickPrechargeAll(0), numBanksActive(0)
{
    // create the bank states based on the dimensions of the ranks and
    // banks
    banks.resize(ranksPerChannel);
    actTicks.resize(ranksPerChannel);
    for (size_t c = 0; c < ranksPerChannel; ++c) {
        banks[c].resize(banksPerRank);
        actTicks[c].resize(activationLimit, 0);
    }
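
    // Illustrative sizing (hypothetical values, not from any particular
    // configuration): 8 devices per rank, a burst length of 8 and an
    // 8-bit device bus give burstSize = (8 * 8 * 8) / 8 = 64 bytes;
    // with a 1 kByte device row buffer, rowBufferSize = 8 * 1024 = 8192
    // bytes and columnsPerRowBuffer = 8192 / 64 = 128.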
    // perform a basic check of the write thresholds
    if (p->write_low_thresh_perc >= p->write_high_thresh_perc)
        fatal("Write buffer low threshold %d must be smaller than the "
              "high threshold %d\n", p->write_low_thresh_perc,
              p->write_high_thresh_perc);

    // determine the rows per bank by looking at the total capacity
    uint64_t capacity = ULL(1) << ceilLog2(AbstractMemory::size());

    DPRINTF(DRAM, "Memory capacity %lld (%lld) bytes\n", capacity,
            AbstractMemory::size());

    DPRINTF(DRAM, "Row buffer size %d bytes with %d columns per row buffer\n",
            rowBufferSize, columnsPerRowBuffer);

    rowsPerBank = capacity / (rowBufferSize * banksPerRank * ranksPerChannel);
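
    // For example (again hypothetical), a 512 MByte channel with the
    // 8192-byte row buffer above, 8 banks per rank and 2 ranks yields
    // rowsPerBank = 2^29 / (8192 * 8 * 2) = 4096.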
    if (range.interleaved()) {
        if (channels != range.stripes())
            fatal("%s has %d interleaved address stripes but %d channel(s)\n",
                  name(), range.stripes(), channels);

        if (addrMapping == Enums::RoRaBaChCo) {
            if (rowBufferSize != range.granularity()) {
                fatal("Interleaving of %s doesn't match RoRaBaChCo "
                      "address map\n", name());
            }
        } else if (addrMapping == Enums::RoRaBaCoCh) {
            if (system()->cacheLineSize() != range.granularity()) {
                fatal("Interleaving of %s doesn't match RoRaBaCoCh "
                      "address map\n", name());
            }
        } else if (addrMapping == Enums::RoCoRaBaCh) {
            if (system()->cacheLineSize() != range.granularity())
                fatal("Interleaving of %s doesn't match RoCoRaBaCh "
                      "address map\n", name());
        }
    }
}

void
DRAMCtrl::init()
{
    if (!port.isConnected()) {
        fatal("DRAMCtrl %s is unconnected!\n", name());
    } else {
        port.sendRangeChange();
    }
}

void
DRAMCtrl::startup()
{
    // update the start tick for the precharge accounting to the
    // current tick
    startTickPrechargeAll = curTick();

    // print the configuration of the controller
    printParams();

    // kick off the refresh
    schedule(refreshEvent, curTick() + tREFI);
}
Tick
DRAMCtrl::recvAtomic(PacketPtr pkt)
{
    DPRINTF(DRAM, "recvAtomic: %s 0x%x\n", pkt->cmdString(), pkt->getAddr());

    // do the actual memory access and turn the packet into a response
    access(pkt);

    Tick latency = 0;
    if (!pkt->memInhibitAsserted() && pkt->hasData()) {
        // this value is not supposed to be accurate, just enough to
        // keep things going, mimic a closed page
        latency = tRP + tRCD + tCL;
    }
    return latency;
}
bool
DRAMCtrl::readQueueFull(unsigned int neededEntries) const
{
    DPRINTF(DRAM, "Read queue limit %d, current size %d, entries needed %d\n",
            readBufferSize, readQueue.size() + respQueue.size(),
            neededEntries);

    return
        (readQueue.size() + respQueue.size() + neededEntries) > readBufferSize;
}

bool
DRAMCtrl::writeQueueFull(unsigned int neededEntries) const
{
    DPRINTF(DRAM, "Write queue limit %d, current size %d, entries needed %d\n",
            writeBufferSize, writeQueue.size(), neededEntries);
    return (writeQueue.size() + neededEntries) > writeBufferSize;
}
DRAMCtrl::DRAMPacket*
DRAMCtrl::decodeAddr(PacketPtr pkt, Addr dramPktAddr, unsigned size,
                     bool isRead)
{
    // decode the address based on the address mapping scheme, with
    // Ro, Ra, Co, Ba and Ch denoting row, rank, column, bank and
    // channel, respectively
    uint8_t rank;
    uint8_t bank;
    uint16_t row;

    // truncate the address to the access granularity
    Addr addr = dramPktAddr / burstSize;

    // we have removed the lowest order address bits that denote the
    // position within the column
    if (addrMapping == Enums::RoRaBaChCo) {
        // the lowest order bits denote the column to ensure that
        // sequential cache lines occupy the same row
        addr = addr / columnsPerRowBuffer;

        // take out the channel part of the address
        addr = addr / channels;

        // after the channel bits, get the bank bits to interleave
        // over the banks
        bank = addr % banksPerRank;
        addr = addr / banksPerRank;

        // after the bank, we get the rank bits which thus interleaves
        // over the ranks
        rank = addr % ranksPerChannel;
        addr = addr / ranksPerChannel;

        // lastly, get the row bits
        row = addr % rowsPerBank;
        addr = addr / rowsPerBank;
    } else if (addrMapping == Enums::RoRaBaCoCh) {
        // take out the channel part of the address
        addr = addr / channels;

        // next, the column bits
        addr = addr / columnsPerRowBuffer;

        // after the column bits, we get the bank bits to interleave
        // over the banks
        bank = addr % banksPerRank;
        addr = addr / banksPerRank;

        // after the bank, we get the rank bits which thus interleaves
        // over the ranks
        rank = addr % ranksPerChannel;
        addr = addr / ranksPerChannel;

        // lastly, get the row bits
        row = addr % rowsPerBank;
        addr = addr / rowsPerBank;
    } else if (addrMapping == Enums::RoCoRaBaCh) {
        // optimise for closed page mode and utilise maximum
        // parallelism of the DRAM (at the cost of power)

        // take out the channel part of the address, note that this has
        // to match with how accesses are interleaved between the
        // controllers in the address mapping
        addr = addr / channels;

        // start with the bank bits, as this provides the maximum
        // opportunity for parallelism between requests
        bank = addr % banksPerRank;
        addr = addr / banksPerRank;

        // next get the rank bits
        rank = addr % ranksPerChannel;
        addr = addr / ranksPerChannel;

        // next the column bits which we do not need to keep track of
        // and simply skip past
        addr = addr / columnsPerRowBuffer;

        // lastly, get the row bits
        row = addr % rowsPerBank;
        addr = addr / rowsPerBank;
    } else
        panic("Unknown address mapping policy chosen!");

    assert(rank < ranksPerChannel);
    assert(bank < banksPerRank);
    assert(row < rowsPerBank);

    DPRINTF(DRAM, "Address: %lld Rank %d Bank %d Row %d\n",
            dramPktAddr, rank, bank, row);
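
    // Worked RoRaBaChCo example (hypothetical config: 64-byte bursts,
    // 128 columns per row buffer, 1 channel, 8 banks per rank, 2 ranks):
    // dramPktAddr 24576 becomes 24576 / 64 = 384 after truncation and
    // 384 / 128 = 3 after the column bits, so bank = 3 % 8 = 3,
    // rank = 0 and row = 0, i.e. the fourth bank of the first rank.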
    // create the corresponding DRAM packet with the entry time and
    // ready time set to the current tick, the latter will be updated
    // later
    uint16_t bank_id = banksPerRank * rank + bank;
    return new DRAMPacket(pkt, isRead, rank, bank, row, bank_id, dramPktAddr,
                          size, banks[rank][bank]);
}
void
DRAMCtrl::addToReadQueue(PacketPtr pkt, unsigned int pktCount)
{
    // only add to the read queue here. whenever the request is
    // eventually done, set the readyTime, and call schedule()
    assert(!pkt->isWrite());

    assert(pktCount != 0);

    // if the request size is larger than burst size, the pkt is split into
    // multiple DRAM packets
    // Note if the pkt starting address is not aligned to burst size, the
    // address of the first DRAM packet is kept unaligned. Subsequent DRAM
    // packets are aligned to burst size boundaries. This is to ensure we
    // accurately check read packets against packets in the write queue.
    Addr addr = pkt->getAddr();
    unsigned pktsServicedByWrQ = 0;
    BurstHelper* burst_helper = NULL;
    for (int cnt = 0; cnt < pktCount; ++cnt) {
        unsigned size = std::min((addr | (burstSize - 1)) + 1,
                                 pkt->getAddr() + pkt->getSize()) - addr;
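
        // Split example (hypothetical 64-byte bursts): a 128-byte read
        // starting at 0x40A0 produces bursts of 32 bytes (0x40A0), 64
        // bytes (0x40C0) and 32 bytes (0x4100); only the first burst
        // keeps its unaligned start address.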
        readPktSize[ceilLog2(size)]++;
        readBursts++;

        // First check write buffer to see if the data is already at
        // the controller
        bool foundInWrQ = false;
        for (auto i = writeQueue.begin(); i != writeQueue.end(); ++i) {
            // check if the read is subsumed in the write entry we are
            // looking at
            if ((*i)->addr <= addr &&
                (addr + size) <= ((*i)->addr + (*i)->size)) {
                foundInWrQ = true;
                servicedByWrQ++;
                pktsServicedByWrQ++;
                DPRINTF(DRAM, "Read to addr %lld with size %d serviced by "
                        "write queue\n", addr, size);
                bytesReadWrQ += burstSize;
                break;
            }
        }
        // If not found in the write q, make a DRAM packet and
        // push it onto the read queue
        if (!foundInWrQ) {

            // Make the burst helper for split packets
            if (pktCount > 1 && burst_helper == NULL) {
                DPRINTF(DRAM, "Read to addr %lld translates to %d "
                        "dram requests\n", pkt->getAddr(), pktCount);
                burst_helper = new BurstHelper(pktCount);
            }

            DRAMPacket* dram_pkt = decodeAddr(pkt, addr, size, true);
            dram_pkt->burstHelper = burst_helper;

            assert(!readQueueFull(1));
            rdQLenPdf[readQueue.size() + respQueue.size()]++;

            DPRINTF(DRAM, "Adding to read queue\n");

            readQueue.push_back(dram_pkt);

            // Update stats
            avgRdQLen = readQueue.size() + respQueue.size();
        }
        // Starting address of next dram pkt (aligned to burstSize boundary)
        addr = (addr | (burstSize - 1)) + 1;
    }

    // If all packets are serviced by write queue, we send the response back
    if (pktsServicedByWrQ == pktCount) {
        accessAndRespond(pkt, frontendLatency);
        return;
    }

    // Update how many split packets are serviced by write queue
    if (burst_helper != NULL)
        burst_helper->burstsServiced = pktsServicedByWrQ;

    // If we are not already scheduled to get the read request out of
    // the queue, do so now
    if (!nextReqEvent.scheduled() && !stopReads) {
        DPRINTF(DRAM, "Request scheduled immediately\n");
        schedule(nextReqEvent, curTick());
    }
}
void
DRAMCtrl::processWriteEvent()
{
    assert(!writeQueue.empty());

    DPRINTF(DRAM, "Beginning DRAM Write\n");
    Tick temp1 M5_VAR_USED = std::max(curTick(), busBusyUntil);
    Tick temp2 M5_VAR_USED = std::max(curTick(), maxBankFreeAt());

    chooseNextWrite();
    DRAMPacket* dram_pkt = writeQueue.front();
    // sanity check
    assert(dram_pkt->size <= burstSize);
    doDRAMAccess(dram_pkt);

    writeQueue.pop_front();
    delete dram_pkt;
    ++writesThisTime;

    DPRINTF(DRAM, "Writing, bus busy for %lld ticks, banks busy "
            "for %lld ticks\n", busBusyUntil - temp1, maxBankFreeAt() - temp2);

    // If we emptied the write queue, or got below the threshold and
    // are not draining, or we have reads waiting and have done enough
    // writes, then switch to reads. The retry above could already
    // have caused it to be scheduled, so first check
    if (writeQueue.empty() ||
        (writeQueue.size() < writeLowThreshold && !drainManager) ||
        (!readQueue.empty() && writesThisTime >= minWritesPerSwitch)) {
        // turn the bus back around for reads again
        busBusyUntil += tWTR;
        stopReads = false;
        writesThisTime = 0;

        if (!nextReqEvent.scheduled())
            schedule(nextReqEvent, busBusyUntil);
    } else {
        assert(!writeEvent.scheduled());
        DPRINTF(DRAM, "Next write scheduled at %lld\n", newTime);
        schedule(writeEvent, newTime);
    }

    if (retryWrReq) {
        retryWrReq = false;
        port.sendRetry();
    }

    // if there is nothing left in any queue, signal a drain
    if (writeQueue.empty() && readQueue.empty() &&
        respQueue.empty() && drainManager) {
        drainManager->signalDrainDone();
        drainManager = NULL;
    }
}
void
DRAMCtrl::triggerWrites()
{
    DPRINTF(DRAM, "Writes triggered at %lld\n", curTick());
    // Flag variable to stop any more read scheduling
    stopReads = true;

    Tick write_start_time = std::max(busBusyUntil, curTick()) + tWTR;

    DPRINTF(DRAM, "Writes scheduled at %lld\n", write_start_time);

    assert(write_start_time >= curTick());
    assert(!writeEvent.scheduled());
    schedule(writeEvent, write_start_time);
}
void
DRAMCtrl::addToWriteQueue(PacketPtr pkt, unsigned int pktCount)
{
    // only add to the write queue here. whenever the request is
    // eventually done, set the readyTime, and call schedule()
    assert(pkt->isWrite());

    // if the request size is larger than burst size, the pkt is split into
    // multiple DRAM packets
    Addr addr = pkt->getAddr();
    for (int cnt = 0; cnt < pktCount; ++cnt) {
        unsigned size = std::min((addr | (burstSize - 1)) + 1,
                                 pkt->getAddr() + pkt->getSize()) - addr;
        writePktSize[ceilLog2(size)]++;
        writeBursts++;

        // see if we can merge with an existing item in the write
        // queue and keep track of whether we have merged or not so we
        // can stop at that point and also avoid enqueueing a new
        // request
        bool merged = false;
        auto w = writeQueue.begin();
        while(!merged && w != writeQueue.end()) {
            // either of the two could be first, if they are the same
            // it does not matter which way we go
            if ((*w)->addr >= addr) {
                // the existing one starts after the new one, figure
                // out where the new one ends with respect to the
                // existing one
                if ((addr + size) >= ((*w)->addr + (*w)->size)) {
                    // check if the existing one is completely
                    // subsumed in the new one
                    DPRINTF(DRAM, "Merging write covering existing burst\n");
                    merged = true;
                    // update both the address and the size
                    (*w)->addr = addr;
                    (*w)->size = size;
                } else if ((addr + size) >= (*w)->addr &&
                           ((*w)->addr + (*w)->size - addr) <= burstSize) {
                    // the new one is just before or partially
                    // overlapping with the existing one, and together
                    // they fit within a burst
                    DPRINTF(DRAM, "Merging write before existing burst\n");
                    merged = true;
                    // the existing queue item needs to be adjusted with
                    // respect to both address and size
                    (*w)->size = (*w)->addr + (*w)->size - addr;
                    (*w)->addr = addr;
                }
            } else {
                // the new one starts after the current one, figure
                // out where the existing one ends with respect to the
                // new one
                if (((*w)->addr + (*w)->size) >= (addr + size)) {
                    // check if the new one is completely subsumed in the
                    // existing one
                    DPRINTF(DRAM, "Merging write into existing burst\n");
                    merged = true;
                    // no adjustments necessary
                } else if (((*w)->addr + (*w)->size) >= addr &&
                           (addr + size - (*w)->addr) <= burstSize) {
                    // the existing one is just before or partially
                    // overlapping with the new one, and together
                    // they fit within a burst
                    DPRINTF(DRAM, "Merging write after existing burst\n");
                    merged = true;
                    // the address is right, and only the size has
                    // to be adjusted
                    (*w)->size = addr + size - (*w)->addr;
                }
            }
            ++w;
        }
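
        // Merge example (hypothetical): two adjacent 16-byte writes at
        // 0x100 and 0x110 take the last branch above, growing the
        // existing entry to addr + size - (*w)->addr = 0x120 - 0x100 =
        // 32 bytes instead of enqueuing a second burst.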
        // if the item was not merged we need to create a new write
        // and enqueue it
        if (!merged) {
            DRAMPacket* dram_pkt = decodeAddr(pkt, addr, size, false);

            assert(writeQueue.size() < writeBufferSize);
            wrQLenPdf[writeQueue.size()]++;

            DPRINTF(DRAM, "Adding to write queue\n");

            writeQueue.push_back(dram_pkt);

            // Update stats
            avgWrQLen = writeQueue.size();
        } else {
            // keep track of the fact that this burst effectively
            // disappeared as it was merged with an existing one
            mergedWrBursts++;
        }

        // Starting address of next dram pkt (aligned to burstSize boundary)
        addr = (addr | (burstSize - 1)) + 1;
    }
    // we do not wait for the writes to be sent to the actual memory,
    // but instead take responsibility for the consistency here and
    // snoop the write queue for any upcoming reads
    // @todo, if a pkt size is larger than burst size, we might need a
    // different front end latency
    accessAndRespond(pkt, frontendLatency);

    // If your write buffer is starting to fill up, drain it!
    if (writeQueue.size() >= writeHighThreshold && !stopReads) {
        triggerWrites();
    }
}
void
DRAMCtrl::printParams() const
{
    // Sanity check print of important parameters
    DPRINTF(DRAM,
            "Memory controller %s physical organization\n"      \
            "Number of devices per rank   %d\n"                 \
            "Device bus width (in bits)   %d\n"                 \
            "DRAM data bus burst (bytes)  %d\n"                 \
            "Row buffer size (bytes)      %d\n"                 \
            "Columns per row buffer       %d\n"                 \
            "Rows per bank                %d\n"                 \
            "Banks per rank               %d\n"                 \
            "Ranks per channel            %d\n"                 \
            "Total mem capacity (bytes)   %u\n",
            name(), devicesPerRank, deviceBusWidth, burstSize, rowBufferSize,
            columnsPerRowBuffer, rowsPerBank, banksPerRank, ranksPerChannel,
            rowBufferSize * rowsPerBank * banksPerRank * ranksPerChannel);

    string scheduler = memSchedPolicy == Enums::fcfs ? "FCFS" : "FR-FCFS";
    string address_mapping = addrMapping == Enums::RoRaBaChCo ? "RoRaBaChCo" :
        (addrMapping == Enums::RoRaBaCoCh ? "RoRaBaCoCh" : "RoCoRaBaCh");
    string page_policy = pageMgmt == Enums::open ? "OPEN" :
        (pageMgmt == Enums::open_adaptive ? "OPEN (adaptive)" :
        (pageMgmt == Enums::close_adaptive ? "CLOSE (adaptive)" : "CLOSE"));

    DPRINTF(DRAM,
            "Memory controller %s characteristics\n"    \
            "Read buffer size     %d\n"                 \
            "Write buffer size    %d\n"                 \
            "Write high thresh    %d\n"                 \
            "Write low thresh     %d\n"                 \
            "Scheduler            %s\n"                 \
            "Address mapping      %s\n"                 \
            "Page policy          %s\n",
            name(), readBufferSize, writeBufferSize, writeHighThreshold,
            writeLowThreshold, scheduler, address_mapping, page_policy);

    DPRINTF(DRAM, "Memory controller %s timing specs\n" \
            "tRCD      %d ticks\n"                      \
            "tCL       %d ticks\n"                      \
            "tRP       %d ticks\n"                      \
            "tBURST    %d ticks\n"                      \
            "tRFC      %d ticks\n"                      \
            "tREFI     %d ticks\n"                      \
            "tWTR      %d ticks\n"                      \
            "tXAW (%d) %d ticks\n",
            name(), tRCD, tCL, tRP, tBURST, tRFC, tREFI, tWTR,
            activationLimit, tXAW);
}
void
DRAMCtrl::printQs() const {
    DPRINTF(DRAM, "===READ QUEUE===\n\n");
    for (auto i = readQueue.begin(); i != readQueue.end(); ++i) {
        DPRINTF(DRAM, "Read %lu\n", (*i)->addr);
    }
    DPRINTF(DRAM, "\n===RESP QUEUE===\n\n");
    for (auto i = respQueue.begin(); i != respQueue.end(); ++i) {
        DPRINTF(DRAM, "Response %lu\n", (*i)->addr);
    }
    DPRINTF(DRAM, "\n===WRITE QUEUE===\n\n");
    for (auto i = writeQueue.begin(); i != writeQueue.end(); ++i) {
        DPRINTF(DRAM, "Write %lu\n", (*i)->addr);
    }
}
bool
DRAMCtrl::recvTimingReq(PacketPtr pkt)
{
    /// @todo temporary hack to deal with memory corruption issues until
    /// 4-phase transactions are complete
    for (int x = 0; x < pendingDelete.size(); x++)
        delete pendingDelete[x];
    pendingDelete.clear();

    // This is where we enter from the outside world
    DPRINTF(DRAM, "recvTimingReq: request %s addr %lld size %d\n",
            pkt->cmdString(), pkt->getAddr(), pkt->getSize());

    // simply drop inhibited packets for now
    if (pkt->memInhibitAsserted()) {
        DPRINTF(DRAM, "Inhibited packet -- Dropping it now\n");
        pendingDelete.push_back(pkt);
        return true;
    }

    // Calc avg gap between requests
    if (prevArrival != 0) {
        totGap += curTick() - prevArrival;
    }
    prevArrival = curTick();

    // Find out how many dram packets a pkt translates to
    // If the burst size is equal or larger than the pkt size, then a pkt
    // translates to only one dram packet. Otherwise, a pkt translates to
    // multiple dram packets
    unsigned size = pkt->getSize();
    unsigned offset = pkt->getAddr() & (burstSize - 1);
    unsigned int dram_pkt_count = divCeil(offset + size, burstSize);
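
    // e.g. with 64-byte bursts, a 128-byte packet at address 0x40A0 has
    // offset 0x20, so dram_pkt_count = divCeil(0x20 + 128, 64) = 3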
    // check local buffers and do not accept if full
    if (pkt->isRead()) {
        assert(size != 0);
        if (readQueueFull(dram_pkt_count)) {
            DPRINTF(DRAM, "Read queue full, not accepting\n");
            // remember that we have to retry this port
            retryRdReq = true;
            numRdRetry++;
            return false;
        } else {
            addToReadQueue(pkt, dram_pkt_count);
            readReqs++;
            bytesReadSys += size;
        }
    } else if (pkt->isWrite()) {
        assert(size != 0);
        if (writeQueueFull(dram_pkt_count)) {
            DPRINTF(DRAM, "Write queue full, not accepting\n");
            // remember that we have to retry this port
            retryWrReq = true;
            numWrRetry++;
            return false;
        } else {
            addToWriteQueue(pkt, dram_pkt_count);
            writeReqs++;
            bytesWrittenSys += size;
        }
    } else {
        DPRINTF(DRAM,"Neither read nor write, ignore timing\n");
        neitherReadNorWrite++;
        accessAndRespond(pkt, 1);
    }

    return true;
}
void
DRAMCtrl::processRespondEvent()
{
    DPRINTF(DRAM,
            "processRespondEvent(): Some req has reached its readyTime\n");

    DRAMPacket* dram_pkt = respQueue.front();

    if (dram_pkt->burstHelper) {
        // it is a split packet
        dram_pkt->burstHelper->burstsServiced++;
        if (dram_pkt->burstHelper->burstsServiced ==
            dram_pkt->burstHelper->burstCount) {
            // we have now serviced all children packets of a system packet
            // so we can now respond to the requester
            // @todo we probably want to have a different front end and back
            // end latency for split packets
            accessAndRespond(dram_pkt->pkt, frontendLatency + backendLatency);
            delete dram_pkt->burstHelper;
            dram_pkt->burstHelper = NULL;
        }
    } else {
        // it is not a split packet
        accessAndRespond(dram_pkt->pkt, frontendLatency + backendLatency);
    }

    delete respQueue.front();
    respQueue.pop_front();

    if (!respQueue.empty()) {
        assert(respQueue.front()->readyTime >= curTick());
        assert(!respondEvent.scheduled());
        schedule(respondEvent, respQueue.front()->readyTime);
    } else {
        // if there is nothing left in any queue, signal a drain
        if (writeQueue.empty() && readQueue.empty() &&
            drainManager) {
            drainManager->signalDrainDone();
            drainManager = NULL;
        }
    }

    // We have made a location in the queue available at this point,
    // so if there is a read that was forced to wait, retry now
    if (retryRdReq) {
        retryRdReq = false;
        port.sendRetry();
    }
}
void
DRAMCtrl::chooseNextWrite()
{
    // This method does the arbitration between write requests. The
    // chosen packet is simply moved to the head of the write
    // queue. The other methods know that this is the place to
    // look. For example, with FCFS, this method does nothing
    assert(!writeQueue.empty());

    if (writeQueue.size() == 1) {
        DPRINTF(DRAM, "Single write request, nothing to do\n");
        return;
    }

    if (memSchedPolicy == Enums::fcfs) {
        // Do nothing, since the correct request is already head
    } else if (memSchedPolicy == Enums::frfcfs) {
        reorderQueue(writeQueue);
    } else
        panic("No scheduling policy chosen\n");

    DPRINTF(DRAM, "Selected next write request\n");
}
bool
DRAMCtrl::chooseNextRead()
{
    // This method does the arbitration between read requests. The
    // chosen packet is simply moved to the head of the queue. The
    // other methods know that this is the place to look. For example,
    // with FCFS, this method does nothing
    if (readQueue.empty()) {
        DPRINTF(DRAM, "No read request to select\n");
        return false;
    }

    // If there is only one request then there is nothing left to do
    if (readQueue.size() == 1)
        return true;

    if (memSchedPolicy == Enums::fcfs) {
        // Do nothing, since the request to serve is already the first
        // one in the read queue
    } else if (memSchedPolicy == Enums::frfcfs) {
        reorderQueue(readQueue);
    } else
        panic("No scheduling policy chosen!\n");

    DPRINTF(DRAM, "Selected next read request\n");
    return true;
}
void
DRAMCtrl::reorderQueue(std::deque<DRAMPacket*>& queue)
{
    // Only determine this when needed
    uint64_t earliest_banks = 0;

    // Search for row hits first, if no row hit is found then schedule the
    // packet to one of the earliest banks available
    bool found_earliest_pkt = false;
    auto selected_pkt_it = queue.begin();

    for (auto i = queue.begin(); i != queue.end(); ++i) {
        DRAMPacket* dram_pkt = *i;
        const Bank& bank = dram_pkt->bankRef;
        // Check if it is a row hit
        if (bank.openRow == dram_pkt->row) {
            DPRINTF(DRAM, "Row buffer hit\n");
            selected_pkt_it = i;
            break;
        } else if (!found_earliest_pkt) {
            // No row hit, go for first ready
            if (earliest_banks == 0)
                earliest_banks = minBankFreeAt(queue);

            // Bank is ready or is the first available bank
            if (bank.freeAt <= curTick() ||
                bits(earliest_banks, dram_pkt->bankId, dram_pkt->bankId)) {
                // Remember the packet to be scheduled to one of the earliest
                // banks available
                selected_pkt_it = i;
                found_earliest_pkt = true;
            }
        }
    }

    DRAMPacket* selected_pkt = *selected_pkt_it;
    queue.erase(selected_pkt_it);
    queue.push_front(selected_pkt);
}
void
DRAMCtrl::accessAndRespond(PacketPtr pkt, Tick static_latency)
{
    DPRINTF(DRAM, "Responding to Address %lld.. ", pkt->getAddr());

    bool needsResponse = pkt->needsResponse();
    // do the actual memory access which also turns the packet into a
    // response
    access(pkt);

    // turn packet around to go back to requester if response expected
    if (needsResponse) {
        // access already turned the packet into a response
        assert(pkt->isResponse());

        // @todo someone should pay for this
        pkt->busFirstWordDelay = pkt->busLastWordDelay = 0;

        // queue the packet in the response queue to be sent out after
        // the static latency has passed
        port.schedTimingResp(pkt, curTick() + static_latency);
    } else {
        // @todo the packet is going to be deleted, and the DRAMPacket
        // is still having a pointer to it
        pendingDelete.push_back(pkt);
    }

    DPRINTF(DRAM, "Done\n");
}
pair<Tick, Tick>
DRAMCtrl::estimateLatency(DRAMPacket* dram_pkt, Tick inTime)
{
    // If a request reaches a bank at tick 'inTime', how much time
    // *after* that does it take to finish the request, depending
    // on bank status and page open policy. Note that this method
    // considers only the time taken for the actual read or write
    // to complete, NOT any additional time thereafter for tRAS or
    // tRP.
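
    // Roughly, for the open-page policies a row miss pays
    //   wait until max(bank.freeAt, bank.tRASDoneAt)
    //   + tRP (only if a row is open) + tRCD + tCL
    // plus any wait imposed by actAllowedAt, while a row hit pays
    // only tCL plus any wait for the bank to become free.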
    Tick accLat = 0;
    Tick bankLat = 0;
    rowHitFlag = false;
    Tick potentialActTick;

    const Bank& bank = dram_pkt->bankRef;
    // open-page policy or close_adaptive policy
    if (pageMgmt == Enums::open || pageMgmt == Enums::open_adaptive ||
        pageMgmt == Enums::close_adaptive) {
        if (bank.openRow == dram_pkt->row) {
            // When we have a row-buffer hit,
            // we don't care about tRAS having expired or not,
            // but do care about bank being free for access
            rowHitFlag = true;

            // When a series of requests arrive to the same row,
            // DDR systems are capable of streaming data continuously
            // at maximum bandwidth (subject to tCCD). Here, we approximate
            // this condition, and assume that whenever a bank is already
            // busy and a new request comes in, it can be completed with no
            // penalty beyond waiting for the existing read to complete.
            if (bank.freeAt > inTime) {
                accLat += bank.freeAt - inTime;
                bankLat += tBURST;
            } else {
                // CAS latency only
                accLat += tCL;
                bankLat += tCL;
            }
        } else {
            // Row-buffer miss, need to close existing row
            // once tRAS has expired, then open the new one,
            // then add cas latency.
            Tick freeTime = std::max(bank.tRASDoneAt, bank.freeAt);

            if (freeTime > inTime)
                accLat += freeTime - inTime;

            // If there is no open row (open adaptive), then there
            // is no precharge delay, otherwise go with tRP
            Tick precharge_delay = bank.openRow == -1 ? 0 : tRP;

            // The bank is free, and you may be able to activate
            potentialActTick = inTime + accLat + precharge_delay;
            if (potentialActTick < bank.actAllowedAt)
                accLat += bank.actAllowedAt - potentialActTick;

            accLat += precharge_delay + tRCD + tCL;
            bankLat += precharge_delay + tRCD + tCL;
        }
    } else if (pageMgmt == Enums::close) {
        // With a close page policy, no notion of
        // bank.tRASDoneAt
        if (bank.freeAt > inTime)
            accLat += bank.freeAt - inTime;

        // The bank is free, and you may be able to activate
        potentialActTick = inTime + accLat;
        if (potentialActTick < bank.actAllowedAt)
            accLat += bank.actAllowedAt - potentialActTick;

        // page already closed, simply open the row, and
        // add cas latency
        accLat += tRCD + tCL;
        bankLat += tRCD + tCL;
    } else
        panic("No page management policy chosen\n");

    DPRINTF(DRAM, "Returning < %lld, %lld > from estimateLatency()\n",
            bankLat, accLat);

    return make_pair(bankLat, accLat);
}
void
DRAMCtrl::processNextReqEvent()
{
    scheduleNextReq();
}
void
DRAMCtrl::recordActivate(Tick act_tick, uint8_t rank, uint8_t bank)
{
    assert(0 <= rank && rank < ranksPerChannel);
    assert(actTicks[rank].size() == activationLimit);

    DPRINTF(DRAM, "Activate at tick %d\n", act_tick);

    // Tracking accesses after all banks are precharged.
    // startTickPrechargeAll: is the tick when all the banks were again
    // precharged. The difference between act_tick and startTickPrechargeAll
    // gives the time for which DRAM doesn't get any accesses after refreshing
    // or after a page is closed in closed-page or open-adaptive-page policy.
    if ((numBanksActive == 0) && (act_tick > startTickPrechargeAll)) {
        prechargeAllTime += act_tick - startTickPrechargeAll;
    }

    // No need to update the number of active banks for closed-page policy,
    // as only one bank will be activated at any given point, which will be
    // instantly precharged
    if (pageMgmt == Enums::open || pageMgmt == Enums::open_adaptive ||
        pageMgmt == Enums::close_adaptive)
        ++numBanksActive;

    // start by enforcing tRRD
    for (int i = 0; i < banksPerRank; i++) {
        // next activate must not happen before tRRD
        banks[rank][i].actAllowedAt = act_tick + tRRD;
    }
    // tRC should be added to activation tick of the bank currently accessed,
    // where tRC = tRAS + tRP, this is just for a check as actAllowedAt for
    // the same bank is already captured by bank.freeAt and bank.tRASDoneAt
    banks[rank][bank].actAllowedAt = act_tick + tRAS + tRP;

    // next, we deal with tXAW, if the activation limit is disabled
    // then we are done
    if (actTicks[rank].empty())
        return;

    // sanity check
    if (actTicks[rank].back() && (act_tick - actTicks[rank].back()) < tXAW) {
        // @todo For now, stick with a warning
        warn("Got %d activates in window %d (%d - %d) which is smaller "
             "than %d\n", activationLimit, act_tick - actTicks[rank].back(),
             act_tick, actTicks[rank].back(), tXAW);
    }

    // shift the times used for the book keeping, the last element
    // (highest index) is the oldest one and hence the lowest value
    actTicks[rank].pop_back();

    // record a new activation (in the future)
    actTicks[rank].push_front(act_tick);
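
    // Example with activationLimit X = 4 and tXAW = 30 ticks: after
    // activates at ticks 10, 15, 20 and 25, actTicks holds
    // {25, 20, 15, 10}, so a fifth activate attempted at tick 30 is
    // pushed out to 10 + tXAW = tick 40 by the check below.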
    // cannot activate more than X times in time window tXAW, push the
    // next one (the X + 1'st activate) to be tXAW away from the
    // oldest in our window of X
    if (actTicks[rank].back() && (act_tick - actTicks[rank].back()) < tXAW) {
        DPRINTF(DRAM, "Enforcing tXAW with X = %d, next activate no earlier "
                "than %d\n", activationLimit, actTicks[rank].back() + tXAW);
        for (int j = 0; j < banksPerRank; j++)
            // next activate must not happen before end of window
            banks[rank][j].actAllowedAt = actTicks[rank].back() + tXAW;
    }
}
void
DRAMCtrl::doDRAMAccess(DRAMPacket* dram_pkt)
{
    DPRINTF(DRAM, "Timing access to addr %lld, rank/bank/row %d %d %d\n",
            dram_pkt->addr, dram_pkt->rank, dram_pkt->bank, dram_pkt->row);

    // estimate the bank and access latency
    pair<Tick, Tick> lat = estimateLatency(dram_pkt, curTick());
    Tick bankLat = lat.first;
    Tick accessLat = lat.second;
    Tick actTick;
    // This request was woken up at this time based on a prior call
    // to estimateLatency(). However, between then and now, both the
    // accessLatency and/or busBusyUntil may have changed. We need
    // to correct for that.
    Tick addDelay = (curTick() + accessLat < busBusyUntil) ?
        busBusyUntil - (curTick() + accessLat) : 0;

    Bank& bank = dram_pkt->bankRef;

    // Update bank state
    if (pageMgmt == Enums::open || pageMgmt == Enums::open_adaptive ||
        pageMgmt == Enums::close_adaptive) {
        bank.freeAt = curTick() + addDelay + accessLat;

        // If you activated a new row due to this access, the next access
        // will have to respect tRAS for this bank.
        if (!rowHitFlag) {
            // any waiting for banks is accounted for in freeAt
            actTick = bank.freeAt - tCL - tRCD;
            bank.tRASDoneAt = actTick + tRAS;
            recordActivate(actTick, dram_pkt->rank, dram_pkt->bank);

            // if we closed an open row as a result of this access,
            // then sample the number of bytes accessed before
            // resetting it
            if (bank.openRow != -1)
                bytesPerActivate.sample(bank.bytesAccessed);

            // update the open row
            bank.openRow = dram_pkt->row;

            // start counting anew, this covers both the case when we
            // auto-precharged, and when this access is forced to
            // precharge
            bank.bytesAccessed = 0;
            bank.rowAccesses = 0;
        }

        // increment the bytes accessed and the accesses per row
        bank.bytesAccessed += burstSize;
        ++bank.rowAccesses;
        // if we reached the max, then issue with an auto-precharge
        bool auto_precharge = bank.rowAccesses == maxAccessesPerRow;

        // if we did not hit the limit, we might still want to
        // auto-precharge
        if (!auto_precharge &&
            (pageMgmt == Enums::open_adaptive ||
             pageMgmt == Enums::close_adaptive)) {
            // a twist on the open and close page policies:
            // 1) open_adaptive page policy does not blindly keep the
            // page open, but closes it if there are no row hits, and there
            // are bank conflicts in the queue
            // 2) close_adaptive page policy does not blindly close the
            // page, but closes it only if there are no row hits in the queue.
            // In this case, only force an auto precharge when there
            // are no same page hits in the queue
            bool got_more_hits = false;
            bool got_bank_conflict = false;

            // either look at the read queue or write queue
            const deque<DRAMPacket*>& queue = dram_pkt->isRead ? readQueue :
                writeQueue;
            auto p = queue.begin();
            // make sure we are not considering the packet that we are
            // currently dealing with (which is the head of the queue)
            ++p;

            // keep on looking until we have found required condition or
            // reached the end
            while (!(got_more_hits &&
                     (got_bank_conflict ||
                      pageMgmt == Enums::close_adaptive)) &&
                   p != queue.end()) {
                bool same_rank_bank = (dram_pkt->rank == (*p)->rank) &&
                    (dram_pkt->bank == (*p)->bank);
                bool same_row = dram_pkt->row == (*p)->row;
                got_more_hits |= same_rank_bank && same_row;
                got_bank_conflict |= same_rank_bank && !same_row;
                ++p;
            }

            // auto pre-charge when either
            // 1) open_adaptive policy, we have not got any more hits, and
            // have a bank conflict
            // 2) close_adaptive policy and we have not got any more hits
            auto_precharge = !got_more_hits &&
                (got_bank_conflict || pageMgmt == Enums::close_adaptive);
        }
        // if this access should use auto-precharge, then we are
        // closing the row
        if (auto_precharge) {
            bank.openRow = -1;
            bank.freeAt = std::max(bank.freeAt, bank.tRASDoneAt) + tRP;
            --numBanksActive;
            if (numBanksActive == 0) {
                startTickPrechargeAll = std::max(startTickPrechargeAll,
                                                 bank.freeAt);
                DPRINTF(DRAM, "All banks precharged at tick: %ld\n",
                        startTickPrechargeAll);
            }

            // sample the bytes per activate here since we are closing
            // the row
            bytesPerActivate.sample(bank.bytesAccessed);

            DPRINTF(DRAM, "Auto-precharged bank: %d\n", dram_pkt->bankId);
        }

        DPRINTF(DRAM, "doDRAMAccess::bank.freeAt is %lld\n", bank.freeAt);
    } else if (pageMgmt == Enums::close) {
        actTick = curTick() + addDelay + accessLat - tRCD - tCL;
        recordActivate(actTick, dram_pkt->rank, dram_pkt->bank);

        // If the DRAM has a very quick tRAS, bank can be made free
        // after consecutive tCL,tRCD,tRP times. In general, however,
        // an additional wait is required to respect tRAS.
        bank.freeAt = std::max(actTick + tRAS + tRP,
                               actTick + tRCD + tCL + tRP);
        DPRINTF(DRAM, "doDRAMAccess::bank.freeAt is %lld\n", bank.freeAt);
        bytesPerActivate.sample(burstSize);
        startTickPrechargeAll = std::max(startTickPrechargeAll, bank.freeAt);
    } else
        panic("No page management policy chosen\n");

    // Update request parameters
    dram_pkt->readyTime = curTick() + addDelay + accessLat + tBURST;
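
    // e.g. a row hit to a free bank issued now, with no bus conflict
    // (addDelay = 0), has accessLat = tCL and is thus ready at
    // curTick() + tCL + tBURST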
    DPRINTF(DRAM, "Req %lld: curtick is %lld accessLat is %d "      \
            "readytime is %lld busbusyuntil is %lld. "              \
            "Scheduling at readyTime\n", dram_pkt->addr,
            curTick(), accessLat, dram_pkt->readyTime, busBusyUntil);

    // Make sure requests are not overlapping on the databus
    assert(dram_pkt->readyTime - busBusyUntil >= tBURST);

    // Update bus state
    busBusyUntil = dram_pkt->readyTime;

    DPRINTF(DRAM,"Access time is %lld\n",
            dram_pkt->readyTime - dram_pkt->entryTime);

    // Update the minimum timing between the requests
    newTime = (busBusyUntil > tRP + tRCD + tCL) ?
        std::max(busBusyUntil - (tRP + tRCD + tCL), curTick()) : curTick();

    // Update the access related stats
    if (dram_pkt->isRead) {
        if (rowHitFlag)
            readRowHits++;
        bytesReadDRAM += burstSize;
        perBankRdBursts[dram_pkt->bankId]++;
    } else {
        if (rowHitFlag)
            writeRowHits++;
        bytesWritten += burstSize;
        perBankWrBursts[dram_pkt->bankId]++;
    }
    // At this point, commonality between reads and writes ends.
    // For writes, we are done since we long ago responded to the
    // requester
    if (!dram_pkt->isRead) {
        return;
    }

    // Update latency stats
    totMemAccLat += dram_pkt->readyTime - dram_pkt->entryTime;
    totBankLat += bankLat;
    totBusLat += tBURST;
    totQLat += dram_pkt->readyTime - dram_pkt->entryTime - bankLat - tBURST;

    // At this point we're done dealing with the request
    // It will be moved to a separate response queue with a
    // correct readyTime, and eventually be sent back at that
    // time
    moveToRespQ();

    // Schedule the next read event
    if (!nextReqEvent.scheduled() && !stopReads) {
        schedule(nextReqEvent, newTime);
    } else {
        if (newTime < nextReqEvent.when())
            reschedule(nextReqEvent, newTime);
    }
}
void
DRAMCtrl::moveToRespQ()
{
    // Remove from read queue
    DRAMPacket* dram_pkt = readQueue.front();
    readQueue.pop_front();

    // sanity check
    assert(dram_pkt->size <= burstSize);

    // Insert into response queue sorted by readyTime
    // It will be sent back to the requester at its
    // readyTime
    if (respQueue.empty()) {
        respQueue.push_front(dram_pkt);
        assert(!respondEvent.scheduled());
        assert(dram_pkt->readyTime >= curTick());
        schedule(respondEvent, dram_pkt->readyTime);
    } else {
        bool done = false;
        auto i = respQueue.begin();
        while (!done && i != respQueue.end()) {
            if ((*i)->readyTime > dram_pkt->readyTime) {
                respQueue.insert(i, dram_pkt);
                done = true;
            }
            ++i;
        }

        if (!done)
            respQueue.push_back(dram_pkt);

        assert(respondEvent.scheduled());

        if (respQueue.front()->readyTime < respondEvent.when()) {
            assert(respQueue.front()->readyTime >= curTick());
            reschedule(respondEvent, respQueue.front()->readyTime);
        }
    }
}
void
DRAMCtrl::scheduleNextReq()
{
    DPRINTF(DRAM, "Reached scheduleNextReq()\n");

    // Figure out which read request goes next, and move it to the
    // front of the read queue
    if (!chooseNextRead()) {
        // In the case there is no read request to go next, trigger
        // writes if we have passed the low threshold (or if we are
        // draining)
        if (!writeQueue.empty() && !writeEvent.scheduled() &&
            (writeQueue.size() > writeLowThreshold || drainManager))
            triggerWrites();
    } else {
        doDRAMAccess(readQueue.front());
    }
}
Tick
DRAMCtrl::maxBankFreeAt() const
{
    Tick banksFree = 0;

    for (int i = 0; i < ranksPerChannel; i++)
        for (int j = 0; j < banksPerRank; j++)
            banksFree = std::max(banks[i][j].freeAt, banksFree);

    return banksFree;
}
uint64_t
DRAMCtrl::minBankFreeAt(const deque<DRAMPacket*>& queue) const
{
    uint64_t bank_mask = 0;
    Tick freeAt = MaxTick;

    // determine if we have queued transactions targeting the
    // banks in question
    vector<bool> got_waiting(ranksPerChannel * banksPerRank, false);
    for (auto p = queue.begin(); p != queue.end(); ++p) {
        got_waiting[(*p)->bankId] = true;
    }
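
    // The result is a bit mask over bank IDs (bankId = banksPerRank *
    // rank + bank): a set bit marks a bank that both has waiting
    // packets and becomes free earliest, e.g. mask 0b100 if only
    // bank 2 of rank 0 qualifies.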
    for (int i = 0; i < ranksPerChannel; i++) {
        for (int j = 0; j < banksPerRank; j++) {
            // if we have waiting requests for the bank, and it is
            // amongst the first available, update the mask
            if (got_waiting[i * banksPerRank + j] &&
                banks[i][j].freeAt <= freeAt) {
                // reset bank mask if new minimum is found
                if (banks[i][j].freeAt < freeAt)
                    bank_mask = 0;
                // set the bit corresponding to the available bank,
                // indexed consistently with bankId
                uint8_t bit_index = i * banksPerRank + j;
                replaceBits(bank_mask, bit_index, bit_index, 1);
                freeAt = banks[i][j].freeAt;
            }
        }
    }
    return bank_mask;
}
void
DRAMCtrl::processRefreshEvent()
{
    DPRINTF(DRAM, "Refreshing at tick %ld\n", curTick());

    Tick banksFree = std::max(curTick(), maxBankFreeAt()) + tRFC;

    for (int i = 0; i < ranksPerChannel; i++)
        for (int j = 0; j < banksPerRank; j++) {
            banks[i][j].freeAt = banksFree;
            banks[i][j].openRow = -1;
        }

    // update startTickPrechargeAll and the active bank count, as all
    // banks are now precharged
    numBanksActive = 0;
    startTickPrechargeAll = banksFree;

    schedule(refreshEvent, curTick() + tREFI);
}
void
DRAMCtrl::regStats()
{
    using namespace Stats;

    AbstractMemory::regStats();

    readReqs
        .name(name() + ".readReqs")
        .desc("Number of read requests accepted");

    writeReqs
        .name(name() + ".writeReqs")
        .desc("Number of write requests accepted");

    readBursts
        .name(name() + ".readBursts")
        .desc("Number of DRAM read bursts, "
              "including those serviced by the write queue");

    writeBursts
        .name(name() + ".writeBursts")
        .desc("Number of DRAM write bursts, "
              "including those merged in the write queue");

    servicedByWrQ
        .name(name() + ".servicedByWrQ")
        .desc("Number of DRAM read bursts serviced by the write queue");

    mergedWrBursts
        .name(name() + ".mergedWrBursts")
        .desc("Number of DRAM write bursts merged with an existing one");

    neitherReadNorWrite
        .name(name() + ".neitherReadNorWriteReqs")
        .desc("Number of requests that are neither read nor write");

    perBankRdBursts
        .init(banksPerRank * ranksPerChannel)
        .name(name() + ".perBankRdBursts")
        .desc("Per bank read bursts");

    perBankWrBursts
        .init(banksPerRank * ranksPerChannel)
        .name(name() + ".perBankWrBursts")
        .desc("Per bank write bursts");

    avgRdQLen
        .name(name() + ".avgRdQLen")
        .desc("Average read queue length when enqueuing")
        .precision(2);

    avgWrQLen
        .name(name() + ".avgWrQLen")
        .desc("Average write queue length when enqueuing")
        .precision(2);

    totQLat
        .name(name() + ".totQLat")
        .desc("Total ticks spent queuing");

    totBankLat
        .name(name() + ".totBankLat")
        .desc("Total ticks spent accessing banks");

    totBusLat
        .name(name() + ".totBusLat")
        .desc("Total ticks spent in databus transfers");

    totMemAccLat
        .name(name() + ".totMemAccLat")
        .desc("Total ticks spent from burst creation until serviced "
              "by the DRAM");

    avgQLat
        .name(name() + ".avgQLat")
        .desc("Average queueing delay per DRAM burst")
        .precision(2);

    avgQLat = totQLat / (readBursts - servicedByWrQ);

    avgBankLat
        .name(name() + ".avgBankLat")
        .desc("Average bank access latency per DRAM burst")
        .precision(2);

    avgBankLat = totBankLat / (readBursts - servicedByWrQ);

    avgBusLat
        .name(name() + ".avgBusLat")
        .desc("Average bus latency per DRAM burst")
        .precision(2);

    avgBusLat = totBusLat / (readBursts - servicedByWrQ);

    avgMemAccLat
        .name(name() + ".avgMemAccLat")
        .desc("Average memory access latency per DRAM burst")
        .precision(2);

    avgMemAccLat = totMemAccLat / (readBursts - servicedByWrQ);

    numRdRetry
        .name(name() + ".numRdRetry")
        .desc("Number of times read queue was full causing retry");

    numWrRetry
        .name(name() + ".numWrRetry")
        .desc("Number of times write queue was full causing retry");

    readRowHits
        .name(name() + ".readRowHits")
        .desc("Number of row buffer hits during reads");

    writeRowHits
        .name(name() + ".writeRowHits")
        .desc("Number of row buffer hits during writes");

    readRowHitRate
        .name(name() + ".readRowHitRate")
        .desc("Row buffer hit rate for reads")
        .precision(2);

    readRowHitRate = (readRowHits / (readBursts - servicedByWrQ)) * 100;

    writeRowHitRate
        .name(name() + ".writeRowHitRate")
        .desc("Row buffer hit rate for writes")
        .precision(2);

    writeRowHitRate = (writeRowHits / (writeBursts - mergedWrBursts)) * 100;

    readPktSize
        .init(ceilLog2(burstSize) + 1)
        .name(name() + ".readPktSize")
        .desc("Read request sizes (log2)");

    writePktSize
        .init(ceilLog2(burstSize) + 1)
        .name(name() + ".writePktSize")
        .desc("Write request sizes (log2)");

    rdQLenPdf
        .init(readBufferSize)
        .name(name() + ".rdQLenPdf")
        .desc("What read queue length does an incoming req see");

    wrQLenPdf
        .init(writeBufferSize)
        .name(name() + ".wrQLenPdf")
        .desc("What write queue length does an incoming req see");

    bytesPerActivate
        .init(maxAccessesPerRow)
        .name(name() + ".bytesPerActivate")
        .desc("Bytes accessed per row activation")
        .flags(nozero);

    bytesReadDRAM
        .name(name() + ".bytesReadDRAM")
        .desc("Total number of bytes read from DRAM");

    bytesReadWrQ
        .name(name() + ".bytesReadWrQ")
        .desc("Total number of bytes read from write queue");

    bytesWritten
        .name(name() + ".bytesWritten")
        .desc("Total number of bytes written to DRAM");

    bytesReadSys
        .name(name() + ".bytesReadSys")
        .desc("Total read bytes from the system interface side");

    bytesWrittenSys
        .name(name() + ".bytesWrittenSys")
        .desc("Total written bytes from the system interface side");

    avgRdBW
        .name(name() + ".avgRdBW")
        .desc("Average DRAM read bandwidth in MiByte/s")
        .precision(2);

    avgRdBW = (bytesReadDRAM / 1000000) / simSeconds;

    avgWrBW
        .name(name() + ".avgWrBW")
        .desc("Average achieved write bandwidth in MiByte/s")
        .precision(2);

    avgWrBW = (bytesWritten / 1000000) / simSeconds;

    avgRdBWSys
        .name(name() + ".avgRdBWSys")
        .desc("Average system read bandwidth in MiByte/s")
        .precision(2);

    avgRdBWSys = (bytesReadSys / 1000000) / simSeconds;

    avgWrBWSys
        .name(name() + ".avgWrBWSys")
        .desc("Average system write bandwidth in MiByte/s")
        .precision(2);

    avgWrBWSys = (bytesWrittenSys / 1000000) / simSeconds;

    peakBW
        .name(name() + ".peakBW")
        .desc("Theoretical peak bandwidth in MiByte/s")
        .precision(2);

    peakBW = (SimClock::Frequency / tBURST) * burstSize / 1000000;
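
    // Sanity check of the formula with illustrative DDR3-1600 x64
    // numbers: tBURST = 5 ns (5000 ticks) and burstSize = 64 bytes give
    // peakBW = (1e12 / 5000) * 64 / 1000000 = 12800 MiByte/s.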
    busUtil
        .name(name() + ".busUtil")
        .desc("Data bus utilization in percentage")
        .precision(2);

    busUtil = (avgRdBW + avgWrBW) / peakBW * 100;

    totGap
        .name(name() + ".totGap")
        .desc("Total gap between requests");

    avgGap
        .name(name() + ".avgGap")
        .desc("Average gap between requests")
        .precision(2);

    avgGap = totGap / (readReqs + writeReqs);

    // Stats for DRAM Power calculation based on Micron datasheet
    busUtilRead
        .name(name() + ".busUtilRead")
        .desc("Data bus utilization in percentage for reads")
        .precision(2);

    busUtilRead = avgRdBW / peakBW * 100;

    busUtilWrite
        .name(name() + ".busUtilWrite")
        .desc("Data bus utilization in percentage for writes")
        .precision(2);

    busUtilWrite = avgWrBW / peakBW * 100;

    pageHitRate
        .name(name() + ".pageHitRate")
        .desc("Row buffer hit rate, read and write combined")
        .precision(2);

    pageHitRate = (writeRowHits + readRowHits) /
        (writeBursts - mergedWrBursts + readBursts - servicedByWrQ) * 100;

    prechargeAllPercent
        .name(name() + ".prechargeAllPercent")
        .desc("Percentage of time for which DRAM has all the banks in "
              "precharge state")
        .precision(2);

    prechargeAllPercent = prechargeAllTime / simTicks * 100;
}
void
DRAMCtrl::recvFunctional(PacketPtr pkt)
{
    // rely on the abstract memory
    functionalAccess(pkt);
}
BaseSlavePort&
DRAMCtrl::getSlavePort(const string &if_name, PortID idx)
{
    if (if_name != "port") {
        return MemObject::getSlavePort(if_name, idx);
    } else {
        return port;
    }
}
unsigned int
DRAMCtrl::drain(DrainManager *dm)
{
    unsigned int count = port.drain(dm);

    // if there is anything in any of our internal queues, keep track
    // of that as well
    if (!(writeQueue.empty() && readQueue.empty() &&
          respQueue.empty())) {
        DPRINTF(Drain, "DRAM controller not drained, write: %d, read: %d,"
                " resp: %d\n", writeQueue.size(), readQueue.size(),
                respQueue.size());
        ++count;
        drainManager = dm;
        // the only part that is not drained automatically over time
        // is the write queue, thus trigger writes if there are any
        // waiting and no reads waiting, otherwise wait until the
        // outstanding reads are done
        if (readQueue.empty() && !writeQueue.empty() &&
            !writeEvent.scheduled())
            triggerWrites();
    }

    if (count)
        setDrainState(Drainable::Draining);
    else
        setDrainState(Drainable::Drained);
    return count;
}
DRAMCtrl::MemoryPort::MemoryPort(const std::string& name, DRAMCtrl& _memory)
    : QueuedSlavePort(name, &_memory, queue), queue(_memory, *this),
      memory(_memory)
{ }
AddrRangeList
DRAMCtrl::MemoryPort::getAddrRanges() const
{
    AddrRangeList ranges;
    ranges.push_back(memory.getAddrRange());
    return ranges;
}
)
1689 pkt
->pushLabel(memory
.name());
1691 if (!queue
.checkFunctional(pkt
)) {
1692 // Default implementation of SimpleTimingPort::recvFunctional()
1693 // calls recvAtomic() and throws away the latency; we can save a
1694 // little here by just not calculating the latency.
1695 memory
.recvFunctional(pkt
);
1702 DRAMCtrl::MemoryPort::recvAtomic(PacketPtr pkt
)
1704 return memory
.recvAtomic(pkt
);
bool
DRAMCtrl::MemoryPort::recvTimingReq(PacketPtr pkt)
{
    // pass it to the memory controller
    return memory.recvTimingReq(pkt);
}

DRAMCtrl*
DRAMCtrlParams::create()
{
    return new DRAMCtrl(this);
}