/*
 * Copyright (c) 2010-2014 ARM Limited
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2013 Amin Farmahini-Farahani
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Andreas Hansson
 */
#include "base/bitfield.hh"
#include "base/trace.hh"
#include "debug/DRAM.hh"
#include "debug/DRAMPower.hh"
#include "debug/DRAMState.hh"
#include "debug/Drain.hh"
#include "mem/dram_ctrl.hh"
#include "sim/system.hh"

using namespace std;
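// A minimal worked example of the geometry derived in the constructor
// below (hypothetical DDR3-style parameters, not taken from any particular
// config): with 8 devices per rank, a burst length of 8 and an 8-bit
// device bus, burstSize = (8 * 8 * 8) / 8 = 64 bytes; a 1kB device row
// buffer then gives rowBufferSize = 8 * 1024 = 8192 bytes and
// columnsPerRowBuffer = 8192 / 64 = 128.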
DRAMCtrl::DRAMCtrl(const DRAMCtrlParams* p) :
    AbstractMemory(p),
    port(name() + ".port", *this),
    retryRdReq(false), retryWrReq(false),
    busState(READ),
    nextReqEvent(this), respondEvent(this),
    drainManager(NULL),
    deviceSize(p->device_size),
    deviceBusWidth(p->device_bus_width), burstLength(p->burst_length),
    deviceRowBufferSize(p->device_rowbuffer_size),
    devicesPerRank(p->devices_per_rank),
    burstSize((devicesPerRank * burstLength * deviceBusWidth) / 8),
    rowBufferSize(devicesPerRank * deviceRowBufferSize),
    columnsPerRowBuffer(rowBufferSize / burstSize),
    columnsPerStripe(range.granularity() / burstSize),
    ranksPerChannel(p->ranks_per_channel),
    bankGroupsPerRank(p->bank_groups_per_rank),
    bankGroupArch(p->bank_groups_per_rank > 0),
    banksPerRank(p->banks_per_rank), channels(p->channels), rowsPerBank(0),
    readBufferSize(p->read_buffer_size),
    writeBufferSize(p->write_buffer_size),
    writeHighThreshold(writeBufferSize * p->write_high_thresh_perc / 100.0),
    writeLowThreshold(writeBufferSize * p->write_low_thresh_perc / 100.0),
    minWritesPerSwitch(p->min_writes_per_switch),
    writesThisTime(0), readsThisTime(0),
    tCK(p->tCK), tWTR(p->tWTR), tRTW(p->tRTW), tCS(p->tCS), tBURST(p->tBURST),
    tCCD_L(p->tCCD_L), tRCD(p->tRCD), tCL(p->tCL), tRP(p->tRP), tRAS(p->tRAS),
    tWR(p->tWR), tRTP(p->tRTP), tRFC(p->tRFC), tREFI(p->tREFI), tRRD(p->tRRD),
    tRRD_L(p->tRRD_L), tXAW(p->tXAW), activationLimit(p->activation_limit),
    memSchedPolicy(p->mem_sched_policy), addrMapping(p->addr_mapping),
    pageMgmt(p->page_policy),
    maxAccessesPerRow(p->max_accesses_per_row),
    frontendLatency(p->static_frontend_latency),
    backendLatency(p->static_backend_latency),
    busBusyUntil(0), prevArrival(0),
    nextReqTime(0), activeRank(0), timeStampOffset(0)
{
    for (int i = 0; i < ranksPerChannel; i++) {
        Rank* rank = new Rank(*this, p);
        ranks.push_back(rank);

        rank->actTicks.resize(activationLimit, 0);
        rank->banks.resize(banksPerRank);
        rank->rank = i;

        for (int b = 0; b < banksPerRank; b++) {
            rank->banks[b].bank = b;
            // GDDR addressing of banks to BG is linear.
            // Here we assume that all DRAM generations address bank groups
            // as follows:
            if (bankGroupArch) {
                // Simply assign lower bits to bank group in order to
                // rotate across bank groups as banks are incremented
                // e.g. with 4 banks per bank group and 16 banks total:
                //    banks 0,4,8,12  are in bank group 0
                //    banks 1,5,9,13  are in bank group 1
                //    banks 2,6,10,14 are in bank group 2
                //    banks 3,7,11,15 are in bank group 3
                rank->banks[b].bankgr = b % bankGroupsPerRank;
            } else {
                // No bank groups; simply assign to bank number
                rank->banks[b].bankgr = b;
            }
        }
    }
    // perform a basic check of the write thresholds
    if (p->write_low_thresh_perc >= p->write_high_thresh_perc)
        fatal("Write buffer low threshold %d must be smaller than the "
              "high threshold %d\n", p->write_low_thresh_perc,
              p->write_high_thresh_perc);
    // determine the rows per bank by looking at the total capacity
    uint64_t capacity = ULL(1) << ceilLog2(AbstractMemory::size());

    // determine the dram actual capacity from the DRAM config in Mbytes
    uint64_t deviceCapacity = deviceSize / (1024 * 1024) * devicesPerRank *
        ranksPerChannel;

    // if actual DRAM size does not match memory capacity in system warn!
    if (deviceCapacity != capacity / (1024 * 1024))
        warn("DRAM device capacity (%d Mbytes) does not match the "
             "address range assigned (%d Mbytes)\n", deviceCapacity,
             capacity / (1024 * 1024));
    DPRINTF(DRAM, "Memory capacity %lld (%lld) bytes\n", capacity,
            AbstractMemory::size());

    DPRINTF(DRAM, "Row buffer size %d bytes with %d columns per row buffer\n",
            rowBufferSize, columnsPerRowBuffer);

    rowsPerBank = capacity / (rowBufferSize * banksPerRank * ranksPerChannel);
    // a bit of sanity checks on the interleaving
    if (range.interleaved()) {
        if (channels != range.stripes())
            fatal("%s has %d interleaved address stripes but %d channel(s)\n",
                  name(), range.stripes(), channels);

        if (addrMapping == Enums::RoRaBaChCo) {
            if (rowBufferSize != range.granularity()) {
                fatal("Channel interleaving of %s doesn't match RoRaBaChCo "
                      "address map\n", name());
            }
        } else if (addrMapping == Enums::RoRaBaCoCh ||
                   addrMapping == Enums::RoCoRaBaCh) {
            // for the interleavings with channel bits in the bottom,
            // if the system uses a channel striping granularity that
            // is larger than the DRAM burst size, then map the
            // sequential accesses within a stripe to a number of
            // columns in the DRAM, effectively placing some of the
            // lower-order column bits as the least-significant bits
            // of the address (above the ones denoting the burst size)
            assert(columnsPerStripe >= 1);

            // channel striping has to be done at a granularity that
            // is equal to or larger than a cache line
            if (system()->cacheLineSize() > range.granularity()) {
                fatal("Channel interleaving of %s must be at least as large "
                      "as the cache line size\n", name());
            }

            // ...and equal to or smaller than the row-buffer size
            if (rowBufferSize < range.granularity()) {
                fatal("Channel interleaving of %s must be at most as large "
                      "as the row-buffer size\n", name());
            }
            // this is essentially the check above, so just to be sure
            assert(columnsPerStripe <= columnsPerRowBuffer);
        }
    }
    // some basic sanity checks
    if (tREFI <= tRP || tREFI <= tRFC) {
        fatal("tREFI (%d) must be larger than tRP (%d) and tRFC (%d)\n",
              tREFI, tRP, tRFC);
    }
    // basic bank group architecture checks ->
    if (bankGroupArch) {
        // must have at least one bank per bank group
        if (bankGroupsPerRank > banksPerRank) {
            fatal("banks per rank (%d) must be equal to or larger than "
                  "bank groups per rank (%d)\n",
                  banksPerRank, bankGroupsPerRank);
        }
        // must have same number of banks in each bank group
        if ((banksPerRank % bankGroupsPerRank) != 0) {
            fatal("Banks per rank (%d) must be evenly divisible by bank "
                  "groups per rank (%d) for equal banks per bank group\n",
                  banksPerRank, bankGroupsPerRank);
        }
        // tCCD_L should be greater than the minimal, back-to-back burst delay
        if (tCCD_L <= tBURST) {
            fatal("tCCD_L (%d) should be larger than tBURST (%d) when "
                  "bank groups per rank (%d) is greater than 1\n",
                  tCCD_L, tBURST, bankGroupsPerRank);
        }
        // tRRD_L is greater than the minimal, same bank group ACT-to-ACT
        // delay; some datasheets might specify it equal to tRRD
        if (tRRD_L <= tRRD) {
            fatal("tRRD_L (%d) should be larger than tRRD (%d) when "
                  "bank groups per rank (%d) is greater than 1\n",
                  tRRD_L, tRRD, bankGroupsPerRank);
        }
    }
}
void
DRAMCtrl::init()
{
    AbstractMemory::init();

    if (!port.isConnected()) {
        fatal("DRAMCtrl %s is unconnected!\n", name());
    } else {
        port.sendRangeChange();
    }
}
void
DRAMCtrl::startup()
{
    // timestamp offset should be in clock cycles for DRAMPower
    timeStampOffset = divCeil(curTick(), tCK);

    // update the start tick for the precharge accounting to the
    // current tick
    for (auto r : ranks) {
        r->startup(curTick() + tREFI - tRP);
    }

    // shift the bus busy time sufficiently far ahead that we never
    // have to worry about negative values when computing the time for
    // the next request, this will add an insignificant bubble at the
    // start of simulation
    busBusyUntil = curTick() + tRP + tRCD + tCL;
}
Tick
DRAMCtrl::recvAtomic(PacketPtr pkt)
{
    DPRINTF(DRAM, "recvAtomic: %s 0x%x\n", pkt->cmdString(), pkt->getAddr());

    // do the actual memory access and turn the packet into a response
    access(pkt);

    Tick latency = 0;
    if (!pkt->memInhibitAsserted() && pkt->hasData()) {
        // this value is not supposed to be accurate, just enough to
        // keep things going, mimic a closed page
        latency = tRP + tRCD + tCL;
    }
    return latency;
}
bool
DRAMCtrl::readQueueFull(unsigned int neededEntries) const
{
    DPRINTF(DRAM, "Read queue limit %d, current size %d, entries needed %d\n",
            readBufferSize, readQueue.size() + respQueue.size(),
            neededEntries);

    return
        (readQueue.size() + respQueue.size() + neededEntries) > readBufferSize;
}
bool
DRAMCtrl::writeQueueFull(unsigned int neededEntries) const
{
    DPRINTF(DRAM, "Write queue limit %d, current size %d, entries needed %d\n",
            writeBufferSize, writeQueue.size(), neededEntries);
    return (writeQueue.size() + neededEntries) > writeBufferSize;
}
DRAMCtrl::DRAMPacket*
DRAMCtrl::decodeAddr(PacketPtr pkt, Addr dramPktAddr, unsigned size,
                     bool isRead)
{
    // decode the address based on the address mapping scheme, with
    // Ro, Ra, Co, Ba and Ch denoting row, rank, column, bank and
    // channel, respectively
    uint8_t rank;
    uint8_t bank;
    // use a 64-bit unsigned during the computations as the row is
    // always the top bits, and check before creating the DRAMPacket
    uint64_t row;

    // truncate the address to a DRAM burst, which makes it unique to
    // a specific column, row, bank, rank and channel
    Addr addr = dramPktAddr / burstSize;
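    // For illustration (hypothetical numbers): with burstSize = 64, a
    // dramPktAddr of 0x10040 becomes addr = 0x10040 / 64 = 0x401, i.e.
    // the six low-order bits selecting a byte within the burst are
    // dropped before the rank, bank and row fields are peeled off below.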
    // we have removed the lowest order address bits that denote the
    // position within the column
    if (addrMapping == Enums::RoRaBaChCo) {
        // the lowest order bits denote the column to ensure that
        // sequential cache lines occupy the same row
        addr = addr / columnsPerRowBuffer;

        // take out the channel part of the address
        addr = addr / channels;

        // after the channel bits, get the bank bits to interleave
        // over the banks
        bank = addr % banksPerRank;
        addr = addr / banksPerRank;

        // after the bank, we get the rank bits which thus interleaves
        // over the ranks
        rank = addr % ranksPerChannel;
        addr = addr / ranksPerChannel;

        // lastly, get the row bits
        row = addr % rowsPerBank;
        addr = addr / rowsPerBank;
    } else if (addrMapping == Enums::RoRaBaCoCh) {
        // take out the lower-order column bits
        addr = addr / columnsPerStripe;

        // take out the channel part of the address
        addr = addr / channels;

        // next, the higher-order column bits
        addr = addr / (columnsPerRowBuffer / columnsPerStripe);

        // after the column bits, we get the bank bits to interleave
        // over the banks
        bank = addr % banksPerRank;
        addr = addr / banksPerRank;

        // after the bank, we get the rank bits which thus interleaves
        // over the ranks
        rank = addr % ranksPerChannel;
        addr = addr / ranksPerChannel;

        // lastly, get the row bits
        row = addr % rowsPerBank;
        addr = addr / rowsPerBank;
    } else if (addrMapping == Enums::RoCoRaBaCh) {
        // optimise for closed page mode and utilise maximum
        // parallelism of the DRAM (at the cost of power)

        // take out the lower-order column bits
        addr = addr / columnsPerStripe;

        // take out the channel part of the address, note that this has
        // to match with how accesses are interleaved between the
        // controllers in the address mapping
        addr = addr / channels;

        // start with the bank bits, as this provides the maximum
        // opportunity for parallelism between requests
        bank = addr % banksPerRank;
        addr = addr / banksPerRank;

        // next get the rank bits
        rank = addr % ranksPerChannel;
        addr = addr / ranksPerChannel;

        // next, the higher-order column bits
        addr = addr / (columnsPerRowBuffer / columnsPerStripe);

        // lastly, get the row bits
        row = addr % rowsPerBank;
        addr = addr / rowsPerBank;
    } else
        panic("Unknown address mapping policy chosen!");

    assert(rank < ranksPerChannel);
    assert(bank < banksPerRank);
    assert(row < rowsPerBank);
    assert(row < Bank::NO_ROW);

    DPRINTF(DRAM, "Address: %lld Rank %d Bank %d Row %d\n",
            dramPktAddr, rank, bank, row);

    // create the corresponding DRAM packet with the entry time and
    // ready time set to the current tick, the latter will be updated
    // later
    uint16_t bank_id = banksPerRank * rank + bank;
    return new DRAMPacket(pkt, isRead, rank, bank, row, bank_id, dramPktAddr,
                          size, ranks[rank]->banks[bank], *ranks[rank]);
}
void
DRAMCtrl::addToReadQueue(PacketPtr pkt, unsigned int pktCount)
{
    // only add to the read queue here. whenever the request is
    // eventually done, set the readyTime, and call schedule()
    assert(!pkt->isWrite());

    assert(pktCount != 0);

    // if the request size is larger than burst size, the pkt is split into
    // multiple DRAM packets
    // Note if the pkt starting address is not aligned to burst size, the
    // address of first DRAM packet is kept unaligned. Subsequent DRAM packets
    // are aligned to burst size boundaries. This is to ensure we accurately
    // check read packets against packets in write queue.
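    // For illustration (hypothetical numbers): with burstSize = 64, a pkt
    // at addr 0x70 of size 0x50 spans [0x70, 0xC0) and is split into two
    // DRAM packets, one at 0x70 of size 0x10 (up to the burst boundary at
    // 0x80, kept unaligned) and one at 0x80 of size 0x40.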
    Addr addr = pkt->getAddr();
    unsigned pktsServicedByWrQ = 0;
    BurstHelper* burst_helper = NULL;
    for (int cnt = 0; cnt < pktCount; ++cnt) {
        unsigned size = std::min((addr | (burstSize - 1)) + 1,
                                 pkt->getAddr() + pkt->getSize()) - addr;
        readPktSize[ceilLog2(size)]++;
        readBursts++;

        // First check write buffer to see if the data is already at
        // the controller
        bool foundInWrQ = false;
        for (auto i = writeQueue.begin(); i != writeQueue.end(); ++i) {
            // check if the read is subsumed in the write entry we are
            // looking at
            if ((*i)->addr <= addr &&
                (addr + size) <= ((*i)->addr + (*i)->size)) {
                foundInWrQ = true;
                servicedByWrQ++;
                pktsServicedByWrQ++;
                DPRINTF(DRAM, "Read to addr %lld with size %d serviced by "
                        "write queue\n", addr, size);
                bytesReadWrQ += burstSize;
                break;
            }
        }

        // If not found in the write q, make a DRAM packet and
        // push it onto the read queue
        if (!foundInWrQ) {

            // Make the burst helper for split packets
            if (pktCount > 1 && burst_helper == NULL) {
                DPRINTF(DRAM, "Read to addr %lld translates to %d "
                        "dram requests\n", pkt->getAddr(), pktCount);
                burst_helper = new BurstHelper(pktCount);
            }

            DRAMPacket* dram_pkt = decodeAddr(pkt, addr, size, true);
            dram_pkt->burstHelper = burst_helper;

            assert(!readQueueFull(1));
            rdQLenPdf[readQueue.size() + respQueue.size()]++;

            DPRINTF(DRAM, "Adding to read queue\n");

            readQueue.push_back(dram_pkt);

            // Update stats
            avgRdQLen = readQueue.size() + respQueue.size();
        }

        // Starting address of next dram pkt (aligned to burstSize boundary)
        addr = (addr | (burstSize - 1)) + 1;
    }

    // If all packets are serviced by write queue, we send the response back
    if (pktsServicedByWrQ == pktCount) {
        accessAndRespond(pkt, frontendLatency);
        return;
    }

    // Update how many split packets are serviced by write queue
    if (burst_helper != NULL)
        burst_helper->burstsServiced = pktsServicedByWrQ;

    // If we are not already scheduled to get a request out of the
    // queue, do so now
    if (!nextReqEvent.scheduled()) {
        DPRINTF(DRAM, "Request scheduled immediately\n");
        schedule(nextReqEvent, curTick());
    }
}
void
DRAMCtrl::addToWriteQueue(PacketPtr pkt, unsigned int pktCount)
{
    // only add to the write queue here. whenever the request is
    // eventually done, set the readyTime, and call schedule()
    assert(pkt->isWrite());

    // if the request size is larger than burst size, the pkt is split into
    // multiple DRAM packets
    Addr addr = pkt->getAddr();
    for (int cnt = 0; cnt < pktCount; ++cnt) {
        unsigned size = std::min((addr | (burstSize - 1)) + 1,
                                 pkt->getAddr() + pkt->getSize()) - addr;
        writePktSize[ceilLog2(size)]++;
        writeBursts++;

        // see if we can merge with an existing item in the write
        // queue and keep track of whether we have merged or not so we
        // can stop at that point and also avoid enqueueing a new
        // request
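        // For illustration (hypothetical numbers, burstSize = 64): a new
        // write at addr 0x100 of size 0x20 and an existing entry at 0x120
        // of size 0x20 together span [0x100, 0x140), i.e. a single burst,
        // so the existing entry is grown to addr 0x100, size 0x40 instead
        // of enqueueing a second one.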
        bool merged = false;
        auto w = writeQueue.begin();

        while (!merged && w != writeQueue.end()) {
            // either of the two could be first, if they are the same
            // it does not matter which way we go
            if ((*w)->addr >= addr) {
                // the existing one starts after the new one, figure
                // out where the new one ends with respect to the
                // existing one
                if ((addr + size) >= ((*w)->addr + (*w)->size)) {
                    // check if the existing one is completely
                    // subsumed in the new one
                    DPRINTF(DRAM, "Merging write covering existing burst\n");
                    merged = true;
                    // update both the address and the size
                    (*w)->addr = addr;
                    (*w)->size = size;
                } else if ((addr + size) >= (*w)->addr &&
                           ((*w)->addr + (*w)->size - addr) <= burstSize) {
                    // the new one is just before or partially
                    // overlapping with the existing one, and together
                    // they fit within a burst
                    DPRINTF(DRAM, "Merging write before existing burst\n");
                    merged = true;
                    // the existing queue item needs to be adjusted with
                    // respect to both address and size
                    (*w)->size = (*w)->addr + (*w)->size - addr;
                    (*w)->addr = addr;
                }
            } else {
                // the new one starts after the current one, figure
                // out where the existing one ends with respect to the
                // new one
                if (((*w)->addr + (*w)->size) >= (addr + size)) {
                    // check if the new one is completely subsumed in the
                    // existing one
                    DPRINTF(DRAM, "Merging write into existing burst\n");
                    merged = true;
                    // no adjustments necessary
                } else if (((*w)->addr + (*w)->size) >= addr &&
                           (addr + size - (*w)->addr) <= burstSize) {
                    // the existing one is just before or partially
                    // overlapping with the new one, and together
                    // they fit within a burst
                    DPRINTF(DRAM, "Merging write after existing burst\n");
                    merged = true;
                    // the address is right, and only the size has
                    // to be adjusted
                    (*w)->size = addr + size - (*w)->addr;
                }
            }
            ++w;
        }

        // if the item was not merged we need to create a new write
        // and enqueue it
        if (!merged) {
            DRAMPacket* dram_pkt = decodeAddr(pkt, addr, size, false);

            assert(writeQueue.size() < writeBufferSize);
            wrQLenPdf[writeQueue.size()]++;

            DPRINTF(DRAM, "Adding to write queue\n");

            writeQueue.push_back(dram_pkt);

            // Update stats
            avgWrQLen = writeQueue.size();
        } else {
            // keep track of the fact that this burst effectively
            // disappeared as it was merged with an existing one
            mergedWrBursts++;
        }

        // Starting address of next dram pkt (aligned to burstSize boundary)
        addr = (addr | (burstSize - 1)) + 1;
    }

    // we do not wait for the writes to be sent to the actual memory,
    // but instead take responsibility for the consistency here and
    // snoop the write queue for any upcoming reads
    // @todo, if a pkt size is larger than burst size, we might need a
    // different front end latency
    accessAndRespond(pkt, frontendLatency);

    // If we are not already scheduled to get a request out of the
    // queue, do so now
    if (!nextReqEvent.scheduled()) {
        DPRINTF(DRAM, "Request scheduled immediately\n");
        schedule(nextReqEvent, curTick());
    }
}
void
DRAMCtrl::printQs() const {
    DPRINTF(DRAM, "===READ QUEUE===\n\n");
    for (auto i = readQueue.begin(); i != readQueue.end(); ++i) {
        DPRINTF(DRAM, "Read %lu\n", (*i)->addr);
    }
    DPRINTF(DRAM, "\n===RESP QUEUE===\n\n");
    for (auto i = respQueue.begin(); i != respQueue.end(); ++i) {
        DPRINTF(DRAM, "Response %lu\n", (*i)->addr);
    }
    DPRINTF(DRAM, "\n===WRITE QUEUE===\n\n");
    for (auto i = writeQueue.begin(); i != writeQueue.end(); ++i) {
        DPRINTF(DRAM, "Write %lu\n", (*i)->addr);
    }
}
bool
DRAMCtrl::recvTimingReq(PacketPtr pkt)
{
    /// @todo temporary hack to deal with memory corruption issues until
    /// 4-phase transactions are complete
    for (int x = 0; x < pendingDelete.size(); x++)
        delete pendingDelete[x];
    pendingDelete.clear();

    // This is where we enter from the outside world
    DPRINTF(DRAM, "recvTimingReq: request %s addr %lld size %d\n",
            pkt->cmdString(), pkt->getAddr(), pkt->getSize());

    // simply drop inhibited packets for now
    if (pkt->memInhibitAsserted()) {
        DPRINTF(DRAM, "Inhibited packet -- Dropping it now\n");
        pendingDelete.push_back(pkt);
        return true;
    }

    // Calc avg gap between requests
    if (prevArrival != 0) {
        totGap += curTick() - prevArrival;
    }
    prevArrival = curTick();

    // Find out how many dram packets a pkt translates to
    // If the burst size is equal or larger than the pkt size, then a pkt
    // translates to only one dram packet. Otherwise, a pkt translates to
    // multiple dram packets
    unsigned size = pkt->getSize();
    unsigned offset = pkt->getAddr() & (burstSize - 1);
    unsigned int dram_pkt_count = divCeil(offset + size, burstSize);
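    // For illustration (hypothetical numbers): with burstSize = 64, a
    // 128-byte packet at addr 0x40 has offset 0 and needs
    // divCeil(128, 64) = 2 DRAM packets, whereas the same packet at addr
    // 0x60 has offset 0x20 and needs divCeil(0x20 + 128, 64) = 3.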
    // check local buffers and do not accept if full
    if (pkt->isRead()) {
        assert(size != 0);
        if (readQueueFull(dram_pkt_count)) {
            DPRINTF(DRAM, "Read queue full, not accepting\n");
            // remember that we have to retry this port
            retryRdReq = true;
            numRdRetry++;
            return false;
        } else {
            addToReadQueue(pkt, dram_pkt_count);
            readReqs++;
            bytesReadSys += size;
        }
    } else if (pkt->isWrite()) {
        assert(size != 0);
        if (writeQueueFull(dram_pkt_count)) {
            DPRINTF(DRAM, "Write queue full, not accepting\n");
            // remember that we have to retry this port
            retryWrReq = true;
            numWrRetry++;
            return false;
        } else {
            addToWriteQueue(pkt, dram_pkt_count);
            writeReqs++;
            bytesWrittenSys += size;
        }
    } else {
        DPRINTF(DRAM, "Neither read nor write, ignore timing\n");
        neitherReadNorWrite++;
        accessAndRespond(pkt, 1);
    }

    return true;
}
void
DRAMCtrl::processRespondEvent()
{
    DPRINTF(DRAM,
            "processRespondEvent(): Some req has reached its readyTime\n");

    DRAMPacket* dram_pkt = respQueue.front();

    if (dram_pkt->burstHelper) {
        // it is a split packet
        dram_pkt->burstHelper->burstsServiced++;
        if (dram_pkt->burstHelper->burstsServiced ==
            dram_pkt->burstHelper->burstCount) {
            // we have now serviced all children packets of a system packet
            // so we can now respond to the requester
            // @todo we probably want to have a different front end and back
            // end latency for split packets
            accessAndRespond(dram_pkt->pkt, frontendLatency + backendLatency);
            delete dram_pkt->burstHelper;
            dram_pkt->burstHelper = NULL;
        }
    } else {
        // it is not a split packet
        accessAndRespond(dram_pkt->pkt, frontendLatency + backendLatency);
    }

    delete respQueue.front();
    respQueue.pop_front();

    if (!respQueue.empty()) {
        assert(respQueue.front()->readyTime >= curTick());
        assert(!respondEvent.scheduled());
        schedule(respondEvent, respQueue.front()->readyTime);
    } else {
        // if there is nothing left in any queue, signal a drain
        if (writeQueue.empty() && readQueue.empty() && drainManager) {
            DPRINTF(Drain, "DRAM controller done draining\n");
            drainManager->signalDrainDone();
            drainManager = NULL;
        }
    }

    // We have made a location in the queue available at this point,
    // so if there is a read that was forced to wait, retry now
    if (retryRdReq) {
        retryRdReq = false;
        port.sendRetry();
    }
}
bool
DRAMCtrl::chooseNext(std::deque<DRAMPacket*>& queue, bool switched_cmd_type)
{
    // This method does the arbitration between requests. The chosen
    // packet is simply moved to the head of the queue. The other
    // methods know that this is the place to look. For example, with
    // FCFS, this method does nothing
    assert(!queue.empty());

    // bool to indicate if a packet to an available rank is found
    bool found_packet = false;
    if (queue.size() == 1) {
        DRAMPacket* dram_pkt = queue.front();
        // available rank corresponds to state refresh idle
        if (ranks[dram_pkt->rank]->isAvailable()) {
            found_packet = true;
            DPRINTF(DRAM, "Single request, going to a free rank\n");
        } else {
            DPRINTF(DRAM, "Single request, going to a busy rank\n");
        }
        return found_packet;
    }

    if (memSchedPolicy == Enums::fcfs) {
        // check if there is a packet going to a free rank
        for (auto i = queue.begin(); i != queue.end(); ++i) {
            DRAMPacket* dram_pkt = *i;
            if (ranks[dram_pkt->rank]->isAvailable()) {
                queue.erase(i);
                queue.push_front(dram_pkt);
                found_packet = true;
                break;
            }
        }
    } else if (memSchedPolicy == Enums::frfcfs) {
        found_packet = reorderQueue(queue, switched_cmd_type);
    } else
        panic("No scheduling policy chosen\n");

    return found_packet;
}
bool
DRAMCtrl::reorderQueue(std::deque<DRAMPacket*>& queue, bool switched_cmd_type)
{
    // Only determine this when needed
    uint64_t earliest_banks = 0;

    // Search for row hits first, if no row hit is found then schedule the
    // packet to one of the earliest banks available
    bool found_packet = false;
    bool found_earliest_pkt = false;
    bool found_prepped_diff_rank_pkt = false;
    auto selected_pkt_it = queue.end();

    for (auto i = queue.begin(); i != queue.end(); ++i) {
        DRAMPacket* dram_pkt = *i;
        const Bank& bank = dram_pkt->bankRef;
        // check if rank is busy. If this is the case jump to the next packet
        // Check if it is a row hit
        if (dram_pkt->rankRef.isAvailable()) {
            if (bank.openRow == dram_pkt->row) {
                if (dram_pkt->rank == activeRank || switched_cmd_type) {
                    // FCFS within the hits, giving priority to commands
                    // that access the same rank as the previous burst
                    // to minimize bus turnaround delays
                    // Only give rank priority when command type is
                    // not changing
                    DPRINTF(DRAM, "Row buffer hit\n");
                    selected_pkt_it = i;
                    break;
                } else if (!found_prepped_diff_rank_pkt) {
                    // found row hit for command on different rank
                    // than prev burst
                    selected_pkt_it = i;
                    found_prepped_diff_rank_pkt = true;
                }
            } else if (!found_earliest_pkt && !found_prepped_diff_rank_pkt) {
                // packet going to a rank which is currently not waiting for a
                // refresh, No row hit and
                // haven't found an entry with a row hit to a new rank
                if (earliest_banks == 0)
                    // Determine entries with earliest bank prep delay
                    // Function will give priority to commands that access the
                    // same rank as previous burst and can prep
                    // the bank seamlessly
                    earliest_banks = minBankPrep(queue, switched_cmd_type);

                // FCFS - Bank is first available bank
                if (bits(earliest_banks, dram_pkt->bankId,
                         dram_pkt->bankId)) {
                    // Remember the packet to be scheduled to one of
                    // the earliest banks available, FCFS amongst the
                    // same rank-bank
                    selected_pkt_it = i;
                    // if the packet found is going to a rank that is currently
                    // not busy then update found_earliest_pkt to true
                    found_earliest_pkt = true;
                }
            }
        }
    }

    if (selected_pkt_it != queue.end()) {
        DRAMPacket* selected_pkt = *selected_pkt_it;
        queue.erase(selected_pkt_it);
        queue.push_front(selected_pkt);
        found_packet = true;
    }

    return found_packet;
}
void
DRAMCtrl::accessAndRespond(PacketPtr pkt, Tick static_latency)
{
    DPRINTF(DRAM, "Responding to Address %lld.. ", pkt->getAddr());

    bool needsResponse = pkt->needsResponse();
    // do the actual memory access which also turns the packet into a
    // response
    access(pkt);

    // turn packet around to go back to requester if response expected
    if (needsResponse) {
        // access already turned the packet into a response
        assert(pkt->isResponse());

        // @todo someone should pay for this
        pkt->firstWordDelay = pkt->lastWordDelay = 0;

        // queue the packet in the response queue to be sent out after
        // the static latency has passed
        port.schedTimingResp(pkt, curTick() + static_latency);
    } else {
        // @todo the packet is going to be deleted, and the DRAMPacket
        // is still holding a pointer to it
        pendingDelete.push_back(pkt);
    }

    DPRINTF(DRAM, "Done\n");

    return;
}
void
DRAMCtrl::activateBank(Rank& rank_ref, Bank& bank_ref,
                       Tick act_tick, uint32_t row)
{
    assert(rank_ref.actTicks.size() == activationLimit);

    DPRINTF(DRAM, "Activate at tick %d\n", act_tick);

    // update the open row
    assert(bank_ref.openRow == Bank::NO_ROW);
    bank_ref.openRow = row;

    // start counting anew, this covers both the case when we
    // auto-precharged, and when this access is forced to
    // precharge
    bank_ref.bytesAccessed = 0;
    bank_ref.rowAccesses = 0;

    ++rank_ref.numBanksActive;
    assert(rank_ref.numBanksActive <= banksPerRank);

    DPRINTF(DRAM, "Activate bank %d, rank %d at tick %lld, now got %d active\n",
            bank_ref.bank, rank_ref.rank, act_tick,
            ranks[rank_ref.rank]->numBanksActive);

    rank_ref.power.powerlib.doCommand(MemCommand::ACT, bank_ref.bank,
                                      divCeil(act_tick, tCK) -
                                      timeStampOffset);

    DPRINTF(DRAMPower, "%llu,ACT,%d,%d\n", divCeil(act_tick, tCK) -
            timeStampOffset, bank_ref.bank, rank_ref.rank);

    // The next access has to respect tRAS for this bank
    bank_ref.preAllowedAt = act_tick + tRAS;

    // Respect the row-to-column command delay
    bank_ref.colAllowedAt = std::max(act_tick + tRCD, bank_ref.colAllowedAt);

    // start by enforcing tRRD
    for (int i = 0; i < banksPerRank; i++) {
        // next activate to any bank in this rank must not happen
        // before tRRD
        if (bankGroupArch && (bank_ref.bankgr == rank_ref.banks[i].bankgr)) {
            // bank group architecture requires longer delays between
            // ACT commands within the same bank group. Use tRRD_L
            // in this case
            rank_ref.banks[i].actAllowedAt = std::max(act_tick + tRRD_L,
                                             rank_ref.banks[i].actAllowedAt);
        } else {
            // use shorter tRRD value when either
            // 1) bank group architecture is not supported
            // 2) bank is in a different bank group
            rank_ref.banks[i].actAllowedAt = std::max(act_tick + tRRD,
                                             rank_ref.banks[i].actAllowedAt);
        }
    }

    // next, we deal with tXAW, if the activation limit is disabled
    // then we directly schedule an activate power event
    if (!rank_ref.actTicks.empty()) {
        // sanity check
        if (rank_ref.actTicks.back() &&
            (act_tick - rank_ref.actTicks.back()) < tXAW) {
            panic("Got %d activates in window %d (%llu - %llu) which "
                  "is smaller than %llu\n", activationLimit, act_tick -
                  rank_ref.actTicks.back(), act_tick,
                  rank_ref.actTicks.back(), tXAW);
        }

        // shift the times used for the book keeping, the last element
        // (highest index) is the oldest one and hence the lowest value
        rank_ref.actTicks.pop_back();

        // record a new activation (in the future)
        rank_ref.actTicks.push_front(act_tick);

        // cannot activate more than X times in time window tXAW, push the
        // next one (the X + 1'st activate) to be tXAW away from the
        // oldest in our window of X
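        // For illustration (hypothetical numbers): with activationLimit
        // X = 4 and tXAW = 40ns, ACTs at 5, 10 and 15ns followed by a new
        // one at 20ns leave the window as {20, 15, 10, 5}; since
        // 20 - 5 < tXAW, the next ACT on any bank in this rank is pushed
        // out to 5ns + tXAW = 45ns.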
        if (rank_ref.actTicks.back() &&
            (act_tick - rank_ref.actTicks.back()) < tXAW) {
            DPRINTF(DRAM, "Enforcing tXAW with X = %d, next activate "
                    "no earlier than %llu\n", activationLimit,
                    rank_ref.actTicks.back() + tXAW);
            for (int j = 0; j < banksPerRank; j++)
                // next activate must not happen before end of window
                rank_ref.banks[j].actAllowedAt =
                    std::max(rank_ref.actTicks.back() + tXAW,
                             rank_ref.banks[j].actAllowedAt);
        }
    }

    // at the point when this activate takes place, make sure we
    // transition to the active power state
    if (!rank_ref.activateEvent.scheduled())
        schedule(rank_ref.activateEvent, act_tick);
    else if (rank_ref.activateEvent.when() > act_tick)
        // move it sooner in time
        reschedule(rank_ref.activateEvent, act_tick);
}
void
DRAMCtrl::prechargeBank(Rank& rank_ref, Bank& bank, Tick pre_at, bool trace)
{
    // make sure the bank has an open row
    assert(bank.openRow != Bank::NO_ROW);

    // sample the bytes per activate here since we are closing
    // the page
    bytesPerActivate.sample(bank.bytesAccessed);

    bank.openRow = Bank::NO_ROW;

    // no precharge allowed before this one
    bank.preAllowedAt = pre_at;

    Tick pre_done_at = pre_at + tRP;

    bank.actAllowedAt = std::max(bank.actAllowedAt, pre_done_at);

    assert(rank_ref.numBanksActive != 0);
    --rank_ref.numBanksActive;

    DPRINTF(DRAM, "Precharging bank %d, rank %d at tick %lld, now got "
            "%d active\n", bank.bank, rank_ref.rank, pre_at,
            rank_ref.numBanksActive);

    if (trace) {
        rank_ref.power.powerlib.doCommand(MemCommand::PRE, bank.bank,
                                          divCeil(pre_at, tCK) -
                                          timeStampOffset);
        DPRINTF(DRAMPower, "%llu,PRE,%d,%d\n", divCeil(pre_at, tCK) -
                timeStampOffset, bank.bank, rank_ref.rank);
    }

    // if we look at the current number of active banks we might be
    // tempted to think the DRAM is now idle, however this can be
    // undone by an activate that is scheduled to happen before we
    // would have reached the idle state, so schedule an event and
    // rather check once we actually make it to the point in time when
    // the (last) precharge takes place
    if (!rank_ref.prechargeEvent.scheduled())
        schedule(rank_ref.prechargeEvent, pre_done_at);
    else if (rank_ref.prechargeEvent.when() < pre_done_at)
        reschedule(rank_ref.prechargeEvent, pre_done_at);
}
void
DRAMCtrl::doDRAMAccess(DRAMPacket* dram_pkt)
{
    DPRINTF(DRAM, "Timing access to addr %lld, rank/bank/row %d %d %d\n",
            dram_pkt->addr, dram_pkt->rank, dram_pkt->bank, dram_pkt->row);

    // get the rank
    Rank& rank = dram_pkt->rankRef;

    // get the bank
    Bank& bank = dram_pkt->bankRef;

    // for the state we need to track if it is a row hit or not
    bool row_hit = true;

    // respect any constraints on the command (e.g. tRCD or tCCD)
    Tick cmd_at = std::max(bank.colAllowedAt, curTick());

    // Determine the access latency and update the bank state
    if (bank.openRow == dram_pkt->row) {
        // nothing to do
    } else {
        row_hit = false;

        // If there is a page open, precharge it.
        if (bank.openRow != Bank::NO_ROW) {
            prechargeBank(rank, bank, std::max(bank.preAllowedAt, curTick()));
        }

        // next we need to account for the delay in activating the
        // page
        Tick act_tick = std::max(bank.actAllowedAt, curTick());

        // Record the activation and deal with all the global timing
        // constraints caused by a new activation (tRRD and tXAW)
        activateBank(rank, bank, act_tick, dram_pkt->row);

        // issue the command as early as possible
        cmd_at = bank.colAllowedAt;
    }

    // we need to wait until the bus is available before we can issue
    // the command
    cmd_at = std::max(cmd_at, busBusyUntil - tCL);

    // update the packet ready time
    dram_pkt->readyTime = cmd_at + tCL + tBURST;

    // only one burst can use the bus at any one point in time
    assert(dram_pkt->readyTime - busBusyUntil >= tBURST);

    // update the time for the next read/write burst for each
    // bank (add a max with tCCD/tCCD_L here)
    Tick cmd_dly;
    for (int j = 0; j < ranksPerChannel; j++) {
        for (int i = 0; i < banksPerRank; i++) {
            // next burst to same bank group in this rank must not happen
            // before tCCD_L. Different bank group timing requirement is
            // tBURST; Add tCS for different ranks
            if (dram_pkt->rank == j) {
                if (bankGroupArch &&
                    (bank.bankgr == ranks[j]->banks[i].bankgr)) {
                    // bank group architecture requires longer delays between
                    // RD/WR burst commands to the same bank group.
                    // Use tCCD_L in this case
                    cmd_dly = tCCD_L;
                } else {
                    // use tBURST (equivalent to tCCD_S), the shorter
                    // cas-to-cas delay value, when either:
                    // 1) bank group architecture is not supported
                    // 2) bank is in a different bank group
                    cmd_dly = tBURST;
                }
            } else {
                // different rank is by default in a different bank group
                // use tBURST (equivalent to tCCD_S), which is the shorter
                // cas-to-cas delay in this case
                // Add tCS to account for rank-to-rank bus delay requirements
                cmd_dly = tBURST + tCS;
            }
            ranks[j]->banks[i].colAllowedAt = std::max(cmd_at + cmd_dly,
                                              ranks[j]->banks[i].colAllowedAt);
        }
    }

    // Save rank of current access
    activeRank = dram_pkt->rank;

    // If this is a write, we also need to respect the write recovery
    // time before a precharge, in the case of a read, respect the
    // read to precharge constraint
    bank.preAllowedAt = std::max(bank.preAllowedAt,
                                 dram_pkt->isRead ? cmd_at + tRTP :
                                 dram_pkt->readyTime + tWR);

    // increment the bytes accessed and the accesses per row
    bank.bytesAccessed += burstSize;
    ++bank.rowAccesses;

    // if we reached the max, then issue with an auto-precharge
    bool auto_precharge = pageMgmt == Enums::close ||
        bank.rowAccesses == maxAccessesPerRow;

    // if we did not hit the limit, we might still want to
    // auto-precharge
    if (!auto_precharge &&
        (pageMgmt == Enums::open_adaptive ||
         pageMgmt == Enums::close_adaptive)) {
        // a twist on the open and close page policies:
        // 1) open_adaptive page policy does not blindly keep the
        // page open, but closes it if there are no row hits, and there
        // are bank conflicts in the queue
        // 2) close_adaptive page policy does not blindly close the
        // page, but closes it only if there are no row hits in the queue.
        // In this case, only force an auto precharge when there
        // are no same page hits in the queue
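        // For illustration (hypothetical queue contents): if this burst
        // targets rank 0, bank 2, row 7 and the queue holds a packet for
        // the same bank and row (a row hit) plus one for row 9 (a bank
        // conflict), neither adaptive policy precharges; with no row hits
        // queued, close_adaptive precharges unconditionally and
        // open_adaptive precharges because of the queued bank conflict.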
        bool got_more_hits = false;
        bool got_bank_conflict = false;

        // either look at the read queue or write queue
        const deque<DRAMPacket*>& queue = dram_pkt->isRead ? readQueue :
            writeQueue;
        auto p = queue.begin();
        // make sure we are not considering the packet that we are
        // currently dealing with (which is the head of the queue)
        ++p;

        // keep on looking until we have found required condition or
        // reached the end
        while (!(got_more_hits &&
                 (got_bank_conflict || pageMgmt == Enums::close_adaptive)) &&
               p != queue.end()) {
            bool same_rank_bank = (dram_pkt->rank == (*p)->rank) &&
                (dram_pkt->bank == (*p)->bank);
            bool same_row = dram_pkt->row == (*p)->row;
            got_more_hits |= same_rank_bank && same_row;
            got_bank_conflict |= same_rank_bank && !same_row;
            ++p;
        }

        // auto pre-charge when either
        // 1) open_adaptive policy, we have not got any more hits, and
        //    have a bank conflict
        // 2) close_adaptive policy and we have not got any more hits
        auto_precharge = !got_more_hits &&
            (got_bank_conflict || pageMgmt == Enums::close_adaptive);
    }

    // DRAMPower trace command to be written
    std::string mem_cmd = dram_pkt->isRead ? "RD" : "WR";

    // MemCommand required for DRAMPower library
    MemCommand::cmds command = (mem_cmd == "RD") ? MemCommand::RD :
        MemCommand::WR;

    // if this access should use auto-precharge, then we are
    // closing the row
    if (auto_precharge) {
        // if auto-precharge push a PRE command at the correct tick to the
        // list used by DRAMPower library to calculate power
        prechargeBank(rank, bank, std::max(curTick(), bank.preAllowedAt));

        DPRINTF(DRAM, "Auto-precharged bank: %d\n", dram_pkt->bankId);
    }

    // Update bus state
    busBusyUntil = dram_pkt->readyTime;

    DPRINTF(DRAM, "Access to %lld, ready at %lld bus busy until %lld.\n",
            dram_pkt->addr, dram_pkt->readyTime, busBusyUntil);

    dram_pkt->rankRef.power.powerlib.doCommand(command, dram_pkt->bank,
                                               divCeil(cmd_at, tCK) -
                                               timeStampOffset);

    DPRINTF(DRAMPower, "%llu,%s,%d,%d\n", divCeil(cmd_at, tCK) -
            timeStampOffset, mem_cmd, dram_pkt->bank, dram_pkt->rank);

    // Update the minimum timing between the requests, this is a
    // conservative estimate of when we have to schedule the next
    // request to not introduce any unnecessary bubbles. In most cases
    // we will wake up sooner than we have to.
    nextReqTime = busBusyUntil - (tRP + tRCD + tCL);

    // Update the stats and schedule the next request
    if (dram_pkt->isRead) {
        ++readsThisTime;
        if (row_hit)
            readRowHits++;
        bytesReadDRAM += burstSize;
        perBankRdBursts[dram_pkt->bankId]++;

        // Update latency stats
        totMemAccLat += dram_pkt->readyTime - dram_pkt->entryTime;
        totBusLat += tBURST;
        totQLat += cmd_at - dram_pkt->entryTime;
    } else {
        ++writesThisTime;
        if (row_hit)
            writeRowHits++;
        bytesWritten += burstSize;
        perBankWrBursts[dram_pkt->bankId]++;
    }
}
void
DRAMCtrl::processNextReqEvent()
{
    int busyRanks = 0;
    for (auto r : ranks) {
        if (!r->isAvailable()) {
            // rank is busy refreshing
            busyRanks++;

            // let the rank know that if it was waiting to drain, it
            // is now done and ready to proceed
            r->checkDrainDone();
        }
    }

    if (busyRanks == ranksPerChannel) {
        // if all ranks are refreshing wait for them to finish
        // and stall this state machine without taking any further
        // action, and do not schedule a new nextReqEvent
        return;
    }

    // pre-emptively set to false. Overwrite if in READ_TO_WRITE
    // or WRITE_TO_READ state
    bool switched_cmd_type = false;
    if (busState == READ_TO_WRITE) {
        DPRINTF(DRAM, "Switching to writes after %d reads with %d reads "
                "waiting\n", readsThisTime, readQueue.size());

        // sample and reset the read-related stats as we are now
        // transitioning to writes, and all reads are done
        rdPerTurnAround.sample(readsThisTime);
        readsThisTime = 0;

        // now proceed to do the actual writes
        busState = WRITE;
        switched_cmd_type = true;
    } else if (busState == WRITE_TO_READ) {
        DPRINTF(DRAM, "Switching to reads after %d writes with %d writes "
                "waiting\n", writesThisTime, writeQueue.size());

        wrPerTurnAround.sample(writesThisTime);
        writesThisTime = 0;

        busState = READ;
        switched_cmd_type = true;
    }

    // when we get here it is either a read or a write
    if (busState == READ) {

        // track if we should switch or not
        bool switch_to_writes = false;

        if (readQueue.empty()) {
            // In the case there is no read request to go next,
            // trigger writes if we have passed the low threshold (or
            // if we are draining)
            if (!writeQueue.empty() &&
                (drainManager || writeQueue.size() > writeLowThreshold)) {

                switch_to_writes = true;
            } else {
                // check if we are drained
                if (respQueue.empty() && drainManager) {
                    DPRINTF(Drain, "DRAM controller done draining\n");
                    drainManager->signalDrainDone();
                    drainManager = NULL;
                }

                // nothing to do, not even any point in scheduling an
                // event for the next request
                return;
            }
        } else {
            // bool to check if there is a read to a free rank
            bool found_read = false;

            // Figure out which read request goes next, and move it to the
            // front of the read queue
            found_read = chooseNext(readQueue, switched_cmd_type);

            // if no read to an available rank is found then return
            // at this point. There could be writes to the available ranks
            // which are above the required threshold. However, to
            // avoid adding more complexity to the code, return and wait
            // for a refresh event to kick things into action again.
            if (!found_read)
                return;

            DRAMPacket* dram_pkt = readQueue.front();
            assert(dram_pkt->rankRef.isAvailable());
            // here we get a bit creative and shift the bus busy time not
            // just the tWTR, but also a CAS latency to capture the fact
            // that we are allowed to prepare a new bank, but not issue a
            // read command until after tWTR, in essence we capture a
            // bubble on the data bus that is tWTR + tCL
            if (switched_cmd_type && dram_pkt->rank == activeRank) {
                busBusyUntil += tWTR + tCL;
            }

            doDRAMAccess(dram_pkt);

            // At this point we're done dealing with the request
            readQueue.pop_front();

            // sanity check
            assert(dram_pkt->size <= burstSize);
            assert(dram_pkt->readyTime >= curTick());

            // Insert into response queue. It will be sent back to the
            // requestor at its readyTime
            if (respQueue.empty()) {
                assert(!respondEvent.scheduled());
                schedule(respondEvent, dram_pkt->readyTime);
            } else {
                assert(respQueue.back()->readyTime <= dram_pkt->readyTime);
                assert(respondEvent.scheduled());
            }

            respQueue.push_back(dram_pkt);

            // we have so many writes that we have to transition
            if (writeQueue.size() > writeHighThreshold) {
                switch_to_writes = true;
            }
        }

        // switching to writes, either because the read queue is empty
        // and the writes have passed the low threshold (or we are
        // draining), or because the writes hit the high threshold
        if (switch_to_writes) {
            // transition to writing
            busState = READ_TO_WRITE;
        }
    } else {
        // bool to check if write to free rank is found
        bool found_write = false;

        found_write = chooseNext(writeQueue, switched_cmd_type);

        // if no writes to an available rank are found then return.
        // There could be reads to the available ranks. However, to avoid
        // adding more complexity to the code, return at this point and wait
        // for a refresh event to kick things into action again.
        if (!found_write)
            return;

        DRAMPacket* dram_pkt = writeQueue.front();
        assert(dram_pkt->rankRef.isAvailable());
        // sanity check
        assert(dram_pkt->size <= burstSize);

        // add a bubble to the data bus, as defined by the
        // tRTW when access is to the same rank as previous burst
        // Different rank timing is handled with tCS, which is
        // applied to colAllowedAt
        if (switched_cmd_type && dram_pkt->rank == activeRank) {
            busBusyUntil += tRTW;
        }

        doDRAMAccess(dram_pkt);

        writeQueue.pop_front();
        delete dram_pkt;

        // If we emptied the write queue, or got sufficiently below the
        // threshold (using the minWritesPerSwitch as the hysteresis) and
        // are not draining, or we have reads waiting and have done enough
        // writes, then switch to reads.
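        // For illustration (hypothetical numbers): with a 64-entry write
        // buffer, writeLowThreshold = 32 and minWritesPerSwitch = 16, the
        // controller keeps writing until the queue drops below
        // 32 - 16 = 16 entries, rather than turning the bus around the
        // moment it dips under the low threshold.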
        if (writeQueue.empty() ||
            (writeQueue.size() + minWritesPerSwitch < writeLowThreshold &&
             !drainManager) ||
            (!readQueue.empty() && writesThisTime >= minWritesPerSwitch)) {
            // turn the bus back around for reads again
            busState = WRITE_TO_READ;

            // note that we switch back to reads also in the idle
            // case, which eventually will check for any draining and
            // also pause any further scheduling if there is really
            // nothing to do
        }
    }
    // It is possible that a refresh to another rank kicks things back into
    // action before reaching this point.
    if (!nextReqEvent.scheduled())
        schedule(nextReqEvent, std::max(nextReqTime, curTick()));

    // If there is space available and we have writes waiting then let
    // them retry. This is done here to ensure that the retry does not
    // cause a nextReqEvent to be scheduled before we do so as part of
    // the next request processing
    if (retryWrReq && writeQueue.size() < writeBufferSize) {
        retryWrReq = false;
        port.sendRetry();
    }
}
uint64_t
DRAMCtrl::minBankPrep(const deque<DRAMPacket*>& queue,
                      bool switched_cmd_type) const
{
    uint64_t bank_mask = 0;
    Tick min_act_at = MaxTick;

    uint64_t bank_mask_same_rank = 0;
    Tick min_act_at_same_rank = MaxTick;

    // Give precedence to commands that access same rank as previous command
    bool same_rank_match = false;

    // determine if we have queued transactions targeting the
    // bank in question
    vector<bool> got_waiting(ranksPerChannel * banksPerRank, false);
    for (const auto& p : queue) {
        if (p->rankRef.isAvailable())
            got_waiting[p->bankId] = true;
    }
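    // For illustration (hypothetical numbers): with 2 ranks of 8 banks,
    // bank_id runs from 0 to 15 below, and the returned mask has bit 11
    // set when rank 1, bank 3 (bank_id = 1 * 8 + 3) is among the earliest
    // banks that can be prepped.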
    for (int i = 0; i < ranksPerChannel; i++) {
        for (int j = 0; j < banksPerRank; j++) {
            uint16_t bank_id = i * banksPerRank + j;

            // if we have waiting requests for the bank, and it is
            // amongst the first available, update the mask
            if (got_waiting[bank_id]) {
                // make sure this rank is not currently refreshing.
                assert(ranks[i]->isAvailable());
                // simplistic approximation of when the bank can issue
                // an activate, ignoring any rank-to-rank switching
                // cost in this calculation
                Tick act_at = ranks[i]->banks[j].openRow == Bank::NO_ROW ?
                    ranks[i]->banks[j].actAllowedAt :
                    std::max(ranks[i]->banks[j].preAllowedAt, curTick()) + tRP;

                // prioritize commands that access the
                // same rank as previous burst
                // Calculate bank mask separately for the case and
                // evaluate after loop iterations complete
                if (i == activeRank && ranksPerChannel > 1) {
                    if (act_at <= min_act_at_same_rank) {
                        // reset same rank bank mask if new minimum is found
                        // and previous minimum could not immediately send ACT
                        if (act_at < min_act_at_same_rank &&
                            min_act_at_same_rank > curTick())
                            bank_mask_same_rank = 0;

                        // Set flag indicating that a same rank
                        // opportunity was found
                        same_rank_match = true;

                        // set the bit corresponding to the available bank
                        replaceBits(bank_mask_same_rank, bank_id, bank_id, 1);
                        min_act_at_same_rank = act_at;
                    }
                } else {
                    if (act_at <= min_act_at) {
                        // reset bank mask if new minimum is found
                        // and previous minimum could not immediately send ACT
                        if (act_at < min_act_at && min_act_at > curTick())
                            bank_mask = 0;
                        // set the bit corresponding to the available bank
                        replaceBits(bank_mask, bank_id, bank_id, 1);
                        min_act_at = act_at;
                    }
                }
            }
        }
    }

    // Determine the earliest time when the next burst can issue based
    // on the current busBusyUntil delay.
    // Offset by tRCD to correlate with ACT timing variables
    Tick min_cmd_at = busBusyUntil - tCL - tRCD;

    // if we have multiple ranks and all
    // waiting packets are accessing a rank which was previously active
    // then bank_mask_same_rank will be set to a value while bank_mask will
    // remain 0. In this case, the function should return the value of
    // bank_mask_same_rank.
    // else if waiting packets access a rank which was previously active and
    // other ranks, prioritize same rank accesses that can issue B2B
    // Only optimize for same ranks when the command type
    // does not change; do not want to unnecessarily incur tWTR
    //
    // Resulting FCFS prioritization Order is:
    // 1) Commands that access the same rank as previous burst
    //    and can prep the bank seamlessly.
    // 2) Commands (any rank) with earliest bank prep
    if ((bank_mask == 0) || (!switched_cmd_type && same_rank_match &&
        min_act_at_same_rank <= min_cmd_at)) {
        bank_mask = bank_mask_same_rank;
    }

    return bank_mask;
}
DRAMCtrl::Rank::Rank(DRAMCtrl& _memory, const DRAMCtrlParams* _p)
    : EventManager(&_memory), memory(_memory),
      pwrStateTrans(PWR_IDLE), pwrState(PWR_IDLE), pwrStateTick(0),
      refreshState(REF_IDLE), refreshDueAt(0),
      power(_p, false), numBanksActive(0),
      activateEvent(*this), prechargeEvent(*this),
      refreshEvent(*this), powerEvent(*this)
{ }
void
DRAMCtrl::Rank::startup(Tick ref_tick)
{
    assert(ref_tick > curTick());

    pwrStateTick = curTick();

    // kick off the refresh, and give ourselves enough time to
    // precharge
    schedule(refreshEvent, ref_tick);
}
void
DRAMCtrl::Rank::checkDrainDone()
{
    // if this rank was waiting to drain it is now able to proceed to
    // precharge
    if (refreshState == REF_DRAIN) {
        DPRINTF(DRAM, "Refresh drain done, now precharging\n");

        refreshState = REF_PRE;

        // hand control back to the refresh event loop
        schedule(refreshEvent, curTick());
    }
}
void
DRAMCtrl::Rank::processActivateEvent()
{
    // we should transition to the active state as soon as any bank is active
    if (pwrState != PWR_ACT)
        // note that at this point numBanksActive could be back at
        // zero again due to a precharge scheduled in the future
        schedulePowerEvent(PWR_ACT, curTick());
}
void
DRAMCtrl::Rank::processPrechargeEvent()
{
    // if we reached zero, then special conditions apply as we track
    // if all banks are precharged for the power models
    if (numBanksActive == 0) {
        // we should transition to the idle state when the last bank
        // is precharged
        schedulePowerEvent(PWR_IDLE, curTick());
    }
}
void
DRAMCtrl::Rank::processRefreshEvent()
{
    // when first preparing the refresh, remember when it was due
    if (refreshState == REF_IDLE) {
        // remember when the refresh is due
        refreshDueAt = curTick();

        // proceed to drain
        refreshState = REF_DRAIN;

        DPRINTF(DRAM, "Refresh due\n");
    }

    // let any scheduled read or write to the same rank go ahead,
    // after which it will
    // hand control back to this event loop
    if (refreshState == REF_DRAIN) {
        // if a request is at the moment being handled and this request is
        // accessing the current rank then wait for it to finish
        if ((rank == memory.activeRank)
            && (memory.nextReqEvent.scheduled())) {
            // hand control over to the request loop until it is
            // evaluated next
            DPRINTF(DRAM, "Refresh awaiting draining\n");

            return;
        } else {
            refreshState = REF_PRE;
        }
    }

    // at this point, ensure that all banks are precharged
    if (refreshState == REF_PRE) {
        // precharge any active bank if we are not already in the idle
        // state
        if (pwrState != PWR_IDLE) {
            // at the moment, we use a precharge all even if there is
            // only a single bank open
            DPRINTF(DRAM, "Precharging all\n");

            // first determine when we can precharge
            Tick pre_at = curTick();

            for (auto &b : banks) {
                // respect both causality and any existing bank
                // constraints, some banks could already have a
                // (auto) precharge scheduled
                pre_at = std::max(b.preAllowedAt, pre_at);
            }

            // make sure all banks per rank are precharged, and for those that
            // already are, update their availability
            Tick act_allowed_at = pre_at + memory.tRP;

            for (auto &b : banks) {
                if (b.openRow != Bank::NO_ROW) {
                    memory.prechargeBank(*this, b, pre_at, false);
                } else {
                    b.actAllowedAt = std::max(b.actAllowedAt, act_allowed_at);
                    b.preAllowedAt = std::max(b.preAllowedAt, pre_at);
                }
            }

            // precharge all banks in rank
            power.powerlib.doCommand(MemCommand::PREA, 0,
                                     divCeil(pre_at, memory.tCK) -
                                     memory.timeStampOffset);

            DPRINTF(DRAMPower, "%llu,PREA,0,%d\n",
                    divCeil(pre_at, memory.tCK) -
                    memory.timeStampOffset, rank);
        } else {
            DPRINTF(DRAM, "All banks already precharged, starting refresh\n");

            // go ahead and kick the power state machine into gear if
            // we are already idle
            schedulePowerEvent(PWR_REF, curTick());
        }

        refreshState = REF_RUN;
        assert(numBanksActive == 0);

        // wait for all banks to be precharged, at which point the
        // power state machine will transition to the idle state, and
        // automatically move to a refresh, at that point it will also
        // call this method to get the refresh event loop going again
        return;
    }

    // last but not least we perform the actual refresh
    if (refreshState == REF_RUN) {
        // should never get here with any banks active
        assert(numBanksActive == 0);
        assert(pwrState == PWR_REF);

        Tick ref_done_at = curTick() + memory.tRFC;

        for (auto &b : banks) {
            b.actAllowedAt = ref_done_at;
        }

        // at the moment this affects all ranks
        power.powerlib.doCommand(MemCommand::REF, 0,
                                 divCeil(curTick(), memory.tCK) -
                                 memory.timeStampOffset);

        // at the moment sort the list of commands and update the counters
        // for the DRAMPower library when doing a refresh
        sort(power.powerlib.cmdList.begin(),
             power.powerlib.cmdList.end(), DRAMCtrl::sortTime);

        // update the counters for DRAMPower, passing false to
        // indicate that this is not the last command in the
        // list. DRAMPower requires this information for the
        // correct calculation of the background energy at the end
        // of the simulation. Ideally we would want to call this
        // function with true once at the end of the
        // simulation. However, the discarded energy is extremely
        // small and does not affect the final results.
        power.powerlib.updateCounters(false);

        // call the energy function
        power.powerlib.calcEnergy();

        // Update the stats
        updatePowerStats();

        DPRINTF(DRAMPower, "%llu,REF,0,%d\n", divCeil(curTick(), memory.tCK) -
                memory.timeStampOffset, rank);

        // make sure we did not wait so long that we cannot make up
        // for it
        if (refreshDueAt + memory.tREFI < ref_done_at) {
            fatal("Refresh was delayed so long we cannot catch up\n");
        }

        // compensate for the delay in actually performing the refresh
        // when scheduling the next one
        schedule(refreshEvent, refreshDueAt + memory.tREFI - memory.tRP);
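        // For illustration (hypothetical numbers): with tREFI = 7.8us and
        // tRP = 15ns, a refresh due at time T schedules the next
        // refreshEvent at T + 7.8us - 15ns, so the precharge-all preceding
        // the next refresh can complete right as that refresh falls due.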
1735 assert(!powerEvent
.scheduled());
1737 // move to the idle power state once the refresh is done, this
1738 // will also move the refresh state machine to the refresh
1740 schedulePowerEvent(PWR_IDLE
, ref_done_at
);
1742 DPRINTF(DRAMState
, "Refresh done at %llu and next refresh at %llu\n",
1743 ref_done_at
, refreshDueAt
+ memory
.tREFI
);
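// Worked example of the scheduling above (hypothetical DDR3-style timings):
// with tREFI = 7.8 us and tRP = 15 ns, the next refreshEvent fires at
// refreshDueAt + 7.8 us - 15 ns, i.e. one tRP early, so the precharge-all
// phase can complete right as the refresh interval expires.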
void
DRAMCtrl::Rank::schedulePowerEvent(PowerState pwr_state, Tick tick)
{
    // respect causality
    assert(tick >= curTick());

    if (!powerEvent.scheduled()) {
        DPRINTF(DRAMState, "Scheduling power event at %llu to state %d\n",
                tick, pwr_state);

        // insert the new transition
        pwrStateTrans = pwr_state;

        schedule(powerEvent, tick);
    } else {
        panic("Scheduled power event at %llu to state %d, "
              "with scheduled event at %llu to %d\n", tick, pwr_state,
              powerEvent.when(), pwrStateTrans);
    }
}
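// Only a single power-state transition can be outstanding per rank at any
// point in time; scheduling a second one before the first has fired is
// treated as a programming error and hits the panic above.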
void
DRAMCtrl::Rank::processPowerEvent()
{
    // remember where we were, and for how long
    Tick duration = curTick() - pwrStateTick;
    PowerState prev_state = pwrState;

    // update the accounting
    pwrStateTime[prev_state] += duration;

    pwrState = pwrStateTrans;
    pwrStateTick = curTick();

    if (pwrState == PWR_IDLE) {
        DPRINTF(DRAMState, "All banks precharged\n");

        // if we were refreshing, make sure we start scheduling requests again
        if (prev_state == PWR_REF) {
            DPRINTF(DRAMState, "Was refreshing for %llu ticks\n", duration);
            assert(pwrState == PWR_IDLE);

            // kick things into action again
            refreshState = REF_IDLE;
            // a request event could be already scheduled by the state
            // machine of the other rank
            if (!memory.nextReqEvent.scheduled())
                schedule(memory.nextReqEvent, curTick());
        } else {
            assert(prev_state == PWR_ACT);

            // if we have a pending refresh, and are now moving to
            // the idle state, directly transition to a refresh
            if (refreshState == REF_RUN) {
                // there should be nothing waiting at this point
                assert(!powerEvent.scheduled());

                // update the state in zero time and proceed below
                pwrState = PWR_REF;
                pwrStateTick = curTick();
            }
        }
    }

    // we transition to the refresh state, let the refresh state
    // machine know of this state update and let it deal with the
    // scheduling of the next power state transition as well as the
    // following refresh
    if (pwrState == PWR_REF) {
        DPRINTF(DRAMState, "Refreshing\n");
        // kick the refresh event loop into action again, and that
        // in turn will schedule a transition to the idle power
        // state once the refresh is done
        assert(refreshState == REF_RUN);
        processRefreshEvent();
    }
}
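// Taken together, the transitions handled here are: PWR_ACT -> PWR_IDLE
// once the last bank has precharged, PWR_IDLE -> PWR_REF when a refresh is
// pending, and PWR_REF -> PWR_IDLE (scheduled by processRefreshEvent())
// once the refresh itself completes.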
void
DRAMCtrl::Rank::updatePowerStats()
{
    // Get the energy and power from DRAMPower
    Data::MemoryPowerModel::Energy energy =
        power.powerlib.getEnergy();
    Data::MemoryPowerModel::Power rank_power =
        power.powerlib.getPower();

    actEnergy = energy.act_energy * memory.devicesPerRank;
    preEnergy = energy.pre_energy * memory.devicesPerRank;
    readEnergy = energy.read_energy * memory.devicesPerRank;
    writeEnergy = energy.write_energy * memory.devicesPerRank;
    refreshEnergy = energy.ref_energy * memory.devicesPerRank;
    actBackEnergy = energy.act_stdby_energy * memory.devicesPerRank;
    preBackEnergy = energy.pre_stdby_energy * memory.devicesPerRank;
    totalEnergy = energy.total_energy * memory.devicesPerRank;
    averagePower = rank_power.average_power * memory.devicesPerRank;
}
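// DRAMPower models a single DRAM device, which is why every energy and
// power figure above is scaled by devicesPerRank to cover the whole rank.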
void
DRAMCtrl::Rank::regStats()
{
    using namespace Stats;

    pwrStateTime
        .init(5)
        .name(name() + ".memoryStateTime")
        .desc("Time in different power states");
    pwrStateTime.subname(0, "IDLE");
    pwrStateTime.subname(1, "REF");
    pwrStateTime.subname(2, "PRE_PDN");
    pwrStateTime.subname(3, "ACT");
    pwrStateTime.subname(4, "ACT_PDN");

    actEnergy
        .name(name() + ".actEnergy")
        .desc("Energy for activate commands per rank (pJ)");

    preEnergy
        .name(name() + ".preEnergy")
        .desc("Energy for precharge commands per rank (pJ)");

    readEnergy
        .name(name() + ".readEnergy")
        .desc("Energy for read commands per rank (pJ)");

    writeEnergy
        .name(name() + ".writeEnergy")
        .desc("Energy for write commands per rank (pJ)");

    refreshEnergy
        .name(name() + ".refreshEnergy")
        .desc("Energy for refresh commands per rank (pJ)");

    actBackEnergy
        .name(name() + ".actBackEnergy")
        .desc("Energy for active background per rank (pJ)");

    preBackEnergy
        .name(name() + ".preBackEnergy")
        .desc("Energy for precharge background per rank (pJ)");

    totalEnergy
        .name(name() + ".totalEnergy")
        .desc("Total energy per rank (pJ)");

    averagePower
        .name(name() + ".averagePower")
        .desc("Core power per rank (mW)");
}
void
DRAMCtrl::regStats()
{
    using namespace Stats;

    AbstractMemory::regStats();

    for (auto r : ranks) {
        r->regStats();
    }

    readReqs
        .name(name() + ".readReqs")
        .desc("Number of read requests accepted");

    writeReqs
        .name(name() + ".writeReqs")
        .desc("Number of write requests accepted");

    readBursts
        .name(name() + ".readBursts")
        .desc("Number of DRAM read bursts, "
              "including those serviced by the write queue");

    writeBursts
        .name(name() + ".writeBursts")
        .desc("Number of DRAM write bursts, "
              "including those merged in the write queue");

    servicedByWrQ
        .name(name() + ".servicedByWrQ")
        .desc("Number of DRAM read bursts serviced by the write queue");

    mergedWrBursts
        .name(name() + ".mergedWrBursts")
        .desc("Number of DRAM write bursts merged with an existing one");

    neitherReadNorWrite
        .name(name() + ".neitherReadNorWriteReqs")
        .desc("Number of requests that are neither read nor write");

    perBankRdBursts
        .init(banksPerRank * ranksPerChannel)
        .name(name() + ".perBankRdBursts")
        .desc("Per bank read bursts");

    perBankWrBursts
        .init(banksPerRank * ranksPerChannel)
        .name(name() + ".perBankWrBursts")
        .desc("Per bank write bursts");
    avgRdQLen
        .name(name() + ".avgRdQLen")
        .desc("Average read queue length when enqueuing")
        .precision(2);

    avgWrQLen
        .name(name() + ".avgWrQLen")
        .desc("Average write queue length when enqueuing")
        .precision(2);

    totQLat
        .name(name() + ".totQLat")
        .desc("Total ticks spent queuing");

    totBusLat
        .name(name() + ".totBusLat")
        .desc("Total ticks spent in databus transfers");

    totMemAccLat
        .name(name() + ".totMemAccLat")
        .desc("Total ticks spent from burst creation until serviced "
              "by the DRAM");

    avgQLat
        .name(name() + ".avgQLat")
        .desc("Average queueing delay per DRAM burst")
        .precision(2);

    avgQLat = totQLat / (readBursts - servicedByWrQ);

    avgBusLat
        .name(name() + ".avgBusLat")
        .desc("Average bus latency per DRAM burst")
        .precision(2);

    avgBusLat = totBusLat / (readBursts - servicedByWrQ);

    avgMemAccLat
        .name(name() + ".avgMemAccLat")
        .desc("Average memory access latency per DRAM burst")
        .precision(2);

    avgMemAccLat = totMemAccLat / (readBursts - servicedByWrQ);

    numRdRetry
        .name(name() + ".numRdRetry")
        .desc("Number of times read queue was full causing retry");

    numWrRetry
        .name(name() + ".numWrRetry")
        .desc("Number of times write queue was full causing retry");

    readRowHits
        .name(name() + ".readRowHits")
        .desc("Number of row buffer hits during reads");

    writeRowHits
        .name(name() + ".writeRowHits")
        .desc("Number of row buffer hits during writes");

    readRowHitRate
        .name(name() + ".readRowHitRate")
        .desc("Row buffer hit rate for reads")
        .precision(2);

    readRowHitRate = (readRowHits / (readBursts - servicedByWrQ)) * 100;

    writeRowHitRate
        .name(name() + ".writeRowHitRate")
        .desc("Row buffer hit rate for writes")
        .precision(2);

    writeRowHitRate = (writeRowHits / (writeBursts - mergedWrBursts)) * 100;
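    // Note that bursts which never reach the DRAM devices (reads serviced
    // by the write queue, merged writes) are excluded from the denominators
    // above, so the average latencies and hit rates only cover bursts that
    // were actually issued to the devices.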
    readPktSize
        .init(ceilLog2(burstSize) + 1)
        .name(name() + ".readPktSize")
        .desc("Read request sizes (log2)");

    writePktSize
        .init(ceilLog2(burstSize) + 1)
        .name(name() + ".writePktSize")
        .desc("Write request sizes (log2)");

    rdQLenPdf
        .init(readBufferSize)
        .name(name() + ".rdQLenPdf")
        .desc("What read queue length does an incoming req see");

    wrQLenPdf
        .init(writeBufferSize)
        .name(name() + ".wrQLenPdf")
        .desc("What write queue length does an incoming req see");

    bytesPerActivate
        .init(maxAccessesPerRow)
        .name(name() + ".bytesPerActivate")
        .desc("Bytes accessed per row activation")
        .flags(nozero);

    rdPerTurnAround
        .init(readBufferSize)
        .name(name() + ".rdPerTurnAround")
        .desc("Reads before turning the bus around for writes")
        .flags(nozero);

    wrPerTurnAround
        .init(writeBufferSize)
        .name(name() + ".wrPerTurnAround")
        .desc("Writes before turning the bus around for reads")
        .flags(nozero);
    bytesReadDRAM
        .name(name() + ".bytesReadDRAM")
        .desc("Total number of bytes read from DRAM");

    bytesReadWrQ
        .name(name() + ".bytesReadWrQ")
        .desc("Total number of bytes read from write queue");

    bytesWritten
        .name(name() + ".bytesWritten")
        .desc("Total number of bytes written to DRAM");

    bytesReadSys
        .name(name() + ".bytesReadSys")
        .desc("Total read bytes from the system interface side");

    bytesWrittenSys
        .name(name() + ".bytesWrittenSys")
        .desc("Total written bytes from the system interface side");

    avgRdBW
        .name(name() + ".avgRdBW")
        .desc("Average DRAM read bandwidth in MiByte/s")
        .precision(2);

    avgRdBW = (bytesReadDRAM / 1000000) / simSeconds;

    avgWrBW
        .name(name() + ".avgWrBW")
        .desc("Average achieved write bandwidth in MiByte/s")
        .precision(2);

    avgWrBW = (bytesWritten / 1000000) / simSeconds;

    avgRdBWSys
        .name(name() + ".avgRdBWSys")
        .desc("Average system read bandwidth in MiByte/s")
        .precision(2);

    avgRdBWSys = (bytesReadSys / 1000000) / simSeconds;

    avgWrBWSys
        .name(name() + ".avgWrBWSys")
        .desc("Average system write bandwidth in MiByte/s")
        .precision(2);

    avgWrBWSys = (bytesWrittenSys / 1000000) / simSeconds;

    peakBW
        .name(name() + ".peakBW")
        .desc("Theoretical peak bandwidth in MiByte/s")
        .precision(2);

    peakBW = (SimClock::Frequency / tBURST) * burstSize / 1000000;
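    // Worked example (hypothetical DDR3-1600 x64 channel): with
    // SimClock::Frequency = 1e12 ticks/s, tBURST = 5000 ticks (5 ns) and
    // burstSize = 64 bytes, peakBW = (1e12 / 5000) * 64 / 1e6
    // = 12800 MiByte/s.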
    busUtil
        .name(name() + ".busUtil")
        .desc("Data bus utilization in percentage")
        .precision(2);

    busUtil = (avgRdBW + avgWrBW) / peakBW * 100;

    totGap
        .name(name() + ".totGap")
        .desc("Total gap between requests");

    avgGap
        .name(name() + ".avgGap")
        .desc("Average gap between requests")
        .precision(2);

    avgGap = totGap / (readReqs + writeReqs);
    // Stats for DRAM Power calculation based on Micron datasheet
    busUtilRead
        .name(name() + ".busUtilRead")
        .desc("Data bus utilization in percentage for reads")
        .precision(2);

    busUtilRead = avgRdBW / peakBW * 100;

    busUtilWrite
        .name(name() + ".busUtilWrite")
        .desc("Data bus utilization in percentage for writes")
        .precision(2);

    busUtilWrite = avgWrBW / peakBW * 100;

    pageHitRate
        .name(name() + ".pageHitRate")
        .desc("Row buffer hit rate, read and write combined")
        .precision(2);

    pageHitRate = (writeRowHits + readRowHits) /
        (writeBursts - mergedWrBursts + readBursts - servicedByWrQ) * 100;
}
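// Note: the derived Formula stats above (avgQLat, busUtil, pageHitRate and
// friends) are, as far as the stats framework is concerned, evaluated when
// statistics are dumped, so they reflect whatever the underlying scalar
// counters hold at that point.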
void
DRAMCtrl::recvFunctional(PacketPtr pkt)
{
    // rely on the abstract memory
    functionalAccess(pkt);
}
BaseSlavePort&
DRAMCtrl::getSlavePort(const string &if_name, PortID idx)
{
    if (if_name != "port") {
        return MemObject::getSlavePort(if_name, idx);
    } else {
        return port;
    }
}
unsigned int
DRAMCtrl::drain(DrainManager *dm)
{
    unsigned int count = port.drain(dm);

    // if there is anything in any of our internal queues, keep track
    // of that as well
    if (!(writeQueue.empty() && readQueue.empty() &&
          respQueue.empty())) {
        DPRINTF(Drain, "DRAM controller not drained, write: %d, read: %d,"
                " resp: %d\n", writeQueue.size(), readQueue.size(),
                respQueue.size());
        ++count;
        drainManager = dm;

        // the only part that is not drained automatically over time
        // is the write queue, thus kick things into action if needed
        if (!writeQueue.empty() && !nextReqEvent.scheduled()) {
            schedule(nextReqEvent, curTick());
        }
    }

    if (count)
        setDrainState(Drainable::Draining);
    else
        setDrainState(Drainable::Drained);

    return count;
}
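// The count returned is non-zero while the port or any internal queue still
// holds packets; in that case the controller stays Draining and the stored
// drainManager is signalled from the event-processing code once the queues
// have emptied out.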
DRAMCtrl::MemoryPort::MemoryPort(const std::string& name, DRAMCtrl& _memory)
    : QueuedSlavePort(name, &_memory, queue), queue(_memory, *this),
      memory(_memory)
{ }
AddrRangeList
DRAMCtrl::MemoryPort::getAddrRanges() const
{
    AddrRangeList ranges;
    ranges.push_back(memory.getAddrRange());
    return ranges;
}
void
DRAMCtrl::MemoryPort::recvFunctional(PacketPtr pkt)
{
    pkt->pushLabel(memory.name());

    if (!queue.checkFunctional(pkt)) {
        // Default implementation of SimpleTimingPort::recvFunctional()
        // calls recvAtomic() and throws away the latency; we can save a
        // little here by just not calculating the latency.
        memory.recvFunctional(pkt);
    }

    pkt->popLabel();
}
Tick
DRAMCtrl::MemoryPort::recvAtomic(PacketPtr pkt)
{
    return memory.recvAtomic(pkt);
}
bool
DRAMCtrl::MemoryPort::recvTimingReq(PacketPtr pkt)
{
    // pass it to the memory controller
    return memory.recvTimingReq(pkt);
}
DRAMCtrl*
DRAMCtrlParams::create()
{
    return new DRAMCtrl(this);
}