/*
 * Copyright (c) 2010-2018 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2013 Amin Farmahini-Farahani
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Andreas Hansson
 *          Ani Udipi
 *          Neha Agarwal
 *          Omar Naji
 *          Wendy Elsasser
 *          Radhika Jagtap
 */

#include "mem/dram_ctrl.hh"

#include "base/bitfield.hh"
#include "base/trace.hh"
#include "debug/DRAM.hh"
#include "debug/DRAMPower.hh"
#include "debug/DRAMState.hh"
#include "debug/Drain.hh"
#include "debug/QOS.hh"
#include "sim/system.hh"

using namespace std;
using namespace Data;

DRAMCtrl::DRAMCtrl(const DRAMCtrlParams* p) :
    QoS::MemCtrl(p),
    port(name() + ".port", *this), isTimingMode(false),
    retryRdReq(false), retryWrReq(false),
    nextReqEvent([this]{ processNextReqEvent(); }, name()),
    respondEvent([this]{ processRespondEvent(); }, name()),
    deviceSize(p->device_size),
    deviceBusWidth(p->device_bus_width), burstLength(p->burst_length),
    deviceRowBufferSize(p->device_rowbuffer_size),
    devicesPerRank(p->devices_per_rank),
    burstSize((devicesPerRank * burstLength * deviceBusWidth) / 8),
    rowBufferSize(devicesPerRank * deviceRowBufferSize),
    columnsPerRowBuffer(rowBufferSize / burstSize),
    columnsPerStripe(range.interleaved() ?
                     range.granularity() / burstSize : 1),
    ranksPerChannel(p->ranks_per_channel),
    bankGroupsPerRank(p->bank_groups_per_rank),
    bankGroupArch(p->bank_groups_per_rank > 0),
    banksPerRank(p->banks_per_rank), channels(p->channels), rowsPerBank(0),
    readBufferSize(p->read_buffer_size),
    writeBufferSize(p->write_buffer_size),
    writeHighThreshold(writeBufferSize * p->write_high_thresh_perc / 100.0),
    writeLowThreshold(writeBufferSize * p->write_low_thresh_perc / 100.0),
    minWritesPerSwitch(p->min_writes_per_switch),
    writesThisTime(0), readsThisTime(0),
    tCK(p->tCK), tRTW(p->tRTW), tCS(p->tCS), tBURST(p->tBURST),
    tCCD_L_WR(p->tCCD_L_WR),
    tCCD_L(p->tCCD_L), tRCD(p->tRCD), tCL(p->tCL), tRP(p->tRP), tRAS(p->tRAS),
    tWR(p->tWR), tRTP(p->tRTP), tRFC(p->tRFC), tREFI(p->tREFI), tRRD(p->tRRD),
    tRRD_L(p->tRRD_L), tXAW(p->tXAW), tXP(p->tXP), tXS(p->tXS),
    activationLimit(p->activation_limit), rankToRankDly(tCS + tBURST),
    wrToRdDly(tCL + tBURST + p->tWTR), rdToWrDly(tRTW + tBURST),
    memSchedPolicy(p->mem_sched_policy), addrMapping(p->addr_mapping),
    pageMgmt(p->page_policy),
    maxAccessesPerRow(p->max_accesses_per_row),
    frontendLatency(p->static_frontend_latency),
    backendLatency(p->static_backend_latency),
    nextBurstAt(0), prevArrival(0),
    nextReqTime(0), activeRank(0), timeStampOffset(0),
    lastStatsResetTick(0)
{
    // sanity check the ranks since we rely on bit slicing for the
    // address decoding
    fatal_if(!isPowerOf2(ranksPerChannel), "DRAM rank count of %d is not "
             "allowed, must be a power of two\n", ranksPerChannel);

    fatal_if(!isPowerOf2(burstSize), "DRAM burst size %d is not allowed, "
             "must be a power of two\n", burstSize);
    readQueue.resize(p->qos_priorities);
    writeQueue.resize(p->qos_priorities);

    for (int i = 0; i < ranksPerChannel; i++) {
        Rank* rank = new Rank(*this, p, i);
        ranks.push_back(rank);
    }

    // perform a basic check of the write thresholds
    if (p->write_low_thresh_perc >= p->write_high_thresh_perc)
        fatal("Write buffer low threshold %d must be smaller than the "
              "high threshold %d\n", p->write_low_thresh_perc,
              p->write_high_thresh_perc);

    // determine the rows per bank by looking at the total capacity
    uint64_t capacity = ULL(1) << ceilLog2(AbstractMemory::size());

    // determine the actual DRAM capacity from the DRAM config in Mbytes
    uint64_t deviceCapacity = deviceSize / (1024 * 1024) * devicesPerRank *
        ranksPerChannel;

    // if the actual DRAM size does not match the memory capacity in the
    // system, warn!
    if (deviceCapacity != capacity / (1024 * 1024))
        warn("DRAM device capacity (%d Mbytes) does not match the "
             "address range assigned (%d Mbytes)\n", deviceCapacity,
             capacity / (1024 * 1024));
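
    // Continuing the hypothetical example: 256 MiB devices, 8 devices
    // per rank and 2 ranks per channel give a device-based capacity of
    // 256 * 8 * 2 = 4096 Mbytes, which should match a 4 GiB address
    // range assigned to this controller.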

    DPRINTF(DRAM, "Memory capacity %lld (%lld) bytes\n", capacity,
            AbstractMemory::size());

    DPRINTF(DRAM, "Row buffer size %d bytes with %d columns per row buffer\n",
            rowBufferSize, columnsPerRowBuffer);

    rowsPerBank = capacity / (rowBufferSize * banksPerRank * ranksPerChannel);
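
    // With the example numbers above (4 GiB channel, 8 kB row buffer,
    // 8 banks per rank, 2 ranks) this works out to
    // 2^32 / (2^13 * 8 * 2) = 32768 rows per bank.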

    // some basic sanity checks
    if (tREFI <= tRP || tREFI <= tRFC) {
        fatal("tREFI (%d) must be larger than tRP (%d) and tRFC (%d)\n",
              tREFI, tRP, tRFC);
    }

    // basic bank group architecture checks
    if (bankGroupArch) {
        // must have at least one bank per bank group
        if (bankGroupsPerRank > banksPerRank) {
            fatal("banks per rank (%d) must be equal to or larger than "
                  "bank groups per rank (%d)\n",
                  banksPerRank, bankGroupsPerRank);
        }
        // must have the same number of banks in each bank group
        if ((banksPerRank % bankGroupsPerRank) != 0) {
            fatal("Banks per rank (%d) must be evenly divisible by bank "
                  "groups per rank (%d) for equal banks per bank group\n",
                  banksPerRank, bankGroupsPerRank);
        }
        // tCCD_L should be greater than the minimal back-to-back burst delay
        if (tCCD_L <= tBURST) {
            fatal("tCCD_L (%d) should be larger than tBURST (%d) when "
                  "bank groups per rank (%d) is greater than 1\n",
                  tCCD_L, tBURST, bankGroupsPerRank);
        }
        // tCCD_L_WR should be greater than the minimal back-to-back burst
        // delay
        if (tCCD_L_WR <= tBURST) {
            fatal("tCCD_L_WR (%d) should be larger than tBURST (%d) when "
                  "bank groups per rank (%d) is greater than 1\n",
                  tCCD_L_WR, tBURST, bankGroupsPerRank);
        }
        // tRRD_L must be at least as large as tRRD, the same bank group
        // ACT-to-ACT delay; some datasheets might specify it equal to tRRD
        if (tRRD_L < tRRD) {
            fatal("tRRD_L (%d) should be equal to or larger than tRRD (%d) "
                  "when bank groups per rank (%d) is greater than 1\n",
                  tRRD_L, tRRD, bankGroupsPerRank);
        }
    }
}

void
DRAMCtrl::init()
{
    MemCtrl::init();

    if (!port.isConnected()) {
        fatal("DRAMCtrl %s is unconnected!\n", name());
    } else {
        port.sendRangeChange();
    }

    // a few sanity checks on the interleaving, saved for here to
    // ensure that the system pointer is initialised
    if (range.interleaved()) {
        if (channels != range.stripes())
            fatal("%s has %d interleaved address stripes but %d channel(s)\n",
                  name(), range.stripes(), channels);

        if (addrMapping == Enums::RoRaBaChCo) {
            if (rowBufferSize != range.granularity()) {
                fatal("Channel interleaving of %s doesn't match RoRaBaChCo "
                      "address map\n", name());
            }
        } else if (addrMapping == Enums::RoRaBaCoCh ||
                   addrMapping == Enums::RoCoRaBaCh) {
            // for the interleavings with channel bits in the bottom,
            // if the system uses a channel striping granularity that
            // is larger than the DRAM burst size, then map the
            // sequential accesses within a stripe to a number of
            // columns in the DRAM, effectively placing some of the
            // lower-order column bits as the least-significant bits
            // of the address (above the ones denoting the burst size)
            assert(columnsPerStripe >= 1);

            // channel striping has to be done at a granularity that
            // is equal to or larger than a cache line
            if (system()->cacheLineSize() > range.granularity()) {
                fatal("Channel interleaving of %s must be at least as large "
                      "as the cache line size\n", name());
            }

            // ...and equal to or smaller than the row-buffer size
            if (rowBufferSize < range.granularity()) {
                fatal("Channel interleaving of %s must be at most as large "
                      "as the row-buffer size\n", name());
            }
            // this is essentially the check above, so just to be sure
            assert(columnsPerStripe <= columnsPerRowBuffer);
        }
    }
}

void
DRAMCtrl::startup()
{
    // remember the memory system mode of operation
    isTimingMode = system()->isTimingMode();

    if (isTimingMode) {
        // timestamp offset should be in clock cycles for DRAMPower
        timeStampOffset = divCeil(curTick(), tCK);

        // update the start tick for the precharge accounting to the
        // current tick
        for (auto r : ranks) {
            r->startup(curTick() + tREFI - tRP);
        }

        // shift the bus busy time sufficiently far ahead that we never
        // have to worry about negative values when computing the time for
        // the next request, this will add an insignificant bubble at the
        // start of simulation
        nextBurstAt = curTick() + tRP + tRCD;
    }
}

Tick
DRAMCtrl::recvAtomic(PacketPtr pkt)
{
    DPRINTF(DRAM, "recvAtomic: %s 0x%x\n", pkt->cmdString(), pkt->getAddr());

    panic_if(pkt->cacheResponding(), "Should not see packets where cache "
             "is responding");

    // do the actual memory access and turn the packet into a response
    access(pkt);

    Tick latency = 0;
    if (pkt->hasData()) {
        // this value is not supposed to be accurate, just enough to
        // keep things going, mimic a closed page
        latency = tRP + tRCD + tCL;
    }
    return latency;
}

bool
DRAMCtrl::readQueueFull(unsigned int neededEntries) const
{
    DPRINTF(DRAM, "Read queue limit %d, current size %d, entries needed %d\n",
            readBufferSize, totalReadQueueSize + respQueue.size(),
            neededEntries);

    auto rdsize_new = totalReadQueueSize + respQueue.size() + neededEntries;
    return rdsize_new > readBufferSize;
}

bool
DRAMCtrl::writeQueueFull(unsigned int neededEntries) const
{
    DPRINTF(DRAM, "Write queue limit %d, current size %d, entries needed %d\n",
            writeBufferSize, totalWriteQueueSize, neededEntries);

    auto wrsize_new = (totalWriteQueueSize + neededEntries);
    return wrsize_new > writeBufferSize;
}

DRAMCtrl::DRAMPacket*
DRAMCtrl::decodeAddr(PacketPtr pkt, Addr dramPktAddr, unsigned size,
                     bool isRead)
{
    // decode the address based on the address mapping scheme, with
    // Ro, Ra, Co, Ba and Ch denoting row, rank, column, bank and
    // channel, respectively
    uint8_t rank;
    uint8_t bank;
    // use a 64-bit unsigned during the computations as the row is
    // always the top bits, and check before creating the DRAMPacket
    uint64_t row;

    // truncate the address to a DRAM burst, which makes it unique to
    // a specific column, row, bank, rank and channel
    Addr addr = dramPktAddr / burstSize;

    // we have removed the lowest order address bits that denote the
    // position within the column
    if (addrMapping == Enums::RoRaBaChCo) {
        // the lowest order bits denote the column to ensure that
        // sequential cache lines occupy the same row
        addr = addr / columnsPerRowBuffer;

        // take out the channel part of the address
        addr = addr / channels;

        // after the channel bits, get the bank bits to interleave
        // over the banks
        bank = addr % banksPerRank;
        addr = addr / banksPerRank;

        // after the bank, we get the rank bits which thus interleave
        // over the ranks
        rank = addr % ranksPerChannel;
        addr = addr / ranksPerChannel;

        // lastly, get the row bits, no need to remove them from addr
        row = addr % rowsPerBank;
    } else if (addrMapping == Enums::RoRaBaCoCh) {
        // take out the lower-order column bits
        addr = addr / columnsPerStripe;

        // take out the channel part of the address
        addr = addr / channels;

        // next, the higher-order column bits
        addr = addr / (columnsPerRowBuffer / columnsPerStripe);

        // after the column bits, we get the bank bits to interleave
        // over the banks
        bank = addr % banksPerRank;
        addr = addr / banksPerRank;

        // after the bank, we get the rank bits which thus interleave
        // over the ranks
        rank = addr % ranksPerChannel;
        addr = addr / ranksPerChannel;

        // lastly, get the row bits, no need to remove them from addr
        row = addr % rowsPerBank;
    } else if (addrMapping == Enums::RoCoRaBaCh) {
        // optimise for closed page mode and utilise maximum
        // parallelism of the DRAM (at the cost of power)

        // take out the lower-order column bits
        addr = addr / columnsPerStripe;

        // take out the channel part of the address, note that this has
        // to match with how accesses are interleaved between the
        // controllers in the address mapping
        addr = addr / channels;

        // start with the bank bits, as this provides the maximum
        // opportunity for parallelism between requests
        bank = addr % banksPerRank;
        addr = addr / banksPerRank;

        // next get the rank bits
        rank = addr % ranksPerChannel;
        addr = addr / ranksPerChannel;

        // next, the higher-order column bits
        addr = addr / (columnsPerRowBuffer / columnsPerStripe);

        // lastly, get the row bits, no need to remove them from addr
        row = addr % rowsPerBank;
    } else
        panic("Unknown address mapping policy chosen!");

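    // Worked example (hypothetical geometry): with a 64-byte burst,
    // 128 columns per row buffer, a single channel, 8 banks per rank
    // and 2 ranks, RoRaBaChCo decodes a byte address A as:
    //   column = (A / 64) % 128
    //   bank   = (A / (64 * 128)) % 8
    //   rank   = (A / (64 * 128 * 8)) % 2
    //   row    =  A / (64 * 128 * 8 * 2)
    // (the divide by channels is a no-op for one channel)
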
    assert(rank < ranksPerChannel);
    assert(bank < banksPerRank);
    assert(row < rowsPerBank);
    assert(row < Bank::NO_ROW);

    DPRINTF(DRAM, "Address: %lld Rank %d Bank %d Row %d\n",
            dramPktAddr, rank, bank, row);

    // create the corresponding DRAM packet with the entry time and
    // ready time set to the current tick, the latter will be updated
    // later
    uint16_t bank_id = banksPerRank * rank + bank;
    return new DRAMPacket(pkt, isRead, rank, bank, row, bank_id, dramPktAddr,
                          size, ranks[rank]->banks[bank], *ranks[rank]);
}

void
DRAMCtrl::addToReadQueue(PacketPtr pkt, unsigned int pktCount)
{
    // only add to the read queue here. whenever the request is
    // eventually done, set the readyTime, and call schedule()
    assert(!pkt->isWrite());

    assert(pktCount != 0);

    // if the request size is larger than burst size, the pkt is split into
    // multiple DRAM packets
    // Note that if the pkt starting address is not aligned to burst size,
    // the address of the first DRAM packet is kept unaligned. Subsequent
    // DRAM packets are aligned to burst size boundaries. This is to ensure
    // we accurately check read packets against packets in the write queue.
    Addr addr = pkt->getAddr();
    unsigned pktsServicedByWrQ = 0;
    BurstHelper* burst_helper = NULL;
    for (int cnt = 0; cnt < pktCount; ++cnt) {
        unsigned size = std::min((addr | (burstSize - 1)) + 1,
                                 pkt->getAddr() + pkt->getSize()) - addr;
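
        // For example (hypothetical values): a 128-byte packet at
        // address 0x1234 with a 64-byte burst size is chopped into
        // three DRAM packets of 12, 64 and 52 bytes; the first ends
        // at the burst boundary 0x1240, the last at the packet end.
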
        readPktSize[ceilLog2(size)]++;
        readBursts++;
        masterReadAccesses[pkt->masterId()]++;

        // First check the write buffer to see if the data is already at
        // the controller
        bool foundInWrQ = false;
        Addr burst_addr = burstAlign(addr);
        // if the burst address is not present then there is no need
        // to look any further
        if (isInWriteQueue.find(burst_addr) != isInWriteQueue.end()) {
            for (const auto& vec : writeQueue) {
                for (const auto& p : vec) {
                    // check if the read is subsumed in the write queue
                    // packet we are looking at
                    if (p->addr <= addr &&
                        ((addr + size) <= (p->addr + p->size))) {

                        foundInWrQ = true;
                        servicedByWrQ++;
                        pktsServicedByWrQ++;
                        DPRINTF(DRAM,
                                "Read to addr %lld with size %d serviced by "
                                "write queue\n",
                                addr, size);
                        bytesReadWrQ += burstSize;
                        break;
                    }
                }
            }
        }

        // If not found in the write q, make a DRAM packet and
        // push it onto the read queue
        if (!foundInWrQ) {

            // Make the burst helper for split packets
            if (pktCount > 1 && burst_helper == NULL) {
                DPRINTF(DRAM, "Read to addr %lld translates to %d "
                        "dram requests\n", pkt->getAddr(), pktCount);
                burst_helper = new BurstHelper(pktCount);
            }

            DRAMPacket* dram_pkt = decodeAddr(pkt, addr, size, true);
            dram_pkt->burstHelper = burst_helper;

            assert(!readQueueFull(1));
            rdQLenPdf[totalReadQueueSize + respQueue.size()]++;

            DPRINTF(DRAM, "Adding to read queue\n");

            readQueue[dram_pkt->qosValue()].push_back(dram_pkt);

            ++dram_pkt->rankRef.readEntries;

            // log packet
            logRequest(MemCtrl::READ, pkt->masterId(), pkt->qosValue(),
                       dram_pkt->addr, 1);

            // Update stats
            avgRdQLen = totalReadQueueSize + respQueue.size();
        }

        // Starting address of next dram pkt (aligned to burstSize boundary)
        addr = (addr | (burstSize - 1)) + 1;
    }

    // If all packets are serviced by the write queue, we send the
    // response back
    if (pktsServicedByWrQ == pktCount) {
        accessAndRespond(pkt, frontendLatency);
        return;
    }

    // Update how many split packets are serviced by the write queue
    if (burst_helper != NULL)
        burst_helper->burstsServiced = pktsServicedByWrQ;

    // If we are not already scheduled to get a request out of the
    // queue, do so now
    if (!nextReqEvent.scheduled()) {
        DPRINTF(DRAM, "Request scheduled immediately\n");
        schedule(nextReqEvent, curTick());
    }
}

void
DRAMCtrl::addToWriteQueue(PacketPtr pkt, unsigned int pktCount)
{
    // only add to the write queue here. whenever the request is
    // eventually done, set the readyTime, and call schedule()
    assert(pkt->isWrite());

    // if the request size is larger than burst size, the pkt is split into
    // multiple DRAM packets
    Addr addr = pkt->getAddr();
    for (int cnt = 0; cnt < pktCount; ++cnt) {
        unsigned size = std::min((addr | (burstSize - 1)) + 1,
                                 pkt->getAddr() + pkt->getSize()) - addr;
        writePktSize[ceilLog2(size)]++;
        writeBursts++;
        masterWriteAccesses[pkt->masterId()]++;

        // see if we can merge with an existing item in the write
        // queue and keep track of whether we have merged or not
        bool merged = isInWriteQueue.find(burstAlign(addr)) !=
            isInWriteQueue.end();

        // if the item was not merged we need to create a new write
        // and enqueue it
        if (!merged) {
            DRAMPacket* dram_pkt = decodeAddr(pkt, addr, size, false);

            assert(totalWriteQueueSize < writeBufferSize);
            wrQLenPdf[totalWriteQueueSize]++;

            DPRINTF(DRAM, "Adding to write queue\n");

            writeQueue[dram_pkt->qosValue()].push_back(dram_pkt);
            isInWriteQueue.insert(burstAlign(addr));

            // log packet
            logRequest(MemCtrl::WRITE, pkt->masterId(), pkt->qosValue(),
                       dram_pkt->addr, 1);

            assert(totalWriteQueueSize == isInWriteQueue.size());

            // Update stats
            avgWrQLen = totalWriteQueueSize;

            // increment write entries of the rank
            ++dram_pkt->rankRef.writeEntries;
        } else {
            DPRINTF(DRAM, "Merging write burst with existing queue entry\n");

            // keep track of the fact that this burst effectively
            // disappeared as it was merged with an existing one
            mergedWrBursts++;
        }

        // Starting address of next dram pkt (aligned to burstSize boundary)
        addr = (addr | (burstSize - 1)) + 1;
    }

    // we do not wait for the writes to be sent to the actual memory,
    // but instead take responsibility for the consistency here and
    // snoop the write queue for any upcoming reads
    // @todo, if a pkt size is larger than burst size, we might need a
    // different front end latency
    accessAndRespond(pkt, frontendLatency);

    // If we are not already scheduled to get a request out of the
    // queue, do so now
    if (!nextReqEvent.scheduled()) {
        DPRINTF(DRAM, "Request scheduled immediately\n");
        schedule(nextReqEvent, curTick());
    }
}

void
DRAMCtrl::printQs() const
{
#if TRACING_ON
    DPRINTF(DRAM, "===READ QUEUE===\n\n");
    for (const auto& queue : readQueue) {
        for (const auto& packet : queue) {
            DPRINTF(DRAM, "Read %lu\n", packet->addr);
        }
    }

    DPRINTF(DRAM, "\n===RESP QUEUE===\n\n");
    for (const auto& packet : respQueue) {
        DPRINTF(DRAM, "Response %lu\n", packet->addr);
    }

    DPRINTF(DRAM, "\n===WRITE QUEUE===\n\n");
    for (const auto& queue : writeQueue) {
        for (const auto& packet : queue) {
            DPRINTF(DRAM, "Write %lu\n", packet->addr);
        }
    }
#endif // TRACING_ON
}

bool
DRAMCtrl::recvTimingReq(PacketPtr pkt)
{
    // This is where we enter from the outside world
    DPRINTF(DRAM, "recvTimingReq: request %s addr %lld size %d\n",
            pkt->cmdString(), pkt->getAddr(), pkt->getSize());

    panic_if(pkt->cacheResponding(), "Should not see packets where cache "
             "is responding");

    panic_if(!(pkt->isRead() || pkt->isWrite()),
             "Should only see reads and writes at memory controller\n");

    // Calc avg gap between requests
    if (prevArrival != 0) {
        totGap += curTick() - prevArrival;
    }
    prevArrival = curTick();

    // Find out how many DRAM packets a pkt translates to
    // If the burst size is equal to or larger than the pkt size, then a pkt
    // translates to only one DRAM packet. Otherwise, a pkt translates to
    // multiple DRAM packets
    unsigned size = pkt->getSize();
    unsigned offset = pkt->getAddr() & (burstSize - 1);
    unsigned int dram_pkt_count = divCeil(offset + size, burstSize);
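
    // For example, a 128-byte request starting 52 bytes into a 64-byte
    // burst (the hypothetical packet from addToReadQueue above) needs
    // divCeil(52 + 128, 64) = 3 DRAM packets.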

    // run the QoS scheduler and assign a QoS priority value to the packet
    qosSchedule( { &readQueue, &writeQueue }, burstSize, pkt);

    // check local buffers and do not accept if full
    if (pkt->isRead()) {
        assert(size != 0);
        if (readQueueFull(dram_pkt_count)) {
            DPRINTF(DRAM, "Read queue full, not accepting\n");
            // remember that we have to retry this port
            retryRdReq = true;
            numRdRetry++;
            return false;
        } else {
            addToReadQueue(pkt, dram_pkt_count);
            readReqs++;
            bytesReadSys += size;
        }
    } else {
        assert(pkt->isWrite());
        assert(size != 0);
        if (writeQueueFull(dram_pkt_count)) {
            DPRINTF(DRAM, "Write queue full, not accepting\n");
            // remember that we have to retry this port
            retryWrReq = true;
            numWrRetry++;
            return false;
        } else {
            addToWriteQueue(pkt, dram_pkt_count);
            writeReqs++;
            bytesWrittenSys += size;
        }
    }

    return true;
}

void
DRAMCtrl::processRespondEvent()
{
    DPRINTF(DRAM,
            "processRespondEvent(): Some req has reached its readyTime\n");

    DRAMPacket* dram_pkt = respQueue.front();

    // if a read has reached its ready-time, decrement the number of reads
    // At this point the packet has been handled and there is a possibility
    // to switch to low-power mode if no other packet is available
    --dram_pkt->rankRef.readEntries;
    DPRINTF(DRAM, "number of read entries for rank %d is %d\n",
            dram_pkt->rank, dram_pkt->rankRef.readEntries);

    // counter should at least indicate one outstanding request
    // for this read
    assert(dram_pkt->rankRef.outstandingEvents > 0);
    // read response received, decrement count
    --dram_pkt->rankRef.outstandingEvents;

    // at this moment we should not have transitioned to a low-power state
    assert((dram_pkt->rankRef.pwrState != PWR_SREF) &&
           (dram_pkt->rankRef.pwrState != PWR_PRE_PDN) &&
           (dram_pkt->rankRef.pwrState != PWR_ACT_PDN));

    // track if this is the last packet before idling
    // and that there are no outstanding commands to this rank
    if (dram_pkt->rankRef.isQueueEmpty() &&
        dram_pkt->rankRef.outstandingEvents == 0) {
        // verify that there are no events scheduled
        assert(!dram_pkt->rankRef.activateEvent.scheduled());
        assert(!dram_pkt->rankRef.prechargeEvent.scheduled());

        // if coming from active state, schedule power event to
        // active power-down else go to precharge power-down
        DPRINTF(DRAMState, "Rank %d sleep at tick %d; current power state is "
                "%d\n", dram_pkt->rank, curTick(), dram_pkt->rankRef.pwrState);

        // default to ACT power-down unless already in IDLE state
        // could be in IDLE if PRE issued before data returned
        PowerState next_pwr_state = PWR_ACT_PDN;
        if (dram_pkt->rankRef.pwrState == PWR_IDLE) {
            next_pwr_state = PWR_PRE_PDN;
        }

        dram_pkt->rankRef.powerDownSleep(next_pwr_state, curTick());
    }

    if (dram_pkt->burstHelper) {
        // it is a split packet
        dram_pkt->burstHelper->burstsServiced++;
        if (dram_pkt->burstHelper->burstsServiced ==
            dram_pkt->burstHelper->burstCount) {
            // we have now serviced all children packets of a system packet
            // so we can now respond to the requester
            // @todo we probably want to have a different front end and back
            // end latency for split packets
            accessAndRespond(dram_pkt->pkt, frontendLatency + backendLatency);
            delete dram_pkt->burstHelper;
            dram_pkt->burstHelper = NULL;
        }
    } else {
        // it is not a split packet
        accessAndRespond(dram_pkt->pkt, frontendLatency + backendLatency);
    }

    delete respQueue.front();
    respQueue.pop_front();

    if (!respQueue.empty()) {
        assert(respQueue.front()->readyTime >= curTick());
        assert(!respondEvent.scheduled());
        schedule(respondEvent, respQueue.front()->readyTime);
    } else {
        // if there is nothing left in any queue, signal a drain
        if (drainState() == DrainState::Draining &&
            !totalWriteQueueSize && !totalReadQueueSize && allRanksDrained()) {

            DPRINTF(Drain, "DRAM controller done draining\n");
            signalDrainDone();
        }
    }

    // We have made a location in the queue available at this point,
    // so if there is a read that was forced to wait, retry now
    if (retryRdReq) {
        retryRdReq = false;
        port.sendRetryReq();
    }
}

DRAMCtrl::DRAMPacketQueue::iterator
DRAMCtrl::chooseNext(DRAMPacketQueue& queue, Tick extra_col_delay)
{
    // This method does the arbitration between requests.

    DRAMCtrl::DRAMPacketQueue::iterator ret = queue.end();

    if (!queue.empty()) {
        if (queue.size() == 1) {
            // an available rank corresponds to the refresh-idle state
            DRAMPacket* dram_pkt = *(queue.begin());
            if (ranks[dram_pkt->rank]->inRefIdleState()) {
                ret = queue.begin();
                DPRINTF(DRAM, "Single request, going to a free rank\n");
            } else {
                DPRINTF(DRAM, "Single request, going to a busy rank\n");
            }
        } else if (memSchedPolicy == Enums::fcfs) {
            // check if there is a packet going to a free rank
            for (auto i = queue.begin(); i != queue.end(); ++i) {
                DRAMPacket* dram_pkt = *i;
                if (ranks[dram_pkt->rank]->inRefIdleState()) {
                    ret = i;
                    break;
                }
            }
        } else if (memSchedPolicy == Enums::frfcfs) {
            ret = chooseNextFRFCFS(queue, extra_col_delay);
        } else {
            panic("No scheduling policy chosen\n");
        }
    }
    return ret;
}

DRAMCtrl::DRAMPacketQueue::iterator
DRAMCtrl::chooseNextFRFCFS(DRAMPacketQueue& queue, Tick extra_col_delay)
{
    // Only determine this if needed
    vector<uint32_t> earliest_banks(ranksPerChannel, 0);

    // Has minBankPrep been called to populate earliest_banks?
    bool filled_earliest_banks = false;
    // can the PRE/ACT sequence be done without impacting utilization?
    bool hidden_bank_prep = false;

    // search for seamless row hits first, if no seamless row hit is
    // found then determine if there are other packets that can be issued
    // without incurring additional bus delay due to bank timing
    // Will select closed rows first to enable more open row possibilities
    // in future selections
    bool found_hidden_bank = false;

    // remember if we found a row hit, not seamless, but bank prepped
    // and ready
    bool found_prepped_pkt = false;

    // if we have no row hit, prepped or not, and no seamless packet,
    // just go for the earliest possible
    bool found_earliest_pkt = false;

    auto selected_pkt_it = queue.end();

    // time we need to issue a column command to be seamless
    const Tick min_col_at = std::max(nextBurstAt + extra_col_delay, curTick());

    for (auto i = queue.begin(); i != queue.end() ; ++i) {
        DRAMPacket* dram_pkt = *i;
        const Bank& bank = dram_pkt->bankRef;
        const Tick col_allowed_at = dram_pkt->isRead() ? bank.rdAllowedAt :
                                                         bank.wrAllowedAt;

        DPRINTF(DRAM, "%s checking packet in bank %d\n",
                __func__, dram_pkt->bankRef.bank);

        // check if rank is not doing a refresh and thus is available, if not,
        // jump to the next packet
        if (dram_pkt->rankRef.inRefIdleState()) {

            DPRINTF(DRAM,
                    "%s bank %d - Rank %d available\n", __func__,
                    dram_pkt->bankRef.bank, dram_pkt->rankRef.rank);

            // check if it is a row hit
            if (bank.openRow == dram_pkt->row) {
                // no additional rank-to-rank or same bank-group
                // delays, or we switched read/write and might as well
                // go for the row hit
                if (col_allowed_at <= min_col_at) {
                    // FCFS within the hits, giving priority to
                    // commands that can issue seamlessly, without
                    // additional delay, such as same rank accesses
                    // and/or different bank-group accesses
                    DPRINTF(DRAM, "%s Seamless row buffer hit\n", __func__);
                    selected_pkt_it = i;
                    // no need to look through the remaining queue entries
                    break;
                } else if (!found_hidden_bank && !found_prepped_pkt) {
                    // if we did not find a packet to a closed row that can
                    // issue the bank commands without incurring delay, and
                    // did not yet find a packet to a prepped row, remember
                    // the current one
                    selected_pkt_it = i;
                    found_prepped_pkt = true;
                    DPRINTF(DRAM, "%s Prepped row buffer hit\n", __func__);
                }
            } else if (!found_earliest_pkt) {
                // if we have not initialised the bank status, do it
                // now, and only once per scheduling decision
                if (!filled_earliest_banks) {
                    // determine entries with the earliest bank delay
                    std::tie(earliest_banks, hidden_bank_prep) =
                        minBankPrep(queue, min_col_at);
                    filled_earliest_banks = true;
                }

                // bank is amongst first available banks
                // minBankPrep will give priority to packets that can
                // issue seamlessly
                if (bits(earliest_banks[dram_pkt->rank],
                         dram_pkt->bank, dram_pkt->bank)) {
                    found_earliest_pkt = true;
                    found_hidden_bank = hidden_bank_prep;

                    // give priority to packets that can issue
                    // bank commands 'behind the scenes'
                    // any additional delay if any will be due to
                    // col-to-col command requirements
                    if (hidden_bank_prep || !found_prepped_pkt)
                        selected_pkt_it = i;
                }
            }
        } else {
            DPRINTF(DRAM, "%s bank %d - Rank %d not available\n", __func__,
                    dram_pkt->bankRef.bank, dram_pkt->rankRef.rank);
        }
    }

    if (selected_pkt_it == queue.end()) {
        DPRINTF(DRAM, "%s no available ranks found\n", __func__);
    }

    return selected_pkt_it;
}

void
DRAMCtrl::accessAndRespond(PacketPtr pkt, Tick static_latency)
{
    DPRINTF(DRAM, "Responding to Address %lld.. ", pkt->getAddr());

    bool needsResponse = pkt->needsResponse();
    // do the actual memory access which also turns the packet into a
    // response
    access(pkt);

    // turn packet around to go back to requester if response expected
    if (needsResponse) {
        // access already turned the packet into a response
        assert(pkt->isResponse());
        // response_time consumes the static latency and is charged also
        // with headerDelay that takes into account the delay provided by
        // the xbar and also the payloadDelay that takes into account the
        // number of data beats.
        Tick response_time = curTick() + static_latency + pkt->headerDelay +
                             pkt->payloadDelay;
        // Here we reset the timing of the packet before sending it out.
        pkt->headerDelay = pkt->payloadDelay = 0;

        // queue the packet in the response queue to be sent out after
        // the static latency has passed
        port.schedTimingResp(pkt, response_time);
    } else {
        // @todo the packet is going to be deleted, and the DRAMPacket
        // still has a pointer to it
        pendingDelete.reset(pkt);
    }

    DPRINTF(DRAM, "Done\n");

    return;
}

void
DRAMCtrl::activateBank(Rank& rank_ref, Bank& bank_ref,
                       Tick act_tick, uint32_t row)
{
    assert(rank_ref.actTicks.size() == activationLimit);

    DPRINTF(DRAM, "Activate at tick %d\n", act_tick);

    // update the open row
    assert(bank_ref.openRow == Bank::NO_ROW);
    bank_ref.openRow = row;

    // start counting anew, this covers both the case when we
    // auto-precharged, and when this access is forced to
    // precharge
    bank_ref.bytesAccessed = 0;
    bank_ref.rowAccesses = 0;

    ++rank_ref.numBanksActive;
    assert(rank_ref.numBanksActive <= banksPerRank);

    DPRINTF(DRAM, "Activate bank %d, rank %d at tick %lld, now got %d active\n",
            bank_ref.bank, rank_ref.rank, act_tick,
            ranks[rank_ref.rank]->numBanksActive);

    rank_ref.cmdList.push_back(Command(MemCommand::ACT, bank_ref.bank,
                                       act_tick));

    DPRINTF(DRAMPower, "%llu,ACT,%d,%d\n", divCeil(act_tick, tCK) -
            timeStampOffset, bank_ref.bank, rank_ref.rank);

    // The next access has to respect tRAS for this bank
    bank_ref.preAllowedAt = act_tick + tRAS;

    // Respect the row-to-column command delay for both read and write cmds
    bank_ref.rdAllowedAt = std::max(act_tick + tRCD, bank_ref.rdAllowedAt);
    bank_ref.wrAllowedAt = std::max(act_tick + tRCD, bank_ref.wrAllowedAt);

    // start by enforcing tRRD
    for (int i = 0; i < banksPerRank; i++) {
        // next activate to any bank in this rank must not happen
        // before tRRD
        if (bankGroupArch && (bank_ref.bankgr == rank_ref.banks[i].bankgr)) {
            // bank group architecture requires longer delays between
            // ACT commands within the same bank group. Use tRRD_L
            // in this case
            rank_ref.banks[i].actAllowedAt = std::max(act_tick + tRRD_L,
                                             rank_ref.banks[i].actAllowedAt);
        } else {
            // use the shorter tRRD value when either
            // 1) the bank group architecture is not supported
            // 2) the bank is in a different bank group
            rank_ref.banks[i].actAllowedAt = std::max(act_tick + tRRD,
                                             rank_ref.banks[i].actAllowedAt);
        }
    }

    // next, we deal with tXAW, if the activation limit is disabled
    // then we directly schedule an activate power event
    if (!rank_ref.actTicks.empty()) {
        // sanity check
        if (rank_ref.actTicks.back() &&
            (act_tick - rank_ref.actTicks.back()) < tXAW) {
            panic("Got %d activates in window %d (%llu - %llu) which "
                  "is smaller than %llu\n", activationLimit, act_tick -
                  rank_ref.actTicks.back(), act_tick,
                  rank_ref.actTicks.back(), tXAW);
        }

        // shift the times used for the book keeping, the last element
        // (highest index) is the oldest one and hence the lowest value
        rank_ref.actTicks.pop_back();

        // record a new activation (in the future)
        rank_ref.actTicks.push_front(act_tick);

        // cannot activate more than X times in time window tXAW, push the
        // next one (the X + 1'st activate) to be tXAW away from the
        // oldest in our window of X
        if (rank_ref.actTicks.back() &&
            (act_tick - rank_ref.actTicks.back()) < tXAW) {
            DPRINTF(DRAM, "Enforcing tXAW with X = %d, next activate "
                    "no earlier than %llu\n", activationLimit,
                    rank_ref.actTicks.back() + tXAW);
            for (int j = 0; j < banksPerRank; j++)
                // next activate must not happen before end of window
                rank_ref.banks[j].actAllowedAt =
                    std::max(rank_ref.actTicks.back() + tXAW,
                             rank_ref.banks[j].actAllowedAt);
        }
    }
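
    // As a concrete (hypothetical) illustration of the window above:
    // with an activation limit of 4 and the four most recent ACTs at
    // ticks 0, 10, 20 and 30 with tXAW = 50, actTicks.back() is 0, so
    // no bank in this rank may activate before tick 0 + 50 = 50.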

    // at the point when this activate takes place, make sure we
    // transition to the active power state
    if (!rank_ref.activateEvent.scheduled())
        schedule(rank_ref.activateEvent, act_tick);
    else if (rank_ref.activateEvent.when() > act_tick)
        // move it sooner in time
        reschedule(rank_ref.activateEvent, act_tick);
}

void
DRAMCtrl::prechargeBank(Rank& rank_ref, Bank& bank, Tick pre_at, bool trace)
{
    // make sure the bank has an open row
    assert(bank.openRow != Bank::NO_ROW);

    // sample the bytes per activate here since we are closing
    // the page
    bytesPerActivate.sample(bank.bytesAccessed);

    bank.openRow = Bank::NO_ROW;

    // no precharge allowed before this one
    bank.preAllowedAt = pre_at;

    Tick pre_done_at = pre_at + tRP;

    bank.actAllowedAt = std::max(bank.actAllowedAt, pre_done_at);

    assert(rank_ref.numBanksActive != 0);
    --rank_ref.numBanksActive;

    DPRINTF(DRAM, "Precharging bank %d, rank %d at tick %lld, now got "
            "%d active\n", bank.bank, rank_ref.rank, pre_at,
            rank_ref.numBanksActive);

    if (trace) {

        rank_ref.cmdList.push_back(Command(MemCommand::PRE, bank.bank,
                                           pre_at));
        DPRINTF(DRAMPower, "%llu,PRE,%d,%d\n", divCeil(pre_at, tCK) -
                timeStampOffset, bank.bank, rank_ref.rank);
    }
    // if we look at the current number of active banks we might be
    // tempted to think the DRAM is now idle, however this can be
    // undone by an activate that is scheduled to happen before we
    // would have reached the idle state, so schedule an event and
    // rather check once we actually make it to the point in time when
    // the (last) precharge takes place
    if (!rank_ref.prechargeEvent.scheduled()) {
        schedule(rank_ref.prechargeEvent, pre_done_at);
        // New event, increment count
        ++rank_ref.outstandingEvents;
    } else if (rank_ref.prechargeEvent.when() < pre_done_at) {
        reschedule(rank_ref.prechargeEvent, pre_done_at);
    }
}

void
DRAMCtrl::doDRAMAccess(DRAMPacket* dram_pkt)
{
    DPRINTF(DRAM, "Timing access to addr %lld, rank/bank/row %d %d %d\n",
            dram_pkt->addr, dram_pkt->rank, dram_pkt->bank, dram_pkt->row);

    // get the rank
    Rank& rank = dram_pkt->rankRef;

    // are we in or transitioning to a low-power state and have not scheduled
    // a power-up event?
    // if so, wake up from power down to issue RD/WR burst
    if (rank.inLowPowerState) {
        assert(rank.pwrState != PWR_SREF);
        rank.scheduleWakeUpEvent(tXP);
    }

    // get the bank
    Bank& bank = dram_pkt->bankRef;

    // for the state we need to track if it is a row hit or not
    bool row_hit = true;

    // Determine the access latency and update the bank state
    if (bank.openRow == dram_pkt->row) {
        // nothing to do
    } else {
        row_hit = false;

        // If there is a page open, precharge it.
        if (bank.openRow != Bank::NO_ROW) {
            prechargeBank(rank, bank, std::max(bank.preAllowedAt, curTick()));
        }

        // next we need to account for the delay in activating the
        // page
        Tick act_tick = std::max(bank.actAllowedAt, curTick());

        // Record the activation and deal with all the global timing
        // constraints caused by a new activation (tRRD and tXAW)
        activateBank(rank, bank, act_tick, dram_pkt->row);
    }

    // respect any constraints on the command (e.g. tRCD or tCCD)
    const Tick col_allowed_at = dram_pkt->isRead() ?
        bank.rdAllowedAt : bank.wrAllowedAt;

    // we need to wait until the bus is available before we can issue
    // the command; need a minimum of tBURST between commands
    Tick cmd_at = std::max({col_allowed_at, nextBurstAt, curTick()});

    // update the packet ready time
    dram_pkt->readyTime = cmd_at + tCL + tBURST;
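
    // That is, the data is back at the controller one CAS latency
    // (tCL) plus one data transfer (tBURST) after the column command
    // issues; e.g. with hypothetical tCL = 13.75ns and tBURST = 5ns,
    // the burst completes 18.75ns after cmd_at.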

    // update the time for the next read/write burst for each
    // bank (add a max with tCCD/tCCD_L/tCCD_L_WR here)
    Tick dly_to_rd_cmd;
    Tick dly_to_wr_cmd;
    for (int j = 0; j < ranksPerChannel; j++) {
        for (int i = 0; i < banksPerRank; i++) {
            // next burst to same bank group in this rank must not happen
            // before tCCD_L. Different bank group timing requirement is
            // tBURST; Add tCS for different ranks
            if (dram_pkt->rank == j) {
                if (bankGroupArch &&
                    (bank.bankgr == ranks[j]->banks[i].bankgr)) {
                    // bank group architecture requires longer delays between
                    // RD/WR burst commands to the same bank group.
                    // tCCD_L is the default requirement for same BG timing
                    // tCCD_L_WR is required for write-to-write
                    // Need to also take bus turnaround delays into account
                    dly_to_rd_cmd = dram_pkt->isRead() ?
                                    tCCD_L : std::max(tCCD_L, wrToRdDly);
                    dly_to_wr_cmd = dram_pkt->isRead() ?
                                    std::max(tCCD_L, rdToWrDly) : tCCD_L_WR;
                } else {
                    // tBURST is the default requirement for diff BG timing
                    // Need to also take bus turnaround delays into account
                    dly_to_rd_cmd = dram_pkt->isRead() ? tBURST : wrToRdDly;
                    dly_to_wr_cmd = dram_pkt->isRead() ? rdToWrDly : tBURST;
                }
            } else {
                // different rank is by default in a different bank group and
                // doesn't require longer tCCD or additional RTW, WTR delays
                // Need to account for rank-to-rank switching with tCS
                dly_to_wr_cmd = rankToRankDly;
                dly_to_rd_cmd = rankToRankDly;
            }
            ranks[j]->banks[i].rdAllowedAt = std::max(cmd_at + dly_to_rd_cmd,
                                             ranks[j]->banks[i].rdAllowedAt);
            ranks[j]->banks[i].wrAllowedAt = std::max(cmd_at + dly_to_wr_cmd,
                                             ranks[j]->banks[i].wrAllowedAt);
        }
    }
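
    // To summarize the constraints applied above, the minimum distance
    // from this burst to the next column command is:
    //   same rank, same bank group:      tCCD_L (reads) / tCCD_L_WR
    //                                    (writes), plus turnaround when
    //                                    switching between reads and writes
    //   same rank, different bank group: tBURST, plus turnaround when
    //                                    switching
    //   different rank:                  rankToRankDly (tCS + tBURST)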

    // Save the rank of the current access
    activeRank = dram_pkt->rank;

    // If this is a write, we also need to respect the write recovery
    // time before a precharge, in the case of a read, respect the
    // read to precharge constraint
    bank.preAllowedAt = std::max(bank.preAllowedAt,
                                 dram_pkt->isRead() ? cmd_at + tRTP :
                                 dram_pkt->readyTime + tWR);

    // increment the bytes accessed and the accesses per row
    bank.bytesAccessed += burstSize;
    ++bank.rowAccesses;

    // if we reached the max, then issue with an auto-precharge
    bool auto_precharge = pageMgmt == Enums::close ||
        bank.rowAccesses == maxAccessesPerRow;

    // if we did not hit the limit, we might still want to
    // auto-precharge
    if (!auto_precharge &&
        (pageMgmt == Enums::open_adaptive ||
         pageMgmt == Enums::close_adaptive)) {
        // a twist on the open and close page policies:
        // 1) open_adaptive page policy does not blindly keep the
        // page open, but closes it if there are no row hits and there
        // are bank conflicts in the queue
        // 2) close_adaptive page policy does not blindly close the
        // page, but closes it only if there are no row hits in the queue.
        // In this case, only force an auto precharge when there
        // are no same page hits in the queue
        bool got_more_hits = false;
        bool got_bank_conflict = false;

        // either look at the read queue or the write queue
        const std::vector<DRAMPacketQueue>& queue =
            dram_pkt->isRead() ? readQueue : writeQueue;

        for (uint8_t i = 0; i < numPriorities(); ++i) {
            auto p = queue[i].begin();
            // keep on looking until we find a hit or reach the end of the
            // queue
            // 1) if a hit is found, then both open and close adaptive
            // policies keep the page open
            // 2) if no hit is found, got_bank_conflict is set to true if a
            // bank conflict request is waiting in the queue
            // 3) make sure we are not considering the packet that we are
            // currently dealing with
            while (!got_more_hits && p != queue[i].end()) {
                if (dram_pkt != (*p)) {
                    bool same_rank_bank = (dram_pkt->rank == (*p)->rank) &&
                        (dram_pkt->bank == (*p)->bank);

                    bool same_row = dram_pkt->row == (*p)->row;
                    got_more_hits |= same_rank_bank && same_row;
                    got_bank_conflict |= same_rank_bank && !same_row;
                }
                ++p;
            }

            if (got_more_hits)
                break;
        }

        // auto pre-charge when either
        // 1) open_adaptive policy, we have not got any more hits, and
        // have a bank conflict
        // 2) close_adaptive policy and we have not got any more hits
        auto_precharge = !got_more_hits &&
            (got_bank_conflict || pageMgmt == Enums::close_adaptive);
    }

    // DRAMPower trace command to be written
    std::string mem_cmd = dram_pkt->isRead() ? "RD" : "WR";

    // MemCommand required for DRAMPower library
    MemCommand::cmds command = (mem_cmd == "RD") ? MemCommand::RD :
        MemCommand::WR;

    // Update bus state to reflect when the previous command was issued
    nextBurstAt = cmd_at + tBURST;

    DPRINTF(DRAM, "Access to %lld, ready at %lld next burst at %lld.\n",
            dram_pkt->addr, dram_pkt->readyTime, nextBurstAt);

    dram_pkt->rankRef.cmdList.push_back(Command(command, dram_pkt->bank,
                                                cmd_at));

    DPRINTF(DRAMPower, "%llu,%s,%d,%d\n", divCeil(cmd_at, tCK) -
            timeStampOffset, mem_cmd, dram_pkt->bank, dram_pkt->rank);

    // if this access should use auto-precharge, then we are
    // closing the row after the read/write burst
    if (auto_precharge) {
        // if auto-precharge, push a PRE command at the correct tick to the
        // list used by the DRAMPower library to calculate power
        prechargeBank(rank, bank, std::max(curTick(), bank.preAllowedAt));

        DPRINTF(DRAM, "Auto-precharged bank: %d\n", dram_pkt->bankId);
    }

    // Update the minimum timing between the requests, this is a
    // conservative estimate of when we have to schedule the next
    // request to not introduce any unnecessary bubbles. In most cases
    // we will wake up sooner than we have to.
    nextReqTime = nextBurstAt - (tRP + tRCD);

    // Update the stats and schedule the next request
    if (dram_pkt->isRead()) {
        ++readsThisTime;
        if (row_hit)
            readRowHits++;
        bytesReadDRAM += burstSize;
        perBankRdBursts[dram_pkt->bankId]++;

        // Update latency stats
        totMemAccLat += dram_pkt->readyTime - dram_pkt->entryTime;
        masterReadTotalLat[dram_pkt->masterId()] +=
            dram_pkt->readyTime - dram_pkt->entryTime;

        totBusLat += tBURST;
        totQLat += cmd_at - dram_pkt->entryTime;
        masterReadBytes[dram_pkt->masterId()] += dram_pkt->size;
    } else {
        ++writesThisTime;
        if (row_hit)
            writeRowHits++;
        bytesWritten += burstSize;
        perBankWrBursts[dram_pkt->bankId]++;
        masterWriteBytes[dram_pkt->masterId()] += dram_pkt->size;
        masterWriteTotalLat[dram_pkt->masterId()] +=
            dram_pkt->readyTime - dram_pkt->entryTime;
    }
}

1331 void
1332 DRAMCtrl::processNextReqEvent()
1333 {
1334 // transition is handled by QoS algorithm if enabled
1335 if (turnPolicy) {
1336 // select bus state - only done if QoS algorithms are in use
1337 busStateNext = selectNextBusState();
1338 }
1339
1340 // detect bus state change
1341 bool switched_cmd_type = (busState != busStateNext);
1342 // record stats
1343 recordTurnaroundStats();
1344
1345 DPRINTF(DRAM, "QoS Turnarounds selected state %s %s\n",
1346 (busState==MemCtrl::READ)?"READ":"WRITE",
1347 switched_cmd_type?"[turnaround triggered]":"");
1348
1349 if (switched_cmd_type) {
1350 if (busState == READ) {
1351 DPRINTF(DRAM,
1352 "Switching to writes after %d reads with %d reads "
1353 "waiting\n", readsThisTime, totalReadQueueSize);
1354 rdPerTurnAround.sample(readsThisTime);
1355 readsThisTime = 0;
1356 } else {
1357 DPRINTF(DRAM,
1358 "Switching to reads after %d writes with %d writes "
1359 "waiting\n", writesThisTime, totalWriteQueueSize);
1360 wrPerTurnAround.sample(writesThisTime);
1361 writesThisTime = 0;
1362 }
1363 }
1364
1365 // updates current state
1366 busState = busStateNext;
1367
1368 // check ranks for refresh/wakeup - uses busStateNext, so done after turnaround
1369 // decisions
1370 int busyRanks = 0;
1371 for (auto r : ranks) {
1372 if (!r->inRefIdleState()) {
1373 if (r->pwrState != PWR_SREF) {
1374 // rank is busy refreshing
1375 DPRINTF(DRAMState, "Rank %d is not available\n", r->rank);
1376 busyRanks++;
1377
1378 // let the rank know that if it was waiting to drain, it
1379 // is now done and ready to proceed
1380 r->checkDrainDone();
1381 }
1382
1383 // check if we were in self-refresh and haven't started
1384 // to transition out
1385 if ((r->pwrState == PWR_SREF) && r->inLowPowerState) {
1386 DPRINTF(DRAMState, "Rank %d is in self-refresh\n", r->rank);
1387 // if we have commands queued to this rank and we don't have
1388 // a minimum number of active commands enqueued,
1389 // exit self-refresh
1390 if (r->forceSelfRefreshExit()) {
1391 DPRINTF(DRAMState, "rank %d was in self refresh and"
1392 " should wake up\n", r->rank);
1393 //wake up from self-refresh
1394 r->scheduleWakeUpEvent(tXS);
1395 // things are brought back into action once a refresh is
1396 // performed after self-refresh
1397 // continue with selection for other ranks
1398 }
1399 }
1400 }
1401 }
1402
1403 if (busyRanks == ranksPerChannel) {
1404 // if all ranks are refreshing wait for them to finish
1405 // and stall this state machine without taking any further
1406 // action, and do not schedule a new nextReqEvent
1407 return;
1408 }
1409
1410 // when we get here it is either a read or a write
1411 if (busState == READ) {
1412
1413 // track if we should switch or not
1414 bool switch_to_writes = false;
1415
1416 if (totalReadQueueSize == 0) {
1417 // In the case there is no read request to go next,
1418 // trigger writes if we have passed the low threshold (or
1419 // if we are draining)
1420 if (!(totalWriteQueueSize == 0) &&
1421 (drainState() == DrainState::Draining ||
1422 totalWriteQueueSize > writeLowThreshold)) {
1423
1424 DPRINTF(DRAM, "Switching to writes due to read queue empty\n");
1425 switch_to_writes = true;
1426 } else {
1427 // check if we are drained
1428 // not done draining until in PWR_IDLE state
1429 // ensuring all banks are closed and
1430 // have exited low power states
1431 if (drainState() == DrainState::Draining &&
1432 respQueue.empty() && allRanksDrained()) {
1433
1434 DPRINTF(Drain, "DRAM controller done draining\n");
1435 signalDrainDone();
1436 }
1437
1438 // nothing to do, not even any point in scheduling an
1439 // event for the next request
1440 return;
1441 }
1442 } else {
1443
1444 bool read_found = false;
1445 DRAMPacketQueue::iterator to_read;
1446 uint8_t prio = numPriorities();
1447
1448 for (auto queue = readQueue.rbegin();
1449 queue != readQueue.rend(); ++queue) {
1450
1451 prio--;
1452
1453 DPRINTF(QOS,
1454 "DRAM controller checking READ queue [%d] priority [%d elements]\n",
1455 prio, queue->size());
1456
1457 // Figure out which read request goes next
1458 // If we are changing command type, incorporate the minimum
1459 // bus turnaround delay which will be tCS (different rank) case
1460 to_read = chooseNext((*queue), switched_cmd_type ? tCS : 0);
1461
1462 if (to_read != queue->end()) {
1463 // candidate read found
1464 read_found = true;
1465 break;
1466 }
1467 }
1468
1469 // if no read to an available rank is found then return
1470 // at this point. There could be writes to the available ranks
1471 // which are above the required threshold. However, to
1472 // avoid adding more complexity to the code, return and wait
1473 // for a refresh event to kick things into action again.
1474 if (!read_found) {
1475 DPRINTF(DRAM, "No Reads Found - exiting\n");
1476 return;
1477 }
1478
1479 auto dram_pkt = *to_read;
1480
1481 assert(dram_pkt->rankRef.inRefIdleState());
1482
1483 doDRAMAccess(dram_pkt);
1484
1485 // Every respQueue which will generate an event, increment count
1486 ++dram_pkt->rankRef.outstandingEvents;
1487 // sanity check
1488 assert(dram_pkt->size <= burstSize);
1489 assert(dram_pkt->readyTime >= curTick());
1490
1491 // log the response
1492 logResponse(MemCtrl::READ, (*to_read)->masterId(),
1493 dram_pkt->qosValue(), dram_pkt->getAddr(), 1,
1494 dram_pkt->readyTime - dram_pkt->entryTime);
1495
1496
1497 // Insert into response queue. It will be sent back to the
1498 // requester at its readyTime
1499 if (respQueue.empty()) {
1500 assert(!respondEvent.scheduled());
1501 schedule(respondEvent, dram_pkt->readyTime);
1502 } else {
1503 assert(respQueue.back()->readyTime <= dram_pkt->readyTime);
1504 assert(respondEvent.scheduled());
1505 }
1506
1507 respQueue.push_back(dram_pkt);
1508
1509 // we have so many writes that we have to transition
1510 if (totalWriteQueueSize > writeHighThreshold) {
1511 switch_to_writes = true;
1512 }
1513
1514 // remove the request from the queue - the iterator is no longer valid .
1515 readQueue[dram_pkt->qosValue()].erase(to_read);
1516 }
1517
1518 // switching to writes, either because the read queue is empty
1519 // and the writes have passed the low threshold (or we are
1520 // draining), or because the writes hit the hight threshold
1521 if (switch_to_writes) {
1522 // transition to writing
1523 busStateNext = WRITE;
1524 }
1525 } else {
1526
1527 bool write_found = false;
1528 DRAMPacketQueue::iterator to_write;
1529 uint8_t prio = numPriorities();
1530
1531 for (auto queue = writeQueue.rbegin();
1532 queue != writeQueue.rend(); ++queue) {
1533
1534 prio--;
1535
1536 DPRINTF(QOS,
1537 "DRAM controller checking WRITE queue [%d] priority [%d elements]\n",
1538 prio, queue->size());
1539
1540 // If we are changing command type, incorporate the minimum
1541 // bus turnaround delay
1542 to_write = chooseNext((*queue),
1543 switched_cmd_type ? std::min(tRTW, tCS) : 0);
1544
1545 if (to_write != queue->end()) {
1546 write_found = true;
1547 break;
1548 }
1549 }
1550
1551 // if there are no writes to a rank that is available to service
1552 // requests (i.e. rank is in refresh idle state) are found then
1553 // return. There could be reads to the available ranks. However, to
1554 // avoid adding more complexity to the code, return at this point and
1555 // wait for a refresh event to kick things into action again.
1556 if (!write_found) {
1557 DPRINTF(DRAM, "No Writes Found - exiting\n");
1558 return;
1559 }
1560
1561 auto dram_pkt = *to_write;
1562
1563 assert(dram_pkt->rankRef.inRefIdleState());
1564 // sanity check
1565 assert(dram_pkt->size <= burstSize);
1566
1567 doDRAMAccess(dram_pkt);
1568
1569 // removed write from queue, decrement count
1570 --dram_pkt->rankRef.writeEntries;
1571
1572 // Schedule write done event to decrement event count
1573 // after the readyTime has been reached
1574 // Only schedule latest write event to minimize events
1575 // required; only need to ensure that final event scheduled covers
1576 // the time that writes are outstanding and bus is active
1577 // to holdoff power-down entry events
1578 if (!dram_pkt->rankRef.writeDoneEvent.scheduled()) {
1579 schedule(dram_pkt->rankRef.writeDoneEvent, dram_pkt->readyTime);
1580 // New event, increment count
1581 ++dram_pkt->rankRef.outstandingEvents;
1582
1583 } else if (dram_pkt->rankRef.writeDoneEvent.when() <
1584 dram_pkt->readyTime) {
1585
1586 reschedule(dram_pkt->rankRef.writeDoneEvent, dram_pkt->readyTime);
1587 }
1588
1589 isInWriteQueue.erase(burstAlign(dram_pkt->addr));
1590
1591 // log the response
1592 logResponse(MemCtrl::WRITE, dram_pkt->masterId(),
1593 dram_pkt->qosValue(), dram_pkt->getAddr(), 1,
1594 dram_pkt->readyTime - dram_pkt->entryTime);
1595
1596
1597 // remove the request from the queue - the iterator is no longer valid
1598 writeQueue[dram_pkt->qosValue()].erase(to_write);
1599
1600 delete dram_pkt;
1601
1602 // If we emptied the write queue, or got sufficiently below the
1603 // threshold (using the minWritesPerSwitch as the hysteresis) and
1604 // are not draining, or we have reads waiting and have done enough
1605 // writes, then switch to reads.
1606 bool below_threshold =
1607 totalWriteQueueSize + minWritesPerSwitch < writeLowThreshold;
1608
1609 if (totalWriteQueueSize == 0 ||
1610 (below_threshold && drainState() != DrainState::Draining) ||
1611 (totalReadQueueSize && writesThisTime >= minWritesPerSwitch)) {
1612
1613 // turn the bus back around for reads again
1614 busStateNext = READ;
1615
1616 // note that we switch back to reads also in the idle
1617 // case, which eventually will check for any draining and
1618 // also pause any further scheduling if there is really
1619 // nothing to do
1620 }
1621 }
1622 // It is possible that a refresh to another rank kicks things back into
1623 // action before reaching this point.
1624 if (!nextReqEvent.scheduled())
1625 schedule(nextReqEvent, std::max(nextReqTime, curTick()));
1626
1627 // If there is space available and we have writes waiting then let
1628 // them retry. This is done here to ensure that the retry does not
1629 // cause a nextReqEvent to be scheduled before we do so as part of
1630 // the next request processing
1631 if (retryWrReq && totalWriteQueueSize < writeBufferSize) {
1632 retryWrReq = false;
1633 port.sendRetryReq();
1634 }
1635 }
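// Editorial note, summarizing the policy above (the threshold values here
// are hypothetical): the read/write switch forms a hysteresis loop. With
// writeHighThreshold = 54, writeLowThreshold = 32 and minWritesPerSwitch
// = 16, reads are serviced until more than 54 writes are queued; writes
// are then drained until the queue empties, drops below 32 - 16 = 16
// entries (when not draining), or 16 writes have been issued while reads
// are waiting.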
1636
1637 pair<vector<uint32_t>, bool>
1638 DRAMCtrl::minBankPrep(const DRAMPacketQueue& queue,
1639 Tick min_col_at) const
1640 {
1641 Tick min_act_at = MaxTick;
1642 vector<uint32_t> bank_mask(ranksPerChannel, 0);
1643
1644 // latest Tick for which ACT can occur without incurring additional
1645 // delay on the data bus
1646 const Tick hidden_act_max = std::max(min_col_at - tRCD, curTick());
1647
1648 // Flag condition when burst can issue back-to-back with previous burst
1649 bool found_seamless_bank = false;
1650
1651 // Flag condition when bank can be opened without incurring additional
1652 // delay on the data bus
1653 bool hidden_bank_prep = false;
1654
1655 // determine if we have queued transactions targeting the
1656 // bank in question
1657 vector<bool> got_waiting(ranksPerChannel * banksPerRank, false);
1658 for (const auto& p : queue) {
1659 if (p->rankRef.inRefIdleState())
1660 got_waiting[p->bankId] = true;
1661 }
1662
1663 // Find command with optimal bank timing
1664 // Will prioritize commands that can issue seamlessly.
1665 for (int i = 0; i < ranksPerChannel; i++) {
1666 for (int j = 0; j < banksPerRank; j++) {
1667 uint16_t bank_id = i * banksPerRank + j;
1668
1669 // if we have waiting requests for the bank, and it is
1670 // amongst the first available, update the mask
1671 if (got_waiting[bank_id]) {
1672 // make sure this rank is not currently refreshing.
1673 assert(ranks[i]->inRefIdleState());
1674 // simplistic approximation of when the bank can issue
1675 // an activate, ignoring any rank-to-rank switching
1676 // cost in this calculation
1677 Tick act_at = ranks[i]->banks[j].openRow == Bank::NO_ROW ?
1678 std::max(ranks[i]->banks[j].actAllowedAt, curTick()) :
1679 std::max(ranks[i]->banks[j].preAllowedAt, curTick()) + tRP;
1680
1681 // When is the earliest the R/W burst can issue?
1682 const Tick col_allowed_at = (busState == READ) ?
1683 ranks[i]->banks[j].rdAllowedAt :
1684 ranks[i]->banks[j].wrAllowedAt;
1685 Tick col_at = std::max(col_allowed_at, act_at + tRCD);
1686
1687 // bank can issue burst back-to-back (seamlessly) with
1688 // previous burst
1689 bool new_seamless_bank = col_at <= min_col_at;
1690
1691 // if we found a new seamless bank or we have no
1692 // seamless banks, and got a bank with an earlier
1693 // activate time, it should be added to the bit mask
1694 if (new_seamless_bank ||
1695 (!found_seamless_bank && act_at <= min_act_at)) {
1696 // if we did not have a seamless bank before, and
1697 // we do now, reset the bank mask, also reset it
1698 // if we have not yet found a seamless bank and
1699 // the activate time is smaller than what we have
1700 // seen so far
1701 if (!found_seamless_bank &&
1702 (new_seamless_bank || act_at < min_act_at)) {
1703 std::fill(bank_mask.begin(), bank_mask.end(), 0);
1704 }
1705
1706 found_seamless_bank |= new_seamless_bank;
1707
1708 // ACT can occur 'behind the scenes'
1709 hidden_bank_prep = act_at <= hidden_act_max;
1710
1711 // set the bit corresponding to the available bank
1712 replaceBits(bank_mask[i], j, j, 1);
1713 min_act_at = act_at;
1714 }
1715 }
1716 }
1717 }
1718
1719 return make_pair(bank_mask, hidden_bank_prep);
1720 }
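// Illustrative walk-through with hypothetical numbers (not from the
// source): let min_col_at = 1000 and tRCD = 100, so hidden_act_max =
// max(900, curTick()). A closed bank with actAllowedAt = 850 gets
// act_at = 850 and col_at = max(col_allowed_at, 950); if col_at <= 1000
// the burst is seamless, the mask is reset to that bank alone, and
// since 850 <= 900 the ACT is also hidden behind the previous burst.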
1721
1722 DRAMCtrl::Rank::Rank(DRAMCtrl& _memory, const DRAMCtrlParams* _p, int rank)
1723 : EventManager(&_memory), memory(_memory),
1724 pwrStateTrans(PWR_IDLE), pwrStatePostRefresh(PWR_IDLE),
1725 pwrStateTick(0), refreshDueAt(0), pwrState(PWR_IDLE),
1726 refreshState(REF_IDLE), inLowPowerState(false), rank(rank),
1727 readEntries(0), writeEntries(0), outstandingEvents(0),
1728 wakeUpAllowedAt(0), power(_p, false), banks(_p->banks_per_rank),
1729 numBanksActive(0), actTicks(_p->activation_limit, 0),
1730 writeDoneEvent([this]{ processWriteDoneEvent(); }, name()),
1731 activateEvent([this]{ processActivateEvent(); }, name()),
1732 prechargeEvent([this]{ processPrechargeEvent(); }, name()),
1733 refreshEvent([this]{ processRefreshEvent(); }, name()),
1734 powerEvent([this]{ processPowerEvent(); }, name()),
1735 wakeUpEvent([this]{ processWakeUpEvent(); }, name())
1736 {
1737 for (int b = 0; b < _p->banks_per_rank; b++) {
1738 banks[b].bank = b;
1739 // GDDR addressing of banks to BG is linear.
1740 // Here we assume that all DRAM generations address bank groups as
1741 // follows:
1742 if (_p->bank_groups_per_rank > 0) {
1743 // Simply assign lower bits to bank group in order to
1744 // rotate across bank groups as banks are incremented
1745 // e.g. with 4 banks per bank group and 16 banks total:
1746 // banks 0,4,8,12 are in bank group 0
1747 // banks 1,5,9,13 are in bank group 1
1748 // banks 2,6,10,14 are in bank group 2
1749 // banks 3,7,11,15 are in bank group 3
1750 banks[b].bankgr = b % _p->bank_groups_per_rank;
1751 } else {
1752 // No bank groups; simply assign to bank number
1753 banks[b].bankgr = b;
1754 }
1755 }
1756 }
1757
1758 void
1759 DRAMCtrl::Rank::startup(Tick ref_tick)
1760 {
1761 assert(ref_tick > curTick());
1762
1763 pwrStateTick = curTick();
1764
1765 // kick off the refresh, and give ourselves enough time to
1766 // precharge
1767 schedule(refreshEvent, ref_tick);
1768 }
1769
1770 void
1771 DRAMCtrl::Rank::suspend()
1772 {
1773 deschedule(refreshEvent);
1774
1775 // Update the stats
1776 updatePowerStats();
1777
1778 // don't automatically transition back to LP state after next REF
1779 pwrStatePostRefresh = PWR_IDLE;
1780 }
1781
1782 bool
1783 DRAMCtrl::Rank::isQueueEmpty() const
1784 {
1785 // check commands in Q based on current bus direction
1786 bool no_queued_cmds = ((memory.busStateNext == READ) && (readEntries == 0))
1787 || ((memory.busStateNext == WRITE) &&
1788 (writeEntries == 0));
1789 return no_queued_cmds;
1790 }
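// Editorial note: "empty" is judged against the direction the bus is
// about to take (busStateNext), so a rank may be considered idle, and
// allowed to power down, even though it has entries queued in the
// opposite direction.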
1791
1792 void
1793 DRAMCtrl::Rank::checkDrainDone()
1794 {
1795 // if this rank was waiting to drain it is now able to proceed to
1796 // precharge
1797 if (refreshState == REF_DRAIN) {
1798 DPRINTF(DRAM, "Refresh drain done, now precharging\n");
1799
1800 refreshState = REF_PD_EXIT;
1801
1802 // hand control back to the refresh event loop
1803 schedule(refreshEvent, curTick());
1804 }
1805 }
1806
1807 void
1808 DRAMCtrl::Rank::flushCmdList()
1809 {
1810 // at the moment sort the list of commands and update the counters
1811 // for the DRAMPower library when doing a refresh
1812 sort(cmdList.begin(), cmdList.end(), DRAMCtrl::sortTime);
1813
1814 auto next_iter = cmdList.begin();
1815 // push commands to DRAMPower
1816 for ( ; next_iter != cmdList.end() ; ++next_iter) {
1817 Command cmd = *next_iter;
1818 if (cmd.timeStamp <= curTick()) {
1819 // Move all commands at or before curTick to DRAMPower
1820 power.powerlib.doCommand(cmd.type, cmd.bank,
1821 divCeil(cmd.timeStamp, memory.tCK) -
1822 memory.timeStampOffset);
1823 } else {
1824 // done - found all commands at or before curTick()
1825 // next_iter references the 1st command after curTick
1826 break;
1827 }
1828 }
1829 // reset cmdList to only contain commands after curTick
1830 // if there are no commands after curTick, updated cmdList will be empty
1831 // in this case, next_iter is cmdList.end()
1832 cmdList.assign(next_iter, cmdList.end());
1833 }
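// Illustrative example with hypothetical timestamps (not from the
// source): with cmdList = {RD@900, ACT@400, PRE@1200} and curTick() ==
// 1000, the sort yields {ACT@400, RD@900, PRE@1200}; ACT and RD are
// handed to DRAMPower, with ticks converted to cycles as
// divCeil(timeStamp, tCK) - timeStampOffset, and cmdList is left
// holding only {PRE@1200}.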
1834
1835 void
1836 DRAMCtrl::Rank::processActivateEvent()
1837 {
1838 // we should transition to the active state as soon as any bank is active
1839 if (pwrState != PWR_ACT)
1840 // note that at this point numBanksActive could be back at
1841 // zero again due to a precharge scheduled in the future
1842 schedulePowerEvent(PWR_ACT, curTick());
1843 }
1844
1845 void
1846 DRAMCtrl::Rank::processPrechargeEvent()
1847 {
1848 // counter should at least indicate one outstanding request
1849 // for this precharge
1850 assert(outstandingEvents > 0);
1851 // precharge complete, decrement count
1852 --outstandingEvents;
1853
1854 // if we reached zero, then special conditions apply as we track
1855 // if all banks are precharged for the power models
1856 if (numBanksActive == 0) {
1857 // no reads to this rank in the Q and no pending
1858 // RD/WR or refresh commands
1859 if (isQueueEmpty() && outstandingEvents == 0) {
1860 // should still be in ACT state since bank still open
1861 assert(pwrState == PWR_ACT);
1862
1863 // All banks closed - switch to precharge power down state.
1864 DPRINTF(DRAMState, "Rank %d sleep at tick %d\n",
1865 rank, curTick());
1866 powerDownSleep(PWR_PRE_PDN, curTick());
1867 } else {
1868 // we should transition to the idle state when the last bank
1869 // is precharged
1870 schedulePowerEvent(PWR_IDLE, curTick());
1871 }
1872 }
1873 }
1874
1875 void
1876 DRAMCtrl::Rank::processWriteDoneEvent()
1877 {
1878 // counter should at least indicate one outstanding request
1879 // for this write
1880 assert(outstandingEvents > 0);
1881 // Write transfer on bus has completed
1882 // decrement per rank counter
1883 --outstandingEvents;
1884 }
1885
1886 void
1887 DRAMCtrl::Rank::processRefreshEvent()
1888 {
1889 // when first preparing the refresh, remember when it was due
1890 if ((refreshState == REF_IDLE) || (refreshState == REF_SREF_EXIT)) {
1891 // remember when the refresh is due
1892 refreshDueAt = curTick();
1893
1894 // proceed to drain
1895 refreshState = REF_DRAIN;
1896
1897 // make nonzero while refresh is pending to ensure
1898 // power down and self-refresh are not entered
1899 ++outstandingEvents;
1900
1901 DPRINTF(DRAM, "Refresh due\n");
1902 }
1903
1904 // let any scheduled read or write to the same rank go
1905 // ahead, after which it will hand control back to this
1906 // event loop
1907 if (refreshState == REF_DRAIN) {
1908 // if a request is at the moment being handled and this request is
1909 // accessing the current rank then wait for it to finish
1910 if ((rank == memory.activeRank)
1911 && (memory.nextReqEvent.scheduled())) {
1912 // hand control over to the request loop until it is
1913 // evaluated next
1914 DPRINTF(DRAM, "Refresh awaiting draining\n");
1915
1916 return;
1917 } else {
1918 refreshState = REF_PD_EXIT;
1919 }
1920 }
1921
1922 // at this point, ensure that rank is not in a power-down state
1923 if (refreshState == REF_PD_EXIT) {
1924 // if rank was sleeping and we haven't started the exit process,
1925 // wake-up for refresh
1926 if (inLowPowerState) {
1927 DPRINTF(DRAM, "Wake Up for refresh\n");
1928 // save state and return after refresh completes
1929 scheduleWakeUpEvent(memory.tXP);
1930 return;
1931 } else {
1932 refreshState = REF_PRE;
1933 }
1934 }
1935
1936 // at this point, ensure that all banks are precharged
1937 if (refreshState == REF_PRE) {
1938 // precharge any active bank
1939 if (numBanksActive != 0) {
1940 // at the moment, we use a precharge all even if there is
1941 // only a single bank open
1942 DPRINTF(DRAM, "Precharging all\n");
1943
1944 // first determine when we can precharge
1945 Tick pre_at = curTick();
1946
1947 for (auto &b : banks) {
1948 // respect both causality and any existing bank
1949 // constraints, some banks could already have a
1950 // (auto) precharge scheduled
1951 pre_at = std::max(b.preAllowedAt, pre_at);
1952 }
1953
1954 // make sure all banks per rank are precharged, and for those that
1955 // already are, update their availability
1956 Tick act_allowed_at = pre_at + memory.tRP;
1957
1958 for (auto &b : banks) {
1959 if (b.openRow != Bank::NO_ROW) {
1960 memory.prechargeBank(*this, b, pre_at, false);
1961 } else {
1962 b.actAllowedAt = std::max(b.actAllowedAt, act_allowed_at);
1963 b.preAllowedAt = std::max(b.preAllowedAt, pre_at);
1964 }
1965 }
1966
1967 // precharge all banks in rank
1968 cmdList.push_back(Command(MemCommand::PREA, 0, pre_at));
1969
1970 DPRINTF(DRAMPower, "%llu,PREA,0,%d\n",
1971 divCeil(pre_at, memory.tCK) -
1972 memory.timeStampOffset, rank);
1973 } else if ((pwrState == PWR_IDLE) && (outstandingEvents == 1)) {
1974 // Banks are closed, have transitioned to IDLE state, and
1975 // no outstanding ACT,RD/WR,Auto-PRE sequence scheduled
1976 DPRINTF(DRAM, "All banks already precharged, starting refresh\n");
1977
1978 // go ahead and kick the power state machine into gear since
1979 // we are already idle
1980 schedulePowerEvent(PWR_REF, curTick());
1981 } else {
1982 // banks are closed but we haven't transitioned pwrState to IDLE
1983 // or have outstanding ACT,RD/WR,Auto-PRE sequence scheduled
1984 // should have outstanding precharge event in this case
1985 assert(prechargeEvent.scheduled());
1986 // will start refresh when pwrState transitions to IDLE
1987 }
1988
1989 assert(numBanksActive == 0);
1990
1991 // wait for all banks to be precharged, at which point the
1992 // power state machine will transition to the idle state, and
1993 // automatically move to a refresh, at that point it will also
1994 // call this method to get the refresh event loop going again
1995 return;
1996 }
1997
1998 // last but not least we perform the actual refresh
1999 if (refreshState == REF_START) {
2000 // should never get here with any banks active
2001 assert(numBanksActive == 0);
2002 assert(pwrState == PWR_REF);
2003
2004 Tick ref_done_at = curTick() + memory.tRFC;
2005
2006 for (auto &b : banks) {
2007 b.actAllowedAt = ref_done_at;
2008 }
2009
2010 // at the moment this affects all ranks
2011 cmdList.push_back(Command(MemCommand::REF, 0, curTick()));
2012
2013 // Update the stats
2014 updatePowerStats();
2015
2016 DPRINTF(DRAMPower, "%llu,REF,0,%d\n", divCeil(curTick(), memory.tCK) -
2017 memory.timeStampOffset, rank);
2018
2019 // Update for next refresh
2020 refreshDueAt += memory.tREFI;
2021
2022 // make sure we did not wait so long that we cannot make up
2023 // for it
2024 if (refreshDueAt < ref_done_at) {
2025 fatal("Refresh was delayed so long we cannot catch up\n");
2026 }
2027
2028 // Run the refresh and schedule event to transition power states
2029 // when refresh completes
2030 refreshState = REF_RUN;
2031 schedule(refreshEvent, ref_done_at);
2032 return;
2033 }
2034
2035 if (refreshState == REF_RUN) {
2036 // should never get here with any banks active
2037 assert(numBanksActive == 0);
2038 assert(pwrState == PWR_REF);
2039
2040 assert(!powerEvent.scheduled());
2041
2042 if ((memory.drainState() == DrainState::Draining) ||
2043 (memory.drainState() == DrainState::Drained)) {
2044 // if draining, do not re-enter low-power mode.
2045 // simply go to IDLE and wait
2046 schedulePowerEvent(PWR_IDLE, curTick());
2047 } else {
2048 // At the moment, we sleep when the refresh ends and wait to be
2049 // woken up again if previously in a low-power state.
2050 if (pwrStatePostRefresh != PWR_IDLE) {
2051 // power state should be PWR_REF
2052 assert(pwrState == PWR_REF);
2053 DPRINTF(DRAMState, "Rank %d sleeping after refresh and was in "
2054 "power state %d before refreshing\n", rank,
2055 pwrStatePostRefresh);
2056 powerDownSleep(pwrState, curTick());
2057
2058 // Force PRE power-down if there are no outstanding commands
2059 // in Q after refresh.
2060 } else if (isQueueEmpty()) {
2061 // still have refresh event outstanding but there should
2062 // be no other events outstanding
2063 assert(outstandingEvents == 1);
2064 DPRINTF(DRAMState, "Rank %d sleeping after refresh but was NOT"
2065 " in a low power state before refreshing\n", rank);
2066 powerDownSleep(PWR_PRE_PDN, curTick());
2067
2068 } else {
2069 // move to the idle power state once the refresh is done, this
2070 // will also move the refresh state machine to the refresh
2071 // idle state
2072 schedulePowerEvent(PWR_IDLE, curTick());
2073 }
2074 }
2075
2076 // At this point, we have completed the current refresh.
2077 // In the SREF bypass case, we do not get to this state in the
2078 // refresh state machine and therefore can always schedule next event.
2079 // Compensate for the delay in actually performing the refresh
2080 // when scheduling the next one
2081 schedule(refreshEvent, refreshDueAt - memory.tRP);
2082
2083 DPRINTF(DRAMState, "Refresh done at %llu and next refresh"
2084 " at %llu\n", curTick(), refreshDueAt);
2085 }
2086 }
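// For reference, the refresh state machine driven by this event (a
// summary of the code above): REF_IDLE/REF_SREF_EXIT -> REF_DRAIN
// (wait for any in-flight access to this rank) -> REF_PD_EXIT (wake
// the rank if it is in a low-power state) -> REF_PRE (precharge any
// open banks) -> REF_START (issue REF and wait tRFC) -> REF_RUN
// (power down, self-refresh, or go idle, and schedule the next
// refresh relative to refreshDueAt, tRP early).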
2087
2088 void
2089 DRAMCtrl::Rank::schedulePowerEvent(PowerState pwr_state, Tick tick)
2090 {
2091 // respect causality
2092 assert(tick >= curTick());
2093
2094 if (!powerEvent.scheduled()) {
2095 DPRINTF(DRAMState, "Scheduling power event at %llu to state %d\n",
2096 tick, pwr_state);
2097
2098 // insert the new transition
2099 pwrStateTrans = pwr_state;
2100
2101 schedule(powerEvent, tick);
2102 } else {
2103 panic("Scheduled power event at %llu to state %d, "
2104 "with scheduled event at %llu to %d\n", tick, pwr_state,
2105 powerEvent.when(), pwrStateTrans);
2106 }
2107 }
2108
2109 void
2110 DRAMCtrl::Rank::powerDownSleep(PowerState pwr_state, Tick tick)
2111 {
2112 // if the low power state is active power-down, schedule the transition
2113 // now. In reality tCKE is needed to enter active power-down. This is
2114 // neglected here and could be added in the future.
2115 if (pwr_state == PWR_ACT_PDN) {
2116 schedulePowerEvent(pwr_state, tick);
2117 // push command to DRAMPower
2118 cmdList.push_back(Command(MemCommand::PDN_F_ACT, 0, tick));
2119 DPRINTF(DRAMPower, "%llu,PDN_F_ACT,0,%d\n", divCeil(tick,
2120 memory.tCK) - memory.timeStampOffset, rank);
2121 } else if (pwr_state == PWR_PRE_PDN) {
2122 // if the low power state is precharge power-down, schedule the
2123 // transition now. In reality tCKE is needed to enter precharge
2124 // power-down. This is neglected here.
2125 schedulePowerEvent(pwr_state, tick);
2126 //push Command to DRAMPower
2127 cmdList.push_back(Command(MemCommand::PDN_F_PRE, 0, tick));
2128 DPRINTF(DRAMPower, "%llu,PDN_F_PRE,0,%d\n", divCeil(tick,
2129 memory.tCK) - memory.timeStampOffset, rank);
2130 } else if (pwr_state == PWR_REF) {
2131 // if a refresh just occurred
2132 // transition to PRE_PDN now that all banks are closed
2133 // precharge power down requires tCKE to enter. For simplicity
2134 // this is not considered.
2135 schedulePowerEvent(PWR_PRE_PDN, tick);
2136 //push Command to DRAMPower
2137 cmdList.push_back(Command(MemCommand::PDN_F_PRE, 0, tick));
2138 DPRINTF(DRAMPower, "%llu,PDN_F_PRE,0,%d\n", divCeil(tick,
2139 memory.tCK) - memory.timeStampOffset, rank);
2140 } else if (pwr_state == PWR_SREF) {
2141 // should only enter SREF after PRE-PD wakeup to do a refresh
2142 assert(pwrStatePostRefresh == PWR_PRE_PDN);
2143 // self refresh requires time tCKESR to enter. For simplicity,
2144 // this is not considered.
2145 schedulePowerEvent(PWR_SREF, tick);
2146 // push Command to DRAMPower
2147 cmdList.push_back(Command(MemCommand::SREN, 0, tick));
2148 DPRINTF(DRAMPower, "%llu,SREN,0,%d\n", divCeil(tick,
2149 memory.tCK) - memory.timeStampOffset, rank);
2150 }
2151 // Ensure that we don't power-down and back up in same tick
2152 // Once we commit to PD entry, do it and wait for at least 1tCK
2153 // This could be replaced with tCKE if/when that is added to the model
2154 wakeUpAllowedAt = tick + memory.tCK;
2155
2156 // Transitioning to a low power state, set flag
2157 inLowPowerState = true;
2158 }
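// For reference, the entry cases above map onto DRAMPower commands as
// follows: PWR_ACT_PDN -> PDN_F_ACT (fast-exit active power-down),
// PWR_PRE_PDN and PWR_REF -> PDN_F_PRE (fast-exit precharge
// power-down), PWR_SREF -> SREN (self-refresh entry).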
2159
2160 void
2161 DRAMCtrl::Rank::scheduleWakeUpEvent(Tick exit_delay)
2162 {
2163 Tick wake_up_tick = std::max(curTick(), wakeUpAllowedAt);
2164
2165 DPRINTF(DRAMState, "Scheduling wake-up for rank %d at tick %d\n",
2166 rank, wake_up_tick);
2167
2168 // if waking for refresh, hold previous state
2169 // else reset state back to IDLE
2170 if (refreshState == REF_PD_EXIT) {
2171 pwrStatePostRefresh = pwrState;
2172 } else {
2173 // don't automatically transition back to LP state after next REF
2174 pwrStatePostRefresh = PWR_IDLE;
2175 }
2176
2177 // schedule wake-up with event to ensure entry has completed before
2178 // we try to wake-up
2179 schedule(wakeUpEvent, wake_up_tick);
2180
2181 for (auto &b : banks) {
2182 // respect both causality and any existing bank
2183 // constraints, some banks could already have a
2184 // (auto) precharge scheduled
2185 b.wrAllowedAt = std::max(wake_up_tick + exit_delay, b.wrAllowedAt);
2186 b.rdAllowedAt = std::max(wake_up_tick + exit_delay, b.rdAllowedAt);
2187 b.preAllowedAt = std::max(wake_up_tick + exit_delay, b.preAllowedAt);
2188 b.actAllowedAt = std::max(wake_up_tick + exit_delay, b.actAllowedAt);
2189 }
2190 // Transitioning out of low power state, clear flag
2191 inLowPowerState = false;
2192
2193 // push to DRAMPower
2194 // use pwrStateTrans for cases where we have a power event scheduled
2195 // to enter low power that has not yet been processed
2196 if (pwrStateTrans == PWR_ACT_PDN) {
2197 cmdList.push_back(Command(MemCommand::PUP_ACT, 0, wake_up_tick));
2198 DPRINTF(DRAMPower, "%llu,PUP_ACT,0,%d\n", divCeil(wake_up_tick,
2199 memory.tCK) - memory.timeStampOffset, rank);
2200
2201 } else if (pwrStateTrans == PWR_PRE_PDN) {
2202 cmdList.push_back(Command(MemCommand::PUP_PRE, 0, wake_up_tick));
2203 DPRINTF(DRAMPower, "%llu,PUP_PRE,0,%d\n", divCeil(wake_up_tick,
2204 memory.tCK) - memory.timeStampOffset, rank);
2205 } else if (pwrStateTrans == PWR_SREF) {
2206 cmdList.push_back(Command(MemCommand::SREX, 0, wake_up_tick));
2207 DPRINTF(DRAMPower, "%llu,SREX,0,%d\n", divCeil(wake_up_tick,
2208 memory.tCK) - memory.timeStampOffset, rank);
2209 }
2210 }
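// The wake-up counterpart of the mapping in powerDownSleep():
// PWR_ACT_PDN exits with PUP_ACT, PWR_PRE_PDN with PUP_PRE, and
// PWR_SREF with SREX, each stamped at wake_up_tick.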
2211
2212 void
2213 DRAMCtrl::Rank::processWakeUpEvent()
2214 {
2215 // Should be in a power-down or self-refresh state
2216 assert((pwrState == PWR_ACT_PDN) || (pwrState == PWR_PRE_PDN) ||
2217 (pwrState == PWR_SREF));
2218
2219 // Check current state to determine transition state
2220 if (pwrState == PWR_ACT_PDN) {
2221 // banks still open, transition to PWR_ACT
2222 schedulePowerEvent(PWR_ACT, curTick());
2223 } else {
2224 // transitioning from a precharge power-down or self-refresh state
2225 // banks are closed - transition to PWR_IDLE
2226 schedulePowerEvent(PWR_IDLE, curTick());
2227 }
2228 }
2229
2230 void
2231 DRAMCtrl::Rank::processPowerEvent()
2232 {
2233 assert(curTick() >= pwrStateTick);
2234 // remember where we were, and for how long
2235 Tick duration = curTick() - pwrStateTick;
2236 PowerState prev_state = pwrState;
2237
2238 // update the accounting
2239 pwrStateTime[prev_state] += duration;
2240
2241 // add to the total idle time
2242 if ((prev_state == PWR_PRE_PDN) || (prev_state == PWR_ACT_PDN) ||
2243 (prev_state == PWR_SREF)) {
2244 totalIdleTime += duration;
2245 }
2246
2247 pwrState = pwrStateTrans;
2248 pwrStateTick = curTick();
2249
2250 // if rank was refreshing, make sure to start scheduling requests again
2251 if (prev_state == PWR_REF) {
2252 // bus IDLED prior to REF
2253 // counter should be one for refresh command only
2254 assert(outstandingEvents == 1);
2255 // REF complete, decrement count and go back to IDLE
2256 --outstandingEvents;
2257 refreshState = REF_IDLE;
2258
2259 DPRINTF(DRAMState, "Was refreshing for %llu ticks\n", duration);
2260 // if moving back to power-down after refresh
2261 if (pwrState != PWR_IDLE) {
2262 assert(pwrState == PWR_PRE_PDN);
2263 DPRINTF(DRAMState, "Switching to power down state after refreshing"
2264 " rank %d at %llu tick\n", rank, curTick());
2265 }
2266
2267 // completed refresh event, ensure next request is scheduled
2268 if (!memory.nextReqEvent.scheduled()) {
2269 DPRINTF(DRAM, "Scheduling next request after refreshing"
2270 " rank %d\n", rank);
2271 schedule(memory.nextReqEvent, curTick());
2272 }
2273 }
2274
2275 if ((pwrState == PWR_ACT) && (refreshState == REF_PD_EXIT)) {
2276 // have exited ACT PD
2277 assert(prev_state == PWR_ACT_PDN);
2278
2279 // go back to REF event and close banks
2280 refreshState = REF_PRE;
2281 schedule(refreshEvent, curTick());
2282 } else if (pwrState == PWR_IDLE) {
2283 DPRINTF(DRAMState, "All banks precharged\n");
2284 if (prev_state == PWR_SREF) {
2285 // set refresh state to REF_SREF_EXIT, ensuring inRefIdleState
2286 // continues to return false during tXS after SREF exit
2287 // Schedule a refresh which kicks things back into action
2288 // when it finishes
2289 refreshState = REF_SREF_EXIT;
2290 schedule(refreshEvent, curTick() + memory.tXS);
2291 } else {
2292 // if we have a pending refresh, and are now moving to
2293 // the idle state, directly transition to, or schedule refresh
2294 if ((refreshState == REF_PRE) || (refreshState == REF_PD_EXIT)) {
2295 // ensure refresh is restarted only after final PRE command.
2296 // do not restart refresh if controller is in an intermediate
2297 // state, after PRE_PDN exit, when banks are IDLE but an
2298 // ACT is scheduled.
2299 if (!activateEvent.scheduled()) {
2300 // there should be nothing waiting at this point
2301 assert(!powerEvent.scheduled());
2302 if (refreshState == REF_PD_EXIT) {
2303 // exiting PRE PD, will be in IDLE until tXP expires
2304 // and then should transition to PWR_REF state
2305 assert(prev_state == PWR_PRE_PDN);
2306 schedulePowerEvent(PWR_REF, curTick() + memory.tXP);
2307 } else if (refreshState == REF_PRE) {
2308 // can directly move to PWR_REF state and proceed below
2309 pwrState = PWR_REF;
2310 }
2311 } else {
2312 // must have PRE scheduled to transition back to IDLE
2313 // and re-kick off refresh
2314 assert(prechargeEvent.scheduled());
2315 }
2316 }
2317 }
2318 }
2319
2320 // transition to the refresh state and re-start refresh process
2321 // refresh state machine will schedule the next power state transition
2322 if (pwrState == PWR_REF) {
2323 // completed final PRE for refresh or exiting power-down
2324 assert(refreshState == REF_PRE || refreshState == REF_PD_EXIT);
2325
2326 // exited PRE PD for refresh, with no pending commands
2327 // bypass auto-refresh and go straight to SREF, where memory
2328 // will issue refresh immediately upon entry
2329 if (pwrStatePostRefresh == PWR_PRE_PDN && isQueueEmpty() &&
2330 (memory.drainState() != DrainState::Draining) &&
2331 (memory.drainState() != DrainState::Drained)) {
2332 DPRINTF(DRAMState, "Rank %d bypassing refresh and transitioning "
2333 "to self refresh at %11u tick\n", rank, curTick());
2334 powerDownSleep(PWR_SREF, curTick());
2335
2336 // Since refresh was bypassed, remove event by decrementing count
2337 assert(outstandingEvents == 1);
2338 --outstandingEvents;
2339
2340 // reset state back to IDLE temporarily until SREF is entered
2341 pwrState = PWR_IDLE;
2342
2343 // Not bypassing refresh for SREF entry
2344 } else {
2345 DPRINTF(DRAMState, "Refreshing\n");
2346
2347 // there should be nothing waiting at this point
2348 assert(!powerEvent.scheduled());
2349
2350 // kick the refresh event loop into action again, and that
2351 // in turn will schedule a transition to the idle power
2352 // state once the refresh is done
2353 schedule(refreshEvent, curTick());
2354
2355 // Banks transitioned to IDLE, start REF
2356 refreshState = REF_START;
2357 }
2358 }
2359
2360 }
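// Editorial summary: processPowerEvent() commits the transition
// prepared by schedulePowerEvent(): it books the time spent in the
// previous state, restarts request scheduling once a refresh
// completes, chains back into the refresh state machine when banks
// still need precharging or refreshing, and implements the bypass
// where an idle rank skips the auto-refresh and drops straight back
// into self-refresh.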
2361
2362 void
2363 DRAMCtrl::Rank::updatePowerStats()
2364 {
2365 // All commands up to refresh have completed
2366 // flush cmdList to DRAMPower
2367 flushCmdList();
2368
2369 // Call the function that calculates window energy at intermediate update
2370 // events like at refresh, stats dump as well as at simulation exit.
2371 // Window starts at the last time the calcWindowEnergy function was called
2372 // and extends up to the current time.
2373 power.powerlib.calcWindowEnergy(divCeil(curTick(), memory.tCK) -
2374 memory.timeStampOffset);
2375
2376 // Get the energy from DRAMPower
2377 Data::MemoryPowerModel::Energy energy = power.powerlib.getEnergy();
2378
2379 // The energy components inside the power lib are calculated over
2380 // the window so accumulate into the corresponding gem5 stat
2381 actEnergy += energy.act_energy * memory.devicesPerRank;
2382 preEnergy += energy.pre_energy * memory.devicesPerRank;
2383 readEnergy += energy.read_energy * memory.devicesPerRank;
2384 writeEnergy += energy.write_energy * memory.devicesPerRank;
2385 refreshEnergy += energy.ref_energy * memory.devicesPerRank;
2386 actBackEnergy += energy.act_stdby_energy * memory.devicesPerRank;
2387 preBackEnergy += energy.pre_stdby_energy * memory.devicesPerRank;
2388 actPowerDownEnergy += energy.f_act_pd_energy * memory.devicesPerRank;
2389 prePowerDownEnergy += energy.f_pre_pd_energy * memory.devicesPerRank;
2390 selfRefreshEnergy += energy.sref_energy * memory.devicesPerRank;
2391
2392 // Accumulate window energy into the total energy.
2393 totalEnergy += energy.window_energy * memory.devicesPerRank;
2394 // Average power must not be accumulated but calculated over the time
2395 // since the last stats reset. SimClock::Frequency is the tick
2396 // frequency (in ticks per second), i.e. 1 / tick_period.
2397 //              energy (pJ)        1e-9
2398 // power (mW) = ----------- * ----------------
2399 //              time (tick)   tick_period (s)
2400 averagePower = (totalEnergy.value() /
2401 (curTick() - memory.lastStatsResetTick)) *
2402 (SimClock::Frequency / 1000000000.0);
2403 }
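// Worked example with hypothetical values: assuming gem5's usual 1 ps
// tick (SimClock::Frequency == 1e12 ticks/s), a window accumulating
// totalEnergy = 2e6 pJ over 1e9 ticks (1 ms) yields
// averagePower = (2e6 / 1e9) * (1e12 / 1e9) = 2e-3 * 1e3 = 2 mW.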
2404
2405 void
2406 DRAMCtrl::Rank::computeStats()
2407 {
2408 DPRINTF(DRAM,"Computing stats due to a dump callback\n");
2409
2410 // Update the stats
2411 updatePowerStats();
2412
2413 // final update of power state times
2414 pwrStateTime[pwrState] += (curTick() - pwrStateTick);
2415 pwrStateTick = curTick();
2416
2417 }
2418
2419 void
2420 DRAMCtrl::Rank::resetStats() {
2421 // The only way to clear the counters in DRAMPower is to call
2422 // calcWindowEnergy function as that then calls clearCounters. The
2423 // clearCounters method itself is private.
2424 power.powerlib.calcWindowEnergy(divCeil(curTick(), memory.tCK) -
2425 memory.timeStampOffset);
2426
2427 }
2428
2429 void
2430 DRAMCtrl::Rank::regStats()
2431 {
2432 pwrStateTime
2433 .init(6)
2434 .name(name() + ".memoryStateTime")
2435 .desc("Time in different power states");
2436 pwrStateTime.subname(0, "IDLE");
2437 pwrStateTime.subname(1, "REF");
2438 pwrStateTime.subname(2, "SREF");
2439 pwrStateTime.subname(3, "PRE_PDN");
2440 pwrStateTime.subname(4, "ACT");
2441 pwrStateTime.subname(5, "ACT_PDN");
2442
2443 actEnergy
2444 .name(name() + ".actEnergy")
2445 .desc("Energy for activate commands per rank (pJ)");
2446
2447 preEnergy
2448 .name(name() + ".preEnergy")
2449 .desc("Energy for precharge commands per rank (pJ)");
2450
2451 readEnergy
2452 .name(name() + ".readEnergy")
2453 .desc("Energy for read commands per rank (pJ)");
2454
2455 writeEnergy
2456 .name(name() + ".writeEnergy")
2457 .desc("Energy for write commands per rank (pJ)");
2458
2459 refreshEnergy
2460 .name(name() + ".refreshEnergy")
2461 .desc("Energy for refresh commands per rank (pJ)");
2462
2463 actBackEnergy
2464 .name(name() + ".actBackEnergy")
2465 .desc("Energy for active background per rank (pJ)");
2466
2467 preBackEnergy
2468 .name(name() + ".preBackEnergy")
2469 .desc("Energy for precharge background per rank (pJ)");
2470
2471 actPowerDownEnergy
2472 .name(name() + ".actPowerDownEnergy")
2473 .desc("Energy for active power-down per rank (pJ)");
2474
2475 prePowerDownEnergy
2476 .name(name() + ".prePowerDownEnergy")
2477 .desc("Energy for precharge power-down per rank (pJ)");
2478
2479 selfRefreshEnergy
2480 .name(name() + ".selfRefreshEnergy")
2481 .desc("Energy for self refresh per rank (pJ)");
2482
2483 totalEnergy
2484 .name(name() + ".totalEnergy")
2485 .desc("Total energy per rank (pJ)");
2486
2487 averagePower
2488 .name(name() + ".averagePower")
2489 .desc("Core power per rank (mW)");
2490
2491 totalIdleTime
2492 .name(name() + ".totalIdleTime")
2493 .desc("Total Idle time Per DRAM Rank");
2494
2495 Stats::registerDumpCallback(new RankDumpCallback(this));
2496 Stats::registerResetCallback(new RankResetCallback(this));
2497 }

2498 void
2499 DRAMCtrl::regStats()
2500 {
2501 using namespace Stats;
2502
2503 MemCtrl::regStats();
2504
2505 for (auto r : ranks) {
2506 r->regStats();
2507 }
2508
2509 registerResetCallback(new MemResetCallback(this));
2510
2511 readReqs
2512 .name(name() + ".readReqs")
2513 .desc("Number of read requests accepted");
2514
2515 writeReqs
2516 .name(name() + ".writeReqs")
2517 .desc("Number of write requests accepted");
2518
2519 readBursts
2520 .name(name() + ".readBursts")
2521 .desc("Number of DRAM read bursts, "
2522 "including those serviced by the write queue");
2523
2524 writeBursts
2525 .name(name() + ".writeBursts")
2526 .desc("Number of DRAM write bursts, "
2527 "including those merged in the write queue");
2528
2529 servicedByWrQ
2530 .name(name() + ".servicedByWrQ")
2531 .desc("Number of DRAM read bursts serviced by the write queue");
2532
2533 mergedWrBursts
2534 .name(name() + ".mergedWrBursts")
2535 .desc("Number of DRAM write bursts merged with an existing one");
2536
2537 neitherReadNorWrite
2538 .name(name() + ".neitherReadNorWriteReqs")
2539 .desc("Number of requests that are neither read nor write");
2540
2541 perBankRdBursts
2542 .init(banksPerRank * ranksPerChannel)
2543 .name(name() + ".perBankRdBursts")
2544 .desc("Per bank write bursts");
2545
2546 perBankWrBursts
2547 .init(banksPerRank * ranksPerChannel)
2548 .name(name() + ".perBankWrBursts")
2549 .desc("Per bank write bursts");
2550
2551 avgRdQLen
2552 .name(name() + ".avgRdQLen")
2553 .desc("Average read queue length when enqueuing")
2554 .precision(2);
2555
2556 avgWrQLen
2557 .name(name() + ".avgWrQLen")
2558 .desc("Average write queue length when enqueuing")
2559 .precision(2);
2560
2561 totQLat
2562 .name(name() + ".totQLat")
2563 .desc("Total ticks spent queuing");
2564
2565 totBusLat
2566 .name(name() + ".totBusLat")
2567 .desc("Total ticks spent in databus transfers");
2568
2569 totMemAccLat
2570 .name(name() + ".totMemAccLat")
2571 .desc("Total ticks spent from burst creation until serviced "
2572 "by the DRAM");
2573
2574 avgQLat
2575 .name(name() + ".avgQLat")
2576 .desc("Average queueing delay per DRAM burst")
2577 .precision(2);
2578
2579 avgQLat = totQLat / (readBursts - servicedByWrQ);
2580
2581 avgBusLat
2582 .name(name() + ".avgBusLat")
2583 .desc("Average bus latency per DRAM burst")
2584 .precision(2);
2585
2586 avgBusLat = totBusLat / (readBursts - servicedByWrQ);
2587
2588 avgMemAccLat
2589 .name(name() + ".avgMemAccLat")
2590 .desc("Average memory access latency per DRAM burst")
2591 .precision(2);
2592
2593 avgMemAccLat = totMemAccLat / (readBursts - servicedByWrQ);
2594
2595 numRdRetry
2596 .name(name() + ".numRdRetry")
2597 .desc("Number of times read queue was full causing retry");
2598
2599 numWrRetry
2600 .name(name() + ".numWrRetry")
2601 .desc("Number of times write queue was full causing retry");
2602
2603 readRowHits
2604 .name(name() + ".readRowHits")
2605 .desc("Number of row buffer hits during reads");
2606
2607 writeRowHits
2608 .name(name() + ".writeRowHits")
2609 .desc("Number of row buffer hits during writes");
2610
2611 readRowHitRate
2612 .name(name() + ".readRowHitRate")
2613 .desc("Row buffer hit rate for reads")
2614 .precision(2);
2615
2616 readRowHitRate = (readRowHits / (readBursts - servicedByWrQ)) * 100;
2617
2618 writeRowHitRate
2619 .name(name() + ".writeRowHitRate")
2620 .desc("Row buffer hit rate for writes")
2621 .precision(2);
2622
2623 writeRowHitRate = (writeRowHits / (writeBursts - mergedWrBursts)) * 100;
2624
2625 readPktSize
2626 .init(ceilLog2(burstSize) + 1)
2627 .name(name() + ".readPktSize")
2628 .desc("Read request sizes (log2)");
2629
2630 writePktSize
2631 .init(ceilLog2(burstSize) + 1)
2632 .name(name() + ".writePktSize")
2633 .desc("Write request sizes (log2)");
2634
2635 rdQLenPdf
2636 .init(readBufferSize)
2637 .name(name() + ".rdQLenPdf")
2638 .desc("What read queue length does an incoming req see");
2639
2640 wrQLenPdf
2641 .init(writeBufferSize)
2642 .name(name() + ".wrQLenPdf")
2643 .desc("What write queue length does an incoming req see");
2644
2645 bytesPerActivate
2646 .init(maxAccessesPerRow ? maxAccessesPerRow : rowBufferSize)
2647 .name(name() + ".bytesPerActivate")
2648 .desc("Bytes accessed per row activation")
2649 .flags(nozero);
2650
2651 rdPerTurnAround
2652 .init(readBufferSize)
2653 .name(name() + ".rdPerTurnAround")
2654 .desc("Reads before turning the bus around for writes")
2655 .flags(nozero);
2656
2657 wrPerTurnAround
2658 .init(writeBufferSize)
2659 .name(name() + ".wrPerTurnAround")
2660 .desc("Writes before turning the bus around for reads")
2661 .flags(nozero);
2662
2663 bytesReadDRAM
2664 .name(name() + ".bytesReadDRAM")
2665 .desc("Total number of bytes read from DRAM");
2666
2667 bytesReadWrQ
2668 .name(name() + ".bytesReadWrQ")
2669 .desc("Total number of bytes read from write queue");
2670
2671 bytesWritten
2672 .name(name() + ".bytesWritten")
2673 .desc("Total number of bytes written to DRAM");
2674
2675 bytesReadSys
2676 .name(name() + ".bytesReadSys")
2677 .desc("Total read bytes from the system interface side");
2678
2679 bytesWrittenSys
2680 .name(name() + ".bytesWrittenSys")
2681 .desc("Total written bytes from the system interface side");
2682
2683 avgRdBW
2684 .name(name() + ".avgRdBW")
2685 .desc("Average DRAM read bandwidth in MiByte/s")
2686 .precision(2);
2687
2688 avgRdBW = (bytesReadDRAM / 1000000) / simSeconds;
2689
2690 avgWrBW
2691 .name(name() + ".avgWrBW")
2692 .desc("Average achieved write bandwidth in MiByte/s")
2693 .precision(2);
2694
2695 avgWrBW = (bytesWritten / 1000000) / simSeconds;
2696
2697 avgRdBWSys
2698 .name(name() + ".avgRdBWSys")
2699 .desc("Average system read bandwidth in MiByte/s")
2700 .precision(2);
2701
2702 avgRdBWSys = (bytesReadSys / 1000000) / simSeconds;
2703
2704 avgWrBWSys
2705 .name(name() + ".avgWrBWSys")
2706 .desc("Average system write bandwidth in MiByte/s")
2707 .precision(2);
2708
2709 avgWrBWSys = (bytesWrittenSys / 1000000) / simSeconds;
2710
2711 peakBW
2712 .name(name() + ".peakBW")
2713 .desc("Theoretical peak bandwidth in MiByte/s")
2714 .precision(2);
2715
2716 peakBW = (SimClock::Frequency / tBURST) * burstSize / 1000000;
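// Worked example with hypothetical DDR3-1600-like parameters: with a
// 1 ps tick, tBURST = 5000 ticks (5 ns) and burstSize = 64 bytes,
// peakBW = (1e12 / 5000) * 64 / 1000000 = 12800. Note the divisor is
// 10^6, so this stat (and the avg*BW stats above) are strictly decimal
// MB/s even though the descriptions say MiByte/s.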
2717
2718 busUtil
2719 .name(name() + ".busUtil")
2720 .desc("Data bus utilization in percentage")
2721 .precision(2);
2722 busUtil = (avgRdBW + avgWrBW) / peakBW * 100;
2723
2724 totGap
2725 .name(name() + ".totGap")
2726 .desc("Total gap between requests");
2727
2728 avgGap
2729 .name(name() + ".avgGap")
2730 .desc("Average gap between requests")
2731 .precision(2);
2732
2733 avgGap = totGap / (readReqs + writeReqs);
2734
2735 // Stats for DRAM Power calculation based on Micron datasheet
2736 busUtilRead
2737 .name(name() + ".busUtilRead")
2738 .desc("Data bus utilization in percentage for reads")
2739 .precision(2);
2740
2741 busUtilRead = avgRdBW / peakBW * 100;
2742
2743 busUtilWrite
2744 .name(name() + ".busUtilWrite")
2745 .desc("Data bus utilization in percentage for writes")
2746 .precision(2);
2747
2748 busUtilWrite = avgWrBW / peakBW * 100;
2749
2750 pageHitRate
2751 .name(name() + ".pageHitRate")
2752 .desc("Row buffer hit rate, read and write combined")
2753 .precision(2);
2754
2755 pageHitRate = (writeRowHits + readRowHits) /
2756 (writeBursts - mergedWrBursts + readBursts - servicedByWrQ) * 100;
2757
2758 // per-master bytes read and written to memory
2759 masterReadBytes
2760 .init(_system->maxMasters())
2761 .name(name() + ".masterReadBytes")
2762 .desc("Per-master bytes read from memory")
2763 .flags(nozero | nonan);
2764
2765 masterWriteBytes
2766 .init(_system->maxMasters())
2767 .name(name() + ".masterWriteBytes")
2768 .desc("Per-master bytes write to memory")
2769 .flags(nozero | nonan);
2770
2771 // per-master bytes read and written to memory rate
2772 masterReadRate.name(name() + ".masterReadRate")
2773 .desc("Per-master bytes read from memory rate (Bytes/sec)")
2774 .flags(nozero | nonan)
2775 .precision(12);
2776
2777 masterReadRate = masterReadBytes/simSeconds;
2778
2779 masterWriteRate
2780 .name(name() + ".masterWriteRate")
2781 .desc("Per-master bytes write to memory rate (Bytes/sec)")
2782 .flags(nozero | nonan)
2783 .precision(12);
2784
2785 masterWriteRate = masterWriteBytes/simSeconds;
2786
2787 masterReadAccesses
2788 .init(_system->maxMasters())
2789 .name(name() + ".masterReadAccesses")
2790 .desc("Per-master read serviced memory accesses")
2791 .flags(nozero);
2792
2793 masterWriteAccesses
2794 .init(_system->maxMasters())
2795 .name(name() + ".masterWriteAccesses")
2796 .desc("Per-master write serviced memory accesses")
2797 .flags(nozero);
2798
2799
2800 masterReadTotalLat
2801 .init(_system->maxMasters())
2802 .name(name() + ".masterReadTotalLat")
2803 .desc("Per-master read total memory access latency")
2804 .flags(nozero | nonan);
2805
2806 masterReadAvgLat.name(name() + ".masterReadAvgLat")
2807 .desc("Per-master read average memory access latency")
2808 .flags(nonan)
2809 .precision(2);
2810
2811 masterReadAvgLat = masterReadTotalLat/masterReadAccesses;
2812
2813 masterWriteTotalLat
2814 .init(_system->maxMasters())
2815 .name(name() + ".masterWriteTotalLat")
2816 .desc("Per-master write total memory access latency")
2817 .flags(nozero | nonan);
2818
2819 masterWriteAvgLat.name(name() + ".masterWriteAvgLat")
2820 .desc("Per-master write average memory access latency")
2821 .flags(nonan)
2822 .precision(2);
2823
2824 masterWriteAvgLat = masterWriteTotalLat/masterWriteAccesses;
2825
2826 for (int i = 0; i < _system->maxMasters(); i++) {
2827 const std::string master = _system->getMasterName(i);
2828 masterReadBytes.subname(i, master);
2829 masterReadRate.subname(i, master);
2830 masterWriteBytes.subname(i, master);
2831 masterWriteRate.subname(i, master);
2832 masterReadAccesses.subname(i, master);
2833 masterWriteAccesses.subname(i, master);
2834 masterReadTotalLat.subname(i, master);
2835 masterReadAvgLat.subname(i, master);
2836 masterWriteTotalLat.subname(i, master);
2837 masterWriteAvgLat.subname(i, master);
2838 }
2839 }
2840
2841 void
2842 DRAMCtrl::recvFunctional(PacketPtr pkt)
2843 {
2844 // rely on the abstract memory
2845 functionalAccess(pkt);
2846 }
2847
2848 BaseSlavePort&
2849 DRAMCtrl::getSlavePort(const string &if_name, PortID idx)
2850 {
2851 if (if_name != "port") {
2852 return MemObject::getSlavePort(if_name, idx);
2853 } else {
2854 return port;
2855 }
2856 }
2857
2858 DrainState
2859 DRAMCtrl::drain()
2860 {
2861 // if there is anything in any of our internal queues, keep track
2862 // of that as well
2863 if (!(!totalWriteQueueSize && !totalReadQueueSize && respQueue.empty() &&
2864 allRanksDrained())) {
2865
2866 DPRINTF(Drain, "DRAM controller not drained, write: %d, read: %d,"
2867 " resp: %d\n", totalWriteQueueSize, totalReadQueueSize,
2868 respQueue.size());
2869
2870 // the only queue that is not drained automatically over time
2871 // is the write queue, thus kick things into action if needed
2872 if (totalWriteQueueSize && !nextReqEvent.scheduled()) {
2873 schedule(nextReqEvent, curTick());
2874 }
2875
2876 // also need to kick off events to exit self-refresh
2877 for (auto r : ranks) {
2878 // force self-refresh exit, which in turn will issue auto-refresh
2879 if (r->pwrState == PWR_SREF) {
2880 DPRINTF(DRAM,"Rank%d: Forcing self-refresh wakeup in drain\n",
2881 r->rank);
2882 r->scheduleWakeUpEvent(tXS);
2883 }
2884 }
2885
2886 return DrainState::Draining;
2887 } else {
2888 return DrainState::Drained;
2889 }
2890 }
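// Editorial note on the drain protocol: the controller reports
// Draining while any read, write, or response entries remain or any
// rank is outside its idle power/refresh state; only the write queue
// needs an explicit kick, since writes below the low threshold would
// otherwise sit in the queue indefinitely.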
2891
2892 bool
2893 DRAMCtrl::allRanksDrained() const
2894 {
2895 // true until proven false
2896 bool all_ranks_drained = true;
2897 for (auto r : ranks) {
2898 // verify that the power state is IDLE, ensuring all banks are
2899 // closed and rank is not in a low power state. Also verify that rank
2900 // is idle from a refresh point of view.
2901 all_ranks_drained = r->inPwrIdleState() && r->inRefIdleState() &&
2902 all_ranks_drained;
2903 }
2904 return all_ranks_drained;
2905 }
2906
2907 void
2908 DRAMCtrl::drainResume()
2909 {
2910 if (!isTimingMode && system()->isTimingMode()) {
2911 // if we switched to timing mode, kick things into action,
2912 // and behave as if we restored from a checkpoint
2913 startup();
2914 } else if (isTimingMode && !system()->isTimingMode()) {
2915 // if we switch from timing mode, stop the refresh events to
2916 // not cause issues with KVM
2917 for (auto r : ranks) {
2918 r->suspend();
2919 }
2920 }
2921
2922 // update the mode
2923 isTimingMode = system()->isTimingMode();
2924 }
2925
2926 DRAMCtrl::MemoryPort::MemoryPort(const std::string& name, DRAMCtrl& _memory)
2927 : QueuedSlavePort(name, &_memory, queue), queue(_memory, *this, true),
2928 memory(_memory)
2929 { }
2930
2931 AddrRangeList
2932 DRAMCtrl::MemoryPort::getAddrRanges() const
2933 {
2934 AddrRangeList ranges;
2935 ranges.push_back(memory.getAddrRange());
2936 return ranges;
2937 }
2938
2939 void
2940 DRAMCtrl::MemoryPort::recvFunctional(PacketPtr pkt)
2941 {
2942 pkt->pushLabel(memory.name());
2943
2944 if (!queue.trySatisfyFunctional(pkt)) {
2945 // Default implementation of SimpleTimingPort::recvFunctional()
2946 // calls recvAtomic() and throws away the latency; we can save a
2947 // little here by just not calculating the latency.
2948 memory.recvFunctional(pkt);
2949 }
2950
2951 pkt->popLabel();
2952 }
2953
2954 Tick
2955 DRAMCtrl::MemoryPort::recvAtomic(PacketPtr pkt)
2956 {
2957 return memory.recvAtomic(pkt);
2958 }
2959
2960 bool
2961 DRAMCtrl::MemoryPort::recvTimingReq(PacketPtr pkt)
2962 {
2963 // pass it to the memory controller
2964 return memory.recvTimingReq(pkt);
2965 }
2966
2967 DRAMCtrl*
2968 DRAMCtrlParams::create()
2969 {
2970 return new DRAMCtrl(this);
2971 }