mem: Avoid DRAM write queue iteration for merging and read lookup
[gem5.git] / src / mem / dram_ctrl.cc
/*
 * Copyright (c) 2010-2015 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2013 Amin Farmahini-Farahani
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Andreas Hansson
 *          Ani Udipi
 *          Neha Agarwal
 *          Omar Naji
 */

#include "base/bitfield.hh"
#include "base/trace.hh"
#include "debug/DRAM.hh"
#include "debug/DRAMPower.hh"
#include "debug/DRAMState.hh"
#include "debug/Drain.hh"
#include "mem/dram_ctrl.hh"
#include "sim/system.hh"

using namespace std;
using namespace Data;

DRAMCtrl::DRAMCtrl(const DRAMCtrlParams* p) :
    AbstractMemory(p),
    port(name() + ".port", *this), isTimingMode(false),
    retryRdReq(false), retryWrReq(false),
    busState(READ),
    nextReqEvent(this), respondEvent(this),
    drainManager(NULL),
    deviceSize(p->device_size),
    deviceBusWidth(p->device_bus_width), burstLength(p->burst_length),
    deviceRowBufferSize(p->device_rowbuffer_size),
    devicesPerRank(p->devices_per_rank),
    burstSize((devicesPerRank * burstLength * deviceBusWidth) / 8),
    rowBufferSize(devicesPerRank * deviceRowBufferSize),
    columnsPerRowBuffer(rowBufferSize / burstSize),
    columnsPerStripe(range.interleaved() ? range.granularity() / burstSize : 1),
    ranksPerChannel(p->ranks_per_channel),
    bankGroupsPerRank(p->bank_groups_per_rank),
    bankGroupArch(p->bank_groups_per_rank > 0),
    banksPerRank(p->banks_per_rank), channels(p->channels), rowsPerBank(0),
    readBufferSize(p->read_buffer_size),
    writeBufferSize(p->write_buffer_size),
    writeHighThreshold(writeBufferSize * p->write_high_thresh_perc / 100.0),
    writeLowThreshold(writeBufferSize * p->write_low_thresh_perc / 100.0),
    minWritesPerSwitch(p->min_writes_per_switch),
    writesThisTime(0), readsThisTime(0),
    tCK(p->tCK), tWTR(p->tWTR), tRTW(p->tRTW), tCS(p->tCS), tBURST(p->tBURST),
    tCCD_L(p->tCCD_L), tRCD(p->tRCD), tCL(p->tCL), tRP(p->tRP), tRAS(p->tRAS),
    tWR(p->tWR), tRTP(p->tRTP), tRFC(p->tRFC), tREFI(p->tREFI), tRRD(p->tRRD),
    tRRD_L(p->tRRD_L), tXAW(p->tXAW), activationLimit(p->activation_limit),
    memSchedPolicy(p->mem_sched_policy), addrMapping(p->addr_mapping),
    pageMgmt(p->page_policy),
    maxAccessesPerRow(p->max_accesses_per_row),
    frontendLatency(p->static_frontend_latency),
    backendLatency(p->static_backend_latency),
    busBusyUntil(0), prevArrival(0),
    nextReqTime(0), activeRank(0), timeStampOffset(0)
{
    // sanity check the ranks since we rely on bit slicing for the
    // address decoding
    fatal_if(!isPowerOf2(ranksPerChannel), "DRAM rank count of %d is not "
             "allowed, must be a power of two\n", ranksPerChannel);

    fatal_if(!isPowerOf2(burstSize), "DRAM burst size %d is not allowed, "
             "must be a power of two\n", burstSize);

    for (int i = 0; i < ranksPerChannel; i++) {
        Rank* rank = new Rank(*this, p);
        ranks.push_back(rank);

        rank->actTicks.resize(activationLimit, 0);
        rank->banks.resize(banksPerRank);
        rank->rank = i;

        for (int b = 0; b < banksPerRank; b++) {
            rank->banks[b].bank = b;
            // GDDR addressing of banks to BG is linear.
            // Here we assume that all DRAM generations address bank groups as
            // follows:
            if (bankGroupArch) {
                // Simply assign lower bits to bank group in order to
                // rotate across bank groups as banks are incremented
                // e.g. with 4 banks per bank group and 16 banks total:
                //    banks 0,4,8,12  are in bank group 0
                //    banks 1,5,9,13  are in bank group 1
                //    banks 2,6,10,14 are in bank group 2
                //    banks 3,7,11,15 are in bank group 3
                rank->banks[b].bankgr = b % bankGroupsPerRank;
            } else {
                // No bank groups; simply assign to bank number
                rank->banks[b].bankgr = b;
            }
        }
    }

    // perform a basic check of the write thresholds
    if (p->write_low_thresh_perc >= p->write_high_thresh_perc)
        fatal("Write buffer low threshold %d must be smaller than the "
              "high threshold %d\n", p->write_low_thresh_perc,
              p->write_high_thresh_perc);

    // determine the rows per bank by looking at the total capacity
    uint64_t capacity = ULL(1) << ceilLog2(AbstractMemory::size());

    // determine the actual DRAM capacity from the DRAM config in Mbytes
    uint64_t deviceCapacity = deviceSize / (1024 * 1024) * devicesPerRank *
        ranksPerChannel;

    // if the actual DRAM size does not match the memory capacity in the
    // system, warn
    if (deviceCapacity != capacity / (1024 * 1024))
        warn("DRAM device capacity (%d Mbytes) does not match the "
             "address range assigned (%d Mbytes)\n", deviceCapacity,
             capacity / (1024 * 1024));

    DPRINTF(DRAM, "Memory capacity %lld (%lld) bytes\n", capacity,
            AbstractMemory::size());

    DPRINTF(DRAM, "Row buffer size %d bytes with %d columns per row buffer\n",
            rowBufferSize, columnsPerRowBuffer);

    rowsPerBank = capacity / (rowBufferSize * banksPerRank * ranksPerChannel);

    // some basic sanity checks
    if (tREFI <= tRP || tREFI <= tRFC) {
        fatal("tREFI (%d) must be larger than tRP (%d) and tRFC (%d)\n",
              tREFI, tRP, tRFC);
    }

    // basic bank group architecture checks
    if (bankGroupArch) {
        // must have at least one bank per bank group
        if (bankGroupsPerRank > banksPerRank) {
            fatal("banks per rank (%d) must be equal to or larger than "
                  "bank groups per rank (%d)\n",
                  banksPerRank, bankGroupsPerRank);
        }
        // must have the same number of banks in each bank group
        if ((banksPerRank % bankGroupsPerRank) != 0) {
            fatal("Banks per rank (%d) must be evenly divisible by bank "
                  "groups per rank (%d) for equal banks per bank group\n",
                  banksPerRank, bankGroupsPerRank);
        }
        // tCCD_L should be greater than the minimal, back-to-back burst delay
        if (tCCD_L <= tBURST) {
            fatal("tCCD_L (%d) should be larger than tBURST (%d) when "
                  "bank groups per rank (%d) is greater than 1\n",
                  tCCD_L, tBURST, bankGroupsPerRank);
        }
        // tRRD_L is the same-bank-group ACT-to-ACT delay and should be at
        // least as large as tRRD; some datasheets specify it equal to tRRD
        if (tRRD_L < tRRD) {
            fatal("tRRD_L (%d) should be at least as large as tRRD (%d) when "
                  "bank groups per rank (%d) is greater than 1\n",
                  tRRD_L, tRRD, bankGroupsPerRank);
        }
    }

}

void
DRAMCtrl::init()
{
    AbstractMemory::init();

    if (!port.isConnected()) {
        fatal("DRAMCtrl %s is unconnected!\n", name());
    } else {
        port.sendRangeChange();
    }

    // a few sanity checks on the interleaving; these are done here to
    // ensure that the system pointer is initialised
    if (range.interleaved()) {
        if (channels != range.stripes())
            fatal("%s has %d interleaved address stripes but %d channel(s)\n",
                  name(), range.stripes(), channels);

        if (addrMapping == Enums::RoRaBaChCo) {
            if (rowBufferSize != range.granularity()) {
                fatal("Channel interleaving of %s doesn't match RoRaBaChCo "
                      "address map\n", name());
            }
        } else if (addrMapping == Enums::RoRaBaCoCh ||
                   addrMapping == Enums::RoCoRaBaCh) {
            // for the interleavings with channel bits in the bottom,
            // if the system uses a channel striping granularity that
            // is larger than the DRAM burst size, then map the
            // sequential accesses within a stripe to a number of
            // columns in the DRAM, effectively placing some of the
            // lower-order column bits as the least-significant bits
            // of the address (above the ones denoting the burst size)
            assert(columnsPerStripe >= 1);

            // channel striping has to be done at a granularity that
            // is equal or larger to a cache line
            if (system()->cacheLineSize() > range.granularity()) {
                fatal("Channel interleaving of %s must be at least as large "
                      "as the cache line size\n", name());
            }

            // ...and equal or smaller than the row-buffer size
            if (rowBufferSize < range.granularity()) {
                fatal("Channel interleaving of %s must be at most as large "
                      "as the row-buffer size\n", name());
            }
            // this is essentially the check above, so just to be sure
            assert(columnsPerStripe <= columnsPerRowBuffer);
        }
    }
}

void
DRAMCtrl::startup()
{
    // remember the memory system mode of operation
    isTimingMode = system()->isTimingMode();

    if (isTimingMode) {
        // timestamp offset should be in clock cycles for DRAMPower
        timeStampOffset = divCeil(curTick(), tCK);

        // update the start tick for the precharge accounting to the
        // current tick
        for (auto r : ranks) {
            r->startup(curTick() + tREFI - tRP);
        }

        // shift the bus busy time sufficiently far ahead that we never
        // have to worry about negative values when computing the time for
        // the next request, this will add an insignificant bubble at the
        // start of simulation
        busBusyUntil = curTick() + tRP + tRCD + tCL;
    }
}

Tick
DRAMCtrl::recvAtomic(PacketPtr pkt)
{
    DPRINTF(DRAM, "recvAtomic: %s 0x%x\n", pkt->cmdString(), pkt->getAddr());

    // do the actual memory access and turn the packet into a response
    access(pkt);

    Tick latency = 0;
    if (!pkt->memInhibitAsserted() && pkt->hasData()) {
        // this value is not supposed to be accurate, just enough to
        // keep things going, mimic a closed page
        latency = tRP + tRCD + tCL;
    }
    return latency;
}

bool
DRAMCtrl::readQueueFull(unsigned int neededEntries) const
{
    DPRINTF(DRAM, "Read queue limit %d, current size %d, entries needed %d\n",
            readBufferSize, readQueue.size() + respQueue.size(),
            neededEntries);

    return
        (readQueue.size() + respQueue.size() + neededEntries) > readBufferSize;
}

bool
DRAMCtrl::writeQueueFull(unsigned int neededEntries) const
{
    DPRINTF(DRAM, "Write queue limit %d, current size %d, entries needed %d\n",
            writeBufferSize, writeQueue.size(), neededEntries);
    return (writeQueue.size() + neededEntries) > writeBufferSize;
}

DRAMCtrl::DRAMPacket*
DRAMCtrl::decodeAddr(PacketPtr pkt, Addr dramPktAddr, unsigned size,
                     bool isRead)
{
    // decode the address based on the address mapping scheme, with
    // Ro, Ra, Co, Ba and Ch denoting row, rank, column, bank and
    // channel, respectively
    uint8_t rank;
    uint8_t bank;
    // use a 64-bit unsigned during the computations as the row is
    // always the top bits, and check before creating the DRAMPacket
    uint64_t row;

    // truncate the address to a DRAM burst, which makes it unique to
    // a specific column, row, bank, rank and channel
    Addr addr = dramPktAddr / burstSize;

    // we have removed the lowest order address bits that denote the
    // position within the column
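    // Worked example (added for illustration; all parameter values are
    // assumed, not taken from any particular config): with burstSize = 64,
    // columnsPerRowBuffer = 128, channels = 1, banksPerRank = 8 and
    // ranksPerChannel = 2, the RoRaBaChCo decode of dramPktAddr = 0x12345
    // proceeds as
    //   addr = 0x12345 / 64 = 1165      (drop the burst offset)
    //   addr = 1165 / 128   = 9         (drop the column bits)
    //   addr = 9 / 1        = 9         (channel divide is a no-op here)
    //   bank = 9 % 8 = 1,   addr = 9 / 8 = 1
    //   rank = 1 % 2 = 1,   addr = 1 / 2 = 0
    //   row  = 0 % rowsPerBank = 0
    // i.e. the burst maps to rank 1, bank 1, row 0.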
    if (addrMapping == Enums::RoRaBaChCo) {
        // the lowest order bits denote the column to ensure that
        // sequential cache lines occupy the same row
        addr = addr / columnsPerRowBuffer;

        // take out the channel part of the address
        addr = addr / channels;

        // after the channel bits, get the bank bits to interleave
        // over the banks
        bank = addr % banksPerRank;
        addr = addr / banksPerRank;

        // after the bank, we get the rank bits which thus interleave
        // over the ranks
        rank = addr % ranksPerChannel;
        addr = addr / ranksPerChannel;

        // lastly, get the row bits
        row = addr % rowsPerBank;
        addr = addr / rowsPerBank;
    } else if (addrMapping == Enums::RoRaBaCoCh) {
        // take out the lower-order column bits
        addr = addr / columnsPerStripe;

        // take out the channel part of the address
        addr = addr / channels;

        // next, the higher-order column bits
        addr = addr / (columnsPerRowBuffer / columnsPerStripe);

        // after the column bits, we get the bank bits to interleave
        // over the banks
        bank = addr % banksPerRank;
        addr = addr / banksPerRank;

        // after the bank, we get the rank bits which thus interleave
        // over the ranks
        rank = addr % ranksPerChannel;
        addr = addr / ranksPerChannel;

        // lastly, get the row bits
        row = addr % rowsPerBank;
        addr = addr / rowsPerBank;
    } else if (addrMapping == Enums::RoCoRaBaCh) {
        // optimise for closed page mode and utilise maximum
        // parallelism of the DRAM (at the cost of power)

        // take out the lower-order column bits
        addr = addr / columnsPerStripe;

        // take out the channel part of the address, note that this has
        // to match with how accesses are interleaved between the
        // controllers in the address mapping
        addr = addr / channels;

        // start with the bank bits, as this provides the maximum
        // opportunity for parallelism between requests
        bank = addr % banksPerRank;
        addr = addr / banksPerRank;

        // next get the rank bits
        rank = addr % ranksPerChannel;
        addr = addr / ranksPerChannel;

        // next, the higher-order column bits
        addr = addr / (columnsPerRowBuffer / columnsPerStripe);

        // lastly, get the row bits
        row = addr % rowsPerBank;
        addr = addr / rowsPerBank;
    } else
        panic("Unknown address mapping policy chosen!");

    assert(rank < ranksPerChannel);
    assert(bank < banksPerRank);
    assert(row < rowsPerBank);
    assert(row < Bank::NO_ROW);

    DPRINTF(DRAM, "Address: %lld Rank %d Bank %d Row %d\n",
            dramPktAddr, rank, bank, row);

    // create the corresponding DRAM packet with the entry time and
    // ready time set to the current tick, the latter will be updated
    // later
    uint16_t bank_id = banksPerRank * rank + bank;
    return new DRAMPacket(pkt, isRead, rank, bank, row, bank_id, dramPktAddr,
                          size, ranks[rank]->banks[bank], *ranks[rank]);
}

void
DRAMCtrl::addToReadQueue(PacketPtr pkt, unsigned int pktCount)
{
    // only add to the read queue here. whenever the request is
    // eventually done, set the readyTime, and call schedule()
    assert(!pkt->isWrite());

    assert(pktCount != 0);

    // if the request size is larger than the burst size, the pkt is split
    // into multiple DRAM packets.
    // Note that if the pkt starting address is not aligned to the burst
    // size, the address of the first DRAM packet is kept unaligned.
    // Subsequent DRAM packets are aligned to burst size boundaries. This
    // is to ensure we accurately check read packets against packets in
    // the write queue.
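    // Worked example of the split below (values assumed for illustration
    // only): with burstSize = 64, a 128-byte read starting at addr = 0x70
    // becomes three bursts. The first covers [0x70, 0x80), with
    //   size = min((0x70 | 63) + 1, 0x70 + 128) - 0x70 = 0x80 - 0x70 = 16,
    // and the loop then continues burst-aligned at 0x80 and 0xc0 with
    // sizes 64 and 48, matching divCeil(offset + size, burstSize) =
    // divCeil(0x30 + 128, 64) = 3 packets in total.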
    Addr addr = pkt->getAddr();
    unsigned pktsServicedByWrQ = 0;
    BurstHelper* burst_helper = NULL;
    for (int cnt = 0; cnt < pktCount; ++cnt) {
        unsigned size = std::min((addr | (burstSize - 1)) + 1,
                                 pkt->getAddr() + pkt->getSize()) - addr;
        readPktSize[ceilLog2(size)]++;
        readBursts++;

        // First check write buffer to see if the data is already at
        // the controller
        bool foundInWrQ = false;
        Addr burst_addr = burstAlign(addr);
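        // Note (added for clarity; assumes the usual gem5 helper
        // definition): burstAlign() clears the low-order offset bits,
        // i.e. returns addr & ~(burstSize - 1), so every burst has one
        // canonical address that serves as the key of the isInWriteQueue
        // set. The set makes the membership test below O(1); the write
        // queue is only walked when a matching burst address exists.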
        // if the burst address is not present then there is no need
        // to look any further
        if (isInWriteQueue.find(burst_addr) != isInWriteQueue.end()) {
            for (const auto& p : writeQueue) {
                // check if the read is subsumed in the write queue
                // packet we are looking at
                if (p->addr <= addr && (addr + size) <= (p->addr + p->size)) {
                    foundInWrQ = true;
                    servicedByWrQ++;
                    pktsServicedByWrQ++;
                    DPRINTF(DRAM, "Read to addr %lld with size %d serviced by "
                            "write queue\n", addr, size);
                    bytesReadWrQ += burstSize;
                    break;
                }
            }
        }

        // If not found in the write q, make a DRAM packet and
        // push it onto the read queue
        if (!foundInWrQ) {

            // Make the burst helper for split packets
            if (pktCount > 1 && burst_helper == NULL) {
                DPRINTF(DRAM, "Read to addr %lld translates to %d "
                        "dram requests\n", pkt->getAddr(), pktCount);
                burst_helper = new BurstHelper(pktCount);
            }

            DRAMPacket* dram_pkt = decodeAddr(pkt, addr, size, true);
            dram_pkt->burstHelper = burst_helper;

            assert(!readQueueFull(1));
            rdQLenPdf[readQueue.size() + respQueue.size()]++;

            DPRINTF(DRAM, "Adding to read queue\n");

            readQueue.push_back(dram_pkt);

            // Update stats
            avgRdQLen = readQueue.size() + respQueue.size();
        }

        // Starting address of next dram pkt (aligned to burstSize boundary)
        addr = (addr | (burstSize - 1)) + 1;
    }

    // If all packets are serviced by the write queue, send the response back
    if (pktsServicedByWrQ == pktCount) {
        accessAndRespond(pkt, frontendLatency);
        return;
    }

    // Update how many split packets are serviced by the write queue
    if (burst_helper != NULL)
        burst_helper->burstsServiced = pktsServicedByWrQ;

    // If we are not already scheduled to get a request out of the
    // queue, do so now
    if (!nextReqEvent.scheduled()) {
        DPRINTF(DRAM, "Request scheduled immediately\n");
        schedule(nextReqEvent, curTick());
    }
}

void
DRAMCtrl::addToWriteQueue(PacketPtr pkt, unsigned int pktCount)
{
    // only add to the write queue here. whenever the request is
    // eventually done, set the readyTime, and call schedule()
    assert(pkt->isWrite());

    // if the request size is larger than the burst size, the pkt is split
    // into multiple DRAM packets
    Addr addr = pkt->getAddr();
    for (int cnt = 0; cnt < pktCount; ++cnt) {
        unsigned size = std::min((addr | (burstSize - 1)) + 1,
                                 pkt->getAddr() + pkt->getSize()) - addr;
        writePktSize[ceilLog2(size)]++;
        writeBursts++;

        // see if we can merge with an existing item in the write
        // queue and keep track of whether we have merged or not
        bool merged = isInWriteQueue.find(burstAlign(addr)) !=
            isInWriteQueue.end();

        // if the item was not merged we need to create a new write
        // and enqueue it
        if (!merged) {
            DRAMPacket* dram_pkt = decodeAddr(pkt, addr, size, false);

            assert(writeQueue.size() < writeBufferSize);
            wrQLenPdf[writeQueue.size()]++;

            DPRINTF(DRAM, "Adding to write queue\n");

            writeQueue.push_back(dram_pkt);
            isInWriteQueue.insert(burstAlign(addr));
            assert(writeQueue.size() == isInWriteQueue.size());

            // Update stats
            avgWrQLen = writeQueue.size();
        } else {
            DPRINTF(DRAM, "Merging write burst with existing queue entry\n");

            // keep track of the fact that this burst effectively
            // disappeared as it was merged with an existing one
            mergedWrBursts++;
        }

        // Starting address of next dram pkt (aligned to burstSize boundary)
        addr = (addr | (burstSize - 1)) + 1;
    }

    // we do not wait for the writes to be sent to the actual memory,
    // but instead take responsibility for the consistency here and
    // snoop the write queue for any upcoming reads
    // @todo, if a pkt size is larger than burst size, we might need a
    // different front end latency
    accessAndRespond(pkt, frontendLatency);

    // If we are not already scheduled to get a request out of the
    // queue, do so now
    if (!nextReqEvent.scheduled()) {
        DPRINTF(DRAM, "Request scheduled immediately\n");
        schedule(nextReqEvent, curTick());
    }
}

void
DRAMCtrl::printQs() const {
    DPRINTF(DRAM, "===READ QUEUE===\n\n");
    for (auto i = readQueue.begin(); i != readQueue.end(); ++i) {
        DPRINTF(DRAM, "Read %lu\n", (*i)->addr);
    }
    DPRINTF(DRAM, "\n===RESP QUEUE===\n\n");
    for (auto i = respQueue.begin(); i != respQueue.end(); ++i) {
        DPRINTF(DRAM, "Response %lu\n", (*i)->addr);
    }
    DPRINTF(DRAM, "\n===WRITE QUEUE===\n\n");
    for (auto i = writeQueue.begin(); i != writeQueue.end(); ++i) {
        DPRINTF(DRAM, "Write %lu\n", (*i)->addr);
    }
}

bool
DRAMCtrl::recvTimingReq(PacketPtr pkt)
{
    /// @todo temporary hack to deal with memory corruption issues until
    /// 4-phase transactions are complete
    for (int x = 0; x < pendingDelete.size(); x++)
        delete pendingDelete[x];
    pendingDelete.clear();

    // This is where we enter from the outside world
    DPRINTF(DRAM, "recvTimingReq: request %s addr %lld size %d\n",
            pkt->cmdString(), pkt->getAddr(), pkt->getSize());

    // simply drop inhibited packets and clean evictions
    if (pkt->memInhibitAsserted() ||
        pkt->cmd == MemCmd::CleanEvict) {
        DPRINTF(DRAM, "Inhibited packet or clean evict -- Dropping it now\n");
        pendingDelete.push_back(pkt);
        return true;
    }

    // Calc avg gap between requests
    if (prevArrival != 0) {
        totGap += curTick() - prevArrival;
    }
    prevArrival = curTick();


    // Find out how many DRAM packets a pkt translates to.
    // If the burst size is equal to or larger than the pkt size, then a pkt
    // translates to only one DRAM packet. Otherwise, a pkt translates to
    // multiple DRAM packets
    unsigned size = pkt->getSize();
    unsigned offset = pkt->getAddr() & (burstSize - 1);
    unsigned int dram_pkt_count = divCeil(offset + size, burstSize);
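    // For illustration (values assumed): a 4-byte access whose address
    // falls 62 bytes into a 64-byte burst straddles a burst boundary, so
    // dram_pkt_count = divCeil(62 + 4, 64) = 2, while the same access at
    // offset 0 gives divCeil(0 + 4, 64) = 1.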

    // check local buffers and do not accept if full
    if (pkt->isRead()) {
        assert(size != 0);
        if (readQueueFull(dram_pkt_count)) {
            DPRINTF(DRAM, "Read queue full, not accepting\n");
            // remember that we have to retry this port
            retryRdReq = true;
            numRdRetry++;
            return false;
        } else {
            addToReadQueue(pkt, dram_pkt_count);
            readReqs++;
            bytesReadSys += size;
        }
    } else if (pkt->isWrite()) {
        assert(size != 0);
        if (writeQueueFull(dram_pkt_count)) {
            DPRINTF(DRAM, "Write queue full, not accepting\n");
            // remember that we have to retry this port
            retryWrReq = true;
            numWrRetry++;
            return false;
        } else {
            addToWriteQueue(pkt, dram_pkt_count);
            writeReqs++;
            bytesWrittenSys += size;
        }
    } else {
        DPRINTF(DRAM, "Neither read nor write, ignore timing\n");
        neitherReadNorWrite++;
        accessAndRespond(pkt, 1);
    }

    return true;
}

void
DRAMCtrl::processRespondEvent()
{
    DPRINTF(DRAM,
            "processRespondEvent(): Some req has reached its readyTime\n");

    DRAMPacket* dram_pkt = respQueue.front();

    if (dram_pkt->burstHelper) {
        // it is a split packet
        dram_pkt->burstHelper->burstsServiced++;
        if (dram_pkt->burstHelper->burstsServiced ==
            dram_pkt->burstHelper->burstCount) {
            // we have now serviced all children packets of a system packet
            // so we can now respond to the requester
            // @todo we probably want to have a different front end and back
            // end latency for split packets
            accessAndRespond(dram_pkt->pkt, frontendLatency + backendLatency);
            delete dram_pkt->burstHelper;
            dram_pkt->burstHelper = NULL;
        }
    } else {
        // it is not a split packet
        accessAndRespond(dram_pkt->pkt, frontendLatency + backendLatency);
    }

    delete respQueue.front();
    respQueue.pop_front();

    if (!respQueue.empty()) {
        assert(respQueue.front()->readyTime >= curTick());
        assert(!respondEvent.scheduled());
        schedule(respondEvent, respQueue.front()->readyTime);
    } else {
        // if there is nothing left in any queue, signal a drain
        if (writeQueue.empty() && readQueue.empty() && drainManager) {
            DPRINTF(Drain, "DRAM controller done draining\n");
            drainManager->signalDrainDone();
            drainManager = NULL;
        }
    }

    // We have made a location in the queue available at this point,
    // so if there is a read that was forced to wait, retry now
    if (retryRdReq) {
        retryRdReq = false;
        port.sendRetryReq();
    }
}

bool
DRAMCtrl::chooseNext(std::deque<DRAMPacket*>& queue, bool switched_cmd_type)
{
    // This method does the arbitration between requests. The chosen
    // packet is simply moved to the head of the queue. The other
    // methods know that this is the place to look. For example, with
    // FCFS, this method does nothing
    assert(!queue.empty());

    // bool to indicate if a packet to an available rank is found
    bool found_packet = false;
    if (queue.size() == 1) {
        DRAMPacket* dram_pkt = queue.front();
        // available rank corresponds to state refresh idle
        if (ranks[dram_pkt->rank]->isAvailable()) {
            found_packet = true;
            DPRINTF(DRAM, "Single request, going to a free rank\n");
        } else {
            DPRINTF(DRAM, "Single request, going to a busy rank\n");
        }
        return found_packet;
    }

    if (memSchedPolicy == Enums::fcfs) {
        // check if there is a packet going to a free rank
        for (auto i = queue.begin(); i != queue.end(); ++i) {
            DRAMPacket* dram_pkt = *i;
            if (ranks[dram_pkt->rank]->isAvailable()) {
                queue.erase(i);
                queue.push_front(dram_pkt);
                found_packet = true;
                break;
            }
        }
    } else if (memSchedPolicy == Enums::frfcfs) {
        found_packet = reorderQueue(queue, switched_cmd_type);
    } else
        panic("No scheduling policy chosen\n");
    return found_packet;
}

bool
DRAMCtrl::reorderQueue(std::deque<DRAMPacket*>& queue, bool switched_cmd_type)
{
    // Only determine this when needed
    uint64_t earliest_banks = 0;

    // Search for row hits first, if no row hit is found then schedule the
    // packet to one of the earliest banks available
    bool found_packet = false;
    bool found_earliest_pkt = false;
    bool found_prepped_diff_rank_pkt = false;
    auto selected_pkt_it = queue.end();

    for (auto i = queue.begin(); i != queue.end(); ++i) {
        DRAMPacket* dram_pkt = *i;
        const Bank& bank = dram_pkt->bankRef;
        // check if the rank is busy; if so, skip to the next packet.
        // Otherwise check if it is a row hit
        if (dram_pkt->rankRef.isAvailable()) {
            if (bank.openRow == dram_pkt->row) {
                if (dram_pkt->rank == activeRank || switched_cmd_type) {
                    // FCFS within the hits, giving priority to commands
                    // that access the same rank as the previous burst
                    // to minimize bus turnaround delays.
                    // Only give rank priority when the command type is
                    // not changing
                    DPRINTF(DRAM, "Row buffer hit\n");
                    selected_pkt_it = i;
                    break;
                } else if (!found_prepped_diff_rank_pkt) {
                    // found a row hit for a command on a different rank
                    // than the previous burst
                    selected_pkt_it = i;
                    found_prepped_diff_rank_pkt = true;
                }
            } else if (!found_earliest_pkt && !found_prepped_diff_rank_pkt) {
                // packet going to a rank which is currently not waiting
                // for a refresh, no row hit, and we haven't found an
                // entry with a row hit to a new rank
                if (earliest_banks == 0)
                    // Determine entries with earliest bank prep delay.
                    // The function gives priority to commands that access
                    // the same rank as the previous burst and can prep
                    // the bank seamlessly
                    earliest_banks = minBankPrep(queue, switched_cmd_type);

                // FCFS - Bank is first available bank
                if (bits(earliest_banks, dram_pkt->bankId,
                         dram_pkt->bankId)) {
                    // Remember the packet to be scheduled to one of
                    // the earliest banks available, FCFS amongst the
                    // earliest banks
                    selected_pkt_it = i;
                    // if the packet found is going to a rank that is
                    // currently not busy then update found_packet to true
                    found_earliest_pkt = true;
                }
            }
        }
    }

    if (selected_pkt_it != queue.end()) {
        DRAMPacket* selected_pkt = *selected_pkt_it;
        queue.erase(selected_pkt_it);
        queue.push_front(selected_pkt);
        found_packet = true;
    }
    return found_packet;
}

void
DRAMCtrl::accessAndRespond(PacketPtr pkt, Tick static_latency)
{
    DPRINTF(DRAM, "Responding to Address %lld.. ", pkt->getAddr());

    bool needsResponse = pkt->needsResponse();
    // do the actual memory access which also turns the packet into a
    // response
    access(pkt);

    // turn packet around to go back to requester if response expected
    if (needsResponse) {
        // access already turned the packet into a response
        assert(pkt->isResponse());
        // response_time consumes the static latency and is charged also
        // with headerDelay that takes into account the delay provided by
        // the xbar and also the payloadDelay that takes into account the
        // number of data beats.
        Tick response_time = curTick() + static_latency + pkt->headerDelay +
                             pkt->payloadDelay;
        // Here we reset the timing of the packet before sending it out.
        pkt->headerDelay = pkt->payloadDelay = 0;

        // queue the packet in the response queue to be sent out after
        // the static latency has passed
        port.schedTimingResp(pkt, response_time);
    } else {
        // @todo the packet is going to be deleted, and the DRAMPacket
        // is still having a pointer to it
        pendingDelete.push_back(pkt);
    }

    DPRINTF(DRAM, "Done\n");

    return;
}

void
DRAMCtrl::activateBank(Rank& rank_ref, Bank& bank_ref,
                       Tick act_tick, uint32_t row)
{
    assert(rank_ref.actTicks.size() == activationLimit);

    DPRINTF(DRAM, "Activate at tick %d\n", act_tick);

    // update the open row
    assert(bank_ref.openRow == Bank::NO_ROW);
    bank_ref.openRow = row;

    // start counting anew, this covers both the case when we
    // auto-precharged, and when this access is forced to
    // precharge
    bank_ref.bytesAccessed = 0;
    bank_ref.rowAccesses = 0;

    ++rank_ref.numBanksActive;
    assert(rank_ref.numBanksActive <= banksPerRank);

    DPRINTF(DRAM, "Activate bank %d, rank %d at tick %lld, now got %d active\n",
            bank_ref.bank, rank_ref.rank, act_tick,
            ranks[rank_ref.rank]->numBanksActive);

    rank_ref.power.powerlib.doCommand(MemCommand::ACT, bank_ref.bank,
                                      divCeil(act_tick, tCK) -
                                      timeStampOffset);

    DPRINTF(DRAMPower, "%llu,ACT,%d,%d\n", divCeil(act_tick, tCK) -
            timeStampOffset, bank_ref.bank, rank_ref.rank);

    // The next access has to respect tRAS for this bank
    bank_ref.preAllowedAt = act_tick + tRAS;

    // Respect the row-to-column command delay
    bank_ref.colAllowedAt = std::max(act_tick + tRCD, bank_ref.colAllowedAt);

    // start by enforcing tRRD
    for (int i = 0; i < banksPerRank; i++) {
        // next activate to any bank in this rank must not happen
        // before tRRD
        if (bankGroupArch && (bank_ref.bankgr == rank_ref.banks[i].bankgr)) {
            // bank group architecture requires longer delays between
            // ACT commands within the same bank group. Use tRRD_L
            // in this case
            rank_ref.banks[i].actAllowedAt = std::max(act_tick + tRRD_L,
                                             rank_ref.banks[i].actAllowedAt);
        } else {
            // use the shorter tRRD value when either
            // 1) bank group architecture is not supported
            // 2) bank is in a different bank group
            rank_ref.banks[i].actAllowedAt = std::max(act_tick + tRRD,
                                             rank_ref.banks[i].actAllowedAt);
        }
    }

    // next, we deal with tXAW, if the activation limit is disabled
    // then we directly schedule an activate power event
    if (!rank_ref.actTicks.empty()) {
        // sanity check
        if (rank_ref.actTicks.back() &&
            (act_tick - rank_ref.actTicks.back()) < tXAW) {
            panic("Got %d activates in window %d (%llu - %llu) which "
                  "is smaller than %llu\n", activationLimit, act_tick -
                  rank_ref.actTicks.back(), act_tick,
                  rank_ref.actTicks.back(), tXAW);
        }

        // shift the times used for the book keeping, the last element
        // (highest index) is the oldest one and hence the lowest value
        rank_ref.actTicks.pop_back();

        // record a new activation (in the future)
        rank_ref.actTicks.push_front(act_tick);

        // cannot activate more than X times in time window tXAW, push the
        // next one (the X + 1'st activate) to be tXAW away from the
        // oldest in our window of X
        if (rank_ref.actTicks.back() &&
            (act_tick - rank_ref.actTicks.back()) < tXAW) {
            DPRINTF(DRAM, "Enforcing tXAW with X = %d, next activate "
                    "no earlier than %llu\n", activationLimit,
                    rank_ref.actTicks.back() + tXAW);
            for (int j = 0; j < banksPerRank; j++)
                // next activate must not happen before end of window
                rank_ref.banks[j].actAllowedAt =
                    std::max(rank_ref.actTicks.back() + tXAW,
                             rank_ref.banks[j].actAllowedAt);
        }
    }
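    // Worked example (added for illustration; values assumed): with
    // activationLimit X = 4 and tXAW = 40ns, actTicks holds the four most
    // recent ACT times, newest at the front. If it holds {35, 30, 20, 10}
    // (in ns) after recording an ACT at 35ns, the oldest entry is 10ns,
    // and since 35 - 10 < 40 the next ACT to any bank in this rank is
    // pushed out to no earlier than 10 + 40 = 50ns.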

    // at the point when this activate takes place, make sure we
    // transition to the active power state
    if (!rank_ref.activateEvent.scheduled())
        schedule(rank_ref.activateEvent, act_tick);
    else if (rank_ref.activateEvent.when() > act_tick)
        // move it sooner in time
        reschedule(rank_ref.activateEvent, act_tick);
}

void
DRAMCtrl::prechargeBank(Rank& rank_ref, Bank& bank, Tick pre_at, bool trace)
{
    // make sure the bank has an open row
    assert(bank.openRow != Bank::NO_ROW);

    // sample the bytes per activate here since we are closing
    // the page
    bytesPerActivate.sample(bank.bytesAccessed);

    bank.openRow = Bank::NO_ROW;

    // no precharge allowed before this one
    bank.preAllowedAt = pre_at;

    Tick pre_done_at = pre_at + tRP;

    bank.actAllowedAt = std::max(bank.actAllowedAt, pre_done_at);

    assert(rank_ref.numBanksActive != 0);
    --rank_ref.numBanksActive;

    DPRINTF(DRAM, "Precharging bank %d, rank %d at tick %lld, now got "
            "%d active\n", bank.bank, rank_ref.rank, pre_at,
            rank_ref.numBanksActive);

    if (trace) {

        rank_ref.power.powerlib.doCommand(MemCommand::PRE, bank.bank,
                                          divCeil(pre_at, tCK) -
                                          timeStampOffset);
        DPRINTF(DRAMPower, "%llu,PRE,%d,%d\n", divCeil(pre_at, tCK) -
                timeStampOffset, bank.bank, rank_ref.rank);
    }
    // if we look at the current number of active banks we might be
    // tempted to think the DRAM is now idle, however this can be
    // undone by an activate that is scheduled to happen before we
    // would have reached the idle state, so schedule an event and
    // rather check once we actually make it to the point in time when
    // the (last) precharge takes place
    if (!rank_ref.prechargeEvent.scheduled())
        schedule(rank_ref.prechargeEvent, pre_done_at);
    else if (rank_ref.prechargeEvent.when() < pre_done_at)
        reschedule(rank_ref.prechargeEvent, pre_done_at);
}

void
DRAMCtrl::doDRAMAccess(DRAMPacket* dram_pkt)
{
    DPRINTF(DRAM, "Timing access to addr %lld, rank/bank/row %d %d %d\n",
            dram_pkt->addr, dram_pkt->rank, dram_pkt->bank, dram_pkt->row);

    // get the rank
    Rank& rank = dram_pkt->rankRef;

    // get the bank
    Bank& bank = dram_pkt->bankRef;

    // for the state we need to track if it is a row hit or not
    bool row_hit = true;

    // respect any constraints on the command (e.g. tRCD or tCCD)
    Tick cmd_at = std::max(bank.colAllowedAt, curTick());

    // Determine the access latency and update the bank state
    if (bank.openRow == dram_pkt->row) {
        // nothing to do
    } else {
        row_hit = false;

        // If there is a page open, precharge it.
        if (bank.openRow != Bank::NO_ROW) {
            prechargeBank(rank, bank, std::max(bank.preAllowedAt, curTick()));
        }

        // next we need to account for the delay in activating the
        // page
        Tick act_tick = std::max(bank.actAllowedAt, curTick());

        // Record the activation and deal with all the global timing
        // constraints caused by a new activation (tRRD and tXAW)
        activateBank(rank, bank, act_tick, dram_pkt->row);

        // issue the command as early as possible
        cmd_at = bank.colAllowedAt;
    }

    // we need to wait until the bus is available before we can issue
    // the command
    cmd_at = std::max(cmd_at, busBusyUntil - tCL);

    // update the packet ready time
    dram_pkt->readyTime = cmd_at + tCL + tBURST;
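    // For illustration (comment added, not in the original): the access
    // occupies two back-to-back windows on the command/data bus,
    //   cmd_at        .. cmd_at + tCL           (CAS latency, no data yet)
    //   cmd_at + tCL  .. cmd_at + tCL + tBURST  (data beats on the bus)
    // so readyTime marks the tick at which the last data beat has been
    // transferred.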

    // only one burst can use the bus at any one point in time
    assert(dram_pkt->readyTime - busBusyUntil >= tBURST);

    // update the time for the next read/write burst for each
    // bank (add a max with tCCD/tCCD_L here)
    Tick cmd_dly;
    for (int j = 0; j < ranksPerChannel; j++) {
        for (int i = 0; i < banksPerRank; i++) {
            // the next burst to the same bank group in this rank must not
            // happen before tCCD_L. The different bank group timing
            // requirement is tBURST; add tCS for different ranks
            if (dram_pkt->rank == j) {
                if (bankGroupArch &&
                    (bank.bankgr == ranks[j]->banks[i].bankgr)) {
                    // bank group architecture requires longer delays between
                    // RD/WR burst commands to the same bank group.
                    // Use tCCD_L in this case
                    cmd_dly = tCCD_L;
                } else {
                    // use tBURST (equivalent to tCCD_S), the shorter
                    // cas-to-cas delay value, when either:
                    // 1) bank group architecture is not supported
                    // 2) bank is in a different bank group
                    cmd_dly = tBURST;
                }
            } else {
                // a different rank is by default in a different bank group.
                // use tBURST (equivalent to tCCD_S), which is the shorter
                // cas-to-cas delay in this case.
                // Add tCS to account for rank-to-rank bus delay requirements
                cmd_dly = tBURST + tCS;
            }
            ranks[j]->banks[i].colAllowedAt = std::max(cmd_at + cmd_dly,
                                              ranks[j]->banks[i].colAllowedAt);
        }
    }

    // Save rank of current access
    activeRank = dram_pkt->rank;

    // If this is a write, we also need to respect the write recovery
    // time before a precharge, in the case of a read, respect the
    // read to precharge constraint
    bank.preAllowedAt = std::max(bank.preAllowedAt,
                                 dram_pkt->isRead ? cmd_at + tRTP :
                                 dram_pkt->readyTime + tWR);

    // increment the bytes accessed and the accesses per row
    bank.bytesAccessed += burstSize;
    ++bank.rowAccesses;

    // if we reached the max, then issue with an auto-precharge
    bool auto_precharge = pageMgmt == Enums::close ||
        bank.rowAccesses == maxAccessesPerRow;

    // if we did not hit the limit, we might still want to
    // auto-precharge
    if (!auto_precharge &&
        (pageMgmt == Enums::open_adaptive ||
         pageMgmt == Enums::close_adaptive)) {
        // a twist on the open and close page policies:
        // 1) open_adaptive page policy does not blindly keep the
        //    page open, but closes it if there are no row hits and there
        //    are bank conflicts in the queue
        // 2) close_adaptive page policy does not blindly close the
        //    page, but closes it only if there are no row hits in the queue.
        //    In this case, only force an auto precharge when there
        //    are no same page hits in the queue
        bool got_more_hits = false;
        bool got_bank_conflict = false;

        // either look at the read queue or write queue
        const deque<DRAMPacket*>& queue = dram_pkt->isRead ? readQueue :
            writeQueue;
        auto p = queue.begin();
        // make sure we are not considering the packet that we are
        // currently dealing with (which is the head of the queue)
        ++p;

        // keep on looking until we find a hit or reach the end of the queue
        // 1) if a hit is found, then both open and close adaptive
        //    policies keep the page open
        // 2) if no hit is found, got_bank_conflict is set to true if a
        //    bank conflict request is waiting in the queue
        while (!got_more_hits && p != queue.end()) {
            bool same_rank_bank = (dram_pkt->rank == (*p)->rank) &&
                (dram_pkt->bank == (*p)->bank);
            bool same_row = dram_pkt->row == (*p)->row;
            got_more_hits |= same_rank_bank && same_row;
            got_bank_conflict |= same_rank_bank && !same_row;
            ++p;
        }

        // auto pre-charge when either
        // 1) open_adaptive policy, we have not got any more hits, and
        //    have a bank conflict
        // 2) close_adaptive policy and we have not got any more hits
        auto_precharge = !got_more_hits &&
            (got_bank_conflict || pageMgmt == Enums::close_adaptive);
    }

    // DRAMPower trace command to be written
    std::string mem_cmd = dram_pkt->isRead ? "RD" : "WR";

    // MemCommand required for DRAMPower library
    MemCommand::cmds command = (mem_cmd == "RD") ? MemCommand::RD :
        MemCommand::WR;

    // if this access should use auto-precharge, then we are
    // closing the row
    if (auto_precharge) {
        // if auto-precharge, push a PRE command at the correct tick to the
        // list used by the DRAMPower library to calculate power
        prechargeBank(rank, bank, std::max(curTick(), bank.preAllowedAt));

        DPRINTF(DRAM, "Auto-precharged bank: %d\n", dram_pkt->bankId);
    }

    // Update bus state
    busBusyUntil = dram_pkt->readyTime;

    DPRINTF(DRAM, "Access to %lld, ready at %lld bus busy until %lld.\n",
            dram_pkt->addr, dram_pkt->readyTime, busBusyUntil);

    dram_pkt->rankRef.power.powerlib.doCommand(command, dram_pkt->bank,
                                               divCeil(cmd_at, tCK) -
                                               timeStampOffset);

    DPRINTF(DRAMPower, "%llu,%s,%d,%d\n", divCeil(cmd_at, tCK) -
            timeStampOffset, mem_cmd, dram_pkt->bank, dram_pkt->rank);

    // Update the minimum timing between the requests, this is a
    // conservative estimate of when we have to schedule the next
    // request to not introduce any unnecessary bubbles. In most cases
    // we will wake up sooner than we have to.
    nextReqTime = busBusyUntil - (tRP + tRCD + tCL);

    // Update the stats and schedule the next request
    if (dram_pkt->isRead) {
        ++readsThisTime;
        if (row_hit)
            readRowHits++;
        bytesReadDRAM += burstSize;
        perBankRdBursts[dram_pkt->bankId]++;

        // Update latency stats
        totMemAccLat += dram_pkt->readyTime - dram_pkt->entryTime;
        totBusLat += tBURST;
        totQLat += cmd_at - dram_pkt->entryTime;
    } else {
        ++writesThisTime;
        if (row_hit)
            writeRowHits++;
        bytesWritten += burstSize;
        perBankWrBursts[dram_pkt->bankId]++;
    }
}

void
DRAMCtrl::processNextReqEvent()
{
    int busyRanks = 0;
    for (auto r : ranks) {
        if (!r->isAvailable()) {
            // rank is busy refreshing
            busyRanks++;

            // let the rank know that if it was waiting to drain, it
            // is now done and ready to proceed
            r->checkDrainDone();
        }
    }

    if (busyRanks == ranksPerChannel) {
        // if all ranks are refreshing wait for them to finish
        // and stall this state machine without taking any further
        // action, and do not schedule a new nextReqEvent
        return;
    }

    // pre-emptively set to false. Overwrite if in READ_TO_WRITE
    // or WRITE_TO_READ state
    bool switched_cmd_type = false;
    if (busState == READ_TO_WRITE) {
        DPRINTF(DRAM, "Switching to writes after %d reads with %d reads "
                "waiting\n", readsThisTime, readQueue.size());

        // sample and reset the read-related stats as we are now
        // transitioning to writes, and all reads are done
        rdPerTurnAround.sample(readsThisTime);
        readsThisTime = 0;

        // now proceed to do the actual writes
        busState = WRITE;
        switched_cmd_type = true;
    } else if (busState == WRITE_TO_READ) {
        DPRINTF(DRAM, "Switching to reads after %d writes with %d writes "
                "waiting\n", writesThisTime, writeQueue.size());

        wrPerTurnAround.sample(writesThisTime);
        writesThisTime = 0;

        busState = READ;
        switched_cmd_type = true;
    }

    // when we get here it is either a read or a write
    if (busState == READ) {

        // track if we should switch or not
        bool switch_to_writes = false;

        if (readQueue.empty()) {
            // In the case there is no read request to go next,
            // trigger writes if we have passed the low threshold (or
            // if we are draining)
            if (!writeQueue.empty() &&
                (drainManager || writeQueue.size() > writeLowThreshold)) {

                switch_to_writes = true;
            } else {
                // check if we are drained
                if (respQueue.empty() && drainManager) {
                    DPRINTF(Drain, "DRAM controller done draining\n");
                    drainManager->signalDrainDone();
                    drainManager = NULL;
                }

                // nothing to do, not even any point in scheduling an
                // event for the next request
                return;
            }
        } else {
            // bool to check if there is a read to a free rank
            bool found_read = false;

            // Figure out which read request goes next, and move it to the
            // front of the read queue
            found_read = chooseNext(readQueue, switched_cmd_type);

            // if no read to an available rank is found then return
            // at this point. There could be writes to the available ranks
            // which are above the required threshold. However, to
            // avoid adding more complexity to the code, return and wait
            // for a refresh event to kick things into action again.
            if (!found_read)
                return;

            DRAMPacket* dram_pkt = readQueue.front();
            assert(dram_pkt->rankRef.isAvailable());
            // here we get a bit creative and shift the bus busy time not
            // just the tWTR, but also a CAS latency to capture the fact
            // that we are allowed to prepare a new bank, but not issue a
            // read command until after tWTR, in essence we capture a
            // bubble on the data bus that is tWTR + tCL
            if (switched_cmd_type && dram_pkt->rank == activeRank) {
                busBusyUntil += tWTR + tCL;
            }

            doDRAMAccess(dram_pkt);

            // At this point we're done dealing with the request
            readQueue.pop_front();

            // sanity check
            assert(dram_pkt->size <= burstSize);
            assert(dram_pkt->readyTime >= curTick());

            // Insert into response queue. It will be sent back to the
            // requestor at its readyTime
            if (respQueue.empty()) {
                assert(!respondEvent.scheduled());
                schedule(respondEvent, dram_pkt->readyTime);
            } else {
                assert(respQueue.back()->readyTime <= dram_pkt->readyTime);
                assert(respondEvent.scheduled());
            }

            respQueue.push_back(dram_pkt);

            // we have so many writes that we have to transition
            if (writeQueue.size() > writeHighThreshold) {
                switch_to_writes = true;
            }
        }

        // switching to writes, either because the read queue is empty
        // and the writes have passed the low threshold (or we are
        // draining), or because the writes hit the high threshold
        if (switch_to_writes) {
            // transition to writing
            busState = READ_TO_WRITE;
        }
    } else {
        // bool to check if a write to a free rank is found
        bool found_write = false;

        found_write = chooseNext(writeQueue, switched_cmd_type);

        // if no writes to an available rank are found then return.
        // There could be reads to the available ranks. However, to avoid
        // adding more complexity to the code, return at this point and wait
        // for a refresh event to kick things into action again.
        if (!found_write)
            return;

        DRAMPacket* dram_pkt = writeQueue.front();
        assert(dram_pkt->rankRef.isAvailable());
        // sanity check
        assert(dram_pkt->size <= burstSize);

        // add a bubble to the data bus, as defined by the
        // tRTW when access is to the same rank as previous burst.
        // Different rank timing is handled with tCS, which is
        // applied to colAllowedAt
        if (switched_cmd_type && dram_pkt->rank == activeRank) {
            busBusyUntil += tRTW;
        }

        doDRAMAccess(dram_pkt);

        writeQueue.pop_front();
        isInWriteQueue.erase(burstAlign(dram_pkt->addr));
        delete dram_pkt;

        // If we emptied the write queue, or got sufficiently below the
        // threshold (using the minWritesPerSwitch as the hysteresis) and
        // are not draining, or we have reads waiting and have done enough
        // writes, then switch to reads.
        if (writeQueue.empty() ||
            (writeQueue.size() + minWritesPerSwitch < writeLowThreshold &&
             !drainManager) ||
            (!readQueue.empty() && writesThisTime >= minWritesPerSwitch)) {
            // turn the bus back around for reads again
            busState = WRITE_TO_READ;

            // note that we switch back to reads also in the idle
            // case, which eventually will check for any draining and
            // also pause any further scheduling if there is really
            // nothing to do
        }
    }
    // It is possible that a refresh to another rank kicks things back into
    // action before reaching this point.
    if (!nextReqEvent.scheduled())
        schedule(nextReqEvent, std::max(nextReqTime, curTick()));

    // If there is space available and we have writes waiting then let
    // them retry. This is done here to ensure that the retry does not
    // cause a nextReqEvent to be scheduled before we do so as part of
    // the next request processing
    if (retryWrReq && writeQueue.size() < writeBufferSize) {
        retryWrReq = false;
        port.sendRetryReq();
    }
}

uint64_t
DRAMCtrl::minBankPrep(const deque<DRAMPacket*>& queue,
                      bool switched_cmd_type) const
{
    uint64_t bank_mask = 0;
    Tick min_act_at = MaxTick;

    uint64_t bank_mask_same_rank = 0;
    Tick min_act_at_same_rank = MaxTick;

    // Give precedence to commands that access the same rank as the
    // previous command
    bool same_rank_match = false;

    // determine if we have queued transactions targeting the
    // banks in question
    vector<bool> got_waiting(ranksPerChannel * banksPerRank, false);
    for (const auto& p : queue) {
        if (p->rankRef.isAvailable())
            got_waiting[p->bankId] = true;
    }

    for (int i = 0; i < ranksPerChannel; i++) {
        for (int j = 0; j < banksPerRank; j++) {
            uint16_t bank_id = i * banksPerRank + j;

            // if we have waiting requests for the bank, and it is
            // amongst the first available, update the mask
            if (got_waiting[bank_id]) {
                // make sure this rank is not currently refreshing.
                assert(ranks[i]->isAvailable());
                // simplistic approximation of when the bank can issue
                // an activate, ignoring any rank-to-rank switching
                // cost in this calculation
                Tick act_at = ranks[i]->banks[j].openRow == Bank::NO_ROW ?
                    ranks[i]->banks[j].actAllowedAt :
                    std::max(ranks[i]->banks[j].preAllowedAt, curTick()) + tRP;
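                // Illustrative note (added; values assumed): if the bank
                // still has a row open, the earliest ACT is the precharge
                // time plus tRP, e.g. with preAllowedAt = 1000 ticks,
                // curTick() = 1200 and tRP = 100, act_at =
                // max(1000, 1200) + 100 = 1300; a bank that is already
                // closed can activate as soon as actAllowedAt permits.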

                // prioritize commands that access the same rank as the
                // previous burst.
                // Calculate the bank mask separately for this case and
                // evaluate it after the loop iterations complete
                if (i == activeRank && ranksPerChannel > 1) {
                    if (act_at <= min_act_at_same_rank) {
                        // reset the same rank bank mask if a new minimum
                        // is found and the previous minimum could not
                        // immediately send an ACT
                        if (act_at < min_act_at_same_rank &&
                            min_act_at_same_rank > curTick())
                            bank_mask_same_rank = 0;

                        // Set flag indicating that a same rank
                        // opportunity was found
                        same_rank_match = true;

                        // set the bit corresponding to the available bank
                        replaceBits(bank_mask_same_rank, bank_id, bank_id, 1);
                        min_act_at_same_rank = act_at;
                    }
                } else {
                    if (act_at <= min_act_at) {
                        // reset the bank mask if a new minimum is found
                        // and the previous minimum could not immediately
                        // send an ACT
                        if (act_at < min_act_at && min_act_at > curTick())
                            bank_mask = 0;
                        // set the bit corresponding to the available bank
                        replaceBits(bank_mask, bank_id, bank_id, 1);
                        min_act_at = act_at;
                    }
                }
            }
        }
    }

    // Determine the earliest time when the next burst can issue based
    // on the current busBusyUntil delay.
    // Offset by tRCD to correlate with the ACT timing variables
    Tick min_cmd_at = busBusyUntil - tCL - tRCD;

    // if we have multiple ranks and all waiting packets are accessing
    // a rank which was previously active, then bank_mask_same_rank will
    // be set to a value while bank_mask will remain 0. In this case, the
    // function should return the value of bank_mask_same_rank.
    // else if waiting packets access a rank which was previously active
    // and other ranks, prioritize same rank accesses that can issue B2B.
    // Only optimize for same ranks when the command type does not change;
    // we do not want to unnecessarily incur tWTR
    //
    // Resulting FCFS prioritization order is:
    // 1) Commands that access the same rank as the previous burst
    //    and can prep the bank seamlessly.
    // 2) Commands (any rank) with earliest bank prep
    if ((bank_mask == 0) || (!switched_cmd_type && same_rank_match &&
        min_act_at_same_rank <= min_cmd_at)) {
        bank_mask = bank_mask_same_rank;
    }

    return bank_mask;
}

DRAMCtrl::Rank::Rank(DRAMCtrl& _memory, const DRAMCtrlParams* _p)
    : EventManager(&_memory), memory(_memory),
      pwrStateTrans(PWR_IDLE), pwrState(PWR_IDLE), pwrStateTick(0),
      refreshState(REF_IDLE), refreshDueAt(0),
      power(_p, false), numBanksActive(0),
      activateEvent(*this), prechargeEvent(*this),
      refreshEvent(*this), powerEvent(*this)
{ }

void
DRAMCtrl::Rank::startup(Tick ref_tick)
{
    assert(ref_tick > curTick());

    pwrStateTick = curTick();

    // kick off the refresh, and give ourselves enough time to
    // precharge
    schedule(refreshEvent, ref_tick);
}

void
DRAMCtrl::Rank::suspend()
{
    deschedule(refreshEvent);
}

void
DRAMCtrl::Rank::checkDrainDone()
{
    // if this rank was waiting to drain it is now able to proceed to
    // precharge
    if (refreshState == REF_DRAIN) {
        DPRINTF(DRAM, "Refresh drain done, now precharging\n");

        refreshState = REF_PRE;

        // hand control back to the refresh event loop
        schedule(refreshEvent, curTick());
    }
}
1553
1554 void
1555 DRAMCtrl::Rank::processActivateEvent()
1556 {
1557 // we should transition to the active state as soon as any bank is active
1558 if (pwrState != PWR_ACT)
1559 // note that at this point numBanksActive could be back at
1560 // zero again due to a precharge scheduled in the future
1561 schedulePowerEvent(PWR_ACT, curTick());
1562 }
1563
1564 void
1565 DRAMCtrl::Rank::processPrechargeEvent()
1566 {
1567 // if we reached zero, then special conditions apply as we track
1568 // if all banks are precharged for the power models
1569 if (numBanksActive == 0) {
1570 // we should transition to the idle state when the last bank
1571 // is precharged
1572 schedulePowerEvent(PWR_IDLE, curTick());
1573 }
1574 }
1575
1576 void
1577 DRAMCtrl::Rank::processRefreshEvent()
1578 {
1579 // when first preparing the refresh, remember when it was due
1580 if (refreshState == REF_IDLE) {
1581 // remember when the refresh is due
1582 refreshDueAt = curTick();
1583
1584 // proceed to drain
1585 refreshState = REF_DRAIN;
1586
1587 DPRINTF(DRAM, "Refresh due\n");
1588 }
1589
1590 // let any scheduled read or write to the same rank go ahead;
1591 // once it completes, it will hand control back to this event
1592 // loop
1593 if (refreshState == REF_DRAIN) {
1594 // if a request is at the moment being handled and this request is
1595 // accessing the current rank then wait for it to finish
1596 if ((rank == memory.activeRank)
1597 && (memory.nextReqEvent.scheduled())) {
1598 // hand control over to the request loop until it is
1599 // evaluated next
1600 DPRINTF(DRAM, "Refresh awaiting draining\n");
1601
1602 return;
1603 } else {
1604 refreshState = REF_PRE;
1605 }
1606 }
1607
1608 // at this point, ensure that all banks are precharged
1609 if (refreshState == REF_PRE) {
1610 // precharge any active bank if we are not already in the idle
1611 // state
1612 if (pwrState != PWR_IDLE) {
1613 // at the moment, we use a precharge all even if there is
1614 // only a single bank open
1615 DPRINTF(DRAM, "Precharging all\n");
1616
1617 // first determine when we can precharge
1618 Tick pre_at = curTick();
1619
1620 for (auto &b : banks) {
1621 // respect both causality and any existing bank
1622 // constraints, some banks could already have a
1623 // (auto) precharge scheduled
1624 pre_at = std::max(b.preAllowedAt, pre_at);
1625 }
1626
1627 // make sure all banks per rank are precharged, and for those that
1628 // already are, update their availability
1629 Tick act_allowed_at = pre_at + memory.tRP;
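// (a bank precharged at pre_at cannot be activated again until
// the row precharge time tRP has elapsed)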
1630
1631 for (auto &b : banks) {
1632 if (b.openRow != Bank::NO_ROW) {
1633 memory.prechargeBank(*this, b, pre_at, false);
1634 } else {
1635 b.actAllowedAt = std::max(b.actAllowedAt, act_allowed_at);
1636 b.preAllowedAt = std::max(b.preAllowedAt, pre_at);
1637 }
1638 }
1639
1640 // precharge all banks in rank
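// (DRAMPower keeps time in memory-clock cycles rather than ticks,
// hence the divCeil by tCK and the timeStampOffset subtraction)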
1641 power.powerlib.doCommand(MemCommand::PREA, 0,
1642 divCeil(pre_at, memory.tCK) -
1643 memory.timeStampOffset);
1644
1645 DPRINTF(DRAMPower, "%llu,PREA,0,%d\n",
1646 divCeil(pre_at, memory.tCK) -
1647 memory.timeStampOffset, rank);
1648 } else {
1649 DPRINTF(DRAM, "All banks already precharged, starting refresh\n");
1650
1651 // go ahead and kick the power state machine into gear if
1652 // we are already idle
1653 schedulePowerEvent(PWR_REF, curTick());
1654 }
1655
1656 refreshState = REF_RUN;
1657 assert(numBanksActive == 0);
1658
1659 // wait for all banks to be precharged, at which point the
1660 // power state machine will transition to the idle state, and
1661 // automatically move to a refresh, at that point it will also
1662 // call this method to get the refresh event loop going again
1663 return;
1664 }
1665
1666 // last but not least we perform the actual refresh
1667 if (refreshState == REF_RUN) {
1668 // should never get here with any banks active
1669 assert(numBanksActive == 0);
1670 assert(pwrState == PWR_REF);
1671
1672 Tick ref_done_at = curTick() + memory.tRFC;
1673
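// no bank in the rank may be activated again until the refresh
// cycle time tRFC has elapsed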
1674 for (auto &b : banks) {
1675 b.actAllowedAt = ref_done_at;
1676 }
1677
1678 // at the moment this affects all ranks
1679 power.powerlib.doCommand(MemCommand::REF, 0,
1680 divCeil(curTick(), memory.tCK) -
1681 memory.timeStampOffset);
1682
1683 // at the moment, sort the list of commands and update the
1684 // counters for the DRAMPower library when doing a refresh
1685 sort(power.powerlib.cmdList.begin(),
1686 power.powerlib.cmdList.end(), DRAMCtrl::sortTime);
1687
1688 // update the counters for DRAMPower, passing false to
1689 // indicate that this is not the last command in the
1690 // list. DRAMPower requires this information for the
1691 // correct calculation of the background energy at the end
1692 // of the simulation. Ideally we would want to call this
1693 // function with true once at the end of the
1694 // simulation. However, the discarded energy is extremely
1695 // small and does not affect the final results.
1696 power.powerlib.updateCounters(false);
1697
1698 // call the energy function
1699 power.powerlib.calcEnergy();
1700
1701 // Update the stats
1702 updatePowerStats();
1703
1704 DPRINTF(DRAMPower, "%llu,REF,0,%d\n", divCeil(curTick(), memory.tCK) -
1705 memory.timeStampOffset, rank);
1706
1707 // make sure we did not wait so long that we cannot make up
1708 // for it
1709 if (refreshDueAt + memory.tREFI < ref_done_at) {
1710 fatal("Refresh was delayed so long we cannot catch up\n");
1711 }
1712
1713 // compensate for the delay in actually performing the refresh
1714 // when scheduling the next one
1715 schedule(refreshEvent, refreshDueAt + memory.tREFI - memory.tRP);
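// (scheduling tRP early leaves time for the precharge-all that
// precedes the next refresh, mirroring the margin used in startup())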
1716
1717 assert(!powerEvent.scheduled());
1718
1719 // move to the idle power state once the refresh is done, this
1720 // will also move the refresh state machine to the refresh
1721 // idle state
1722 schedulePowerEvent(PWR_IDLE, ref_done_at);
1723
1724 DPRINTF(DRAMState, "Refresh done at %llu and next refresh at %llu\n",
1725 ref_done_at, refreshDueAt + memory.tREFI);
1726 }
1727 }
1728
1729 void
1730 DRAMCtrl::Rank::schedulePowerEvent(PowerState pwr_state, Tick tick)
1731 {
1732 // respect causality
1733 assert(tick >= curTick());
1734
1735 if (!powerEvent.scheduled()) {
1736 DPRINTF(DRAMState, "Scheduling power event at %llu to state %d\n",
1737 tick, pwr_state);
1738
1739 // insert the new transition
1740 pwrStateTrans = pwr_state;
1741
1742 schedule(powerEvent, tick);
1743 } else {
1744 panic("Scheduled power event at %llu to state %d, "
1745 "with scheduled event at %llu to %d\n", tick, pwr_state,
1746 powerEvent.when(), pwrStateTrans);
1747 }
1748 }
1749
1750 void
1751 DRAMCtrl::Rank::processPowerEvent()
1752 {
1753 // remember where we were, and for how long
1754 Tick duration = curTick() - pwrStateTick;
1755 PowerState prev_state = pwrState;
1756
1757 // update the accounting
1758 pwrStateTime[prev_state] += duration;
1759
1760 pwrState = pwrStateTrans;
1761 pwrStateTick = curTick();
1762
1763 if (pwrState == PWR_IDLE) {
1764 DPRINTF(DRAMState, "All banks precharged\n");
1765
1766 // if we were refreshing, make sure we start scheduling requests again
1767 if (prev_state == PWR_REF) {
1768 DPRINTF(DRAMState, "Was refreshing for %llu ticks\n", duration);
1769 assert(pwrState == PWR_IDLE);
1770
1771 // kick things into action again
1772 refreshState = REF_IDLE;
1773 // a request event could already be scheduled by the state
1774 // machine of the other rank
1775 if (!memory.nextReqEvent.scheduled())
1776 schedule(memory.nextReqEvent, curTick());
1777 } else {
1778 assert(prev_state == PWR_ACT);
1779
1780 // if we have a pending refresh, and are now moving to
1781 // the idle state, directly transition to a refresh
1782 if (refreshState == REF_RUN) {
1783 // there should be nothing waiting at this point
1784 assert(!powerEvent.scheduled());
1785
1786 // update the state in zero time and proceed below
1787 pwrState = PWR_REF;
1788 }
1789 }
1790 }
1791
1792 // if we have transitioned to the refresh state, let the refresh
1793 // state machine know of this state update and let it deal with
1794 // the scheduling of the next power state transition as well as
1795 // the following refresh
1796 if (pwrState == PWR_REF) {
1797 DPRINTF(DRAMState, "Refreshing\n");
1798 // kick the refresh event loop into action again, and that
1799 // in turn will schedule a transition to the idle power
1800 // state once the refresh is done
1801 assert(refreshState == REF_RUN);
1802 processRefreshEvent();
1803 }
1804 }
1805
1806 void
1807 DRAMCtrl::Rank::updatePowerStats()
1808 {
1809 // Get the energy and power from DRAMPower
1810 Data::MemoryPowerModel::Energy energy =
1811 power.powerlib.getEnergy();
1812 Data::MemoryPowerModel::Power rank_power =
1813 power.powerlib.getPower();
1814
1815 actEnergy = energy.act_energy * memory.devicesPerRank;
1816 preEnergy = energy.pre_energy * memory.devicesPerRank;
1817 readEnergy = energy.read_energy * memory.devicesPerRank;
1818 writeEnergy = energy.write_energy * memory.devicesPerRank;
1819 refreshEnergy = energy.ref_energy * memory.devicesPerRank;
1820 actBackEnergy = energy.act_stdby_energy * memory.devicesPerRank;
1821 preBackEnergy = energy.pre_stdby_energy * memory.devicesPerRank;
1822 totalEnergy = energy.total_energy * memory.devicesPerRank;
1823 averagePower = rank_power.average_power * memory.devicesPerRank;
1824 }
1825
1826 void
1827 DRAMCtrl::Rank::regStats()
1828 {
1829 using namespace Stats;
1830
1831 pwrStateTime
1832 .init(5)
1833 .name(name() + ".memoryStateTime")
1834 .desc("Time in different power states");
1835 pwrStateTime.subname(0, "IDLE");
1836 pwrStateTime.subname(1, "REF");
1837 pwrStateTime.subname(2, "PRE_PDN");
1838 pwrStateTime.subname(3, "ACT");
1839 pwrStateTime.subname(4, "ACT_PDN");
1840
1841 actEnergy
1842 .name(name() + ".actEnergy")
1843 .desc("Energy for activate commands per rank (pJ)");
1844
1845 preEnergy
1846 .name(name() + ".preEnergy")
1847 .desc("Energy for precharge commands per rank (pJ)");
1848
1849 readEnergy
1850 .name(name() + ".readEnergy")
1851 .desc("Energy for read commands per rank (pJ)");
1852
1853 writeEnergy
1854 .name(name() + ".writeEnergy")
1855 .desc("Energy for write commands per rank (pJ)");
1856
1857 refreshEnergy
1858 .name(name() + ".refreshEnergy")
1859 .desc("Energy for refresh commands per rank (pJ)");
1860
1861 actBackEnergy
1862 .name(name() + ".actBackEnergy")
1863 .desc("Energy for active background per rank (pJ)");
1864
1865 preBackEnergy
1866 .name(name() + ".preBackEnergy")
1867 .desc("Energy for precharge background per rank (pJ)");
1868
1869 totalEnergy
1870 .name(name() + ".totalEnergy")
1871 .desc("Total energy per rank (pJ)");
1872
1873 averagePower
1874 .name(name() + ".averagePower")
1875 .desc("Core power per rank (mW)");
1876 }

1877 void
1878 DRAMCtrl::regStats()
1879 {
1880 using namespace Stats;
1881
1882 AbstractMemory::regStats();
1883
1884 for (auto r : ranks) {
1885 r->regStats();
1886 }
1887
1888 readReqs
1889 .name(name() + ".readReqs")
1890 .desc("Number of read requests accepted");
1891
1892 writeReqs
1893 .name(name() + ".writeReqs")
1894 .desc("Number of write requests accepted");
1895
1896 readBursts
1897 .name(name() + ".readBursts")
1898 .desc("Number of DRAM read bursts, "
1899 "including those serviced by the write queue");
1900
1901 writeBursts
1902 .name(name() + ".writeBursts")
1903 .desc("Number of DRAM write bursts, "
1904 "including those merged in the write queue");
1905
1906 servicedByWrQ
1907 .name(name() + ".servicedByWrQ")
1908 .desc("Number of DRAM read bursts serviced by the write queue");
1909
1910 mergedWrBursts
1911 .name(name() + ".mergedWrBursts")
1912 .desc("Number of DRAM write bursts merged with an existing one");
1913
1914 neitherReadNorWrite
1915 .name(name() + ".neitherReadNorWriteReqs")
1916 .desc("Number of requests that are neither read nor write");
1917
1918 perBankRdBursts
1919 .init(banksPerRank * ranksPerChannel)
1920 .name(name() + ".perBankRdBursts")
1921 .desc("Per bank write bursts");
1922
1923 perBankWrBursts
1924 .init(banksPerRank * ranksPerChannel)
1925 .name(name() + ".perBankWrBursts")
1926 .desc("Per bank write bursts");
1927
1928 avgRdQLen
1929 .name(name() + ".avgRdQLen")
1930 .desc("Average read queue length when enqueuing")
1931 .precision(2);
1932
1933 avgWrQLen
1934 .name(name() + ".avgWrQLen")
1935 .desc("Average write queue length when enqueuing")
1936 .precision(2);
1937
1938 totQLat
1939 .name(name() + ".totQLat")
1940 .desc("Total ticks spent queuing");
1941
1942 totBusLat
1943 .name(name() + ".totBusLat")
1944 .desc("Total ticks spent in databus transfers");
1945
1946 totMemAccLat
1947 .name(name() + ".totMemAccLat")
1948 .desc("Total ticks spent from burst creation until serviced "
1949 "by the DRAM");
1950
1951 avgQLat
1952 .name(name() + ".avgQLat")
1953 .desc("Average queueing delay per DRAM burst")
1954 .precision(2);
1955
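// reads serviced by the write queue never reach the DRAM, so they
// are excluded from the denominators of the latency averages below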
1956 avgQLat = totQLat / (readBursts - servicedByWrQ);
1957
1958 avgBusLat
1959 .name(name() + ".avgBusLat")
1960 .desc("Average bus latency per DRAM burst")
1961 .precision(2);
1962
1963 avgBusLat = totBusLat / (readBursts - servicedByWrQ);
1964
1965 avgMemAccLat
1966 .name(name() + ".avgMemAccLat")
1967 .desc("Average memory access latency per DRAM burst")
1968 .precision(2);
1969
1970 avgMemAccLat = totMemAccLat / (readBursts - servicedByWrQ);
1971
1972 numRdRetry
1973 .name(name() + ".numRdRetry")
1974 .desc("Number of times read queue was full causing retry");
1975
1976 numWrRetry
1977 .name(name() + ".numWrRetry")
1978 .desc("Number of times write queue was full causing retry");
1979
1980 readRowHits
1981 .name(name() + ".readRowHits")
1982 .desc("Number of row buffer hits during reads");
1983
1984 writeRowHits
1985 .name(name() + ".writeRowHits")
1986 .desc("Number of row buffer hits during writes");
1987
1988 readRowHitRate
1989 .name(name() + ".readRowHitRate")
1990 .desc("Row buffer hit rate for reads")
1991 .precision(2);
1992
1993 readRowHitRate = (readRowHits / (readBursts - servicedByWrQ)) * 100;
1994
1995 writeRowHitRate
1996 .name(name() + ".writeRowHitRate")
1997 .desc("Row buffer hit rate for writes")
1998 .precision(2);
1999
2000 writeRowHitRate = (writeRowHits / (writeBursts - mergedWrBursts)) * 100;
2001
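// request sizes are binned by log2, so ceilLog2(burstSize) + 1
// buckets cover sizes from a single byte up to a full burst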
2002 readPktSize
2003 .init(ceilLog2(burstSize) + 1)
2004 .name(name() + ".readPktSize")
2005 .desc("Read request sizes (log2)");
2006
2007 writePktSize
2008 .init(ceilLog2(burstSize) + 1)
2009 .name(name() + ".writePktSize")
2010 .desc("Write request sizes (log2)");
2011
2012 rdQLenPdf
2013 .init(readBufferSize)
2014 .name(name() + ".rdQLenPdf")
2015 .desc("What read queue length does an incoming req see");
2016
2017 wrQLenPdf
2018 .init(writeBufferSize)
2019 .name(name() + ".wrQLenPdf")
2020 .desc("What write queue length does an incoming req see");
2021
2022 bytesPerActivate
2023 .init(maxAccessesPerRow)
2024 .name(name() + ".bytesPerActivate")
2025 .desc("Bytes accessed per row activation")
2026 .flags(nozero);
2027
2028 rdPerTurnAround
2029 .init(readBufferSize)
2030 .name(name() + ".rdPerTurnAround")
2031 .desc("Reads before turning the bus around for writes")
2032 .flags(nozero);
2033
2034 wrPerTurnAround
2035 .init(writeBufferSize)
2036 .name(name() + ".wrPerTurnAround")
2037 .desc("Writes before turning the bus around for reads")
2038 .flags(nozero);
2039
2040 bytesReadDRAM
2041 .name(name() + ".bytesReadDRAM")
2042 .desc("Total number of bytes read from DRAM");
2043
2044 bytesReadWrQ
2045 .name(name() + ".bytesReadWrQ")
2046 .desc("Total number of bytes read from write queue");
2047
2048 bytesWritten
2049 .name(name() + ".bytesWritten")
2050 .desc("Total number of bytes written to DRAM");
2051
2052 bytesReadSys
2053 .name(name() + ".bytesReadSys")
2054 .desc("Total read bytes from the system interface side");
2055
2056 bytesWrittenSys
2057 .name(name() + ".bytesWrittenSys")
2058 .desc("Total written bytes from the system interface side");
2059
2060 avgRdBW
2061 .name(name() + ".avgRdBW")
2062 .desc("Average DRAM read bandwidth in MiByte/s")
2063 .precision(2);
2064
2065 avgRdBW = (bytesReadDRAM / 1000000) / simSeconds;
2066
2067 avgWrBW
2068 .name(name() + ".avgWrBW")
2069 .desc("Average achieved write bandwidth in MiByte/s")
2070 .precision(2);
2071
2072 avgWrBW = (bytesWritten / 1000000) / simSeconds;
2073
2074 avgRdBWSys
2075 .name(name() + ".avgRdBWSys")
2076 .desc("Average system read bandwidth in MiByte/s")
2077 .precision(2);
2078
2079 avgRdBWSys = (bytesReadSys / 1000000) / simSeconds;
2080
2081 avgWrBWSys
2082 .name(name() + ".avgWrBWSys")
2083 .desc("Average system write bandwidth in MiByte/s")
2084 .precision(2);
2085
2086 avgWrBWSys = (bytesWrittenSys / 1000000) / simSeconds;
2087
2088 peakBW
2089 .name(name() + ".peakBW")
2090 .desc("Theoretical peak bandwidth in MiByte/s")
2091 .precision(2);
2092
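// e.g., assuming a 64-byte burst every 3.2 ns, this evaluates to
// 64 / 3.2e-9 / 1e6 = 20000 MByte/s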
2093 peakBW = (SimClock::Frequency / tBURST) * burstSize / 1000000;
2094
2095 busUtil
2096 .name(name() + ".busUtil")
2097 .desc("Data bus utilization in percentage")
2098 .precision(2);
2099 busUtil = (avgRdBW + avgWrBW) / peakBW * 100;
2100
2101 totGap
2102 .name(name() + ".totGap")
2103 .desc("Total gap between requests");
2104
2105 avgGap
2106 .name(name() + ".avgGap")
2107 .desc("Average gap between requests")
2108 .precision(2);
2109
2110 avgGap = totGap / (readReqs + writeReqs);
2111
2112 // Stats for DRAM Power calculation based on Micron datasheet
2113 busUtilRead
2114 .name(name() + ".busUtilRead")
2115 .desc("Data bus utilization in percentage for reads")
2116 .precision(2);
2117
2118 busUtilRead = avgRdBW / peakBW * 100;
2119
2120 busUtilWrite
2121 .name(name() + ".busUtilWrite")
2122 .desc("Data bus utilization in percentage for writes")
2123 .precision(2);
2124
2125 busUtilWrite = avgWrBW / peakBW * 100;
2126
2127 pageHitRate
2128 .name(name() + ".pageHitRate")
2129 .desc("Row buffer hit rate, read and write combined")
2130 .precision(2);
2131
2132 pageHitRate = (writeRowHits + readRowHits) /
2133 (writeBursts - mergedWrBursts + readBursts - servicedByWrQ) * 100;
2134 }
2135
2136 void
2137 DRAMCtrl::recvFunctional(PacketPtr pkt)
2138 {
2139 // rely on the abstract memory
2140 functionalAccess(pkt);
2141 }
2142
2143 BaseSlavePort&
2144 DRAMCtrl::getSlavePort(const string &if_name, PortID idx)
2145 {
2146 if (if_name != "port") {
2147 return MemObject::getSlavePort(if_name, idx);
2148 } else {
2149 return port;
2150 }
2151 }
2152
2153 unsigned int
2154 DRAMCtrl::drain(DrainManager *dm)
2155 {
2156 unsigned int count = port.drain(dm);
2157
2158 // if there is anything in any of our internal queues, keep track
2159 // of that as well
2160 if (!(writeQueue.empty() && readQueue.empty() &&
2161 respQueue.empty())) {
2162 DPRINTF(Drain, "DRAM controller not drained, write: %d, read: %d,"
2163 " resp: %d\n", writeQueue.size(), readQueue.size(),
2164 respQueue.size());
2165 ++count;
2166 drainManager = dm;
2167
2168 // the only part that is not drained automatically over time
2169 // is the write queue, thus kick things into action if needed
2170 if (!writeQueue.empty() && !nextReqEvent.scheduled()) {
2171 schedule(nextReqEvent, curTick());
2172 }
2173 }
2174
2175 if (count)
2176 setDrainState(Drainable::Draining);
2177 else
2178 setDrainState(Drainable::Drained);
2179 return count;
2180 }
2181
2182 void
2183 DRAMCtrl::drainResume()
2184 {
2185 if (!isTimingMode && system()->isTimingMode()) {
2186 // if we switched to timing mode, kick things into action,
2187 // and behave as if we restored from a checkpoint
2188 startup();
2189 } else if (isTimingMode && !system()->isTimingMode()) {
2190 // if we switch from timing mode, stop the refresh events to
2191 // not cause issues with KVM
2192 for (auto r : ranks) {
2193 r->suspend();
2194 }
2195 }
2196
2197 // update the mode
2198 isTimingMode = system()->isTimingMode();
2199 }
2200
2201 DRAMCtrl::MemoryPort::MemoryPort(const std::string& name, DRAMCtrl& _memory)
2202 : QueuedSlavePort(name, &_memory, queue), queue(_memory, *this),
2203 memory(_memory)
2204 { }
2205
2206 AddrRangeList
2207 DRAMCtrl::MemoryPort::getAddrRanges() const
2208 {
2209 AddrRangeList ranges;
2210 ranges.push_back(memory.getAddrRange());
2211 return ranges;
2212 }
2213
2214 void
2215 DRAMCtrl::MemoryPort::recvFunctional(PacketPtr pkt)
2216 {
2217 pkt->pushLabel(memory.name());
2218
2219 if (!queue.checkFunctional(pkt)) {
2220 // Default implementation of SimpleTimingPort::recvFunctional()
2221 // calls recvAtomic() and throws away the latency; we can save a
2222 // little here by just not calculating the latency.
2223 memory.recvFunctional(pkt);
2224 }
2225
2226 pkt->popLabel();
2227 }
2228
2229 Tick
2230 DRAMCtrl::MemoryPort::recvAtomic(PacketPtr pkt)
2231 {
2232 return memory.recvAtomic(pkt);
2233 }
2234
2235 bool
2236 DRAMCtrl::MemoryPort::recvTimingReq(PacketPtr pkt)
2237 {
2238 // pass it to the memory controller
2239 return memory.recvTimingReq(pkt);
2240 }
2241
2242 DRAMCtrl*
2243 DRAMCtrlParams::create()
2244 {
2245 return new DRAMCtrl(this);
2246 }