diff --git a/src/mem/simple_dram.cc b/src/mem/simple_dram.cc
index 42c97977a4704eedc0afb225aa9e566b127d9326..0b24b4056874c17585d571af7a088ca62f4ea834 100644
--- a/src/mem/simple_dram.cc
+++ b/src/mem/simple_dram.cc
  *          Ani Udipi
  */
 
+#include "base/trace.hh"
+#include "debug/Drain.hh"
 #include "debug/DRAM.hh"
 #include "debug/DRAMWR.hh"
 #include "mem/simple_dram.hh"
-#include "sim/stat_control.hh"
 
 using namespace std;
 
@@ -49,22 +50,23 @@ SimpleDRAM::SimpleDRAM(const SimpleDRAMParams* p) :
     AbstractMemory(p),
     port(name() + ".port", *this),
     retryRdReq(false), retryWrReq(false),
-    rowHitFlag(false), stopReads(false),
+    rowHitFlag(false), stopReads(false), actTicks(p->activation_limit, 0),
     writeEvent(this), respondEvent(this),
     refreshEvent(this), nextReqEvent(this), drainManager(NULL),
     bytesPerCacheLine(0),
     linesPerRowBuffer(p->lines_per_rowbuffer),
     ranksPerChannel(p->ranks_per_channel),
-    banksPerRank(p->banks_per_rank), rowsPerBank(0),
+    banksPerRank(p->banks_per_rank), channels(p->channels), rowsPerBank(0),
     readBufferSize(p->read_buffer_size),
     writeBufferSize(p->write_buffer_size),
     writeThresholdPerc(p->write_thresh_perc),
     tWTR(p->tWTR), tBURST(p->tBURST),
     tRCD(p->tRCD), tCL(p->tCL), tRP(p->tRP),
     tRFC(p->tRFC), tREFI(p->tREFI),
+    tXAW(p->tXAW), activationLimit(p->activation_limit),
     memSchedPolicy(p->mem_sched_policy), addrMapping(p->addr_mapping),
     pageMgmt(p->page_policy),
-    busBusyUntil(0), prevdramaccess(0), writeStartTime(0),
+    busBusyUntil(0), writeStartTime(0),
     prevArrival(0), numReqs(0)
 {
     // create the bank states based on the dimensions of the ranks and
@@ -88,30 +90,40 @@ SimpleDRAM::init()
         port.sendRangeChange();
     }
 
-    // get the cache line size from the connected port
+    // get the burst size from the connected port as it is currently
+    // assumed to be equal to the cache line size
     bytesPerCacheLine = port.peerBlockSize();
 
     // we could deal with plenty of options here, but for now do a quick
     // sanity check
     if (bytesPerCacheLine != 64 && bytesPerCacheLine != 32)
-        panic("Unexpected cache line size %d", bytesPerCacheLine);
+        panic("Unexpected burst size %d", bytesPerCacheLine);
 
     // determine the rows per bank by looking at the total capacity
-    uint64_t capacity = AbstractMemory::size();
-    uint64_t i = 1;
-    while (i < 64 && capacity > ((1 << i))) {
-        ++i;
-    }
-
-    // rounded up to nearest power of two
-    DPRINTF(DRAM, "i is %lld\n", i);
-    capacity = 1 << i;
+    uint64_t capacity = ULL(1) << ceilLog2(AbstractMemory::size());
 
     DPRINTF(DRAM, "Memory capacity %lld (%lld) bytes\n", capacity,
             AbstractMemory::size());
     rowsPerBank = capacity / (bytesPerCacheLine * linesPerRowBuffer *
                               banksPerRank * ranksPerChannel);
 
+    if (range.interleaved()) {
+        if (channels != range.stripes())
+            panic("%s has %d interleaved address stripes but %d channel(s)\n",
+                  name(), range.stripes(), channels);
+
+        if (addrMapping == Enums::openmap) {
+            if (bytesPerCacheLine * linesPerRowBuffer !=
+                range.granularity()) {
+                panic("Interleaving of %s doesn't match open address map\n",
+                      name());
+            }
+        } else if (addrMapping == Enums::closemap) {
+            if (bytesPerCacheLine != range.granularity())
+                panic("Interleaving of %s doesn't match closed address map\n",
+                      name());
+        }
+    }
 }
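
A note on the two checks above: init() first rounds the capacity up to a power of two with ceilLog2 and derives rowsPerBank from the remaining geometry, then requires the channel-interleaving stripe to be a full row buffer (open map) or a single burst (closed map). A minimal standalone sketch of that arithmetic; the geometry values and the local ceilLog2 helper are illustrative assumptions, not taken from gem5:

    #include <cassert>
    #include <cstdint>
    #include <iostream>

    // stand-in for gem5's ceilLog2: smallest n such that (1 << n) >= x
    static unsigned ceilLog2(uint64_t x)
    {
        unsigned n = 0;
        while ((uint64_t(1) << n) < x)
            ++n;
        return n;
    }

    int main()
    {
        // assumed example geometry, mirroring the names in the patch
        const uint64_t bytesPerCacheLine = 64;   // one burst
        const uint64_t linesPerRowBuffer = 64;   // bursts per row buffer
        const uint64_t banksPerRank = 8;
        const uint64_t ranksPerChannel = 2;
        const uint64_t size = 512 * 1024 * 1024; // 512 MB backing store

        // round up to the nearest power of two, as init() does
        const uint64_t capacity = uint64_t(1) << ceilLog2(size);
        const uint64_t rowsPerBank =
            capacity / (bytesPerCacheLine * linesPerRowBuffer *
                        banksPerRank * ranksPerChannel);

        // stripe granularity that the interleaving must match
        const uint64_t openGranularity = bytesPerCacheLine * linesPerRowBuffer;
        const uint64_t closeGranularity = bytesPerCacheLine;

        std::cout << rowsPerBank << " rows/bank, open stripe "
                  << openGranularity << " B, closed stripe "
                  << closeGranularity << " B\n";
        assert(rowsPerBank == 8192);
        return 0;
    }
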
 
 void
@@ -121,10 +133,9 @@ SimpleDRAM::startup()
     printParams();
 
     // kick off the refresh
-    schedule(&refreshEvent, curTick() + tREFI);
+    schedule(refreshEvent, curTick() + tREFI);
 }
 
-
 Tick
 SimpleDRAM::recvAtomic(PacketPtr pkt)
 {
@@ -146,53 +157,86 @@ bool
 SimpleDRAM::readQueueFull() const
 {
     DPRINTF(DRAM, "Read queue limit %d current size %d\n",
-            readBufferSize, dramReadQueue.size() + dramRespQueue.size());
+            readBufferSize, readQueue.size() + respQueue.size());
 
-    return (dramReadQueue.size() + dramRespQueue.size()) == readBufferSize;
+    return (readQueue.size() + respQueue.size()) == readBufferSize;
 }
 
 bool
 SimpleDRAM::writeQueueFull() const
 {
     DPRINTF(DRAM, "Write queue limit %d current size %d\n",
-            writeBufferSize, dramWriteQueue.size());
-    return dramWriteQueue.size() == writeBufferSize;
+            writeBufferSize, writeQueue.size());
+    return writeQueue.size() == writeBufferSize;
 }
 
-
 SimpleDRAM::DRAMPacket*
 SimpleDRAM::decodeAddr(PacketPtr pkt)
 {
+    // decode the address based on the address mapping scheme
+    //
+    // with R, C, B and K denoting row, column, bank and rank,
+    // respectively, and going from MSB to LSB, the two schemes are
+    // RKBC (openmap) and RCKB (closedmap)
     uint8_t rank;
     uint16_t bank;
     uint16_t row;
 
     Addr addr = pkt->getAddr();
-    Addr temp = addr;
 
     // truncate the address to the access granularity
     addr = addr / bytesPerCacheLine;
 
+    // we have removed the lowest order address bits that denote the
+    // position within the cache line; proceed to select the
+    // appropriate bits for bank, rank and row (no column address is
+    // needed)
     if (addrMapping == Enums::openmap) {
+        // the lowest order bits denote the column to ensure that
+        // sequential cache lines occupy the same row
         addr = addr / linesPerRowBuffer;
 
+        // take out the channel part of the address, note that this has
+        // to match with how accesses are interleaved between the
+        // controllers in the address mapping
+        addr = addr / channels;
+
+        // after the column bits, we get the bank bits to interleave
+        // over the banks
         bank = addr % banksPerRank;
         addr = addr / banksPerRank;
 
+        // after the bank, we get the rank bits which thus interleave
+        // over the ranks
         rank = addr % ranksPerChannel;
         addr = addr / ranksPerChannel;
 
+        // lastly, get the row bits
         row = addr % rowsPerBank;
         addr = addr / rowsPerBank;
     } else if (addrMapping == Enums::closemap) {
+        // optimise for closed page mode and utilise maximum
+        // parallelism of the DRAM (at the cost of power)
+
+        // take out the channel part of the address, note that this has
+        // to match with how accesses are interleaved between the
+        // controllers in the address mapping
+        addr = addr / channels;
+
+        // start with the bank bits, as this provides the maximum
+        // opportunity for parallelism between requests
         bank = addr % banksPerRank;
         addr = addr / banksPerRank;
 
+        // next get the rank bits
         rank = addr % ranksPerChannel;
         addr = addr / ranksPerChannel;
 
+        // next the column bits which we do not need to keep track of
+        // and simply skip past
         addr = addr / linesPerRowBuffer;
 
+        // lastly, get the row bits
         row = addr % rowsPerBank;
         addr = addr / rowsPerBank;
     } else
@@ -203,14 +247,13 @@ SimpleDRAM::decodeAddr(PacketPtr pkt)
     assert(row < rowsPerBank);
 
     DPRINTF(DRAM, "Address: %lld Rank %d Bank %d Row %d\n",
-            temp, rank, bank, row);
+            pkt->getAddr(), rank, bank, row);
 
     // create the corresponding DRAM packet with the entry time and
-    // ready time set to the current tick, they will be updated later
-    DRAMPacket* dram_pkt = new DRAMPacket(pkt, rank, bank, row, temp,
-                                          banks[rank][bank]);
-
-    return dram_pkt;
+    // ready time set to the current tick, the latter will be updated
+    // later
+    return new DRAMPacket(pkt, rank, bank, row, pkt->getAddr(),
+                          banks[rank][bank]);
 }
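
To make the decode above concrete, here is a self-contained sketch that peels the open-map fields off in the same order as decodeAddr (column, channel, bank, rank, row). The geometry and the example address are illustrative assumptions, not values from the patch:

    #include <cstdint>
    #include <iostream>

    int main()
    {
        // assumed example geometry
        const uint64_t bytesPerCacheLine = 64;
        const uint64_t linesPerRowBuffer = 64;
        const uint64_t channels = 2;
        const uint64_t banksPerRank = 8;
        const uint64_t ranksPerChannel = 2;

        uint64_t addr = 0x12345678;          // example physical address
        addr /= bytesPerCacheLine;           // drop the offset within the burst
        addr /= linesPerRowBuffer;           // open map: column bits come first
        addr /= channels;                    // channel bits taken by interleaving
        uint64_t bank = addr % banksPerRank; // then the bank bits ...
        addr /= banksPerRank;
        uint64_t rank = addr % ranksPerChannel; // ... then the rank bits ...
        addr /= ranksPerChannel;
        uint64_t row = addr;                 // ... and the rest is the row

        std::cout << "rank " << rank << " bank " << bank
                  << " row " << row << '\n';
        return 0;
    }
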
 
 void
@@ -222,14 +265,14 @@ SimpleDRAM::addToReadQueue(PacketPtr pkt)
 
     // First check write buffer to see if the data is already at
     // the controller
-    std::list<DRAMPacket*>::const_iterator i;
+    list<DRAMPacket*>::const_iterator i;
     Addr addr = pkt->getAddr();
 
     // @todo: add size check
-    for (i = dramWriteQueue.begin();  i != dramWriteQueue.end(); ++i) {
+    for (i = writeQueue.begin(); i != writeQueue.end(); ++i) {
         if ((*i)->addr == addr){
             servicedByWrQ++;
-            DPRINTF(DRAM,"Serviced by write Q\n");
+            DPRINTF(DRAM, "Read to %lld serviced by write queue\n", addr);
             bytesRead += bytesPerCacheLine;
             bytesConsumedRd += pkt->getSize();
             accessAndRespond(pkt);
@@ -239,31 +282,32 @@ SimpleDRAM::addToReadQueue(PacketPtr pkt)
 
     DRAMPacket* dram_pkt = decodeAddr(pkt);
 
-    assert(dramReadQueue.size() + dramRespQueue.size() < readBufferSize);
-    rdQLenPdf[dramReadQueue.size() + dramRespQueue.size()]++;
+    assert(readQueue.size() + respQueue.size() < readBufferSize);
+    rdQLenPdf[readQueue.size() + respQueue.size()]++;
 
     DPRINTF(DRAM, "Adding to read queue\n");
 
-    dramReadQueue.push_back(dram_pkt);
+    readQueue.push_back(dram_pkt);
 
     // Update stats
     uint32_t bank_id = banksPerRank * dram_pkt->rank + dram_pkt->bank;
     assert(bank_id < ranksPerChannel * banksPerRank);
     perBankRdReqs[bank_id]++;
 
-    avgRdQLen = dramReadQueue.size() + dramRespQueue.size();
+    avgRdQLen = readQueue.size() + respQueue.size();
 
-    // Special case where no arbitration is required between requests
+    // If we are not already scheduled to get the read request out of
+    // the queue, do so now
     if (!nextReqEvent.scheduled() && !stopReads) {
-        DPRINTF(DRAM, "Request %lld - need to schedule immediately");
-        schedule(&nextReqEvent, curTick() + 1);
+        DPRINTF(DRAM, "Request scheduled immediately\n");
+        schedule(nextReqEvent, curTick());
     }
 }
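
Note how addToReadQueue first snoops the write queue: if a buffered write already holds the requested address, the read is serviced by the controller without touching the DRAM at all. A hedged sketch of that forwarding check on a plain list; the packet struct is a made-up stand-in and, like the @todo in the patch, it only matches exact addresses with no size check:

    #include <cstdint>
    #include <iostream>
    #include <list>

    using Addr = uint64_t;

    struct FakeWrite { Addr addr; };  // stand-in for a queued DRAMPacket

    // return true if a pending write can service the read directly
    static bool servicedByWriteQueue(const std::list<FakeWrite*>& writeQueue,
                                     Addr addr)
    {
        for (const FakeWrite* w : writeQueue)
            if (w->addr == addr)      // exact match only, no size check
                return true;
        return false;
    }

    int main()
    {
        FakeWrite a{0x1000}, b{0x2000};
        std::list<FakeWrite*> writeQueue{&a, &b};

        std::cout << servicedByWriteQueue(writeQueue, 0x2000) << ' '   // 1
                  << servicedByWriteQueue(writeQueue, 0x3000) << '\n'; // 0
        return 0;
    }
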
 
 void
 SimpleDRAM::processWriteEvent()
 {
-    assert(!dramWriteQueue.empty());
+    assert(!writeQueue.empty());
     uint32_t numWritesThisTime = 0;
 
     DPRINTF(DRAMWR, "Beginning DRAM Writes\n");
@@ -271,13 +315,15 @@ SimpleDRAM::processWriteEvent()
     Tick temp2 M5_VAR_USED = std::max(curTick(), maxBankFreeAt());
 
     // @todo: are there any dangers with the untimed while loop?
-    while (!dramWriteQueue.empty()) {
-        if (numWritesThisTime > writeThreshold)
+    while (!writeQueue.empty()) {
+        if (numWritesThisTime > writeThreshold) {
+            DPRINTF(DRAMWR, "Hit write threshold %d\n", writeThreshold);
             break;
+        }
 
         chooseNextWrite();
-        DRAMPacket* dram_pkt = dramWriteQueue.front();
-        // What's the earlier the request can be put on the bus
+        DRAMPacket* dram_pkt = writeQueue.front();
+        // What's the earliest the request can be put on the bus
         Tick schedTime = std::max(curTick(), busBusyUntil);
 
         DPRINTF(DRAMWR, "Asking for latency estimate at %lld\n",
@@ -287,9 +333,6 @@ SimpleDRAM::processWriteEvent()
         Tick accessLat = lat.second;
 
         // look at the rowHitFlag set by estimateLatency
-
-        // @todo: Race condition here where another packet gives rise
-        // to another call to estimateLatency in the meanwhile?
         if (rowHitFlag)
             writeRowHits++;
 
@@ -297,13 +340,19 @@ SimpleDRAM::processWriteEvent()
 
         if (pageMgmt == Enums::open) {
             bank.openRow = dram_pkt->row;
-            bank.freeAt = schedTime + tBURST + accessLat;
+            bank.freeAt = schedTime + tBURST + std::max(accessLat, tCL);
+            busBusyUntil = bank.freeAt - tCL;
 
-            if (!rowHitFlag)
+            if (!rowHitFlag) {
                 bank.tRASDoneAt = bank.freeAt + tRP;
-
+                recordActivate(bank.freeAt - tCL - tRCD);
+                busBusyUntil = bank.freeAt - tCL - tRCD;
+            }
         } else if (pageMgmt == Enums::close) {
             bank.freeAt = schedTime + tBURST + accessLat + tRP + tRP;
+            // Work backwards from bank.freeAt to determine activate time
+            recordActivate(bank.freeAt - tRP - tRP - tCL - tRCD);
+            busBusyUntil = bank.freeAt - tRP - tRP - tCL - tRCD;
             DPRINTF(DRAMWR, "processWriteEvent::bank.freeAt for "
                     "banks_id %d is %lld\n",
                     dram_pkt->rank * banksPerRank + dram_pkt->bank,
@@ -311,18 +360,13 @@ SimpleDRAM::processWriteEvent()
         } else
             panic("Unknown page management policy chosen\n");
 
-        // @todo: As of now, write goes on the databus asap, maybe
-        // be held up at bank. May want to change it to delay the
-        // schedTime itself.
-        busBusyUntil = schedTime + tBURST;
-        DPRINTF(DRAMWR,"Done writing to address %lld\n",dram_pkt->addr);
-
+        DPRINTF(DRAMWR, "Done writing to address %lld\n", dram_pkt->addr);
 
-        DPRINTF(DRAMWR,"schedtime is %lld, tBURST is %lld, "
+        DPRINTF(DRAMWR, "schedtime is %lld, tBURST is %lld, "
                 "busbusyuntil is %lld\n",
                 schedTime, tBURST, busBusyUntil);
 
-        dramWriteQueue.pop_front();
+        writeQueue.pop_front();
         delete dram_pkt;
 
         numWritesThisTime++;
@@ -333,7 +377,7 @@ SimpleDRAM::processWriteEvent()
             busBusyUntil - temp1, maxBankFreeAt() - temp2);
 
     // Update stats
-    avgWrQLen = dramWriteQueue.size();
+    avgWrQLen = writeQueue.size();
 
     // turn the bus back around for reads again
     busBusyUntil += tWTR;
@@ -345,15 +389,18 @@ SimpleDRAM::processWriteEvent()
     }
 
     // if there is nothing left in any queue, signal a drain
-    if (dramWriteQueue.empty() && dramReadQueue.empty() &&
-        dramRespQueue.empty () && drainManager) {
+    if (writeQueue.empty() && readQueue.empty() &&
+        respQueue.empty() && drainManager) {
         drainManager->signalDrainDone();
         drainManager = NULL;
     }
 
     // Once you're done emptying the write queue, check if there's
-    // anything in the read queue, and call schedule if required
-    schedule(&nextReqEvent, busBusyUntil);
+    // anything in the read queue, and call schedule if required. The
+    // retry above could already have caused it to be scheduled, so
+    // first check
+    if (!nextReqEvent.scheduled())
+        schedule(nextReqEvent, busBusyUntil);
 }
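
The write loop above now derives the bus occupancy and the implied activate time by working backwards from bank.freeAt instead of putting writes on the bus as soon as possible. A small sketch of the closed-page back-calculation; all tick values are made-up examples, not the patch's parameters:

    #include <cassert>
    #include <cstdint>
    #include <iostream>

    using Tick = uint64_t;

    int main()
    {
        // assumed example timings in ticks
        const Tick tRP = 15, tCL = 14, tRCD = 14, tBURST = 8;
        const Tick schedTime = 1000;
        const Tick accessLat = tRCD + tCL;  // activate plus CAS latency

        // closed page: the bank is free after the access plus two
        // precharge terms, as in processWriteEvent
        const Tick freeAt = schedTime + tBURST + accessLat + tRP + tRP;

        // work backwards from freeAt to the activate that started the
        // access, essentially freeAt - tRC in the patch's terms
        const Tick actTick = freeAt - tRP - tRP - tCL - tRCD;

        std::cout << "freeAt " << freeAt << ", activate at " << actTick << '\n';
        assert(actTick >= schedTime);
        return 0;
    }
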
 
 void
@@ -369,7 +416,7 @@ SimpleDRAM::triggerWrites()
 
     assert(writeStartTime >= curTick());
     assert(!writeEvent.scheduled());
-    schedule(&writeEvent, writeStartTime);
+    schedule(writeEvent, writeStartTime);
 }
 
 void
@@ -381,19 +428,19 @@ SimpleDRAM::addToWriteQueue(PacketPtr pkt)
 
     DRAMPacket* dram_pkt = decodeAddr(pkt);
 
-    assert(dramWriteQueue.size() < writeBufferSize);
-    wrQLenPdf[dramWriteQueue.size()]++;
+    assert(writeQueue.size() < writeBufferSize);
+    wrQLenPdf[writeQueue.size()]++;
 
     DPRINTF(DRAM, "Adding to write queue\n");
 
-    dramWriteQueue.push_back(dram_pkt);
+    writeQueue.push_back(dram_pkt);
 
     // Update stats
     uint32_t bank_id = banksPerRank * dram_pkt->rank + dram_pkt->bank;
     assert(bank_id < ranksPerChannel * banksPerRank);
     perBankWrReqs[bank_id]++;
 
-    avgWrQLen = dramWriteQueue.size();
+    avgWrQLen = writeQueue.size();
 
     // we do not wait for the writes to be send to the actual memory,
     // but instead take responsibility for the consistency here and
@@ -404,7 +451,7 @@ SimpleDRAM::addToWriteQueue(PacketPtr pkt)
     accessAndRespond(pkt);
 
     // If your write buffer is starting to fill up, drain it!
-    if (dramWriteQueue.size() > writeThreshold  && !stopReads){
+    if (writeQueue.size() > writeThreshold && !stopReads){
         triggerWrites();
     }
 }
@@ -421,7 +468,7 @@ SimpleDRAM::printParams() const
             "Banks per rank       %d\n"                         \
             "Ranks per channel    %d\n"                         \
             "Total mem capacity   %u\n",
-            name(), bytesPerCacheLine ,linesPerRowBuffer, rowsPerBank,
+            name(), bytesPerCacheLine, linesPerRowBuffer, rowsPerBank,
             banksPerRank, ranksPerChannel, bytesPerCacheLine *
             linesPerRowBuffer * rowsPerBank * banksPerRank * ranksPerChannel);
 
@@ -442,14 +489,16 @@ SimpleDRAM::printParams() const
             scheduler, address_mapping, page_policy);
 
     DPRINTF(DRAM, "Memory controller %s timing specs\n" \
-            "tRCD    %d ticks\n"                        \
-            "tCL     %d ticks\n"                        \
-            "tRP     %d ticks\n"                        \
-            "tBURST  %d ticks\n"                        \
-            "tRFC    %d ticks\n"                        \
-            "tREFI   %d ticks\n"                        \
-            "tWTR    %d ticks\n",
-            name(), tRCD, tCL, tRP, tBURST, tRFC, tREFI, tWTR);
+            "tRCD      %d ticks\n"                        \
+            "tCL       %d ticks\n"                        \
+            "tRP       %d ticks\n"                        \
+            "tBURST    %d ticks\n"                        \
+            "tRFC      %d ticks\n"                        \
+            "tREFI     %d ticks\n"                        \
+            "tWTR      %d ticks\n"                        \
+            "tXAW (%d) %d ticks\n",
+            name(), tRCD, tCL, tRP, tBURST, tRFC, tREFI, tWTR,
+            activationLimit, tXAW);
 }
 
 void
@@ -458,15 +507,15 @@ SimpleDRAM::printQs() const {
     list<DRAMPacket*>::const_iterator i;
 
     DPRINTF(DRAM, "===READ QUEUE===\n\n");
-    for (i = dramReadQueue.begin() ;  i != dramReadQueue.end() ; ++i) {
+    for (i = readQueue.begin() ;  i != readQueue.end() ; ++i) {
         DPRINTF(DRAM, "Read %lu\n", (*i)->addr);
     }
     DPRINTF(DRAM, "\n===RESP QUEUE===\n\n");
-    for (i = dramRespQueue.begin() ;  i != dramRespQueue.end() ; ++i) {
+    for (i = respQueue.begin() ;  i != respQueue.end() ; ++i) {
         DPRINTF(DRAM, "Response %lu\n", (*i)->addr);
     }
     DPRINTF(DRAM, "\n===WRITE QUEUE===\n\n");
-    for (i = dramWriteQueue.begin() ;  i != dramWriteQueue.end() ; ++i) {
+    for (i = writeQueue.begin() ;  i != writeQueue.end() ; ++i) {
         DPRINTF(DRAM, "Write %lu\n", (*i)->addr);
     }
 }
@@ -474,15 +523,27 @@ SimpleDRAM::printQs() const {
 bool
 SimpleDRAM::recvTimingReq(PacketPtr pkt)
 {
+    /// @todo temporary hack to deal with memory corruption issues until
+    /// 4-phase transactions are complete
+    for (int x = 0; x < pendingDelete.size(); x++)
+        delete pendingDelete[x];
+    pendingDelete.clear();
+
     // This is where we enter from the outside world
-    DPRINTF(DRAM, "Inside recvTimingReq: request %s addr %lld size %d\n",
+    DPRINTF(DRAM, "recvTimingReq: request %s addr %lld size %d\n",
             pkt->cmdString(),pkt->getAddr(), pkt->getSize());
 
-   int index;
+    // simply drop inhibited packets for now
+    if (pkt->memInhibitAsserted()) {
+        DPRINTF(DRAM,"Inhibited packet -- Dropping it now\n");
+        pendingDelete.push_back(pkt);
+        return true;
+    }
 
    if (pkt->getSize() == bytesPerCacheLine)
        cpuReqs++;
 
+   // Every million accesses, print the state of the queues
    if (numReqs % 1000000 == 0)
        printQs();
 
@@ -492,66 +553,36 @@ SimpleDRAM::recvTimingReq(PacketPtr pkt)
     }
     prevArrival = curTick();
 
-    // simply drop inhibited packets for now
-    if (pkt->memInhibitAsserted()) {
-        DPRINTF(DRAM,"Inhibited packet -- Dropping it now\n");
-        delete pkt;
-        return true;
-    }
-
     unsigned size = pkt->getSize();
     if (size > bytesPerCacheLine)
-        panic("Request size %d is greater than cache line size %d",
+        panic("Request size %d is greater than burst size %d",
               size, bytesPerCacheLine);
 
-    if (size == 0)
-        index = log2(bytesPerCacheLine) + 1;
-    else
-        index = log2(size);
-
-    if (size != 0 && (1 << index) != size)
-        index = log2(bytesPerCacheLine) + 2;
-
-    // @todo: Do we really want to do all this before the packet is
-    // actually accepted?
-
-    /* Index 0 - Size 1 byte
-       Index 1 - Size 2 bytes
-       Index 2 - Size 4 bytes
-         .
-         .
-       Index 6 - Size 64 bytes
-       Index 7 - Size 0 bytes
-       Index 8 - Non-power-of-2 size */
-
-    if (pkt->isRead())
-        readPktSize[index]++;
-    else if (pkt->isWrite())
-        writePktSize[index]++;
-    else
-        neitherPktSize[index]++;
-
     // check local buffers and do not accept if full
     if (pkt->isRead()) {
+        assert(size != 0);
         if (readQueueFull()) {
-            DPRINTF(DRAM,"Read queue full, not accepting\n");
+            DPRINTF(DRAM, "Read queue full, not accepting\n");
             // remember that we have to retry this port
             retryRdReq = true;
             numRdRetry++;
             return false;
         } else {
+            readPktSize[ceilLog2(size)]++;
             addToReadQueue(pkt);
             readReqs++;
             numReqs++;
         }
     } else if (pkt->isWrite()) {
+        assert(size != 0);
         if (writeQueueFull()) {
-            DPRINTF(DRAM,"Write queue full, not accepting\n");
+            DPRINTF(DRAM, "Write queue full, not accepting\n");
             // remember that we have to retry this port
             retryWrReq = true;
             numWrRetry++;
             return false;
         } else {
+            writePktSize[ceilLog2(size)]++;
             addToWriteQueue(pkt);
             writeReqs++;
             numReqs++;
@@ -562,7 +593,6 @@ SimpleDRAM::recvTimingReq(PacketPtr pkt)
         accessAndRespond(pkt);
     }
 
-
     retryRdReq = false;
     retryWrReq = false;
     return true;
@@ -574,119 +604,116 @@ SimpleDRAM::processRespondEvent()
     DPRINTF(DRAM,
             "processRespondEvent(): Some req has reached its readyTime\n");
 
-     PacketPtr pkt = dramRespQueue.front()->pkt;
+     PacketPtr pkt = respQueue.front()->pkt;
 
      // Actually responds to the requestor
      bytesConsumedRd += pkt->getSize();
      bytesRead += bytesPerCacheLine;
      accessAndRespond(pkt);
 
-     DRAMPacket* dram_pkt = dramRespQueue.front();
-     dramRespQueue.pop_front();
-     delete dram_pkt;
+     delete respQueue.front();
+     respQueue.pop_front();
 
      // Update stats
-     avgRdQLen = dramReadQueue.size() + dramRespQueue.size();
+     avgRdQLen = readQueue.size() + respQueue.size();
 
-     if (!dramRespQueue.empty()){
-         assert(dramRespQueue.front()->readyTime >= curTick());
+     if (!respQueue.empty()) {
+         assert(respQueue.front()->readyTime >= curTick());
          assert(!respondEvent.scheduled());
-         schedule(&respondEvent, dramRespQueue.front()->readyTime);
+         schedule(respondEvent, respQueue.front()->readyTime);
      } else {
          // if there is nothing left in any queue, signal a drain
-         if (dramWriteQueue.empty() && dramReadQueue.empty() &&
+         if (writeQueue.empty() && readQueue.empty() &&
              drainManager) {
              drainManager->signalDrainDone();
              drainManager = NULL;
          }
      }
+
+     // We have made a location in the queue available at this point,
+     // so if there is a read that was forced to wait, retry now
+     if (retryRdReq) {
+         retryRdReq = false;
+         port.sendRetry();
+     }
 }
 
 void
 SimpleDRAM::chooseNextWrite()
 {
-    // This method does the arbitration between requests. The chosen
-    // packet is simply moved to the head of the queue. The other
-    // methods know that this is the place to look. For example, with
-    // FCFS, this method does nothing
-    assert(!dramWriteQueue.empty());
-
-    if (dramWriteQueue.size() == 1) {
-        DPRINTF(DRAMWR, "chooseNextWrite(): Single element, nothing to do\n");
+    // This method does the arbitration between write requests. The
+    // chosen packet is simply moved to the head of the write
+    // queue. The other methods know that this is the place to
+    // look. For example, with FCFS, this method does nothing
+    assert(!writeQueue.empty());
+
+    if (writeQueue.size() == 1) {
+        DPRINTF(DRAMWR, "Single write request, nothing to do\n");
         return;
     }
 
     if (memSchedPolicy == Enums::fcfs) {
-
         // Do nothing, since the correct request is already head
-
     } else if (memSchedPolicy == Enums::frfcfs) {
-
-        list<DRAMPacket*>::iterator i = dramWriteQueue.begin();
+        list<DRAMPacket*>::iterator i = writeQueue.begin();
         bool foundRowHit = false;
-        while (!foundRowHit && i != dramWriteQueue.end()) {
+        while (!foundRowHit && i != writeQueue.end()) {
             DRAMPacket* dram_pkt = *i;
             const Bank& bank = dram_pkt->bank_ref;
             if (bank.openRow == dram_pkt->row) { //FR part
-                DPRINTF(DRAMWR,"Row buffer hit\n");
-                dramWriteQueue.erase(i);
-                dramWriteQueue.push_front(dram_pkt);
+                DPRINTF(DRAMWR, "Write row buffer hit\n");
+                writeQueue.erase(i);
+                writeQueue.push_front(dram_pkt);
                 foundRowHit = true;
             } else { //FCFS part
                 ;
             }
             ++i;
         }
-
     } else
         panic("No scheduling policy chosen\n");
 
-    DPRINTF(DRAMWR, "chooseNextWrite(): Something chosen\n");
+    DPRINTF(DRAMWR, "Selected next write request\n");
 }
 
 bool
-SimpleDRAM::chooseNextReq()
+SimpleDRAM::chooseNextRead()
 {
-    // This method does the arbitration between requests.
-    // The chosen packet is simply moved to the head of the
-    // queue. The other methods know that this is the place
-    // to look. For example, with FCFS, this method does nothing
-    list<DRAMPacket*>::iterator i;
-    DRAMPacket* dram_pkt;
-
-    if (dramReadQueue.empty()){
-        DPRINTF(DRAM, "chooseNextReq(): Returning False\n");
+    // This method does the arbitration between read requests. The
+    // chosen packet is simply moved to the head of the queue. The
+    // other methods know that this is the place to look. For example,
+    // with FCFS, this method does nothing
+    if (readQueue.empty()) {
+        DPRINTF(DRAM, "No read request to select\n");
         return false;
     }
 
-    if (dramReadQueue.size() == 1)
+    // If there is only one request then there is nothing left to do
+    if (readQueue.size() == 1)
         return true;
 
     if (memSchedPolicy == Enums::fcfs) {
-
-        // Do nothing, since the correct request is already head
-
+        // Do nothing, since the request to serve is already the first
+        // one in the read queue
     } else if (memSchedPolicy == Enums::frfcfs) {
-
-        for (i = dramReadQueue.begin() ; i != dramReadQueue.end() ; ++i) {
-            dram_pkt = *i;
+        for (list<DRAMPacket*>::iterator i = readQueue.begin();
+             i != readQueue.end() ; ++i) {
+            DRAMPacket* dram_pkt = *i;
             const Bank& bank = dram_pkt->bank_ref;
+            // Check if it is a row hit
             if (bank.openRow == dram_pkt->row) { //FR part
                 DPRINTF(DRAM, "Row buffer hit\n");
-                dramReadQueue.erase(i);
-                dramReadQueue.push_front(dram_pkt);
+                readQueue.erase(i);
+                readQueue.push_front(dram_pkt);
                 break;
             } else { //FCFS part
                 ;
             }
-
         }
-
     } else
         panic("No scheduling policy chosen!\n");
 
-
-    DPRINTF(DRAM,"chooseNextReq(): Chosen something, returning True\n");
+    DPRINTF(DRAM, "Selected next read request\n");
     return true;
 }
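
Both choosers implement the same FR-FCFS rule: scan the queue in arrival order and move the oldest row hit to the front, falling back to plain FCFS when no request matches an open row. A compact standalone sketch of that selection; the request struct and the open-row array are simplified stand-ins:

    #include <cstdint>
    #include <iostream>
    #include <list>

    struct FakeReq { int bank; int row; };  // simplified stand-in

    // FR-FCFS: move the oldest row hit (if any) to the head of the queue
    static void chooseNext(std::list<FakeReq*>& queue, const int* openRow)
    {
        for (auto i = queue.begin(); i != queue.end(); ++i) {
            if (openRow[(*i)->bank] == (*i)->row) {  // FR part: row hit
                FakeReq* hit = *i;
                queue.erase(i);
                queue.push_front(hit);
                return;
            }
            // FCFS part: otherwise keep arrival order
        }
    }

    int main()
    {
        int openRow[2] = {7, 3};             // currently open row per bank
        FakeReq a{0, 5}, b{1, 3}, c{0, 7};   // b and c are row hits, b older
        std::list<FakeReq*> queue{&a, &b, &c};

        chooseNext(queue, openRow);
        std::cout << "serving bank " << queue.front()->bank
                  << " row " << queue.front()->row << '\n';  // bank 1 row 3
        return 0;
    }
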
 
@@ -705,10 +732,16 @@ SimpleDRAM::accessAndRespond(PacketPtr pkt)
         // access already turned the packet into a response
         assert(pkt->isResponse());
 
+        // @todo someone should pay for this
+        pkt->busFirstWordDelay = pkt->busLastWordDelay = 0;
+
         // queue the packet in the response queue to be sent out the
         // next tick
         port.schedTimingResp(pkt, curTick() + 1);
     } else {
+        // @todo the packet is going to be deleted, and the DRAMPacket
+        // still has a pointer to it
+        pendingDelete.push_back(pkt);
     }
 
     DPRINTF(DRAM, "Done\n");
@@ -759,7 +792,6 @@ SimpleDRAM::estimateLatency(DRAMPacket* dram_pkt, Tick inTime)
             bankLat += tRP + tRCD + tCL;
         }
     } else if (pageMgmt == Enums::close) {
-
         // With a close page policy, no notion of
         // bank.tRASDoneAt
         if (bank.freeAt > inTime)
@@ -772,7 +804,8 @@ SimpleDRAM::estimateLatency(DRAMPacket* dram_pkt, Tick inTime)
     } else
         panic("No page management policy chosen\n");
 
-    DPRINTF(DRAM, "Returning %lld from estimateLatency()\n",accLat);
+    DPRINTF(DRAM, "Returning < %lld, %lld > from estimateLatency()\n",
+            bankLat, accLat);
 
     return make_pair(bankLat, accLat);
 }
@@ -783,6 +816,41 @@ SimpleDRAM::processNextReqEvent()
     scheduleNextReq();
 }
 
+void
+SimpleDRAM::recordActivate(Tick act_tick)
+{
+    assert(actTicks.size() == activationLimit);
+
+    DPRINTF(DRAM, "Activate at tick %d\n", act_tick);
+
+    // sanity check
+    if (actTicks.back() && (act_tick - actTicks.back()) < tXAW) {
+        panic("Got %d activates in window %d (%d - %d) which is smaller "
+              "than %d\n", activationLimit, act_tick - actTicks.back(),
+              act_tick, actTicks.back(), tXAW);
+    }
+
+    // shift the times used for the bookkeeping; the last element
+    // (highest index) is the oldest one and hence the lowest value
+    actTicks.pop_back();
+
+    // record a new activation (in the future)
+    actTicks.push_front(act_tick);
+
+    // cannot activate more than X times in time window tXAW, push the
+    // next one (the X + 1'st activate) to be tXAW away from the
+    // oldest in our window of X
+    if (actTicks.back() && (act_tick - actTicks.back()) < tXAW) {
+        DPRINTF(DRAM, "Enforcing tXAW with X = %d, next activate no earlier "
+                "than %d\n", activationLimit, actTicks.back() + tXAW);
+        for(int i = 0; i < ranksPerChannel; i++)
+            for(int j = 0; j < banksPerRank; j++)
+                // next activate must not happen before end of window
+                banks[i][j].freeAt = std::max(banks[i][j].freeAt,
+                                              actTicks.back() + tXAW);
+    }
+}
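
The bookkeeping above is a sliding window over the last activationLimit activates: the newest time is pushed at the front, the oldest falls off the back, and if the oldest is still within tXAW the next activate is deferred until it ages out. A minimal standalone sketch of the same window; the limit and tick values are assumptions for illustration:

    #include <algorithm>
    #include <cstdint>
    #include <deque>
    #include <initializer_list>
    #include <iostream>

    using Tick = uint64_t;

    int main()
    {
        // assumed parameters: at most 4 activates per tXAW window (tFAW)
        const unsigned activationLimit = 4;
        const Tick tXAW = 120;

        // the oldest activate lives at the back, as in the patch
        std::deque<Tick> actTicks(activationLimit, 0);

        // record an activate and return the earliest tick at which the
        // next activate may be issued
        auto recordActivate = [&](Tick act) -> Tick {
            actTicks.pop_back();
            actTicks.push_front(act);
            // if the oldest of the last activationLimit activates is
            // still inside the window, the next one must wait it out
            if (actTicks.back() && act - actTicks.back() < tXAW)
                return actTicks.back() + tXAW;
            return act;
        };

        Tick earliestNext = 0;
        for (Tick t : {100, 110, 120, 130, 140})
            earliestNext = recordActivate(std::max(t, earliestNext));
        std::cout << "next activate no earlier than " << earliestNext << '\n';
        return 0;
    }
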
+
 void
 SimpleDRAM::doDRAMAccess(DRAMPacket* dram_pkt)
 {
@@ -790,9 +858,6 @@ SimpleDRAM::doDRAMAccess(DRAMPacket* dram_pkt)
     DPRINTF(DRAM, "Timing access to addr %lld, rank/bank/row %d %d %d\n",
             dram_pkt->addr, dram_pkt->rank, dram_pkt->bank, dram_pkt->row);
 
-    assert(curTick() >= prevdramaccess);
-    prevdramaccess = curTick();
-
     // estimate the bank and access latency
     pair<Tick, Tick> lat = estimateLatency(dram_pkt, curTick());
     Tick bankLat = lat.first;
@@ -813,14 +878,18 @@ SimpleDRAM::doDRAMAccess(DRAMPacket* dram_pkt)
         bank.openRow = dram_pkt->row;
         bank.freeAt = curTick() + addDelay + accessLat;
         // If you activated a new row due to this access, the next access
-        // will have to respect tRAS for this bank. Assume tRAS ~= 3 * tRP
-        if (!rowHitFlag)
+        // will have to respect tRAS for this bank. Assume tRAS ~= 3 * tRP.
+        // Also need to account for t_XAW
+        if (!rowHitFlag) {
             bank.tRASDoneAt = bank.freeAt + tRP;
-
+            recordActivate(bank.freeAt - tCL - tRCD); //since this is open page,
+                                                      //no tRP by default
+        }
     } else if (pageMgmt == Enums::close) { // accounting for tRAS also
-        // assuming that tRAS ~= 3 * tRP, and tRAS ~= 4 * tRP, as is common
+        // assuming that tRAS ~= 3 * tRP, and tRC ~= 4 * tRP, as is common
         // (refer Jacob/Ng/Wang and Micron datasheets)
         bank.freeAt = curTick() + addDelay + accessLat + tRP + tRP;
+        recordActivate(bank.freeAt - tRP - tRP - tCL - tRCD); //essentially (freeAt - tRC)
         DPRINTF(DRAM,"doDRAMAccess::bank.freeAt is %lld\n",bank.freeAt);
     } else
         panic("No page management policy chosen\n");
@@ -869,10 +938,10 @@ SimpleDRAM::doDRAMAccess(DRAMPacket* dram_pkt)
                    curTick();
 
     if (!nextReqEvent.scheduled() && !stopReads){
-        schedule(&nextReqEvent, newTime);
+        schedule(nextReqEvent, newTime);
     } else {
         if (newTime < nextReqEvent.when())
-            reschedule(&nextReqEvent, newTime);
+            reschedule(nextReqEvent, newTime);
     }
 
 
@@ -882,43 +951,38 @@ void
 SimpleDRAM::moveToRespQ()
 {
     // Remove from read queue
-    DRAMPacket* dram_pkt = dramReadQueue.front();
-    dramReadQueue.pop_front();
+    DRAMPacket* dram_pkt = readQueue.front();
+    readQueue.pop_front();
 
     // Insert into response queue sorted by readyTime
     // It will be sent back to the requestor at its
     // readyTime
-    if (dramRespQueue.empty()) {
-        dramRespQueue.push_front(dram_pkt);
+    if (respQueue.empty()) {
+        respQueue.push_front(dram_pkt);
         assert(!respondEvent.scheduled());
         assert(dram_pkt->readyTime >= curTick());
-        schedule(&respondEvent, dram_pkt->readyTime);
+        schedule(respondEvent, dram_pkt->readyTime);
     } else {
         bool done = false;
-        std::list<DRAMPacket*>::iterator i = dramRespQueue.begin();
-        while (!done && i != dramRespQueue.end()) {
+        list<DRAMPacket*>::iterator i = respQueue.begin();
+        while (!done && i != respQueue.end()) {
             if ((*i)->readyTime > dram_pkt->readyTime) {
-                dramRespQueue.insert(i, dram_pkt);
+                respQueue.insert(i, dram_pkt);
                 done = true;
             }
             ++i;
         }
 
         if (!done)
-            dramRespQueue.push_back(dram_pkt);
+            respQueue.push_back(dram_pkt);
 
         assert(respondEvent.scheduled());
 
-        if (dramRespQueue.front()->readyTime < respondEvent.when()) {
-            assert(dramRespQueue.front()->readyTime >= curTick());
-            reschedule(&respondEvent, dramRespQueue.front()->readyTime);
+        if (respQueue.front()->readyTime < respondEvent.when()) {
+            assert(respQueue.front()->readyTime >= curTick());
+            reschedule(respondEvent, respQueue.front()->readyTime);
         }
     }
-
-    if (retryRdReq) {
-         retryRdReq = false;
-         port.sendRetry();
-     }
 }
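
moveToRespQ keeps the response queue ordered by readyTime with a linear scan, which stays cheap for the short queues involved. For reference, a self-contained sketch of the same insert-sorted pattern on a plain std::list; the packet struct is a made-up stand-in, not the DRAMPacket from the patch:

    #include <cstdint>
    #include <iostream>
    #include <list>

    using Tick = uint64_t;

    struct FakePacket { Tick readyTime; };  // stand-in for DRAMPacket

    int main()
    {
        std::list<FakePacket*> respQueue;

        // insert so that the queue stays sorted by ascending readyTime
        auto insertSorted = [&](FakePacket* pkt) {
            for (auto i = respQueue.begin(); i != respQueue.end(); ++i) {
                if ((*i)->readyTime > pkt->readyTime) {
                    respQueue.insert(i, pkt);
                    return;
                }
            }
            respQueue.push_back(pkt);  // latest of them all
        };

        FakePacket a{30}, b{10}, c{20};
        insertSorted(&a);
        insertSorted(&b);
        insertSorted(&c);

        for (const FakePacket* p : respQueue)
            std::cout << p->readyTime << ' ';  // prints: 10 20 30
        std::cout << '\n';
        return 0;
    }
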
 
 void
@@ -926,16 +990,20 @@ SimpleDRAM::scheduleNextReq()
 {
     DPRINTF(DRAM, "Reached scheduleNextReq()\n");
 
-    // Figure out which request goes next, and move it to front()
-    if (!chooseNextReq())
-        return;
-
-    doDRAMAccess(dramReadQueue.front());
+    // Figure out which read request goes next, and move it to the
+    // front of the read queue
+    if (!chooseNextRead()) {
+        // In the case there is no read request to go next, see if we
+        // are asked to drain, and if so trigger writes, this also
+        // ensures that if we hit the write limit we will do this
+        // multiple times until we are completely drained
+        if (drainManager && !writeQueue.empty() && !writeEvent.scheduled())
+            triggerWrites();
+    } else {
+        doDRAMAccess(readQueue.front());
+    }
 }
 
-
-
-
 Tick
 SimpleDRAM::maxBankFreeAt() const
 {
@@ -959,7 +1027,7 @@ SimpleDRAM::processRefreshEvent()
         for(int j = 0; j < banksPerRank; j++)
             banks[i][j].freeAt = banksFree;
 
-    schedule(&refreshEvent, curTick() + tREFI);
+    schedule(refreshEvent, curTick() + tREFI);
 }
 
 void
@@ -1084,27 +1152,22 @@ SimpleDRAM::regStats()
     writeRowHitRate = (writeRowHits / writeReqs) * 100;
 
     readPktSize
-        .init(log2(bytesPerCacheLine)+3)
+        .init(ceilLog2(bytesPerCacheLine) + 1)
         .name(name() + ".readPktSize")
         .desc("Categorize read packet sizes");
 
      writePktSize
-        .init(log2(bytesPerCacheLine)+3)
+        .init(ceilLog2(bytesPerCacheLine) + 1)
         .name(name() + ".writePktSize")
-        .desc("categorize write packet sizes");
-
-     neitherPktSize
-        .init(log2(bytesPerCacheLine)+3)
-        .name(name() + ".neitherpktsize")
-        .desc("categorize neither packet sizes");
+        .desc("Categorize write packet sizes");
 
      rdQLenPdf
-        .init(readBufferSize + 1)
+        .init(readBufferSize)
         .name(name() + ".rdQLenPdf")
         .desc("What read queue length does an incoming req see");
 
      wrQLenPdf
-        .init(writeBufferSize + 1)
+        .init(writeBufferSize)
         .name(name() + ".wrQLenPdf")
         .desc("What write queue length does an incoming req see");
 
@@ -1203,10 +1266,20 @@ SimpleDRAM::drain(DrainManager *dm)
 
     // if there is anything in any of our internal queues, keep track
     // of that as well
-    if (!(dramWriteQueue.empty() && dramReadQueue.empty() &&
-          dramRespQueue.empty())) {
+    if (!(writeQueue.empty() && readQueue.empty() &&
+          respQueue.empty())) {
+        DPRINTF(Drain, "DRAM controller not drained, write: %d, read: %d,"
+                " resp: %d\n", writeQueue.size(), readQueue.size(),
+                respQueue.size());
         ++count;
         drainManager = dm;
+        // the only part that is not drained automatically over time
+        // is the write queue, thus trigger writes if there are any
+        // waiting and no reads waiting, otherwise wait until the
+        // reads are done
+        if (readQueue.empty() && !writeQueue.empty() &&
+            !writeEvent.scheduled())
+            triggerWrites();
     }
 
     if (count)