backendLatency(p->static_backend_latency),
busBusyUntil(0), writeStartTime(0),
prevArrival(0), numReqs(0),
- numWritesThisTime(0), newTime(0)
+ numWritesThisTime(0), newTime(0),
+ startTickPrechargeAll(0), numBanksActive(0)
{
// create the bank states based on the dimensions of the ranks and
// banks
pktsServicedByWrQ++;
DPRINTF(DRAM, "Read to addr %lld with size %d serviced by "
"write queue\n", addr, size);
- bytesRead += burstSize;
+ bytesReadWrQ += burstSize;
bytesConsumedRd += size;
break;
}
// Actually responds to the requestor
bytesConsumedRd += dram_pkt->size;
- bytesRead += burstSize;
+ bytesReadDRAM += burstSize;
if (dram_pkt->burstHelper) {
// it is a split packet
dram_pkt->burstHelper->burstsServiced++;
DPRINTF(DRAM, "Activate at tick %d\n", act_tick);
+ // Track the time for which all the banks stay precharged.
+ // startTickPrechargeAll is the tick at which all the banks were last
+ // precharged. The difference between act_tick and startTickPrechargeAll
+ // gives the time for which the DRAM receives no accesses after a refresh,
+ // or after a page is closed under the closed-page or open-adaptive
+ // page policy.
+ if ((numBanksActive == 0) && (act_tick > startTickPrechargeAll)) {
+ prechargeAllTime += act_tick - startTickPrechargeAll;
+ }
+
+ // No need to update the number of active banks under the closed-page
+ // policy, as only one bank is activated at any given point and it is
+ // precharged again immediately after the access
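+ // Under the open and open-adaptive policies the row stays open, so the
+ // bank counts as active until it is precharged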
+ if (pageMgmt == Enums::open || pageMgmt == Enums::open_adaptive)
+ ++numBanksActive;
+
// start by enforcing tRRD
for(int i = 0; i < banksPerRank; i++) {
// next activate must not happen before tRRD
if (!got_more_hits && got_bank_conflict) {
bank.openRow = -1;
bank.freeAt = std::max(bank.freeAt, bank.tRASDoneAt) + tRP;
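+ // This bank is being auto-precharged, so it no longer counts as active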
+ --numBanksActive;
+ if (numBanksActive == 0) {
+ startTickPrechargeAll = std::max(startTickPrechargeAll,
+ bank.freeAt);
+ DPRINTF(DRAM, "All banks precharged at tick: %ld\n",
+ startTickPrechargeAll);
+ }
+ DPRINTF(DRAM, "Auto-precharged bank: %d\n", dram_pkt->bankId);
}
}
actTick + tRCD + tCL + tRP);
DPRINTF(DRAM, "doDRAMAccess::bank.freeAt is %lld\n", bank.freeAt);
bytesPerActivate.sample(burstSize);
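+ // Under the closed-page policy the bank is precharged as part of the
+ // access, so all banks are back in the precharged state once this bank
+ // is free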
+ startTickPrechargeAll = std::max(startTickPrechargeAll, bank.freeAt);
} else
panic("No page management policy chosen\n");
Tick banksFree = std::max(curTick(), maxBankFreeAt()) + tRFC;
for(int i = 0; i < ranksPerChannel; i++)
- for(int j = 0; j < banksPerRank; j++)
+ for(int j = 0; j < banksPerRank; j++) {
banks[i][j].freeAt = banksFree;
+ banks[i][j].openRow = -1;
+ }
+
+ // All banks are precharged after the refresh; reset the active bank
+ // count and record the tick from which all banks stay precharged
+ numBanksActive = 0;
+ startTickPrechargeAll = banksFree;
schedule(refreshEvent, curTick() + tREFI);
}
.desc("Bytes accessed per row activation")
.flags(nozero);
- bytesRead
- .name(name() + ".bytesRead")
- .desc("Total number of bytes read from memory");
+ bytesReadDRAM
+ .name(name() + ".bytesReadDRAM")
+ .desc("Total number of bytes read from DRAM");
+
+ bytesReadWrQ
+ .name(name() + ".bytesReadWrQ")
+ .desc("Total number of bytes read from write queue");
bytesWritten
.name(name() + ".bytesWritten")
.desc("Average achieved read bandwidth in MB/s")
.precision(2);
- avgRdBW = (bytesRead / 1000000) / simSeconds;
+ avgRdBW = ((bytesReadDRAM + bytesReadWrQ) / 1000000) / simSeconds;
avgWrBW
.name(name() + ".avgWrBW")
.precision(2);
avgGap = totGap / (readReqs + writeReqs);
+
+ // Stats for DRAM Power calculation based on Micron datasheet
+ busUtilRead
+ .name(name() + ".busUtilRead")
+ .desc("Data bus utilization in percentage for reads")
+ .precision(2);
+
+ busUtilRead = avgRdBW / peakBW * 100;
+
+ busUtilWrite
+ .name(name() + ".busUtilWrite")
+ .desc("Data bus utilization in percentage for writes")
+ .precision(2);
+
+ busUtilWrite = avgWrBW / peakBW * 100;
+
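+ // Reads serviced by the write queue never access a DRAM row, so they
+ // are excluded from the page hit rate denominator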
+ pageHitRate
+ .name(name() + ".pageHitRate")
+ .desc("Row buffer hit rate, read and write combined")
+ .precision(2);
+
+ pageHitRate = (writeRowHits + readRowHits) / (writeReqs + readReqs -
+ servicedByWrQ) * 100;
+
+ prechargeAllPercent
+ .name(name() + ".prechargeAllPercent")
+ .desc("Percentage of time for which DRAM has all the banks in "
+ "precharge state")
+ .precision(2);
+
+ prechargeAllPercent = prechargeAllTime / simTicks * 100;
}
void
Stats::Scalar writeReqs;
Stats::Scalar readBursts;
Stats::Scalar writeBursts;
- Stats::Scalar bytesRead;
+ Stats::Scalar bytesReadDRAM;
+ Stats::Scalar bytesReadWrQ;
Stats::Scalar bytesWritten;
Stats::Scalar bytesConsumedRd;
Stats::Scalar bytesConsumedWr;
Stats::Formula avgConsumedWrBW;
Stats::Formula peakBW;
Stats::Formula busUtil;
+ Stats::Formula busUtilRead;
+ Stats::Formula busUtilWrite;
// Average queue lengths
Stats::Average avgRdQLen;
Stats::Formula writeRowHitRate;
Stats::Formula avgGap;
+ // DRAM Power Calculation
+ Stats::Formula pageHitRate;
+ Stats::Formula prechargeAllPercent;
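+ // Total time for which all the banks are precharged, in ticks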
+ Stats::Scalar prechargeAllTime;
+
+ // Tick at which all the banks last became precharged; used to
+ // accumulate the time for which all banks stay precharged
+ Tick startTickPrechargeAll;
+ // To track number of banks which are currently active
+ unsigned int numBanksActive;
+
/** @todo this is a temporary workaround until the 4-phase code is
* committed. upstream caches needs this packet until true is returned, so
* hold onto it for deletion until a subsequent call