* Authors: Andreas Hansson
* Ani Udipi
* Neha Agarwal
+ * Omar Naji
*/
#include "base/bitfield.hh"
port(name() + ".port", *this),
retryRdReq(false), retryWrReq(false),
busState(READ),
- nextReqEvent(this), respondEvent(this), activateEvent(this),
- prechargeEvent(this), refreshEvent(this), powerEvent(this),
+ nextReqEvent(this), respondEvent(this),
drainManager(NULL),
deviceSize(p->device_size),
deviceBusWidth(p->device_bus_width), burstLength(p->burst_length),
maxAccessesPerRow(p->max_accesses_per_row),
frontendLatency(p->static_frontend_latency),
backendLatency(p->static_backend_latency),
- busBusyUntil(0), refreshDueAt(0), refreshState(REF_IDLE),
- pwrStateTrans(PWR_IDLE), pwrState(PWR_IDLE), prevArrival(0),
- nextReqTime(0), pwrStateTick(0), numBanksActive(0),
- activeRank(0), timeStampOffset(0)
+ busBusyUntil(0), prevArrival(0),
+ nextReqTime(0), activeRank(0), timeStampOffset(0)
{
- // create the bank states based on the dimensions of the ranks and
- // banks
- banks.resize(ranksPerChannel);
-
- //create list of drampower objects. For each rank 1 drampower instance.
for (int i = 0; i < ranksPerChannel; i++) {
- DRAMPower drampower = DRAMPower(p, false);
- rankPower.emplace_back(drampower);
- }
+ Rank* rank = new Rank(*this, p);
+ ranks.push_back(rank);
- actTicks.resize(ranksPerChannel);
- for (size_t c = 0; c < ranksPerChannel; ++c) {
- banks[c].resize(banksPerRank);
- actTicks[c].resize(activationLimit, 0);
- }
+ rank->actTicks.resize(activationLimit, 0);
+ rank->banks.resize(banksPerRank);
+ rank->rank = i;
- // set the bank indices
- for (int r = 0; r < ranksPerChannel; r++) {
for (int b = 0; b < banksPerRank; b++) {
- banks[r][b].rank = r;
- banks[r][b].bank = b;
+ rank->banks[b].bank = b;
// GDDR addressing of banks to BG is linear.
// Here we assume that all DRAM generations address bank groups as
// follows:
// banks 0,4,8,12 are in bank group 0
// banks 1,5,9,13 are in bank group 1
// banks 2,6,10,14 are in bank group 2
// banks 3,7,11,15 are in bank group 3
- banks[r][b].bankgr = b % bankGroupsPerRank;
+ rank->banks[b].bankgr = b % bankGroupsPerRank;
} else {
// No bank groups; simply assign to bank number
- banks[r][b].bankgr = b;
+ rank->banks[b].bankgr = b;
}
}
}
{
// timestamp offset should be in clock cycles for DRAMPower
timeStampOffset = divCeil(curTick(), tCK);
+
// per rank, update the start tick of the power-state accounting
// to the current tick and kick off the first refresh
- pwrStateTick = curTick();
+ for (auto r : ranks) {
+ r->startup(curTick() + tREFI - tRP);
+ }
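+ // note that all ranks are given the same first refresh tick, so
+ // the initial refreshes coincide across the ranks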
// shift the bus busy time sufficiently far ahead that we never
// have to worry about negative values when computing the time for
// the next request, this will add an insignificant bubble at the
// start of simulation
busBusyUntil = curTick() + tRP + tRCD + tCL;
-
- // kick off the refresh, and give ourselves enough time to
- // precharge
- schedule(refreshEvent, curTick() + tREFI - tRP);
}
Tick
// later
uint16_t bank_id = banksPerRank * rank + bank;
return new DRAMPacket(pkt, isRead, rank, bank, row, bank_id, dramPktAddr,
- size, banks[rank][bank]);
+ size, ranks[rank]->banks[bank], *ranks[rank]);
}
void
}
}
-void
+bool
DRAMCtrl::chooseNext(std::deque<DRAMPacket*>& queue, bool switched_cmd_type)
{
// This method does the arbitration between requests. The chosen
// FCFS, this method does nothing
assert(!queue.empty());
+ // bool to indicate if a packet to an available rank is found
+ bool found_packet = false;
if (queue.size() == 1) {
- DPRINTF(DRAM, "Single request, nothing to do\n");
- return;
+ DRAMPacket* dram_pkt = queue.front();
+ // an available rank is one that is idle from a refresh point
+ // of view (refresh state REF_IDLE)
+ if (ranks[dram_pkt->rank]->isAvailable()) {
+ found_packet = true;
+ DPRINTF(DRAM, "Single request, going to a free rank\n");
+ } else {
+ DPRINTF(DRAM, "Single request, going to a busy rank\n");
+ }
+ return found_packet;
}
if (memSchedPolicy == Enums::fcfs) {
- // Do nothing, since the correct request is already head
+ // check if there is a packet going to a free rank and, if so,
+ // move it to the front of the queue
+ for (auto i = queue.begin(); i != queue.end(); ++i) {
+ DRAMPacket* dram_pkt = *i;
+ if (ranks[dram_pkt->rank]->isAvailable()) {
+ queue.erase(i);
+ queue.push_front(dram_pkt);
+ found_packet = true;
+ break;
+ }
+ }
} else if (memSchedPolicy == Enums::frfcfs) {
- reorderQueue(queue, switched_cmd_type);
+ found_packet = reorderQueue(queue, switched_cmd_type);
} else
panic("No scheduling policy chosen\n");
+ return found_packet;
}
-void
+bool
DRAMCtrl::reorderQueue(std::deque<DRAMPacket*>& queue, bool switched_cmd_type)
{
// Only determine this when needed
// Search for row hits first, if no row hit is found then schedule the
// packet to one of the earliest banks available
+ bool found_packet = false;
bool found_earliest_pkt = false;
bool found_prepped_diff_rank_pkt = false;
- auto selected_pkt_it = queue.begin();
+ auto selected_pkt_it = queue.end();
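+ // queue.end() acts as a sentinel: if it is still the selected
+ // iterator after the scan below, no packet to an available rank
+ // was found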
for (auto i = queue.begin(); i != queue.end() ; ++i) {
DRAMPacket* dram_pkt = *i;
const Bank& bank = dram_pkt->bankRef;
+ // only consider the packet if its rank is not busy refreshing
// Check if it is a row hit
- if (bank.openRow == dram_pkt->row) {
- if (dram_pkt->rank == activeRank || switched_cmd_type) {
- // FCFS within the hits, giving priority to commands
- // that access the same rank as the previous burst
- // to minimize bus turnaround delays
- // Only give rank prioity when command type is not changing
- DPRINTF(DRAM, "Row buffer hit\n");
- selected_pkt_it = i;
- break;
- } else if (!found_prepped_diff_rank_pkt) {
- // found row hit for command on different rank than prev burst
- selected_pkt_it = i;
- found_prepped_diff_rank_pkt = true;
- }
- } else if (!found_earliest_pkt & !found_prepped_diff_rank_pkt) {
- // No row hit and
- // haven't found an entry with a row hit to a new rank
- if (earliest_banks == 0)
- // Determine entries with earliest bank prep delay
- // Function will give priority to commands that access the
- // same rank as previous burst and can prep the bank seamlessly
- earliest_banks = minBankPrep(queue, switched_cmd_type);
-
- // FCFS - Bank is first available bank
- if (bits(earliest_banks, dram_pkt->bankId, dram_pkt->bankId)) {
- // Remember the packet to be scheduled to one of the earliest
- // banks available, FCFS amongst the earliest banks
- selected_pkt_it = i;
- found_earliest_pkt = true;
+ if (dram_pkt->rankRef.isAvailable()) {
+ if (bank.openRow == dram_pkt->row) {
+ if (dram_pkt->rank == activeRank || switched_cmd_type) {
+ // FCFS within the hits, giving priority to commands
+ // that access the same rank as the previous burst
+ // to minimize bus turnaround delays
+ // Only give rank priority when command type is
+ // not changing
+ DPRINTF(DRAM, "Row buffer hit\n");
+ selected_pkt_it = i;
+ break;
+ } else if (!found_prepped_diff_rank_pkt) {
+ // found row hit for command on different rank
+ // than prev burst
+ selected_pkt_it = i;
+ found_prepped_diff_rank_pkt = true;
+ }
+ } else if (!found_earliest_pkt && !found_prepped_diff_rank_pkt) {
+ // no row hit found yet, and no row hit to a new rank either;
+ // the packet targets a rank that is not waiting for a refresh
+ if (earliest_banks == 0)
+ // Determine entries with earliest bank prep delay
+ // Function will give priority to commands that access the
+ // same rank as previous burst and can prep
+ // the bank seamlessly
+ earliest_banks = minBankPrep(queue, switched_cmd_type);
+
+ // FCFS - Bank is first available bank
+ if (bits(earliest_banks, dram_pkt->bankId,
+ dram_pkt->bankId)) {
+ // Remember the packet to be scheduled to one of
+ // the earliest banks available, FCFS amongst the
+ // earliest banks
+ selected_pkt_it = i;
+ // the rank of this packet is available, so remember it as
+ // the earliest-bank candidate
+ found_earliest_pkt = true;
+ }
}
}
}
- DRAMPacket* selected_pkt = *selected_pkt_it;
- queue.erase(selected_pkt_it);
- queue.push_front(selected_pkt);
+ if (selected_pkt_it != queue.end()) {
+ DRAMPacket* selected_pkt = *selected_pkt_it;
+ queue.erase(selected_pkt_it);
+ queue.push_front(selected_pkt);
+ found_packet = true;
+ }
+ return found_packet;
}
void
}
void
-DRAMCtrl::activateBank(Bank& bank, Tick act_tick, uint32_t row)
+DRAMCtrl::activateBank(Rank& rank_ref, Bank& bank_ref,
+ Tick act_tick, uint32_t row)
{
- // get the rank index from the bank
- uint8_t rank = bank.rank;
-
- assert(actTicks[rank].size() == activationLimit);
+ assert(rank_ref.actTicks.size() == activationLimit);
DPRINTF(DRAM, "Activate at tick %d\n", act_tick);
// update the open row
- assert(bank.openRow == Bank::NO_ROW);
- bank.openRow = row;
+ assert(bank_ref.openRow == Bank::NO_ROW);
+ bank_ref.openRow = row;
// start counting anew, this covers both the case when we
// auto-precharged, and when this access is forced to
// precharge
- bank.bytesAccessed = 0;
- bank.rowAccesses = 0;
+ bank_ref.bytesAccessed = 0;
+ bank_ref.rowAccesses = 0;
- ++numBanksActive;
- assert(numBanksActive <= banksPerRank * ranksPerChannel);
+ ++rank_ref.numBanksActive;
+ assert(rank_ref.numBanksActive <= banksPerRank);
DPRINTF(DRAM, "Activate bank %d, rank %d at tick %lld, now got %d active\n",
- bank.bank, bank.rank, act_tick, numBanksActive);
+ bank_ref.bank, rank_ref.rank, act_tick,
+ rank_ref.numBanksActive);
- rankPower[bank.rank].powerlib.doCommand(MemCommand::ACT, bank.bank,
- divCeil(act_tick, tCK) -
- timeStampOffset);
+ rank_ref.power.powerlib.doCommand(MemCommand::ACT, bank_ref.bank,
+ divCeil(act_tick, tCK) -
+ timeStampOffset);
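+ // DRAMPower expects command timestamps in memory clock cycles,
+ // hence the division by tCK and the subtraction of the offset
+ // recorded at startup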
DPRINTF(DRAMPower, "%llu,ACT,%d,%d\n", divCeil(act_tick, tCK) -
- timeStampOffset, bank.bank, bank.rank);
+ timeStampOffset, bank_ref.bank, rank_ref.rank);
// The next access has to respect tRAS for this bank
- bank.preAllowedAt = act_tick + tRAS;
+ bank_ref.preAllowedAt = act_tick + tRAS;
// Respect the row-to-column command delay
- bank.colAllowedAt = std::max(act_tick + tRCD, bank.colAllowedAt);
+ bank_ref.colAllowedAt = std::max(act_tick + tRCD, bank_ref.colAllowedAt);
// start by enforcing tRRD
for (int i = 0; i < banksPerRank; i++) {
// next activate to any bank in this rank must not happen
// before tRRD
- if (bankGroupArch && (bank.bankgr == banks[rank][i].bankgr)) {
+ if (bankGroupArch && (bank_ref.bankgr == rank_ref.banks[i].bankgr)) {
// bank group architecture requires longer delays between
// ACT commands within the same bank group. Use tRRD_L
// in this case
- banks[rank][i].actAllowedAt = std::max(act_tick + tRRD_L,
- banks[rank][i].actAllowedAt);
+ rank_ref.banks[i].actAllowedAt = std::max(act_tick + tRRD_L,
+ rank_ref.banks[i].actAllowedAt);
} else {
// use shorter tRRD value when either
// 1) bank group architecture is not supported
// 2) bank is in a different bank group
- banks[rank][i].actAllowedAt = std::max(act_tick + tRRD,
- banks[rank][i].actAllowedAt);
+ rank_ref.banks[i].actAllowedAt = std::max(act_tick + tRRD,
+ rank_ref.banks[i].actAllowedAt);
}
}
// next, we deal with tXAW, if the activation limit is disabled
// then we directly schedule an activate power event
- if (!actTicks[rank].empty()) {
+ if (!rank_ref.actTicks.empty()) {
// sanity check
- if (actTicks[rank].back() &&
- (act_tick - actTicks[rank].back()) < tXAW) {
+ if (rank_ref.actTicks.back() &&
+ (act_tick - rank_ref.actTicks.back()) < tXAW) {
panic("Got %d activates in window %d (%llu - %llu) which "
"is smaller than %llu\n", activationLimit, act_tick -
- actTicks[rank].back(), act_tick, actTicks[rank].back(),
- tXAW);
+ rank_ref.actTicks.back(), act_tick,
+ rank_ref.actTicks.back(), tXAW);
}
// shift the times used for the book keeping, the last element
// (highest index) is the oldest one and hence the lowest value
- actTicks[rank].pop_back();
+ rank_ref.actTicks.pop_back();
// record a new activation (in the future)
- actTicks[rank].push_front(act_tick);
+ rank_ref.actTicks.push_front(act_tick);
// cannot activate more than X times in time window tXAW, push the
// next one (the X + 1'st activate) to be tXAW away from the
// oldest in our window of X
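+ // for example (illustrative numbers): with X = 4 and
+ // tXAW = 40ns, activates at 0, 5, 10 and 15ns mean the 5th
+ // activate cannot be issued before 0 + 40 = 40ns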
- if (actTicks[rank].back() &&
- (act_tick - actTicks[rank].back()) < tXAW) {
+ if (rank_ref.actTicks.back() &&
+ (act_tick - rank_ref.actTicks.back()) < tXAW) {
DPRINTF(DRAM, "Enforcing tXAW with X = %d, next activate "
"no earlier than %llu\n", activationLimit,
- actTicks[rank].back() + tXAW);
+ rank_ref.actTicks.back() + tXAW);
for (int j = 0; j < banksPerRank; j++)
// next activate must not happen before end of window
- banks[rank][j].actAllowedAt =
- std::max(actTicks[rank].back() + tXAW,
- banks[rank][j].actAllowedAt);
+ rank_ref.banks[j].actAllowedAt =
+ std::max(rank_ref.actTicks.back() + tXAW,
+ rank_ref.banks[j].actAllowedAt);
}
}
// at the point when this activate takes place, make sure we
// transition to the active power state
- if (!activateEvent.scheduled())
- schedule(activateEvent, act_tick);
- else if (activateEvent.when() > act_tick)
+ if (!rank_ref.activateEvent.scheduled())
+ schedule(rank_ref.activateEvent, act_tick);
+ else if (rank_ref.activateEvent.when() > act_tick)
// move it sooner in time
- reschedule(activateEvent, act_tick);
+ reschedule(rank_ref.activateEvent, act_tick);
}
void
-DRAMCtrl::processActivateEvent()
-{
- // we should transition to the active state as soon as any bank is active
- if (pwrState != PWR_ACT)
- // note that at this point numBanksActive could be back at
- // zero again due to a precharge scheduled in the future
- schedulePowerEvent(PWR_ACT, curTick());
-}
-
-void
-DRAMCtrl::prechargeBank(Bank& bank, Tick pre_at, bool trace)
+DRAMCtrl::prechargeBank(Rank& rank_ref, Bank& bank, Tick pre_at, bool trace)
{
// make sure the bank has an open row
assert(bank.openRow != Bank::NO_ROW);
bank.actAllowedAt = std::max(bank.actAllowedAt, pre_done_at);
- assert(numBanksActive != 0);
- --numBanksActive;
+ assert(rank_ref.numBanksActive != 0);
+ --rank_ref.numBanksActive;
DPRINTF(DRAM, "Precharging bank %d, rank %d at tick %lld, now got "
- "%d active\n", bank.bank, bank.rank, pre_at, numBanksActive);
+ "%d active\n", bank.bank, rank_ref.rank, pre_at,
+ rank_ref.numBanksActive);
if (trace) {
- rankPower[bank.rank].powerlib.doCommand(MemCommand::PRE, bank.bank,
+ rank_ref.power.powerlib.doCommand(MemCommand::PRE, bank.bank,
divCeil(pre_at, tCK) -
timeStampOffset);
DPRINTF(DRAMPower, "%llu,PRE,%d,%d\n", divCeil(pre_at, tCK) -
- timeStampOffset, bank.bank, bank.rank);
+ timeStampOffset, bank.bank, rank_ref.rank);
}
// if we look at the current number of active banks we might be
// tempted to think the DRAM is now idle, however this can be
// would have reached the idle state, so schedule an event and
// rather check once we actually make it to the point in time when
// the (last) precharge takes place
- if (!prechargeEvent.scheduled())
- schedule(prechargeEvent, pre_done_at);
- else if (prechargeEvent.when() < pre_done_at)
- reschedule(prechargeEvent, pre_done_at);
-}
-
-void
-DRAMCtrl::processPrechargeEvent()
-{
- // if we reached zero, then special conditions apply as we track
- // if all banks are precharged for the power models
- if (numBanksActive == 0) {
- // we should transition to the idle state when the last bank
- // is precharged
- schedulePowerEvent(PWR_IDLE, curTick());
- }
+ if (!rank_ref.prechargeEvent.scheduled())
+ schedule(rank_ref.prechargeEvent, pre_done_at);
+ else if (rank_ref.prechargeEvent.when() < pre_done_at)
+ reschedule(rank_ref.prechargeEvent, pre_done_at);
}
void
DPRINTF(DRAM, "Timing access to addr %lld, rank/bank/row %d %d %d\n",
dram_pkt->addr, dram_pkt->rank, dram_pkt->bank, dram_pkt->row);
+ // get the rank
+ Rank& rank = dram_pkt->rankRef;
+
// get the bank
Bank& bank = dram_pkt->bankRef;
// If there is a page open, precharge it.
if (bank.openRow != Bank::NO_ROW) {
- prechargeBank(bank, std::max(bank.preAllowedAt, curTick()));
+ prechargeBank(rank, bank, std::max(bank.preAllowedAt, curTick()));
}
// next we need to account for the delay in activating the
// Record the activation and deal with all the global timing
// constraints caused by a new activation (tRRD and tXAW)
- activateBank(bank, act_tick, dram_pkt->row);
+ activateBank(rank, bank, act_tick, dram_pkt->row);
// issue the command as early as possible
cmd_at = bank.colAllowedAt;
// before tCCD_L. Different bank group timing requirement is
// tBURST; Add tCS for different ranks
if (dram_pkt->rank == j) {
- if (bankGroupArch && (bank.bankgr == banks[j][i].bankgr)) {
+ if (bankGroupArch &&
+ (bank.bankgr == ranks[j]->banks[i].bankgr)) {
// bank group architecture requires longer delays between
// RD/WR burst commands to the same bank group.
// Use tCCD_L in this case
// Add tCS to account for rank-to-rank bus delay requirements
cmd_dly = tBURST + tCS;
}
- banks[j][i].colAllowedAt = std::max(cmd_at + cmd_dly,
- banks[j][i].colAllowedAt);
+ ranks[j]->banks[i].colAllowedAt = std::max(cmd_at + cmd_dly,
+ ranks[j]->banks[i].colAllowedAt);
}
}
if (auto_precharge) {
// if auto-precharge push a PRE command at the correct tick to the
// list used by DRAMPower library to calculate power
- prechargeBank(bank, std::max(curTick(), bank.preAllowedAt));
+ prechargeBank(rank, bank, std::max(curTick(), bank.preAllowedAt));
DPRINTF(DRAM, "Auto-precharged bank: %d\n", dram_pkt->bankId);
}
DPRINTF(DRAM, "Access to %lld, ready at %lld bus busy until %lld.\n",
dram_pkt->addr, dram_pkt->readyTime, busBusyUntil);
- rankPower[dram_pkt->rank].powerlib.doCommand(command, dram_pkt->bank,
+ dram_pkt->rankRef.power.powerlib.doCommand(command, dram_pkt->bank,
divCeil(cmd_at, tCK) -
timeStampOffset);
void
DRAMCtrl::processNextReqEvent()
{
+ int busyRanks = 0;
+ for (auto r : ranks) {
+ if (!r->isAvailable()) {
+ // rank is busy refreshing
+ busyRanks++;
+
+ // let the rank know that if it was waiting to drain, it
+ // is now done and ready to proceed
+ r->checkDrainDone();
+ }
+ }
+
+ if (busyRanks == ranksPerChannel) {
+ // if all ranks are refreshing, wait for them to finish and
+ // stall this state machine without taking any further action;
+ // do not schedule a new nextReqEvent
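+ // a rank that completes its refresh will schedule nextReqEvent
+ // again through its power-state machine (processPowerEvent)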
+ return;
+ }
+
// pre-emptively set to false. Overwrite if in READ_TO_WRITE
// or WRITE_TO_READ state
bool switched_cmd_type = false;
switched_cmd_type = true;
}
- if (refreshState != REF_IDLE) {
- // if a refresh waiting for this event loop to finish, then hand
- // over now, and do not schedule a new nextReqEvent
- if (refreshState == REF_DRAIN) {
- DPRINTF(DRAM, "Refresh drain done, now precharging\n");
-
- refreshState = REF_PRE;
-
- // hand control back to the refresh event loop
- schedule(refreshEvent, curTick());
- }
-
- // let the refresh finish before issuing any further requests
- return;
- }
-
// when we get here it is either a read or a write
if (busState == READ) {
return;
}
} else {
+ // bool to check if there is a read to a free rank
+ bool found_read = false;
+
// Figure out which read request goes next, and move it to the
// front of the read queue
- chooseNext(readQueue, switched_cmd_type);
+ found_read = chooseNext(readQueue, switched_cmd_type);
+
+ // if no read to an available rank is found then return
+ // at this point. There could be writes to the available ranks
+ // which are above the required threshold. However, to
+ // avoid adding more complexity to the code, return and wait
+ // for a refresh event to kick things into action again.
+ if (!found_read)
+ return;
DRAMPacket* dram_pkt = readQueue.front();
-
+ assert(dram_pkt->rankRef.isAvailable());
// here we get a bit creative and shift the bus busy time not
// just the tWTR, but also a CAS latency to capture the fact
// that we are allowed to prepare a new bank, but not issue a
busState = READ_TO_WRITE;
}
} else {
- chooseNext(writeQueue, switched_cmd_type);
+ // bool to check if there is a write to a free rank
+ bool found_write = false;
+
+ found_write = chooseNext(writeQueue, switched_cmd_type);
+
+ // if no writes to an available rank are found then return.
+ // There could be reads to the available ranks. However, to avoid
+ // adding more complexity to the code, return at this point and wait
+ // for a refresh event to kick things into action again.
+ if (!found_write)
+ return;
+
DRAMPacket* dram_pkt = writeQueue.front();
+ assert(dram_pkt->rankRef.isAvailable());
// sanity check
assert(dram_pkt->size <= burstSize);
// nothing to do
}
}
-
- schedule(nextReqEvent, std::max(nextReqTime, curTick()));
+ // It is possible that a refresh to another rank kicks things back into
+ // action before reaching this point.
+ if (!nextReqEvent.scheduled())
+ schedule(nextReqEvent, std::max(nextReqTime, curTick()));
// If there is space available and we have writes waiting then let
// them retry. This is done here to ensure that the retry does not
// determine if we have queued transactions targeting the
// bank in question
vector<bool> got_waiting(ranksPerChannel * banksPerRank, false);
- for (auto p = queue.begin(); p != queue.end(); ++p) {
- got_waiting[(*p)->bankId] = true;
+ for (const auto& p : queue) {
+ if (p->rankRef.isAvailable())
+ got_waiting[p->bankId] = true;
}
for (int i = 0; i < ranksPerChannel; i++) {
for (int j = 0; j < banksPerRank; j++) {
- uint8_t bank_id = i * banksPerRank + j;
+ uint16_t bank_id = i * banksPerRank + j;
// if we have waiting requests for the bank, and it is
// amongst the first available, update the mask
if (got_waiting[bank_id]) {
+ // make sure this rank is not currently refreshing.
+ assert(ranks[i]->isAvailable());
// simplistic approximation of when the bank can issue
// an activate, ignoring any rank-to-rank switching
// cost in this calculation
- Tick act_at = banks[i][j].openRow == Bank::NO_ROW ?
- banks[i][j].actAllowedAt :
- std::max(banks[i][j].preAllowedAt, curTick()) + tRP;
+ Tick act_at = ranks[i]->banks[j].openRow == Bank::NO_ROW ?
+ ranks[i]->banks[j].actAllowedAt :
+ std::max(ranks[i]->banks[j].preAllowedAt, curTick()) + tRP;
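+ // a closed bank can activate as soon as its actAllowedAt,
+ // whereas an open bank must first be precharged, adding tRP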
// prioritize commands that access the
// same rank as previous burst
return bank_mask;
}
+DRAMCtrl::Rank::Rank(DRAMCtrl& _memory, const DRAMCtrlParams* _p)
+ : EventManager(&_memory), memory(_memory),
+ pwrStateTrans(PWR_IDLE), pwrState(PWR_IDLE), pwrStateTick(0),
+ refreshState(REF_IDLE), refreshDueAt(0),
+ power(_p, false), numBanksActive(0),
+ activateEvent(*this), prechargeEvent(*this),
+ refreshEvent(*this), powerEvent(*this)
+{ }
+
+void
+DRAMCtrl::Rank::startup(Tick ref_tick)
+{
+ assert(ref_tick > curTick());
+
+ pwrStateTick = curTick();
+
+ // kick off the refresh, and give ourselves enough time to
+ // precharge
+ schedule(refreshEvent, ref_tick);
+}
+
+void
+DRAMCtrl::Rank::checkDrainDone()
+{
+ // if this rank was waiting to drain it is now able to proceed to
+ // precharge
+ if (refreshState == REF_DRAIN) {
+ DPRINTF(DRAM, "Refresh drain done, now precharging\n");
+
+ refreshState = REF_PRE;
+
+ // hand control back to the refresh event loop
+ schedule(refreshEvent, curTick());
+ }
+}
+
+void
+DRAMCtrl::Rank::processActivateEvent()
+{
+ // we should transition to the active state as soon as any bank is active
+ if (pwrState != PWR_ACT)
+ // note that at this point numBanksActive could be back at
+ // zero again due to a precharge scheduled in the future
+ schedulePowerEvent(PWR_ACT, curTick());
+}
+
+void
+DRAMCtrl::Rank::processPrechargeEvent()
+{
+ // if we reached zero, then special conditions apply as we track
+ // if all banks are precharged for the power models
+ if (numBanksActive == 0) {
+ // we should transition to the idle state when the last bank
+ // is precharged
+ schedulePowerEvent(PWR_IDLE, curTick());
+ }
+}
+
void
-DRAMCtrl::processRefreshEvent()
+DRAMCtrl::Rank::processRefreshEvent()
{
// when first preparing the refresh, remember when it was due
if (refreshState == REF_IDLE) {
DPRINTF(DRAM, "Refresh due\n");
}
- // let any scheduled read or write go ahead, after which it will
+ // let any scheduled read or write to the same rank go ahead,
+ // after which the request loop will
// hand control back to this event loop
if (refreshState == REF_DRAIN) {
- if (nextReqEvent.scheduled()) {
+ // if a request is at the moment being handled and this request is
+ // accessing the current rank then wait for it to finish
+ if ((rank == memory.activeRank)
+ && (memory.nextReqEvent.scheduled())) {
// hand control over to the request loop until it is
// evaluated next
DPRINTF(DRAM, "Refresh awaiting draining\n");
// first determine when we can precharge
Tick pre_at = curTick();
- for (int i = 0; i < ranksPerChannel; i++) {
- for (int j = 0; j < banksPerRank; j++) {
- // respect both causality and any existing bank
- // constraints, some banks could already have a
- // (auto) precharge scheduled
- pre_at = std::max(banks[i][j].preAllowedAt, pre_at);
- }
+
+ for (auto &b : banks) {
+ // respect both causality and any existing bank
+ // constraints, some banks could already have a
+ // (auto) precharge scheduled
+ pre_at = std::max(b.preAllowedAt, pre_at);
}
- // make sure all banks are precharged, and for those that
+ // make sure all banks in this rank are precharged, and for those that
// already are, update their availability
- Tick act_allowed_at = pre_at + tRP;
-
- for (int i = 0; i < ranksPerChannel; i++) {
- for (int j = 0; j < banksPerRank; j++) {
- if (banks[i][j].openRow != Bank::NO_ROW) {
- prechargeBank(banks[i][j], pre_at, false);
- } else {
- banks[i][j].actAllowedAt =
- std::max(banks[i][j].actAllowedAt, act_allowed_at);
- banks[i][j].preAllowedAt =
- std::max(banks[i][j].preAllowedAt, pre_at);
- }
+ Tick act_allowed_at = pre_at + memory.tRP;
+
+ for (auto &b : banks) {
+ if (b.openRow != Bank::NO_ROW) {
+ memory.prechargeBank(*this, b, pre_at, false);
+ } else {
+ b.actAllowedAt = std::max(b.actAllowedAt, act_allowed_at);
+ b.preAllowedAt = std::max(b.preAllowedAt, pre_at);
}
+ }
- // at the moment this affects all ranks
- rankPower[i].powerlib.doCommand(MemCommand::PREA, 0,
- divCeil(pre_at, tCK) -
- timeStampOffset);
+ // issue a precharge-all (PREA) command for this rank
+ power.powerlib.doCommand(MemCommand::PREA, 0,
+ divCeil(pre_at, memory.tCK) -
+ memory.timeStampOffset);
- DPRINTF(DRAMPower, "%llu,PREA,0,%d\n", divCeil(pre_at, tCK) -
- timeStampOffset, i);
- }
+ DPRINTF(DRAMPower, "%llu,PREA,0,%d\n",
+ divCeil(pre_at, memory.tCK) -
+ memory.timeStampOffset, rank);
} else {
DPRINTF(DRAM, "All banks already precharged, starting refresh\n");
assert(numBanksActive == 0);
assert(pwrState == PWR_REF);
- Tick ref_done_at = curTick() + tRFC;
+ Tick ref_done_at = curTick() + memory.tRFC;
- for (int i = 0; i < ranksPerChannel; i++) {
- for (int j = 0; j < banksPerRank; j++) {
- banks[i][j].actAllowedAt = ref_done_at;
- }
-
- // at the moment this affects all ranks
- rankPower[i].powerlib.doCommand(MemCommand::REF, 0,
- divCeil(curTick(), tCK) -
- timeStampOffset);
-
- // at the moment sort the list of commands and update the counters
- // for DRAMPower libray when doing a refresh
- sort(rankPower[i].powerlib.cmdList.begin(),
- rankPower[i].powerlib.cmdList.end(), DRAMCtrl::sortTime);
-
- // update the counters for DRAMPower, passing false to
- // indicate that this is not the last command in the
- // list. DRAMPower requires this information for the
- // correct calculation of the background energy at the end
- // of the simulation. Ideally we would want to call this
- // function with true once at the end of the
- // simulation. However, the discarded energy is extremly
- // small and does not effect the final results.
- rankPower[i].powerlib.updateCounters(false);
-
- // call the energy function
- rankPower[i].powerlib.calcEnergy();
-
- // Update the stats
- updatePowerStats(i);
-
- DPRINTF(DRAMPower, "%llu,REF,0,%d\n", divCeil(curTick(), tCK) -
- timeStampOffset, i);
+ for (auto &b : banks) {
+ b.actAllowedAt = ref_done_at;
}
+ // issue the refresh command for this rank to the DRAMPower library
+ power.powerlib.doCommand(MemCommand::REF, 0,
+ divCeil(curTick(), memory.tCK) -
+ memory.timeStampOffset);
+
+ // at the moment sort the list of commands and update the counters
+ // for the DRAMPower library when doing a refresh
+ sort(power.powerlib.cmdList.begin(),
+ power.powerlib.cmdList.end(), DRAMCtrl::sortTime);
+
+ // update the counters for DRAMPower, passing false to
+ // indicate that this is not the last command in the
+ // list. DRAMPower requires this information for the
+ // correct calculation of the background energy at the end
+ // of the simulation. Ideally we would want to call this
+ // function with true once at the end of the
+ // simulation. However, the discarded energy is extremely
+ // small and does not affect the final results.
+ power.powerlib.updateCounters(false);
+
+ // call the energy function
+ power.powerlib.calcEnergy();
+
+ // Update the stats
+ updatePowerStats();
+
+ DPRINTF(DRAMPower, "%llu,REF,0,%d\n", divCeil(curTick(), memory.tCK) -
+ memory.timeStampOffset, rank);
+
// make sure we did not wait so long that we cannot make up
// for it
- if (refreshDueAt + tREFI < ref_done_at) {
+ if (refreshDueAt + memory.tREFI < ref_done_at) {
fatal("Refresh was delayed so long we cannot catch up\n");
}
// compensate for the delay in actually performing the refresh
// when scheduling the next one
- schedule(refreshEvent, refreshDueAt + tREFI - tRP);
+ schedule(refreshEvent, refreshDueAt + memory.tREFI - memory.tRP);
assert(!powerEvent.scheduled());
schedulePowerEvent(PWR_IDLE, ref_done_at);
DPRINTF(DRAMState, "Refresh done at %llu and next refresh at %llu\n",
- ref_done_at, refreshDueAt + tREFI);
+ ref_done_at, refreshDueAt + memory.tREFI);
}
}
void
-DRAMCtrl::schedulePowerEvent(PowerState pwr_state, Tick tick)
+DRAMCtrl::Rank::schedulePowerEvent(PowerState pwr_state, Tick tick)
{
// respect causality
assert(tick >= curTick());
}
void
-DRAMCtrl::processPowerEvent()
+DRAMCtrl::Rank::processPowerEvent()
{
// remember where we were, and for how long
Tick duration = curTick() - pwrStateTick;
// kick things into action again
refreshState = REF_IDLE;
- assert(!nextReqEvent.scheduled());
- schedule(nextReqEvent, curTick());
+ // a request event could be already scheduled by the state
+ // machine of the other rank
+ if (!memory.nextReqEvent.scheduled())
+ schedule(memory.nextReqEvent, curTick());
} else {
assert(prev_state == PWR_ACT);
}
void
-DRAMCtrl::updatePowerStats(uint8_t rank)
+DRAMCtrl::Rank::updatePowerStats()
{
// Get the energy and power from DRAMPower
Data::MemoryPowerModel::Energy energy =
- rankPower[rank].powerlib.getEnergy();
- Data::MemoryPowerModel::Power power =
- rankPower[rank].powerlib.getPower();
-
- actEnergy[rank] = energy.act_energy * devicesPerRank;
- preEnergy[rank] = energy.pre_energy * devicesPerRank;
- readEnergy[rank] = energy.read_energy * devicesPerRank;
- writeEnergy[rank] = energy.write_energy * devicesPerRank;
- refreshEnergy[rank] = energy.ref_energy * devicesPerRank;
- actBackEnergy[rank] = energy.act_stdby_energy * devicesPerRank;
- preBackEnergy[rank] = energy.pre_stdby_energy * devicesPerRank;
- totalEnergy[rank] = energy.total_energy * devicesPerRank;
- averagePower[rank] = power.average_power * devicesPerRank;
+ power.powerlib.getEnergy();
+ Data::MemoryPowerModel::Power rank_power =
+ power.powerlib.getPower();
+
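+ // DRAMPower models a single DRAM device, so scale the returned
+ // energies by the number of devices making up the rank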
+ actEnergy = energy.act_energy * memory.devicesPerRank;
+ preEnergy = energy.pre_energy * memory.devicesPerRank;
+ readEnergy = energy.read_energy * memory.devicesPerRank;
+ writeEnergy = energy.write_energy * memory.devicesPerRank;
+ refreshEnergy = energy.ref_energy * memory.devicesPerRank;
+ actBackEnergy = energy.act_stdby_energy * memory.devicesPerRank;
+ preBackEnergy = energy.pre_stdby_energy * memory.devicesPerRank;
+ totalEnergy = energy.total_energy * memory.devicesPerRank;
+ averagePower = rank_power.average_power * memory.devicesPerRank;
}
+void
+DRAMCtrl::Rank::regStats()
+{
+ using namespace Stats;
+
+ pwrStateTime
+ .init(5)
+ .name(name() + ".memoryStateTime")
+ .desc("Time in different power states");
+ pwrStateTime.subname(0, "IDLE");
+ pwrStateTime.subname(1, "REF");
+ pwrStateTime.subname(2, "PRE_PDN");
+ pwrStateTime.subname(3, "ACT");
+ pwrStateTime.subname(4, "ACT_PDN");
+
+ actEnergy
+ .name(name() + ".actEnergy")
+ .desc("Energy for activate commands per rank (pJ)");
+
+ preEnergy
+ .name(name() + ".preEnergy")
+ .desc("Energy for precharge commands per rank (pJ)");
+
+ readEnergy
+ .name(name() + ".readEnergy")
+ .desc("Energy for read commands per rank (pJ)");
+
+ writeEnergy
+ .name(name() + ".writeEnergy")
+ .desc("Energy for write commands per rank (pJ)");
+
+ refreshEnergy
+ .name(name() + ".refreshEnergy")
+ .desc("Energy for refresh commands per rank (pJ)");
+
+ actBackEnergy
+ .name(name() + ".actBackEnergy")
+ .desc("Energy for active background per rank (pJ)");
+
+ preBackEnergy
+ .name(name() + ".preBackEnergy")
+ .desc("Energy for precharge background per rank (pJ)");
+
+ totalEnergy
+ .name(name() + ".totalEnergy")
+ .desc("Total energy per rank (pJ)");
+
+ averagePower
+ .name(name() + ".averagePower")
+ .desc("Core power per rank (mW)");
+}
void
DRAMCtrl::regStats()
{
AbstractMemory::regStats();
+ for (auto r : ranks) {
+ r->regStats();
+ }
+
readReqs
.name(name() + ".readReqs")
.desc("Number of read requests accepted");
.name(name() + ".busUtil")
.desc("Data bus utilization in percentage")
.precision(2);
-
busUtil = (avgRdBW + avgWrBW) / peakBW * 100;
totGap
pageHitRate = (writeRowHits + readRowHits) /
(writeBursts - mergedWrBursts + readBursts - servicedByWrQ) * 100;
-
- pwrStateTime
- .init(5)
- .name(name() + ".memoryStateTime")
- .desc("Time in different power states");
- pwrStateTime.subname(0, "IDLE");
- pwrStateTime.subname(1, "REF");
- pwrStateTime.subname(2, "PRE_PDN");
- pwrStateTime.subname(3, "ACT");
- pwrStateTime.subname(4, "ACT_PDN");
-
- actEnergy
- .init(ranksPerChannel)
- .name(name() + ".actEnergy")
- .desc("Energy for activate commands per rank (pJ)");
-
- preEnergy
- .init(ranksPerChannel)
- .name(name() + ".preEnergy")
- .desc("Energy for precharge commands per rank (pJ)");
-
- readEnergy
- .init(ranksPerChannel)
- .name(name() + ".readEnergy")
- .desc("Energy for read commands per rank (pJ)");
-
- writeEnergy
- .init(ranksPerChannel)
- .name(name() + ".writeEnergy")
- .desc("Energy for write commands per rank (pJ)");
-
- refreshEnergy
- .init(ranksPerChannel)
- .name(name() + ".refreshEnergy")
- .desc("Energy for refresh commands per rank (pJ)");
-
- actBackEnergy
- .init(ranksPerChannel)
- .name(name() + ".actBackEnergy")
- .desc("Energy for active background per rank (pJ)");
-
- preBackEnergy
- .init(ranksPerChannel)
- .name(name() + ".preBackEnergy")
- .desc("Energy for precharge background per rank (pJ)");
-
- totalEnergy
- .init(ranksPerChannel)
- .name(name() + ".totalEnergy")
- .desc("Total energy per rank (pJ)");
-
- averagePower
- .init(ranksPerChannel)
- .name(name() + ".averagePower")
- .desc("Core power per rank (mW)");
}
void
* Authors: Andreas Hansson
* Ani Udipi
* Neha Agarwal
+ * Omar Naji
*/
/**
#define __MEM_DRAM_CTRL_HH__
#include <deque>
+#include <string>
#include "base/statistics.hh"
#include "enums/AddrMap.hh"
BusState busState;
- /** List to keep track of activate ticks */
- std::vector<std::deque<Tick>> actTicks;
-
/**
* A basic class to track the bank state, i.e. what row is
* currently open (if any), when is the bank free to accept a new
static const uint32_t NO_ROW = -1;
uint32_t openRow;
- uint8_t rank;
uint8_t bank;
uint8_t bankgr;
uint32_t bytesAccessed;
Bank() :
- openRow(NO_ROW), rank(0), bank(0), bankgr(0),
+ openRow(NO_ROW), bank(0), bankgr(0),
colAllowedAt(0), preAllowedAt(0), actAllowedAt(0),
rowAccesses(0), bytesAccessed(0)
{ }
};
+
+ /**
+ * Rank class includes a vector of banks. Refresh and Power state
+ * machines are defined per rank. Events required to change the
+ * state of the refresh and power state machine are scheduled per
+ * rank. This class allows the implementation of rank-wise refresh
+ * and rank-wise power-down.
+ */
+ class Rank : public EventManager
+ {
+
+ private:
+
+ /**
+ * The power state captures the different operational states of
+ * the DRAM and interacts with the bus read/write state machine,
+ * and the refresh state machine. In the idle state all banks are
+ * precharged. From there we either go to an auto refresh (as
+ * determined by the refresh state machine), or to a precharge
+ * power down mode. From idle the memory can also go to the active
+ * state (with one or more banks active), and in turn from there
+ * to active power down. At the moment we do not capture the deep
+ * power down and self-refresh state.
+ */
+ enum PowerState {
+ PWR_IDLE = 0,
+ PWR_REF,
+ PWR_PRE_PDN,
+ PWR_ACT,
+ PWR_ACT_PDN
+ };
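+
+ // in summary, the transitions modelled here are:
+ // PWR_IDLE <-> PWR_ACT on the first activate / last precharge,
+ // and PWR_IDLE -> PWR_REF -> PWR_IDLE around each refresh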
+
+ /**
+ * The refresh state is used to control the progress of the
+ * refresh scheduling. When normal operation is in progress the
+ * refresh state is idle. From there, it progresses to the refresh
+ * drain state once tREFI has passed. The refresh drain state
+ * captures the DRAM row active state, as it will stay there until
+ * all ongoing accesses complete. Thereafter all banks are
+ * precharged, and lastly, the DRAM is refreshed.
+ */
+ enum RefreshState {
+ REF_IDLE = 0,
+ REF_DRAIN,
+ REF_PRE,
+ REF_RUN
+ };
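+
+ // i.e. each refresh progresses through:
+ // REF_IDLE -> REF_DRAIN -> REF_PRE -> REF_RUN -> REF_IDLE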
+
+ /**
+ * A reference to the parent DRAMCtrl instance
+ */
+ DRAMCtrl& memory;
+
+ /**
+ * Since we are taking decisions out of order, we need to keep
+ * track of what power transition is happening at what time, such
+ * that we can go back in time and change history. For example, if
+ * we precharge all banks and schedule going to the idle state, we
+ * might at a later point decide to activate a bank before the
+ * transition to idle would have taken place.
+ */
+ PowerState pwrStateTrans;
+
+ /**
+ * Current power state.
+ */
+ PowerState pwrState;
+
+ /**
+ * Track when we transitioned to the current power state
+ */
+ Tick pwrStateTick;
+
+ /**
+ * current refresh state
+ */
+ RefreshState refreshState;
+
+ /**
+ * Keep track of when a refresh is due.
+ */
+ Tick refreshDueAt;
+
+ /*
+ * Command energies
+ */
+ Stats::Scalar actEnergy;
+ Stats::Scalar preEnergy;
+ Stats::Scalar readEnergy;
+ Stats::Scalar writeEnergy;
+ Stats::Scalar refreshEnergy;
+
+ /*
+ * Active Background Energy
+ */
+ Stats::Scalar actBackEnergy;
+
+ /*
+ * Precharge Background Energy
+ */
+ Stats::Scalar preBackEnergy;
+
+ Stats::Scalar totalEnergy;
+ Stats::Scalar averagePower;
+
+ /**
+ * Track time spent in each power state.
+ */
+ Stats::Vector pwrStateTime;
+
+ /**
+ * Function to update Power Stats. This increments the energy
+ * each time it is called. If stats are dumped periodically, note
+ * that accumulated energy values will appear in the stats even
+ * if the stats are reset; the energy values come from DRAMPower
+ * and there is currently no support for resetting its state.
+ */
+ void updatePowerStats();
+
+ /**
+ * Schedule a power state transition in the future, and
+ * potentially override an already scheduled transition.
+ *
+ * @param pwr_state Power state to transition to
+ * @param tick Tick when transition should take place
+ */
+ void schedulePowerEvent(PowerState pwr_state, Tick tick);
+
+ public:
+
+ /**
+ * Current Rank index
+ */
+ uint8_t rank;
+
+ /**
+ * One DRAMPower instance per rank
+ */
+ DRAMPower power;
+
+ /**
+ * Vector of Banks. Each rank is made up of several devices which
+ * in turn are made up of several banks.
+ */
+ std::vector<Bank> banks;
+
+ /**
+ * To track number of banks which are currently active for
+ * this rank.
+ */
+ unsigned int numBanksActive;
+
+ /** List to keep track of activate ticks */
+ std::deque<Tick> actTicks;
+
+ Rank(DRAMCtrl& _memory, const DRAMCtrlParams* _p);
+
+ const std::string name() const
+ {
+ return csprintf("%s_%d", memory.name(), rank);
+ }
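+ // e.g. rank 1 of a controller named system.mem_ctrl appears in
+ // the stats as system.mem_ctrl_1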
+
+ /**
+ * Kick off accounting for power and refresh states and
+ * schedule initial refresh.
+ *
+ * @param ref_tick Tick for first refresh
+ */
+ void startup(Tick ref_tick);
+
+ /**
+ * Check if the current rank is available for scheduling.
+ *
+ * @return True if the rank is idle from a refresh point of view
+ */
+ bool isAvailable() const { return refreshState == REF_IDLE; }
+
+ /**
+ * Let the rank check if it was waiting for requests to drain
+ * to allow it to transition states; called from the request
+ * loop for every rank that is busy refreshing.
+ */
+ void checkDrainDone();
+
+ /*
+ * Function to register Stats
+ */
+ void regStats();
+
+ void processActivateEvent();
+ EventWrapper<Rank, &Rank::processActivateEvent>
+ activateEvent;
+
+ void processPrechargeEvent();
+ EventWrapper<Rank, &Rank::processPrechargeEvent>
+ prechargeEvent;
+
+ void processRefreshEvent();
+ EventWrapper<Rank, &Rank::processRefreshEvent>
+ refreshEvent;
+
+ void processPowerEvent();
+ EventWrapper<Rank, &Rank::processPowerEvent>
+ powerEvent;
+
+ };
+
/**
* A burst helper helps organize and manage a packet that is larger than
* the DRAM burst size. A system packet that is larger than the burst size
BurstHelper(unsigned int _burstCount)
: burstCount(_burstCount), burstsServiced(0)
- { }
+ { }
};
/**
*/
BurstHelper* burstHelper;
Bank& bankRef;
+ Rank& rankRef;
DRAMPacket(PacketPtr _pkt, bool is_read, uint8_t _rank, uint8_t _bank,
uint32_t _row, uint16_t bank_id, Addr _addr,
- unsigned int _size, Bank& bank_ref)
+ unsigned int _size, Bank& bank_ref, Rank& rank_ref)
: entryTime(curTick()), readyTime(curTick()),
pkt(_pkt), isRead(is_read), rank(_rank), bank(_bank), row(_row),
bankId(bank_id), addr(_addr), size(_size), burstHelper(NULL),
- bankRef(bank_ref)
+ bankRef(bank_ref), rankRef(rank_ref)
{ }
};
void processRespondEvent();
EventWrapper<DRAMCtrl, &DRAMCtrl::processRespondEvent> respondEvent;
- void processActivateEvent();
- EventWrapper<DRAMCtrl, &DRAMCtrl::processActivateEvent> activateEvent;
-
- void processPrechargeEvent();
- EventWrapper<DRAMCtrl, &DRAMCtrl::processPrechargeEvent> prechargeEvent;
-
- void processRefreshEvent();
- EventWrapper<DRAMCtrl, &DRAMCtrl::processRefreshEvent> refreshEvent;
-
- void processPowerEvent();
- EventWrapper<DRAMCtrl,&DRAMCtrl::processPowerEvent> powerEvent;
-
/**
* Check if the read queue has room for more entries
*
*
* @param queue Queued requests to consider
* @param switched_cmd_type Command type is changing
+ * @return true if a packet was scheduled to an available rank,
+ * false otherwise
*/
- void chooseNext(std::deque<DRAMPacket*>& queue, bool switched_cmd_type);
+ bool chooseNext(std::deque<DRAMPacket*>& queue, bool switched_cmd_type);
/**
* For FR-FCFS policy reorder the read/write queue depending on row buffer
*
* @param queue Queued requests to consider
* @param switched_cmd_type Command type is changing
+ * @return true if a packet was scheduled to an available rank,
+ * false otherwise
*/
- void reorderQueue(std::deque<DRAMPacket*>& queue, bool switched_cmd_type);
+ bool reorderQueue(std::deque<DRAMPacket*>& queue, bool switched_cmd_type);
/**
* Find which are the earliest banks ready to issue an activate
* method updates the time that the banks become available based
* on the current limits.
*
- * @param bank Reference to the bank
+ * @param rank_ref Reference to the rank
+ * @param bank_ref Reference to the bank
* @param act_tick Time when the activation takes place
* @param row Index of the row
*/
- void activateBank(Bank& bank, Tick act_tick, uint32_t row);
+ void activateBank(Rank& rank_ref, Bank& bank_ref, Tick act_tick,
+ uint32_t row);
/**
* Precharge a given bank and also update when the precharge is
* done. This will also deal with any stats related to the
* accesses to the open page.
*
+ * @param rank_ref The rank to precharge
* @param bank_ref The bank to precharge
* @param pre_at Time when the precharge takes place
* @param trace Is this an auto precharge then do not add to trace
*/
- void prechargeBank(Bank& bank_ref, Tick pre_at, bool trace = true);
+ void prechargeBank(Rank& rank_ref, Bank& bank_ref,
+ Tick pre_at, bool trace = true);
/**
* Used for debugging to observe the contents of the queues.
DrainManager *drainManager;
/**
- * Multi-dimensional vector of banks, first dimension is ranks,
- * second is bank
+ * Vector of ranks
*/
- std::vector<std::vector<Bank> > banks;
+ std::vector<Rank*> ranks;
/**
* The following are basic design parameters of the memory
*/
Tick busBusyUntil;
- /**
- * Keep track of when a refresh is due.
- */
- Tick refreshDueAt;
-
- /**
- * The refresh state is used to control the progress of the
- * refresh scheduling. When normal operation is in progress the
- * refresh state is idle. From there, it progresses to the refresh
- * drain state once tREFI has passed. The refresh drain state
- * captures the DRAM row active state, as it will stay there until
- * all ongoing accesses complete. Thereafter all banks are
- * precharged, and lastly, the DRAM is refreshed.
- */
- enum RefreshState {
- REF_IDLE = 0,
- REF_DRAIN,
- REF_PRE,
- REF_RUN
- };
-
- RefreshState refreshState;
-
- /**
- * The power state captures the different operational states of
- * the DRAM and interacts with the bus read/write state machine,
- * and the refresh state machine. In the idle state all banks are
- * precharged. From there we either go to an auto refresh (as
- * determined by the refresh state machine), or to a precharge
- * power down mode. From idle the memory can also go to the active
- * state (with one or more banks active), and in turn from there
- * to active power down. At the moment we do not capture the deep
- * power down and self-refresh state.
- */
- enum PowerState {
- PWR_IDLE = 0,
- PWR_REF,
- PWR_PRE_PDN,
- PWR_ACT,
- PWR_ACT_PDN
- };
-
- /**
- * Since we are taking decisions out of order, we need to keep
- * track of what power transition is happening at what time, such
- * that we can go back in time and change history. For example, if
- * we precharge all banks and schedule going to the idle state, we
- * might at a later point decide to activate a bank before the
- * transition to idle would have taken place.
- */
- PowerState pwrStateTrans;
-
- /**
- * Current power state.
- */
- PowerState pwrState;
-
- /**
- * Schedule a power state transition in the future, and
- * potentially override an already scheduled transition.
- *
- * @param pwr_state Power state to transition to
- * @param tick Tick when transition should take place
- */
- void schedulePowerEvent(PowerState pwr_state, Tick tick);
-
Tick prevArrival;
/**
// DRAM Power Calculation
Stats::Formula pageHitRate;
- Stats::Vector pwrStateTime;
-
- //Command energies
- Stats::Vector actEnergy;
- Stats::Vector preEnergy;
- Stats::Vector readEnergy;
- Stats::Vector writeEnergy;
- Stats::Vector refreshEnergy;
- //Active Background Energy
- Stats::Vector actBackEnergy;
- //Precharge Background Energy
- Stats::Vector preBackEnergy;
- Stats::Vector totalEnergy;
- //Power Consumed
- Stats::Vector averagePower;
-
- // Track when we transitioned to the current power state
- Tick pwrStateTick;
-
- // To track number of banks which are currently active
- unsigned int numBanksActive;
// Holds the value of the rank of burst issued
uint8_t activeRank;
*/
std::vector<PacketPtr> pendingDelete;
- // One DRAMPower instance per rank
- std::vector<DRAMPower> rankPower;
-
- /**
- * This function increments the energy when called. If stats are
- * dumped periodically, note accumulated energy values will
- * appear in the stats (even if the stats are reset). This is a
- * result of the energy values coming from DRAMPower, and there
- * is currently no support for resetting the state.
- *
- * @param rank Currrent rank
- */
- void updatePowerStats(uint8_t rank);
/**
* Function for sorting commands in the command list of DRAMPower.