inline void
MSHR::TargetList::add(PacketPtr pkt, Tick readyTime,
- Counter order, bool cpuSide)
+ Counter order, bool cpuSide, bool markPending)
{
if (cpuSide) {
if (pkt->needsExclusive()) {
if (pkt->cmd == MemCmd::UpgradeReq) {
hasUpgrade = true;
}
+ }
+ if (markPending) {
MSHR *mshr = dynamic_cast<MSHR*>(pkt->senderState);
if (mshr != NULL) {
assert(!mshr->downstreamPending);
}
}
- push_back(Target(pkt, readyTime, order, cpuSide));
+ push_back(Target(pkt, readyTime, order, cpuSide, markPending));
}
{
Iterator end_i = end();
for (Iterator i = begin(); i != end_i; ++i) {
- MSHR *mshr = dynamic_cast<MSHR*>(i->pkt->senderState);
- if (mshr != NULL) {
- assert(mshr->downstreamPending);
- mshr->downstreamPending = false;
+ if (i->markedPending) {
+ MSHR *mshr = dynamic_cast<MSHR*>(i->pkt->senderState);
+ if (mshr != NULL) {
+ mshr->clearDownstreamPending();
+ }
}
}
}
// Don't know of a case where we would allocate a new MSHR for a
// snoop (mem-side request), so set cpuSide to true here.
assert(targets->isReset());
- targets->add(target, whenReady, _order, true);
+ targets->add(target, whenReady, _order, true, true);
assert(deferredTargets->isReset());
pendingInvalidate = false;
pendingShared = false;
}
+// Clear this MSHR's downstreamPending flag and propagate the clear
+// to any upstream MSHRs that were marked pending when their packets
+// were added to our target list (see TargetList::add's markPending
+// parameter and Target::markedPending).
+void
+MSHR::clearDownstreamPending()
+{
+    // Caller must only invoke this while the flag is actually set.
+    assert(downstreamPending);
+    downstreamPending = false;
+    // recursively clear flag on any MSHRs we will be forwarding
+    // responses to
+    targets->clearDownstreamPending();
+}
+
bool
MSHR::markInService()
{
(!deferredTargets->empty() || pendingInvalidate ||
(!targets->needsExclusive && pkt->needsExclusive()))) {
// need to put on deferred list
- deferredTargets->add(pkt, whenReady, _order, true);
+ deferredTargets->add(pkt, whenReady, _order, true, true);
} else {
- // no request outstanding, or still OK to append to
- // outstanding request
- targets->add(pkt, whenReady, _order, true);
+ // No request outstanding, or still OK to append to
+ // outstanding request: append to regular target list. Only
+ // mark pending if current request hasn't been issued yet
+ // (isn't in service).
+ targets->add(pkt, whenReady, _order, true, !inService);
}
++ntargets;
// actual target device (typ. PhysicalMemory) will delete the
// packet on reception, so we need to save a copy here
PacketPtr cp_pkt = new Packet(pkt, true);
- targets->add(cp_pkt, curTick, _order, false);
+ targets->add(cp_pkt, curTick, _order, false,
+ downstreamPending && targets->needsExclusive);
++ntargets;
if (targets->needsExclusive) {
// the regular target list.
assert(!targets->needsExclusive);
targets->needsExclusive = true;
+ // if any of the deferred targets were upper-level cache
+ // requests marked downstreamPending, need to clear that
+ assert(!downstreamPending); // not pending here anymore
+ deferredTargets->clearDownstreamPending();
// this clears out deferredTargets too
targets->splice(targets->end(), *deferredTargets);
deferredTargets->resetFlags();
Counter order; //!< Global order (for memory consistency mgmt)
PacketPtr pkt; //!< Pending request packet.
bool cpuSide; //!< Did request come from cpu side or mem side?
+ bool markedPending; //!< Did we mark upstream MSHR
+ //!< as downstreamPending?
bool isCpuSide() const { return cpuSide; }
- Target(PacketPtr _pkt, Tick _readyTime, Counter _order, bool _cpuSide)
+ Target(PacketPtr _pkt, Tick _readyTime, Counter _order,
+ bool _cpuSide, bool _markedPending)
: recvTime(curTick), readyTime(_readyTime), order(_order),
- pkt(_pkt), cpuSide(_cpuSide)
+ pkt(_pkt), cpuSide(_cpuSide), markedPending(_markedPending)
{}
};
TargetList();
void resetFlags() { needsExclusive = hasUpgrade = false; }
bool isReset() { return !needsExclusive && !hasUpgrade; }
- void add(PacketPtr pkt, Tick readyTime, Counter order, bool cpuSide);
+ void add(PacketPtr pkt, Tick readyTime, Counter order,
+ bool cpuSide, bool markPending);
void replaceUpgrades();
void clearDownstreamPending();
bool checkFunctional(PacketPtr pkt);
bool markInService();
+ void clearDownstreamPending();
+
/**
* Mark this MSHR as free.
*/