int initial_offset = initial_tgt->pkt->getOffset(blkSize);
bool from_cache = false;
-
- while (mshr->hasTargets()) {
- MSHR::Target *target = mshr->getTarget();
- Packet *tgt_pkt = target->pkt;
-
- switch (target->source) {
+ MSHR::TargetList targets = mshr->extractServiceableTargets(pkt);
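+ // Targets that are not extracted here (any FromCPU targets beyond
+ // the first on a ReadRespWithInvalidate) stay queued in the MSHR
+ // and will be serviced when the request is re-issued.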
+ for (auto &target: targets) {
+ Packet *tgt_pkt = target.pkt;
+ switch (target.source) {
case MSHR::Target::FromCPU:
Tick completion_time;
// Here we charge on completion_time the delay of the xbar if the
mshr->promoteWritable();
// NB: we use the original packet here and not the response!
blk = handleFill(tgt_pkt, blk, writebacks,
- mshr->allocOnFill());
+ targets.allocOnFill);
assert(blk != nullptr);
// treat as a fill, and discard the invalidation
assert(tgt_pkt->req->masterId() < system->maxMasters());
missLatency[tgt_pkt->cmdToIndex()][tgt_pkt->req->masterId()] +=
- completion_time - target->recvTime;
+ completion_time - target.recvTime;
} else if (pkt->cmd == MemCmd::UpgradeFailResp) {
// failed StoreCond upgrade
assert(tgt_pkt->cmd == MemCmd::StoreCondReq ||
break;
default:
- panic("Illegal target->source enum %d\n", target->source);
+ panic("Illegal target->source enum %d\n", target.source);
}
-
- mshr->popTarget();
}
maintainClusivity(from_cache, blk);
if (mshr != nullptr) {
mshr->clearDownstreamPending();
}
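+ // Record that the upstream downstreamPending flag has been
+ // cleared for this target, so we do not attempt to clear it again.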
+ t.markedPending = false;
}
}
}
return true;
}
+MSHR::TargetList
+MSHR::extractServiceableTargets(PacketPtr pkt)
+{
+ TargetList ready_targets;
+ // If the downstream MSHR got an invalidation request then we only
+ // service the first of the FromCPU targets and any other
+ // non-FromCPU target. This way the remaining FromCPU targets
+ // issue a new request and get a fresh copy of the block, so we
+ // avoid memory consistency violations.
+ if (pkt->cmd == MemCmd::ReadRespWithInvalidate) {
+ auto it = targets.begin();
+ assert(it->source == Target::FromCPU);
+ ready_targets.push_back(*it);
+ it = targets.erase(it);
+ while (it != targets.end()) {
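+ // Keep any further FromCPU targets queued in the MSHR; only
+ // FromSnoop targets are moved into the serviceable list.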
+ if (it->source == Target::FromCPU) {
+ it++;
+ } else {
+ assert(it->source == Target::FromSnoop);
+ ready_targets.push_back(*it);
+ it = targets.erase(it);
+ }
+ }
+ ready_targets.populateFlags();
+ } else {
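+ // Any other response can service every outstanding target, so
+ // hand over the entire targets list.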
+ std::swap(ready_targets, targets);
+ }
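+ // Re-derive the flags of whatever is left queued in the MSHR's
+ // targets list.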
+ targets.populateFlags();
+
+ return ready_targets;
+}
bool
MSHR::promoteDeferredTargets()
{
- assert(targets.empty());
- if (deferredTargets.empty()) {
- return false;
- }
+ if (targets.empty()) {
+ if (deferredTargets.empty()) {
+ return false;
+ }
- // swap targets & deferredTargets lists
- std::swap(targets, deferredTargets);
+ std::swap(targets, deferredTargets);
+ } else {
+ // If the targets list is not empty (e.g. a ReadRespWithInvalidate
+ // left unserviced FromCPU targets behind), append the deferred
+ // targets to it. A new request will then service the whole
+ // targets list.
+ targets.splice(targets.end(), deferredTargets);
+ targets.populateFlags();
+ }
// clear deferredTargets flags
deferredTargets.resetFlags();
const Counter order; //!< Global order (for memory consistency mgmt)
const PacketPtr pkt; //!< Pending request packet.
const Source source; //!< Request from cpu, memory, or prefetcher?
- const bool markedPending; //!< Did we mark upstream MSHR
- //!< as downstreamPending?
+
+ /**
+ * We use this flag to track whether we have cleared the
+ * downstreamPending flag for the MSHR of the cache above
+ * where this packet originates from, and to guard against
+ * clearing it more than once.
+ *
+ * The markedPending flag needs to be updated when the
+ * TargetList goes into service, which can happen:
+ * 1) when the Target is instantiated, if the MSHR is already in
+ * service and the target is not deferred,
+ * 2) when the MSHR goes into service, if the target is not
+ * deferred, or
+ * 3) when the TargetList is promoted (deferredTargets ->
+ * targets).
+ */
+ bool markedPending;
+
const bool allocOnFill; //!< Should the response servicing this
//!< target list allocate in the cache?
int getNumTargets() const
{ return targets.size() + deferredTargets.size(); }
+ /**
+ * Extracts the subset of the targets that can be serviced given a
+ * received response. This function returns the targets list
+ * unless the response is a ReadRespWithInvalidate. The
+ * ReadRespWithInvalidate is the only invalidating response whose
+ * invalidation was not expected when the request (a
+ * ReadSharedReq) was sent out. For ReadRespWithInvalidate we can
+ * safely service only the first FromCPU target and all FromSnoop
+ * targets (inform all snoopers that we no longer have the block).
+ *
+ * @param pkt The response from the downstream memory
+ */
+ TargetList extractServiceableTargets(PacketPtr pkt);
+
/**
* Returns true if there are targets left.
* @return true if there are targets