MSHR::MSHR() : downstreamPending(false),
pendingModified(false),
postInvalidate(false), postDowngrade(false),
- isForward(false), allocOnFill(false)
+ isForward(false)
{
}
MSHR::TargetList::TargetList()
- : needsWritable(false), hasUpgrade(false)
+ : needsWritable(false), hasUpgrade(false), allocOnFill(false)
{}
void
-MSHR::TargetList::updateFlags(PacketPtr pkt, Target::Source source)
+MSHR::TargetList::updateFlags(PacketPtr pkt, Target::Source source,
+ bool alloc_on_fill)
{
if (source != Target::FromSnoop) {
        if (pkt->needsWritable()) {
            needsWritable = true;
        }

        if (pkt->isUpgrade() || pkt->cmd == MemCmd::StoreCondReq) {
            hasUpgrade = true;
        }
+
+ // potentially re-evaluate whether we should allocate on a fill or
+ // not
+ allocOnFill = allocOnFill || alloc_on_fill;
}
}
void
MSHR::TargetList::populateFlags()
{
resetFlags();
for (auto& t: *this) {
- updateFlags(t.pkt, t.source);
+ updateFlags(t.pkt, t.source, t.allocOnFill);
}
}
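
To make the effect of these two hunks concrete, here is a minimal standalone sketch, not gem5 code (SimpleTarget and SimpleTargetList are invented names): the list-level allocOnFill is just the OR of the per-target values, and populateFlags() can rebuild it from scratch because each target now remembers its own flag.

#include <cassert>
#include <vector>

struct SimpleTarget { bool allocOnFill; };

struct SimpleTargetList {
    std::vector<SimpleTarget> targets;
    bool allocOnFill = false;

    // Mirrors updateFlags(): once any target wants to allocate on fill,
    // the whole list does.
    void updateFlags(bool alloc_on_fill) {
        allocOnFill = allocOnFill || alloc_on_fill;
    }

    // Mirrors add(): remember the per-target value and fold it in.
    void add(bool alloc_on_fill) {
        updateFlags(alloc_on_fill);
        targets.push_back({alloc_on_fill});
    }

    // Mirrors populateFlags(): reset and recompute from the stored
    // per-target values.
    void populateFlags() {
        allocOnFill = false;
        for (const auto &t : targets)
            updateFlags(t.allocOnFill);
    }
};

int main()
{
    SimpleTargetList list;
    list.add(false);          // e.g. a prefetch that should not allocate
    list.add(true);           // a demand access that should
    assert(list.allocOnFill);

    list.targets.pop_back();  // drop the demand target...
    list.populateFlags();     // ...and the flag is recomputed from scratch
    assert(!list.allocOnFill);
    return 0;
}
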
inline void
MSHR::TargetList::add(PacketPtr pkt, Tick readyTime,
- Counter order, Target::Source source, bool markPending)
+ Counter order, Target::Source source, bool markPending,
+ bool alloc_on_fill)
{
- updateFlags(pkt, source);
+ updateFlags(pkt, source, alloc_on_fill);
if (markPending) {
// Iterate over the SenderState stack and see if we find
        // an MSHR entry. If we do, set the downstreamPending
        // flag. Otherwise, do nothing.
}
}
- emplace_back(pkt, readyTime, order, source, markPending);
+ emplace_back(pkt, readyTime, order, source, markPending, alloc_on_fill);
}
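
The markPending branch above (its body is elided in this hunk) walks the packet's SenderState stack and, if it finds an upstream MSHR, sets that MSHR's downstreamPending flag. A rough standalone sketch of that traversal, using invented types rather than the real Packet/SenderState machinery:

#include <cassert>

struct State {
    State *predecessor = nullptr;   // next sender state down the stack
    virtual ~State() = default;
};
struct UpstreamMSHR : State { bool downstreamPending = false; };
struct OtherState : State {};

// Walk the stack and return the first entry of the wanted type, if any.
UpstreamMSHR *findUpstreamMSHR(State *top)
{
    for (State *s = top; s != nullptr; s = s->predecessor)
        if (auto *m = dynamic_cast<UpstreamMSHR *>(s))
            return m;
    return nullptr;
}

int main()
{
    UpstreamMSHR upstream;
    OtherState wrapper;
    wrapper.predecessor = &upstream;   // the MSHR sits below another state

    if (UpstreamMSHR *m = findUpstreamMSHR(&wrapper))
        m->downstreamPending = true;   // mark it, as the elided code does

    assert(upstream.downstreamPending);
    return 0;
}
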
order = _order;
assert(target);
isForward = false;
- allocOnFill = alloc_on_fill;
_isUncacheable = target->req->isUncacheable();
inService = false;
downstreamPending = false;
    // Don't know of a case where we would allocate a new MSHR for a
    // snoop (mem-side request), so set source according to request here
Target::Source source = (target->cmd == MemCmd::HardPFReq) ?
Target::FromPrefetcher : Target::FromCPU;
- targets.add(target, when_ready, _order, source, true);
+ targets.add(target, when_ready, _order, source, true, alloc_on_fill);
assert(deferredTargets.isReset());
}
    // have targets added if originally allocated uncacheable
assert(!_isUncacheable);
- // potentially re-evaluate whether we should allocate on a fill or
- // not
- allocOnFill = allocOnFill || alloc_on_fill;
-
// if there's a request already in service for this MSHR, we will
// have to defer the new target until after the response if any of
// the following are true:
// need to put on deferred list
if (hasPostInvalidate())
replaceUpgrade(pkt);
- deferredTargets.add(pkt, whenReady, _order, Target::FromCPU, true);
+ deferredTargets.add(pkt, whenReady, _order, Target::FromCPU, true,
+ alloc_on_fill);
} else {
// No request outstanding, or still OK to append to
// outstanding request: append to regular target list. Only
// mark pending if current request hasn't been issued yet
// (isn't in service).
- targets.add(pkt, whenReady, _order, Target::FromCPU, !inService);
+ targets.add(pkt, whenReady, _order, Target::FromCPU, !inService,
+ alloc_on_fill);
}
}
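
As a rough standalone illustration of this branch (MiniMSHR and MiniTarget are invented, and the real deferral conditions are collapsed into a single must_defer flag), the dispatch between the two target lists and the markPending = !inService detail look like:

#include <cassert>
#include <vector>

struct MiniTarget { bool markedPending; bool allocOnFill; };

struct MiniMSHR {
    bool inService = false;
    std::vector<MiniTarget> targets;
    std::vector<MiniTarget> deferredTargets;

    void allocateTarget(bool must_defer, bool alloc_on_fill)
    {
        if (inService && must_defer)
            deferredTargets.push_back({true, alloc_on_fill});
        else
            targets.push_back({!inService, alloc_on_fill});
    }
};

int main()
{
    MiniMSHR mshr;
    mshr.allocateTarget(false, true);       // before issue: marked pending
    assert(mshr.targets[0].markedPending);

    mshr.inService = true;
    mshr.allocateTarget(false, true);       // after issue, still appendable
    assert(!mshr.targets[1].markedPending);

    mshr.allocateTarget(true, false);       // must wait for the response
    assert(mshr.deferredTargets.size() == 1);
    return 0;
}
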
// recipient does not care there is no harm in doing so
}
targets.add(cp_pkt, curTick(), _order, Target::FromSnoop,
- downstreamPending && targets.needsWritable);
+ downstreamPending && targets.needsWritable, false);
if (pkt->needsWritable()) {
// This transaction will take away our pending copy
prefix, blkAddr, blkAddr + blkSize - 1,
isSecure ? "s" : "ns",
isForward ? "Forward" : "",
- allocOnFill ? "AllocOnFill" : "",
+ allocOnFill() ? "AllocOnFill" : "",
needsWritable() ? "Wrtbl" : "",
_isUncacheable ? "Unc" : "",
inService ? "InSvc" : "",
const Source source; //!< Request from cpu, memory, or prefetcher?
const bool markedPending; //!< Did we mark upstream MSHR
//!< as downstreamPending?
+ const bool allocOnFill; //!< Should the response servicing this
+ //!< target allocate in the cache?
Target(PacketPtr _pkt, Tick _readyTime, Counter _order,
- Source _source, bool _markedPending)
+ Source _source, bool _markedPending, bool alloc_on_fill)
: recvTime(curTick()), readyTime(_readyTime), order(_order),
- pkt(_pkt), source(_source), markedPending(_markedPending)
+ pkt(_pkt), source(_source), markedPending(_markedPending),
+ allocOnFill(alloc_on_fill)
{}
};
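
Because the new member is const, it can only be given a value when the Target is constructed, which is why both this constructor and the emplace_back() call in TargetList::add() gain an extra argument. A tiny standalone illustration of the pattern (Entry is an invented stand-in):

#include <cassert>
#include <list>

struct Entry {
    const bool markedPending;
    const bool allocOnFill;   // const: fixed for the lifetime of the entry

    Entry(bool marked_pending, bool alloc_on_fill)
        : markedPending(marked_pending), allocOnFill(alloc_on_fill)
    {}
};

int main()
{
    std::list<Entry> entries;
    // emplace_back forwards its arguments straight to Entry's constructor,
    // so the const members receive their values exactly once.
    entries.emplace_back(true, false);
    assert(entries.back().markedPending && !entries.back().allocOnFill);
    return 0;
}
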
public:
bool needsWritable;
bool hasUpgrade;
+ /** Set when the response should allocate on fill */
+ bool allocOnFill;
TargetList();
*
* @param pkt Packet considered for the flag update
* @param source Indicates the source of the packet
+ * @param alloc_on_fill Whether the pkt would allocate on a fill
*/
- void updateFlags(PacketPtr pkt, Target::Source source);
+ void updateFlags(PacketPtr pkt, Target::Source source,
+ bool alloc_on_fill);
- void resetFlags() { needsWritable = hasUpgrade = false; }
+ void resetFlags() { needsWritable = hasUpgrade = allocOnFill = false; }
/**
     * Goes through the list of targets and uses them to populate
     * the flags of this TargetList.
     */
void populateFlags();
- bool isReset() const { return !needsWritable && !hasUpgrade; }
+ /**
+ * Tests if the flags of this TargetList have their default
+ * values.
+ */
+ bool isReset() const {
+ return !needsWritable && !hasUpgrade && !allocOnFill;
+ }
+
+ /**
+ * Add the specified packet to the TargetList. This function
+ * stores information related to the added packet and updates
+ * the flags accordingly.
+ *
+ * @param pkt Packet considered for adding
+ * @param readyTime Tick at which the packet is processed by this cache
+ * @param order A counter giving a unique id to each target
+ * @param source Indicates the source agent of the packet
+ * @param markPending Set for deferred targets or pending MSHRs
+ * @param alloc_on_fill Whether it should allocate on a fill
+ */
void add(PacketPtr pkt, Tick readyTime, Counter order,
- Target::Source source, bool markPending);
+ Target::Source source, bool markPending,
+ bool alloc_on_fill);
/**
* Convert upgrades to the equivalent request if the cache line they
/** MSHR list iterator. */
typedef List::iterator Iterator;
- /** Keep track of whether we should allocate on fill or not */
- bool allocOnFill;
-
/** The pending* and post* flags are only valid if inService is
* true. Using the accessor functions lets us detect if these
* flags are accessed improperly.
bool sendPacket(Cache &cache);
+ bool allocOnFill() const {
+ return targets.allocOnFill;
+ }
private:
/**