// full write buffer, otherwise we favour the miss requests
if (wq_entry && (writeBuffer.isFull() || !miss_mshr)) {
// need to search MSHR queue for conflicting earlier miss.
- MSHR *conflict_mshr =
- mshrQueue.findPending(wq_entry->blkAddr,
- wq_entry->isSecure);
+ MSHR *conflict_mshr = mshrQueue.findPending(wq_entry);
if (conflict_mshr && conflict_mshr->order < wq_entry->order) {
// Service misses in order until conflict is cleared.
return wq_entry;
} else if (miss_mshr) {
// need to check for conflicting earlier writeback
- WriteQueueEntry *conflict_mshr =
- writeBuffer.findPending(miss_mshr->blkAddr,
- miss_mshr->isSecure);
+ WriteQueueEntry *conflict_mshr = writeBuffer.findPending(miss_mshr);
if (conflict_mshr) {
// not sure why we don't check order here... it was in the
// original code but commented out.
Target::Source source = (target->cmd == MemCmd::HardPFReq) ?
Target::FromPrefetcher : Target::FromCPU;
targets.add(target, when_ready, _order, source, true, alloc_on_fill);
+
+ // All targets must refer to the same block
+ assert(target->matchBlockAddr(targets.front().pkt, blkSize));
}
print(str);
return str.str();
}
+
+// Check whether this MSHR tracks the given block: both the block address
+// and the secure-space flag must match. Only valid while the MSHR has
+// targets (asserted).
+bool
+MSHR::matchBlockAddr(const Addr addr, const bool is_secure) const
+{
+ assert(hasTargets());
+ return (blkAddr == addr) && (isSecure == is_secure);
+}
+
+// Check whether the given packet addresses this MSHR's block, delegating
+// to Packet::matchBlockAddr with this entry's block address, secure flag
+// and block size.
+bool
+MSHR::matchBlockAddr(const PacketPtr pkt) const
+{
+ assert(hasTargets());
+ return pkt->matchBlockAddr(blkAddr, isSecure, blkSize);
+}
+
+// Check whether this MSHR conflicts with the given queue entry, i.e. the
+// other entry matches this entry's block address and secure flag.
+bool
+MSHR::conflictAddr(const QueueEntry* entry) const
+{
+ assert(hasTargets());
+ return entry->matchBlockAddr(blkAddr, isSecure);
+}
* @return string with mshr fields + [deferred]targets
*/
std::string print() const;
+
+ bool matchBlockAddr(const Addr addr, const bool is_secure) const override;
+ bool matchBlockAddr(const PacketPtr pkt) const override;
+ bool conflictAddr(const QueueEntry* entry) const override;
};
#endif // __MEM_CACHE_MSHR_HH__
// cacheable accesses being added to an WriteQueueEntry
// serving an uncacheable access
if (!(ignore_uncacheable && entry->isUncacheable()) &&
- entry->blkAddr == blk_addr && entry->isSecure == is_secure) {
+ entry->matchBlockAddr(blk_addr, is_secure)) {
return entry;
}
}
{
pkt->pushLabel(label);
for (const auto& entry : allocatedList) {
- if (entry->blkAddr == blk_addr && entry->trySatisfyFunctional(pkt)) {
+ if (entry->matchBlockAddr(blk_addr, pkt->isSecure()) &&
+ entry->trySatisfyFunctional(pkt)) {
pkt->popLabel();
return true;
}
}
/**
- * Find any pending requests that overlap the given request.
- * @param blk_addr Block address.
- * @param is_secure True if the target memory space is secure.
- * @return A pointer to the earliest matching WriteQueueEntry.
+ * Find any pending request that overlaps the given request, which
+ * belongs to a different queue.
+ *
+ * @param entry The entry to be compared against.
+ * @return A pointer to the earliest matching entry.
*/
- Entry* findPending(Addr blk_addr, bool is_secure) const
+ Entry* findPending(const QueueEntry* entry) const
{
- for (const auto& entry : readyList) {
- if (entry->blkAddr == blk_addr && entry->isSecure == is_secure) {
- return entry;
+ for (const auto& ready_entry : readyList) {
+ if (ready_entry->conflictAddr(entry)) {
+ return ready_entry;
}
}
return nullptr;
/** True if the entry targets the secure memory space. */
bool isSecure;
- QueueEntry() : readyTime(0), _isUncacheable(false),
- inService(false), order(0), blkAddr(0), blkSize(0),
- isSecure(false)
+ // Start with every bookkeeping field zero/false; real values are
+ // assigned later — presumably on allocation, which is not visible in
+ // this chunk.
+ QueueEntry()
+ : readyTime(0), _isUncacheable(false),
+ inService(false), order(0), blkAddr(0), blkSize(0), isSecure(false)
    {}
bool isUncacheable() const { return _isUncacheable; }
+ /**
+ * Check if entry corresponds to the one being looked for.
+ *
+ * @param addr Address to match against.
+ * @param is_secure Whether the target should be in secure space or not.
+ * @return True if entry matches given information.
+ */
+ virtual bool matchBlockAddr(const Addr addr, const bool is_secure)
+ const = 0;
+
+ /**
+ * Check if this entry's block matches the block addressed by the
+ * given packet.
+ *
+ * @param pkt The packet to compare against.
+ * @return True if the packet and this entry reference the same block.
+ */
+ virtual bool matchBlockAddr(const PacketPtr pkt) const = 0;
+
+ /**
+ * Check if the given entry's block conflicts with this entry's block.
+ *
+ * @param entry The other entry to compare against.
+ * @return True if both entries reference the same block.
+ */
+ virtual bool conflictAddr(const QueueEntry* entry) const = 0;
+
/**
* Send this queue entry as a downstream packet, with the exact
* behaviour depending on the specific entry type.
"a cacheable eviction or a writeclean");
targets.add(target, when_ready, _order);
+
+ // All targets must refer to the same block
+ assert(target->matchBlockAddr(targets.front().pkt, blkSize));
}
void
return cache.sendWriteQueuePacket(this);
}
+// Check whether this write queue entry tracks the given block: both the
+// block address and the secure-space flag must match. Only valid while
+// the entry has targets (asserted).
+bool
+WriteQueueEntry::matchBlockAddr(const Addr addr, const bool is_secure) const
+{
+ assert(hasTargets());
+ return (blkAddr == addr) && (isSecure == is_secure);
+}
+
+// Check whether the given packet addresses this entry's block, delegating
+// to Packet::matchBlockAddr with this entry's block address, secure flag
+// and block size.
+bool
+WriteQueueEntry::matchBlockAddr(const PacketPtr pkt) const
+{
+ assert(hasTargets());
+ return pkt->matchBlockAddr(blkAddr, isSecure, blkSize);
+}
+
+// Check whether this write queue entry conflicts with the given queue
+// entry, i.e. the other entry matches this entry's block address and
+// secure flag.
+bool
+WriteQueueEntry::conflictAddr(const QueueEntry* entry) const
+{
+ assert(hasTargets());
+ return entry->matchBlockAddr(blkAddr, isSecure);
+}
+
void
WriteQueueEntry::print(std::ostream &os, int verbosity,
const std::string &prefix) const
* @return string with mshr fields
*/
std::string print() const;
+
+ bool matchBlockAddr(const Addr addr, const bool is_secure) const override;
+ bool matchBlockAddr(const PacketPtr pkt) const override;
+ bool conflictAddr(const QueueEntry* entry) const override;
};
#endif // __MEM_CACHE_WRITE_QUEUE_ENTRY_HH__