+
+ // potentially re-evaluate whether we should allocate on a fill or
+ // not
+ allocOnFill = allocOnFill || alloc_on_fill;
+
+ if (source != Target::FromPrefetcher) {
+ hasFromCache = hasFromCache || pkt->fromCache();
+
+ updateWriteFlags(pkt);
+ }
+ }
+}
+
+void
+MSHR::TargetList::populateFlags()
+{
+    // Recompute the cached per-list flag state from scratch: wipe the
+    // current flags, then fold in the contribution of every target
+    // queued in this list, in order.
+    resetFlags();
+    for (auto it = begin(); it != end(); ++it) {
+        updateFlags(it->pkt, it->source, it->allocOnFill);
+    }
+}
+
+void
+MSHR::TargetList::updateWriteFlags(PacketPtr pkt)
+{
+ if (isWholeLineWrite()) {
+ // if we have already seen writes for the full block
+ // stop here, this might be a full line write followed
+ // by other compatible requests (e.g., reads)
+ return;
+ }
+
+ if (canMergeWrites) {
+ if (!pkt->isWrite()) {
+ // We won't allow further merging if this hasn't
+ // been a write
+ canMergeWrites = false;
+ return;
+ }
+
+ // Avoid merging requests with special flags (e.g.,
+ // strictly ordered)
+ const Request::FlagsType no_merge_flags =
+ Request::UNCACHEABLE | Request::STRICT_ORDER |
+ Request::MMAPPED_IPR | Request::PRIVILEGED |
+ Request::LLSC | Request::MEM_SWAP |
+ Request::MEM_SWAP_COND | Request::SECURE;
+ const auto &req_flags = pkt->req->getFlags();
+ bool compat_write = !req_flags.isSet(no_merge_flags);
+
+ // if this is the first write, it might be a whole
+ // line write and even if we can't merge any
+ // subsequent write requests, we still need to service
+ // it as a whole line write (e.g., SECURE whole line
+ // write)
+ bool first_write = empty();
+ if (first_write || compat_write) {
+ auto offset = pkt->getOffset(blkSize);
+ auto begin = writesBitmap.begin() + offset;
+ std::fill(begin, begin + pkt->getSize(), true);
+ }
+
+ // We won't allow further merging if this has been a
+ // special write
+ canMergeWrites &= compat_write;