assert(blkSize == pkt->getSize());
if (!blk) {
- // a writeback that misses needs to allocate a new block
- blk = allocateBlock(pkt->getAddr(), pkt->isSecure(),
- writebacks);
- if (!blk) {
- // no replaceable block available: give up, fwd to
- // next level.
- incMissCount(pkt);
+ if (pkt->writeThrough()) {
+ // if this is a write through packet, we don't try to
+ // allocate if the block is not present
return false;
- }
- tags->insertBlock(pkt, blk);
+ } else {
+ // a writeback that misses needs to allocate a new block
+ blk = allocateBlock(pkt->getAddr(), pkt->isSecure(),
+ writebacks);
+ if (!blk) {
+ // no replaceable block available: give up, fwd to
+ // next level.
+ incMissCount(pkt);
+ return false;
+ }
+ tags->insertBlock(pkt, blk);
- blk->status = (BlkValid | BlkReadable);
- if (pkt->isSecure()) {
- blk->status |= BlkSecure;
+ blk->status = (BlkValid | BlkReadable);
+ if (pkt->isSecure()) {
+ blk->status |= BlkSecure;
+ }
}
}
// at this point this is either a writeback or a write-through
// write clean operation and the block is already in this
// cache; we need to update the data and the block flags
assert(blk);
- blk->status |= BlkDirty;
+ if (!pkt->writeThrough()) {
+ blk->status |= BlkDirty;
+ }
// nothing else to do; writeback doesn't expect response
assert(!pkt->needsResponse());
std::memcpy(blk->data, pkt->getConstPtr<uint8_t>(), blkSize);
// populate the time when the block will be ready to access.
blk->whenReady = clockEdge(fillLatency) + pkt->headerDelay +
pkt->payloadDelay;
- return true;
+ // if this is a write-through packet it will be sent to the
+ // cache below
+ return !pkt->writeThrough();
} else if (blk && (pkt->needsWritable() ? blk->isWritable() :
blk->isReadable())) {
// OK to satisfy access
}
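// Descriptive note (not part of the change): the net effect of the hunk
// above is that a WriteClean carrying the write-through flag never
// allocates on a miss and always returns false, so the data keeps moving
// towards its destination; on a hit the data is updated but the block is
// left clean. Without the flag the operation behaves like a writeback:
// allocate on a miss, mark the block dirty, and sink the packet here
// (return true).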
PacketPtr
-Cache::writecleanBlk(CacheBlk *blk)
+Cache::writecleanBlk(CacheBlk *blk, Request::Flags dest)
{
Request *req = new Request(tags->regenerateBlkAddr(blk->tag, blk->set),
blkSize, 0, Request::wbMasterId);
// We inform the cache below that the block has sharers in the
// system as we retain our copy.
pkt->setHasSharers();
+ if (dest) {
+ req->setFlags(dest);
+ pkt->setWriteThrough();
+ }
std::memcpy(pkt->getPtr<uint8_t>(), blk->data, blkSize);
return pkt;
}
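// Illustrative call site (hypothetical, assuming a PacketList 'writebacks'
// as used elsewhere in the cache): clean a dirty block all the way to the
// point of unification; the resulting packet is marked write-through so
// caches on the way keep forwarding it.
PacketPtr wc_pkt = writecleanBlk(blk, Request::DST_POU);
writebacks.push_back(wc_pkt);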
/**
* Create a writeclean request for the given block.
* @param blk The block to write clean
+ * @param dest The destination of this clean operation
* @return The write clean packet for the block.
*/
- PacketPtr writecleanBlk(CacheBlk *blk);
+ PacketPtr writecleanBlk(CacheBlk *blk, Request::Flags dest = 0);
/**
* Create a CleanEvict request for the given block.
// determine how long the crossbar layer is busy
Tick packetFinishTime = clockEdge(Cycles(1)) + pkt->payloadDelay;
+ // is this the destination point for this packet? (e.g. true if
+ // this xbar is the PoC for a cache maintenance operation to the
+ // PoC) otherwise the destination is any cache that can satisfy
+ // the request
+ const bool is_destination = isDestination(pkt);
+
const bool snoop_caches = !system->bypassCaches() &&
pkt->cmd != MemCmd::WriteClean;
if (snoop_caches) {
} else {
// determine if we are forwarding the packet, or responding to
// it
- if (!pointOfCoherency || pkt->isRead() || pkt->isWrite()) {
+ if (forwardPacket(pkt)) {
// if we are passing on, rather than sinking, a packet to
// which an upstream cache has committed to responding,
// the line needs to be writable, and the responding cache only
pkt->setExpressSnoop();
}
+ // make sure that the write request (e.g., WriteClean)
+ // will stop at the memory below if this crossbar is its
+ // destination
+ if (pkt->isWrite() && is_destination) {
+ pkt->clearWriteThrough();
+ }
+
// since it is a normal request, attempt to send the packet
success = masterPorts[master_port_id]->sendTimingReq(pkt);
} else {
MemCmd snoop_response_cmd = MemCmd::InvalidCmd;
Tick snoop_response_latency = 0;
+ // is this the destination point for this packet? (e.g. true if
+ // this xbar is the PoC for a cache maintenance operation to the
+ // PoC) otherwise the destination is any cache that can satisfy
+ // the request
+ const bool is_destination = isDestination(pkt);
+
const bool snoop_caches = !system->bypassCaches() &&
pkt->cmd != MemCmd::WriteClean;
if (snoop_caches) {
DPRINTF(CoherentXBar, "%s: Not forwarding %s\n", __func__,
pkt->print());
} else {
- if (!pointOfCoherency || pkt->isRead() || pkt->isWrite()) {
+ if (forwardPacket(pkt)) {
+ // make sure that the write request (e.g., WriteClean)
+ // will stop at the memory below if this crossbar is its
+ // destination
+ if (pkt->isWrite() && is_destination) {
+ pkt->clearWriteThrough();
+ }
+
// forward the request to the appropriate destination
response_latency = masterPorts[master_port_id]->sendAtomic(pkt);
} else {
(!pkt->needsWritable() || pkt->responderHadWritable()));
}
+bool
+CoherentXBar::forwardPacket(const PacketPtr pkt)
+{
+ // we are forwarding the packet if:
+ // 1) this is a read or a write, or
+ // 2) this crossbar is above the point of coherency
+ return pkt->isRead() || pkt->isWrite() || !pointOfCoherency;
+}
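// Worked example (descriptive note, not part of the change): a WriteClean
// whose request targets the PoC reaches a crossbar that is the point of
// coherency. forwardPacket() returns true since it is a write,
// isDestination() returns true (isToPOC() && pointOfCoherency), so
// clearWriteThrough() is called before the packet is forwarded and the
// level below sinks it like a regular writeback instead of propagating it
// any further.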
+
+
void
CoherentXBar::regStats()
{
*/
bool sinkPacket(const PacketPtr pkt) const;
+ /**
+ * Determine if the crossbar should forward the packet, as opposed to
+ * responding to it.
+ */
+ bool forwardPacket(const PacketPtr pkt);
+
+ /**
+ * Determine if the packet's destination is the memory below
+ *
+ * The memory below is the destination for a cache maintenance
+ * operation to the Point of Coherence/Unification if this
+ * crossbar is the Point of Coherence/Unification.
+ *
+ * @param pkt The processed packet
+ *
+ * @return Whether the memory below is the destination for the packet
+ */
+ bool isDestination(const PacketPtr pkt) const
+ {
+ return (pkt->req->isToPOC() && pointOfCoherency) ||
+ (pkt->req->isToPOU() && pointOfUnification);
+ }
+
Stats::Scalar snoops;
Stats::Scalar snoopTraffic;
Stats::Distribution snoopFanout;
void
Packet::print(ostream &o, const int verbosity, const string &prefix) const
{
- ccprintf(o, "%s%s [%x:%x]%s%s%s%s", prefix, cmdString(),
+ ccprintf(o, "%s%s [%x:%x]%s%s%s%s%s%s", prefix, cmdString(),
getAddr(), getAddr() + getSize() - 1,
req->isSecure() ? " (s)" : "",
req->isInstFetch() ? " IF" : "",
req->isUncacheable() ? " UC" : "",
- isExpressSnoop() ? " ES" : "");
+ isExpressSnoop() ? " ES" : "",
+ req->isToPOC() ? " PoC" : "",
+ req->isToPOU() ? " PoU" : "");
}
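// For example (hypothetical address), a write-through WriteClean whose
// request targets the point of coherence would now print along the lines
// of:
//   WriteClean [1000:103f] PoC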
std::string
enum : FlagsType {
// Flags to transfer across when copying a packet
- COPY_FLAGS = 0x0000000F,
+ COPY_FLAGS = 0x0000001F,
// Does this packet have sharers (which means it should not be
// considered writable) or not. See setHasSharers below.
// responding to a snoop. See setCacheResponding below.
CACHE_RESPONDING = 0x00000008,
+ // The writeback/writeclean should be propagated further
+ // downstream by the receiver
+ WRITE_THROUGH = 0x00000010,
+
/// Are the 'addr' and 'size' fields valid?
VALID_ADDR = 0x00000100,
VALID_SIZE = 0x00000200,
bool responderHadWritable() const
{ return flags.isSet(RESPONDER_HAD_WRITABLE); }
+ /**
+ * A writeback/writeclean cmd gets propagated further downstream
+ * by the receiver when the flag is set.
+ */
+ void setWriteThrough()
+ {
+ assert(cmd.isWrite() &&
+ (cmd.isEviction() || cmd == MemCmd::WriteClean));
+ flags.set(WRITE_THROUGH);
+ }
+ void clearWriteThrough() { flags.clear(WRITE_THROUGH); }
+ bool writeThrough() const { return flags.isSet(WRITE_THROUGH); }
+
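// Typical lifetime of the flag (descriptive sketch, not part of the
// change): the issuing cache sets WRITE_THROUGH in writecleanBlk() when a
// destination is given, caches on the way check writeThrough() to keep
// propagating the data, and the crossbar matching the requested
// destination (PoC/PoU) calls clearWriteThrough() so that the level below
// finally sinks the write.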
void setSuppressFuncError() { flags.set(SUPPRESS_FUNC_ERROR); }
bool suppressFuncError() const { return flags.isSet(SUPPRESS_FUNC_ERROR); }
void setBlockCached() { flags.set(BLOCK_CACHED); }
/*
- * Copyright (c) 2012-2013 ARM Limited
+ * Copyright (c) 2012-2013,2017 ARM Limited
* All rights reserved
*
* The license below extends only to copyright in the software and shall
class Request
{
public:
- typedef uint32_t FlagsType;
+ typedef uint64_t FlagsType;
typedef uint8_t ArchFlagsType;
typedef ::Flags<FlagsType> Flags;
/** The request is a page table walk */
PT_WALK = 0x20000000,
+ /** The request targets the point of unification */
+ DST_POU = 0x0000001000000000,
+
+ /** The request targets the point of coherence */
+ DST_POC = 0x0000002000000000,
+
+ /** Bits to define the destination of a request */
+ DST_BITS = 0x0000003000000000,
+
/**
* These flags are *not* cleared when a Request object is
* reused (assigned a new address).
_flags.isSet(ATOMIC_NO_RETURN_OP);
}
+ /**
+ * Accessor functions for the destination of a memory request. The
+ * destination flag can specify a point of reference for the
+ * operation (e.g. a cache block clean to the point of
+ * unification). At the moment the destination is only used by the
+ * cache maintenance operations.
+ */
+ bool isToPOU() const { return _flags.isSet(DST_POU); }
+ bool isToPOC() const { return _flags.isSet(DST_POC); }
+ Flags getDest() const { return _flags & DST_BITS; }
+
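// Illustrative sketch (hypothetical call site; 'addr' and 'blkSize' are
// assumed values, and the pattern mirrors writecleanBlk above): tag a
// request so that the resulting clean stops at the point of coherence.
Request *req = new Request(addr, blkSize, 0, Request::wbMasterId);
req->setFlags(Request::DST_POC);
assert(req->isToPOC());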
/**
* Accessor functions for the memory space configuration flags and used by
* GPU ISAs such as the Heterogeneous System Architecture (HSA). Note that