/*
- * Copyright (c) 2012 ARM Limited
+ * Copyright (c) 2012-2013 ARM Limited
* All rights reserved.
*
* The license below extends only to copyright in the software and shall
Blocked_NoMSHRs = MSHRQueue_MSHRs,
Blocked_NoWBBuffers = MSHRQueue_WriteBuffer,
Blocked_NoTargets,
+ Blocked_PendingWriteInvalidate,
NUM_BLOCKED_CAUSES
};
queue.schedSendEvent(time);
}
- /**
- * Schedule the transmissions of a response packet at a given
- * point in time.
- *
- * @param pkt response packet
- * @param when time to send the response
- */
- void respond(PacketPtr pkt, Tick time) {
- queue.schedSendTiming(pkt, time);
- }
-
protected:
CacheMasterPort(const std::string &_name, BaseCache *_cache,
- PacketQueue &_queue) :
+ MasterPacketQueue &_queue) :
QueuedMasterPort(_name, _cache, _queue)
{ }
/** Return to normal operation and accept new requests. */
void clearBlocked();
- /**
- * Schedule the transmissions of a response packet at a given
- * point in time.
- *
- * @param pkt response packet
- * @param when time to send the response
- */
- void respond(PacketPtr pkt, Tick time) {
- queue.schedSendTiming(pkt, time);
- }
+ bool isBlocked() const { return blocked; }
protected:
const std::string &_label);
/** A normal packet queue used to store responses. */
- PacketQueue queue;
+ SlavePacketQueue queue;
bool blocked;
private:
- EventWrapper<Port, &Port::sendRetry> sendRetryEvent;
+ void processSendRetry();
+
+ EventWrapper<CacheSlavePort,
+ &CacheSlavePort::processSendRetry> sendRetryEvent;
};
}
}
+ /**
+ * Write back dirty blocks in the cache using functional accesses.
+ */
+ virtual void memWriteback() = 0;
+ /**
+ * Invalidates all blocks in the cache.
+ *
+ * @warn Dirty cache lines will not be written back to
+ * memory. Make sure to call memWriteback() first if you
+ * want to write them to memory.
+ */
+ virtual void memInvalidate() = 0;
+ /**
+ * Determine if there are any dirty blocks in the cache.
+ *
+ * \return true if at least one block is dirty, false otherwise.
+ */
+ virtual bool isDirty() const = 0;
+
/** Block size of this cache */
const unsigned blkSize;
/**
* The latency of a hit in this device.
*/
- int hitLatency;
+ const Cycles hitLatency;
+
+ /**
+ * The latency of sending a response to its upper level cache/core on a
+ * linefill. In most contemporary processors, the return path on a cache
+ * miss is much quicker than the hit latency. The responseLatency parameter
+ * tries to capture this latency.
+ */
+ const Cycles responseLatency;
/** The number of targets for each MSHR. */
const int numTarget;
/** Do we forward snoops from mem side port through to cpu side port? */
- bool forwardSnoops;
+ const bool forwardSnoops;
/** Is this cache a toplevel cache (e.g. L1, I/O cache). If so we should
* never try to forward ownership and similar optimizations to the cpu
* side */
- bool isTopLevel;
+ const bool isTopLevel;
/**
* Bit vector of the blocking reasons for the access path.
uint64_t order;
/** Stores time the cache blocked for statistics. */
- Tick blockedCycle;
+ Cycles blockedCycle;
/** Pointer to the MSHR that has no targets. */
MSHR *noTargetMSHR;
/** The number of misses to trigger an exit event. */
Counter missCount;
- /** The drain event. */
- Event *drainEvent;
-
/**
* The address range to which the cache responds on the CPU side.
* Normally this is all possible memory addresses. */
- AddrRangeList addrRanges;
+ const AddrRangeList addrRanges;
public:
/** System we are currently operating in. */
virtual void init();
- virtual MasterPort &getMasterPort(const std::string &if_name, int idx = -1);
- virtual SlavePort &getSlavePort(const std::string &if_name, int idx = -1);
+ virtual BaseMasterPort &getMasterPort(const std::string &if_name,
+ PortID idx = InvalidPortID);
+ virtual BaseSlavePort &getSlavePort(const std::string &if_name,
+ PortID idx = InvalidPortID);
/**
* Query block size of a cache.
/**
* Returns true if the cache is blocked for accesses.
*/
- bool isBlocked()
+ bool isBlocked() const
{
return blocked != 0;
}
uint8_t flag = 1 << cause;
if (blocked == 0) {
blocked_causes[cause]++;
- blockedCycle = curTick();
+ blockedCycle = curCycle();
cpuSidePort->setBlocked();
}
blocked |= flag;
blocked &= ~flag;
DPRINTF(Cache,"Unblocking for cause %d, mask=%d\n", cause, blocked);
if (blocked == 0) {
- blocked_cycles[cause] += curTick() - blockedCycle;
+ blocked_cycles[cause] += curCycle() - blockedCycle;
cpuSidePort->clearBlocked();
}
}
// interesting again.
}
- virtual unsigned int drain(Event *de);
+ virtual unsigned int drain(DrainManager *dm);
- virtual bool inCache(Addr addr) = 0;
+ virtual bool inCache(Addr addr, bool is_secure) const = 0;
- virtual bool inMissQueue(Addr addr) = 0;
+ virtual bool inMissQueue(Addr addr, bool is_secure) const = 0;
void incMissCount(PacketPtr pkt)
{
assert(pkt->req->masterId() < system->maxMasters());
misses[pkt->cmdToIndex()][pkt->req->masterId()]++;
-
+ pkt->req->incAccessDepth();
if (missCount) {
--missCount;
if (missCount == 0)