mem/physical.cc
mem/port.cc
+ mem/cache/base_cache.cc
+ mem/cache/cache.cc
+ mem/cache/cache_builder.cc
+ mem/cache/coherence/coherence_protocol.cc
+ mem/cache/coherence/uni_coherence.cc
+ mem/cache/miss/blocking_buffer.cc
+ mem/cache/miss/miss_queue.cc
+ mem/cache/miss/mshr.cc
+ mem/cache/miss/mshr_queue.cc
+ mem/cache/prefetch/base_prefetcher.cc
+ mem/cache/prefetch/ghb_prefetcher.cc
+ mem/cache/prefetch/prefetcher.cc
+ mem/cache/prefetch/stride_prefetcher.cc
+ mem/cache/prefetch/tagged_prefetcher.cc
+ mem/cache/tags/base_tags.cc
+ mem/cache/tags/cache_tags.cc
+ mem/cache/tags/fa_lru.cc
+ mem/cache/tags/iic.cc
+ mem/cache/tags/lru.cc
+ mem/cache/tags/repl/gen.cc
+ mem/cache/tags/repl/repl.cc
+ mem/cache/tags/split.cc
+ mem/cache/tags/split_lifo.cc
+ mem/cache/tags/split_lru.cc
+
sim/builder.cc
sim/debug.cc
sim/eventq.cc
{
blocked = false;
//Start ports at null; if more than one is created we should panic
- cpuSidePort = NULL;
- memSidePort = NULL;
+ //cpuSidePort = NULL;
+ //memSidePort = NULL;
}
-bool
+void
BaseCache::CachePort::recvStatusChange(Port::Status status)
{
cache->recvStatusChange(status, isCpuSide);
void
BaseCache::regStats()
{
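+ // A throwaway Request/Packet pair is built here only so cmdIdxToString()
+ // can be called when naming the per-command stats below; it can go away
+ // once the command strings no longer require a Packet instance.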
+ Request temp_req;
+ Packet::Command temp_cmd = Packet::ReadReq;
+ Packet temp_pkt(&temp_req, temp_cmd, 0); //@todo Fix command strings so this isn't necessary
+
using namespace Stats;
// Hit statistics
for (int access_idx = 0; access_idx < NUM_MEM_CMDS; ++access_idx) {
- Packet::Command cmd = (Packet::CommandEnum)access_idx;
- const string &cstr = cmd.toString();
+ Packet::Command cmd = (Packet::Command)access_idx;
+ const string &cstr = temp_pkt.cmdIdxToString(cmd);
hits[access_idx]
.init(maxThreadsPerCPU)
.desc("number of demand (read+write) hits")
.flags(total)
;
- demandHits = hits[Read] + hits[Write];
+ demandHits = hits[Packet::ReadReq] + hits[Packet::WriteReq];
overallHits
.name(name() + ".overall_hits")
.desc("number of overall hits")
.flags(total)
;
- overallHits = demandHits + hits[Soft_Prefetch] + hits[Hard_Prefetch]
- + hits[Writeback];
+ overallHits = demandHits + hits[Packet::SoftPFReq] + hits[Packet::HardPFReq]
+ + hits[Packet::Writeback];
// Miss statistics
for (int access_idx = 0; access_idx < NUM_MEM_CMDS; ++access_idx) {
- Packet::Command cmd = (Packet::CommandEnum)access_idx;
- const string &cstr = cmd.toString();
+ Packet::Command cmd = (Packet::Command)access_idx;
+ const string &cstr = temp_pkt.cmdIdxToString(cmd);
misses[access_idx]
.init(maxThreadsPerCPU)
.desc("number of demand (read+write) misses")
.flags(total)
;
- demandMisses = misses[Read] + misses[Write];
+ demandMisses = misses[Packet::ReadReq] + misses[Packet::WriteReq];
overallMisses
.name(name() + ".overall_misses")
.desc("number of overall misses")
.flags(total)
;
- overallMisses = demandMisses + misses[Soft_Prefetch] +
- misses[Hard_Prefetch] + misses[Writeback];
+ overallMisses = demandMisses + misses[Packet::SoftPFReq] +
+ misses[Packet::HardPFReq] + misses[Packet::Writeback];
// Miss latency statistics
for (int access_idx = 0; access_idx < NUM_MEM_CMDS; ++access_idx) {
- Packet::Command cmd = (Packet::CommandEnum)access_idx;
- const string &cstr = cmd.toString();
+ Packet::Command cmd = (Packet::Command)access_idx;
+ const string &cstr = temp_pkt.cmdIdxToString(cmd);
missLatency[access_idx]
.init(maxThreadsPerCPU)
.desc("number of demand (read+write) miss cycles")
.flags(total)
;
- demandMissLatency = missLatency[Read] + missLatency[Write];
+ demandMissLatency = missLatency[Packet::ReadReq] + missLatency[Packet::WriteReq];
overallMissLatency
.name(name() + ".overall_miss_latency")
.desc("number of overall miss cycles")
.flags(total)
;
- overallMissLatency = demandMissLatency + missLatency[Soft_Prefetch] +
- missLatency[Hard_Prefetch];
+ overallMissLatency = demandMissLatency + missLatency[Packet::SoftPFReq] +
+ missLatency[Packet::HardPFReq];
// access formulas
for (int access_idx = 0; access_idx < NUM_MEM_CMDS; ++access_idx) {
- Packet::Command cmd = (Packet::CommandEnum)access_idx;
- const string &cstr = cmd.toString();
+ Packet::Command cmd = (Packet::Command)access_idx;
+ const string &cstr = temp_pkt.cmdIdxToString(cmd);
accesses[access_idx]
.name(name() + "." + cstr + "_accesses")
// miss rate formulas
for (int access_idx = 0; access_idx < NUM_MEM_CMDS; ++access_idx) {
- Packet::Command cmd = (Packet::CommandEnum)access_idx;
- const string &cstr = cmd.toString();
+ Packet::Command cmd = (Packet::Command)access_idx;
+ const string &cstr = temp_pkt.cmdIdxToString(cmd);
missRate[access_idx]
.name(name() + "." + cstr + "_miss_rate")
// miss latency formulas
for (int access_idx = 0; access_idx < NUM_MEM_CMDS; ++access_idx) {
- Packet::Command cmd = (Packet::CommandEnum)access_idx;
- const string &cstr = cmd.toString();
+ Packet::Command cmd = (Packet::Command)access_idx;
+ const string &cstr = temp_pkt.cmdIdxToString(cmd);
avgMissLatency[access_idx]
.name(name() + "." + cstr + "_avg_miss_latency")
#include "mem/packet.hh"
#include "mem/port.hh"
#include "mem/request.hh"
+#include "sim/eventq.hh"
/**
* Reasons for Caches to be Blocked.
public:
CachePort(const std::string &_name, BaseCache *_cache, bool _isCpuSide);
- private:
+ protected:
virtual bool recvTiming(Packet *pkt);
virtual Tick recvAtomic(Packet *pkt);
virtual int deviceBlockSize();
+ public:
void setBlocked();
void clearBlocked();
Packet *pkt;
CachePort *cachePort;
- CacheResponseEvent(Packet *pkt, CachePort *cachePort);
+ CacheEvent(Packet *pkt, CachePort *cachePort);
void process();
const char *description();
- }
+ };
protected:
CachePort *cpuSidePort;
private:
//To be defined in cache_impl.hh not in base class
- virtual bool doTimingAccess(Packet *pkt, MemoryPort *memoryPort, bool isCpuSide);
+ virtual bool doTimingAccess(Packet *pkt, CachePort *cachePort, bool isCpuSide);
virtual Tick doAtomicAccess(Packet *pkt, bool isCpuSide);
virtual void doFunctionalAccess(Packet *pkt, bool isCpuSide);
virtual void recvStatusChange(Port::Status status, bool isCpuSide);
* of this cache.
* @param params The parameter object for this BaseCache.
*/
- BaseCache(const std::string &name, HierParams *hier_params, Params ¶ms)
- : BaseMem(name, hier_params, params.hitLatency, params.addrRange),
- blocked(0), blockedSnoop(0), masterRequests(0), slaveRequests(0),
- topLevelCache(false), blkSize(params.blkSize),
+ BaseCache(const std::string &name, Params ¶ms)
+ : MemObject(name), blocked(0), blockedSnoop(0), masterRequests(0),
+ slaveRequests(0), topLevelCache(false), blkSize(params.blkSize),
missCount(params.maxMisses)
{
+ //Start ports at null; if more than one is created we should panic
+ cpuSidePort = NULL;
+ memSidePort = NULL;
}
/**
*/
void respondToMiss(Packet *pkt, Tick time)
{
- if (!pkt->isUncacheable()) {
- missLatency[pkt->cmd.toIndex()][pkt->thread_num] += time - pkt->time;
+ if (!pkt->req->isUncacheable()) {
+ missLatency[pkt->cmdToIndex()][pkt->req->getThreadNum()] += time - pkt->time;
}
assert("Implement\n" && 0);
// si->respond(pkt,time);
* to do for a cache.
*/
void rangeChange() {}
+
+ void getAddressRanges(AddrRangeList &resp, AddrRangeList &snoop)
+ {
+ panic("Unimplimented\n");
+ }
};
#endif //__BASE_CACHE_HH__
// forward declarations
class Bus;
-class ExecContext;
/**
* A template-policy based cache. The behavior of the cache can be altered by
/**
* Squash all requests associated with the specified thread.
* Intended for use by I-cache.
- * @param thread_number The thread to squash.
+ * @param threadNum The thread to squash.
*/
- void squash(int thread_number)
+ void squash(int threadNum)
{
- missQueue->squash(thread_number);
+ missQueue->squash(threadNum);
}
/**
#include "sim/root.hh" // for Tick
#include "arch/isa_traits.hh" // for Addr
-#include "cpu/exec_context.hh"
/**
* Cache block status bit assignments
/** The curTick value at which this block will be accessible */
Tick whenReady;
- /** Save the exec context so that writebacks can use them. */
- ExecContext *xc;
-
/**
* The set this block belongs to.
* @todo Move this into subclasses when we fix CacheTags to use them.
//We are determining prefetches on access stream, call prefetcher
prefetcher->handleMiss(pkt, curTick);
}
- if (!pkt->isUncacheable()) {
+ if (!pkt->req->isUncacheable()) {
if (pkt->cmd.isInvalidate() && !pkt->cmd.isRead()
&& !pkt->cmd.isWrite()) {
//Upgrade or Invalidate
pkt->paddr & ~((Addr)blkSize - 1), pkt->pc);
if (blk) {
// Hit
- hits[pkt->cmd.toIndex()][pkt->thread_num]++;
+ hits[pkt->cmdToIndex()][pkt->req->getThreadNum()]++;
// clear dirty bit if write through
if (!pkt->cmd.isNoResponse())
respond(pkt, curTick+lat);
}
// Miss
- if (!pkt->isUncacheable()) {
- misses[pkt->cmd.toIndex()][pkt->thread_num]++;
+ if (!pkt->req->isUncacheable()) {
+ misses[pkt->cmdToIndex()][pkt->req->getThreadNum()]++;
/** @todo Move miss count code into BaseCache */
if (missCount) {
--missCount;
{
Packet * pkt = missQueue->getPacket();
if (pkt) {
- if (!pkt->isUncacheable()) {
- if (pkt->cmd == Hard_Prefetch) misses[Hard_Prefetch][pkt->thread_num]++;
+ if (!pkt->req->isUncacheable()) {
+ if (pkt->cmd == Packet::HardPFReq) misses[Packet::HardPFReq][pkt->req->getThreadNum()]++;
BlkType *blk = tags->findBlock(pkt);
Packet::Command cmd = coherence->getBusCmd(pkt->cmd,
(blk)? blk->status : 0);
if (pkt->cmd == Upgrade) {
handleResponse(pkt);
}
- } else if (pkt && !pkt->isUncacheable()) {
+ } else if (pkt && !pkt->req->isUncacheable()) {
missQueue->restoreOrigCmd(pkt);
}
}
for (int i=0; i<writebacks.size(); i++) {
mshr = writebacks[i];
- if (!mshr->pkt->isUncacheable()) {
+ if (!mshr->pkt->req->isUncacheable()) {
if (pkt->cmd.isRead()) {
//Only Upgrades don't get here
//Supply the data
{
MemDebug::cacheProbe(pkt);
- if (!pkt->isUncacheable()) {
+ if (!pkt->req->isUncacheable()) {
if (pkt->cmd.isInvalidate() && !pkt->cmd.isRead()
&& !pkt->cmd.isWrite()) {
//Upgrade or Invalidate, satisfy it, don't forward
// Can't handle it, return request unsatisfied.
return 0;
}
- if (!pkt->isUncacheable()) {
+ if (!pkt->req->isUncacheable()) {
// Fetch the cache block to fill
Packet * busPkt = new MemPkt();
busPkt->paddr = blk_addr;
busPkt->req->asid = pkt->req->asid;
busPkt->xc = pkt->xc;
- busPkt->thread_num = pkt->thread_num;
+ busPkt->req->setThreadNum(pkt->req->getThreadNum());
busPkt->time = curTick;
lat = mi->sendProbe(busPkt, update);
return 0;
}
- misses[pkt->cmd.toIndex()][pkt->thread_num]++;
+ misses[pkt->cmdToIndex()][pkt->req->getThreadNum()]++;
CacheBlk::State old_state = (blk) ? blk->status : 0;
tags->handleFill(blk, busPkt,
}
if (update) {
- hits[pkt->cmd.toIndex()][pkt->thread_num]++;
+ hits[pkt->cmdToIndex()][pkt->req->getThreadNum()]++;
} else if (pkt->cmd.isWrite()) {
// Still need to change data in all locations.
return mi->sendProbe(pkt, update);
CoherenceProtocol::getNewState(const Packet * &pkt, CacheBlk::State oldState)
{
CacheBlk::State state = oldState & stateMask;
- int cmd_idx = pkt->cmd.toIndex();
+ int cmd_idx = pkt->cmdToIndex();
assert(0 <= state && state <= stateMax);
assert(0 <= cmd_idx && cmd_idx < NUM_MEM_CMDS);
}
CacheBlk::State state = blk->status & stateMask;
- int cmd_idx = pkt->cmd.toIndex();
+ int cmd_idx = pkt->cmdToIndex();
assert(0 <= state && state <= stateMax);
assert(0 <= cmd_idx && cmd_idx < NUM_MEM_CMDS);
BlockingBuffer::handleMiss(Packet * &pkt, int blk_size, Tick time)
{
Addr blk_addr = pkt->paddr & ~(Addr)(blk_size - 1);
- if (pkt->cmd.isWrite() && (pkt->isUncacheable() || !writeAllocate ||
+ if (pkt->cmd.isWrite() && (pkt->req->isUncacheable() || !writeAllocate ||
pkt->cmd.isNoResponse())) {
if (pkt->cmd.isNoResponse()) {
wb.allocateAsBuffer(pkt);
} else {
miss.allocate(pkt->cmd, blk_addr, pkt->req->asid, blk_size, pkt);
}
- if (!pkt->isUncacheable()) {
+ if (!pkt->req->isUncacheable()) {
miss.pkt->flags |= CACHE_LINE_FILL;
}
cache->setBlocked(Blocked_NoMSHRs);
}
void
-BlockingBuffer::squash(int thread_number)
+BlockingBuffer::squash(int threadNum)
{
- if (miss.threadNum == thread_number) {
+ if (miss.threadNum == threadNum) {
Packet * target = miss.getTarget();
miss.popTarget();
- assert(target->thread_num == thread_number);
+ assert(target->req->getThreadNum() == threadNum);
if (target->completionEvent != NULL) {
delete target->completionEvent;
}
}
void
-BlockingBuffer::doWriteback(Addr addr, int asid, ExecContext *xc,
+BlockingBuffer::doWriteback(Addr addr, int asid,
int size, uint8_t *data, bool compressed)
{
* @todo Need to find a way to charge the writeback to the "correct"
* thread.
*/
- pkt->xc = xc;
- if (xc)
- pkt->thread_num = xc->getThreadNum();
- else
- pkt->thread_num = 0;
+ pkt->req->setThreadNum(0);
pkt->cmd = Writeback;
if (compressed) {
pkt->flags |= COMPRESSED;
}
- writebacks[pkt->thread_num]++;
+ writebacks[pkt->req->getThreadNum()]++;
wb.allocateAsBuffer(pkt);
cache->setMasterRequest(Request_WB, curTick);
void
BlockingBuffer::doWriteback(Packet * &pkt)
{
- writebacks[pkt->thread_num]++;
+ writebacks[pkt->req->getThreadNum()]++;
wb.allocateAsBuffer(pkt);
/**
* Removes all outstanding requests for a given thread number. If a request
* has been sent to the bus, this function removes all of its targets.
- * @param thread_number The thread number of the requests to squash.
+ * @param threadNum The thread number of the requests to squash.
*/
- void squash(int thread_number);
+ void squash(int threadNum);
/**
* Return the current number of outstanding misses.
* Perform a writeback of dirty data to the given address.
* @param addr The address to write to.
* @param asid The address space id.
- * @param xc The execution context of the address space.
* @param size The number of bytes to write.
* @param data The data to write, can be NULL.
* @param compressed True if the data is compressed.
*/
- void doWriteback(Addr addr, int asid, ExecContext *xc,
+ void doWriteback(Addr addr, int asid,
int size, uint8_t *data, bool compressed);
/**
{
MSHR* mshr = mq.allocate(pkt, size);
mshr->order = order++;
- if (!pkt->isUncacheable() ){//&& !pkt->isNoAllocate()) {
+ if (!pkt->req->isUncacheable() ){//&& !pkt->isNoAllocate()) {
// Mark this as a cache line fill
mshr->pkt->flags |= CACHE_LINE_FILL;
}
int size = blkSize;
Addr blkAddr = pkt->paddr & ~(Addr)(blkSize-1);
MSHR* mshr = NULL;
- if (!pkt->isUncacheable()) {
+ if (!pkt->req->isUncacheable()) {
mshr = mq.findMatch(blkAddr, pkt->req->asid);
if (mshr) {
//@todo remove hw_pf here
- mshr_hits[pkt->cmd.toIndex()][pkt->thread_num]++;
- if (mshr->threadNum != pkt->thread_num) {
- mshr->threadNum = -1;
+ mshr_hits[pkt->cmdToIndex()][pkt->req->getThreadNum()]++;
+ if (mshr->threadNum != pkt->req->getThreadNum()) {
+ mshr->threadNum = -1;
}
mq.allocateTarget(mshr, pkt);
if (mshr->pkt->isNoAllocate() && !pkt->isNoAllocate()) {
mshr_no_allocate_misses++;
}
else {
- mshr_misses[pkt->cmd.toIndex()][pkt->thread_num]++;
+ mshr_misses[pkt->cmdToIndex()][pkt->req->getThreadNum()]++;
}
} else {
//Count uncacheable accesses
- mshr_uncacheable[pkt->cmd.toIndex()][pkt->thread_num]++;
+ mshr_uncacheable[pkt->cmdToIndex()][pkt->req->getThreadNum()]++;
size = pkt->size;
}
- if (pkt->cmd.isWrite() && (pkt->isUncacheable() || !writeAllocate ||
+ if (pkt->cmd.isWrite() && (pkt->req->isUncacheable() || !writeAllocate ||
pkt->cmd.isNoResponse())) {
/**
* @todo Add write merging here.
pkt = prefetcher->getPacket();
if (pkt) {
//Update statistic on number of prefetches issued (hwpf_mshr_misses)
- mshr_misses[pkt->cmd.toIndex()][pkt->thread_num]++;
+ mshr_misses[pkt->cmdToIndex()][pkt->req->getThreadNum()]++;
//It will request the bus in the future, but should clear that immediately
allocateMiss(pkt, pkt->size, curTick);
pkt = mq.getReq();
BlockedCause cause = NUM_BLOCKED_CAUSES;
if (pkt->isCacheFill() && !pkt->isNoAllocate()) {
- mshr_miss_latency[mshr->originalCmd][pkt->thread_num] +=
+ mshr_miss_latency[mshr->originalCmd][pkt->req->getThreadNum()] +=
curTick - pkt->time;
// targets were handled in the cache tags
if (mshr == noTargetMSHR) {
}
}
} else {
- if (pkt->isUncacheable()) {
- mshr_uncacheable_lat[pkt->cmd][pkt->thread_num] +=
+ if (pkt->req->isUncacheable()) {
+ mshr_uncacheable_lat[pkt->cmd][pkt->req->getThreadNum()] +=
curTick - pkt->time;
}
- if (mshr->hasTargets() && pkt->isUncacheable()) {
+ if (mshr->hasTargets() && pkt->req->isUncacheable()) {
// Should only have 1 target if we had any
assert(num_targets == 1);
Packet * target = mshr->getTarget();
}
void
-MissQueue::squash(int thread_number)
+MissQueue::squash(int threadNum)
{
bool unblock = false;
BlockedCause cause = NUM_BLOCKED_CAUSES;
- if (noTargetMSHR && noTargetMSHR->threadNum == thread_number) {
+ if (noTargetMSHR && noTargetMSHR->threadNum == threadNum) {
noTargetMSHR = NULL;
unblock = true;
cause = Blocked_NoTargets;
unblock = true;
cause = Blocked_NoMSHRs;
}
- mq.squash(thread_number);
+ mq.squash(threadNum);
if (!mq.havePending()) {
cache->clearMasterRequest(Request_MSHR);
}
Packet * pkt = buildWritebackReq(addr, asid, size, data,
compressed);
- writebacks[pkt->thread_num]++;
+ writebacks[pkt->req->getThreadNum()]++;
allocateWrite(pkt, 0, curTick);
}
void
MissQueue::doWriteback(Packet * &pkt)
{
- writebacks[pkt->thread_num]++;
+ writebacks[pkt->req->getThreadNum()]++;
allocateWrite(pkt, 0, curTick);
}
/**
* Removes all outstanding requests for a given thread number. If a request
* has been sent to the bus, this function removes all of its targets.
- * @param thread_number The thread number of the requests to squash.
+ * @param threadNum The thread number of the requests to squash.
*/
- void squash(int thread_number);
+ void squash(int threadNum);
/**
* Return the current number of outstanding misses.
{
inService = false;
ntargets = 0;
threadNum = -1;
}
void
pkt->data = new uint8_t[size];
pkt->senderState = this;
//Set the time here for latency calculations
- //pkt->time = curTick;
+ pkt->time = curTick;
if (target) {
pkt->req = target->req;
{
addr = target->paddr;
asid = target->req->asid;
- threadNum = target->thread_num;
+ threadNum = target->req->getThreadNum();
pkt = new Packet();
pkt->addr = target->addr;
pkt->dest = target->dest;
pkt->req = target->req;
pkt->data = new uint8_t[target->size];
pkt->senderState = this;
+ pkt->time = curTick;
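+ // Stamp the allocation time so latency stats computed as curTick - pkt->time
+ // measure from when this buffer entry was created.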
}
void
"inService: %d thread: %d\n"
"Addr: %x asid: %d ntargets %d\n"
"Targets:\n",
inService, threadNum, addr, asid, ntargets);
TargetListIterator tar_it = targets.begin();
for (int i = 0; i < ntargets; i++) {
assert(tar_it != targets.end());
ccprintf(cerr, "\t%d: Addr: %x cmd: %d\n",
- i, (*tar_it)->paddr, (*tar_it)->cmd.toIndex());
+ i, (*tar_it)->paddr, (*tar_it)->cmdToIndex());
tar_it++;
}
/** True if the request has been sent to the bus. */
bool inService;
/** Thread number of the miss. */
int threadNum;
/** The request that is forwarded to the next level of the hierarchy. */
Packet * pkt;
/** The number of currently allocated targets. */
}
void
-MSHRQueue::squash(int thread_number)
+MSHRQueue::squash(int threadNum)
{
MSHR::Iterator i = allocatedList.begin();
MSHR::Iterator end = allocatedList.end();
for (; i != end;) {
MSHR *mshr = *i;
- if (mshr->threadNum == thread_number) {
+ if (mshr->threadNum == threadNum) {
while (mshr->hasTargets()) {
Packet * target = mshr->getTarget();
mshr->popTarget();
- assert(target->thread_num == thread_number);
+ assert(target->req->getThreadNum() == threadNum);
if (target->completionEvent != NULL) {
delete target->completionEvent;
}
/**
* Squash outstanding requests with the given thread number. If a request
* is in service, just squashes the targets.
- * @param thread_number The thread to squash.
+ * @param threadNum The thread to squash.
*/
- void squash(int thread_number);
+ void squash(int threadNum);
/**
* Returns true if the pending list is not empty.
void
BasePrefetcher::handleMiss(Packet * &pkt, Tick time)
{
- if (!pkt->isUncacheable() && !(pkt->isInstRead() && only_data))
+ if (!pkt->req->isUncacheable() && !(pkt->isInstRead() && only_data))
{
//Calculate the blk address
Addr blkAddr = pkt->paddr & ~(Addr)(blkSize-1);
prefetch->xc = pkt->xc;
prefetch->data = new uint8_t[blkSize];
prefetch->req->asid = pkt->req->asid;
- prefetch->thread_num = pkt->thread_num;
+ prefetch->req->setThreadNum(pkt->req->getThreadNum());
prefetch->time = time + (*delay); //@todo ADD LATENCY HERE
//... initialize
tagHash.erase(blk->tag);
tagHash[blkAlign(pkt->paddr)] = blk;
if (blk->isValid()) {
- int thread_num = (blk->xc) ? blk->xc->getThreadNum() : 0;
- replacements[thread_num]++;
+ int threadNum = (blk->xc) ? blk->xc->getThreadNum() : 0;
+ replacements[threadNum]++;
} else {
tagsInUse++;
blk->isTouched = true;
tag_ptr->isModified() ? "writeback" : "clean");
/* write back replaced block data */
if (tag_ptr && (tag_ptr->isValid())) {
- int thread_num = (tag_ptr->xc) ? tag_ptr->xc->getThreadNum() : 0;
- replacements[thread_num]++;
+ int threadNum = (tag_ptr->xc) ? tag_ptr->xc->getThreadNum() : 0;
+ replacements[threadNum]++;
totalRefs += tag_ptr->refCount;
++sampledRefs;
tag_ptr->refCount = 0;
LRUBlk *blk = sets[set].blks[assoc-1];
sets[set].moveToHead(blk);
if (blk->isValid()) {
- int thread_num = (blk->xc) ? blk->xc->getThreadNum() : 0;
- replacements[thread_num]++;
+ int threadNum = (blk->xc) ? blk->xc->getThreadNum() : 0;
+ replacements[threadNum]++;
totalRefs += blk->refCount;
++sampledRefs;
blk->refCount = 0;
DPRINTF(Split, "just assigned %#x addr into LIFO, replacing %#x status %#x\n",
pkt->paddr, regenerateBlkAddr(blk->tag, set), blk->status);
if (blk->isValid()) {
- int thread_num = (blk->xc) ? blk->xc->getThreadNum() : 0;
- replacements[thread_num]++;
+ int threadNum = (blk->xc) ? blk->xc->getThreadNum() : 0;
+ replacements[threadNum]++;
totalRefs += blk->refCount;
++sampledRefs;
blk->refCount = 0;
SplitBlk *blk = sets[set].blks[assoc-1];
sets[set].moveToHead(blk);
if (blk->isValid()) {
- int thread_num = (blk->xc) ? blk->xc->getThreadNum() : 0;
- replacements[thread_num]++;
+ int threadNum = (blk->xc) ? blk->xc->getThreadNum() : 0;
+ replacements[threadNum]++;
totalRefs += blk->refCount;
++sampledRefs;
blk->refCount = 0;
--- /dev/null
+/*
+ * Copyright (c) 2004-2005 The Regents of The University of Michigan
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Nathan Binkert
+ */
+
+/**
+ * @file
+ * Central location to configure which cache types we want to build
+ * into the simulator. In the future, this should probably be
+ * autogenerated by some sort of configuration script.
+ */
+#define USE_CACHE_LRU 1
+#define USE_CACHE_FALRU 1
+// #define USE_CACHE_SPLIT 1
+// #define USE_CACHE_SPLIT_LIFO 1
+#define USE_CACHE_IIC 1
+
}
}
+const std::string &
+Packet::cmdIdxToString(Packet::Command idx)
+{
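+ // Only the basic read/write commands have strings so far; everything else
+ // (Writeback, prefetch requests/responses) falls through to OtherCmdString
+ // until the command strings are fixed (see the @todo in BaseCache::regStats).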
+ switch (idx) {
+ case ReadReq: return ReadReqString;
+ case WriteReq: return WriteReqString;
+ case WriteReqNoAck: return WriteReqNoAckString;
+ case ReadResp: return ReadRespString;
+ case WriteResp: return WriteRespString;
+ default: return OtherCmdString;
+ }
+}
+
/** Delete the data pointed to in the data pointer. Ok to call no matter how
 * data was allocated. */
void
typedef Packet* PacketPtr;
typedef uint8_t* PacketDataPtr;
+//For statistics we need the maximum number of command encodings; the command
+//bitmask currently uses bits 0-8, so hard code 1 << 9 for now. @todo fix later
+#define NUM_MEM_CMDS (1 << 9)
+
/**
* A Packet is used to encapsulate a transfer between two objects in
* the memory system (e.g., the L1 and L2 cache). (In contrast, a
public:
+ /** Used to calculate latencies for each packet.*/
+ Tick time;
+
/** The special destination address indicating that the packet
* should be routed based on its address. */
static const short Broadcast = -1;
IsRequest = 1 << 4,
IsResponse = 1 << 5,
NeedsResponse = 1 << 6,
+ IsSWPrefetch = 1 << 7,
+ IsHWPrefetch = 1 << 8
};
public:
WriteReq = IsWrite | IsRequest | NeedsResponse,
WriteReqNoAck = IsWrite | IsRequest,
ReadResp = IsRead | IsResponse,
- WriteResp = IsWrite | IsResponse
+ WriteResp = IsWrite | IsResponse,
+ Writeback = IsWrite | IsRequest,
+ SoftPFReq = IsRead | IsRequest | IsSWPrefetch | NeedsResponse,
+ HardPFReq = IsRead | IsRequest | IsHWPrefetch | NeedsResponse,
+ SoftPFResp = IsRead | IsSWPrefetch | IsResponse,
+ HardPFResp = IsRead | IsHWPrefetch | IsResponse
};
/** Return the string name of the cmd field (for debugging and
* tracing). */
const std::string &cmdString() const;
+ /** Return the string for the command given by idx. */
+ const std::string &cmdIdxToString(Command idx);
+
+ /** Return the index of this command. */
+ inline int cmdToIndex() const { return (int) cmd; }
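+ // (The raw command bitmask doubles as the index into the per-command stats
+ // arrays, which is why NUM_MEM_CMDS above is 1 << 9 rather than the actual
+ // number of commands.)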
+
/** The command field of the packet. */
Command cmd;
/** Accessor function for pc.*/
Addr getPC() { assert(validPC); return pc; }
+ /** Accessor Function to Check Cacheability. */
+ bool isUncacheable() { return getFlags() & UNCACHEABLE; }
+
friend class Packet;
};