// FIX ME: make trackBlkAddr use blocksize from actual cache, not hard coded
-#include <string>
-#include <sstream>
#include <iomanip>
+#include <set>
+#include <sstream>
+#include <string>
#include <vector>
#include "base/misc.hh"
using namespace std;
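+// Global counter used to hand out a unique id to each MemTest instance.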
+int TESTER_ALLOCATOR = 0;
+
MemTest::MemTest(const string &name,
MemInterface *_cache_interface,
FunctionalMemory *main_mem,
noResponseCycles = 0;
numReads = 0;
tickEvent.schedule(0);
+
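+ // Grab a unique id for this tester (used to pick per-tester offsets below).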
+ id = TESTER_ALLOCATOR++;
}
static void
void
MemTest::completeRequest(MemReqPtr &req, uint8_t *data)
{
+ // Remove the address from the set of outstanding addresses
+ std::set<unsigned>::iterator removeAddr = outstandingAddrs.find(req->paddr);
+ assert(removeAddr != outstandingAddrs.end());
+ outstandingAddrs.erase(removeAddr);
+
switch (req->cmd) {
case Read:
if (memcmp(req->data, data, req->size) != 0) {
break;
case Copy:
+ // Also remove the destination address from the outstanding set
+ removeAddr = outstandingAddrs.find(req->dest);
+ assert(removeAddr != outstandingAddrs.end());
+ outstandingAddrs.erase(removeAddr);
numCopiesStat++;
break;
if (!tickEvent.scheduled())
tickEvent.schedule(curTick + 1);
- if (++noResponseCycles >= 5000) {
+ if (++noResponseCycles >= 500000) {
cerr << name() << ": deadlocked at cycle " << curTick << endl;
fatal("");
}
unsigned source_align = rand() % 100;
unsigned dest_align = rand() % 100;
+ // If we aren't doing copies, use the tester id as an offset within the
+ // block so the testers exercise false sharing.
+ if (percentCopies == 0) {
+ // Clear the low-order bits of the offset, then use the id to pick a
+ // distinct offset within the block.
+ offset1 &= ~63; // mask off the low-order bits (assumes 64-byte blocks)
+ offset1 += id;
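+ // Keep the access size minimal so per-tester offsets within a block do not overlap.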
+ access_size = 0;
+ }
+
MemReqPtr req = new MemReq();
if (cacheable < percentUncacheable) {
if (cmd < percentReads) {
// read
+
+ // For now we only allow one outstanding request per address per tester.
+ // This means we assume the CPU does write forwarding to reads that alias
+ // something in its store buffer.
+ if (outstandingAddrs.find(req->paddr) != outstandingAddrs.end()) return;
+ else outstandingAddrs.insert(req->paddr);
+
req->cmd = Read;
uint8_t *result = new uint8_t[8];
checkMem->access(Read, req->paddr, result, req->size);
}
} else if (cmd < (100 - percentCopies)){
// write
+
+ // For now we only allow one outstanding request per address per tester.
+ // This means we assume the CPU does write forwarding to reads that alias
+ // something in its store buffer.
+ if (outstandingAddrs.find(req->paddr) != outstandingAddrs.end()) return;
+ else outstandingAddrs.insert(req->paddr);
+
req->cmd = Write;
memcpy(req->data, &data, req->size);
checkMem->access(Write, req->paddr, req->data, req->size);
// copy
Addr source = ((base) ? baseAddr1 : baseAddr2) + offset1;
Addr dest = ((base) ? baseAddr2 : baseAddr1) + offset2;
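+ // As with reads and writes, allow only one outstanding request per address;
+ // copies must track both the source and the destination.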
+ if (outstandingAddrs.find(source) != outstandingAddrs.end()) return;
+ else outstandingAddrs.insert(source);
+ if (outstandingAddrs.find(dest) != outstandingAddrs.end()) return;
+ else outstandingAddrs.insert(dest);
+
if (source_align >= percentSourceUnaligned) {
source = blockAddr(source);
}
#ifndef __MEMTEST_HH__
#define __MEMTEST_HH__
-#include "sim/sim_object.hh"
-#include "mem/mem_interface.hh"
-#include "mem/functional_mem/functional_memory.hh"
-#include "cpu/base_cpu.hh"
-#include "cpu/exec_context.hh"
+#include <set>
#include "base/statistics.hh"
+#include "cpu/base_cpu.hh"
+#include "cpu/exec_context.hh"
+#include "mem/functional_mem/functional_memory.hh"
+#include "mem/mem_interface.hh"
+#include "sim/sim_object.hh"
#include "sim/stats.hh"
class MemTest : public BaseCPU
unsigned percentCopies; // target percentage of copy accesses
unsigned percentUncacheable;
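+ // Unique id of this tester; used to give each tester its own offset within a block.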
+ int id;
+
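+ // Addresses that currently have a request in flight; only one outstanding
+ // request per address is allowed.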
+ std::set<unsigned> outstandingAddrs;
+
unsigned blockSize;
Addr blockAddrMask;
Fault
SimpleCPU::read(Addr addr, T &data, unsigned flags)
{
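+ // Re-entering read() after a dcache miss completed: translation and the
+ // cache access were done on the first pass, so just do the functional
+ // access and return.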
+ if (status() == DcacheMissStall) {
+ Fault fault = xc->read(memReq, data);
+
+ if (traceData) {
+ traceData->setAddr(addr);
+ if (fault == No_Fault)
+ traceData->setData(data);
+ }
+ return fault;
+ }
+
memReq->reset(addr, sizeof(T), flags);
// translate to physical address
Fault fault = xc->translateDataReadReq(memReq);
- // do functional access
- if (fault == No_Fault)
- fault = xc->read(memReq, data);
-
- if (traceData) {
- traceData->setAddr(addr);
- if (fault == No_Fault)
- traceData->setData(data);
- }
-
// if we have a cache, do cache access too
if (fault == No_Fault && dcacheInterface) {
memReq->cmd = Read;
lastDcacheStall = curTick;
unscheduleTickEvent();
_status = DcacheMissStall;
+ } else {
+ // do functional access
+ fault = xc->read(memReq, data);
+
+ if (traceData) {
+ traceData->setAddr(addr);
+ if (fault == No_Fault)
+ traceData->setData(data);
+ }
+ }
+ } else if (fault == No_Fault) {
+ // do functional access
+ fault = xc->read(memReq, data);
+
+ if (traceData) {
+ traceData->setAddr(addr);
+ if (fault == No_Fault)
+ traceData->setData(data);
}
}
scheduleTickEvent(1);
break;
case DcacheMissStall:
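+ // The miss has been serviced; re-execute loads so read() (still in the
+ // DcacheMissStall state) can deliver the returned data.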
+ if (memReq->cmd.isRead()) {
+ curStaticInst->execute(this, traceData);
+ }
dcacheStallCycles += curTick - lastDcacheStall;
_status = Running;
scheduleTickEvent(1);
comInstEventQueue[0]->serviceEvents(numInst);
// decode the instruction
- inst = htoa(inst);
- StaticInstPtr<TheISA> si(inst);
+ inst = htoa(inst);
+ curStaticInst = StaticInst<TheISA>::decode(inst);
- traceData = Trace::getInstRecord(curTick, xc, this, si,
+ traceData = Trace::getInstRecord(curTick, xc, this, curStaticInst,
xc->regs.pc);
#ifdef FULL_SYSTEM
xc->func_exe_inst++;
- fault = si->execute(this, traceData);
+ fault = curStaticInst->execute(this, traceData);
#ifdef FULL_SYSTEM
if (xc->fnbin)
- xc->execute(si.get());
+ xc->execute(curStaticInst.get());
#endif
- if (si->isMemRef()) {
+ if (curStaticInst->isMemRef()) {
numMemRefs++;
}
- if (si->isLoad()) {
+ if (curStaticInst->isLoad()) {
++numLoad;
comLoadEventQueue[0]->serviceEvents(numLoad);
}
// Refcounted pointer to the one memory request.
MemReqPtr memReq;
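+ // Static instruction currently being executed; saved so a load can be
+ // re-executed when its dcache miss completes.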
+ StaticInstPtr<TheISA> curStaticInst;
+
class CacheCompletionEvent : public Event
{
private: