// FIX ME: make trackBlkAddr use blocksize from actual cache, not hard coded
-#include <string>
-#include <sstream>
#include <iomanip>
+#include <set>
+#include <sstream>
+#include <string>
#include <vector>
#include "base/misc.hh"
using namespace std;
+int TESTER_ALLOCATOR=0;
+
MemTest::MemTest(const string &name,
MemInterface *_cache_interface,
FunctionalMemory *main_mem,
noResponseCycles = 0;
numReads = 0;
tickEvent.schedule(0);
+
+ id = TESTER_ALLOCATOR++;
}
static void
void
MemTest::completeRequest(MemReqPtr &req, uint8_t *data)
{
+    //Remove the address from the list of outstanding addresses
+ std::set<unsigned>::iterator removeAddr = outstandingAddrs.find(req->paddr);
+ assert(removeAddr != outstandingAddrs.end());
+ outstandingAddrs.erase(removeAddr);
+
switch (req->cmd) {
case Read:
if (memcmp(req->data, data, req->size) != 0) {
break;
case Copy:
+ //Also remove dest from outstanding list
+ removeAddr = outstandingAddrs.find(req->dest);
+ assert(removeAddr != outstandingAddrs.end());
+ outstandingAddrs.erase(removeAddr);
numCopiesStat++;
break;
if (!tickEvent.scheduled())
tickEvent.schedule(curTick + 1);
- if (++noResponseCycles >= 5000) {
+ if (++noResponseCycles >= 500000) {
cerr << name() << ": deadlocked at cycle " << curTick << endl;
fatal("");
}
unsigned source_align = rand() % 100;
unsigned dest_align = rand() % 100;
+ //If we aren't doing copies, use id as offset, and do a false sharing
+ //mem tester
+ if (percentCopies == 0) {
+ //We can eliminate the lower bits of the offset, and then use the id
+ //to offset within the blks
+ offset1 &= ~63; //Not the low order bits
+ offset1 += id;
+ access_size = 0;
+ }
+
MemReqPtr req = new MemReq();
if (cacheable < percentUncacheable) {
if (cmd < percentReads) {
// read
+
+    //For now we only allow one outstanding request per address per tester
+ //This means we assume CPU does write forwarding to reads that alias something
+ //in the cpu store buffer.
+ if (outstandingAddrs.find(req->paddr) != outstandingAddrs.end()) return;
+ else outstandingAddrs.insert(req->paddr);
+
req->cmd = Read;
uint8_t *result = new uint8_t[8];
checkMem->access(Read, req->paddr, result, req->size);
}
} else if (cmd < (100 - percentCopies)){
// write
+
+    //For now we only allow one outstanding request per address per tester
+ //This means we assume CPU does write forwarding to reads that alias something
+ //in the cpu store buffer.
+ if (outstandingAddrs.find(req->paddr) != outstandingAddrs.end()) return;
+ else outstandingAddrs.insert(req->paddr);
+
req->cmd = Write;
memcpy(req->data, &data, req->size);
checkMem->access(Write, req->paddr, req->data, req->size);
// copy
Addr source = ((base) ? baseAddr1 : baseAddr2) + offset1;
Addr dest = ((base) ? baseAddr2 : baseAddr1) + offset2;
+ if (outstandingAddrs.find(source) != outstandingAddrs.end()) return;
+ else outstandingAddrs.insert(source);
+ if (outstandingAddrs.find(dest) != outstandingAddrs.end()) return;
+ else outstandingAddrs.insert(dest);
+
if (source_align >= percentSourceUnaligned) {
source = blockAddr(source);
}
#ifndef __MEMTEST_HH__
#define __MEMTEST_HH__
-#include "sim/sim_object.hh"
-#include "mem/mem_interface.hh"
-#include "mem/functional_mem/functional_memory.hh"
-#include "cpu/base_cpu.hh"
-#include "cpu/exec_context.hh"
+#include <set>
#include "base/statistics.hh"
+#include "cpu/base_cpu.hh"
+#include "cpu/exec_context.hh"
+#include "mem/functional_mem/functional_memory.hh"
+#include "mem/mem_interface.hh"
+#include "sim/sim_object.hh"
#include "sim/stats.hh"
class MemTest : public BaseCPU
unsigned percentCopies; // target percentage of copy accesses
unsigned percentUncacheable;
+ int id;
+
+ std::set<unsigned> outstandingAddrs;
+
unsigned blockSize;
Addr blockAddrMask;