//if (decoder.needMoreBytes())
//{
icache_access = true;
- Packet ifetch_pkt = Packet(ifetch_req, MemCmd::ReadReq);
- ifetch_pkt.dataStatic(&inst);
-
- icache_latency = sendPacket(icachePort, &ifetch_pkt);
-
- assert(!ifetch_pkt.isError());
-
- // ifetch_req is initialized to read the instruction
- // directly into the CPU object's inst field.
+ icache_latency = fetchInstMem();
//}
}
reschedule(tickEvent, curTick() + latency, true);
}
+Tick
+AtomicSimpleCPU::fetchInstMem()
+{
+ Packet pkt = Packet(ifetch_req, MemCmd::ReadReq);
+
+ // ifetch_req is initialized to read the instruction
+ // directly into the CPU object's inst field.
+ pkt.dataStatic(&inst);
+
+ Tick latency = sendPacket(icachePort, &pkt);
+ assert(!pkt.isError());
+
+ return latency;
+}
+
void
AtomicSimpleCPU::regProbePoints()
{
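The fetch path above leans on Packet::dataStatic(): the packet's data pointer is aimed at the CPU's own inst member, so the read deposits its bytes there with no intermediate buffer, and fetchInstMem() only has to wrap ifetch_req in a packet and send it. As a rough illustration of that idea (plain C++, not the gem5 Packet API; ReadRequest, service() and the byte values are made up for the example), a request can simply carry a pointer to caller-owned storage that the responder fills in:

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    struct ReadRequest
    {
        uint64_t addr;
        size_t size;
        void *data = nullptr;           // caller-owned destination buffer

        // Point the request at storage the caller guarantees outlives it.
        void dataStatic(void *p) { data = p; }
    };

    // Hypothetical responder: copies bytes from a backing store into the
    // request's destination buffer.
    void service(ReadRequest &req, const uint8_t *backing)
    {
        std::memcpy(req.data, backing + req.addr, req.size);
    }

    int main()
    {
        uint8_t memory[16] = {0x13, 0x05, 0x10, 0x00};
        uint32_t inst = 0;

        ReadRequest req{0, sizeof(inst)};
        req.dataStatic(&inst);          // the read lands directly in 'inst'
        service(req, memory);
        std::printf("inst = 0x%08x\n", inst);
        return 0;
    }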
void init() override;
protected:
-
EventFunctionWrapper tickEvent;
const int width;
bool tryCompleteDrain();
virtual Tick sendPacket(RequestPort &port, const PacketPtr &pkt);
+ virtual Tick fetchInstMem();
/**
* An AtomicCPUPort overrides the default behaviour of the
Tick
NonCachingSimpleCPU::sendPacket(RequestPort &port, const PacketPtr &pkt)
{
- if (system->isMemAddr(pkt->getAddr())) {
- system->getPhysMem().access(pkt);
- return 0;
- } else {
- return port.sendAtomic(pkt);
+ MemBackdoorPtr bd = nullptr;
+ Tick latency = port.sendAtomicBackdoor(pkt, bd);
+
+ // If the target gave us a backdoor for next time and we didn't
+ // already have it, record it.
+ if (bd && memBackdoors.insert(bd->range(), bd) != memBackdoors.end()) {
+ // Install a callback to erase this backdoor if it goes away.
+ auto callback = [this](const MemBackdoor &backdoor) {
+ for (auto it = memBackdoors.begin();
+ it != memBackdoors.end(); it++) {
+ if (it->second == &backdoor) {
+ memBackdoors.erase(it);
+ return;
+ }
+ }
+ panic("Got invalidation for unknown memory backdoor.");
+ };
+ bd->addInvalidationCallback(callback);
}
+ return latency;
+}
+
+Tick
+NonCachingSimpleCPU::fetchInstMem()
+{
+ // If no cached backdoor covers the fetch address, fall back to the
+ // packet-based fetch in the base class.
+ auto bd_it = memBackdoors.contains(ifetch_req->getPaddr());
+ if (bd_it == memBackdoors.end())
+ return AtomicSimpleCPU::fetchInstMem();
+
+ // Otherwise copy the instruction bytes straight out of host memory;
+ // a backdoor access takes no simulated time.
+ auto *bd = bd_it->second;
+ Addr offset = ifetch_req->getPaddr() - bd->range().start();
+ memcpy(&inst, bd->ptr() + offset, ifetch_req->getSize());
+ return 0;
}
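The effect of this override is that a fetch whose physical address falls inside a range the CPU already holds a backdoor for becomes a plain host memcpy with zero simulated latency, while everything else still goes through the base class's packet path. Below is a self-contained sketch of that fast-path/fallback split (plain C++ with a std::map standing in for AddrRangeMap; Backdoor, slowFetch() and the addresses are invented for the example):

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>
    #include <cstring>
    #include <iterator>
    #include <map>

    using Addr = uint64_t;

    struct Backdoor
    {
        Addr start;        // first simulated physical address covered
        Addr size;         // number of bytes covered
        uint8_t *host;     // host pointer backing that range
    };

    // Backdoors keyed by range start; this sketch assumes ranges never overlap.
    std::map<Addr, Backdoor> backdoors;

    // Stand-in for the packet-based fetch, charging a fixed latency.
    uint64_t slowFetch(Addr paddr, void *dst, size_t len)
    {
        std::memset(dst, 0, len);
        return 1000;
    }

    uint64_t fetchInst(Addr paddr, void *dst, size_t len)
    {
        // Find the last backdoor that starts at or before paddr.
        auto it = backdoors.upper_bound(paddr);
        if (it != backdoors.begin()) {
            const Backdoor &bd = std::prev(it)->second;
            if (paddr >= bd.start && paddr + len <= bd.start + bd.size) {
                std::memcpy(dst, bd.host + (paddr - bd.start), len);
                return 0;   // backdoor accesses take no simulated time
            }
        }
        return slowFetch(paddr, dst, len);
    }

    int main()
    {
        static uint8_t ram[64] = {0x13, 0x05, 0x10, 0x00};
        backdoors[0x1000] = Backdoor{0x1000, sizeof(ram), ram};

        uint32_t inst = 0;
        uint64_t lat = fetchInst(0x1000, &inst, sizeof(inst));
        std::printf("latency %llu, inst 0x%08x\n",
                    (unsigned long long)lat, inst);
        return 0;
    }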
#ifndef __CPU_SIMPLE_NONCACHING_HH__
#define __CPU_SIMPLE_NONCACHING_HH__
+#include "base/addr_range_map.hh"
#include "cpu/simple/atomic.hh"
+#include "mem/backdoor.hh"
#include "params/NonCachingSimpleCPU.hh"
/**
void verifyMemoryMode() const override;
protected:
+ /** Memory backdoors this CPU has collected, indexed by the address range each covers. */
+ AddrRangeMap<MemBackdoorPtr, 1> memBackdoors;
+
Tick sendPacket(RequestPort &port, const PacketPtr &pkt) override;
+ Tick fetchInstMem() override;
};
#endif // __CPU_SIMPLE_NONCACHING_HH__
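A backdoor is only safe to cache as long as the memory that issued it keeps the host mapping alive, which is why sendPacket() installs an invalidation callback for every backdoor it records (and why, as I understand it, the second template argument of AddrRangeMap sizes a small most-recently-used lookup cache, here one entry, for the common case of repeated fetches from the same range). The sketch below shows the ownership contract in plain C++; it is not the gem5 MemBackdoor API, and Backdoor, Cpu and record() are invented for the example:

    #include <cstdint>
    #include <functional>
    #include <map>
    #include <vector>

    struct Backdoor
    {
        uint64_t start;
        uint8_t *host;
        std::vector<std::function<void(const Backdoor &)>> callbacks;

        void addInvalidationCallback(std::function<void(const Backdoor &)> cb)
        {
            callbacks.push_back(std::move(cb));
        }

        // Called by the owner when the mapping is torn down or remapped.
        void invalidate()
        {
            for (auto &cb : callbacks)
                cb(*this);
            host = nullptr;
        }
    };

    struct Cpu
    {
        // Backdoors this CPU has been handed, keyed by range start.
        std::map<uint64_t, const Backdoor *> cached;

        void record(Backdoor &bd)
        {
            cached[bd.start] = &bd;
            // Drop our cache entry if the backdoor ever goes away, mirroring
            // the lambda installed in sendPacket() above.
            bd.addInvalidationCallback([this](const Backdoor &b) {
                cached.erase(b.start);
            });
        }
    };

    int main()
    {
        static uint8_t ram[16];
        Backdoor bd{0x2000, ram, {}};
        Cpu cpu;
        cpu.record(bd);
        bd.invalidate();                 // owner revokes the mapping
        return cpu.cached.empty() ? 0 : 1;
    }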