Still a bug in atomic uni-coherence in FS.
src/cpu/o3/fetch_impl.hh:
src/cpu/o3/lsq_impl.hh:
src/cpu/simple/atomic.cc:
src/cpu/simple/timing.cc:
    Make CPU models handle coherence requests (shared pattern sketched below)
src/mem/cache/base_cache.cc:
    Properly signal coherence CSHRs
src/mem/cache/coherence/uni_coherence.cc:
    Only deallocate once
--HG--
extra : convert_revision : c4533de421c371c5532ee505e3ecd451511f5c99
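The four CPU-model changes in this patch apply the same guard in their port callbacks: a callback may now be handed a snooped coherence request as well as a response, and only responses should drive completion logic. The following is a minimal standalone sketch of that pattern; CpuSidePort, handleResponse, and the Packet struct here are illustrative stand-ins, not the simulator's classes (only the isResponse() check comes from the diff itself).

    #include <cstdio>

    // Illustrative stand-ins; the simulator's Packet/Port classes differ.
    struct Packet {
        bool response;                    // true for a response, false for a snoop
        bool isResponse() const { return response; }
    };

    struct CpuSidePort {
        // The shape each CPU model's recvTiming now takes: complete the
        // access only for responses, silently accept snooped requests.
        bool recvTiming(Packet *pkt) {
            if (pkt->isResponse()) {
                handleResponse(pkt);      // e.g. completeIfetch / completeDataAccess
            } else {
                // Snooped coherence request (e.g. an invalidate): just accept
                // it; model-specific bookkeeping (such as LSQ invalidation)
                // is not done here.
            }
            return true;                  // packet is always accepted
        }

        void handleResponse(Packet *) { std::puts("response completed"); }
    };

    int main() {
        CpuSidePort port;
        Packet resp{true}, snoop{false};
        port.recvTiming(&resp);           // drives completion
        port.recvTiming(&snoop);          // accepted and ignored
        return 0;
    }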
bool
DefaultFetch<Impl>::IcachePort::recvTiming(Packet *pkt)
{
- fetch->processCacheCompletion(pkt);
+ if (pkt->isResponse()) {
+ fetch->processCacheCompletion(pkt);
+ }
+ // else: snooped a coherence request, just return
return true;
}
bool
LSQ<Impl>::DcachePort::recvTiming(PacketPtr pkt)
{
- lsq->thread[pkt->req->getThreadNum()].completeDataAccess(pkt);
+ if (pkt->isResponse()) {
+ lsq->thread[pkt->req->getThreadNum()].completeDataAccess(pkt);
+ }
+ else {
+ // Snooped a coherence request (e.g. an invalidate); the O3 CPU does
+ // not yet update the LSQ for these, so just warn.
+ warn("Received a coherence request (invalidate?), O3CPU doesn't "
+ "update LSQ for these\n");
+ }
return true;
}
Tick
AtomicSimpleCPU::CpuPort::recvAtomic(Packet *pkt)
{
- panic("AtomicSimpleCPU doesn't expect recvAtomic callback!");
+ // Snooped a coherence request; nothing for the CPU itself to update, just return
return curTick;
}
bool
TimingSimpleCPU::IcachePort::recvTiming(Packet *pkt)
{
- // delay processing of returned data until next CPU clock edge
- Tick time = pkt->req->getTime();
- while (time < curTick)
- time += lat;
-
- if (time == curTick)
- cpu->completeIfetch(pkt);
- else
- tickEvent.schedule(pkt, time);
-
- return true;
+ if (pkt->isResponse()) {
+ // delay processing of returned data until next CPU clock edge
+ Tick time = pkt->req->getTime();
+ while (time < curTick)
+ time += lat;
+
+ if (time == curTick)
+ cpu->completeIfetch(pkt);
+ else
+ tickEvent.schedule(pkt, time);
+
+ return true;
+ }
+ else {
+ // Snooped a coherence request, do nothing
+ return true;
+ }
}
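For responses, both timing-CPU ports (this one and the data port below) keep the existing alignment of completion to the next CPU clock edge: the request's timestamp is stepped forward in multiples of the port latency until it is no longer in the past. A small standalone sketch of just that rounding, with nextEdge, reqTime, and now as illustrative local names (the while loop itself mirrors the code above):

    #include <cassert>
    #include <cstdint>

    using Tick = std::uint64_t;

    // Step a request's timestamp forward in whole multiples of 'lat' until it
    // is not earlier than 'now' -- the same loop the timing ports use to delay
    // response processing to the next CPU clock edge.
    Tick nextEdge(Tick reqTime, Tick now, Tick lat)
    {
        Tick time = reqTime;
        while (time < now)
            time += lat;
        return time;
    }

    int main()
    {
        // Request issued at tick 100, clock period of 3 ticks, response seen
        // at tick 107: it is processed at tick 109, the next edge.
        assert(nextEdge(100, 107, 3) == 109);
        // A response arriving exactly on an edge is processed immediately.
        assert(nextEdge(100, 100, 3) == 100);
        return 0;
    }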
bool
TimingSimpleCPU::DcachePort::recvTiming(Packet *pkt)
{
- // delay processing of returned data until next CPU clock edge
- Tick time = pkt->req->getTime();
- while (time < curTick)
- time += lat;
+ if (pkt->isResponse()) {
+ // delay processing of returned data until next CPU clock edge
+ Tick time = pkt->req->getTime();
+ while (time < curTick)
+ time += lat;
- if (time == curTick)
- cpu->completeDataAccess(pkt);
- else
- tickEvent.schedule(pkt, time);
+ if (time == curTick)
+ cpu->completeDataAccess(pkt);
+ else
+ tickEvent.schedule(pkt, time);
- return true;
+ return true;
+ }
+ else {
+ // Snooped a coherence request, do nothing
+ return true;
+ }
}
pkt = cachePort->cache->getCoherencePacket();
MSHR* cshr = (MSHR*) pkt->senderState;
bool success = cachePort->sendTiming(pkt);
- cachePort->cache->sendResult(pkt, cshr, success);
+ cachePort->cache->sendCoherenceResult(pkt, cshr, success);
cachePort->waitingOnRetry = !success;
if (cachePort->waitingOnRetry)
DPRINTF(CachePort, "%s now waiting on a retry\n", cachePort->name());
if (success)
{
bool unblock = cshrs.isFull();
- cshrs.markInService(cshr);
+// cshrs.markInService(cshr);
+ cshrs.deallocate(cshr);
if (!cshrs.havePending()) {
cache->clearSlaveRequest(Request_Coherence);
}
- cshrs.deallocate(cshr);
if (unblock) {
//since CSHRs are always used as buffers, should always get rid of one
assert(!cshrs.isFull());