return NoFault;
}
+Fault
+TLB::finalizePhysical(RequestPtr req, ThreadContext *tc, Mode mode) const
+{
+ return NoFault;
+}
+
} // namespace AlphaISA
AlphaISA::TLB *
* translateFunctional stub function for future CheckerCPU support
*/
Fault translateFunctional(RequestPtr req, ThreadContext *tc, Mode mode);
+ Fault finalizePhysical(RequestPtr req, ThreadContext *tc, Mode mode) const;
};
} // namespace AlphaISA
return true;
}
+Fault
+TLB::finalizePhysical(RequestPtr req, ThreadContext *tc, Mode mode) const
+{
+ return NoFault;
+}
+
TlbEntry*
TLB::lookup(Addr va, uint8_t cid, bool functional)
{
Fault translateAtomic(RequestPtr req, ThreadContext *tc, Mode mode);
Fault translateTiming(RequestPtr req, ThreadContext *tc,
Translation *translation, Mode mode);
+ Fault finalizePhysical(RequestPtr req, ThreadContext *tc, Mode mode) const;
void drainResume();
return NoFault;
}
+Fault
+TLB::finalizePhysical(RequestPtr req, ThreadContext *tc, Mode mode) const
+{
+ return NoFault;
+}
+
MipsISA::PTE &
TLB::index(bool advance)
* support the Checker model at the moment.
*/
Fault translateFunctional(RequestPtr req, ThreadContext *tc, Mode mode);
+ Fault finalizePhysical(RequestPtr req, ThreadContext *tc, Mode mode) const;
private:
Fault translateInst(RequestPtr req, ThreadContext *tc);
return NoFault;
}
+Fault
+TLB::finalizePhysical(RequestPtr req, ThreadContext *tc, Mode mode) const
+{
+ return NoFault;
+}
+
PowerISA::PTE &
TLB::index(bool advance)
{
* supported by Checker at the moment
*/
Fault translateFunctional(RequestPtr req, ThreadContext *tc, Mode mode);
+ Fault finalizePhysical(RequestPtr req, ThreadContext *tc, Mode mode) const;
// Checkpointing
void serialize(std::ostream &os);
return NoFault;
}
+Fault
+TLB::finalizePhysical(RequestPtr req, ThreadContext *tc, Mode mode) const
+{
+ return NoFault;
+}
+
Cycles
TLB::doMmuRegRead(ThreadContext *tc, Packet *pkt)
{
* does not support the Checker model at the moment
*/
Fault translateFunctional(RequestPtr req, ThreadContext *tc, Mode mode);
+ Fault finalizePhysical(RequestPtr req, ThreadContext *tc, Mode mode) const;
Cycles doMmuRegRead(ThreadContext *tc, Packet *pkt);
Cycles doMmuRegWrite(ThreadContext *tc, Packet *pkt);
void GetTsbPtr(ThreadContext *tc, Addr addr, int ctx, Addr *ptrs);
}
}
+Fault
+TLB::finalizePhysical(RequestPtr req, ThreadContext *tc, Mode mode) const
+{
+ Addr paddr = req->getPaddr();
+
+ // Check for an access to the local APIC
+ if (FullSystem) {
+ LocalApicBase localApicBase =
+ tc->readMiscRegNoEffect(MISCREG_APIC_BASE);
+ AddrRange apicRange(localApicBase.base * PageBytes,
+ (localApicBase.base + 1) * PageBytes - 1);
+
+ if (apicRange.contains(paddr)) {
+ // The Intel developer's manuals say the below restrictions apply,
+ // but the linux kernel, because of a compiler optimization, breaks
+ // them.
+ /*
+ // Check alignment
+ if (paddr & ((32/8) - 1))
+ return new GeneralProtection(0);
+ // Check access size
+ if (req->getSize() != (32/8))
+ return new GeneralProtection(0);
+ */
+ // Force the access to be uncacheable.
+ req->setFlags(Request::UNCACHEABLE);
+ req->setPaddr(x86LocalAPICAddress(tc->contextId(),
+ paddr - apicRange.start()));
+ }
+ }
+
+ return NoFault;
+}
+
Fault
TLB::translate(RequestPtr req, ThreadContext *tc, Translation *translation,
Mode mode, bool &delayedResponse, bool timing)
DPRINTF(TLB, "Translated %#x -> %#x.\n", vaddr, vaddr);
req->setPaddr(vaddr);
}
- // Check for an access to the local APIC
- if (FullSystem) {
- LocalApicBase localApicBase =
- tc->readMiscRegNoEffect(MISCREG_APIC_BASE);
- Addr baseAddr = localApicBase.base * PageBytes;
- Addr paddr = req->getPaddr();
- if (baseAddr <= paddr && baseAddr + PageBytes > paddr) {
- // The Intel developer's manuals say the below restrictions apply,
- // but the linux kernel, because of a compiler optimization, breaks
- // them.
- /*
- // Check alignment
- if (paddr & ((32/8) - 1))
- return new GeneralProtection(0);
- // Check access size
- if (req->getSize() != (32/8))
- return new GeneralProtection(0);
- */
- // Force the access to be uncacheable.
- req->setFlags(Request::UNCACHEABLE);
- req->setPaddr(x86LocalAPICAddress(tc->contextId(),
- paddr - baseAddr));
- }
- }
- return NoFault;
+
+ return finalizePhysical(req, tc, mode);
}
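// Illustrative note (not part of the patch): translate() above now ends by
// delegating to finalizePhysical(), and the new finalizePhysical() replaces
// the old open-ended bounds check (baseAddr <= paddr && baseAddr + PageBytes
// > paddr) with an AddrRange whose upper bound is (base + 1) * PageBytes - 1,
// i.e. inclusive. The standalone sketch below uses a made-up DemoRange type,
// not gem5's AddrRange, to show that the two checks accept the same addresses.
#include <cassert>
#include <cstdint>

struct DemoRange {
    uint64_t start, end;            // inclusive bounds, as in the hunk above
    bool contains(uint64_t a) const { return a >= start && a <= end; }
};

int
main()
{
    constexpr uint64_t PageBytes = 4096;
    const uint64_t base = 0xfee00;  // example APIC base page number
    const uint64_t baseAddr = base * PageBytes;
    const DemoRange apic{baseAddr, (base + 1) * PageBytes - 1};

    // Sweep addresses around the APIC page: the new contains() check and the
    // old comparison agree everywhere.
    for (uint64_t a = baseAddr - 8; a < baseAddr + PageBytes + 8; ++a) {
        const bool oldCheck = baseAddr <= a && baseAddr + PageBytes > a;
        assert(apic.contains(a) == oldCheck);
    }
    return 0;
}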
Fault
*/
Fault translateFunctional(RequestPtr req, ThreadContext *tc, Mode mode);
+ /**
+ * Do post-translation physical address finalization.
+ *
+ * Some addresses, for example requests going to the APIC,
+ * need post-translation updates. Such physical addresses are
+ * remapped into a "magic" part of the physical address space
+ * by this method.
+ *
+ * @param req Request to be updated in-place.
+ * @param tc Thread context that created the request.
+ * @param mode Request type (read/write/execute).
+ * @return A fault on failure, NoFault otherwise.
+ */
+ Fault finalizePhysical(RequestPtr req, ThreadContext *tc,
+ Mode mode) const;
+
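// Illustrative note (not part of the patch): the declaration above is meant to
// be called after a successful translation, so a purely functional path (for
// example a future CheckerCPU) sees the same APIC remapping as the atomic and
// timing paths. The standalone sketch below shows only that call order;
// MockTLB, MockRequest and the int-based Fault are stand-ins, not gem5 types.
#include <cstdint>

struct MockRequest { uint64_t vaddr; uint64_t paddr; };
using Fault = int;                      // 0 plays the role of NoFault here
constexpr Fault NoFault = 0;

struct MockTLB {
    // Pretend translation: identity-map the virtual address.
    Fault translateFunctional(MockRequest &req) const
    { req.paddr = req.vaddr; return NoFault; }

    // Pretend finalization: a no-op, mirroring the per-ISA stubs in this patch.
    Fault finalizePhysical(MockRequest &) const { return NoFault; }
};

// A functional access first translates, then finalizes the physical address.
Fault
functionalAccess(const MockTLB &tlb, MockRequest &req)
{
    Fault fault = tlb.translateFunctional(req);
    if (fault != NoFault)
        return fault;
    return tlb.finalizePhysical(req);
}

int
main()
{
    MockTLB tlb;
    MockRequest req{0x1000, 0};
    return functionalAccess(tlb, req);
}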
TlbEntry * insert(Addr vpn, TlbEntry &entry);
// Checkpointing
translation->finish(translateAtomic(req, tc, mode), req, tc, mode);
}
+Fault
+GenericTLB::finalizePhysical(RequestPtr req, ThreadContext *tc,
+                             Mode mode) const
+{
+ return NoFault;
+}
+
void
GenericTLB::demapPage(Addr vaddr, uint64_t asn)
{
Fault translateAtomic(RequestPtr req, ThreadContext *tc, Mode mode);
void translateTiming(RequestPtr req, ThreadContext *tc,
Translation *translation, Mode mode);
+
+ /**
+ * Do post-translation physical address finalization.
+ *
+ * This method is used by some architectures that need
+ * post-translation massaging of physical addresses. For example,
+ * X86 uses this to remap physical addresses in the APIC range to
+ * a range of physical memory not normally available to real x86
+ * implementations.
+ *
+ * @param req Request to be updated in-place.
+ * @param tc Thread context that created the request.
+ * @param mode Request type (read/write/execute).
+ * @return A fault on failure, NoFault otherwise.
+ */
+ Fault finalizePhysical(RequestPtr req, ThreadContext *tc, Mode mode) const;
};
#endif // __SIM_TLB_HH__
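// Illustrative note (not part of the patch): a standalone sketch of the hook
// pattern described in the comment above. A base TLB exposes a virtual
// finalize step that most implementations leave as a no-op, while one of them
// remaps a device window into a "magic" region. DemoBaseTLB, DemoX86TLB and
// the addresses used here are made up for this illustration only.
#include <cstdint>

struct DemoRequest { uint64_t paddr; bool uncacheable; };
using Fault = int;
constexpr Fault NoFault = 0;

struct DemoBaseTLB {
    virtual ~DemoBaseTLB() = default;
    // Default finalization: leave the physical address untouched.
    virtual Fault finalizePhysical(DemoRequest &) const { return NoFault; }
};

struct DemoX86TLB : DemoBaseTLB {
    static constexpr uint64_t apicStart = 0xfee00000;
    static constexpr uint64_t apicEnd = 0xfee01000;      // exclusive
    static constexpr uint64_t magicBase = 0x100000000ull;

    // Remap accesses to the APIC page into the magic region and force them
    // uncacheable, in the spirit of the x86 implementation in this patch.
    Fault finalizePhysical(DemoRequest &req) const override
    {
        if (req.paddr >= apicStart && req.paddr < apicEnd) {
            req.uncacheable = true;
            req.paddr = magicBase + (req.paddr - apicStart);
        }
        return NoFault;
    }
};

int
main()
{
    DemoX86TLB tlb;
    DemoRequest req{0xfee00020, false};
    return tlb.finalizePhysical(req);   // remaps req.paddr, returns NoFault
}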