return;
}
case MISCREG_TLBI_VMALLS12E1:
- // @todo: handle VMID and stage 2 to enable Virtualization
{
assert64();
scr = readMiscReg(MISCREG_SCR);
- TLBIALL tlbiOp(EL1, haveSecurity && !scr.ns);
+ TLBIVMALL tlbiOp(EL1, haveSecurity && !scr.ns, true);
tlbiOp(tc);
return;
}
case MISCREG_TLBI_VMALLE1:
- // @todo: handle VMID and stage 2 to enable Virtualization
{
assert64();
scr = readMiscReg(MISCREG_SCR);
HCR hcr = readMiscReg(MISCREG_HCR_EL2);
bool is_host = (hcr.tge && hcr.e2h);
ExceptionLevel target_el = is_host ? EL2 : EL1;
- TLBIALL tlbiOp(target_el, haveSecurity && !scr.ns);
+ TLBIVMALL tlbiOp(target_el, haveSecurity && !scr.ns, false);
tlbiOp(tc);
return;
}
case MISCREG_TLBI_VMALLS12E1IS:
- // @todo: handle VMID and stage 2 to enable Virtualization
{
assert64();
scr = readMiscReg(MISCREG_SCR);
- TLBIALL tlbiOp(EL1, haveSecurity && !scr.ns);
+ TLBIVMALL tlbiOp(EL1, haveSecurity && !scr.ns, true);
tlbiOp.broadcast(tc);
return;
}
case MISCREG_TLBI_VMALLE1IS:
- // @todo: handle VMID and stage 2 to enable Virtualization
{
assert64();
scr = readMiscReg(MISCREG_SCR);
HCR hcr = readMiscReg(MISCREG_HCR_EL2);
bool is_host = (hcr.tge && hcr.e2h);
ExceptionLevel target_el = is_host ? EL2 : EL1;
- TLBIALL tlbiOp(target_el, haveSecurity && !scr.ns);
+ TLBIVMALL tlbiOp(target_el, haveSecurity && !scr.ns, false);
tlbiOp.broadcast(tc);
return;
}
}
}
+// Flush handler for the AArch64 TLBI VMALLE1(IS)/VMALLS12E1(IS) operations:
+// invalidates every valid entry whose security state, VMID and exception
+// level match the op; the VMALLS12E1 forms additionally flush stage 2.
+void
+TLB::flush(const TLBIVMALL &tlbi_op)
+{
+ DPRINTF(TLB, "Flushing all TLB entries (%s lookup)\n",
+ (tlbi_op.secureLookup ? "secure" : "non-secure"));
+ int x = 0;
+ TlbEntry *te;
+ while (x < size) {
+ te = &table[x];
+ // Entry must belong to the targeted exception level; inHost widens
+ // the match for the EL2&0 (HCR_EL2.TGE+E2H) host regime.
+ const bool el_match = te->checkELMatch(
+ tlbi_op.targetEL, tlbi_op.inHost);
+ // Invalidate when: entry valid, security state matches (nstid is the
+ // entry's non-secure bit), VMID matches (or the VMID filter is
+ // bypassed because EL2 is disabled), and the EL matches.
+ // NOTE(review): tlbi_op.el2Enabled appears to remain false (see
+ // TLBIVMALL::operator() / constructor), which would bypass the VMID
+ // comparison entirely — confirm intended behavior.
+ if (te->valid && tlbi_op.secureLookup == !te->nstid &&
+ (te->vmid == vmid || !tlbi_op.el2Enabled) && el_match) {
+
+ DPRINTF(TLB, " - %s\n", te->print());
+ te->valid = false;
+ stats.flushedEntries++;
+ }
+ ++x;
+ }
+
+ stats.flushTlb++;
+
+ // For the VMALLS12E1(IS) forms (tlbi_op.stage2 set), stage 2
+ // translations must be invalidated too: forward a stage-2-scoped copy
+ // of this op to the stage 2 TLB (unless we already are the stage 2 TLB).
+ if (!isStage2 && tlbi_op.stage2) {
+ stage2Tlb->flush(tlbi_op.makeStage2());
+ }
+}
+
void
TLB::flush(const TLBIALLN &tlbi_op)
{
class TLBIALL;
class TLBIALLEL;
+class TLBIVMALL;
class TLBIALLN;
class TLBIMVA;
class TLBIASID;
*/
void flush(const TLBIALLEL &tlbi_op);
+ /** Implementation of AArch64 TLBI VMALLE1(IS)/VMALLS12E1(IS)
+ * instructions
+ */
+ void flush(const TLBIVMALL &tlbi_op);
+
/** Remove all entries in the non secure world, depending on whether they
* were allocated in hyp mode or not
*/
}
}
+// Execute a TLBI VMALLE1(IS)/VMALLS12E1(IS) operation on a thread context:
+// samples the current translation regime, then flushes this context's MMU
+// (and the checker CPU's MMU when one is attached).
+void
+TLBIVMALL::operator()(ThreadContext* tc)
+{
+ HCR hcr = tc->readMiscReg(MISCREG_HCR_EL2);
+ // Host (EL2&0) regime is active when HCR_EL2.TGE and HCR_EL2.E2H are
+ // both set; TLB::flush uses this to widen the EL match.
+ inHost = (hcr.tge == 1 && hcr.e2h == 1);
+
+ // NOTE(review): el2Enabled is left at its constructed value (false)
+ // here — confirm whether it should be derived from EL2 enablement.
+ getMMUPtr(tc)->flush(*this);
+
+ // If CheckerCPU is connected, need to notify it of a flush
+ CheckerCPU *checker = tc->getCheckerCpuPtr();
+ if (checker) {
+ getMMUPtr(checker)->flush(*this);
+ }
+}
+
void
TLBIASID::operator()(ThreadContext* tc)
{
bool inHost;
};
+/** Implementation of AArch64 TLBI VMALLE1(IS)/VMALLS12E1(IS) instructions */
+class TLBIVMALL : public TLBIOp
+{
+ public:
+ /**
+ * @param _targetEL Exception level whose entries are invalidated
+ * @param _secure True for a secure-state lookup
+ * @param _stage2 True for the VMALLS12E1 forms, which also
+ * invalidate stage 2 translations
+ */
+ TLBIVMALL(ExceptionLevel _targetEL, bool _secure, bool _stage2)
+ : TLBIOp(_targetEL, _secure), inHost(false), el2Enabled(false),
+ stage2(_stage2)
+ {}
+
+ void operator()(ThreadContext* tc) override;
+
+ /** Stage-2-scoped copy of this op, used to flush the stage 2 TLB. */
+ TLBIVMALL
+ makeStage2() const
+ {
+ return TLBIVMALL(EL1, secureLookup, false);
+ }
+
+ // True when executed in the EL2&0 host regime (HCR_EL2.TGE && E2H);
+ // set by operator().
+ bool inHost;
+ // Gates the VMID comparison in TLB::flush (false bypasses it).
+ // NOTE(review): never set to true in the visible code — confirm.
+ bool el2Enabled;
+ // True for VMALLS12E1(IS): also invalidate stage 2 entries.
+ bool stage2;
+};
+
/** TLB Invalidate by ASID match */
class TLBIASID : public TLBIOp
{