# 4K | 64K | !16K | !BigEndEL0 | !SNSMem | !BigEnd | 8b ASID | 40b PA
id_aa64mmfr0_el1 = Param.UInt64(0x0000000000f00002,
"AArch64 Memory Model Feature Register 0")
- # HPDS
- id_aa64mmfr1_el1 = Param.UInt64(0x0000000000001000,
+ # PAN | HPDS
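+ # PAN is ID_AA64MMFR1_EL1 bits [23:20], HPDS is bits [15:12]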
+ id_aa64mmfr1_el1 = Param.UInt64(0x0000000000101000,
"AArch64 Memory Model Feature Register 1")
id_aa64mmfr2_el1 = Param.UInt64(0x0000000000000000,
"AArch64 Memory Model Feature Register 2")
-# Copyright (c) 2009, 2012-2013, 2015-2018 ARM Limited
+# Copyright (c) 2009, 2012-2013, 2015-2019 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
"True if SVE is implemented (ARMv8)")
sve_vl = Param.SveVectorLength(1,
"SVE vector length in quadwords (128-bit)")
+ have_pan = Param.Bool(True,
+ "True if Priviledge Access Never is implemented (ARMv8.1)")
semihosting = Param.ArmSemihosting(NULL,
"Enable support for the Arm semihosting by settings this parameter")
/*
- * Copyright (c) 2010, 2012-2014, 2016-2018 ARM Limited
+ * Copyright (c) 2010, 2012-2014, 2016-2019 ARM Limited
* All rights reserved
*
* The license below extends only to copyright in the software and shall
if (fromEL > toEL)
toEL = fromEL;
+ // Check the SCTLR_ELx.SPAN (Set Privileged Access Never) bit, if PAN is supported
+ AA64MMFR1 mmfr1 = tc->readMiscReg(MISCREG_ID_AA64MMFR1_EL1);
+ if (mmfr1.pan) {
+ if (toEL == EL1) {
+ const SCTLR sctlr = tc->readMiscReg(MISCREG_SCTLR_EL1);
+ span = !sctlr.span;
+ }
+
+ const HCR hcr = tc->readMiscRegNoEffect(MISCREG_HCR_EL2);
+ if (toEL == EL2 && hcr.e2h && hcr.tge) {
+ const SCTLR sctlr = tc->readMiscReg(MISCREG_SCTLR_EL2);
+ span = !sctlr.span;
+ }
+ }
+
to64 = ELIs64(tc, toEL);
// The fault-specific information has been updated; it is
}
cpsr.it1 = cpsr.it2 = 0;
cpsr.j = 0;
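+ // If SCTLR_ELx.SPAN is 0, taking the exception sets PSTATE.PAN;
+ // otherwise PAN keeps its pre-exception value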
+ cpsr.pan = span ? 1 : saved_cpsr.pan;
tc->setMiscReg(MISCREG_CPSR, cpsr);
// Make sure the mailbox is always set to one
spsr.q = 0;
spsr.it1 = 0;
spsr.j = 0;
- spsr.res0_23_22 = 0;
spsr.ge = 0;
spsr.it2 = 0;
spsr.t = 0;
spsr.it2 = it.top6;
spsr.it1 = it.bottom2;
// Force some bitfields to 0
- spsr.res0_23_22 = 0;
spsr.ss = 0;
}
tc->setMiscReg(spsr_idx, spsr);
cpsr.daif = 0xf;
cpsr.il = 0;
cpsr.ss = 0;
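+ // As in the AArch32 path above, set PSTATE.PAN on entry when
+ // SCTLR_ELx.SPAN is 0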
+ cpsr.pan = span ? 1 : spsr.pan;
tc->setMiscReg(MISCREG_CPSR, cpsr);
// If we have a valid instruction then use it to annotate this fault with
/*
- * Copyright (c) 2010, 2012-2013, 2016-2018 ARM Limited
+ * Copyright (c) 2010, 2012-2013, 2016-2019 ARM Limited
* All rights reserved
*
* The license below extends only to copyright in the software and shall
bool faultUpdated;
bool hypRouted; // True if the fault has been routed to Hypervisor
+ bool span; // True if the fault is setting the PSTATE.PAN bit
virtual Addr getVector(ThreadContext *tc);
Addr getVector64(ThreadContext *tc);
ArmFault(ExtMachInst _machInst = 0, uint32_t _iss = 0) :
machInst(_machInst), issRaw(_iss), from64(false), to64(false),
fromEL(EL0), toEL(EL0), fromMode(MODE_UNDEFINED),
- faultUpdated(false), hypRouted(false) {}
+ faultUpdated(false), hypRouted(false), span(false) {}
// Returns the actual syndrome register to use based on the target
// exception level
{
if (dest == MISCREG_SPSEL) {
return imm & 0x1;
+ } else if (dest == MISCREG_PAN) {
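+ // PSTATE.PAN lives in bit 22 of the CPSR; shift the 1-bit immediate into place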
+ return (imm & 0x1) << 22;
} else {
panic("Not a valid PSTATE field register\n");
}
/*
- * Copyright (c) 2010-2014, 2016-2018 ARM Limited
+ * Copyright (c) 2010-2014, 2016-2019 ARM Limited
* Copyright (c) 2013 Advanced Micro Devices, Inc.
* All rights reserved
*
new_cpsr.nz = spsr.nz;
new_cpsr.c = spsr.c;
new_cpsr.v = spsr.v;
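+ // Exception returns also restore PSTATE.PAN from the saved SPSR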
+ new_cpsr.pan = spsr.pan;
if (new_cpsr.width) {
// aarch32
const ITSTATE it = getRestoredITBits(tc, spsr);
/*
- * Copyright (c) 2010-2018 ARM Limited
+ * Copyright (c) 2010-2019 ARM Limited
* All rights reserved
*
* The license below extends only to copyright in the software and shall
haveLargeAsid64 = system->haveLargeAsid64();
physAddrRange = system->physAddrRange();
haveSVE = system->haveSVE();
+ havePAN = system->havePAN();
sveVL = system->sveVL();
} else {
highestELIs64 = true; // ArmSystem::highestELIs64 does the same
haveLargeAsid64 = false;
physAddrRange = 32; // dummy value
haveSVE = true;
+ havePAN = false;
sveVL = p->sve_vl_se;
}
miscRegs[MISCREG_ID_AA64ISAR0_EL1] = insertBits(
miscRegs[MISCREG_ID_AA64ISAR0_EL1], 19, 4,
haveCrypto ? 0x1112 : 0x0);
+ // PAN
+ miscRegs[MISCREG_ID_AA64MMFR1_EL1] = insertBits(
+ miscRegs[MISCREG_ID_AA64MMFR1_EL1], 23, 20,
+ havePAN ? 0x1 : 0x0);
}
void
{
return miscRegs[MISCREG_CPSR] & 0xc;
}
+ case MISCREG_PAN:
+ {
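+ // PSTATE.PAN is held in CPSR bit 22 (mask 0x400000)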
+ return miscRegs[MISCREG_CPSR] & 0x400000;
+ }
case MISCREG_L2CTLR:
{
// mostly unimplemented, just set NumCPUs field from sim and return
misc_reg = MISCREG_CPSR;
}
break;
+ case MISCREG_PAN:
+ {
+ // PAN affects data accesses, so invalidate the TLB's cached misc reg state
+ getDTBPtr(tc)->invalidateMiscReg();
+
+ CPSR cpsr = miscRegs[MISCREG_CPSR];
+ cpsr.pan = (uint8_t) ((CPSR) newVal).pan;
+ newVal = cpsr;
+ misc_reg = MISCREG_CPSR;
+ }
+ break;
case MISCREG_AT_S1E1R_Xt:
case MISCREG_AT_S1E1W_Xt:
case MISCREG_AT_S1E0R_Xt:
case MISCREG_SPSR_EL3:
case MISCREG_SPSR_EL2:
case MISCREG_SPSR_EL1:
- // Force bits 23:21 to 0
- newVal = val & ~(0x7 << 21);
- break;
+ {
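+ // With PAN implemented, SPSR bit 22 holds PSTATE.PAN and must be
+ // preserved; only bits 23 and 21 are forced to zero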
+ RegVal spsr_mask = havePAN ?
+ ~(0x5 << 21) : ~(0x7 << 21);
+
+ newVal = val & spsr_mask;
+ break;
+ }
case MISCREG_L2CTLR:
warn("miscreg L2CTLR (%s) written with %#x. ignored...\n",
miscRegName[misc_reg], uint32_t(val));
/*
- * Copyright (c) 2010, 2012-2018 ARM Limited
+ * Copyright (c) 2010, 2012-2019 ARM Limited
* All rights reserved
*
* The license below extends only to copyright in the software and shall
bool haveGICv3CPUInterface;
uint8_t physAddrRange;
bool haveSVE;
+ bool havePAN;
/** SVE vector length in quadwords */
unsigned sveVL;
SERIALIZE_SCALAR(physAddrRange);
SERIALIZE_SCALAR(haveSVE);
SERIALIZE_SCALAR(sveVL);
+ SERIALIZE_SCALAR(havePAN);
}
void unserialize(CheckpointIn &cp)
{
UNSERIALIZE_SCALAR(physAddrRange);
UNSERIALIZE_SCALAR(haveSVE);
UNSERIALIZE_SCALAR(sveVL);
+ UNSERIALIZE_SCALAR(havePAN);
}
void startup(ThreadContext *tc);
// MSR immediate: moving immediate value to selected
// bits of the PSTATE
switch (op1 << 3 | op2) {
+ case 0x4:
+ // PAN
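+ // (op1 = 0b000, op2 = 0b100 is the MSR immediate encoding for PSTATE.PAN)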
+ return new MsrImm64(
+ machInst, MISCREG_PAN, crm);
case 0x5:
// SP
return new MsrImm64(
def buildMsrImmInst(mnem, inst_name, code):
global header_output, decoder_output, exec_output
msrImmPermission = '''
- if (!canWriteAArch64SysReg(
- (MiscRegIndex) xc->tcBase()->flattenRegId(
- RegId(MiscRegClass, dest)).index(),
- Scr64, Cpsr, xc->tcBase())) {
- return std::make_shared<UndefinedInstruction>(
- machInst, 0, EC_TRAPPED_MSR_MRS_64,
- mnemonic);
+ auto misc_index = (MiscRegIndex) xc->tcBase()->flattenRegId(
+ RegId(MiscRegClass, dest)).index();
+
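+ // An MSR to an unimplemented PSTATE field (e.g. PAN when
+ // ARMv8.1-PAN is disabled) is UNDEFINED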
+ if (!miscRegInfo[misc_index][MISCREG_IMPLEMENTED]) {
+ return std::make_shared<UndefinedInstruction>(
+ machInst, false,
+ mnemonic);
+ }
+
+ if (!canWriteAArch64SysReg(misc_index,
+ Scr64, Cpsr, xc->tcBase())) {
+
+ return std::make_shared<UndefinedInstruction>(
+ machInst, 0, EC_TRAPPED_MSR_MRS_64,
+ mnemonic);
}
'''
/*
- * Copyright (c) 2010-2013, 2015-2018 ARM Limited
+ * Copyright (c) 2010-2013, 2015-2019 ARM Limited
* All rights reserved
*
* The license below extends only to copyright in the software and shall
return MISCREG_SPSEL;
case 2:
return MISCREG_CURRENTEL;
+ case 3:
+ return MISCREG_PAN;
}
break;
case 6:
.allPrivileges().exceptUserMode();
InitReg(MISCREG_CURRENTEL)
.allPrivileges().exceptUserMode().writes(0);
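+ // When havePAN is false the register is marked unimplemented and
+ // accesses to it are treated as UNDEFINED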
+ InitReg(MISCREG_PAN)
+ .allPrivileges().exceptUserMode()
+ .implemented(havePAN);
InitReg(MISCREG_NZCV)
.allPrivileges();
InitReg(MISCREG_DAIF)
/*
- * Copyright (c) 2010-2018 ARM Limited
+ * Copyright (c) 2010-2019 ARM Limited
* All rights reserved
*
* The license below extends only to copyright in the software and shall
MISCREG_VSESR_EL2,
MISCREG_VDISR_EL2,
+ // PSTATE
+ MISCREG_PAN,
+
// Total number of Misc Registers: Physical + Dummy
NUM_MISCREGS
};
"disr_el1",
"vsesr_el2",
"vdisr_el2",
+
+ // PSTATE
+ "pan",
};
static_assert(sizeof(miscRegName) / sizeof(*miscRegName) == NUM_MISCREGS,
/*
- * Copyright (c) 2010-2018 ARM Limited
+ * Copyright (c) 2010-2019 ARM Limited
* All rights reserved
*
* The license below extends only to copyright in the software and shall
Bitfield<27> q;
Bitfield<26, 25> it1;
Bitfield<24> j;
- Bitfield<23, 22> res0_23_22;
+ Bitfield<22> pan;
Bitfield<21> ss; // AArch64
Bitfield<20> il; // AArch64
Bitfield<19, 16> ge;
Bitfield<25> ee; // Exception Endianness
Bitfield<24> e0e; // Endianness of explicit data accesses at EL0
// (AArch64 SCTLR_EL1 only)
+ Bitfield<23> span; // Set Privileged Access Never on taking
+ // an exception
Bitfield<23> xp; // Extended page table enable (dropped in ARMv7)
Bitfield<22> u; // Alignment (dropped in ARMv7)
Bitfield<21> fi; // Fast interrupts configuration enable
/*
- * Copyright (c) 2010, 2012-2013, 2015,2017-2018 ARM Limited
+ * Copyright (c) 2010, 2012-2013, 2015,2017-2019 ARM Limited
* All rights reserved
*
* The license below extends only to copyright in the software and shall
_haveLargeAsid64(p->have_large_asid_64),
_haveSVE(p->have_sve),
_sveVL(p->sve_vl),
+ _havePAN(p->have_pan),
_m5opRange(p->m5ops_base ?
RangeSize(p->m5ops_base, 0x10000) :
AddrRange(1, 0)), // Create an empty range if disabled
/*
- * Copyright (c) 2010, 2012-2013, 2015-2018 ARM Limited
+ * Copyright (c) 2010, 2012-2013, 2015-2019 ARM Limited
* All rights reserved
*
* The license below extends only to copyright in the software and shall
/** SVE vector length at reset, in quadwords */
const unsigned _sveVL;
+ /** True if Privileged Access Never is implemented */
+ const bool _havePAN;
+
/**
* Range for memory-mapped m5 pseudo ops. The range will be
* invalid/empty if disabled.
/** Returns the SVE vector length at reset, in quadwords */
unsigned sveVL() const { return _sveVL; }
+ /** Returns true if Privileged Access Never is implemented */
+ bool havePAN() const { return _havePAN; }
+
/** Returns the supported physical address range in bits if the highest
* implemented exception level is 64 bits (ARMv8) */
uint8_t physAddrRange64() const { return _physAddrRange64; }
break;
case EL1:
{
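+ // ARMv8.1-PAN: when PSTATE.PAN is set, privileged data accesses
+ // to pages that are also EL0-accessible generate a permission fault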
+ if (checkPAN(tc, ap, req, mode)) {
+ grant = false;
+ break;
+ }
+
uint8_t perm = (ap << 2) | (xn << 1) | pxn;
switch (perm) {
case 0:
}
break;
case EL2:
+ if (checkPAN(tc, ap, req, mode)) {
+ grant = false;
+ break;
+ }
+ M5_FALLTHROUGH;
case EL3:
{
uint8_t perm = (ap & 0x2) | xn;
return NoFault;
}
+bool
+TLB::checkPAN(ThreadContext *tc, uint8_t ap, const RequestPtr &req, Mode mode)
+{
+ // The PAN bit has no effect on:
+ // 1) Instruction accesses.
+ // 2) Data Cache instructions other than DC ZVA
+ // 3) Address translation instructions, other than ATS1E1RP and
+ // ATS1E1WP when ARMv8.2-ATS1E1 is implemented. (Unimplemented in
+ // gem5)
+ // 4) Unprivileged instructions (Unimplemented in gem5)
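+ // A privileged access faults when PSTATE.PAN is set and the mapping
+ // also grants unprivileged access (ap bit 0 here corresponds to the
+ // descriptor's EL0-accessible permission)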
+ AA64MMFR1 mmfr1 = tc->readMiscReg(MISCREG_ID_AA64MMFR1_EL1);
+ if (mmfr1.pan && cpsr.pan && (ap & 0x1) && mode != Execute &&
+ (!req->isCacheMaintenance() ||
+ (req->getFlags() & Request::CACHE_BLOCK_ZERO))) {
+ return true;
+ } else {
+ return false;
+ }
+}
+
Fault
TLB::translateFs(const RequestPtr &req, ThreadContext *tc, Mode mode,
Translation *translation, bool &delay, bool timing,
Fault checkPermissions(TlbEntry *te, const RequestPtr &req, Mode mode);
Fault checkPermissions64(TlbEntry *te, const RequestPtr &req, Mode mode,
ThreadContext *tc);
+ bool checkPAN(ThreadContext *tc, uint8_t ap, const RequestPtr &req,
+ Mode mode);
/** Reset the entire TLB