{
PCState pc = pcState();
if (!(pc.pc() & 0x3))
- return new UnimplementedOpcodeFault;
+ return std::make_shared<UnimplementedOpcodeFault>();
pc.npc(readMiscRegNoEffect(IPR_EXC_ADDR));
pcState(pc);
virtual bool skipFaultingInstruction() {return false;}
virtual bool setRestartAddress() {return true;}
public:
+ virtual ~AlphaFault() {}
void invoke(ThreadContext * tc, const StaticInstPtr &inst =
StaticInst::nullStaticInstPtr);
virtual FaultVect vect() = 0;
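Every hunk below follows the same mechanical pattern: gem5's Fault type is now a std::shared_ptr to the fault base class, so bare "new SomeFault" expressions become std::make_shared<SomeFault>(), and each file that constructs faults gains an #include <memory>. A minimal sketch of the pattern, using stand-in class names rather than the real gem5 headers:

    // Minimal sketch of the migration, with stand-in types (not the real
    // gem5 classes): Fault is a shared_ptr to the fault base class, and a
    // shared_ptr<Derived> converts implicitly to shared_ptr<Base>.
    #include <memory>

    struct FaultBase { virtual ~FaultBase() {} };
    struct UnimplementedOpcodeFault : FaultBase {};

    typedef std::shared_ptr<FaultBase> Fault;

    Fault
    makeFault()
    {
        // Before: "return new UnimplementedOpcodeFault;" with a reference
        // counting pointer; after: one allocation for object plus control
        // block, implicitly upcast to the base shared_ptr on return.
        return std::make_shared<UnimplementedOpcodeFault>();
    }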
#ifndef __ARCH_ALPHA_INTERRUPT_HH__
#define __ARCH_ALPHA_INTERRUPT_HH__
+#include <memory>
+
#include "arch/alpha/faults.hh"
#include "arch/alpha/isa_traits.hh"
#include "base/compiler.hh"
DPRINTF(Flow, "Interrupt! IPLR=%d ipl=%d summary=%x\n",
tc->readMiscRegNoEffect(IPR_IPLR), ipl, summary);
- return new InterruptFault;
+ return std::make_shared<InterruptFault>();
} else {
return NoFault;
}
// signed overflow occurs when operands have same sign
// and sign of result does not match.
if (Ra_sl<31:> == Rb_or_imm_sl<31:> && tmp<31:> != Ra_sl<31:>)
- fault = new IntegerOverflowFault;
+ fault = std::make_shared<IntegerOverflowFault>();
Rc_sl = tmp;
}});
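The comment above states the classic rule for detecting signed-add overflow without widening: overflow happens exactly when both operands have the same sign and the result's sign differs from them (the subtract variants further down flip the condition to operands of differing sign). The same check outside the ISA notation, as a hypothetical helper that is not part of this patch:

    // Sketch: 32-bit signed-add overflow via the sign-comparison rule the
    // addlv comment describes. Hypothetical helper, not gem5 code.
    #include <cstdint>

    bool
    addOverflows(int32_t a, int32_t b, int32_t &sum_out)
    {
        uint32_t ua = static_cast<uint32_t>(a);
        uint32_t ub = static_cast<uint32_t>(b);
        uint32_t sum = ua + ub;                    // unsigned wraparound is defined
        sum_out = static_cast<int32_t>(sum);
        bool same_sign = ((ua ^ ub) >> 31) == 0;   // operands agree in sign
        bool sign_flip = ((ua ^ sum) >> 31) != 0;  // result disagrees with them
        return same_sign && sign_flip;             // e.g. INT32_MAX + 1 trips this
    }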
0x02: s4addl({{ Rc_sl = (Ra_sl << 2) + Rb_or_imm_sl; }});
// signed overflow occurs when operands have same sign
// and sign of result does not match.
if (Ra<63:> == Rb_or_imm<63:> && tmp<63:> != Ra<63:>)
- fault = new IntegerOverflowFault;
+ fault = std::make_shared<IntegerOverflowFault>();
Rc = tmp;
}});
0x22: s4addq({{ Rc = (Ra << 2) + Rb_or_imm; }});
// sign bit of the subtrahend (Rb), i.e., if the initial
// signs are the *same* then no overflow can occur
if (Ra_sl<31:> != Rb_or_imm_sl<31:> && tmp<31:> != Ra_sl<31:>)
- fault = new IntegerOverflowFault;
+ fault = std::make_shared<IntegerOverflowFault>();
Rc_sl = tmp;
}});
0x0b: s4subl({{ Rc_sl = (Ra_sl << 2) - Rb_or_imm_sl; }});
// sign bit of the subtrahend (Rb), i.e., if the initial
// signs are the *same* then no overflow can occur
if (Ra<63:> != Rb_or_imm<63:> && tmp<63:> != Ra<63:>)
- fault = new IntegerOverflowFault;
+ fault = std::make_shared<IntegerOverflowFault>();
Rc = tmp;
}});
0x2b: s4subq({{ Rc = (Ra << 2) - Rb_or_imm; }});
// checking the upper 33 bits for all 0s or all 1s.
uint64_t sign_bits = tmp<63:31>;
if (sign_bits != 0 && sign_bits != mask(33))
- fault = new IntegerOverflowFault;
+ fault = std::make_shared<IntegerOverflowFault>();
Rc_sl = tmp<31:0>;
}}, IntMultOp);
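The upper-33-bit test above is the usual "fits in a signed 32-bit value" check: after the 64-bit multiply, the product is representable in 32 bits exactly when bits 63:31 are all zeros (small non-negative value) or all ones (small negative value). The same predicate in plain C++, as a hypothetical helper that is not part of this patch:

    // Sketch of the mullv range check: bits 63:31 of the product must be
    // all zeros or all ones (mask(33) == 0x1ffffffff). Not gem5 code.
    #include <cstdint>

    bool
    fitsInInt32(int64_t product)
    {
        uint64_t sign_bits = static_cast<uint64_t>(product) >> 31; // bits 63:31
        return sign_bits == 0 || sign_bits == UINT64_C(0x1ffffffff);
    }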
0x60: mulqv({{
// the lower 64
if (!((hi == 0 && lo<63:> == 0) ||
(hi == mask(64) && lo<63:> == 1)))
- fault = new IntegerOverflowFault;
+ fault = std::make_shared<IntegerOverflowFault>();
Rc = lo;
}}, IntMultOp);
}
#if SS_COMPATIBLE_FP
0x0b: sqrts({{
if (Fb < 0.0)
- fault = new ArithmeticFault;
+ fault = std::make_shared<ArithmeticFault>();
Fc = sqrt(Fb);
}}, FloatSqrtOp);
#else
0x0b: sqrts({{
if (Fb_sf < 0.0)
- fault = new ArithmeticFault;
+ fault = std::make_shared<ArithmeticFault>();
Fc_sf = sqrt(Fb_sf);
}}, FloatSqrtOp);
#endif
0x2b: sqrtt({{
if (Fb < 0.0)
- fault = new ArithmeticFault;
+ fault = std::make_shared<ArithmeticFault>();
Fc = sqrt(Fb);
}}, FloatSqrtOp);
}
// checking the upper 33 bits for all 0s or all 1s.
uint64_t sign_bits = Fb_uq<63:31>;
if (sign_bits != 0 && sign_bits != mask(33))
- fault = new IntegerOverflowFault;
+ fault = std::make_shared<IntegerOverflowFault>();
Fc_uq = (Fb_uq<31:30> << 62) | (Fb_uq<29:0> << 29);
}});
&& xc->readMiscReg(IPR_ICM) != mode_kernel)) {
// invalid pal function code, or attempt to do privileged
// PAL call in non-kernel mode
- fault = new UnimplementedOpcodeFault;
+ fault = std::make_shared<UnimplementedOpcodeFault>();
} else {
// check to see if simulator wants to do something special
// on this PAL call (including maybe suppress it)
IprToMiscRegIndex[ipr_index] : -1;
if(miscRegIndex < 0 || !IprIsReadable(miscRegIndex) ||
miscRegIndex >= NumInternalProcRegs)
- fault = new UnimplementedOpcodeFault;
+ fault = std::make_shared<UnimplementedOpcodeFault>();
else
Ra = xc->readMiscReg(miscRegIndex);
}}, IsIprAccess);
IprToMiscRegIndex[ipr_index] : -1;
if(miscRegIndex < 0 || !IprIsWritable(miscRegIndex) ||
miscRegIndex >= NumInternalProcRegs)
- fault = new UnimplementedOpcodeFault;
+ fault = std::make_shared<UnimplementedOpcodeFault>();
else
xc->setMiscReg(miscRegIndex, Ra);
if (traceData) { traceData->setData(Ra); }
{
Fault fault = NoFault; // dummy... this ipr access should not fault
if (FullSystem && !ICSR_FPE(xc->readMiscReg(IPR_ICSR))) {
- fault = new FloatEnableFault;
+ fault = std::make_shared<FloatEnableFault>();
}
return fault;
}
OpcdecFault::execute(CPU_EXEC_CONTEXT *xc,
Trace::InstRecord *traceData) const
{
- return new UnimplementedOpcodeFault;
+ return std::make_shared<UnimplementedOpcodeFault>();
}
}};
{
panic("attempt to execute unimplemented instruction '%s' "
"(inst 0x%08x, opcode 0x%x)", mnemonic, machInst, OPCODE);
- return new UnimplementedOpcodeFault;
+ return std::make_shared<UnimplementedOpcodeFault>();
}
Fault
{
panic("attempt to execute unknown instruction "
"(inst 0x%08x, opcode 0x%x)", machInst, OPCODE);
- return new UnimplementedOpcodeFault;
+ return std::make_shared<UnimplementedOpcodeFault>();
}
}};
* Andrew Schultz
*/
+#include <memory>
#include <string>
#include <vector>
if (req->getPaddr() & PAddrUncachedBit43) {
// IPR memory space not implemented
if (PAddrIprSpace(req->getPaddr())) {
- return new UnimpFault("IPR memory space not implemented!");
+ return std::make_shared<UnimpFault>(
+ "IPR memory space not implemented!");
} else {
// mark request as uncacheable
req->setFlags(Request::UNCACHEABLE);
// we don't have a ROM and we don't want to try to fetch from a device
// register as we destroy any data that is clear-on-read.
if (req->isUncacheable() && itb)
- return new UnimpFault("CPU trying to fetch from uncached I/O");
+ return std::make_shared<UnimpFault>(
+ "CPU trying to fetch from uncached I/O");
}
return NoFault;
// verify that this is a good virtual address
if (!validVirtualAddress(req->getVaddr())) {
fetch_acv++;
- return new ItbAcvFault(req->getVaddr());
+ return std::make_shared<ItbAcvFault>(req->getVaddr());
}
if (ICM_CM(tc->readMiscRegNoEffect(IPR_ICM)) !=
mode_kernel) {
fetch_acv++;
- return new ItbAcvFault(req->getVaddr());
+ return std::make_shared<ItbAcvFault>(req->getVaddr());
}
req->setPaddr(req->getVaddr() & PAddrImplMask);
if (!entry) {
fetch_misses++;
- return new ItbPageFault(req->getVaddr());
+ return std::make_shared<ItbPageFault>(req->getVaddr());
}
req->setPaddr((entry->ppn << PageShift) +
(1 << ICM_CM(tc->readMiscRegNoEffect(IPR_ICM))))) {
// instruction access fault
fetch_acv++;
- return new ItbAcvFault(req->getVaddr());
+ return std::make_shared<ItbAcvFault>(req->getVaddr());
}
fetch_hits++;
// check that the physical address is ok (catch bad physical addresses)
if (req->getPaddr() & ~PAddrImplMask) {
- return new MachineCheckFault();
+ return std::make_shared<MachineCheckFault>();
}
return checkCacheability(req, true);
DPRINTF(TLB, "Alignment Fault on %#x, size = %d\n", req->getVaddr(),
req->getSize());
uint64_t flags = write ? MM_STAT_WR_MASK : 0;
- return new DtbAlignmentFault(req->getVaddr(), req->getFlags(), flags);
+ return std::make_shared<DtbAlignmentFault>(req->getVaddr(),
+ req->getFlags(),
+ flags);
}
if (PcPAL(req->getPC())) {
uint64_t flags = (write ? MM_STAT_WR_MASK : 0) |
MM_STAT_BAD_VA_MASK |
MM_STAT_ACV_MASK;
- return new DtbPageFault(req->getVaddr(), req->getFlags(), flags);
+ return std::make_shared<DtbPageFault>(req->getVaddr(),
+ req->getFlags(),
+ flags);
}
// Check for "superpage" mapping
uint64_t flags = ((write ? MM_STAT_WR_MASK : 0) |
MM_STAT_ACV_MASK);
- return new DtbAcvFault(req->getVaddr(), req->getFlags(),
- flags);
+ return std::make_shared<DtbAcvFault>(req->getVaddr(),
+ req->getFlags(),
+ flags);
}
req->setPaddr(req->getVaddr() & PAddrImplMask);
uint64_t flags = (write ? MM_STAT_WR_MASK : 0) |
MM_STAT_DTB_MISS_MASK;
return (req->getFlags() & Request::VPTE) ?
- (Fault)(new PDtbMissFault(req->getVaddr(), req->getFlags(),
- flags)) :
- (Fault)(new NDtbMissFault(req->getVaddr(), req->getFlags(),
- flags));
+ (Fault)(std::make_shared<PDtbMissFault>(req->getVaddr(),
+ req->getFlags(),
+ flags)) :
+ (Fault)(std::make_shared<NDtbMissFault>(req->getVaddr(),
+ req->getFlags(),
+ flags));
}
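The explicit (Fault) casts survive the rewrite for a reason: the two arms of the conditional now build shared_ptrs to different fault classes, and shared_ptr<PDtbMissFault> has no common type with shared_ptr<NDtbMissFault> until each is converted to the base Fault. A compact illustration with placeholder classes, not the real gem5 types:

    // Sketch: why each arm of the ?: is cast to the base shared_ptr type.
    // Placeholder classes, not the gem5 ones.
    #include <memory>

    struct FaultBase { virtual ~FaultBase() {} };
    struct PDtbMissFault : FaultBase {};
    struct NDtbMissFault : FaultBase {};
    typedef std::shared_ptr<FaultBase> Fault;

    Fault
    pickMissFault(bool is_vpte)
    {
        // Without the casts the conditional has no common type and fails
        // to compile; with them, both arms are already Fault.
        return is_vpte ? (Fault)std::make_shared<PDtbMissFault>()
                       : (Fault)std::make_shared<NDtbMissFault>();
    }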
req->setPaddr((entry->ppn << PageShift) +
uint64_t flags = MM_STAT_WR_MASK |
MM_STAT_ACV_MASK |
(entry->fonw ? MM_STAT_FONW_MASK : 0);
- return new DtbPageFault(req->getVaddr(), req->getFlags(),
- flags);
+ return std::make_shared<DtbPageFault>(req->getVaddr(),
+ req->getFlags(),
+ flags);
}
if (entry->fonw) {
write_acv++;
uint64_t flags = MM_STAT_WR_MASK | MM_STAT_FONW_MASK;
- return new DtbPageFault(req->getVaddr(), req->getFlags(),
- flags);
+ return std::make_shared<DtbPageFault>(req->getVaddr(),
+ req->getFlags(),
+ flags);
}
} else {
if (!(entry->xre & MODE2MASK(mode))) {
read_acv++;
uint64_t flags = MM_STAT_ACV_MASK |
(entry->fonr ? MM_STAT_FONR_MASK : 0);
- return new DtbAcvFault(req->getVaddr(), req->getFlags(),
- flags);
+ return std::make_shared<DtbAcvFault>(req->getVaddr(),
+ req->getFlags(),
+ flags);
}
if (entry->fonr) {
read_acv++;
uint64_t flags = MM_STAT_FONR_MASK;
- return new DtbPageFault(req->getVaddr(), req->getFlags(),
- flags);
+ return std::make_shared<DtbPageFault>(req->getVaddr(),
+ req->getFlags(),
+ flags);
}
}
}
// check that the physical address is ok (catch bad physical addresses)
if (req->getPaddr() & ~PAddrImplMask) {
- return new MachineCheckFault();
+ return std::make_shared<MachineCheckFault>();
}
return checkCacheability(req);
#include "base/statistics.hh"
#include "mem/request.hh"
#include "params/AlphaTLB.hh"
-#include "sim/fault_fwd.hh"
#include "sim/tlb.hh"
class ThreadContext;
#ifndef __ARCH_ARM_INSTS_STATICINST_HH__
#define __ARCH_ARM_INSTS_STATICINST_HH__
+#include <memory>
+
#include "arch/arm/faults.hh"
#include "arch/arm/utility.hh"
#include "arch/arm/system.hh"
inline Fault
disabledFault() const
{
- return new UndefinedInstruction(machInst, false, mnemonic, true);
+ return std::make_shared<UndefinedInstruction>(machInst, false,
+ mnemonic, true);
}
public:
if (interrupts[INT_IRQ] && take_irq)
- return new Interrupt;
+ return std::make_shared<Interrupt>();
if ((interrupts[INT_VIRT_IRQ] || hcr.vi) && allowVIrq)
- return new VirtualInterrupt;
+ return std::make_shared<VirtualInterrupt>();
if (interrupts[INT_FIQ] && take_fiq)
- return new FastInterrupt;
+ return std::make_shared<FastInterrupt>();
if ((interrupts[INT_VIRT_FIQ] || hcr.vf) && allowVFiq)
- return new VirtualFastInterrupt;
+ return std::make_shared<VirtualFastInterrupt>();
if (interrupts[INT_ABT] && take_ea)
- return new SystemError;
+ return std::make_shared<SystemError>();
if (hcr.va && allowVAbort)
- return new VirtualDataAbort(0, TlbEntry::DomainType::NoAccess, false,
- ArmFault::AsynchronousExternalAbort);
+ return std::make_shared<VirtualDataAbort>(
+ 0, TlbEntry::DomainType::NoAccess, false,
+ ArmFault::AsynchronousExternalAbort);
if (interrupts[INT_RST])
- return new Reset;
+ return std::make_shared<Reset>();
if (interrupts[INT_SEV])
- return new ArmSev;
+ return std::make_shared<ArmSev>();
panic("intStatus and interrupts not in sync\n");
}
Breakpoint::execute(CPU_EXEC_CONTEXT *xc,
Trace::InstRecord *traceData) const
{
- return new PrefetchAbort(xc->pcState().pc(), ArmFault::DebugEvent);
+ return std::make_shared<PrefetchAbort>(xc->pcState().pc(),
+ ArmFault::DebugEvent);
}
}};
FailUnimplemented::execute(CPU_EXEC_CONTEXT *xc,
Trace::InstRecord *traceData) const
{
- return new UndefinedInstruction(machInst, false, mnemonic);
+ return std::make_shared<UndefinedInstruction>(machInst, false,
+ mnemonic);
}
Fault
FlushPipeInst::execute(CPU_EXEC_CONTEXT *xc,
Trace::InstRecord *traceData) const
{
- return new FlushPipe();
+ Fault fault = std::make_shared<FlushPipe>();
+ return fault;
}
}};
if (ArmSystem::haveVirtualization(xc->tcBase()) && hstr.tjdbx &&
!inSecureState(scr, cpsr) && (cpsr.mode != MODE_HYP)) {
- fault = new HypervisorTrap(machInst, op1, EC_TRAPPED_BXJ);
+ fault = std::make_shared<HypervisorTrap>(machInst, op1, EC_TRAPPED_BXJ);
}
IWNPC = Op1;
'''
newPc = xc->tcBase()->readMiscReg(MISCREG_ELR_EL1);
break;
default:
- return new UndefinedInstruction(machInst, false, mnemonic);
+ return std::make_shared<UndefinedInstruction>(machInst,
+ false,
+ mnemonic);
break;
}
if (spsr.width && (newPc & mask(2))) {
flat_idx == MISCREG_DC_CVAC_Xt ||
flat_idx == MISCREG_DC_CIVAC_Xt
)
- return new UndefinedInstruction(machInst, 0, EC_TRAPPED_MSR_MRS_64,
+ return std::make_shared<UndefinedInstruction>(
+ machInst, 0, EC_TRAPPED_MSR_MRS_64,
mnemonic);
- return new UndefinedInstruction(machInst, false, mnemonic);
+ return std::make_shared<UndefinedInstruction>(machInst, false,
+ mnemonic);
}
// Check for traps to supervisor (FP/SIMD regs)
if (el <= EL1 && msrMrs64TrapToSup(flat_idx, el, Cpacr64))
- return new SupervisorTrap(machInst, 0x1E00000, EC_TRAPPED_SIMD_FP);
+ return std::make_shared<SupervisorTrap>(machInst, 0x1E00000,
+ EC_TRAPPED_SIMD_FP);
bool is_vfp_neon = false;
// Check for traps to hypervisor
if ((ArmSystem::haveVirtualization(xc->tcBase()) && el <= EL2) &&
msrMrs64TrapToHyp(flat_idx, %s, CptrEl264, Hcr64, &is_vfp_neon)) {
- return new HypervisorTrap(machInst, is_vfp_neon ? 0x1E00000 : imm,
+ return std::make_shared<HypervisorTrap>(
+ machInst, is_vfp_neon ? 0x1E00000 : imm,
is_vfp_neon ? EC_TRAPPED_SIMD_FP : EC_TRAPPED_MSR_MRS_64);
}
// Check for traps to secure monitor
if ((ArmSystem::haveSecurity(xc->tcBase()) && el <= EL3) &&
msrMrs64TrapToMon(flat_idx, CptrEl364, el, &is_vfp_neon)) {
- return new SecureMonitorTrap(machInst,
+ return std::make_shared<SecureMonitorTrap>(
+ machInst,
is_vfp_neon ? 0x1E00000 : imm,
is_vfp_neon ? EC_TRAPPED_SIMD_FP : EC_TRAPPED_MSR_MRS_64);
}
if (!canWriteAArch64SysReg(
(MiscRegIndex) xc->tcBase()->flattenMiscIndex(dest),
Scr64, Cpsr, xc->tcBase())) {
- return new UndefinedInstruction(machInst, false, mnemonic);
+ return std::make_shared<UndefinedInstruction>(machInst, false,
+ mnemonic);
}
MiscDest_ud = imm;
''', optArgs = ["IsSerializeAfter", "IsNonSpeculative"])
if (!canWriteAArch64SysReg(
(MiscRegIndex) xc->tcBase()->flattenMiscIndex(dest),
Scr64, Cpsr, xc->tcBase())) {
- return new UndefinedInstruction(machInst, 0, EC_TRAPPED_MSR_MRS_64,
+ return std::make_shared<UndefinedInstruction>(
+ machInst, 0, EC_TRAPPED_MSR_MRS_64,
mnemonic);
}
CPSR cpsr = Cpsr;
if (!canWriteAArch64SysReg(
(MiscRegIndex) xc->tcBase()->flattenMiscIndex(dest),
Scr64, Cpsr, xc->tcBase())) {
- return new UndefinedInstruction(machInst, 0, EC_TRAPPED_MSR_MRS_64,
+ return std::make_shared<UndefinedInstruction>(
+ machInst, 0, EC_TRAPPED_MSR_MRS_64,
mnemonic);
}
CPSR cpsr = Cpsr;
break;
}
if (hypTrap) {
- return new HypervisorTrap(machInst, imm,
+ return std::make_shared<HypervisorTrap>(machInst, imm,
EC_TRAPPED_CP10_MRC_VMRS);
}
}
'MicroIntImmXOp', '''
if (isSP((IntRegIndex) urb) && bits(XURb, 3, 0) &&
SPAlignmentCheckEnabled(xc->tcBase())) {
- return new SPAlignmentFault();
+ return std::make_shared<SPAlignmentFault>();
}
XURa = XURb + imm;
''', ['IsMicroop'])
let {{
svcCode = '''
- fault = new SupervisorCall(machInst, imm);
+ fault = std::make_shared<SupervisorCall>(machInst, imm);
'''
svcIop = InstObjParams("svc", "Svc", "ImmOp",
if ((cpsr.mode != MODE_USER) && FullSystem) {
if (ArmSystem::haveVirtualization(xc->tcBase()) &&
!inSecureState(scr, cpsr) && (cpsr.mode != MODE_HYP) && hcr.tsc) {
- fault = new HypervisorTrap(machInst, 0, EC_SMC_TO_HYP);
+ fault = std::make_shared<HypervisorTrap>(machInst, 0,
+ EC_SMC_TO_HYP);
} else {
if (scr.scd) {
fault = disabledFault();
} else {
- fault = new SecureMonitorCall(machInst);
+ fault = std::make_shared<SecureMonitorCall>(machInst);
}
}
} else {
(ArmSystem::haveSecurity(xc->tcBase()) && (!scr.ns || !scr.hce))) {
fault = disabledFault();
} else {
- fault = new HypervisorCall(machInst, imm);
+ fault = std::make_shared<HypervisorCall>(machInst, imm);
}
'''
Dest = xc->readMiscReg(regIdx);
}
} else {
- return new UndefinedInstruction(machInst, false, mnemonic);
+ return std::make_shared<UndefinedInstruction>(machInst, false,
+ mnemonic);
}
'''
mrsBankedRegIop = InstObjParams("mrs", "MrsBankedReg", "MrsOp",
xc->setMiscReg(regIdx, Op1);
}
} else {
- return new UndefinedInstruction(machInst, false, mnemonic);
+ return std::make_shared<UndefinedInstruction>(machInst, false,
+ mnemonic);
}
'''
msrBankedRegIop = InstObjParams("msr", "MsrBankedReg", "MsrRegOp",
decoder_output += RegRegRegRegOpConstructor.subst(usada8Iop)
exec_output += PredOpExecute.subst(usada8Iop)
- bkptCode = 'return new PrefetchAbort(PC, ArmFault::DebugEvent);\n'
+ bkptCode = 'return std::make_shared<PrefetchAbort>(PC, ArmFault::DebugEvent);\n'
bkptIop = InstObjParams("bkpt", "BkptInst", "PredOp", bkptCode)
header_output += BasicDeclare.subst(bkptIop)
decoder_output += BasicConstructor.subst(bkptIop)
PseudoInst::quiesceSkip(tc);
} else if (cpsr.el == EL0 && !sctlr.ntwe) {
PseudoInst::quiesceSkip(tc);
- fault = new SupervisorTrap(machInst, 0x1E00001, EC_TRAPPED_WFI_WFE);
+ fault = std::make_shared<SupervisorTrap>(machInst, 0x1E00001,
+ EC_TRAPPED_WFI_WFE);
} else if (ArmSystem::haveVirtualization(tc) &&
!inSecureState(scr, cpsr) && (cpsr.mode != MODE_HYP) &&
hcr.twe) {
PseudoInst::quiesceSkip(tc);
- fault = new HypervisorTrap(machInst, 0x1E00001, EC_TRAPPED_WFI_WFE);
+ fault = std::make_shared<HypervisorTrap>(machInst, 0x1E00001,
+ EC_TRAPPED_WFI_WFE);
} else if (ArmSystem::haveSecurity(tc) && cpsr.el != EL3 && scr.twe) {
PseudoInst::quiesceSkip(tc);
- fault = new SecureMonitorTrap(machInst, 0x1E00001, EC_TRAPPED_WFI_WFE);
+ fault = std::make_shared<SecureMonitorTrap>(machInst, 0x1E00001,
+ EC_TRAPPED_WFI_WFE);
} else {
PseudoInst::quiesce(tc);
}
PseudoInst::quiesceSkip(tc);
} else if (cpsr.el == EL0 && !sctlr.ntwi) {
PseudoInst::quiesceSkip(tc);
- fault = new SupervisorTrap(machInst, 0x1E00000, EC_TRAPPED_WFI_WFE);
+ fault = std::make_shared<SupervisorTrap>(machInst, 0x1E00000,
+ EC_TRAPPED_WFI_WFE);
} else if (ArmSystem::haveVirtualization(tc) && hcr.twi &&
(cpsr.mode != MODE_HYP) && !inSecureState(scr, cpsr)) {
PseudoInst::quiesceSkip(tc);
- fault = new HypervisorTrap(machInst, 0x1E00000, EC_TRAPPED_WFI_WFE);
+ fault = std::make_shared<HypervisorTrap>(machInst, 0x1E00000,
+ EC_TRAPPED_WFI_WFE);
} else if (ArmSystem::haveSecurity(tc) && cpsr.el != EL3 && scr.twi) {
PseudoInst::quiesceSkip(tc);
- fault = new SecureMonitorTrap(machInst, 0x1E00000, EC_TRAPPED_WFI_WFE);
+ fault = std::make_shared<SecureMonitorTrap>(machInst, 0x1E00000,
+ EC_TRAPPED_WFI_WFE);
} else {
PseudoInst::quiesce(tc);
}
decoder_output += BasicConstructor.subst(itIop)
exec_output += PredOpExecute.subst(itIop)
unknownCode = '''
- return new UndefinedInstruction(machInst, true);
+ return std::make_shared<UndefinedInstruction>(machInst, true);
'''
unknownIop = InstObjParams("unknown", "Unknown", "UnknownOp", \
{ "code": unknownCode,
mrc14code = '''
MiscRegIndex miscReg = (MiscRegIndex) xc->tcBase()->flattenMiscIndex(op1);
if (!canReadCoprocReg(miscReg, Scr, Cpsr, xc->tcBase())) {
- return new UndefinedInstruction(machInst, false, mnemonic);
+ return std::make_shared<UndefinedInstruction>(machInst, false,
+ mnemonic);
}
if (mcrMrc14TrapToHyp((const MiscRegIndex) op1, Hcr, Cpsr, Scr, Hdcr,
Hstr, Hcptr, imm)) {
- return new HypervisorTrap(machInst, imm, EC_TRAPPED_CP14_MCR_MRC);
+ return std::make_shared<HypervisorTrap>(machInst, imm,
+ EC_TRAPPED_CP14_MCR_MRC);
}
Dest = MiscOp1;
'''
mcr14code = '''
MiscRegIndex miscReg = (MiscRegIndex) xc->tcBase()->flattenMiscIndex(dest);
if (!canWriteCoprocReg(miscReg, Scr, Cpsr, xc->tcBase())) {
- return new UndefinedInstruction(machInst, false, mnemonic);
+ return std::make_shared<UndefinedInstruction>(machInst, false,
+ mnemonic);
}
if (mcrMrc14TrapToHyp(miscReg, Hcr, Cpsr, Scr, Hdcr,
Hstr, Hcptr, imm)) {
- return new HypervisorTrap(machInst, imm, EC_TRAPPED_CP14_MCR_MRC);
+ return std::make_shared<HypervisorTrap>(machInst, imm,
+ EC_TRAPPED_CP14_MCR_MRC);
}
MiscDest = Op1;
'''
// the register is accessible, in other modes we trap only if the register
// IS accessible.
if (!canRead && !(hypTrap && !inUserMode(Cpsr) && !inSecureState(Scr, Cpsr))) {
- return new UndefinedInstruction(machInst, false, mnemonic);
+ return std::make_shared<UndefinedInstruction>(machInst, false,
+ mnemonic);
}
if (hypTrap) {
- return new HypervisorTrap(machInst, imm, EC_TRAPPED_CP15_MCR_MRC);
+ return std::make_shared<HypervisorTrap>(machInst, imm,
+ EC_TRAPPED_CP15_MCR_MRC);
}
Dest = MiscNsBankedOp1;
'''
// the register is accessible, in other modes we trap only if the register
// IS accessible.
if (!canWrite & !(hypTrap & !inUserMode(Cpsr) & !inSecureState(Scr, Cpsr))) {
- return new UndefinedInstruction(machInst, false, mnemonic);
+ return std::make_shared<UndefinedInstruction>(machInst, false,
+ mnemonic);
}
if (hypTrap) {
- return new HypervisorTrap(machInst, imm, EC_TRAPPED_CP15_MCR_MRC);
+ return std::make_shared<HypervisorTrap>(machInst, imm,
+ EC_TRAPPED_CP15_MCR_MRC);
}
MiscNsBankedDest = Op1;
'''
// the register is accessible, in other modes we trap only if the register
// IS accessible.
if (!canRead && !(hypTrap && !inUserMode(Cpsr) && !inSecureState(Scr, Cpsr))) {
- return new UndefinedInstruction(machInst, false, mnemonic);
+ return std::make_shared<UndefinedInstruction>(machInst, false,
+ mnemonic);
}
if (hypTrap) {
- return new HypervisorTrap(machInst, imm, EC_TRAPPED_CP15_MCRR_MRRC);
+ return std::make_shared<HypervisorTrap>(machInst, imm,
+ EC_TRAPPED_CP15_MCRR_MRRC);
}
Dest = bits(MiscNsBankedOp164, 63, 32);
Dest2 = bits(MiscNsBankedOp164, 31, 0);
// the register is accessible, in other modes we trap only if the register
// IS accessible.
if (!canWrite & !(hypTrap & !inUserMode(Cpsr) & !inSecureState(Scr, Cpsr))) {
- return new UndefinedInstruction(machInst, false, mnemonic);
+ return std::make_shared<UndefinedInstruction>(machInst, false,
+ mnemonic);
}
if (hypTrap) {
- return new HypervisorTrap(machInst, imm, EC_TRAPPED_CP15_MCRR_MRRC);
+ return std::make_shared<HypervisorTrap>(machInst, imm,
+ EC_TRAPPED_CP15_MCRR_MRRC);
}
MiscNsBankedDest64 = ((uint64_t) Op1 << 32) | Op2;
'''
// If the barrier is due to a CP15 access check for hyp traps
if ((imm != 0) && mcrMrc15TrapToHyp(MISCREG_CP15ISB, Hcr, Cpsr, Scr,
Hdcr, Hstr, Hcptr, imm)) {
- return new HypervisorTrap(machInst, imm,
+ return std::make_shared<HypervisorTrap>(machInst, imm,
EC_TRAPPED_CP15_MCR_MRC);
}
- fault = new FlushPipe;
+ fault = std::make_shared<FlushPipe>();
'''
isbIop = InstObjParams("isb", "Isb", "ImmOp",
{"code": isbCode,
// If the barrier is due to a CP15 access check for hyp traps
if ((imm != 0) && mcrMrc15TrapToHyp(MISCREG_CP15DSB, Hcr, Cpsr, Scr,
Hdcr, Hstr, Hcptr, imm)) {
- return new HypervisorTrap(machInst, imm,
+ return std::make_shared<HypervisorTrap>(machInst, imm,
EC_TRAPPED_CP15_MCR_MRC);
}
- fault = new FlushPipe;
+ fault = std::make_shared<FlushPipe>();
'''
dsbIop = InstObjParams("dsb", "Dsb", "ImmOp",
{"code": dsbCode,
// If the barrier is due to a CP15 access check for hyp traps
if ((imm != 0) && mcrMrc15TrapToHyp(MISCREG_CP15DMB, Hcr, Cpsr, Scr,
Hdcr, Hstr, Hcptr, imm)) {
- return new HypervisorTrap(machInst, imm,
+ return std::make_shared<HypervisorTrap>(machInst, imm,
EC_TRAPPED_CP15_MCR_MRC);
}
'''
let {{
svcCode = '''
- fault = new SupervisorCall(machInst, bits(machInst, 20, 5));
+ fault = std::make_shared<SupervisorCall>(machInst, bits(machInst, 20, 5));
'''
svcIop = InstObjParams("svc", "Svc64", "ArmStaticInst",
if (!ArmSystem::haveSecurity(xc->tcBase()) || inUserMode(cpsr) || scr.smd) {
fault = disabledFault();
} else {
- fault = new SecureMonitorCall(machInst);
+ fault = std::make_shared<SecureMonitorCall>(machInst);
}
'''
subst("RegRegRegImmOp64", extrIop);
unknownCode = '''
- return new UndefinedInstruction(machInst, true);
+ return std::make_shared<UndefinedInstruction>(machInst, true);
'''
unknown64Iop = InstObjParams("unknown", "Unknown64", "UnknownOp64",
unknownCode)
exec_output += BasicExecute.subst(unknown64Iop)
isbIop = InstObjParams("isb", "Isb64", "ArmStaticInst",
- "fault = new FlushPipe;", ['IsSerializeAfter'])
+ "fault = std::make_shared<FlushPipe>();",
+ ['IsSerializeAfter'])
header_output += BasicDeclare.subst(isbIop)
decoder_output += BasicConstructor64.subst(isbIop)
exec_output += BasicExecute.subst(isbIop)
dsbIop = InstObjParams("dsb", "Dsb64", "ArmStaticInst",
- "fault = new FlushPipe;",
+ "fault = std::make_shared<FlushPipe>();",
['IsMemBarrier', 'IsSerializeAfter'])
header_output += BasicDeclare.subst(dsbIop)
decoder_output += BasicConstructor64.subst(dsbIop)
readDestCode = 'destElem = gtoh(destReg.elements[i]);'
eWalkCode += '''
if (imm < 0 && imm >= eCount) {
- fault = new UndefinedInstruction(machInst, false, mnemonic);
+ fault = std::make_shared<UndefinedInstruction>(machInst, false,
+ mnemonic);
} else {
for (unsigned i = 0; i < eCount; i++) {
Element srcElem1 = gtoh(srcReg1.elements[i]);
readDestCode = 'destElem = gtoh(destReg.elements[i]);'
eWalkCode += '''
if (imm < 0 && imm >= eCount) {
- fault = new UndefinedInstruction(machInst, false, mnemonic);
+ fault = std::make_shared<UndefinedInstruction>(machInst, false,
+ mnemonic);
} else {
for (unsigned i = 0; i < eCount; i++) {
Element srcElem1 = gtoh(srcReg1.elements[i]);
readDestCode = 'destReg = destRegs[i];'
eWalkCode += '''
if (imm < 0 && imm >= eCount) {
- fault = new UndefinedInstruction(machInst, false, mnemonic);
+ fault = std::make_shared<UndefinedInstruction>(machInst, false,
+ mnemonic);
} else {
for (unsigned i = 0; i < rCount; i++) {
FloatReg srcReg1 = srcRegs1[i];
} else {
index -= eCount;
if (index >= eCount) {
- fault = new UndefinedInstruction(machInst, false, mnemonic);
+ fault = std::make_shared<UndefinedInstruction>(machInst,
+ false,
+ mnemonic);
} else {
destReg.elements[i] = srcReg2.elements[index];
}
} else {
index -= eCount;
if (index >= eCount) {
- fault = new UndefinedInstruction(machInst, false, mnemonic);
+ fault = std::make_shared<UndefinedInstruction>(
+ machInst, false, mnemonic);
} else {
destReg.elements[i] = srcReg2.elements[index];
}
SPAlignmentCheckCodeNeon = '''
if (baseIsSP && bits(XURa, 3, 0) &&
SPAlignmentCheckEnabled(xc->tcBase())) {
- return new SPAlignmentFault();
+ return std::make_shared<SPAlignmentFault>();
}
'''
eaCode = SPAlignmentCheckCodeNeon + '''
swpPreAccCode = '''
if (!((SCTLR)Sctlr).sw) {
- return new UndefinedInstruction(machInst, false, mnemonic);
+ return std::make_shared<UndefinedInstruction>(machInst, false,
+ mnemonic);
}
'''
SPAlignmentCheckCode = '''
if (baseIsSP && bits(XBase, 3, 0) &&
SPAlignmentCheckEnabled(xc->tcBase())) {
- return new SPAlignmentFault();
+ return std::make_shared<SPAlignmentFault>();
}
'''
}};
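The alignment-check snippets above encode the AArch64 stack-alignment rule: when the base register is SP and SP checking is enabled, the low four bits of the address must be zero (16-byte alignment), otherwise an SPAlignmentFault is raised. In plain C++ the bits(X, 3, 0) test is just a mask of the low nibble, shown here as a hypothetical helper outside the patch:

    // Sketch of the SP alignment predicate: bits(sp, 3, 0) != 0 means the
    // stack pointer is not 16-byte aligned. Not gem5 code.
    #include <cstdint>

    bool
    spMisaligned(uint64_t sp)
    {
        return (sp & 0xf) != 0;
    }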
if (trapEnCheck) {
CPSR cpsrEnCheck = Cpsr;
if (cpsrEnCheck.mode == MODE_HYP) {
- return new UndefinedInstruction(machInst, issEnCheck,
+ return std::make_shared<UndefinedInstruction>(
+ machInst, issEnCheck,
EC_TRAPPED_HCPTR);
} else {
if (!inSecureState(Scr, Cpsr)) {
- return new HypervisorTrap(machInst, issEnCheck,
+ return std::make_shared<HypervisorTrap>(
+ machInst, issEnCheck,
EC_TRAPPED_HCPTR);
}
}
if (trapEnCheck) {
CPSR cpsrEnCheck = Cpsr;
if (cpsrEnCheck.mode == MODE_HYP) {
- return new UndefinedInstruction(machInst, issEnCheck,
+ return std::make_shared<UndefinedInstruction>(
+ machInst, issEnCheck,
EC_TRAPPED_HCPTR, mnemonic);
} else {
if (!inSecureState(Scr, Cpsr)) {
- return new HypervisorTrap(machInst, issEnCheck,
+ return std::make_shared<HypervisorTrap>(
+ machInst, issEnCheck,
EC_TRAPPED_HCPTR);
}
}
CPSR cpsrEnCheck = Cpsr;
ExceptionLevel el = (ExceptionLevel) (uint8_t) cpsrEnCheck.el;
if (!vfpNeon64Enabled(Cpacr64, el))
- return new SupervisorTrap(machInst, 0x1E00000,
+ return std::make_shared<SupervisorTrap>(machInst, 0x1E00000,
EC_TRAPPED_SIMD_FP);
if (ArmSystem::haveVirtualization(xc->tcBase()) && el <= EL2) {
HCPTR cptrEnCheck = xc->tcBase()->readMiscReg(MISCREG_CPTR_EL2);
if (cptrEnCheck.tfp)
- return new HypervisorTrap(machInst, 0x1E00000,
+ return std::make_shared<HypervisorTrap>(machInst, 0x1E00000,
EC_TRAPPED_SIMD_FP);
}
if (ArmSystem::haveSecurity(xc->tcBase())) {
HCPTR cptrEnCheck = xc->tcBase()->readMiscReg(MISCREG_CPTR_EL3);
if (cptrEnCheck.tfp)
- return new SecureMonitorTrap(machInst, 0x1E00000,
+ return std::make_shared<SecureMonitorTrap>(machInst, 0x1E00000,
EC_TRAPPED_SIMD_FP);
}
'''
if (trapEnCheck) {
CPSR cpsrEnCheck = Cpsr;
if (cpsrEnCheck.mode == MODE_HYP) {
- return new UndefinedInstruction(machInst, issEnCheck,
+ return std::make_shared<UndefinedInstruction>(
+ machInst, issEnCheck,
EC_TRAPPED_HCPTR, mnemonic);
} else {
if (!inSecureState(Scr, Cpsr)) {
- return new HypervisorTrap(machInst, issEnCheck,
+ return std::make_shared<HypervisorTrap>(
+ machInst, issEnCheck,
EC_TRAPPED_HCPTR);
}
}
if (trapEnCheck) {
CPSR cpsrEnCheck = Cpsr;
if (cpsrEnCheck.mode == MODE_HYP) {
- return new UndefinedInstruction(machInst, issEnCheck,
+ return std::make_shared<UndefinedInstruction>(
+ machInst, issEnCheck,
EC_TRAPPED_HCPTR, mnemonic);
} else {
if (!inSecureState(Scr, Cpsr)) {
- return new HypervisorTrap(machInst, issEnCheck,
+ return std::make_shared<HypervisorTrap>(
+ machInst, issEnCheck,
EC_TRAPPED_HCPTR);
}
}
if (trapEnCheck) {
CPSR cpsrEnCheck = Cpsr;
if (cpsrEnCheck.mode == MODE_HYP) {
- return new UndefinedInstruction(machInst, issEnCheck,
+ return std::make_shared<UndefinedInstruction>(
+ machInst, issEnCheck,
EC_TRAPPED_HCPTR, mnemonic);
} else {
if (!inSecureState(Scr, Cpsr)) {
- return new HypervisorTrap(machInst, issEnCheck,
+ return std::make_shared<HypervisorTrap>(
+ machInst, issEnCheck,
EC_TRAPPED_HCPTR);
}
}
* Giacomo Gabrielli
*/
+#include <memory>
+
#include "arch/arm/faults.hh"
#include "arch/arm/stage2_mmu.hh"
#include "arch/arm/system.hh"
// this fault to re-execute the faulting instruction which should clean
// up everything.
if (currState->vaddr_tainted == _req->getVaddr()) {
- return new ReExec;
+ return std::make_shared<ReExec>();
}
}
if (currState->transState->squashed()) {
// finish the translation which will delete the translation object
- currState->transState->finish(new UnimpFault("Squashed Inst"),
- currState->req, currState->tc, currState->mode);
+ currState->transState->finish(
+ std::make_shared<UnimpFault>("Squashed Inst"),
+ currState->req, currState->tc, currState->mode);
} else {
// translate the request now that we know it will work
tlb->translateTiming(currState->req, currState->tc,
// Check if table walk is allowed when Security Extensions are enabled
if (haveSecurity && currState->ttbcr.pd0) {
if (currState->isFetch)
- return new PrefetchAbort(currState->vaddr_tainted,
- ArmFault::TranslationLL + L1,
- isStage2,
- ArmFault::VmsaTran);
+ return std::make_shared<PrefetchAbort>(
+ currState->vaddr_tainted,
+ ArmFault::TranslationLL + L1,
+ isStage2,
+ ArmFault::VmsaTran);
else
- return new DataAbort(currState->vaddr_tainted,
- TlbEntry::DomainType::NoAccess, currState->isWrite,
- ArmFault::TranslationLL + L1, isStage2,
- ArmFault::VmsaTran);
+ return std::make_shared<DataAbort>(
+ currState->vaddr_tainted,
+ TlbEntry::DomainType::NoAccess, currState->isWrite,
+ ArmFault::TranslationLL + L1, isStage2,
+ ArmFault::VmsaTran);
}
ttbr = currState->tc->readMiscReg(flattenMiscRegNsBanked(
MISCREG_TTBR0, currState->tc, !currState->isSecure));
// Check if table walk is allowed when Security Extensions are enabled
if (haveSecurity && currState->ttbcr.pd1) {
if (currState->isFetch)
- return new PrefetchAbort(currState->vaddr_tainted,
- ArmFault::TranslationLL + L1,
- isStage2,
- ArmFault::VmsaTran);
+ return std::make_shared<PrefetchAbort>(
+ currState->vaddr_tainted,
+ ArmFault::TranslationLL + L1,
+ isStage2,
+ ArmFault::VmsaTran);
else
- return new DataAbort(currState->vaddr_tainted,
- TlbEntry::DomainType::NoAccess, currState->isWrite,
- ArmFault::TranslationLL + L1, isStage2,
- ArmFault::VmsaTran);
+ return std::make_shared<DataAbort>(
+ currState->vaddr_tainted,
+ TlbEntry::DomainType::NoAccess, currState->isWrite,
+ ArmFault::TranslationLL + L1, isStage2,
+ ArmFault::VmsaTran);
}
ttbr = currState->tc->readMiscReg(flattenMiscRegNsBanked(
MISCREG_TTBR1, currState->tc, !currState->isSecure));
// Check if table walk is allowed
if (currState->ttbcr.epd0) {
if (currState->isFetch)
- return new PrefetchAbort(currState->vaddr_tainted,
- ArmFault::TranslationLL + L1,
- isStage2,
- ArmFault::LpaeTran);
+ return std::make_shared<PrefetchAbort>(
+ currState->vaddr_tainted,
+ ArmFault::TranslationLL + L1,
+ isStage2,
+ ArmFault::LpaeTran);
else
- return new DataAbort(currState->vaddr_tainted,
- TlbEntry::DomainType::NoAccess,
- currState->isWrite,
- ArmFault::TranslationLL + L1,
- isStage2,
- ArmFault::LpaeTran);
+ return std::make_shared<DataAbort>(
+ currState->vaddr_tainted,
+ TlbEntry::DomainType::NoAccess,
+ currState->isWrite,
+ ArmFault::TranslationLL + L1,
+ isStage2,
+ ArmFault::LpaeTran);
}
ttbr = currState->tc->readMiscReg(flattenMiscRegNsBanked(
MISCREG_TTBR0, currState->tc, !currState->isSecure));
// Check if table walk is allowed
if (currState->ttbcr.epd1) {
if (currState->isFetch)
- return new PrefetchAbort(currState->vaddr_tainted,
- ArmFault::TranslationLL + L1,
- isStage2,
- ArmFault::LpaeTran);
+ return std::make_shared<PrefetchAbort>(
+ currState->vaddr_tainted,
+ ArmFault::TranslationLL + L1,
+ isStage2,
+ ArmFault::LpaeTran);
else
- return new DataAbort(currState->vaddr_tainted,
- TlbEntry::DomainType::NoAccess,
- currState->isWrite,
- ArmFault::TranslationLL + L1,
- isStage2,
- ArmFault::LpaeTran);
+ return std::make_shared<DataAbort>(
+ currState->vaddr_tainted,
+ TlbEntry::DomainType::NoAccess,
+ currState->isWrite,
+ ArmFault::TranslationLL + L1,
+ isStage2,
+ ArmFault::LpaeTran);
}
ttbr = currState->tc->readMiscReg(flattenMiscRegNsBanked(
MISCREG_TTBR1, currState->tc, !currState->isSecure));
} else {
// Out of boundaries -> translation fault
if (currState->isFetch)
- return new PrefetchAbort(currState->vaddr_tainted,
- ArmFault::TranslationLL + L1,
- isStage2,
- ArmFault::LpaeTran);
+ return std::make_shared<PrefetchAbort>(
+ currState->vaddr_tainted,
+ ArmFault::TranslationLL + L1,
+ isStage2,
+ ArmFault::LpaeTran);
else
- return new DataAbort(currState->vaddr_tainted,
- TlbEntry::DomainType::NoAccess,
- currState->isWrite, ArmFault::TranslationLL + L1,
- isStage2, ArmFault::LpaeTran);
+ return std::make_shared<DataAbort>(
+ currState->vaddr_tainted,
+ TlbEntry::DomainType::NoAccess,
+ currState->isWrite, ArmFault::TranslationLL + L1,
+ isStage2, ArmFault::LpaeTran);
}
}
if (fault) {
Fault f;
if (currState->isFetch)
- f = new PrefetchAbort(currState->vaddr_tainted,
- ArmFault::TranslationLL + L0, isStage2,
- ArmFault::LpaeTran);
+ f = std::make_shared<PrefetchAbort>(
+ currState->vaddr_tainted,
+ ArmFault::TranslationLL + L0, isStage2,
+ ArmFault::LpaeTran);
else
- f = new DataAbort(currState->vaddr_tainted,
- TlbEntry::DomainType::NoAccess,
- currState->isWrite,
- ArmFault::TranslationLL + L0,
- isStage2, ArmFault::LpaeTran);
+ f = std::make_shared<DataAbort>(
+ currState->vaddr_tainted,
+ TlbEntry::DomainType::NoAccess,
+ currState->isWrite,
+ ArmFault::TranslationLL + L0,
+ isStage2, ArmFault::LpaeTran);
if (currState->timing) {
pending = false;
DPRINTF(TLB, "Address size fault before any lookup\n");
Fault f;
if (currState->isFetch)
- f = new PrefetchAbort(currState->vaddr_tainted,
- ArmFault::AddressSizeLL + start_lookup_level,
- isStage2,
- ArmFault::LpaeTran);
+ f = std::make_shared<PrefetchAbort>(
+ currState->vaddr_tainted,
+ ArmFault::AddressSizeLL + start_lookup_level,
+ isStage2,
+ ArmFault::LpaeTran);
else
- f = new DataAbort(currState->vaddr_tainted,
- TlbEntry::DomainType::NoAccess,
- currState->isWrite,
- ArmFault::AddressSizeLL + start_lookup_level,
- isStage2,
- ArmFault::LpaeTran);
+ f = std::make_shared<DataAbort>(
+ currState->vaddr_tainted,
+ TlbEntry::DomainType::NoAccess,
+ currState->isWrite,
+ ArmFault::AddressSizeLL + start_lookup_level,
+ isStage2,
+ ArmFault::LpaeTran);
if (currState->timing) {
DPRINTF(TLB, "L1 Descriptor Reserved/Ignore, causing fault\n");
if (currState->isFetch)
currState->fault =
- new PrefetchAbort(currState->vaddr_tainted,
- ArmFault::TranslationLL + L1,
- isStage2,
- ArmFault::VmsaTran);
+ std::make_shared<PrefetchAbort>(
+ currState->vaddr_tainted,
+ ArmFault::TranslationLL + L1,
+ isStage2,
+ ArmFault::VmsaTran);
else
currState->fault =
- new DataAbort(currState->vaddr_tainted,
- TlbEntry::DomainType::NoAccess,
- currState->isWrite,
- ArmFault::TranslationLL + L1, isStage2,
- ArmFault::VmsaTran);
+ std::make_shared<DataAbort>(
+ currState->vaddr_tainted,
+ TlbEntry::DomainType::NoAccess,
+ currState->isWrite,
+ ArmFault::TranslationLL + L1, isStage2,
+ ArmFault::VmsaTran);
return;
case L1Descriptor::Section:
if (currState->sctlr.afe && bits(currState->l1Desc.ap(), 0) == 0) {
* AccessFlag0
*/
- currState->fault = new DataAbort(currState->vaddr_tainted,
- currState->l1Desc.domain(),
- currState->isWrite,
- ArmFault::AccessFlagLL + L1,
- isStage2,
- ArmFault::VmsaTran);
+ currState->fault = std::make_shared<DataAbort>(
+ currState->vaddr_tainted,
+ currState->l1Desc.domain(),
+ currState->isWrite,
+ ArmFault::AccessFlagLL + L1,
+ isStage2,
+ ArmFault::VmsaTran);
}
if (currState->l1Desc.supersection()) {
panic("Haven't implemented supersections\n");
currState->longDesc.lookupLevel,
ArmFault::TranslationLL + currState->longDesc.lookupLevel);
if (currState->isFetch)
- currState->fault = new PrefetchAbort(
+ currState->fault = std::make_shared<PrefetchAbort>(
currState->vaddr_tainted,
ArmFault::TranslationLL + currState->longDesc.lookupLevel,
isStage2,
ArmFault::LpaeTran);
else
- currState->fault = new DataAbort(
+ currState->fault = std::make_shared<DataAbort>(
currState->vaddr_tainted,
TlbEntry::DomainType::NoAccess,
currState->isWrite,
}
if (fault) {
if (currState->isFetch)
- currState->fault = new PrefetchAbort(
+ currState->fault = std::make_shared<PrefetchAbort>(
currState->vaddr_tainted,
(aff ? ArmFault::AccessFlagLL : ArmFault::AddressSizeLL) +
currState->longDesc.lookupLevel,
isStage2,
ArmFault::LpaeTran);
else
- currState->fault = new DataAbort(
+ currState->fault = std::make_shared<DataAbort>(
currState->vaddr_tainted,
TlbEntry::DomainType::NoAccess, currState->isWrite,
(aff ? ArmFault::AccessFlagLL : ArmFault::AddressSizeLL) +
DPRINTF(TLB, "L%d descriptor causing Address Size Fault\n",
currState->longDesc.lookupLevel);
if (currState->isFetch)
- currState->fault = new PrefetchAbort(
+ currState->fault = std::make_shared<PrefetchAbort>(
currState->vaddr_tainted,
ArmFault::AddressSizeLL
+ currState->longDesc.lookupLevel,
isStage2,
ArmFault::LpaeTran);
else
- currState->fault = new DataAbort(
+ currState->fault = std::make_shared<DataAbort>(
currState->vaddr_tainted,
TlbEntry::DomainType::NoAccess, currState->isWrite,
ArmFault::AddressSizeLL
currState->req = NULL;
}
if (currState->isFetch)
- currState->fault =
- new PrefetchAbort(currState->vaddr_tainted,
- ArmFault::TranslationLL + L2,
- isStage2,
- ArmFault::VmsaTran);
+ currState->fault = std::make_shared<PrefetchAbort>(
+ currState->vaddr_tainted,
+ ArmFault::TranslationLL + L2,
+ isStage2,
+ ArmFault::VmsaTran);
else
- currState->fault =
- new DataAbort(currState->vaddr_tainted, currState->l1Desc.domain(),
- currState->isWrite, ArmFault::TranslationLL + L2,
- isStage2,
- ArmFault::VmsaTran);
+ currState->fault = std::make_shared<DataAbort>(
+ currState->vaddr_tainted, currState->l1Desc.domain(),
+ currState->isWrite, ArmFault::TranslationLL + L2,
+ isStage2,
+ ArmFault::VmsaTran);
return;
}
DPRINTF(TLB, "Generating access fault at L2, afe: %d, ap: %d\n",
currState->sctlr.afe, currState->l2Desc.ap());
- currState->fault =
- new DataAbort(currState->vaddr_tainted,
- TlbEntry::DomainType::NoAccess, currState->isWrite,
- ArmFault::AccessFlagLL + L2, isStage2,
- ArmFault::VmsaTran);
+ currState->fault = std::make_shared<DataAbort>(
+ currState->vaddr_tainted,
+ TlbEntry::DomainType::NoAccess, currState->isWrite,
+ ArmFault::AccessFlagLL + L2, isStage2,
+ ArmFault::VmsaTran);
}
insertTableEntry(currState->l2Desc, false);
#include "mem/request.hh"
#include "params/ArmTableWalker.hh"
#include "sim/eventq.hh"
-#include "sim/fault_fwd.hh"
class ThreadContext;
* Steve Reinhardt
*/
+#include <memory>
#include <string>
#include <vector>
if (sctlr.a || !(flags & AllowUnaligned)) {
if (vaddr & mask(flags & AlignmentMask)) {
// LPAE is always disabled in SE mode
- return new DataAbort(vaddr_tainted,
- TlbEntry::DomainType::NoAccess, is_write,
- ArmFault::AlignmentFault, isStage2,
- ArmFault::VmsaTran);
+ return std::make_shared<DataAbort>(
+ vaddr_tainted,
+ TlbEntry::DomainType::NoAccess, is_write,
+ ArmFault::AlignmentFault, isStage2,
+ ArmFault::VmsaTran);
}
}
}
Process *p = tc->getProcessPtr();
if (!p->pTable->translate(vaddr, paddr))
- return Fault(new GenericPageTableFault(vaddr_tainted));
+ return std::make_shared<GenericPageTableFault>(vaddr_tainted);
req->setPaddr(paddr);
return NoFault;
// as a device or strongly ordered.
if (isStage2 && req->isPTWalk() && hcr.ptw &&
(te->mtype != TlbEntry::MemoryType::Normal)) {
- return new DataAbort(vaddr, te->domain, is_write,
- ArmFault::PermissionLL + te->lookupLevel,
- isStage2, tranMethod);
+ return std::make_shared<DataAbort>(
+ vaddr, te->domain, is_write,
+ ArmFault::PermissionLL + te->lookupLevel,
+ isStage2, tranMethod);
}
// Generate an alignment fault for unaligned data accesses to device or
if (te->mtype != TlbEntry::MemoryType::Normal) {
if (vaddr & mask(flags & AlignmentMask)) {
alignFaults++;
- return new DataAbort(vaddr, TlbEntry::DomainType::NoAccess, is_write,
- ArmFault::AlignmentFault, isStage2,
- tranMethod);
+ return std::make_shared<DataAbort>(
+ vaddr, TlbEntry::DomainType::NoAccess, is_write,
+ ArmFault::AlignmentFault, isStage2,
+ tranMethod);
}
}
}
if (req->isPrefetch()) {
// Here we can safely use the fault status for the short
// desc. format in all cases
- return new PrefetchAbort(vaddr, ArmFault::PrefetchUncacheable,
- isStage2, tranMethod);
+ return std::make_shared<PrefetchAbort>(
+ vaddr, ArmFault::PrefetchUncacheable,
+ isStage2, tranMethod);
}
}
" domain: %#x write:%d\n", dacr,
static_cast<uint8_t>(te->domain), is_write);
if (is_fetch)
- return new PrefetchAbort(vaddr,
- ArmFault::DomainLL + te->lookupLevel,
- isStage2, tranMethod);
+ return std::make_shared<PrefetchAbort>(
+ vaddr,
+ ArmFault::DomainLL + te->lookupLevel,
+ isStage2, tranMethod);
else
- return new DataAbort(vaddr, te->domain, is_write,
- ArmFault::DomainLL + te->lookupLevel,
- isStage2, tranMethod);
+ return std::make_shared<DataAbort>(
+ vaddr, te->domain, is_write,
+ ArmFault::DomainLL + te->lookupLevel,
+ isStage2, tranMethod);
case 1:
// Continue with permissions check
break;
DPRINTF(TLB, "TLB Fault: Prefetch abort on permission check. AP:%d "
"priv:%d write:%d ns:%d sif:%d sctlr.afe: %d \n",
ap, is_priv, is_write, te->ns, scr.sif,sctlr.afe);
- return new PrefetchAbort(vaddr,
- ArmFault::PermissionLL + te->lookupLevel,
- isStage2, tranMethod);
+ return std::make_shared<PrefetchAbort>(
+ vaddr,
+ ArmFault::PermissionLL + te->lookupLevel,
+ isStage2, tranMethod);
} else if (abt | hapAbt) {
permsFaults++;
DPRINTF(TLB, "TLB Fault: Data abort on permission check. AP:%d priv:%d"
" write:%d\n", ap, is_priv, is_write);
- return new DataAbort(vaddr, te->domain, is_write,
- ArmFault::PermissionLL + te->lookupLevel,
- isStage2 | !abt, tranMethod);
+ return std::make_shared<DataAbort>(
+ vaddr, te->domain, is_write,
+ ArmFault::PermissionLL + te->lookupLevel,
+ isStage2 | !abt, tranMethod);
}
return NoFault;
}
// as a device or strongly ordered.
if (isStage2 && req->isPTWalk() && hcr.ptw &&
(te->mtype != TlbEntry::MemoryType::Normal)) {
- return new DataAbort(vaddr_tainted, te->domain, is_write,
- ArmFault::PermissionLL + te->lookupLevel,
- isStage2, ArmFault::LpaeTran);
+ return std::make_shared<DataAbort>(
+ vaddr_tainted, te->domain, is_write,
+ ArmFault::PermissionLL + te->lookupLevel,
+ isStage2, ArmFault::LpaeTran);
}
// Generate an alignment fault for unaligned accesses to device or
if (te->mtype != TlbEntry::MemoryType::Normal) {
if (vaddr & mask(flags & AlignmentMask)) {
alignFaults++;
- return new DataAbort(vaddr_tainted,
- TlbEntry::DomainType::NoAccess, is_write,
- ArmFault::AlignmentFault, isStage2,
- ArmFault::LpaeTran);
+ return std::make_shared<DataAbort>(
+ vaddr_tainted,
+ TlbEntry::DomainType::NoAccess, is_write,
+ ArmFault::AlignmentFault, isStage2,
+ ArmFault::LpaeTran);
}
}
}
if (req->isPrefetch()) {
// Here we can safely use the fault status for the short
// desc. format in all cases
- return new PrefetchAbort(vaddr_tainted,
- ArmFault::PrefetchUncacheable,
- isStage2, ArmFault::LpaeTran);
+ return std::make_shared<PrefetchAbort>(
+ vaddr_tainted,
+ ArmFault::PrefetchUncacheable,
+ isStage2, ArmFault::LpaeTran);
}
}
ap, is_priv, is_write, te->ns, scr.sif, sctlr.afe);
// Use PC value instead of vaddr because vaddr might be aligned to
// cache line and should not be the address reported in FAR
- return new PrefetchAbort(req->getPC(),
- ArmFault::PermissionLL + te->lookupLevel,
- isStage2, ArmFault::LpaeTran);
+ return std::make_shared<PrefetchAbort>(
+ req->getPC(),
+ ArmFault::PermissionLL + te->lookupLevel,
+ isStage2, ArmFault::LpaeTran);
} else {
permsFaults++;
DPRINTF(TLB, "TLB Fault: Data abort on permission check. AP:%d "
"priv:%d write:%d\n", ap, is_priv, is_write);
- return new DataAbort(vaddr_tainted, te->domain, is_write,
- ArmFault::PermissionLL + te->lookupLevel,
- isStage2, ArmFault::LpaeTran);
+ return std::make_shared<DataAbort>(
+ vaddr_tainted, te->domain, is_write,
+ ArmFault::PermissionLL + te->lookupLevel,
+ isStage2, ArmFault::LpaeTran);
}
}
// Generate an alignment fault for unaligned PC
if (aarch64 && is_fetch && (req->getPC() & mask(2))) {
- return new PCAlignmentFault(req->getPC());
+ return std::make_shared<PCAlignmentFault>(req->getPC());
}
// If this is a clrex instruction, provide a PA of 0 with no fault
if (sctlr.a || !(flags & AllowUnaligned)) {
if (vaddr & mask(flags & AlignmentMask)) {
alignFaults++;
- return new DataAbort(vaddr_tainted,
- TlbEntry::DomainType::NoAccess, is_write,
- ArmFault::AlignmentFault, isStage2,
- tranMethod);
+ return std::make_shared<DataAbort>(
+ vaddr_tainted,
+ TlbEntry::DomainType::NoAccess, is_write,
+ ArmFault::AlignmentFault, isStage2,
+ tranMethod);
}
}
}
// Unaligned accesses to Device memory should always cause an
// abort regardless of sctlr.a
alignFaults++;
- return new DataAbort(vaddr_tainted,
- TlbEntry::DomainType::NoAccess, is_write,
- ArmFault::AlignmentFault, isStage2,
- tranMethod);
+ return std::make_shared<DataAbort>(
+ vaddr_tainted,
+ TlbEntry::DomainType::NoAccess, is_write,
+ ArmFault::AlignmentFault, isStage2,
+ tranMethod);
}
// Check for a trickbox generated address fault
if (fault == NoFault) {
CPSR cpsr = tc->readMiscReg(MISCREG_CPSR);
if (aarch64 && is_fetch && cpsr.il == 1) {
- return new IllegalInstSetStateFault();
+ return std::make_shared<IllegalInstSetStateFault>();
}
}
// any further with the memory access (here we can safely use the
// fault status for the short desc. format in all cases)
prefetchFaults++;
- return new PrefetchAbort(vaddr_tainted, ArmFault::PrefetchTLBMiss, isStage2);
+ return std::make_shared<PrefetchAbort>(
+ vaddr_tainted, ArmFault::PrefetchTLBMiss, isStage2);
}
if (is_fetch)
#include "dev/dma_device.hh"
#include "mem/request.hh"
#include "params/ArmTLB.hh"
-#include "sim/fault_fwd.hh"
#include "sim/probe/pmu.hh"
#include "sim/tlb.hh"
* Authors: Ali Saidi
*/
+#include <memory>
+
#include "arch/arm/faults.hh"
#include "arch/arm/isa_traits.hh"
// FPEXC.EN = 0
- static Fault reset = new Reset;
+ static Fault reset = std::make_shared<Reset>();
reset->invoke(tc);
}
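The FPEXC path above builds its Reset fault once, as a function-local static, and reuses it on every call; with Fault now a shared_ptr, that single Reset object simply stays alive for the lifetime of the simulator. The reuse pattern in isolation, with placeholder types rather than the real gem5 ones:

    // Sketch of the reuse pattern: one fault object constructed on first
    // use and shared across calls. Placeholder types, not gem5's.
    #include <memory>

    struct FaultBase
    {
        virtual void invoke() {}
        virtual ~FaultBase() {}
    };
    struct Reset : FaultBase {};
    typedef std::shared_ptr<FaultBase> Fault;

    void
    raiseReset()
    {
        static Fault reset = std::make_shared<Reset>(); // built once (thread-safe)
        reset->invoke();                                // same object reused after
    }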
#include "base/types.hh"
#include "mem/request.hh"
#include "sim/byteswap.hh"
-#include "sim/fault_fwd.hh"
#include "sim/insttracer.hh"
/// Read from memory in timing mode.
if (status.im && cause.ip) {
DPRINTF(Interrupt, "Interrupt! IM[7:0]=%d IP[7:0]=%d \n",
(unsigned)status.im, (unsigned)cause.ip);
- return new InterruptFault;
+ return std::make_shared<InterruptFault>();
}
}
#include "arch/mips/registers.hh"
#include "arch/mips/types.hh"
#include "sim/eventq.hh"
-#include "sim/fault_fwd.hh"
#include "sim/sim_object.hh"
class BaseCPU;
0x4: decode FullSystemInt {
0: syscall_se({{ xc->syscall(R2); }},
IsSerializeAfter, IsNonSpeculative);
- default: syscall({{ fault = new SystemCallFault(); }});
+ default: syscall({{ fault = std::make_shared<SystemCallFault>(); }});
}
0x7: sync({{ ; }}, IsMemBarrier);
- 0x5: break({{fault = new BreakpointFault();}});
+ 0x5: break({{fault = std::make_shared<BreakpointFault>();}});
}
}
Rd = result = Rs + Rt;
if (FullSystem &&
findOverflow(32, result, Rs, Rt)) {
- fault = new IntegerOverflowFault();
+ fault = std::make_shared<IntegerOverflowFault>();
}
}});
0x1: addu({{ Rd_sw = Rs_sw + Rt_sw;}});
Rd = result = Rs - Rt;
if (FullSystem &&
findOverflow(32, result, Rs, ~Rt)) {
- fault = new IntegerOverflowFault();
+ fault = std::make_shared<IntegerOverflowFault>();
}
}});
0x3: subu({{ Rd_sw = Rs_sw - Rt_sw; }});
Rt = result = Rs + imm;
if (FullSystem &&
findOverflow(32, result, Rs, imm)) {
- fault = new IntegerOverflowFault();
+ fault = std::make_shared<IntegerOverflowFault>();
}
}});
0x1: addiu({{ Rt_sw = Rs_sw + imm; }});
} else {
// Enable this else branch once we
// actually set values for Config on init
- fault = new ReservedInstructionFault();
+ fault = std::make_shared<ReservedInstructionFault>();
}
Status = status;
}});
Rt = status;
status.ie = 1;
} else {
- fault = new ReservedInstructionFault();
+ fault = std::make_shared<ReservedInstructionFault>();
}
}});
default:CP0Unimpl::unknown();
// Rev 2 of the architecture
panic("Shadow Sets Not Fully Implemented.\n");
} else {
- fault = new ReservedInstructionFault();
+ fault = std::make_shared<ReservedInstructionFault>();
}
}});
0xE: wrpgpr({{
// Rev 2 of the architecture
panic("Shadow Sets Not Fully Implemented.\n");
} else {
- fault = new ReservedInstructionFault();
+ fault = std::make_shared<ReservedInstructionFault>();
}
}});
}
%(op_wb)s;
}
} else {
- fault = new CoprocessorUnusableFault(0);
+ fault = std::make_shared<CoprocessorUnusableFault>(0);
}
return fault;
}
if (isCoprocessorEnabled(xc, 1)) {
%(code)s;
} else {
- fault = new CoprocessorUnusableFault(1);
+ fault = std::make_shared<CoprocessorUnusableFault>(1);
}
if(fault == NoFault)
if(isMMUTLB(xc)){
%(code)s;
} else {
- fault = new ReservedInstructionFault();
+ fault = std::make_shared<ReservedInstructionFault>();
}
} else {
- fault = new CoprocessorUnusableFault(0);
+ fault = std::make_shared<CoprocessorUnusableFault>(0);
}
} else { // Syscall Emulation Mode - No TLB Instructions
- fault = new ReservedInstructionFault();
+ fault = std::make_shared<ReservedInstructionFault>();
}
if (fault == NoFault) {
}
else
{
- fault = new DspStateDisabledFault();
+ fault = std::make_shared<DspStateDisabledFault>();
}
}
else
{
- fault = new ReservedInstructionFault();
+ fault = std::make_shared<ReservedInstructionFault>();
}
if(fault == NoFault)
}
else
{
- fault = new DspStateDisabledFault();
+ fault = std::make_shared<DspStateDisabledFault>();
}
}
else
{
- fault = new ReservedInstructionFault();
+ fault = std::make_shared<ReservedInstructionFault>();
}
if(fault == NoFault)
//@TODO: Implement correct CP0 checks to see if the CP1
// unit is enabled or not
if (!isCoprocessorEnabled(xc, 1))
- return new CoprocessorUnusableFault(1);
+ return std::make_shared<CoprocessorUnusableFault>(1);
return NoFault;
}
if( ACSRC > 0 && !isDspEnabled(xc) )
{
- fault = new DspStateDisabledFault();
+ fault = std::make_shared<DspStateDisabledFault>();
}
else
{
if( ACDST > 0 && !isDspEnabled(xc) )
{
- fault = new DspStateDisabledFault();
+ fault = std::make_shared<DspStateDisabledFault>();
}
else
{
%(code)s;
}
} else {
- fault = new CoprocessorUnusableFault(0);
+ fault = std::make_shared<CoprocessorUnusableFault>(0);
}
if(fault == NoFault)
if (config3.mt == 1) {
%(code)s;
} else {
- fault = new ReservedInstructionFault();
+ fault = std::make_shared<ReservedInstructionFault>();
}
} else {
- fault = new CoprocessorUnusableFault(0);
+ fault = std::make_shared<CoprocessorUnusableFault>(0);
}
if(fault == NoFault)
code ='bool cond;\n' + code
code += 'if (cond) {\n'
- code += 'fault = new TrapFault();\n};'
+ code += 'fault = std::make_shared<TrapFault>();\n};'
iop = InstObjParams(name, Name, 'MipsStaticInst', code, flags)
header_output = BasicDeclare.subst(iop)
code ='bool cond;\n' + code
code += 'if (cond) {\n'
- code += 'fault = new TrapFault();\n};'
+ code += 'fault = std::make_shared<TrapFault>();\n};'
iop = InstObjParams(name, Name, 'MipsStaticInst', code, flags)
header_output = BasicDeclare.subst(iop)
decoder_output = BasicConstructor.subst(iop)
{
if (FullSystem) {
if (!isCoprocessorEnabled(xc, 0))
- return new CoprocessorUnusableFault(0);
+ return std::make_shared<CoprocessorUnusableFault>(0);
else
- return new ReservedInstructionFault;
+ return std::make_shared<ReservedInstructionFault>();
} else {
panic("attempt to execute unimplemented instruction '%s' "
"(inst %#08x, opcode %#x, binary:%s)",
{
if (FullSystem) {
if (!isCoprocessorEnabled(xc, 1))
- return new CoprocessorUnusableFault(1);
+ return std::make_shared<CoprocessorUnusableFault>(1);
else
- return new ReservedInstructionFault;
+ return std::make_shared<ReservedInstructionFault>();
} else {
panic("attempt to execute unimplemented instruction '%s' "
"(inst %#08x, opcode %#x, binary:%s)",
{
if (FullSystem) {
if (!isCoprocessorEnabled(xc, 2))
- return new CoprocessorUnusableFault(2);
+ return std::make_shared<CoprocessorUnusableFault>(2);
else
- return new ReservedInstructionFault;
+ return std::make_shared<ReservedInstructionFault>();
} else {
panic("attempt to execute unimplemented instruction '%s' "
"(inst %#08x, opcode %#x, binary:%s)",
Unknown::execute(CPU_EXEC_CONTEXT *xc,
Trace::InstRecord *traceData) const
{
- return new ReservedInstructionFault;
+ return std::make_shared<ReservedInstructionFault>();
}
}};
tc->readMiscRegNoEffect(MISCREG_VPE_CONTROL);
vpeControl.excpt = 1;
tc->setMiscReg(MISCREG_VPE_CONTROL, vpeControl);
- fault = new ThreadFault();
+ fault = std::make_shared<ThreadFault>();
}
}
VPEControlReg vpeControl = tc->readMiscReg(MISCREG_VPE_CONTROL);
vpeControl.excpt = 2;
tc->setMiscReg(MISCREG_VPE_CONTROL, vpeControl);
- fault = new ThreadFault();
+ fault = std::make_shared<ThreadFault>();
} else {
}
} else if (src_reg != -2) {
if (vpeControl.ysi == 1 && tcStatus.dt == 1 ) {
vpeControl.excpt = 4;
- fault = new ThreadFault();
+ fault = std::make_shared<ThreadFault>();
} else {
}
}
#include "base/statistics.hh"
#include "mem/request.hh"
#include "params/MipsTLB.hh"
-#include "sim/fault_fwd.hh"
#include "sim/sim_object.hh"
#include "sim/tlb.hh"
panic("attempt to execute unimplemented instruction '%s' "
"(inst 0x%08x, opcode 0x%x, binary:%s)", mnemonic, machInst, OPCODE,
inst2string(machInst));
- return new UnimplementedOpcodeFault;
+ return std::make_shared<UnimplementedOpcodeFault>();
}
Fault
panic("attempt to execute unknown instruction at %#x"
"(inst 0x%08x, opcode 0x%x, binary: %s)",
xc->pcState().pc(), machInst, OPCODE, inst2string(machInst));
- return new UnimplementedOpcodeFault;
+ return std::make_shared<UnimplementedOpcodeFault>();
}
}};
if (req->getVaddr() & 0x3) {
DPRINTF(TLB, "Alignment Fault on %#x, size = %d\n", req->getVaddr(),
req->getSize());
- return new AlignmentFault();
+ return std::make_shared<AlignmentFault>();
}
Process * p = tc->getProcessPtr();
#include "base/statistics.hh"
#include "mem/request.hh"
#include "params/PowerTLB.hh"
-#include "sim/fault_fwd.hh"
#include "sim/tlb.hh"
class ThreadContext;
if (pstate.ie) {
if (interrupts[IT_HINTP]) {
// This will be cleaned by a HINTP write
- return new HstickMatch;
+ return std::make_shared<HstickMatch>();
}
if (interrupts[IT_INT_VEC]) {
// this will be cleared by an ASI read (or write)
- return new InterruptVector;
+ return std::make_shared<InterruptVector>();
}
}
} else {
if (interrupts[IT_TRAP_LEVEL_ZERO]) {
// this is cleared by deasserting HPSTATE::tlz
- return new TrapLevelZero;
+ return std::make_shared<TrapLevelZero>();
}
// HStick matches always happen in priv mode (ie doesn't matter)
if (interrupts[IT_HINTP]) {
- return new HstickMatch;
+ return std::make_shared<HstickMatch>();
}
if (interrupts[IT_INT_VEC]) {
// this will be cleared by an ASI read (or write)
- return new InterruptVector;
+ return std::make_shared<InterruptVector>();
}
if (pstate.ie) {
if (interrupts[IT_CPU_MONDO]) {
- return new CpuMondo;
+ return std::make_shared<CpuMondo>();
}
if (interrupts[IT_DEV_MONDO]) {
- return new DevMondo;
+ return std::make_shared<DevMondo>();
}
if (interrupts[IT_SOFT_INT]) {
int level = InterruptLevel(interrupts[IT_SOFT_INT]);
- return new InterruptLevelN(level);
+ return std::make_shared<InterruptLevelN>(level);
}
if (interrupts[IT_RES_ERROR]) {
- return new ResumableError;
+ return std::make_shared<ResumableError>();
}
} // !hpriv && pstate.ie
} // !hpriv
if (pstate.pef && xc->readMiscReg(MISCREG_FPRS) & 0x4) {
return NoFault;
} else {
- return new FpDisabled;
+ return std::make_shared<FpDisabled>();
}
} else {
return NoFault;
0x0: decode OP2
{
// Throw an illegal instruction exception
- 0x0: Trap::illtrap({{fault = new IllegalInstruction;}});
+ 0x0: Trap::illtrap({{fault = std::make_shared<IllegalInstruction>();}});
format BranchN
{
// bpcc
0x0C: subc({{Rd_sdw = Rs1_sdw + (~Rs2_or_imm13) + 1 - Ccr<0:0>}});
0x0D: udivx({{
if (Rs2_or_imm13 == 0)
- fault = new DivisionByZero;
+ fault = std::make_shared<DivisionByZero>();
else
Rd_udw = Rs1_udw / Rs2_or_imm13;
}});
0x0E: udiv({{
if (Rs2_or_imm13 == 0) {
- fault = new DivisionByZero;
+ fault = std::make_shared<DivisionByZero>();
} else {
Rd_udw = ((Y << 32) | Rs1_udw<31:0>) / Rs2_or_imm13;
if (Rd_udw >> 32 != 0)
}});
0x0F: sdiv({{
if (Rs2_or_imm13_sdw == 0) {
- fault = new DivisionByZero;
+ fault = std::make_shared<DivisionByZero>();
} else {
Rd_udw = ((int64_t)((Y << 32) |
Rs1_sdw<31:0>)) / Rs2_or_imm13_sdw;
}}, sub=True);
0x1D: IntOpCcRes::udivxcc({{
if (Rs2_or_imm13_udw == 0)
- fault = new DivisionByZero;
+ fault = std::make_shared<DivisionByZero>();
else
Rd = Rs1_udw / Rs2_or_imm13_udw;}});
0x1E: IntOpCcRes::udivcc({{
uint32_t val2 = Rs2_or_imm13_udw;
int32_t overflow = 0;
if (val2 == 0) {
- fault = new DivisionByZero;
+ fault = std::make_shared<DivisionByZero>();
} else {
resTemp = (uint64_t)((Y << 32) | Rs1_udw<31:0>) / val2;
overflow = (resTemp<63:32> != 0);
int64_t val2 = Rs2_or_imm13_sdw<31:0>;
bool overflow = false, underflow = false;
if (val2 == 0) {
- fault = new DivisionByZero;
+ fault = std::make_shared<DivisionByZero>();
} else {
Rd = (int64_t)((Y << 32) | Rs1_sdw<31:0>) / val2;
overflow = ((int64_t)Rd >= std::numeric_limits<int32_t>::max());
bool overflow = (op1 & mask(2)) || (op2 & mask(2)) ||
findOverflow(32, res, op1, op2);
if (overflow)
- fault = new TagOverflow;
+ fault = std::make_shared<TagOverflow>();
}}, iv={{overflow}});
0x23: tsubcctv({{
int64_t res, op1 = Rs1, op2 = Rs2_or_imm13;
bool overflow = (op1 & mask(2)) || (op2 & mask(2)) ||
findOverflow(32, res, op1, ~op2);
if (overflow)
- fault = new TagOverflow;
+ fault = std::make_shared<TagOverflow>();
}}, iv={{overflow}}, sub=True);
0x24: mulscc({{
int32_t savedLSB = Rs1<0:>;
0x2B: BasicOperate::flushw({{
if (NWindows - 2 - Cansave != 0) {
if (Otherwin)
- fault = new SpillNOther(4*Wstate<5:3>);
+ fault = std::make_shared<SpillNOther>(4*Wstate<5:3>);
else
- fault = new SpillNNormal(4*Wstate<2:0>);
+ fault = std::make_shared<SpillNNormal>(4*Wstate<2:0>);
}
}});
0x2C: decode MOVCC3
}
0x2D: sdivx({{
if (Rs2_or_imm13_sdw == 0)
- fault = new DivisionByZero;
+ fault = std::make_shared<DivisionByZero>();
else
Rd_sdw = Rs1_sdw / Rs2_or_imm13_sdw;
}});
- 0x2E: Trap::popc({{fault = new IllegalInstruction;}});
+ 0x2E: Trap::popc({{fault = std::make_shared<IllegalInstruction>();}});
0x2F: decode RCOND3
{
0x1: movreq({{Rd = (Rs1_sdw == 0) ? Rs2_or_imm10 : Rd;}});
// 0x04-0x05 should cause an illegal instruction exception
0x06: NoPriv::wrfprs({{Fprs = Rs1 ^ Rs2_or_imm13;}});
// 0x07-0x0E should cause an illegal instruction exception
- 0x0F: Trap::softreset({{fault = new SoftwareInitiatedReset;}});
+ 0x0F: Trap::softreset({{fault = std::make_shared<SoftwareInitiatedReset>();}});
0x10: Priv::wrpcr({{Pcr = Rs1 ^ Rs2_or_imm13;}});
0x11: Priv::wrpic({{Pic = Rs1 ^ Rs2_or_imm13;}}, {{Pcr<0:>}});
// 0x12 should cause an illegal instruction exception
0x13: NoPriv::wrgsr({{
if (Fprs<2:> == 0 || Pstate.pef == 0)
- return new FpDisabled;
+ return std::make_shared<FpDisabled>();
Gsr = Rs1 ^ Rs2_or_imm13;
}});
0x14: Priv::wrsoftint_set({{SoftintSet = Rs1 ^ Rs2_or_imm13;}});
0x17: Priv::wrtick_cmpr({{TickCmpr = Rs1 ^ Rs2_or_imm13;}});
0x18: NoPriv::wrstick({{
if (!Hpstate.hpriv)
- return new IllegalInstruction;
+ return std::make_shared<IllegalInstruction>();
Stick = Rs1 ^ Rs2_or_imm13;
}});
0x19: Priv::wrstick_cmpr({{StickCmpr = Rs1 ^ Rs2_or_imm13;}});
0x55: fcmpes({{
uint8_t fcc = 0;
if (std::isnan(Frs1s) || std::isnan(Frs2s))
- fault = new FpExceptionIEEE754;
+ fault = std::make_shared<FpExceptionIEEE754>();
if (Frs1s < Frs2s)
fcc = 1;
else if (Frs1s > Frs2s)
0x56: fcmped({{
uint8_t fcc = 0;
if (std::isnan(Frs1) || std::isnan(Frs2))
- fault = new FpExceptionIEEE754;
+ fault = std::make_shared<FpExceptionIEEE754>();
if (Frs1 < Frs2)
fcc = 1;
else if (Frs1 > Frs2)
0x37: FailUnimpl::fmul8ulx16();
0x38: FailUnimpl::fmuld8sux16();
0x39: FailUnimpl::fmuld8ulx16();
- 0x3A: Trap::fpack32({{fault = new IllegalInstruction;}});
- 0x3B: Trap::fpack16({{fault = new IllegalInstruction;}});
- 0x3D: Trap::fpackfix({{fault = new IllegalInstruction;}});
- 0x3E: Trap::pdist({{fault = new IllegalInstruction;}});
+ 0x3A: Trap::fpack32({{fault = std::make_shared<IllegalInstruction>();}});
+ 0x3B: Trap::fpack16({{fault = std::make_shared<IllegalInstruction>();}});
+ 0x3D: Trap::fpackfix({{fault = std::make_shared<IllegalInstruction>();}});
+ 0x3E: Trap::pdist({{fault = std::make_shared<IllegalInstruction>();}});
0x48: BasicOperate::faligndata({{
uint64_t msbX = Frs1_udw;
uint64_t lsbX = Frs2_udw;
((lsbX & lsbMask) >> lsbShift);
}
}});
- 0x4B: Trap::fpmerge({{fault = new IllegalInstruction;}});
+ 0x4B: Trap::fpmerge({{fault = std::make_shared<IllegalInstruction>();}});
0x4C: FailUnimpl::bshuffle();
0x4D: FailUnimpl::fexpand();
0x50: FailUnimpl::fpadd16();
0x7D: FailUnimpl::fors();
0x7E: FpBasic::fone({{Frd_udw = std::numeric_limits<uint64_t>::max();}});
0x7F: FpBasic::fones({{Frds_uw = std::numeric_limits<uint32_t>::max();}});
- 0x80: Trap::shutdown({{fault = new IllegalInstruction;}});
+ 0x80: Trap::shutdown({{fault = std::make_shared<IllegalInstruction>();}});
0x81: FailUnimpl::siam();
}
// M5 special opcodes use the reserved IMPDEP2A opcode space
panic("M5 panic instruction called at pc = %#x.", PC);
}}, No_OpClass, IsNonSpeculative);
}
- default: Trap::impdep2({{fault = new IllegalInstruction;}});
+ default: Trap::impdep2({{fault = std::make_shared<IllegalInstruction>();}});
}
0x38: Branch::jmpl({{
Addr target = Rs1 + Rs2_or_imm13;
if (target & 0x3) {
- fault = new MemAddressNotAligned;
+ fault = std::make_shared<MemAddressNotAligned>();
} else {
if (Pstate.am)
Rd = (PC)<31:0>;
// faults.
if (Canrestore == 0) {
if (Otherwin)
- fault = new FillNOther(4*Wstate<5:3>);
+ fault = std::make_shared<FillNOther>(4*Wstate<5:3>);
else
- fault = new FillNNormal(4*Wstate<2:0>);
+ fault = std::make_shared<FillNNormal>(4*Wstate<2:0>);
} else if (target & 0x3) { // Check for alignment faults
- fault = new MemAddressNotAligned;
+ fault = std::make_shared<MemAddressNotAligned>();
} else {
NNPC = target;
Cwp = (Cwp - 1 + NWindows) % NWindows;
if (passesCondition(Ccr<3:0>, COND2)) {
int lTrapNum = I ? (Rs1 + SW_TRAP) : (Rs1 + Rs2);
DPRINTF(Sparc, "The trap number is %d\n", lTrapNum);
- fault = new TrapInstruction(lTrapNum);
+ fault = std::make_shared<TrapInstruction>(lTrapNum);
}
}}, IsSerializeAfter, IsNonSpeculative, IsSyscall);
0x2: Trap::tccx({{
if (passesCondition(Ccr<7:4>, COND2)) {
int lTrapNum = I ? (Rs1 + SW_TRAP) : (Rs1 + Rs2);
DPRINTF(Sparc, "The trap number is %d\n", lTrapNum);
- fault = new TrapInstruction(lTrapNum);
+ fault = std::make_shared<TrapInstruction>(lTrapNum);
}
}}, IsSerializeAfter, IsNonSpeculative, IsSyscall);
}
0x3C: save({{
if (Cansave == 0) {
if (Otherwin)
- fault = new SpillNOther(4*Wstate<5:3>);
+ fault = std::make_shared<SpillNOther>(4*Wstate<5:3>);
else
- fault = new SpillNNormal(4*Wstate<2:0>);
+ fault = std::make_shared<SpillNNormal>(4*Wstate<2:0>);
} else if (Cleanwin - Canrestore == 0) {
- fault = new CleanWindow;
+ fault = std::make_shared<CleanWindow>();
} else {
Cwp = (Cwp + 1) % NWindows;
Rd_next = Rs1 + Rs2_or_imm13;
0x3D: restore({{
if (Canrestore == 0) {
if (Otherwin)
- fault = new FillNOther(4*Wstate<5:3>);
+ fault = std::make_shared<FillNOther>(4*Wstate<5:3>);
else
- fault = new FillNNormal(4*Wstate<2:0>);
+ fault = std::make_shared<FillNNormal>(4*Wstate<2:0>);
} else {
Cwp = (Cwp - 1 + NWindows) % NWindows;
Rd_prev = Rs1 + Rs2_or_imm13;
Fsr = Mem_udw;}});
default: FailUnimpl::ldfsrOther();
}
- 0x22: ldqf({{fault = new FpDisabled;}});
+ 0x22: ldqf({{fault = std::make_shared<FpDisabled>();}});
0x23: Load::lddf({{Frd_udw = Mem_udw;}});
0x24: Store::stf({{Mem_uw = Frds_uw;}});
0x25: decode RD {
Mem_udw = Fsr;}});
default: FailUnimpl::stfsrOther();
}
- 0x26: stqf({{fault = new FpDisabled;}});
+ 0x26: stqf({{fault = std::make_shared<FpDisabled>();}});
0x27: Store::stdf({{Mem_udw = Frd_udw;}});
0x2D: Nop::prefetch({{ }});
0x30: LoadAlt::ldfa({{Frds_uw = Mem_uw;}});
- 0x32: ldqfa({{fault = new FpDisabled;}});
+ 0x32: ldqfa({{fault = std::make_shared<FpDisabled>();}});
format LoadAlt {
0x33: decode EXT_ASI {
// ASI_NUCLEUS
0xDB: FailUnimpl::ldshortf_16sl();
// Not an ASI which is legal with lddfa
default: Trap::lddfa_bad_asi(
- {{fault = new DataAccessException;}});
+ {{fault = std::make_shared<DataAccessException>();}});
}
}
0x34: Store::stfa({{Mem_uw = Frds_uw;}});
- 0x36: stqfa({{fault = new FpDisabled;}});
+ 0x36: stqfa({{fault = std::make_shared<FpDisabled>();}});
format StoreAlt {
0x37: decode EXT_ASI {
// ASI_NUCLEUS
0xDB: FailUnimpl::stshortf_16sl();
// Not an ASI which is legal with stdfa
default: Trap::stdfa_bad_asi(
- {{fault = new DataAccessException;}});
+ {{fault = std::make_shared<DataAccessException>();}});
}
}
0x3C: CasAlt::casa({{
# and we're dealing with doubles
BlockAlignmentFaultCheck = '''
if (RD & 0xe)
- fault = new IllegalInstruction;
+ fault = std::make_shared<IllegalInstruction>();
else if (EA & 0x3f)
- fault = new MemAddressNotAligned;
+ fault = std::make_shared<MemAddressNotAligned>();
'''
TwinAlignmentFaultCheck = '''
if (RD & 0x1)
- fault = new IllegalInstruction;
+ fault = std::make_shared<IllegalInstruction>();
else if (EA & 0xf)
- fault = new MemAddressNotAligned;
+ fault = std::make_shared<MemAddressNotAligned>();
'''
# XXX Need to take care of pstate.hpriv as well. The lower ASIs
# are split into ones that are available in priv and hpriv, and
if ((!Pstate.priv && !Hpstate.hpriv &&
!asiIsUnPriv((ASI)EXT_ASI)) ||
(!Hpstate.hpriv && asiIsHPriv((ASI)EXT_ASI)))
- fault = new PrivilegedAction;
+ fault = std::make_shared<PrivilegedAction>();
else if (asiIsAsIfUser((ASI)EXT_ASI) && !Pstate.priv)
- fault = new PrivilegedAction;
+ fault = std::make_shared<PrivilegedAction>();
'''
TruncateEA = '''
// If the processor isn't in privileged mode, fault out right away
if (%(check)s)
- return new PrivilegedAction;
+ return std::make_shared<PrivilegedAction>();
if (%(tlCheck)s)
- return new IllegalInstruction;
+ return std::make_shared<IllegalInstruction>();
Fault fault = NoFault;
%(code)s;
def format FpUnimpl(*flags) {{
fpunimpl_code = '''
Fsr = insertBits(Fsr, 16, 14, 3);
- fault = new FpExceptionOther;
+ fault = std::make_shared<FpExceptionOther>();
'''
iop = InstObjParams(name, Name, 'FpUnimpl', fpunimpl_code, flags)
header_output = BasicDeclare.subst(iop)
Fault Unknown::execute(CPU_EXEC_CONTEXT *xc,
Trace::InstRecord *traceData) const
{
- return new IllegalInstruction;
+ return std::make_shared<IllegalInstruction>();
}
}};
// If the access is unaligned, trap
if (vaddr & 0x3) {
writeSfsr(false, ct, false, OtherFault, asi);
- return new MemAddressNotAligned;
+ return std::make_shared<MemAddressNotAligned>();
}
if (addr_mask)
if (!validVirtualAddress(vaddr, addr_mask)) {
writeSfsr(false, ct, false, VaOutOfRange, asi);
- return new InstructionAccessException;
+ return std::make_shared<InstructionAccessException>();
}
if (!lsu_im) {
if (e == NULL || !e->valid) {
writeTagAccess(vaddr, context);
if (real) {
- return new InstructionRealTranslationMiss;
+ return std::make_shared<InstructionRealTranslationMiss>();
} else {
if (FullSystem)
- return new FastInstructionAccessMMUMiss;
+ return std::make_shared<FastInstructionAccessMMUMiss>();
else
- return new FastInstructionAccessMMUMiss(req->getVaddr());
+ return std::make_shared<FastInstructionAccessMMUMiss>(
+ req->getVaddr());
}
}
if (!priv && e->pte.priv()) {
writeTagAccess(vaddr, context);
writeSfsr(false, ct, false, PrivViolation, asi);
- return new InstructionAccessException;
+ return std::make_shared<InstructionAccessException>();
}
// cache translation data for next translation
if (!priv && !hpriv && !asiIsUnPriv(asi)) {
// It appears that context should be Nucleus in these cases?
writeSfsr(vaddr, write, Nucleus, false, IllegalAsi, asi);
- return new PrivilegedAction;
+ return std::make_shared<PrivilegedAction>();
}
if (!hpriv && asiIsHPriv(asi)) {
writeSfsr(vaddr, write, Nucleus, false, IllegalAsi, asi);
- return new DataAccessException;
+ return std::make_shared<DataAccessException>();
}
if (asiIsPrimary(asi)) {
// If the access is unaligned, trap
if (unaligned) {
writeSfsr(vaddr, false, ct, false, OtherFault, asi);
- return new MemAddressNotAligned;
+ return std::make_shared<MemAddressNotAligned>();
}
if (addr_mask)
if (!validVirtualAddress(vaddr, addr_mask)) {
writeSfsr(vaddr, false, ct, true, VaOutOfRange, asi);
- return new DataAccessException;
+ return std::make_shared<DataAccessException>();
}
if ((!lsu_dm && !hpriv && !red) || asiIsReal(asi)) {
writeTagAccess(vaddr, context);
DPRINTF(TLB, "TLB: DTB Failed to find matching TLB entry\n");
if (real) {
- return new DataRealTranslationMiss;
+ return std::make_shared<DataRealTranslationMiss>();
} else {
if (FullSystem)
- return new FastDataAccessMMUMiss;
+ return std::make_shared<FastDataAccessMMUMiss>();
else
- return new FastDataAccessMMUMiss(req->getVaddr());
+ return std::make_shared<FastDataAccessMMUMiss>(
+ req->getVaddr());
}
}
if (!priv && e->pte.priv()) {
writeTagAccess(vaddr, context);
writeSfsr(vaddr, write, ct, e->pte.sideffect(), PrivViolation, asi);
- return new DataAccessException;
+ return std::make_shared<DataAccessException>();
}
if (write && !e->pte.writable()) {
writeTagAccess(vaddr, context);
writeSfsr(vaddr, write, ct, e->pte.sideffect(), OtherFault, asi);
- return new FastDataAccessProtection;
+ return std::make_shared<FastDataAccessProtection>();
}
if (e->pte.nofault() && !asiIsNoFault(asi)) {
writeTagAccess(vaddr, context);
writeSfsr(vaddr, write, ct, e->pte.sideffect(), LoadFromNfo, asi);
- return new DataAccessException;
+ return std::make_shared<DataAccessException>();
}
if (e->pte.sideffect() && asiIsNoFault(asi)) {
writeTagAccess(vaddr, context);
writeSfsr(vaddr, write, ct, e->pte.sideffect(), SideEffect, asi);
- return new DataAccessException;
+ return std::make_shared<DataAccessException>();
}
if (e->pte.sideffect() || (e->pte.paddr() >> 39) & 1)
if (!hpriv) {
writeSfsr(vaddr, write, Primary, true, IllegalAsi, asi);
if (priv)
- return new DataAccessException;
+ return std::make_shared<DataAccessException>();
else
- return new PrivilegedAction;
+ return std::make_shared<PrivilegedAction>();
}
if ((asi == ASI_SWVR_UDB_INTR_W && !write) ||
(asi == ASI_SWVR_UDB_INTR_R && write)) {
writeSfsr(vaddr, write, Primary, true, IllegalAsi, asi);
- return new DataAccessException;
+ return std::make_shared<DataAccessException>();
}
goto regAccessOk;
handleScratchRegAccess:
if (vaddr > 0x38 || (vaddr >= 0x20 && vaddr < 0x30 && !hpriv)) {
writeSfsr(vaddr, write, Primary, true, IllegalAsi, asi);
- return new DataAccessException;
+ return std::make_shared<DataAccessException>();
}
goto regAccessOk;
handleQueueRegAccess:
if (!priv && !hpriv) {
writeSfsr(vaddr, write, Primary, true, IllegalAsi, asi);
- return new PrivilegedAction;
+ return std::make_shared<PrivilegedAction>();
}
if ((!hpriv && vaddr & 0xF) || vaddr > 0x3f8 || vaddr < 0x3c0) {
writeSfsr(vaddr, write, Primary, true, IllegalAsi, asi);
- return new DataAccessException;
+ return std::make_shared<DataAccessException>();
}
goto regAccessOk;
if (!hpriv) {
writeSfsr(vaddr, write, Primary, true, IllegalAsi, asi);
if (priv)
- return new DataAccessException;
+ return std::make_shared<DataAccessException>();
else
- return new PrivilegedAction;
+ return std::make_shared<PrivilegedAction>();
}
goto regAccessOk;
#include "base/misc.hh"
#include "mem/request.hh"
#include "params/SparcTLB.hh"
-#include "sim/fault_fwd.hh"
#include "sim/tlb.hh"
class ThreadContext;
void
initCPU(ThreadContext *tc, int cpuId)
{
- static Fault por = new PowerOnReset();
+ static Fault por = std::make_shared<PowerOnReset>();
if (cpuId == 0)
por->invoke(tc);
}
#include "base/misc.hh"
#include "cpu/static_inst.hh"
#include "cpu/thread_context.hh"
-#include "sim/fault_fwd.hh"
#include "sim/full_system.hh"
namespace SparcISA
* Authors: Gabe Black
*/
+#include <memory>
+
#include "arch/x86/regs/apic.hh"
#include "arch/x86/interrupts.hh"
#include "arch/x86/intmessage.hh"
if (pendingUnmaskableInt) {
if (pendingSmi) {
DPRINTF(LocalApic, "Generated SMI fault object.\n");
- return new SystemManagementInterrupt();
+ return std::make_shared<SystemManagementInterrupt>();
} else if (pendingNmi) {
DPRINTF(LocalApic, "Generated NMI fault object.\n");
- return new NonMaskableInterrupt(nmiVector);
+ return std::make_shared<NonMaskableInterrupt>(nmiVector);
} else if (pendingInit) {
DPRINTF(LocalApic, "Generated INIT fault object.\n");
- return new InitInterrupt(initVector);
+ return std::make_shared<InitInterrupt>(initVector);
} else if (pendingStartup) {
DPRINTF(LocalApic, "Generating SIPI fault object.\n");
- return new StartupInterrupt(startupVector);
+ return std::make_shared<StartupInterrupt>(startupVector);
} else {
panic("pendingUnmaskableInt set, but no unmaskable "
"ints were pending.\n");
}
} else if (pendingExtInt) {
DPRINTF(LocalApic, "Generated external interrupt fault object.\n");
- return new ExternalInterrupt(extIntVector);
+ return std::make_shared<ExternalInterrupt>(extIntVector);
} else {
DPRINTF(LocalApic, "Generated regular interrupt fault object.\n");
// The only things left are fixed and lowest-priority interrupts.
- return new ExternalInterrupt(IRRV);
+ return std::make_shared<ExternalInterrupt>(IRRV);
}
}
} else if (LEGACY_REPNE) {
// The repne prefix is illegal
return new MicroFault(machInst, "illprefix", 0,
- new InvalidOpcode, 0);
+ std::make_shared<InvalidOpcode>(), 0);
} else {
%s
}
Fault Unknown::execute(CPU_EXEC_CONTEXT *xc,
Trace::InstRecord *traceData) const
{
- return new InvalidOpcode();
+ return std::make_shared<InvalidOpcode>();
}
}};
ld t1, seg, sib, disp, dataSize="env.dataSize * 2"
srli t2, t1, "env.dataSize * 8"
sub t1, t1, reg, flags=(ECF,)
- fault "new BoundRange", flags=(CECF,)
+ fault "std::make_shared<BoundRange>()", flags=(CECF,)
sub t2, reg, t2, flags=(ECF,)
- fault "new BoundRange", flags=(CECF,)
+ fault "std::make_shared<BoundRange>()", flags=(CECF,)
};
def macroop BOUND_R_P {
- fault "new UnimpInstFault"
+ fault "std::make_shared<UnimpInstFault>()"
};
'''
# if temp_RIP > CS.limit throw #GP(0)
rdlimit t6, cs, dataSize=8
sub t0, t1, t6, flags=(ECF,)
- fault "new GeneralProtection(0)", flags=(CECF,)
+ fault "std::make_shared<GeneralProtection>(0)", flags=(CECF,)
#(temp_CPL!=CPL)
srli t7, t4, 4
# if t7 isn't 0 or -1, it wasn't canonical.
br label("doPopStackStuff"), flags=(CEZF,)
addi t0, t7, 1, flags=(EZF,), dataSize=ssz
- fault "new GeneralProtection(0)", flags=(nCEZF,)
+ fault "std::make_shared<GeneralProtection>(0)", flags=(nCEZF,)
doPopStackStuff:
# POP.v temp_RSP
# t1 has the offset and t2 has the new selector.
# This is intended to run in protected mode.
andi t0, t2, 0xFC, flags=(EZF,), dataSize=2
- fault "new GeneralProtection(0)", flags=(CEZF,)
+ fault "std::make_shared<GeneralProtection>(0)", flags=(CEZF,)
andi t3, t2, 0xF8, dataSize=8
andi t0, t2, 0x4, flags=(EZF,), dataSize=2
br rom_local_label("farJmpGlobalDescriptor"), flags=(CEZF,)
microcode = '''
def macroop UD2
{
- fault "new InvalidOpcode()"
+ fault "std::make_shared<InvalidOpcode>()"
};
'''
def macroop FADDP_M
{
- fault "new UnimpInstFault"
+ fault "std::make_shared<UnimpInstFault>()"
};
def macroop FADDP_P
{
- fault "new UnimpInstFault"
+ fault "std::make_shared<UnimpInstFault>()"
};
# FIADD
def macroop FSUBP_M
{
- fault "new UnimpInstFault"
+ fault "std::make_shared<UnimpInstFault>()"
};
def macroop FSUBP_P
{
- fault "new UnimpInstFault"
+ fault "std::make_shared<UnimpInstFault>()"
};
# FISUB
def macroop FXCH_M
{
- fault "new UnimpInstFault"
+ fault "std::make_shared<UnimpInstFault>()"
};
def macroop FXCH_P
{
- fault "new UnimpInstFault"
+ fault "std::make_shared<UnimpInstFault>()"
};
'''
%(op_decl)s
%(op_rd)s
if (%(cond_test)s) {
- return new GenericISA::M5DebugFault(func, message);
+ return std::make_shared<GenericISA::M5DebugFault>(func,
+ message);
} else {
return NoFault;
}
uint64_t dividend = remainder;
//Do the division.
if (divisor == 0) {
- fault = new DivideByZero;
+ fault = std::make_shared<DivideByZero>();
} else {
divide(dividend, divisor, quotient, remainder);
//Record the final results.
//If we overshot, do nothing. This lets us unroll division loops a
//little.
if (divisor == 0) {
- fault = new DivideByZero;
+ fault = std::make_shared<DivideByZero>();
} else if (remaining) {
if (divisor & (ULL(1) << 63)) {
while (remaining && !(dividend & (ULL(1) << 63))) {
CR4 cr4 = CR4Op;
DR7 dr7 = DR7Op;
if ((cr4.de == 1 && (src1 == 4 || src1 == 5)) || src1 >= 8) {
- fault = new InvalidOpcode();
+ fault = std::make_shared<InvalidOpcode>();
} else if (dr7.gd) {
- fault = new DebugException();
+ fault = std::make_shared<DebugException>();
} else {
%s
}
CR4 cr4 = CR4Op;
DR7 dr7 = DR7Op;
if ((cr4.de == 1 && (dest == 4 || dest == 5)) || dest >= 8) {
- fault = new InvalidOpcode();
+ fault = std::make_shared<InvalidOpcode>();
} else if ((dest == 6 || dest == 7) && bits(psrc1, 63, 32) &&
machInst.mode.mode == LongMode) {
- fault = new GeneralProtection(0);
+ fault = std::make_shared<GeneralProtection>(0);
} else if (dr7.gd) {
- fault = new DebugException();
+ fault = std::make_shared<DebugException>();
} else {
DebugDest = psrc1;
}
src1, "InstRegIndex(NUM_INTREGS)", flags, dataSize)
rdcrCode = '''
if (src1 == 1 || (src1 > 4 && src1 < 8) || (src1 > 8)) {
- fault = new InvalidOpcode();
+ fault = std::make_shared<InvalidOpcode>();
} else {
%s
}
src1, "InstRegIndex(NUM_INTREGS)", flags, dataSize)
code = '''
if (dest == 1 || (dest > 4 && dest < 8) || (dest > 8)) {
- fault = new InvalidOpcode();
+ fault = std::make_shared<InvalidOpcode>();
} else {
// There are *s in the line below so it doesn't confuse the
// parser. They may be unnecessary.
(!cr0.pe && cr0.pg) ||
(!cr0.cd && cr0.nw) ||
(cr0.pg && efer.lme && !oldCr4.pae))
- fault = new GeneralProtection(0);
+ fault = std::make_shared<GeneralProtection>(0);
}
break;
case 2:
// PAE can't be disabled in long mode.
if (bits(newVal, 63, 11) ||
(machInst.mode.mode == LongMode && !cr4.pae))
- fault = new GeneralProtection(0);
+ fault = std::make_shared<GeneralProtection>(0);
}
break;
case 8:
{
if (bits(newVal, 63, 4))
- fault = new GeneralProtection(0);
+ fault = std::make_shared<GeneralProtection>(0);
}
default:
- fault = new GenericISA::M5PanicFault(
+ fault = std::make_shared<GenericISA::M5PanicFault>(
"Unrecognized control register %d.\\n", dest);
}
ControlDest = newVal;
case SegCSCheck:
// Make sure it's the right type
if (desc.s == 0 || desc.type.codeOrData != 1) {
- fault = new GeneralProtection(0);
+ fault = std::make_shared<GeneralProtection>(0);
} else if (m5reg.cpl != desc.dpl) {
- fault = new GeneralProtection(0);
+ fault = std::make_shared<GeneralProtection>(0);
}
break;
case SegCallGateCheck:
- fault = new GenericISA::M5PanicFault("CS checks for far "
+ fault = std::make_shared<GenericISA::M5PanicFault>(
+ "CS checks for far "
"calls/jumps through call gates not implemented.\\n");
break;
case SegSoftIntGateCheck:
// Check permissions.
if (desc.dpl < m5reg.cpl) {
- fault = new GeneralProtection(selector);
+ fault = std::make_shared<GeneralProtection>(selector);
break;
}
// Fall through on purpose
// Make sure the gate's the right type.
if ((m5reg.mode == LongMode && (desc.type & 0xe) != 0xe) ||
((desc.type & 0x6) != 0x6)) {
- fault = new GeneralProtection(0);
+ fault = std::make_shared<GeneralProtection>(0);
}
break;
case SegSSCheck:
if (selector.si || selector.ti) {
if (!desc.p) {
- fault = new StackFault(selector);
+ fault = std::make_shared<StackFault>(selector);
} else if (!(desc.s == 1 && desc.type.codeOrData == 0 &&
desc.type.w) ||
(desc.dpl != m5reg.cpl) ||
(selector.rpl != m5reg.cpl)) {
- fault = new GeneralProtection(selector);
+ fault = std::make_shared<GeneralProtection>(selector);
}
} else if (m5reg.submode != SixtyFourBitMode ||
m5reg.cpl == 3) {
- fault = new GeneralProtection(selector);
+ fault = std::make_shared<GeneralProtection>(selector);
}
break;
case SegIretCheck:
!(desc.s == 1 && desc.type.codeOrData == 1) ||
(!desc.type.c && desc.dpl != selector.rpl) ||
(desc.type.c && desc.dpl > selector.rpl)) {
- fault = new GeneralProtection(selector);
+ fault = std::make_shared<GeneralProtection>(selector);
} else if (!desc.p) {
- fault = new SegmentNotPresent(selector);
+ fault = std::make_shared<SegmentNotPresent>(selector);
}
break;
}
case SegIntCSCheck:
if (m5reg.mode == LongMode) {
if (desc.l != 1 || desc.d != 0) {
- fault = new GeneralProtection(selector);
+ fault = std::make_shared<GeneralProtection>(selector);
}
} else {
- fault = new GenericISA::M5PanicFault("Interrupt CS "
+ fault = std::make_shared<GenericISA::M5PanicFault>(
+ "Interrupt CS "
"checks not implemented in legacy mode.\\n");
}
break;
case SegTRCheck:
if (!selector.si || selector.ti) {
- fault = new GeneralProtection(selector);
+ fault = std::make_shared<GeneralProtection>(selector);
}
break;
case SegTSSCheck:
if (!desc.p) {
- fault = new SegmentNotPresent(selector);
+ fault = std::make_shared<SegmentNotPresent>(selector);
} else if (!(desc.type == 0x9 ||
(desc.type == 1 &&
m5reg.mode != LongMode))) {
- fault = new GeneralProtection(selector);
+ fault = std::make_shared<GeneralProtection>(selector);
}
break;
case SegInGDTCheck:
if (selector.ti) {
- fault = new GeneralProtection(selector);
+ fault = std::make_shared<GeneralProtection>(selector);
}
break;
case SegLDTCheck:
if (!desc.p) {
- fault = new SegmentNotPresent(selector);
+ fault = std::make_shared<SegmentNotPresent>(selector);
} else if (desc.type != 0x2) {
- fault = new GeneralProtection(selector);
+ fault = std::make_shared<GeneralProtection>(selector);
}
break;
default:
- fault = new GenericISA::M5PanicFault(
+ fault = std::make_shared<GenericISA::M5PanicFault>(
"Undefined segment check type.\\n");
}
'''
replaceBits(target, 31, 16, bits(desc, 63, 48));
break;
default:
- fault = new GenericISA::M5PanicFault(
+ fault = std::make_shared<GenericISA::M5PanicFault>(
"Wrdh used with wrong descriptor type!\\n");
}
DestReg = target;
while (true) {
if (selector.si || selector.ti) {
if (!desc.p) {
- fault = new GenericISA::M5PanicFault(
+ fault = std::make_shared<GenericISA::M5PanicFault>(
"Segment not present.\\n");
break;
}
if (!desc.s) {
// The expand down bit happens to be set for gates.
if (desc.type.e) {
- fault = new GenericISA::M5PanicFault(
+ fault = std::make_shared<GenericISA::M5PanicFault>(
"Gate descriptor encountered.\\n");
break;
}
#include "base/types.hh"
#include "sim/byteswap.hh"
-#include "sim/fault_fwd.hh"
#include "sim/insttracer.hh"
namespace X86ISA
* Authors: Gabe Black
*/
+#include <memory>
+
#include "arch/x86/pagetable.hh"
#include "arch/x86/pagetable_walker.hh"
#include "arch/x86/tlb.hh"
currState->req->getVaddr());
// finish the translation which will delete the translation object
- currState->translation->finish(new UnimpFault("Squashed Inst"),
- currState->req, currState->tc, currState->mode);
+ currState->translation->finish(
+ std::make_shared<UnimpFault>("Squashed Inst"),
+ currState->req, currState->tc, currState->mode);
// delete the current request
delete currState;
HandyM5Reg m5reg = tc->readMiscRegNoEffect(MISCREG_M5_REG);
if (mode == BaseTLB::Execute && !enableNX)
mode = BaseTLB::Read;
- return new PageFault(entry.vaddr, present, mode, m5reg.cpl == 3, false);
+ return std::make_shared<PageFault>(entry.vaddr, present, mode,
+ m5reg.cpl == 3, false);
}
/* end namespace X86ISA */ }
*/
#include <cstring>
+#include <memory>
#include "arch/generic/mmapped_ipr.hh"
#include "arch/x86/insts/microldstop.hh"
MiscRegIndex regNum;
if (!msrAddrToIndex(regNum, vaddr))
- return new GeneralProtection(0);
+ return std::make_shared<GeneralProtection>(0);
//The index is multiplied by the size of a MiscReg so that
//any memory dependence calculations will not see these as
if (!(seg == SEGMENT_REG_TSG || seg == SYS_SEGMENT_REG_IDTR ||
seg == SEGMENT_REG_HS || seg == SEGMENT_REG_LS)
&& !tc->readMiscRegNoEffect(MISCREG_SEG_SEL(seg)))
- return new GeneralProtection(0);
+ return std::make_shared<GeneralProtection>(0);
bool expandDown = false;
SegAttr attr = tc->readMiscRegNoEffect(MISCREG_SEG_ATTR(seg));
if (seg >= SEGMENT_REG_ES && seg <= SEGMENT_REG_HS) {
if (!attr.writable && (mode == Write || storeCheck))
- return new GeneralProtection(0);
+ return std::make_shared<GeneralProtection>(0);
if (!attr.readable && mode == Read)
- return new GeneralProtection(0);
+ return std::make_shared<GeneralProtection>(0);
expandDown = attr.expandDown;
}
DPRINTF(TLB, "Checking an expand down segment.\n");
warn_once("Expand down segments are untested.\n");
if (offset <= limit || endOffset <= limit)
- return new GeneralProtection(0);
+ return std::make_shared<GeneralProtection>(0);
} else {
if (offset > limit || endOffset > limit)
- return new GeneralProtection(0);
+ return std::make_shared<GeneralProtection>(0);
}
}
if (m5Reg.submode != SixtyFourBitMode ||
}
}
if (!success) {
- return new PageFault(vaddr, true, mode, true, false);
+ return std::make_shared<PageFault>(vaddr, true, mode,
+ true, false);
} else {
Addr alignedVaddr = p->pTable->pageAlign(vaddr);
DPRINTF(TLB, "Mapping %#x to %#x\n", alignedVaddr,
// The page must have been present to get into the TLB in
// the first place. We'll assume the reserved bits are
// fine even though we're not checking them.
- return new PageFault(vaddr, true, mode, inUser, false);
+ return std::make_shared<PageFault>(vaddr, true, mode, inUser,
+ false);
}
if (storeCheck && badWrite) {
// This would fault if this were a write, so return a page
// fault that reflects that happening.
- return new PageFault(vaddr, true, Write, inUser, false);
+ return std::make_shared<PageFault>(vaddr, true, Write, inUser,
+ false);
}
Addr paddr = entry->paddr | (vaddr & mask(entry->logBytes));
#include "mem/mem_object.hh"
#include "mem/request.hh"
#include "params/X86TLB.hh"
-#include "sim/fault_fwd.hh"
#include "sim/sim_object.hh"
#include "sim/tlb.hh"
#include "base/trace.hh"
#include "cpu/thread_context.hh"
#include "debug/VtoPhys.hh"
-#include "sim/fault_fwd.hh"
using namespace std;
#include <inttypes.h>
#include <cassert>
+#include <memory>
#include <ostream>
#include "base/refcnt.hh"
const PortID InvalidPortID = (PortID)-1;
class FaultBase;
-typedef RefCountingPtr<FaultBase> Fault;
+typedef std::shared_ptr<FaultBase> Fault;
+
+#ifndef SWIG // Swig gets really confused by decltype
+// Rather than creating a shared_ptr instance and assigning it nullptr,
+// we just create an alias.
+constexpr decltype(nullptr) NoFault = nullptr;
+#endif
#endif // __BASE_TYPES_HH__
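For reference, a minimal standalone sketch (not part of this patch; the FaultBase and DivideByZero classes below are simplified stand-ins, not the gem5 definitions) of the ownership model the new typedef implies: faults are created with std::make_shared, returned by value as Fault, compared against the NoFault nullptr alias, and destroyed automatically when the last shared_ptr reference goes away.

// Standalone sketch only -- simplified stand-ins for the gem5 classes.
#include <iostream>
#include <memory>
#include <string>

class FaultBase
{
  public:
    virtual ~FaultBase() {}
    virtual std::string name() const = 0;
};

typedef std::shared_ptr<FaultBase> Fault;
constexpr decltype(nullptr) NoFault = nullptr;

class DivideByZero : public FaultBase
{
  public:
    std::string name() const { return "Divide by zero"; }
};

// Return NoFault on success, otherwise a heap-allocated fault whose
// lifetime is managed by the shared_ptr -- no manual delete needed.
Fault
divide(int a, int b, int &result)
{
    if (b == 0)
        return std::make_shared<DivideByZero>();
    result = a / b;
    return NoFault;
}

int
main()
{
    int r;
    Fault fault = divide(4, 0, r);
    if (fault != NoFault)
        std::cout << "fault: " << fault->name() << std::endl;
    return 0;
}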
#include "cpu/translation.hh"
#include "mem/packet.hh"
#include "sim/byteswap.hh"
-#include "sim/fault_fwd.hh"
#include "sim/system.hh"
#include "sim/tlb.hh"
#include "config/the_isa.hh"
#include "cpu/static_inst_fwd.hh"
#include "cpu/translation.hh"
-#include "sim/fault_fwd.hh"
/**
* The ExecContext is an abstract base class the provides the
#include "cpu/reg_class.hh"
#include "debug/InOrderDynInst.hh"
#include "mem/request.hh"
-#include "sim/fault_fwd.hh"
#include "sim/full_system.hh"
using namespace std;
#if THE_ISA == ALPHA_ISA
// Can only do a hwrei when in pal mode.
if (!(this->instAddr() & 0x3))
- return new AlphaISA::UnimplementedOpcodeFault;
+ return std::make_shared<AlphaISA::UnimplementedOpcodeFault>();
// Set the next PC based on the value of the EXC_ADDR IPR.
AlphaISA::PCState pc = this->pcState();
#include "cpu/thread_context.hh"
#include "debug/InOrderDynInst.hh"
#include "mem/packet.hh"
-#include "sim/fault_fwd.hh"
#include "sim/system.hh"
#if THE_ISA == ALPHA_ISA
#if THE_ISA == ALPHA_ISA
// Can only do a hwrei when in pal mode.
if (!(this->instAddr() & 0x3))
- return new AlphaISA::UnimplementedOpcodeFault;
+ return std::make_shared<AlphaISA::UnimplementedOpcodeFault>();
// Set the next PC based on the value of the EXC_ADDR IPR.
AlphaISA::PCState pc = this->pcState();
#include "debug/LSQUnit.hh"
#include "mem/packet.hh"
#include "mem/port.hh"
-#include "sim/fault_fwd.hh"
struct DerivO3CPUParams;
delete sreqLow;
delete sreqHigh;
}
- return new GenericISA::M5PanicFault(
- "Uncachable load [sn:%llx] PC %s\n",
- load_inst->seqNum, load_inst->pcState());
+ return std::make_shared<GenericISA::M5PanicFault>(
+ "Uncachable load [sn:%llx] PC %s\n",
+ load_inst->seqNum, load_inst->pcState());
}
// Check the SQ for any previous stores that might lead to forwarding
pkt->getAddr(), ld_inst->seqNum);
// Mark the load for re-execution
- ld_inst->fault = new ReExec;
+ ld_inst->fault = std::make_shared<ReExec>();
} else {
DPRINTF(LSQUnit, "HitExternal Snoop for addr %#x [sn:%lli]\n",
pkt->getAddr(), ld_inst->seqNum);
++lsqMemOrderViolation;
- return new GenericISA::M5PanicFault(
- "Detected fault with inst [sn:%lli] and "
- "[sn:%lli] at address %#x\n",
- inst->seqNum, ld_inst->seqNum, ld_eff_addr1);
+ return std::make_shared<GenericISA::M5PanicFault>(
+ "Detected fault with inst [sn:%lli] and "
+ "[sn:%lli] at address %#x\n",
+ inst->seqNum, ld_inst->seqNum, ld_eff_addr1);
}
}
++lsqMemOrderViolation;
- return new GenericISA::M5PanicFault("Detected fault with "
- "inst [sn:%lli] and [sn:%lli] at address %#x\n",
- inst->seqNum, ld_inst->seqNum, ld_eff_addr1);
+ return std::make_shared<GenericISA::M5PanicFault>(
+ "Detected fault with "
+ "inst [sn:%lli] and [sn:%lli] at address %#x\n",
+ inst->seqNum, ld_inst->seqNum, ld_eff_addr1);
}
}
#include "cpu/static_inst_fwd.hh"
#include "cpu/thread_context.hh"
#include "enums/StaticInstFlags.hh"
-#include "sim/fault_fwd.hh"
// forward declarations
class Packet;
+++ /dev/null
-/*
- * Copyright (c) 2010 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met: redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer;
- * redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution;
- * neither the name of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * Authors: Gabe Black
- */
-
-#ifndef __SIM_FAULT_FWD_HH__
-#define __SIM_FAULT_FWD_HH__
-
-#include "base/refcnt.hh"
-
-class FaultBase;
-typedef RefCountingPtr<FaultBase> Fault;
-
-FaultBase * const NoFault = 0;
-
-#endif // __SIM_FAULT_FWD_HH__
#ifndef __FAULTS_HH__
#define __FAULTS_HH__
-#include "base/refcnt.hh"
#include "base/types.hh"
#include "cpu/static_inst.hh"
-#include "sim/fault_fwd.hh"
#include "sim/stats.hh"
class ThreadContext;
// all faults returned using the Fault type) will use the
// generic FaultBase name.
-class FaultBase : public RefCounted
+class FaultBase
{
public:
virtual FaultName name() const = 0;
#include "base/misc.hh"
#include "mem/request.hh"
-#include "sim/fault_fwd.hh"
#include "sim/sim_object.hh"
class ThreadContext;