2 * Copyright (c) 2010-2013,2016-2018 ARM Limited
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
14 * Copyright (c) 2007-2008 The Florida State University
15 * All rights reserved.
17 * Redistribution and use in source and binary forms, with or without
18 * modification, are permitted provided that the following conditions are
19 * met: redistributions of source code must retain the above copyright
20 * notice, this list of conditions and the following disclaimer;
21 * redistributions in binary form must reproduce the above copyright
22 * notice, this list of conditions and the following disclaimer in the
23 * documentation and/or other materials provided with the distribution;
24 * neither the name of the copyright holders nor the names of its
25 * contributors may be used to endorse or promote products derived from
26 * this software without specific prior written permission.
28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
29 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
30 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
31 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
32 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
33 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
34 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
35 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
36 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
37 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
38 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
41 #ifndef __ARCH_ARM_INSTS_STATICINST_HH__
42 #define __ARCH_ARM_INSTS_STATICINST_HH__
#include "arch/arm/faults.hh"
#include "arch/arm/isa.hh"
#include "arch/arm/self_debug.hh"
#include "arch/arm/system.hh"
#include "arch/arm/utility.hh"
#include "base/trace.hh"
#include "cpu/exec_context.hh"
#include "cpu/static_inst.hh"
#include "sim/byteswap.hh"
#include "sim/full_system.hh"
60 class ArmStaticInst : public StaticInst
    /// AArch32 shifter-operand helpers. `type` is the ArmShiftType
    /// encoding and `cfval` the current carry-flag value (consumed by
    /// RRX-style shifts).
    int32_t shift_rm_imm(uint32_t base, uint32_t shamt,
                         uint32_t type, uint32_t cfval) const;
    /// As shift_rm_imm, but the shift amount was read from a register.
    int32_t shift_rm_rs(uint32_t base, uint32_t shamt,
                        uint32_t type, uint32_t cfval) const;

    /// Carry-out produced by an immediate-shift shifter operand.
    bool shift_carry_imm(uint32_t base, uint32_t shamt,
                         uint32_t type, uint32_t cfval) const;
    /// Carry-out produced by a register-shift shifter operand.
    bool shift_carry_rs(uint32_t base, uint32_t shamt,
                        uint32_t type, uint32_t cfval) const;

    /// AArch64 shift of `base` by `shiftAmt` within a `width`-bit operand.
    int64_t shiftReg64(uint64_t base, uint64_t shiftAmt,
                       ArmShiftType type, uint8_t width) const;
    /// AArch64 extend-then-shift operand helper (UXT*/SXT* forms).
    int64_t extendReg64(uint64_t base, ArmExtendType type,
                        uint64_t shiftAmt, uint8_t width) const;
    // NOTE(review): decimated view — the template header declaring `width`,
    // the enclosing braces, the else keyword and the return statements of
    // this helper are on non-visible lines.
    // Signed saturating add (subtract when `sub`) into a width-bit result.
    saturateOp(int32_t &res, int64_t op1, int64_t op2, bool sub=false)
        // Full-precision intermediate result.
        int64_t midRes = sub ? (op1 - op2) : (op1 + op2);
        // Overflow iff bit `width` disagrees with the sign bit `width - 1`.
        if (bits(midRes, width) != bits(midRes, width - 1)) {
            // Clamp to the most positive width-bit value...
            res = (LL(1) << (width - 1)) - 1;
            // ...or (negative-overflow arm, not visible here) the most
            // negative one.
            res = -(LL(1) << (width - 1));
    // Saturate `op` into a signed `width`-bit range.
    // NOTE(review): decimated view — the non-visible lines presumably
    // decrement `width` first and return whether clamping occurred;
    // confirm against the full source.
    satInt(int32_t &res, int64_t op, int width)
        if (op >= (LL(1) << width)) {
            // Positive clamp.
            res = (LL(1) << width) - 1;
        } else if (op < -(LL(1) << width)) {
            // Negative clamp.
            res = -(LL(1) << width);
    // Unsigned saturating add (subtract when `sub`) into a width-bit result.
    // NOTE(review): decimated view — template header, braces, and the
    // negative-clamp/return lines are not visible here.
    uSaturateOp(uint32_t &res, int64_t op1, int64_t op2, bool sub=false)
        // Full-precision intermediate result.
        int64_t midRes = sub ? (op1 - op2) : (op1 + op2);
        if (midRes >= (LL(1) << width)) {
            // Clamp to the all-ones width-bit value.
            res = (LL(1) << width) - 1;
        } else if (midRes < 0) {
    // Saturate `op` into an unsigned `width`-bit range.
    // NOTE(review): decimated view — braces, the negative branch, and the
    // return statements are on non-visible lines.
    uSatInt(int32_t &res, int64_t op, int width)
        if (op >= (LL(1) << width)) {
            // Positive clamp to the largest width-bit value.
            res = (LL(1) << width) - 1;
    // Constructor: caches the AArch64 flag and the operand width implied
    // by the encoding.
    // NOTE(review): decimated view — the OpClass parameter line, the body
    // braces, and the else keyword are not visible here.
    ArmStaticInst(const char *mnem, ExtMachInst _machInst,
        : StaticInst(mnem, _machInst, __opClass)
        aarch64 = machInst.aarch64;
        if (bits(machInst, 28, 24) == 0x10)
            intWidth = 64; // Force 64-bit width for ADR/ADRP
            // Otherwise the sf bit (31) selects 64- vs 32-bit operands.
            intWidth = (aarch64 && bits(machInst, 31)) ? 64 : 32;
    /// Print a register name for disassembly given the unique
    /// dependence tag number (FP or int).
    void printIntReg(std::ostream &os, RegIndex reg_idx,
                     uint8_t opWidth = 0) const;
    void printFloatReg(std::ostream &os, RegIndex reg_idx) const;
    void printVecReg(std::ostream &os, RegIndex reg_idx,
                     bool isSveVecReg = false) const;
    void printVecPredReg(std::ostream &os, RegIndex reg_idx) const;
    void printCCReg(std::ostream &os, RegIndex reg_idx) const;
    void printMiscReg(std::ostream &os, RegIndex reg_idx) const;
    /// Print the mnemonic, optionally with a suffix, predicate and/or
    /// AArch64 condition code.
    void printMnemonic(std::ostream &os,
                       const std::string &suffix = "",
                       bool withPred = true,
                       bool withCond64 = false,
                       ConditionCode cond64 = COND_UC) const;
    /// Print a branch target, symbolically when `symtab` can resolve it.
    void printTarget(std::ostream &os, Addr target,
                     const Loader::SymbolTable *symtab) const;
    /// Print the condition mnemonic for `code`.
    void printCondition(std::ostream &os, unsigned code,
                        bool noImplicit=false) const;
    /// Print a memory address, symbolically when possible.
    void printMemSymbol(std::ostream &os, const Loader::SymbolTable *symtab,
                        const std::string &prefix, const Addr addr,
                        const std::string &suffix) const;
    /// Print a shifter operand (register with immediate/register shift).
    void printShiftOperand(std::ostream &os, IntRegIndex rm,
                           bool immShift, uint32_t shiftAmt,
                           IntRegIndex rs, ArmShiftType type) const;
    /// Print an extended-register operand (AArch64 extend forms).
    void printExtendOperand(bool firstOperand, std::ostream &os,
                            IntRegIndex rm, ArmExtendType type,
                            int64_t shiftAmt) const;
    /// Print prefetch (PLD/PLI-style) flag bits.
    void printPFflags(std::ostream &os, int flag) const;

    /// Disassembly helpers for data-processing instructions.
    void printDataInst(std::ostream &os, bool withImm) const;
    // NOTE(review): decimated view — the tail of this overload's
    // parameter list is on a non-visible line.
    void printDataInst(std::ostream &os, bool withImm, bool immShift, bool s,
                       IntRegIndex rd, IntRegIndex rn, IntRegIndex rm,
                       IntRegIndex rs, uint32_t shiftAmt, ArmShiftType type,

    // NOTE(review): return-type line and body of advancePC are not
    // visible in this chunk.
    advancePC(PCState &pcState) const override

    std::string generateDisassembly(
            Addr pc, const Loader::SymbolTable *symtab) const override;
    // Fetch the ISA's SelfDebug object for this thread; the use made of
    // `sd` is on non-visible lines (presumably (re)arming breakpoints —
    // confirm against the full source).
    activateBreakpoint(ThreadContext *tc)
        auto *isa = static_cast<ArmISA::ISA *>(tc->getIsaPtr());
        SelfDebug * sd = isa->getSelfDebug();
211 static inline uint32_t
212 cpsrWriteByInstr(CPSR cpsr, uint32_t val, SCR scr, NSACR nsacr,
213 uint8_t byteMask, bool affectState, bool nmfi, ThreadContext *tc)
215 bool privileged = (cpsr.mode != MODE_USER);
216 bool haveVirt = ArmSystem::haveVirtualization(tc);
217 bool haveSecurity = ArmSystem::haveSecurity(tc);
218 bool isSecure = inSecureState(scr, cpsr) || !haveSecurity;
220 uint32_t bitMask = 0;
222 if (affectState && byteMask==0xF){
223 activateBreakpoint(tc);
225 if (bits(byteMask, 3)) {
226 unsigned lowIdx = affectState ? 24 : 27;
227 bitMask = bitMask | mask(31, lowIdx);
229 if (bits(byteMask, 2)) {
230 bitMask = bitMask | mask(19, 16);
232 if (bits(byteMask, 1)) {
233 unsigned highIdx = affectState ? 15 : 9;
234 unsigned lowIdx = (privileged && (isSecure || scr.aw || haveVirt))
236 bitMask = bitMask | mask(highIdx, lowIdx);
238 if (bits(byteMask, 0)) {
241 if ( (!nmfi || !((val >> 6) & 0x1)) &&
242 (isSecure || scr.fw || haveVirt) ) {
245 // Now check the new mode is allowed
246 OperatingMode newMode = (OperatingMode) (val & mask(5));
247 OperatingMode oldMode = (OperatingMode)(uint32_t)cpsr.mode;
248 if (!badMode(tc, newMode)) {
249 bool validModeChange = true;
250 // Check for attempts to enter modes only permitted in
251 // Secure state from Non-secure state. These are Monitor
252 // mode ('10110'), and FIQ mode ('10001') if the Security
253 // Extensions have reserved it.
254 if (!isSecure && newMode == MODE_MON)
255 validModeChange = false;
256 if (!isSecure && newMode == MODE_FIQ && nsacr.rfr == '1')
257 validModeChange = false;
258 // There is no Hyp mode ('11010') in Secure state, so that
260 if (scr.ns == 0 && newMode == MODE_HYP)
261 validModeChange = false;
262 // Cannot move into Hyp mode directly from a Non-secure
264 if (!isSecure && oldMode != MODE_HYP && newMode == MODE_HYP)
265 validModeChange = false;
266 // Cannot move out of Hyp mode with this function except
267 // on an exception return
268 if (oldMode == MODE_HYP && newMode != MODE_HYP && !affectState)
269 validModeChange = false;
270 // Must not change to 64 bit when running in 32 bit mode
271 if (!opModeIs64(oldMode) && opModeIs64(newMode))
272 validModeChange = false;
274 // If we passed all of the above then set the bit mask to
275 // copy the mode accross
276 if (validModeChange) {
277 bitMask = bitMask | mask(5);
279 warn_once("Illegal change to CPSR mode attempted\n");
282 warn_once("Ignoring write of bad mode to CPSR.\n");
286 bitMask = bitMask | (1 << 5);
289 return ((uint32_t)cpsr & ~bitMask) | (val & bitMask);
292 static inline uint32_t
293 spsrWriteByInstr(uint32_t spsr, uint32_t val,
294 uint8_t byteMask, bool affectState)
296 uint32_t bitMask = 0;
298 if (bits(byteMask, 3))
299 bitMask = bitMask | mask(31, 24);
300 if (bits(byteMask, 2))
301 bitMask = bitMask | mask(19, 16);
302 if (bits(byteMask, 1))
303 bitMask = bitMask | mask(15, 8);
304 if (bits(byteMask, 0))
305 bitMask = bitMask | mask(7, 0);
307 return ((spsr & ~bitMask) | (val & bitMask));
    // Read the PC of the instruction currently executing.
    readPC(ExecContext *xc)
        return xc->pcState().instPC();

    // Set the next PC in the execution context.
    // NOTE(review): decimated view — the rest of the body (updating and
    // storing `pc`) is on non-visible lines.
    setNextPC(ExecContext *xc, Addr val)
        PCState pc = xc->pcState();
    // Scalar conditional byte-swap: return `val` big- or little-endian.
    // NOTE(review): decimated view — return type, template header and
    // body are on non-visible lines.
    cSwap(T val, bool big)

    // Element-wise conditional byte-swap: treat T as `count` elements of
    // type E and swap the bytes within each element (not the element
    // order). NOTE(review): the union declaration and the if/else around
    // the two loops are on non-visible lines.
    template<class T, class E>
    cSwap(T val, bool big)
        const unsigned count = sizeof(T) / sizeof(E);
        conv.tVal = htole(val);
        // Big-endian case: swap each element in place.
        for (unsigned i = 0; i < count; i++) {
            conv.eVals[i] = letobe(conv.eVals[i]);
        // Little-endian arm (presumably the else branch — confirm):
        // deliberate no-op kept for structural symmetry.
        for (unsigned i = 0; i < count; i++) {
            conv.eVals[i] = conv.eVals[i];
        return letoh(conv.tVal);
    // Perform an interworking branch.
    // NOTE(review): rest of body on non-visible lines.
    setIWNextPC(ExecContext *xc, Addr val)
        PCState pc = xc->pcState();

    // Perform an interworking branch in ARM mode, a regular branch
    // otherwise. NOTE(review): rest of body on non-visible lines.
    setAIWNextPC(ExecContext *xc, Addr val)
        PCState pc = xc->pcState();
    // Build the fault raised when this instruction is disabled.
    // NOTE(review): the remaining UndefinedInstruction constructor
    // arguments are on a non-visible line.
    disabledFault() const
        return std::make_shared<UndefinedInstruction>(machInst, false,

    // Utility function used by checkForWFxTrap32 and checkForWFxTrap64
    // Returns true if processor has to trap a WFI/WFE instruction.
    bool isWFxTrapping(ThreadContext *tc,
                       ExceptionLevel targetEL, bool isWfe) const;
    /**
     * Trigger a Software Breakpoint.
     *
     * See aarch32/exceptions/debug/AArch32.SoftwareBreakpoint in the
     * ARM ARM pseudocode library.
     */
    Fault softwareBreakpoint32(ExecContext *xc, uint16_t imm) const;

    /**
     * Trap an access to Advanced SIMD or FP registers due to access
     * control bits.
     *
     * See aarch64/exceptions/traps/AArch64.AdvSIMDFPAccessTrap in the
     * ARM ARM pseudocode library.
     *
     * @param el Target EL for the trap
     */
    Fault advSIMDFPAccessTrap64(ExceptionLevel el) const;

    /**
     * Check an Advanced SIMD access against CPTR_EL2 and CPTR_EL3.
     *
     * See aarch64/exceptions/traps/AArch64.CheckFPAdvSIMDTrap in the
     * ARM ARM pseudocode library.
     */
    Fault checkFPAdvSIMDTrap64(ThreadContext *tc, CPSR cpsr) const;

    /**
     * Check an Advanced SIMD access against CPACR_EL1, CPTR_EL2, and
     * CPTR_EL3.
     *
     * See aarch64/exceptions/traps/AArch64.CheckFPAdvSIMDEnabled in the
     * ARM ARM pseudocode library.
     */
    Fault checkFPAdvSIMDEnabled64(ThreadContext *tc,
                                  CPSR cpsr, CPACR cpacr) const;

    /**
     * Check if a VFP/SIMD access from aarch32 should be allowed.
     *
     * See aarch32/exceptions/traps/AArch32.CheckAdvSIMDOrFPEnabled in the
     * ARM ARM pseudocode library.
     */
    Fault checkAdvSIMDOrFPEnabled32(ThreadContext *tc,
                                    CPSR cpsr, CPACR cpacr,
                                    NSACR nsacr, FPEXC fpexc,
                                    bool fpexc_check, bool advsimd) const;

    /**
     * Check if WFE/WFI instruction execution in aarch32 should be trapped.
     *
     * See aarch32/exceptions/traps/AArch32.checkForWFxTrap in the
     * ARM ARM pseudocode library.
     */
    Fault checkForWFxTrap32(ThreadContext *tc,
                            ExceptionLevel tgtEl, bool isWfe) const;

    /**
     * Check if WFE/WFI instruction execution in aarch64 should be trapped.
     *
     * See aarch64/exceptions/traps/AArch64.checkForWFxTrap in the
     * ARM ARM pseudocode library.
     */
    Fault checkForWFxTrap64(ThreadContext *tc,
                            ExceptionLevel tgtEl, bool isWfe) const;

    /**
     * WFE/WFI trapping helper function.
     */
    Fault trapWFx(ThreadContext *tc, CPSR cpsr, SCR scr, bool isWfe) const;

    /**
     * Check if SETEND instruction execution in aarch32 should be trapped.
     *
     * See aarch32/exceptions/traps/AArch32.CheckSETENDEnabled in the
     * ARM ARM pseudocode library.
     */
    Fault checkSETENDEnabled(ThreadContext *tc, CPSR cpsr) const;

    /**
     * UNDEFINED behaviour in AArch32.
     *
     * See aarch32/exceptions/traps/AArch32.UndefinedFault in the
     * ARM ARM pseudocode library.
     */
    Fault undefinedFault32(ThreadContext *tc, ExceptionLevel el) const;

    /**
     * UNDEFINED behaviour in AArch64.
     *
     * See aarch64/exceptions/traps/AArch64.UndefinedFault in the
     * ARM ARM pseudocode library.
     */
    Fault undefinedFault64(ThreadContext *tc, ExceptionLevel el) const;

    /**
     * Trap an access to SVE registers due to access control bits.
     *
     * @param el Target EL for the trap.
     */
    Fault sveAccessTrap(ExceptionLevel el) const;

    /**
     * Check an SVE access against CPACR_EL1, CPTR_EL2, and CPTR_EL3.
     */
    Fault checkSveEnabled(ThreadContext *tc, CPSR cpsr, CPACR cpacr) const;

    /**
     * Get the new PSTATE from a SPSR register in preparation for an
     * exception return.
     *
     * See shared/functions/system/SetPSTATEFromPSR in the ARM ARM
     * pseudocode library.
     */
    CPSR getPSTATEFromPSR(ThreadContext *tc, CPSR cpsr, CPSR spsr) const;

    /**
     * Return true if exceptions normally routed to EL1 are being handled
     * at an Exception level using AArch64, because either EL1 is using
     * AArch64 or TGE is in force and EL2 is using AArch64.
     *
     * See aarch32/exceptions/exceptions/AArch32.GeneralExceptionsToAArch64
     * in the ARM ARM pseudocode library.
     */
    bool generalExceptionsToAArch64(ThreadContext *tc,
                                    ExceptionLevel pstateEL) const;
    // Fault-annotation hook; the base implementation does nothing.
    annotateFault(ArmFault *fault) {}

    /** Returns the byte size of current instruction */
    // NOTE(review): decimated view — signature lines of the following
    // accessors are on non-visible lines.
    // 32-bit ARM and wide Thumb encodings are 4 bytes; narrow Thumb is 2.
        return (!machInst.thumb || machInst.bigThumb) ? 4 : 2;

    /**
     * Returns the real encoding of the instruction:
     * the machInst field is in fact always 64 bit wide and
     * contains some instruction metadata, which means it differs
     * from the real opcode.
     */
        return static_cast<MachInst>(machInst & (mask(instSize() * 8)));

    /// Copy the raw encoding into `buf` (up to max_size bytes).
    asBytes(void *buf, size_t max_size) override
        return simpleAsBytes(buf, max_size, machInst);

    /// Current SVE vector length in bits for the given thread.
    static unsigned getCurSveVecLenInBits(ThreadContext *tc);

    /// SVE vector length in 64-bit quadwords (bits / 64).
    getCurSveVecLenInQWords(ThreadContext *tc)
        return getCurSveVecLenInBits(tc) >> 6;

    /// SVE vector length in elements of type T.
    getCurSveVecLen(ThreadContext *tc)
        return getCurSveVecLenInBits(tc) / (8 * sizeof(T));
568 #endif //__ARCH_ARM_INSTS_STATICINST_HH__