/*
 * Copyright (c) 2010-2014, 2016 ARM Limited
 * Copyright (c) 2013 Advanced Micro Devices, Inc.
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2007-2008 The Florida State University
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Stephen Hines
 */
#include "arch/arm/insts/static_inst.hh"

#include <cassert>
#include <cstdlib>
#include <iostream>
#include <memory>
#include <sstream>

#include "arch/arm/faults.hh"
#include "base/condcodes.hh"
#include "base/cprintf.hh"
#include "base/loader/symtab.hh"
#include "cpu/reg_class.hh"
54 // Shift Rm by an immediate value
56 ArmStaticInst::shift_rm_imm(uint32_t base
, uint32_t shamt
,
57 uint32_t type
, uint32_t cfval
) const
60 ArmShiftType shiftType
;
61 shiftType
= (ArmShiftType
)type
;
74 return (base
>> 31) | -((base
& (1 << 31)) >> 31);
76 return (base
>> shamt
) | -((base
& (1 << 31)) >> shamt
);
79 return (cfval
<< 31) | (base
>> 1); // RRX
81 return (base
<< (32 - shamt
)) | (base
>> shamt
);
83 ccprintf(std::cerr
, "Unhandled shift type\n");
91 ArmStaticInst::shiftReg64(uint64_t base
, uint64_t shiftAmt
,
92 ArmShiftType type
, uint8_t width
) const
94 shiftAmt
= shiftAmt
% width
;
95 ArmShiftType shiftType
;
96 shiftType
= (ArmShiftType
)type
;
101 return base
<< shiftAmt
;
106 return (base
& mask(width
)) >> shiftAmt
;
111 int sign_bit
= bits(base
, intWidth
- 1);
113 base
= sign_bit
? (base
| ~mask(intWidth
- shiftAmt
)) : base
;
114 return base
& mask(intWidth
);
120 return (base
<< (width
- shiftAmt
)) | (base
>> shiftAmt
);
122 ccprintf(std::cerr
, "Unhandled shift type\n");
130 ArmStaticInst::extendReg64(uint64_t base
, ArmExtendType type
,
131 uint64_t shiftAmt
, uint8_t width
) const
133 bool sign_extend
= false;
165 len
= len
<= width
- shiftAmt
? len
: width
- shiftAmt
;
166 uint64_t tmp
= (uint64_t) bits(base
, len
- 1, 0) << shiftAmt
;
168 int sign_bit
= bits(tmp
, len
+ shiftAmt
- 1);
169 tmp
= sign_bit
? (tmp
| ~mask(len
+ shiftAmt
)) : tmp
;
171 return tmp
& mask(width
);
176 ArmStaticInst::shift_rm_rs(uint32_t base
, uint32_t shamt
,
177 uint32_t type
, uint32_t cfval
) const
179 enum ArmShiftType shiftType
;
180 shiftType
= (enum ArmShiftType
) type
;
188 return base
<< shamt
;
193 return base
>> shamt
;
196 return (base
>> 31) | -((base
& (1 << 31)) >> 31);
198 return (base
>> shamt
) | -((base
& (1 << 31)) >> shamt
);
200 shamt
= shamt
& 0x1f;
204 return (base
<< (32 - shamt
)) | (base
>> shamt
);
206 ccprintf(std::cerr
, "Unhandled shift type\n");
214 // Generate C for a shift by immediate
216 ArmStaticInst::shift_carry_imm(uint32_t base
, uint32_t shamt
,
217 uint32_t type
, uint32_t cfval
) const
219 enum ArmShiftType shiftType
;
220 shiftType
= (enum ArmShiftType
) type
;
228 return (base
>> (32 - shamt
)) & 1;
233 return (base
>> (shamt
- 1)) & 1;
238 return (base
>> (shamt
- 1)) & 1;
240 shamt
= shamt
& 0x1f;
242 return (base
& 1); // RRX
244 return (base
>> (shamt
- 1)) & 1;
246 ccprintf(std::cerr
, "Unhandled shift type\n");
254 // Generate C for a shift by Rs
256 ArmStaticInst::shift_carry_rs(uint32_t base
, uint32_t shamt
,
257 uint32_t type
, uint32_t cfval
) const
259 enum ArmShiftType shiftType
;
260 shiftType
= (enum ArmShiftType
) type
;
271 return (base
>> (32 - shamt
)) & 1;
276 return (base
>> (shamt
- 1)) & 1;
280 return (base
>> (shamt
- 1)) & 1;
282 shamt
= shamt
& 0x1f;
285 return (base
>> (shamt
- 1)) & 1;
287 ccprintf(std::cerr
, "Unhandled shift type\n");
295 ArmStaticInst::printIntReg(std::ostream
&os
, RegIndex reg_idx
) const
298 if (reg_idx
== INTREG_UREG0
)
299 ccprintf(os
, "ureg0");
300 else if (reg_idx
== INTREG_SPX
)
301 ccprintf(os
, "%s%s", (intWidth
== 32) ? "w" : "", "sp");
302 else if (reg_idx
== INTREG_X31
)
303 ccprintf(os
, "%szr", (intWidth
== 32) ? "w" : "x");
305 ccprintf(os
, "%s%d", (intWidth
== 32) ? "w" : "x", reg_idx
);
311 case StackPointerReg
:
314 case FramePointerReg
:
317 case ReturnAddressReg
:
321 ccprintf(os
, "r%d", reg_idx
);
328 ArmStaticInst::printFloatReg(std::ostream
&os
, RegIndex reg_idx
) const
330 ccprintf(os
, "f%d", reg_idx
);
334 ArmStaticInst::printVecReg(std::ostream
&os
, RegIndex reg_idx
) const
336 ccprintf(os
, "v%d", reg_idx
);
340 ArmStaticInst::printCCReg(std::ostream
&os
, RegIndex reg_idx
) const
342 ccprintf(os
, "cc_%s", ArmISA::ccRegName
[reg_idx
]);
346 ArmStaticInst::printMiscReg(std::ostream
&os
, RegIndex reg_idx
) const
348 assert(reg_idx
< NUM_MISCREGS
);
349 ccprintf(os
, "%s", ArmISA::miscRegName
[reg_idx
]);
353 ArmStaticInst::printMnemonic(std::ostream
&os
,
354 const std::string
&suffix
,
357 ConditionCode cond64
) const
359 os
<< " " << mnemonic
;
360 if (withPred
&& !aarch64
) {
361 printCondition(os
, machInst
.condCode
);
363 } else if (withCond64
) {
365 printCondition(os
, cond64
);
368 if (machInst
.bigThumb
)
374 ArmStaticInst::printTarget(std::ostream
&os
, Addr target
,
375 const SymbolTable
*symtab
) const
380 if (symtab
&& symtab
->findNearestSymbol(target
, symbol
, symbolAddr
)) {
381 ccprintf(os
, "<%s", symbol
);
382 if (symbolAddr
!= target
)
383 ccprintf(os
, "+%d>", target
- symbolAddr
);
387 ccprintf(os
, "%#x", target
);
392 ArmStaticInst::printCondition(std::ostream
&os
,
394 bool noImplicit
) const
440 // This one is implicit.
450 panic("Unrecognized condition code %d.\n", code
);
455 ArmStaticInst::printMemSymbol(std::ostream
&os
,
456 const SymbolTable
*symtab
,
457 const std::string
&prefix
,
459 const std::string
&suffix
) const
463 if (symtab
&& symtab
->findNearestSymbol(addr
, symbol
, symbolAddr
)) {
464 ccprintf(os
, "%s%s", prefix
, symbol
);
465 if (symbolAddr
!= addr
)
466 ccprintf(os
, "+%d", addr
- symbolAddr
);
467 ccprintf(os
, suffix
);
// NOTE(review): this region was damaged in extraction -- the parameter
// list and most of the switch bodies are missing, so the surviving
// fragments are left byte-for-byte as found rather than reconstructed.
//
// From what remains: prints a shifted-register operand for
// disassembly -- register rm, then the shift kind and amount (an
// immediate amount, "#shiftAmt", or a shift-by-register rs).  The
// shamt == 0 special cases for LSR/ASR (meaning #32) and for
// LSL/ROR (no shift / RRX) appear to be handled, and an
// unrecognized shift type panics.  TODO: restore from upstream.
472 ArmStaticInst::printShiftOperand(std::ostream
&os
,
477 ArmShiftType type
) const
479 bool firstOp
= false;
481 if (rm
!= INTREG_ZERO
) {
487 if ((type
== LSR
|| type
== ASR
) && immShift
&& shiftAmt
== 0)
492 if (immShift
&& shiftAmt
== 0) {
511 if (immShift
&& shiftAmt
== 0) {
523 panic("Tried to disassemble unrecognized shift type.\n");
529 os
<< "#" << shiftAmt
;
536 ArmStaticInst::printExtendOperand(bool firstOperand
, std::ostream
&os
,
537 IntRegIndex rm
, ArmExtendType type
,
538 int64_t shiftAmt
) const
543 if (type
== UXTX
&& shiftAmt
== 0)
546 case UXTB
: ccprintf(os
, ", UXTB");
548 case UXTH
: ccprintf(os
, ", UXTH");
550 case UXTW
: ccprintf(os
, ", UXTW");
552 case UXTX
: ccprintf(os
, ", LSL");
554 case SXTB
: ccprintf(os
, ", SXTB");
556 case SXTH
: ccprintf(os
, ", SXTH");
558 case SXTW
: ccprintf(os
, ", SXTW");
560 case SXTX
: ccprintf(os
, ", SXTW");
563 if (type
== UXTX
|| shiftAmt
)
564 ccprintf(os
, " #%d", shiftAmt
);
568 ArmStaticInst::printDataInst(std::ostream
&os
, bool withImm
,
569 bool immShift
, bool s
, IntRegIndex rd
, IntRegIndex rn
,
570 IntRegIndex rm
, IntRegIndex rs
, uint32_t shiftAmt
,
571 ArmShiftType type
, uint64_t imm
) const
573 printMnemonic(os
, s
? "s" : "");
577 if (rd
!= INTREG_ZERO
) {
583 if (rn
!= INTREG_ZERO
) {
593 ccprintf(os
, "#%ld", imm
);
595 printShiftOperand(os
, rm
, immShift
, shiftAmt
, rs
, type
);
600 ArmStaticInst::generateDisassembly(Addr pc
,
601 const SymbolTable
*symtab
) const
603 std::stringstream ss
;
610 ArmStaticInst::advSIMDFPAccessTrap64(ExceptionLevel el
) const
614 return std::make_shared
<SupervisorTrap
>(machInst
, 0x1E00000,
617 return std::make_shared
<HypervisorTrap
>(machInst
, 0x1E00000,
620 return std::make_shared
<SecureMonitorTrap
>(machInst
, 0x1E00000,
624 panic("Illegal EL in advSIMDFPAccessTrap64\n");
630 ArmStaticInst::checkFPAdvSIMDTrap64(ThreadContext
*tc
, CPSR cpsr
) const
632 const ExceptionLevel el
= (ExceptionLevel
) (uint8_t)cpsr
.el
;
634 if (ArmSystem::haveVirtualization(tc
) && el
<= EL2
) {
635 HCPTR cptrEnCheck
= tc
->readMiscReg(MISCREG_CPTR_EL2
);
637 return advSIMDFPAccessTrap64(EL2
);
640 if (ArmSystem::haveSecurity(tc
)) {
641 HCPTR cptrEnCheck
= tc
->readMiscReg(MISCREG_CPTR_EL3
);
643 return advSIMDFPAccessTrap64(EL3
);
650 ArmStaticInst::checkFPAdvSIMDEnabled64(ThreadContext
*tc
,
651 CPSR cpsr
, CPACR cpacr
) const
653 const ExceptionLevel el
= (ExceptionLevel
) (uint8_t)cpsr
.el
;
654 if ((el
== EL0
&& cpacr
.fpen
!= 0x3) ||
655 (el
== EL1
&& !(cpacr
.fpen
& 0x1)))
656 return advSIMDFPAccessTrap64(EL1
);
658 return checkFPAdvSIMDTrap64(tc
, cpsr
);
662 ArmStaticInst::checkAdvSIMDOrFPEnabled32(ThreadContext
*tc
,
663 CPSR cpsr
, CPACR cpacr
,
664 NSACR nsacr
, FPEXC fpexc
,
665 bool fpexc_check
, bool advsimd
) const
667 const bool have_virtualization
= ArmSystem::haveVirtualization(tc
);
668 const bool have_security
= ArmSystem::haveSecurity(tc
);
669 const bool is_secure
= inSecureState(tc
);
670 const ExceptionLevel cur_el
= opModeToEL(currOpMode(tc
));
672 if (cur_el
== EL0
&& ELIs64(tc
, EL1
))
673 return checkFPAdvSIMDEnabled64(tc
, cpsr
, cpacr
);
675 uint8_t cpacr_cp10
= cpacr
.cp10
;
676 bool cpacr_asedis
= cpacr
.asedis
;
678 if (have_security
&& !ELIs64(tc
, EL3
) && !is_secure
) {
686 if (advsimd
&& cpacr_asedis
)
687 return disabledFault();
689 if ((cur_el
== EL0
&& cpacr_cp10
!= 0x3) ||
690 (cur_el
!= EL0
&& !(cpacr_cp10
& 0x1)))
691 return disabledFault();
694 if (fpexc_check
&& !fpexc
.en
)
695 return disabledFault();
697 // -- aarch32/exceptions/traps/AArch32.CheckFPAdvSIMDTrap --
699 if (have_virtualization
&& !is_secure
&& ELIs64(tc
, EL2
))
700 return checkFPAdvSIMDTrap64(tc
, cpsr
);
702 if (have_virtualization
&& !is_secure
) {
703 HCPTR hcptr
= tc
->readMiscReg(MISCREG_HCPTR
);
704 bool hcptr_cp10
= hcptr
.tcp10
;
705 bool hcptr_tase
= hcptr
.tase
;
707 if (have_security
&& !ELIs64(tc
, EL3
) && !is_secure
) {
714 if ((advsimd
&& hcptr_tase
) || hcptr_cp10
) {
715 const uint32_t iss
= advsimd
? (1 << 5) : 0xA;
717 return std::make_shared
<UndefinedInstruction
>(
719 EC_TRAPPED_HCPTR
, mnemonic
);
721 return std::make_shared
<HypervisorTrap
>(
729 if (have_security
&& ELIs64(tc
, EL3
)) {
730 HCPTR cptrEnCheck
= tc
->readMiscReg(MISCREG_CPTR_EL3
);
732 return advSIMDFPAccessTrap64(EL3
);
740 getRestoredITBits(ThreadContext
*tc
, CPSR spsr
)
742 // See: shared/functions/system/RestoredITBits in the ARM ARM
744 const ExceptionLevel el
= opModeToEL((OperatingMode
) (uint8_t)spsr
.mode
);
745 const uint8_t it
= itState(spsr
);
747 if (!spsr
.t
|| spsr
.il
)
750 // The IT bits are forced to zero when they are set to a reserved
752 if (bits(it
, 7, 4) != 0 && bits(it
, 3, 0) == 0)
755 const bool itd
= el
== EL2
?
756 ((SCTLR
)tc
->readMiscReg(MISCREG_HSCTLR
)).itd
:
757 ((SCTLR
)tc
->readMiscReg(MISCREG_SCTLR
)).itd
;
759 // The IT bits are forced to zero when returning to A32 state, or
760 // when returning to an EL with the ITD bit set to 1, and the IT
761 // bits are describing a multi-instruction block.
762 if (itd
&& bits(it
, 2, 0) != 0)
769 illegalExceptionReturn(ThreadContext
*tc
, CPSR cpsr
, CPSR spsr
)
771 const OperatingMode mode
= (OperatingMode
) (uint8_t)spsr
.mode
;
775 const OperatingMode cur_mode
= (OperatingMode
) (uint8_t)cpsr
.mode
;
776 const ExceptionLevel target_el
= opModeToEL(mode
);
777 if (target_el
> opModeToEL(cur_mode
))
780 if (target_el
== EL3
&& !ArmSystem::haveSecurity(tc
))
783 if (target_el
== EL2
&& !ArmSystem::haveVirtualization(tc
))
788 if (!ArmSystem::highestELIs64(tc
))
793 if (target_el
== EL0
&& spsr
.sp
)
795 if (target_el
== EL2
&& !((SCR
)tc
->readMiscReg(MISCREG_SCR_EL3
)).ns
)
798 return badMode32(mode
);
805 ArmStaticInst::getPSTATEFromPSR(ThreadContext
*tc
, CPSR cpsr
, CPSR spsr
) const
809 // gem5 doesn't implement single-stepping, so force the SS bit to
813 if (illegalExceptionReturn(tc
, cpsr
, spsr
)) {
816 new_cpsr
.il
= spsr
.il
;
817 if (spsr
.width
&& badMode32((OperatingMode
)(uint8_t)spsr
.mode
)) {
819 } else if (spsr
.width
) {
820 new_cpsr
.mode
= spsr
.mode
;
822 new_cpsr
.el
= spsr
.el
;
823 new_cpsr
.sp
= spsr
.sp
;
827 new_cpsr
.nz
= spsr
.nz
;
830 if (new_cpsr
.width
) {
832 const ITSTATE it
= getRestoredITBits(tc
, spsr
);
834 new_cpsr
.ge
= spsr
.ge
;
836 new_cpsr
.aif
= spsr
.aif
;
838 new_cpsr
.it2
= it
.top6
;
839 new_cpsr
.it1
= it
.bottom2
;
842 new_cpsr
.daif
= spsr
.daif
;