1 /* tc-arm.c -- Assemble for the ARM
2 Copyright (C) 1994-2016 Free Software Foundation, Inc.
3 Contributed by Richard Earnshaw (rwe@pegasus.esprit.ec.org)
4 Modified by David Taylor (dtaylor@armltd.co.uk)
5 Cirrus coprocessor mods by Aldy Hernandez (aldyh@redhat.com)
6 Cirrus coprocessor fixes by Petko Manolov (petkan@nucleusys.com)
7 Cirrus coprocessor fixes by Vladimir Ivanov (vladitx@nucleusys.com)
9 This file is part of GAS, the GNU Assembler.
11 GAS is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License as published by
13 the Free Software Foundation; either version 3, or (at your option)
16 GAS is distributed in the hope that it will be useful,
17 but WITHOUT ANY WARRANTY; without even the implied warranty of
18 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 GNU General Public License for more details.
21 You should have received a copy of the GNU General Public License
22 along with GAS; see the file COPYING. If not, write to the Free
23 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
30 #include "safe-ctype.h"
33 #include "libiberty.h"
34 #include "opcode/arm.h"
38 #include "dw2gencfi.h"
41 #include "dwarf2dbg.h"
44 /* Must be at least the size of the largest unwind opcode (currently two). */
45 #define ARM_OPCODE_CHUNK_SIZE 8
47 /* This structure holds the unwinding state. */
52 symbolS
* table_entry
;
53 symbolS
* personality_routine
;
54 int personality_index
;
55 /* The segment containing the function. */
58 /* Opcodes generated from this function. */
59 unsigned char * opcodes
;
62 /* The number of bytes pushed to the stack. */
64 /* We don't add stack adjustment opcodes immediately so that we can merge
65 multiple adjustments. We can also omit the final adjustment
66 when using a frame pointer. */
67 offsetT pending_offset
;
68 /* These two fields are set by both unwind_movsp and unwind_setfp. They
69 hold the reg+offset to use when restoring sp from a frame pointer. */
72 /* Nonzero if an unwind_setfp directive has been seen. */
74 /* Nonzero if the last opcode restores sp from fp_reg. */
75 unsigned sp_restored
:1;
80 /* Results from operand parsing worker functions. */
84 PARSE_OPERAND_SUCCESS
,
86 PARSE_OPERAND_FAIL_NO_BACKTRACK
87 } parse_operand_result
;
96 /* Types of processor to assemble for. */
98 /* The code that was here used to select a default CPU depending on compiler
99 pre-defines which were only present when doing native builds, thus
100 changing gas' default behaviour depending upon the build host.
102 If you have a target that requires a default CPU option then the you
103 should define CPU_DEFAULT here. */
108 # define FPU_DEFAULT FPU_ARCH_FPA
109 # elif defined (TE_NetBSD)
111 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, but VFP order. */
113 /* Legacy a.out format. */
114 # define FPU_DEFAULT FPU_ARCH_FPA /* Soft-float, but FPA order. */
116 # elif defined (TE_VXWORKS)
117 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, VFP order. */
119 /* For backwards compatibility, default to FPA. */
120 # define FPU_DEFAULT FPU_ARCH_FPA
122 #endif /* ifndef FPU_DEFAULT */
124 #define streq(a, b) (strcmp (a, b) == 0)
126 static arm_feature_set cpu_variant
;
127 static arm_feature_set arm_arch_used
;
128 static arm_feature_set thumb_arch_used
;
130 /* Flags stored in private area of BFD structure. */
131 static int uses_apcs_26
= FALSE
;
132 static int atpcs
= FALSE
;
133 static int support_interwork
= FALSE
;
134 static int uses_apcs_float
= FALSE
;
135 static int pic_code
= FALSE
;
136 static int fix_v4bx
= FALSE
;
137 /* Warn on using deprecated features. */
138 static int warn_on_deprecated
= TRUE
;
140 /* Understand CodeComposer Studio assembly syntax. */
141 bfd_boolean codecomposer_syntax
= FALSE
;
143 /* Variables that we set while parsing command-line options. Once all
144 options have been read we re-process these values to set the real
146 static const arm_feature_set
*legacy_cpu
= NULL
;
147 static const arm_feature_set
*legacy_fpu
= NULL
;
149 static const arm_feature_set
*mcpu_cpu_opt
= NULL
;
150 static const arm_feature_set
*mcpu_fpu_opt
= NULL
;
151 static const arm_feature_set
*march_cpu_opt
= NULL
;
152 static const arm_feature_set
*march_fpu_opt
= NULL
;
153 static const arm_feature_set
*mfpu_opt
= NULL
;
154 static const arm_feature_set
*object_arch
= NULL
;
156 /* Constants for known architecture features. */
157 static const arm_feature_set fpu_default
= FPU_DEFAULT
;
158 static const arm_feature_set fpu_arch_vfp_v1
= FPU_ARCH_VFP_V1
;
159 static const arm_feature_set fpu_arch_vfp_v2
= FPU_ARCH_VFP_V2
;
160 static const arm_feature_set fpu_arch_vfp_v3
= FPU_ARCH_VFP_V3
;
161 static const arm_feature_set fpu_arch_neon_v1
= FPU_ARCH_NEON_V1
;
162 static const arm_feature_set fpu_arch_fpa
= FPU_ARCH_FPA
;
163 static const arm_feature_set fpu_any_hard
= FPU_ANY_HARD
;
164 static const arm_feature_set fpu_arch_maverick
= FPU_ARCH_MAVERICK
;
165 static const arm_feature_set fpu_endian_pure
= FPU_ARCH_ENDIAN_PURE
;
168 static const arm_feature_set cpu_default
= CPU_DEFAULT
;
171 static const arm_feature_set arm_ext_v1
= ARM_FEATURE_CORE_LOW (ARM_EXT_V1
);
172 static const arm_feature_set arm_ext_v2
= ARM_FEATURE_CORE_LOW (ARM_EXT_V1
);
173 static const arm_feature_set arm_ext_v2s
= ARM_FEATURE_CORE_LOW (ARM_EXT_V2S
);
174 static const arm_feature_set arm_ext_v3
= ARM_FEATURE_CORE_LOW (ARM_EXT_V3
);
175 static const arm_feature_set arm_ext_v3m
= ARM_FEATURE_CORE_LOW (ARM_EXT_V3M
);
176 static const arm_feature_set arm_ext_v4
= ARM_FEATURE_CORE_LOW (ARM_EXT_V4
);
177 static const arm_feature_set arm_ext_v4t
= ARM_FEATURE_CORE_LOW (ARM_EXT_V4T
);
178 static const arm_feature_set arm_ext_v5
= ARM_FEATURE_CORE_LOW (ARM_EXT_V5
);
179 static const arm_feature_set arm_ext_v4t_5
=
180 ARM_FEATURE_CORE_LOW (ARM_EXT_V4T
| ARM_EXT_V5
);
181 static const arm_feature_set arm_ext_v5t
= ARM_FEATURE_CORE_LOW (ARM_EXT_V5T
);
182 static const arm_feature_set arm_ext_v5e
= ARM_FEATURE_CORE_LOW (ARM_EXT_V5E
);
183 static const arm_feature_set arm_ext_v5exp
= ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP
);
184 static const arm_feature_set arm_ext_v5j
= ARM_FEATURE_CORE_LOW (ARM_EXT_V5J
);
185 static const arm_feature_set arm_ext_v6
= ARM_FEATURE_CORE_LOW (ARM_EXT_V6
);
186 static const arm_feature_set arm_ext_v6k
= ARM_FEATURE_CORE_LOW (ARM_EXT_V6K
);
187 static const arm_feature_set arm_ext_v6t2
= ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2
);
188 static const arm_feature_set arm_ext_v6m
= ARM_FEATURE_CORE_LOW (ARM_EXT_V6M
);
189 static const arm_feature_set arm_ext_v6_notm
=
190 ARM_FEATURE_CORE_LOW (ARM_EXT_V6_NOTM
);
191 static const arm_feature_set arm_ext_v6_dsp
=
192 ARM_FEATURE_CORE_LOW (ARM_EXT_V6_DSP
);
193 static const arm_feature_set arm_ext_barrier
=
194 ARM_FEATURE_CORE_LOW (ARM_EXT_BARRIER
);
195 static const arm_feature_set arm_ext_msr
=
196 ARM_FEATURE_CORE_LOW (ARM_EXT_THUMB_MSR
);
197 static const arm_feature_set arm_ext_div
= ARM_FEATURE_CORE_LOW (ARM_EXT_DIV
);
198 static const arm_feature_set arm_ext_v7
= ARM_FEATURE_CORE_LOW (ARM_EXT_V7
);
199 static const arm_feature_set arm_ext_v7a
= ARM_FEATURE_CORE_LOW (ARM_EXT_V7A
);
200 static const arm_feature_set arm_ext_v7r
= ARM_FEATURE_CORE_LOW (ARM_EXT_V7R
);
201 static const arm_feature_set arm_ext_v7m
= ARM_FEATURE_CORE_LOW (ARM_EXT_V7M
);
202 static const arm_feature_set arm_ext_v8
= ARM_FEATURE_CORE_LOW (ARM_EXT_V8
);
203 static const arm_feature_set arm_ext_m
=
204 ARM_FEATURE_CORE (ARM_EXT_V6M
| ARM_EXT_OS
| ARM_EXT_V7M
, ARM_EXT2_V8M
);
205 static const arm_feature_set arm_ext_mp
= ARM_FEATURE_CORE_LOW (ARM_EXT_MP
);
206 static const arm_feature_set arm_ext_sec
= ARM_FEATURE_CORE_LOW (ARM_EXT_SEC
);
207 static const arm_feature_set arm_ext_os
= ARM_FEATURE_CORE_LOW (ARM_EXT_OS
);
208 static const arm_feature_set arm_ext_adiv
= ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV
);
209 static const arm_feature_set arm_ext_virt
= ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT
);
210 static const arm_feature_set arm_ext_pan
= ARM_FEATURE_CORE_HIGH (ARM_EXT2_PAN
);
211 static const arm_feature_set arm_ext_v8m
= ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M
);
212 static const arm_feature_set arm_ext_v6t2_v8m
=
213 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V6T2_V8M
);
214 /* Instructions shared between ARMv8-A and ARMv8-M. */
215 static const arm_feature_set arm_ext_atomics
=
216 ARM_FEATURE_CORE_HIGH (ARM_EXT2_ATOMICS
);
217 static const arm_feature_set arm_ext_v8_2
=
218 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_2A
);
219 /* FP16 instructions. */
220 static const arm_feature_set arm_ext_fp16
=
221 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
);
223 static const arm_feature_set arm_arch_any
= ARM_ANY
;
224 static const arm_feature_set arm_arch_full
= ARM_FEATURE (-1, -1, -1);
225 static const arm_feature_set arm_arch_t2
= ARM_ARCH_THUMB2
;
226 static const arm_feature_set arm_arch_none
= ARM_ARCH_NONE
;
227 static const arm_feature_set arm_arch_v6m_only
= ARM_ARCH_V6M_ONLY
;
229 static const arm_feature_set arm_cext_iwmmxt2
=
230 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2
);
231 static const arm_feature_set arm_cext_iwmmxt
=
232 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT
);
233 static const arm_feature_set arm_cext_xscale
=
234 ARM_FEATURE_COPROC (ARM_CEXT_XSCALE
);
235 static const arm_feature_set arm_cext_maverick
=
236 ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK
);
237 static const arm_feature_set fpu_fpa_ext_v1
=
238 ARM_FEATURE_COPROC (FPU_FPA_EXT_V1
);
239 static const arm_feature_set fpu_fpa_ext_v2
=
240 ARM_FEATURE_COPROC (FPU_FPA_EXT_V2
);
241 static const arm_feature_set fpu_vfp_ext_v1xd
=
242 ARM_FEATURE_COPROC (FPU_VFP_EXT_V1xD
);
243 static const arm_feature_set fpu_vfp_ext_v1
=
244 ARM_FEATURE_COPROC (FPU_VFP_EXT_V1
);
245 static const arm_feature_set fpu_vfp_ext_v2
=
246 ARM_FEATURE_COPROC (FPU_VFP_EXT_V2
);
247 static const arm_feature_set fpu_vfp_ext_v3xd
=
248 ARM_FEATURE_COPROC (FPU_VFP_EXT_V3xD
);
249 static const arm_feature_set fpu_vfp_ext_v3
=
250 ARM_FEATURE_COPROC (FPU_VFP_EXT_V3
);
251 static const arm_feature_set fpu_vfp_ext_d32
=
252 ARM_FEATURE_COPROC (FPU_VFP_EXT_D32
);
253 static const arm_feature_set fpu_neon_ext_v1
=
254 ARM_FEATURE_COPROC (FPU_NEON_EXT_V1
);
255 static const arm_feature_set fpu_vfp_v3_or_neon_ext
=
256 ARM_FEATURE_COPROC (FPU_NEON_EXT_V1
| FPU_VFP_EXT_V3
);
257 static const arm_feature_set fpu_vfp_fp16
=
258 ARM_FEATURE_COPROC (FPU_VFP_EXT_FP16
);
259 static const arm_feature_set fpu_neon_ext_fma
=
260 ARM_FEATURE_COPROC (FPU_NEON_EXT_FMA
);
261 static const arm_feature_set fpu_vfp_ext_fma
=
262 ARM_FEATURE_COPROC (FPU_VFP_EXT_FMA
);
263 static const arm_feature_set fpu_vfp_ext_armv8
=
264 ARM_FEATURE_COPROC (FPU_VFP_EXT_ARMV8
);
265 static const arm_feature_set fpu_vfp_ext_armv8xd
=
266 ARM_FEATURE_COPROC (FPU_VFP_EXT_ARMV8xD
);
267 static const arm_feature_set fpu_neon_ext_armv8
=
268 ARM_FEATURE_COPROC (FPU_NEON_EXT_ARMV8
);
269 static const arm_feature_set fpu_crypto_ext_armv8
=
270 ARM_FEATURE_COPROC (FPU_CRYPTO_EXT_ARMV8
);
271 static const arm_feature_set crc_ext_armv8
=
272 ARM_FEATURE_COPROC (CRC_EXT_ARMV8
);
273 static const arm_feature_set fpu_neon_ext_v8_1
=
274 ARM_FEATURE_COPROC (FPU_NEON_EXT_ARMV8
| FPU_NEON_EXT_RDMA
);
276 static int mfloat_abi_opt
= -1;
277 /* Record user cpu selection for object attributes. */
278 static arm_feature_set selected_cpu
= ARM_ARCH_NONE
;
279 /* Must be long enough to hold any of the names in arm_cpus. */
280 static char selected_cpu_name
[20];
282 extern FLONUM_TYPE generic_floating_point_number
;
284 /* Return if no cpu was selected on command-line. */
286 no_cpu_selected (void)
288 return ARM_FEATURE_EQUAL (selected_cpu
, arm_arch_none
);
293 static int meabi_flags
= EABI_DEFAULT
;
295 static int meabi_flags
= EF_ARM_EABI_UNKNOWN
;
298 static int attributes_set_explicitly
[NUM_KNOWN_OBJ_ATTRIBUTES
];
303 return (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
);
308 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
309 symbolS
* GOT_symbol
;
312 /* 0: assemble for ARM,
313 1: assemble for Thumb,
314 2: assemble for Thumb even though target CPU does not support thumb
316 static int thumb_mode
= 0;
317 /* A value distinct from the possible values for thumb_mode that we
318 can use to record whether thumb_mode has been copied into the
319 tc_frag_data field of a frag. */
320 #define MODE_RECORDED (1 << 4)
322 /* Specifies the intrinsic IT insn behavior mode. */
323 enum implicit_it_mode
325 IMPLICIT_IT_MODE_NEVER
= 0x00,
326 IMPLICIT_IT_MODE_ARM
= 0x01,
327 IMPLICIT_IT_MODE_THUMB
= 0x02,
328 IMPLICIT_IT_MODE_ALWAYS
= (IMPLICIT_IT_MODE_ARM
| IMPLICIT_IT_MODE_THUMB
)
330 static int implicit_it_mode
= IMPLICIT_IT_MODE_ARM
;
332 /* If unified_syntax is true, we are processing the new unified
333 ARM/Thumb syntax. Important differences from the old ARM mode:
335 - Immediate operands do not require a # prefix.
336 - Conditional affixes always appear at the end of the
337 instruction. (For backward compatibility, those instructions
338 that formerly had them in the middle, continue to accept them
340 - The IT instruction may appear, and if it does is validated
341 against subsequent conditional affixes. It does not generate
344 Important differences from the old Thumb mode:
346 - Immediate operands do not require a # prefix.
347 - Most of the V6T2 instructions are only available in unified mode.
348 - The .N and .W suffixes are recognized and honored (it is an error
349 if they cannot be honored).
350 - All instructions set the flags if and only if they have an 's' affix.
351 - Conditional affixes may be used. They are validated against
352 preceding IT instructions. Unlike ARM mode, you cannot use a
353 conditional affix except in the scope of an IT instruction. */
355 static bfd_boolean unified_syntax
= FALSE
;
357 /* An immediate operand can start with #, and ld*, st*, pld operands
358 can contain [ and ]. We need to tell APP not to elide whitespace
359 before a [, which can appear as the first operand for pld.
360 Likewise, a { can appear as the first operand for push, pop, vld*, etc. */
361 const char arm_symbol_chars
[] = "#[]{}";
376 enum neon_el_type type
;
380 #define NEON_MAX_TYPE_ELS 4
384 struct neon_type_el el
[NEON_MAX_TYPE_ELS
];
388 enum it_instruction_type
393 IF_INSIDE_IT_LAST_INSN
, /* Either outside or inside;
394 if inside, should be the last one. */
395 NEUTRAL_IT_INSN
, /* This could be either inside or outside,
396 i.e. BKPT and NOP. */
397 IT_INSN
/* The IT insn has been parsed. */
400 /* The maximum number of operands we need. */
401 #define ARM_IT_MAX_OPERANDS 6
406 unsigned long instruction
;
410 /* "uncond_value" is set to the value in place of the conditional field in
411 unconditional versions of the instruction, or -1 if nothing is
414 struct neon_type vectype
;
415 /* This does not indicate an actual NEON instruction, only that
416 the mnemonic accepts neon-style type suffixes. */
418 /* Set to the opcode if the instruction needs relaxation.
419 Zero if the instruction is not relaxed. */
423 bfd_reloc_code_real_type type
;
428 enum it_instruction_type it_insn_type
;
434 struct neon_type_el vectype
;
435 unsigned present
: 1; /* Operand present. */
436 unsigned isreg
: 1; /* Operand was a register. */
437 unsigned immisreg
: 1; /* .imm field is a second register. */
438 unsigned isscalar
: 1; /* Operand is a (Neon) scalar. */
439 unsigned immisalign
: 1; /* Immediate is an alignment specifier. */
440 unsigned immisfloat
: 1; /* Immediate was parsed as a float. */
441 /* Note: we abuse "regisimm" to mean "is Neon register" in VMOV
442 instructions. This allows us to disambiguate ARM <-> vector insns. */
443 unsigned regisimm
: 1; /* 64-bit immediate, reg forms high 32 bits. */
444 unsigned isvec
: 1; /* Is a single, double or quad VFP/Neon reg. */
445 unsigned isquad
: 1; /* Operand is Neon quad-precision register. */
446 unsigned issingle
: 1; /* Operand is VFP single-precision register. */
447 unsigned hasreloc
: 1; /* Operand has relocation suffix. */
448 unsigned writeback
: 1; /* Operand has trailing ! */
449 unsigned preind
: 1; /* Preindexed address. */
450 unsigned postind
: 1; /* Postindexed address. */
451 unsigned negative
: 1; /* Index register was negated. */
452 unsigned shifted
: 1; /* Shift applied to operation. */
453 unsigned shift_kind
: 3; /* Shift operation (enum shift_kind). */
454 } operands
[ARM_IT_MAX_OPERANDS
];
457 static struct arm_it inst
;
459 #define NUM_FLOAT_VALS 8
461 const char * fp_const
[] =
463 "0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "0.5", "10.0", 0
466 /* Number of littlenums required to hold an extended precision number. */
467 #define MAX_LITTLENUMS 6
469 LITTLENUM_TYPE fp_values
[NUM_FLOAT_VALS
][MAX_LITTLENUMS
];
479 #define CP_T_X 0x00008000
480 #define CP_T_Y 0x00400000
482 #define CONDS_BIT 0x00100000
483 #define LOAD_BIT 0x00100000
485 #define DOUBLE_LOAD_FLAG 0x00000001
489 const char * template_name
;
493 #define COND_ALWAYS 0xE
497 const char * template_name
;
501 struct asm_barrier_opt
503 const char * template_name
;
505 const arm_feature_set arch
;
508 /* The bit that distinguishes CPSR and SPSR. */
509 #define SPSR_BIT (1 << 22)
511 /* The individual PSR flag bits. */
512 #define PSR_c (1 << 16)
513 #define PSR_x (1 << 17)
514 #define PSR_s (1 << 18)
515 #define PSR_f (1 << 19)
520 bfd_reloc_code_real_type reloc
;
525 VFP_REG_Sd
, VFP_REG_Sm
, VFP_REG_Sn
,
526 VFP_REG_Dd
, VFP_REG_Dm
, VFP_REG_Dn
531 VFP_LDSTMIA
, VFP_LDSTMDB
, VFP_LDSTMIAX
, VFP_LDSTMDBX
534 /* Bits for DEFINED field in neon_typed_alias. */
535 #define NTA_HASTYPE 1
536 #define NTA_HASINDEX 2
538 struct neon_typed_alias
540 unsigned char defined
;
542 struct neon_type_el eltype
;
545 /* ARM register categories. This includes coprocessor numbers and various
546 architecture extensions' registers. */
573 /* Structure for a hash table entry for a register.
574 If TYPE is REG_TYPE_VFD or REG_TYPE_NQ, the NEON field can point to extra
575 information which states whether a vector type or index is specified (for a
576 register alias created with .dn or .qn). Otherwise NEON should be NULL. */
582 unsigned char builtin
;
583 struct neon_typed_alias
* neon
;
586 /* Diagnostics used when we don't get a register of the expected type. */
587 const char * const reg_expected_msgs
[] =
589 N_("ARM register expected"),
590 N_("bad or missing co-processor number"),
591 N_("co-processor register expected"),
592 N_("FPA register expected"),
593 N_("VFP single precision register expected"),
594 N_("VFP/Neon double precision register expected"),
595 N_("Neon quad precision register expected"),
596 N_("VFP single or double precision register expected"),
597 N_("Neon double or quad precision register expected"),
598 N_("VFP single, double or Neon quad precision register expected"),
599 N_("VFP system register expected"),
600 N_("Maverick MVF register expected"),
601 N_("Maverick MVD register expected"),
602 N_("Maverick MVFX register expected"),
603 N_("Maverick MVDX register expected"),
604 N_("Maverick MVAX register expected"),
605 N_("Maverick DSPSC register expected"),
606 N_("iWMMXt data register expected"),
607 N_("iWMMXt control register expected"),
608 N_("iWMMXt scalar register expected"),
609 N_("XScale accumulator register expected"),
612 /* Some well known registers that we refer to directly elsewhere. */
618 /* ARM instructions take 4bytes in the object file, Thumb instructions
624 /* Basic string to match. */
625 const char * template_name
;
627 /* Parameters to instruction. */
628 unsigned int operands
[8];
630 /* Conditional tag - see opcode_lookup. */
631 unsigned int tag
: 4;
633 /* Basic instruction code. */
634 unsigned int avalue
: 28;
636 /* Thumb-format instruction code. */
639 /* Which architecture variant provides this instruction. */
640 const arm_feature_set
* avariant
;
641 const arm_feature_set
* tvariant
;
643 /* Function to call to encode instruction in ARM format. */
644 void (* aencode
) (void);
646 /* Function to call to encode instruction in Thumb format. */
647 void (* tencode
) (void);
650 /* Defines for various bits that we will want to toggle. */
651 #define INST_IMMEDIATE 0x02000000
652 #define OFFSET_REG 0x02000000
653 #define HWOFFSET_IMM 0x00400000
654 #define SHIFT_BY_REG 0x00000010
655 #define PRE_INDEX 0x01000000
656 #define INDEX_UP 0x00800000
657 #define WRITE_BACK 0x00200000
658 #define LDM_TYPE_2_OR_3 0x00400000
659 #define CPSI_MMOD 0x00020000
661 #define LITERAL_MASK 0xf000f000
662 #define OPCODE_MASK 0xfe1fffff
663 #define V4_STR_BIT 0x00000020
664 #define VLDR_VMOV_SAME 0x0040f000
666 #define T2_SUBS_PC_LR 0xf3de8f00
668 #define DATA_OP_SHIFT 21
670 #define T2_OPCODE_MASK 0xfe1fffff
671 #define T2_DATA_OP_SHIFT 21
673 #define A_COND_MASK 0xf0000000
674 #define A_PUSH_POP_OP_MASK 0x0fff0000
676 /* Opcodes for pushing/poping registers to/from the stack. */
677 #define A1_OPCODE_PUSH 0x092d0000
678 #define A2_OPCODE_PUSH 0x052d0004
679 #define A2_OPCODE_POP 0x049d0004
681 /* Codes to distinguish the arithmetic instructions. */
692 #define OPCODE_CMP 10
693 #define OPCODE_CMN 11
694 #define OPCODE_ORR 12
695 #define OPCODE_MOV 13
696 #define OPCODE_BIC 14
697 #define OPCODE_MVN 15
699 #define T2_OPCODE_AND 0
700 #define T2_OPCODE_BIC 1
701 #define T2_OPCODE_ORR 2
702 #define T2_OPCODE_ORN 3
703 #define T2_OPCODE_EOR 4
704 #define T2_OPCODE_ADD 8
705 #define T2_OPCODE_ADC 10
706 #define T2_OPCODE_SBC 11
707 #define T2_OPCODE_SUB 13
708 #define T2_OPCODE_RSB 14
710 #define T_OPCODE_MUL 0x4340
711 #define T_OPCODE_TST 0x4200
712 #define T_OPCODE_CMN 0x42c0
713 #define T_OPCODE_NEG 0x4240
714 #define T_OPCODE_MVN 0x43c0
716 #define T_OPCODE_ADD_R3 0x1800
717 #define T_OPCODE_SUB_R3 0x1a00
718 #define T_OPCODE_ADD_HI 0x4400
719 #define T_OPCODE_ADD_ST 0xb000
720 #define T_OPCODE_SUB_ST 0xb080
721 #define T_OPCODE_ADD_SP 0xa800
722 #define T_OPCODE_ADD_PC 0xa000
723 #define T_OPCODE_ADD_I8 0x3000
724 #define T_OPCODE_SUB_I8 0x3800
725 #define T_OPCODE_ADD_I3 0x1c00
726 #define T_OPCODE_SUB_I3 0x1e00
728 #define T_OPCODE_ASR_R 0x4100
729 #define T_OPCODE_LSL_R 0x4080
730 #define T_OPCODE_LSR_R 0x40c0
731 #define T_OPCODE_ROR_R 0x41c0
732 #define T_OPCODE_ASR_I 0x1000
733 #define T_OPCODE_LSL_I 0x0000
734 #define T_OPCODE_LSR_I 0x0800
736 #define T_OPCODE_MOV_I8 0x2000
737 #define T_OPCODE_CMP_I8 0x2800
738 #define T_OPCODE_CMP_LR 0x4280
739 #define T_OPCODE_MOV_HR 0x4600
740 #define T_OPCODE_CMP_HR 0x4500
742 #define T_OPCODE_LDR_PC 0x4800
743 #define T_OPCODE_LDR_SP 0x9800
744 #define T_OPCODE_STR_SP 0x9000
745 #define T_OPCODE_LDR_IW 0x6800
746 #define T_OPCODE_STR_IW 0x6000
747 #define T_OPCODE_LDR_IH 0x8800
748 #define T_OPCODE_STR_IH 0x8000
749 #define T_OPCODE_LDR_IB 0x7800
750 #define T_OPCODE_STR_IB 0x7000
751 #define T_OPCODE_LDR_RW 0x5800
752 #define T_OPCODE_STR_RW 0x5000
753 #define T_OPCODE_LDR_RH 0x5a00
754 #define T_OPCODE_STR_RH 0x5200
755 #define T_OPCODE_LDR_RB 0x5c00
756 #define T_OPCODE_STR_RB 0x5400
758 #define T_OPCODE_PUSH 0xb400
759 #define T_OPCODE_POP 0xbc00
761 #define T_OPCODE_BRANCH 0xe000
763 #define THUMB_SIZE 2 /* Size of thumb instruction. */
764 #define THUMB_PP_PC_LR 0x0100
765 #define THUMB_LOAD_BIT 0x0800
766 #define THUMB2_LOAD_BIT 0x00100000
768 #define BAD_ARGS _("bad arguments to instruction")
769 #define BAD_SP _("r13 not allowed here")
770 #define BAD_PC _("r15 not allowed here")
771 #define BAD_COND _("instruction cannot be conditional")
772 #define BAD_OVERLAP _("registers may not be the same")
773 #define BAD_HIREG _("lo register required")
774 #define BAD_THUMB32 _("instruction not supported in Thumb16 mode")
/* Diagnostic for an addressing mode the instruction does not accept.
   NOTE: no trailing semicolon -- like the surrounding BAD_* macros this
   expands in expression context (e.g. "inst.error = BAD_ADDR_MODE;" or
   inside a conditional expression), where a stray ';' baked into the
   expansion would be a syntax error or a silent empty statement.  */
#define BAD_ADDR_MODE _("instruction does not accept this addressing mode")
776 #define BAD_BRANCH _("branch must be last instruction in IT block")
777 #define BAD_NOT_IT _("instruction not allowed in IT block")
778 #define BAD_FPU _("selected FPU does not support instruction")
779 #define BAD_OUT_IT _("thumb conditional instruction should be in IT block")
780 #define BAD_IT_COND _("incorrect condition in IT block")
781 #define BAD_IT_IT _("IT falling in the range of a previous IT block")
782 #define MISSING_FNSTART _("missing .fnstart before unwinding directive")
783 #define BAD_PC_ADDRESSING \
784 _("cannot use register index with PC-relative addressing")
785 #define BAD_PC_WRITEBACK \
786 _("cannot use writeback with PC-relative addressing")
787 #define BAD_RANGE _("branch out of range")
788 #define UNPRED_REG(R) _("using " R " results in unpredictable behaviour")
790 static struct hash_control
* arm_ops_hsh
;
791 static struct hash_control
* arm_cond_hsh
;
792 static struct hash_control
* arm_shift_hsh
;
793 static struct hash_control
* arm_psr_hsh
;
794 static struct hash_control
* arm_v7m_psr_hsh
;
795 static struct hash_control
* arm_reg_hsh
;
796 static struct hash_control
* arm_reloc_hsh
;
797 static struct hash_control
* arm_barrier_opt_hsh
;
799 /* Stuff needed to resolve the label ambiguity
808 symbolS
* last_label_seen
;
809 static int label_is_thumb_function_name
= FALSE
;
811 /* Literal pool structure. Held on a per-section
812 and per-sub-section basis. */
814 #define MAX_LITERAL_POOL_SIZE 1024
815 typedef struct literal_pool
817 expressionS literals
[MAX_LITERAL_POOL_SIZE
];
818 unsigned int next_free_entry
;
824 struct dwarf2_line_info locs
[MAX_LITERAL_POOL_SIZE
];
826 struct literal_pool
* next
;
827 unsigned int alignment
;
830 /* Pointer to a linked list of literal pools. */
831 literal_pool
* list_of_pools
= NULL
;
833 typedef enum asmfunc_states
836 WAITING_ASMFUNC_NAME
,
840 static asmfunc_states asmfunc_state
= OUTSIDE_ASMFUNC
;
843 # define now_it seg_info (now_seg)->tc_segment_info_data.current_it
845 static struct current_it now_it
;
849 now_it_compatible (int cond
)
851 return (cond
& ~1) == (now_it
.cc
& ~1);
855 conditional_insn (void)
857 return inst
.cond
!= COND_ALWAYS
;
860 static int in_it_block (void);
862 static int handle_it_state (void);
864 static void force_automatic_it_block_close (void);
866 static void it_fsm_post_encode (void);
868 #define set_it_insn_type(type) \
871 inst.it_insn_type = type; \
872 if (handle_it_state () == FAIL) \
877 #define set_it_insn_type_nonvoid(type, failret) \
880 inst.it_insn_type = type; \
881 if (handle_it_state () == FAIL) \
886 #define set_it_insn_type_last() \
889 if (inst.cond == COND_ALWAYS) \
890 set_it_insn_type (IF_INSIDE_IT_LAST_INSN); \
892 set_it_insn_type (INSIDE_IT_LAST_INSN); \
898 /* This array holds the chars that always start a comment. If the
899 pre-processor is disabled, these aren't very useful. */
900 char arm_comment_chars
[] = "@";
902 /* This array holds the chars that only start a comment at the beginning of
903 a line. If the line seems to have the form '# 123 filename'
904 .line and .file directives will appear in the pre-processed output. */
905 /* Note that input_file.c hand checks for '#' at the beginning of the
906 first line of the input file. This is because the compiler outputs
907 #NO_APP at the beginning of its output. */
908 /* Also note that comments like this one will always work. */
909 const char line_comment_chars
[] = "#";
911 char arm_line_separator_chars
[] = ";";
913 /* Chars that can be used to separate mant
914 from exp in floating point numbers. */
915 const char EXP_CHARS
[] = "eE";
917 /* Chars that mean this number is a floating point constant. */
921 const char FLT_CHARS
[] = "rRsSfFdDxXeEpP";
923 /* Prefix characters that indicate the start of an immediate
925 #define is_immediate_prefix(C) ((C) == '#' || (C) == '$')
927 /* Separator character handling. */
929 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
932 skip_past_char (char ** str
, char c
)
934 /* PR gas/14987: Allow for whitespace before the expected character. */
935 skip_whitespace (*str
);
946 #define skip_past_comma(str) skip_past_char (str, ',')
948 /* Arithmetic expressions (possibly involving symbols). */
950 /* Return TRUE if anything in the expression is a bignum. */
953 walk_no_bignums (symbolS
* sp
)
955 if (symbol_get_value_expression (sp
)->X_op
== O_big
)
958 if (symbol_get_value_expression (sp
)->X_add_symbol
)
960 return (walk_no_bignums (symbol_get_value_expression (sp
)->X_add_symbol
)
961 || (symbol_get_value_expression (sp
)->X_op_symbol
962 && walk_no_bignums (symbol_get_value_expression (sp
)->X_op_symbol
)));
968 static int in_my_get_expression
= 0;
970 /* Third argument to my_get_expression. */
971 #define GE_NO_PREFIX 0
972 #define GE_IMM_PREFIX 1
973 #define GE_OPT_PREFIX 2
974 /* This is a bit of a hack. Use an optional prefix, and also allow big (64-bit)
975 immediates, as can be used in Neon VMVN and VMOV immediate instructions. */
976 #define GE_OPT_PREFIX_BIG 3
979 my_get_expression (expressionS
* ep
, char ** str
, int prefix_mode
)
984 /* In unified syntax, all prefixes are optional. */
986 prefix_mode
= (prefix_mode
== GE_OPT_PREFIX_BIG
) ? prefix_mode
991 case GE_NO_PREFIX
: break;
993 if (!is_immediate_prefix (**str
))
995 inst
.error
= _("immediate expression requires a # prefix");
1001 case GE_OPT_PREFIX_BIG
:
1002 if (is_immediate_prefix (**str
))
1008 memset (ep
, 0, sizeof (expressionS
));
1010 save_in
= input_line_pointer
;
1011 input_line_pointer
= *str
;
1012 in_my_get_expression
= 1;
1013 seg
= expression (ep
);
1014 in_my_get_expression
= 0;
1016 if (ep
->X_op
== O_illegal
|| ep
->X_op
== O_absent
)
1018 /* We found a bad or missing expression in md_operand(). */
1019 *str
= input_line_pointer
;
1020 input_line_pointer
= save_in
;
1021 if (inst
.error
== NULL
)
1022 inst
.error
= (ep
->X_op
== O_absent
1023 ? _("missing expression") :_("bad expression"));
1028 if (seg
!= absolute_section
1029 && seg
!= text_section
1030 && seg
!= data_section
1031 && seg
!= bss_section
1032 && seg
!= undefined_section
)
1034 inst
.error
= _("bad segment");
1035 *str
= input_line_pointer
;
1036 input_line_pointer
= save_in
;
1043 /* Get rid of any bignums now, so that we don't generate an error for which
1044 we can't establish a line number later on. Big numbers are never valid
1045 in instructions, which is where this routine is always called. */
1046 if (prefix_mode
!= GE_OPT_PREFIX_BIG
1047 && (ep
->X_op
== O_big
1048 || (ep
->X_add_symbol
1049 && (walk_no_bignums (ep
->X_add_symbol
)
1051 && walk_no_bignums (ep
->X_op_symbol
))))))
1053 inst
.error
= _("invalid constant");
1054 *str
= input_line_pointer
;
1055 input_line_pointer
= save_in
;
1059 *str
= input_line_pointer
;
1060 input_line_pointer
= save_in
;
1064 /* Turn a string in input_line_pointer into a floating point constant
1065 of type TYPE, and store the appropriate bytes in *LITP. The number
1066 of LITTLENUMS emitted is stored in *SIZEP. An error message is
1067 returned, or NULL on OK.
1069 Note that fp constants aren't represent in the normal way on the ARM.
1070 In big endian mode, things are as expected. However, in little endian
1071 mode fp constants are big-endian word-wise, and little-endian byte-wise
1072 within the words. For example, (double) 1.1 in big endian mode is
1073 the byte sequence 3f f1 99 99 99 99 99 9a, and in little endian mode is
1074 the byte sequence 99 99 f1 3f 9a 99 99 99.
1076 ??? The format of 12 byte floats is uncertain according to gcc's arm.h. */
1079 md_atof (int type
, char * litP
, int * sizeP
)
1082 LITTLENUM_TYPE words
[MAX_LITTLENUMS
];
1114 return _("Unrecognized or unsupported floating point constant");
1117 t
= atof_ieee (input_line_pointer
, type
, words
);
1119 input_line_pointer
= t
;
1120 *sizeP
= prec
* sizeof (LITTLENUM_TYPE
);
1122 if (target_big_endian
)
1124 for (i
= 0; i
< prec
; i
++)
1126 md_number_to_chars (litP
, (valueT
) words
[i
], sizeof (LITTLENUM_TYPE
));
1127 litP
+= sizeof (LITTLENUM_TYPE
);
1132 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_endian_pure
))
1133 for (i
= prec
- 1; i
>= 0; i
--)
1135 md_number_to_chars (litP
, (valueT
) words
[i
], sizeof (LITTLENUM_TYPE
));
1136 litP
+= sizeof (LITTLENUM_TYPE
);
1139 /* For a 4 byte float the order of elements in `words' is 1 0.
1140 For an 8 byte float the order is 1 0 3 2. */
1141 for (i
= 0; i
< prec
; i
+= 2)
1143 md_number_to_chars (litP
, (valueT
) words
[i
+ 1],
1144 sizeof (LITTLENUM_TYPE
));
1145 md_number_to_chars (litP
+ sizeof (LITTLENUM_TYPE
),
1146 (valueT
) words
[i
], sizeof (LITTLENUM_TYPE
));
1147 litP
+= 2 * sizeof (LITTLENUM_TYPE
);
1154 /* We handle all bad expressions here, so that we can report the faulty
1155 instruction in the error message. */
1157 md_operand (expressionS
* exp
)
1159 if (in_my_get_expression
)
1160 exp
->X_op
= O_illegal
;
1163 /* Immediate values. */
1165 /* Generic immediate-value read function for use in directives.
1166 Accepts anything that 'expression' can fold to a constant.
1167 *val receives the number. */
1170 immediate_for_directive (int *val
)
1173 exp
.X_op
= O_illegal
;
1175 if (is_immediate_prefix (*input_line_pointer
))
1177 input_line_pointer
++;
1181 if (exp
.X_op
!= O_constant
)
1183 as_bad (_("expected #constant"));
1184 ignore_rest_of_line ();
1187 *val
= exp
.X_add_number
;
1192 /* Register parsing. */
1194 /* Generic register parser. CCP points to what should be the
1195 beginning of a register name. If it is indeed a valid register
1196 name, advance CCP over it and return the reg_entry structure;
1197 otherwise return NULL. Does not issue diagnostics. */
1199 static struct reg_entry
*
1200 arm_reg_parse_multi (char **ccp
)
1204 struct reg_entry
*reg
;
1206 skip_whitespace (start
);
1208 #ifdef REGISTER_PREFIX
1209 if (*start
!= REGISTER_PREFIX
)
1213 #ifdef OPTIONAL_REGISTER_PREFIX
1214 if (*start
== OPTIONAL_REGISTER_PREFIX
)
1219 if (!ISALPHA (*p
) || !is_name_beginner (*p
))
1224 while (ISALPHA (*p
) || ISDIGIT (*p
) || *p
== '_');
1226 reg
= (struct reg_entry
*) hash_find_n (arm_reg_hsh
, start
, p
- start
);
1236 arm_reg_alt_syntax (char **ccp
, char *start
, struct reg_entry
*reg
,
1237 enum arm_reg_type type
)
1239 /* Alternative syntaxes are accepted for a few register classes. */
1246 /* Generic coprocessor register names are allowed for these. */
1247 if (reg
&& reg
->type
== REG_TYPE_CN
)
1252 /* For backward compatibility, a bare number is valid here. */
1254 unsigned long processor
= strtoul (start
, ccp
, 10);
1255 if (*ccp
!= start
&& processor
<= 15)
1259 case REG_TYPE_MMXWC
:
1260 /* WC includes WCG. ??? I'm not sure this is true for all
1261 instructions that take WC registers. */
1262 if (reg
&& reg
->type
== REG_TYPE_MMXWCG
)
1273 /* As arm_reg_parse_multi, but the register must be of type TYPE, and the
1274 return value is the register number or FAIL. */
1277 arm_reg_parse (char **ccp
, enum arm_reg_type type
)
1280 struct reg_entry
*reg
= arm_reg_parse_multi (ccp
);
1283 /* Do not allow a scalar (reg+index) to parse as a register. */
1284 if (reg
&& reg
->neon
&& (reg
->neon
->defined
& NTA_HASINDEX
))
1287 if (reg
&& reg
->type
== type
)
1290 if ((ret
= arm_reg_alt_syntax (ccp
, start
, reg
, type
)) != FAIL
)
1297 /* Parse a Neon type specifier. *STR should point at the leading '.'
1298 character. Does no verification at this stage that the type fits the opcode
1305 Can all be legally parsed by this function.
1307 Fills in neon_type struct pointer with parsed information, and updates STR
1308 to point after the parsed type specifier. Returns SUCCESS if this was a legal
1309 type, FAIL if not. */
1312 parse_neon_type (struct neon_type
*type
, char **str
)
1319 while (type
->elems
< NEON_MAX_TYPE_ELS
)
1321 enum neon_el_type thistype
= NT_untyped
;
1322 unsigned thissize
= -1u;
1329 /* Just a size without an explicit type. */
1333 switch (TOLOWER (*ptr
))
1335 case 'i': thistype
= NT_integer
; break;
1336 case 'f': thistype
= NT_float
; break;
1337 case 'p': thistype
= NT_poly
; break;
1338 case 's': thistype
= NT_signed
; break;
1339 case 'u': thistype
= NT_unsigned
; break;
1341 thistype
= NT_float
;
1346 as_bad (_("unexpected character `%c' in type specifier"), *ptr
);
1352 /* .f is an abbreviation for .f32. */
1353 if (thistype
== NT_float
&& !ISDIGIT (*ptr
))
1358 thissize
= strtoul (ptr
, &ptr
, 10);
1360 if (thissize
!= 8 && thissize
!= 16 && thissize
!= 32
1363 as_bad (_("bad size %d in type specifier"), thissize
);
1371 type
->el
[type
->elems
].type
= thistype
;
1372 type
->el
[type
->elems
].size
= thissize
;
1377 /* Empty/missing type is not a successful parse. */
1378 if (type
->elems
== 0)
1386 /* Errors may be set multiple times during parsing or bit encoding
1387 (particularly in the Neon bits), but usually the earliest error which is set
1388 will be the most meaningful. Avoid overwriting it with later (cascading)
1389 errors by calling this function. */
1392 first_error (const char *err
)
1398 /* Parse a single type, e.g. ".s32", leading period included. */
1400 parse_neon_operand_type (struct neon_type_el
*vectype
, char **ccp
)
1403 struct neon_type optype
;
1407 if (parse_neon_type (&optype
, &str
) == SUCCESS
)
1409 if (optype
.elems
== 1)
1410 *vectype
= optype
.el
[0];
1413 first_error (_("only one type should be specified for operand"));
1419 first_error (_("vector type expected"));
1431 /* Special meanings for indices (which have a range of 0-7), which will fit into
1434 #define NEON_ALL_LANES 15
1435 #define NEON_INTERLEAVE_LANES 14
1437 /* Parse either a register or a scalar, with an optional type. Return the
1438 register number, and optionally fill in the actual type of the register
1439 when multiple alternatives were given (NEON_TYPE_NDQ) in *RTYPE, and
1440 type/index information in *TYPEINFO. */
1443 parse_typed_reg_or_scalar (char **ccp
, enum arm_reg_type type
,
1444 enum arm_reg_type
*rtype
,
1445 struct neon_typed_alias
*typeinfo
)
1448 struct reg_entry
*reg
= arm_reg_parse_multi (&str
);
1449 struct neon_typed_alias atype
;
1450 struct neon_type_el parsetype
;
1454 atype
.eltype
.type
= NT_invtype
;
1455 atype
.eltype
.size
= -1;
1457 /* Try alternate syntax for some types of register. Note these are mutually
1458 exclusive with the Neon syntax extensions. */
1461 int altreg
= arm_reg_alt_syntax (&str
, *ccp
, reg
, type
);
1469 /* Undo polymorphism when a set of register types may be accepted. */
1470 if ((type
== REG_TYPE_NDQ
1471 && (reg
->type
== REG_TYPE_NQ
|| reg
->type
== REG_TYPE_VFD
))
1472 || (type
== REG_TYPE_VFSD
1473 && (reg
->type
== REG_TYPE_VFS
|| reg
->type
== REG_TYPE_VFD
))
1474 || (type
== REG_TYPE_NSDQ
1475 && (reg
->type
== REG_TYPE_VFS
|| reg
->type
== REG_TYPE_VFD
1476 || reg
->type
== REG_TYPE_NQ
))
1477 || (type
== REG_TYPE_MMXWC
1478 && (reg
->type
== REG_TYPE_MMXWCG
)))
1479 type
= (enum arm_reg_type
) reg
->type
;
1481 if (type
!= reg
->type
)
1487 if (parse_neon_operand_type (&parsetype
, &str
) == SUCCESS
)
1489 if ((atype
.defined
& NTA_HASTYPE
) != 0)
1491 first_error (_("can't redefine type for operand"));
1494 atype
.defined
|= NTA_HASTYPE
;
1495 atype
.eltype
= parsetype
;
1498 if (skip_past_char (&str
, '[') == SUCCESS
)
1500 if (type
!= REG_TYPE_VFD
)
1502 first_error (_("only D registers may be indexed"));
1506 if ((atype
.defined
& NTA_HASINDEX
) != 0)
1508 first_error (_("can't change index for operand"));
1512 atype
.defined
|= NTA_HASINDEX
;
1514 if (skip_past_char (&str
, ']') == SUCCESS
)
1515 atype
.index
= NEON_ALL_LANES
;
1520 my_get_expression (&exp
, &str
, GE_NO_PREFIX
);
1522 if (exp
.X_op
!= O_constant
)
1524 first_error (_("constant expression required"));
1528 if (skip_past_char (&str
, ']') == FAIL
)
1531 atype
.index
= exp
.X_add_number
;
1546 /* Like arm_reg_parse, but allow allow the following extra features:
1547 - If RTYPE is non-zero, return the (possibly restricted) type of the
1548 register (e.g. Neon double or quad reg when either has been requested).
1549 - If this is a Neon vector type with additional type information, fill
1550 in the struct pointed to by VECTYPE (if non-NULL).
1551 This function will fault on encountering a scalar. */
1554 arm_typed_reg_parse (char **ccp
, enum arm_reg_type type
,
1555 enum arm_reg_type
*rtype
, struct neon_type_el
*vectype
)
1557 struct neon_typed_alias atype
;
1559 int reg
= parse_typed_reg_or_scalar (&str
, type
, rtype
, &atype
);
1564 /* Do not allow regname(... to parse as a register. */
1568 /* Do not allow a scalar (reg+index) to parse as a register. */
1569 if ((atype
.defined
& NTA_HASINDEX
) != 0)
1571 first_error (_("register operand expected, but got scalar"));
1576 *vectype
= atype
.eltype
;
1583 #define NEON_SCALAR_REG(X) ((X) >> 4)
1584 #define NEON_SCALAR_INDEX(X) ((X) & 15)
1586 /* Parse a Neon scalar. Most of the time when we're parsing a scalar, we don't
1587 have enough information to be able to do a good job bounds-checking. So, we
1588 just do easy checks here, and do further checks later. */
1591 parse_scalar (char **ccp
, int elsize
, struct neon_type_el
*type
)
1595 struct neon_typed_alias atype
;
1597 reg
= parse_typed_reg_or_scalar (&str
, REG_TYPE_VFD
, NULL
, &atype
);
1599 if (reg
== FAIL
|| (atype
.defined
& NTA_HASINDEX
) == 0)
1602 if (atype
.index
== NEON_ALL_LANES
)
1604 first_error (_("scalar must have an index"));
1607 else if (atype
.index
>= 64 / elsize
)
1609 first_error (_("scalar index out of range"));
1614 *type
= atype
.eltype
;
1618 return reg
* 16 + atype
.index
;
1621 /* Parse an ARM register list. Returns the bitmask, or FAIL. */
1624 parse_reg_list (char ** strp
)
1626 char * str
= * strp
;
1630 /* We come back here if we get ranges concatenated by '+' or '|'. */
1633 skip_whitespace (str
);
1647 if ((reg
= arm_reg_parse (&str
, REG_TYPE_RN
)) == FAIL
)
1649 first_error (_(reg_expected_msgs
[REG_TYPE_RN
]));
1659 first_error (_("bad range in register list"));
1663 for (i
= cur_reg
+ 1; i
< reg
; i
++)
1665 if (range
& (1 << i
))
1667 (_("Warning: duplicated register (r%d) in register list"),
1675 if (range
& (1 << reg
))
1676 as_tsktsk (_("Warning: duplicated register (r%d) in register list"),
1678 else if (reg
<= cur_reg
)
1679 as_tsktsk (_("Warning: register range not in ascending order"));
1684 while (skip_past_comma (&str
) != FAIL
1685 || (in_range
= 1, *str
++ == '-'));
1688 if (skip_past_char (&str
, '}') == FAIL
)
1690 first_error (_("missing `}'"));
1698 if (my_get_expression (&exp
, &str
, GE_NO_PREFIX
))
1701 if (exp
.X_op
== O_constant
)
1703 if (exp
.X_add_number
1704 != (exp
.X_add_number
& 0x0000ffff))
1706 inst
.error
= _("invalid register mask");
1710 if ((range
& exp
.X_add_number
) != 0)
1712 int regno
= range
& exp
.X_add_number
;
1715 regno
= (1 << regno
) - 1;
1717 (_("Warning: duplicated register (r%d) in register list"),
1721 range
|= exp
.X_add_number
;
1725 if (inst
.reloc
.type
!= 0)
1727 inst
.error
= _("expression too complex");
1731 memcpy (&inst
.reloc
.exp
, &exp
, sizeof (expressionS
));
1732 inst
.reloc
.type
= BFD_RELOC_ARM_MULTI
;
1733 inst
.reloc
.pc_rel
= 0;
1737 if (*str
== '|' || *str
== '+')
1743 while (another_range
);
1749 /* Types of registers in a list. */
1758 /* Parse a VFP register list. If the string is invalid return FAIL.
1759 Otherwise return the number of registers, and set PBASE to the first
1760 register. Parses registers of type ETYPE.
1761 If REGLIST_NEON_D is used, several syntax enhancements are enabled:
1762 - Q registers can be used to specify pairs of D registers
1763 - { } can be omitted from around a singleton register list
1764 FIXME: This is not implemented, as it would require backtracking in
1767 This could be done (the meaning isn't really ambiguous), but doesn't
1768 fit in well with the current parsing framework.
1769 - 32 D registers may be used (also true for VFPv3).
1770 FIXME: Types are ignored in these register lists, which is probably a
1774 parse_vfp_reg_list (char **ccp
, unsigned int *pbase
, enum reg_list_els etype
)
1779 enum arm_reg_type regtype
= (enum arm_reg_type
) 0;
1783 unsigned long mask
= 0;
1786 if (skip_past_char (&str
, '{') == FAIL
)
1788 inst
.error
= _("expecting {");
1795 regtype
= REG_TYPE_VFS
;
1800 regtype
= REG_TYPE_VFD
;
1803 case REGLIST_NEON_D
:
1804 regtype
= REG_TYPE_NDQ
;
1808 if (etype
!= REGLIST_VFP_S
)
1810 /* VFPv3 allows 32 D registers, except for the VFPv3-D16 variant. */
1811 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_d32
))
1815 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
1818 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
,
1825 base_reg
= max_regs
;
1829 int setmask
= 1, addregs
= 1;
1831 new_base
= arm_typed_reg_parse (&str
, regtype
, ®type
, NULL
);
1833 if (new_base
== FAIL
)
1835 first_error (_(reg_expected_msgs
[regtype
]));
1839 if (new_base
>= max_regs
)
1841 first_error (_("register out of range in list"));
1845 /* Note: a value of 2 * n is returned for the register Q<n>. */
1846 if (regtype
== REG_TYPE_NQ
)
1852 if (new_base
< base_reg
)
1853 base_reg
= new_base
;
1855 if (mask
& (setmask
<< new_base
))
1857 first_error (_("invalid register list"));
1861 if ((mask
>> new_base
) != 0 && ! warned
)
1863 as_tsktsk (_("register list not in ascending order"));
1867 mask
|= setmask
<< new_base
;
1870 if (*str
== '-') /* We have the start of a range expression */
1876 if ((high_range
= arm_typed_reg_parse (&str
, regtype
, NULL
, NULL
))
1879 inst
.error
= gettext (reg_expected_msgs
[regtype
]);
1883 if (high_range
>= max_regs
)
1885 first_error (_("register out of range in list"));
1889 if (regtype
== REG_TYPE_NQ
)
1890 high_range
= high_range
+ 1;
1892 if (high_range
<= new_base
)
1894 inst
.error
= _("register range not in ascending order");
1898 for (new_base
+= addregs
; new_base
<= high_range
; new_base
+= addregs
)
1900 if (mask
& (setmask
<< new_base
))
1902 inst
.error
= _("invalid register list");
1906 mask
|= setmask
<< new_base
;
1911 while (skip_past_comma (&str
) != FAIL
);
1915 /* Sanity check -- should have raised a parse error above. */
1916 if (count
== 0 || count
> max_regs
)
1921 /* Final test -- the registers must be consecutive. */
1923 for (i
= 0; i
< count
; i
++)
1925 if ((mask
& (1u << i
)) == 0)
1927 inst
.error
= _("non-contiguous register range");
1937 /* True if two alias types are the same. */
1940 neon_alias_types_same (struct neon_typed_alias
*a
, struct neon_typed_alias
*b
)
1948 if (a
->defined
!= b
->defined
)
1951 if ((a
->defined
& NTA_HASTYPE
) != 0
1952 && (a
->eltype
.type
!= b
->eltype
.type
1953 || a
->eltype
.size
!= b
->eltype
.size
))
1956 if ((a
->defined
& NTA_HASINDEX
) != 0
1957 && (a
->index
!= b
->index
))
1963 /* Parse element/structure lists for Neon VLD<n> and VST<n> instructions.
1964 The base register is put in *PBASE.
1965 The lane (or one of the NEON_*_LANES constants) is placed in bits [3:0] of
1967 The register stride (minus one) is put in bit 4 of the return value.
1968 Bits [6:5] encode the list length (minus one).
1969 The type of the list elements is put in *ELTYPE, if non-NULL. */
1971 #define NEON_LANE(X) ((X) & 0xf)
1972 #define NEON_REG_STRIDE(X) ((((X) >> 4) & 1) + 1)
1973 #define NEON_REGLIST_LENGTH(X) ((((X) >> 5) & 3) + 1)
1976 parse_neon_el_struct_list (char **str
, unsigned *pbase
,
1977 struct neon_type_el
*eltype
)
1984 int leading_brace
= 0;
1985 enum arm_reg_type rtype
= REG_TYPE_NDQ
;
1986 const char *const incr_error
= _("register stride must be 1 or 2");
1987 const char *const type_error
= _("mismatched element/structure types in list");
1988 struct neon_typed_alias firsttype
;
1990 if (skip_past_char (&ptr
, '{') == SUCCESS
)
1995 struct neon_typed_alias atype
;
1996 int getreg
= parse_typed_reg_or_scalar (&ptr
, rtype
, &rtype
, &atype
);
2000 first_error (_(reg_expected_msgs
[rtype
]));
2007 if (rtype
== REG_TYPE_NQ
)
2013 else if (reg_incr
== -1)
2015 reg_incr
= getreg
- base_reg
;
2016 if (reg_incr
< 1 || reg_incr
> 2)
2018 first_error (_(incr_error
));
2022 else if (getreg
!= base_reg
+ reg_incr
* count
)
2024 first_error (_(incr_error
));
2028 if (! neon_alias_types_same (&atype
, &firsttype
))
2030 first_error (_(type_error
));
2034 /* Handle Dn-Dm or Qn-Qm syntax. Can only be used with non-indexed list
2038 struct neon_typed_alias htype
;
2039 int hireg
, dregs
= (rtype
== REG_TYPE_NQ
) ? 2 : 1;
2041 lane
= NEON_INTERLEAVE_LANES
;
2042 else if (lane
!= NEON_INTERLEAVE_LANES
)
2044 first_error (_(type_error
));
2049 else if (reg_incr
!= 1)
2051 first_error (_("don't use Rn-Rm syntax with non-unit stride"));
2055 hireg
= parse_typed_reg_or_scalar (&ptr
, rtype
, NULL
, &htype
);
2058 first_error (_(reg_expected_msgs
[rtype
]));
2061 if (! neon_alias_types_same (&htype
, &firsttype
))
2063 first_error (_(type_error
));
2066 count
+= hireg
+ dregs
- getreg
;
2070 /* If we're using Q registers, we can't use [] or [n] syntax. */
2071 if (rtype
== REG_TYPE_NQ
)
2077 if ((atype
.defined
& NTA_HASINDEX
) != 0)
2081 else if (lane
!= atype
.index
)
2083 first_error (_(type_error
));
2087 else if (lane
== -1)
2088 lane
= NEON_INTERLEAVE_LANES
;
2089 else if (lane
!= NEON_INTERLEAVE_LANES
)
2091 first_error (_(type_error
));
2096 while ((count
!= 1 || leading_brace
) && skip_past_comma (&ptr
) != FAIL
);
2098 /* No lane set by [x]. We must be interleaving structures. */
2100 lane
= NEON_INTERLEAVE_LANES
;
2103 if (lane
== -1 || base_reg
== -1 || count
< 1 || count
> 4
2104 || (count
> 1 && reg_incr
== -1))
2106 first_error (_("error parsing element/structure list"));
2110 if ((count
> 1 || leading_brace
) && skip_past_char (&ptr
, '}') == FAIL
)
2112 first_error (_("expected }"));
2120 *eltype
= firsttype
.eltype
;
2125 return lane
| ((reg_incr
- 1) << 4) | ((count
- 1) << 5);
2128 /* Parse an explicit relocation suffix on an expression. This is
2129 either nothing, or a word in parentheses. Note that if !OBJ_ELF,
2130 arm_reloc_hsh contains no entries, so this function can only
2131 succeed if there is no () after the word. Returns -1 on error,
2132 BFD_RELOC_UNUSED if there wasn't any suffix. */
2135 parse_reloc (char **str
)
2137 struct reloc_entry
*r
;
2141 return BFD_RELOC_UNUSED
;
2146 while (*q
&& *q
!= ')' && *q
!= ',')
2151 if ((r
= (struct reloc_entry
*)
2152 hash_find_n (arm_reloc_hsh
, p
, q
- p
)) == NULL
)
2159 /* Directives: register aliases. */
2161 static struct reg_entry
*
2162 insert_reg_alias (char *str
, unsigned number
, int type
)
2164 struct reg_entry
*new_reg
;
2167 if ((new_reg
= (struct reg_entry
*) hash_find (arm_reg_hsh
, str
)) != 0)
2169 if (new_reg
->builtin
)
2170 as_warn (_("ignoring attempt to redefine built-in register '%s'"), str
);
2172 /* Only warn about a redefinition if it's not defined as the
2174 else if (new_reg
->number
!= number
|| new_reg
->type
!= type
)
2175 as_warn (_("ignoring redefinition of register alias '%s'"), str
);
2180 name
= xstrdup (str
);
2181 new_reg
= (struct reg_entry
*) xmalloc (sizeof (struct reg_entry
));
2183 new_reg
->name
= name
;
2184 new_reg
->number
= number
;
2185 new_reg
->type
= type
;
2186 new_reg
->builtin
= FALSE
;
2187 new_reg
->neon
= NULL
;
2189 if (hash_insert (arm_reg_hsh
, name
, (void *) new_reg
))
2196 insert_neon_reg_alias (char *str
, int number
, int type
,
2197 struct neon_typed_alias
*atype
)
2199 struct reg_entry
*reg
= insert_reg_alias (str
, number
, type
);
2203 first_error (_("attempt to redefine typed alias"));
2209 reg
->neon
= (struct neon_typed_alias
*)
2210 xmalloc (sizeof (struct neon_typed_alias
));
2211 *reg
->neon
= *atype
;
2215 /* Look for the .req directive. This is of the form:
2217 new_register_name .req existing_register_name
2219 If we find one, or if it looks sufficiently like one that we want to
2220 handle any error here, return TRUE. Otherwise return FALSE. */
2223 create_register_alias (char * newname
, char *p
)
2225 struct reg_entry
*old
;
2226 char *oldname
, *nbuf
;
2229 /* The input scrubber ensures that whitespace after the mnemonic is
2230 collapsed to single spaces. */
2232 if (strncmp (oldname
, " .req ", 6) != 0)
2236 if (*oldname
== '\0')
2239 old
= (struct reg_entry
*) hash_find (arm_reg_hsh
, oldname
);
2242 as_warn (_("unknown register '%s' -- .req ignored"), oldname
);
2246 /* If TC_CASE_SENSITIVE is defined, then newname already points to
2247 the desired alias name, and p points to its end. If not, then
2248 the desired alias name is in the global original_case_string. */
2249 #ifdef TC_CASE_SENSITIVE
2252 newname
= original_case_string
;
2253 nlen
= strlen (newname
);
2256 nbuf
= (char *) alloca (nlen
+ 1);
2257 memcpy (nbuf
, newname
, nlen
);
2260 /* Create aliases under the new name as stated; an all-lowercase
2261 version of the new name; and an all-uppercase version of the new
2263 if (insert_reg_alias (nbuf
, old
->number
, old
->type
) != NULL
)
2265 for (p
= nbuf
; *p
; p
++)
2268 if (strncmp (nbuf
, newname
, nlen
))
2270 /* If this attempt to create an additional alias fails, do not bother
2271 trying to create the all-lower case alias. We will fail and issue
2272 a second, duplicate error message. This situation arises when the
2273 programmer does something like:
2276 The second .req creates the "Foo" alias but then fails to create
2277 the artificial FOO alias because it has already been created by the
2279 if (insert_reg_alias (nbuf
, old
->number
, old
->type
) == NULL
)
2283 for (p
= nbuf
; *p
; p
++)
2286 if (strncmp (nbuf
, newname
, nlen
))
2287 insert_reg_alias (nbuf
, old
->number
, old
->type
);
2293 /* Create a Neon typed/indexed register alias using directives, e.g.:
2298 These typed registers can be used instead of the types specified after the
2299 Neon mnemonic, so long as all operands given have types. Types can also be
2300 specified directly, e.g.:
2301 vadd d0.s32, d1.s32, d2.s32 */
2304 create_neon_reg_alias (char *newname
, char *p
)
2306 enum arm_reg_type basetype
;
2307 struct reg_entry
*basereg
;
2308 struct reg_entry mybasereg
;
2309 struct neon_type ntype
;
2310 struct neon_typed_alias typeinfo
;
2311 char *namebuf
, *nameend ATTRIBUTE_UNUSED
;
2314 typeinfo
.defined
= 0;
2315 typeinfo
.eltype
.type
= NT_invtype
;
2316 typeinfo
.eltype
.size
= -1;
2317 typeinfo
.index
= -1;
2321 if (strncmp (p
, " .dn ", 5) == 0)
2322 basetype
= REG_TYPE_VFD
;
2323 else if (strncmp (p
, " .qn ", 5) == 0)
2324 basetype
= REG_TYPE_NQ
;
2333 basereg
= arm_reg_parse_multi (&p
);
2335 if (basereg
&& basereg
->type
!= basetype
)
2337 as_bad (_("bad type for register"));
2341 if (basereg
== NULL
)
2344 /* Try parsing as an integer. */
2345 my_get_expression (&exp
, &p
, GE_NO_PREFIX
);
2346 if (exp
.X_op
!= O_constant
)
2348 as_bad (_("expression must be constant"));
2351 basereg
= &mybasereg
;
2352 basereg
->number
= (basetype
== REG_TYPE_NQ
) ? exp
.X_add_number
* 2
2358 typeinfo
= *basereg
->neon
;
2360 if (parse_neon_type (&ntype
, &p
) == SUCCESS
)
2362 /* We got a type. */
2363 if (typeinfo
.defined
& NTA_HASTYPE
)
2365 as_bad (_("can't redefine the type of a register alias"));
2369 typeinfo
.defined
|= NTA_HASTYPE
;
2370 if (ntype
.elems
!= 1)
2372 as_bad (_("you must specify a single type only"));
2375 typeinfo
.eltype
= ntype
.el
[0];
2378 if (skip_past_char (&p
, '[') == SUCCESS
)
2381 /* We got a scalar index. */
2383 if (typeinfo
.defined
& NTA_HASINDEX
)
2385 as_bad (_("can't redefine the index of a scalar alias"));
2389 my_get_expression (&exp
, &p
, GE_NO_PREFIX
);
2391 if (exp
.X_op
!= O_constant
)
2393 as_bad (_("scalar index must be constant"));
2397 typeinfo
.defined
|= NTA_HASINDEX
;
2398 typeinfo
.index
= exp
.X_add_number
;
2400 if (skip_past_char (&p
, ']') == FAIL
)
2402 as_bad (_("expecting ]"));
2407 /* If TC_CASE_SENSITIVE is defined, then newname already points to
2408 the desired alias name, and p points to its end. If not, then
2409 the desired alias name is in the global original_case_string. */
2410 #ifdef TC_CASE_SENSITIVE
2411 namelen
= nameend
- newname
;
2413 newname
= original_case_string
;
2414 namelen
= strlen (newname
);
2417 namebuf
= (char *) alloca (namelen
+ 1);
2418 strncpy (namebuf
, newname
, namelen
);
2419 namebuf
[namelen
] = '\0';
2421 insert_neon_reg_alias (namebuf
, basereg
->number
, basetype
,
2422 typeinfo
.defined
!= 0 ? &typeinfo
: NULL
);
2424 /* Insert name in all uppercase. */
2425 for (p
= namebuf
; *p
; p
++)
2428 if (strncmp (namebuf
, newname
, namelen
))
2429 insert_neon_reg_alias (namebuf
, basereg
->number
, basetype
,
2430 typeinfo
.defined
!= 0 ? &typeinfo
: NULL
);
2432 /* Insert name in all lowercase. */
2433 for (p
= namebuf
; *p
; p
++)
2436 if (strncmp (namebuf
, newname
, namelen
))
2437 insert_neon_reg_alias (namebuf
, basereg
->number
, basetype
,
2438 typeinfo
.defined
!= 0 ? &typeinfo
: NULL
);
2443 /* Should never be called, as .req goes between the alias and the
2444 register name, not at the beginning of the line. */
2447 s_req (int a ATTRIBUTE_UNUSED
)
2449 as_bad (_("invalid syntax for .req directive"));
2453 s_dn (int a ATTRIBUTE_UNUSED
)
2455 as_bad (_("invalid syntax for .dn directive"));
2459 s_qn (int a ATTRIBUTE_UNUSED
)
2461 as_bad (_("invalid syntax for .qn directive"));
2464 /* The .unreq directive deletes an alias which was previously defined
2465 by .req. For example:
2471 s_unreq (int a ATTRIBUTE_UNUSED
)
2476 name
= input_line_pointer
;
2478 while (*input_line_pointer
!= 0
2479 && *input_line_pointer
!= ' '
2480 && *input_line_pointer
!= '\n')
2481 ++input_line_pointer
;
2483 saved_char
= *input_line_pointer
;
2484 *input_line_pointer
= 0;
2487 as_bad (_("invalid syntax for .unreq directive"));
2490 struct reg_entry
*reg
= (struct reg_entry
*) hash_find (arm_reg_hsh
,
2494 as_bad (_("unknown register alias '%s'"), name
);
2495 else if (reg
->builtin
)
2496 as_warn (_("ignoring attempt to use .unreq on fixed register name: '%s'"),
2503 hash_delete (arm_reg_hsh
, name
, FALSE
);
2504 free ((char *) reg
->name
);
2509 /* Also locate the all upper case and all lower case versions.
2510 Do not complain if we cannot find one or the other as it
2511 was probably deleted above. */
2513 nbuf
= strdup (name
);
2514 for (p
= nbuf
; *p
; p
++)
2516 reg
= (struct reg_entry
*) hash_find (arm_reg_hsh
, nbuf
);
2519 hash_delete (arm_reg_hsh
, nbuf
, FALSE
);
2520 free ((char *) reg
->name
);
2526 for (p
= nbuf
; *p
; p
++)
2528 reg
= (struct reg_entry
*) hash_find (arm_reg_hsh
, nbuf
);
2531 hash_delete (arm_reg_hsh
, nbuf
, FALSE
);
2532 free ((char *) reg
->name
);
2542 *input_line_pointer
= saved_char
;
2543 demand_empty_rest_of_line ();
2546 /* Directives: Instruction set selection. */
2549 /* This code is to handle mapping symbols as defined in the ARM ELF spec.
2550 (See "Mapping symbols", section 4.5.5, ARM AAELF version 1.0).
2551 Note that previously, $a and $t has type STT_FUNC (BSF_OBJECT flag),
2552 and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
2554 /* Create a new mapping symbol for the transition to STATE. */
2557 make_mapping_symbol (enum mstate state
, valueT value
, fragS
*frag
)
2560 const char * symname
;
2567 type
= BSF_NO_FLAGS
;
2571 type
= BSF_NO_FLAGS
;
2575 type
= BSF_NO_FLAGS
;
2581 symbolP
= symbol_new (symname
, now_seg
, value
, frag
);
2582 symbol_get_bfdsym (symbolP
)->flags
|= type
| BSF_LOCAL
;
2587 THUMB_SET_FUNC (symbolP
, 0);
2588 ARM_SET_THUMB (symbolP
, 0);
2589 ARM_SET_INTERWORK (symbolP
, support_interwork
);
2593 THUMB_SET_FUNC (symbolP
, 1);
2594 ARM_SET_THUMB (symbolP
, 1);
2595 ARM_SET_INTERWORK (symbolP
, support_interwork
);
2603 /* Save the mapping symbols for future reference. Also check that
2604 we do not place two mapping symbols at the same offset within a
2605 frag. We'll handle overlap between frags in
2606 check_mapping_symbols.
2608 If .fill or other data filling directive generates zero sized data,
2609 the mapping symbol for the following code will have the same value
2610 as the one generated for the data filling directive. In this case,
2611 we replace the old symbol with the new one at the same address. */
2614 if (frag
->tc_frag_data
.first_map
!= NULL
)
2616 know (S_GET_VALUE (frag
->tc_frag_data
.first_map
) == 0);
2617 symbol_remove (frag
->tc_frag_data
.first_map
, &symbol_rootP
, &symbol_lastP
);
2619 frag
->tc_frag_data
.first_map
= symbolP
;
2621 if (frag
->tc_frag_data
.last_map
!= NULL
)
2623 know (S_GET_VALUE (frag
->tc_frag_data
.last_map
) <= S_GET_VALUE (symbolP
));
2624 if (S_GET_VALUE (frag
->tc_frag_data
.last_map
) == S_GET_VALUE (symbolP
))
2625 symbol_remove (frag
->tc_frag_data
.last_map
, &symbol_rootP
, &symbol_lastP
);
2627 frag
->tc_frag_data
.last_map
= symbolP
;
2630 /* We must sometimes convert a region marked as code to data during
2631 code alignment, if an odd number of bytes have to be padded. The
2632 code mapping symbol is pushed to an aligned address. */
2635 insert_data_mapping_symbol (enum mstate state
,
2636 valueT value
, fragS
*frag
, offsetT bytes
)
2638 /* If there was already a mapping symbol, remove it. */
2639 if (frag
->tc_frag_data
.last_map
!= NULL
2640 && S_GET_VALUE (frag
->tc_frag_data
.last_map
) == frag
->fr_address
+ value
)
2642 symbolS
*symp
= frag
->tc_frag_data
.last_map
;
2646 know (frag
->tc_frag_data
.first_map
== symp
);
2647 frag
->tc_frag_data
.first_map
= NULL
;
2649 frag
->tc_frag_data
.last_map
= NULL
;
2650 symbol_remove (symp
, &symbol_rootP
, &symbol_lastP
);
2653 make_mapping_symbol (MAP_DATA
, value
, frag
);
2654 make_mapping_symbol (state
, value
+ bytes
, frag
);
2657 static void mapping_state_2 (enum mstate state
, int max_chars
);
2659 /* Set the mapping state to STATE. Only call this when about to
2660 emit some STATE bytes to the file. */
2662 #define TRANSITION(from, to) (mapstate == (from) && state == (to))
2664 mapping_state (enum mstate state
)
2666 enum mstate mapstate
= seg_info (now_seg
)->tc_segment_info_data
.mapstate
;
2668 if (mapstate
== state
)
2669 /* The mapping symbol has already been emitted.
2670 There is nothing else to do. */
2673 if (state
== MAP_ARM
|| state
== MAP_THUMB
)
2675 All ARM instructions require 4-byte alignment.
2676 (Almost) all Thumb instructions require 2-byte alignment.
2678 When emitting instructions into any section, mark the section
2681 Some Thumb instructions are alignment-sensitive modulo 4 bytes,
2682 but themselves require 2-byte alignment; this applies to some
2683 PC- relative forms. However, these cases will invovle implicit
2684 literal pool generation or an explicit .align >=2, both of
2685 which will cause the section to me marked with sufficient
2686 alignment. Thus, we don't handle those cases here. */
2687 record_alignment (now_seg
, state
== MAP_ARM
? 2 : 1);
2689 if (TRANSITION (MAP_UNDEFINED
, MAP_DATA
))
2690 /* This case will be evaluated later. */
2693 mapping_state_2 (state
, 0);
2696 /* Same as mapping_state, but MAX_CHARS bytes have already been
2697 allocated. Put the mapping symbol that far back. */
2700 mapping_state_2 (enum mstate state
, int max_chars
)
2702 enum mstate mapstate
= seg_info (now_seg
)->tc_segment_info_data
.mapstate
;
2704 if (!SEG_NORMAL (now_seg
))
2707 if (mapstate
== state
)
2708 /* The mapping symbol has already been emitted.
2709 There is nothing else to do. */
2712 if (TRANSITION (MAP_UNDEFINED
, MAP_ARM
)
2713 || TRANSITION (MAP_UNDEFINED
, MAP_THUMB
))
2715 struct frag
* const frag_first
= seg_info (now_seg
)->frchainP
->frch_root
;
2716 const int add_symbol
= (frag_now
!= frag_first
) || (frag_now_fix () > 0);
2719 make_mapping_symbol (MAP_DATA
, (valueT
) 0, frag_first
);
2722 seg_info (now_seg
)->tc_segment_info_data
.mapstate
= state
;
2723 make_mapping_symbol (state
, (valueT
) frag_now_fix () - max_chars
, frag_now
);
2727 #define mapping_state(x) ((void)0)
2728 #define mapping_state_2(x, y) ((void)0)
2731 /* Find the real, Thumb encoded start of a Thumb function. */
2735 find_real_start (symbolS
* symbolP
)
2738 const char * name
= S_GET_NAME (symbolP
);
2739 symbolS
* new_target
;
2741 /* This definition must agree with the one in gcc/config/arm/thumb.c. */
2742 #define STUB_NAME ".real_start_of"
2747 /* The compiler may generate BL instructions to local labels because
2748 it needs to perform a branch to a far away location. These labels
2749 do not have a corresponding ".real_start_of" label. We check
2750 both for S_IS_LOCAL and for a leading dot, to give a way to bypass
2751 the ".real_start_of" convention for nonlocal branches. */
2752 if (S_IS_LOCAL (symbolP
) || name
[0] == '.')
2755 real_start
= ACONCAT ((STUB_NAME
, name
, NULL
));
2756 new_target
= symbol_find (real_start
);
2758 if (new_target
== NULL
)
2760 as_warn (_("Failed to find real start of function: %s\n"), name
);
2761 new_target
= symbolP
;
2769 opcode_select (int width
)
2776 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v4t
))
2777 as_bad (_("selected processor does not support THUMB opcodes"));
2780 /* No need to force the alignment, since we will have been
2781 coming from ARM mode, which is word-aligned. */
2782 record_alignment (now_seg
, 1);
2789 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
))
2790 as_bad (_("selected processor does not support ARM opcodes"));
2795 frag_align (2, 0, 0);
2797 record_alignment (now_seg
, 1);
2802 as_bad (_("invalid instruction size selected (%d)"), width
);
2807 s_arm (int ignore ATTRIBUTE_UNUSED
)
2810 demand_empty_rest_of_line ();
2814 s_thumb (int ignore ATTRIBUTE_UNUSED
)
2817 demand_empty_rest_of_line ();
2821 s_code (int unused ATTRIBUTE_UNUSED
)
2825 temp
= get_absolute_expression ();
2830 opcode_select (temp
);
2834 as_bad (_("invalid operand to .code directive (%d) (expecting 16 or 32)"), temp
);
2839 s_force_thumb (int ignore ATTRIBUTE_UNUSED
)
2841 /* If we are not already in thumb mode go into it, EVEN if
2842 the target processor does not support thumb instructions.
2843 This is used by gcc/config/arm/lib1funcs.asm for example
2844 to compile interworking support functions even if the
2845 target processor should not support interworking. */
2849 record_alignment (now_seg
, 1);
2852 demand_empty_rest_of_line ();
2856 s_thumb_func (int ignore ATTRIBUTE_UNUSED
)
2860 /* The following label is the name/address of the start of a Thumb function.
2861 We need to know this for the interworking support. */
2862 label_is_thumb_function_name
= TRUE
;
2865 /* Perform a .set directive, but also mark the alias as
2866 being a thumb function. */
2869 s_thumb_set (int equiv
)
2871 /* XXX the following is a duplicate of the code for s_set() in read.c
2872 We cannot just call that code as we need to get at the symbol that
2879 /* Especial apologies for the random logic:
2880 This just grew, and could be parsed much more simply!
2882 delim
= get_symbol_name (& name
);
2883 end_name
= input_line_pointer
;
2884 (void) restore_line_pointer (delim
);
2886 if (*input_line_pointer
!= ',')
2889 as_bad (_("expected comma after name \"%s\""), name
);
2891 ignore_rest_of_line ();
2895 input_line_pointer
++;
2898 if (name
[0] == '.' && name
[1] == '\0')
2900 /* XXX - this should not happen to .thumb_set. */
2904 if ((symbolP
= symbol_find (name
)) == NULL
2905 && (symbolP
= md_undefined_symbol (name
)) == NULL
)
2908 /* When doing symbol listings, play games with dummy fragments living
2909 outside the normal fragment chain to record the file and line info
2911 if (listing
& LISTING_SYMBOLS
)
2913 extern struct list_info_struct
* listing_tail
;
2914 fragS
* dummy_frag
= (fragS
* ) xmalloc (sizeof (fragS
));
2916 memset (dummy_frag
, 0, sizeof (fragS
));
2917 dummy_frag
->fr_type
= rs_fill
;
2918 dummy_frag
->line
= listing_tail
;
2919 symbolP
= symbol_new (name
, undefined_section
, 0, dummy_frag
);
2920 dummy_frag
->fr_symbol
= symbolP
;
2924 symbolP
= symbol_new (name
, undefined_section
, 0, &zero_address_frag
);
2927 /* "set" symbols are local unless otherwise specified. */
2928 SF_SET_LOCAL (symbolP
);
2929 #endif /* OBJ_COFF */
2930 } /* Make a new symbol. */
2932 symbol_table_insert (symbolP
);
2937 && S_IS_DEFINED (symbolP
)
2938 && S_GET_SEGMENT (symbolP
) != reg_section
)
2939 as_bad (_("symbol `%s' already defined"), S_GET_NAME (symbolP
));
2941 pseudo_set (symbolP
);
2943 demand_empty_rest_of_line ();
2945 /* XXX Now we come to the Thumb specific bit of code. */
2947 THUMB_SET_FUNC (symbolP
, 1);
2948 ARM_SET_THUMB (symbolP
, 1);
2949 #if defined OBJ_ELF || defined OBJ_COFF
2950 ARM_SET_INTERWORK (symbolP
, support_interwork
);
2954 /* Directives: Mode selection. */
2956 /* .syntax [unified|divided] - choose the new unified syntax
2957 (same for Arm and Thumb encoding, modulo slight differences in what
2958 can be represented) or the old divergent syntax for each mode. */
2960 s_syntax (int unused ATTRIBUTE_UNUSED
)
2964 delim
= get_symbol_name (& name
);
2966 if (!strcasecmp (name
, "unified"))
2967 unified_syntax
= TRUE
;
2968 else if (!strcasecmp (name
, "divided"))
2969 unified_syntax
= FALSE
;
2972 as_bad (_("unrecognized syntax mode \"%s\""), name
);
2975 (void) restore_line_pointer (delim
);
2976 demand_empty_rest_of_line ();
2979 /* Directives: sectioning and alignment. */
2982 s_bss (int ignore ATTRIBUTE_UNUSED
)
2984 /* We don't support putting frags in the BSS segment, we fake it by
2985 marking in_bss, then looking at s_skip for clues. */
2986 subseg_set (bss_section
, 0);
2987 demand_empty_rest_of_line ();
2989 #ifdef md_elf_section_change_hook
2990 md_elf_section_change_hook ();
2995 s_even (int ignore ATTRIBUTE_UNUSED
)
2997 /* Never make frag if expect extra pass. */
2999 frag_align (1, 0, 0);
3001 record_alignment (now_seg
, 1);
3003 demand_empty_rest_of_line ();
3006 /* Directives: CodeComposer Studio. */
3008 /* .ref (for CodeComposer Studio syntax only). */
3010 s_ccs_ref (int unused ATTRIBUTE_UNUSED
)
3012 if (codecomposer_syntax
)
3013 ignore_rest_of_line ();
3015 as_bad (_(".ref pseudo-op only available with -mccs flag."));
3018 /* If name is not NULL, then it is used for marking the beginning of a
3019 function, wherease if it is NULL then it means the function end. */
3021 asmfunc_debug (const char * name
)
3023 static const char * last_name
= NULL
;
3027 gas_assert (last_name
== NULL
);
3030 if (debug_type
== DEBUG_STABS
)
3031 stabs_generate_asm_func (name
, name
);
3035 gas_assert (last_name
!= NULL
);
3037 if (debug_type
== DEBUG_STABS
)
3038 stabs_generate_asm_endfunc (last_name
, last_name
);
3045 s_ccs_asmfunc (int unused ATTRIBUTE_UNUSED
)
3047 if (codecomposer_syntax
)
3049 switch (asmfunc_state
)
3051 case OUTSIDE_ASMFUNC
:
3052 asmfunc_state
= WAITING_ASMFUNC_NAME
;
3055 case WAITING_ASMFUNC_NAME
:
3056 as_bad (_(".asmfunc repeated."));
3059 case WAITING_ENDASMFUNC
:
3060 as_bad (_(".asmfunc without function."));
3063 demand_empty_rest_of_line ();
3066 as_bad (_(".asmfunc pseudo-op only available with -mccs flag."));
3070 s_ccs_endasmfunc (int unused ATTRIBUTE_UNUSED
)
3072 if (codecomposer_syntax
)
3074 switch (asmfunc_state
)
3076 case OUTSIDE_ASMFUNC
:
3077 as_bad (_(".endasmfunc without a .asmfunc."));
3080 case WAITING_ASMFUNC_NAME
:
3081 as_bad (_(".endasmfunc without function."));
3084 case WAITING_ENDASMFUNC
:
3085 asmfunc_state
= OUTSIDE_ASMFUNC
;
3086 asmfunc_debug (NULL
);
3089 demand_empty_rest_of_line ();
3092 as_bad (_(".endasmfunc pseudo-op only available with -mccs flag."));
3096 s_ccs_def (int name
)
3098 if (codecomposer_syntax
)
3101 as_bad (_(".def pseudo-op only available with -mccs flag."));
3104 /* Directives: Literal pools. */
3106 static literal_pool
*
3107 find_literal_pool (void)
3109 literal_pool
* pool
;
3111 for (pool
= list_of_pools
; pool
!= NULL
; pool
= pool
->next
)
3113 if (pool
->section
== now_seg
3114 && pool
->sub_section
== now_subseg
)
3121 static literal_pool
*
3122 find_or_make_literal_pool (void)
3124 /* Next literal pool ID number. */
3125 static unsigned int latest_pool_num
= 1;
3126 literal_pool
* pool
;
3128 pool
= find_literal_pool ();
3132 /* Create a new pool. */
3133 pool
= (literal_pool
*) xmalloc (sizeof (* pool
));
3137 pool
->next_free_entry
= 0;
3138 pool
->section
= now_seg
;
3139 pool
->sub_section
= now_subseg
;
3140 pool
->next
= list_of_pools
;
3141 pool
->symbol
= NULL
;
3142 pool
->alignment
= 2;
3144 /* Add it to the list. */
3145 list_of_pools
= pool
;
3148 /* New pools, and emptied pools, will have a NULL symbol. */
3149 if (pool
->symbol
== NULL
)
3151 pool
->symbol
= symbol_create (FAKE_LABEL_NAME
, undefined_section
,
3152 (valueT
) 0, &zero_address_frag
);
3153 pool
->id
= latest_pool_num
++;
3160 /* Add the literal in the global 'inst'
3161 structure to the relevant literal pool. */
3164 add_to_lit_pool (unsigned int nbytes
)
3166 #define PADDING_SLOT 0x1
3167 #define LIT_ENTRY_SIZE_MASK 0xFF
3168 literal_pool
* pool
;
3169 unsigned int entry
, pool_size
= 0;
3170 bfd_boolean padding_slot_p
= FALSE
;
3176 imm1
= inst
.operands
[1].imm
;
3177 imm2
= (inst
.operands
[1].regisimm
? inst
.operands
[1].reg
3178 : inst
.reloc
.exp
.X_unsigned
? 0
3179 : ((bfd_int64_t
) inst
.operands
[1].imm
) >> 32);
3180 if (target_big_endian
)
3183 imm2
= inst
.operands
[1].imm
;
3187 pool
= find_or_make_literal_pool ();
3189 /* Check if this literal value is already in the pool. */
3190 for (entry
= 0; entry
< pool
->next_free_entry
; entry
++)
3194 if ((pool
->literals
[entry
].X_op
== inst
.reloc
.exp
.X_op
)
3195 && (inst
.reloc
.exp
.X_op
== O_constant
)
3196 && (pool
->literals
[entry
].X_add_number
3197 == inst
.reloc
.exp
.X_add_number
)
3198 && (pool
->literals
[entry
].X_md
== nbytes
)
3199 && (pool
->literals
[entry
].X_unsigned
3200 == inst
.reloc
.exp
.X_unsigned
))
3203 if ((pool
->literals
[entry
].X_op
== inst
.reloc
.exp
.X_op
)
3204 && (inst
.reloc
.exp
.X_op
== O_symbol
)
3205 && (pool
->literals
[entry
].X_add_number
3206 == inst
.reloc
.exp
.X_add_number
)
3207 && (pool
->literals
[entry
].X_add_symbol
3208 == inst
.reloc
.exp
.X_add_symbol
)
3209 && (pool
->literals
[entry
].X_op_symbol
3210 == inst
.reloc
.exp
.X_op_symbol
)
3211 && (pool
->literals
[entry
].X_md
== nbytes
))
3214 else if ((nbytes
== 8)
3215 && !(pool_size
& 0x7)
3216 && ((entry
+ 1) != pool
->next_free_entry
)
3217 && (pool
->literals
[entry
].X_op
== O_constant
)
3218 && (pool
->literals
[entry
].X_add_number
== (offsetT
) imm1
)
3219 && (pool
->literals
[entry
].X_unsigned
3220 == inst
.reloc
.exp
.X_unsigned
)
3221 && (pool
->literals
[entry
+ 1].X_op
== O_constant
)
3222 && (pool
->literals
[entry
+ 1].X_add_number
== (offsetT
) imm2
)
3223 && (pool
->literals
[entry
+ 1].X_unsigned
3224 == inst
.reloc
.exp
.X_unsigned
))
3227 padding_slot_p
= ((pool
->literals
[entry
].X_md
>> 8) == PADDING_SLOT
);
3228 if (padding_slot_p
&& (nbytes
== 4))
3234 /* Do we need to create a new entry? */
3235 if (entry
== pool
->next_free_entry
)
3237 if (entry
>= MAX_LITERAL_POOL_SIZE
)
3239 inst
.error
= _("literal pool overflow");
3245 /* For 8-byte entries, we align to an 8-byte boundary,
3246 and split it into two 4-byte entries, because on 32-bit
3247 host, 8-byte constants are treated as big num, thus
3248 saved in "generic_bignum" which will be overwritten
3249 by later assignments.
3251 We also need to make sure there is enough space for
3254 We also check to make sure the literal operand is a
3256 if (!(inst
.reloc
.exp
.X_op
== O_constant
3257 || inst
.reloc
.exp
.X_op
== O_big
))
3259 inst
.error
= _("invalid type for literal pool");
3262 else if (pool_size
& 0x7)
3264 if ((entry
+ 2) >= MAX_LITERAL_POOL_SIZE
)
3266 inst
.error
= _("literal pool overflow");
3270 pool
->literals
[entry
] = inst
.reloc
.exp
;
3271 pool
->literals
[entry
].X_add_number
= 0;
3272 pool
->literals
[entry
++].X_md
= (PADDING_SLOT
<< 8) | 4;
3273 pool
->next_free_entry
+= 1;
3276 else if ((entry
+ 1) >= MAX_LITERAL_POOL_SIZE
)
3278 inst
.error
= _("literal pool overflow");
3282 pool
->literals
[entry
] = inst
.reloc
.exp
;
3283 pool
->literals
[entry
].X_op
= O_constant
;
3284 pool
->literals
[entry
].X_add_number
= imm1
;
3285 pool
->literals
[entry
].X_unsigned
= inst
.reloc
.exp
.X_unsigned
;
3286 pool
->literals
[entry
++].X_md
= 4;
3287 pool
->literals
[entry
] = inst
.reloc
.exp
;
3288 pool
->literals
[entry
].X_op
= O_constant
;
3289 pool
->literals
[entry
].X_add_number
= imm2
;
3290 pool
->literals
[entry
].X_unsigned
= inst
.reloc
.exp
.X_unsigned
;
3291 pool
->literals
[entry
].X_md
= 4;
3292 pool
->alignment
= 3;
3293 pool
->next_free_entry
+= 1;
3297 pool
->literals
[entry
] = inst
.reloc
.exp
;
3298 pool
->literals
[entry
].X_md
= 4;
3302 /* PR ld/12974: Record the location of the first source line to reference
3303 this entry in the literal pool. If it turns out during linking that the
3304 symbol does not exist we will be able to give an accurate line number for
3305 the (first use of the) missing reference. */
3306 if (debug_type
== DEBUG_DWARF2
)
3307 dwarf2_where (pool
->locs
+ entry
);
3309 pool
->next_free_entry
+= 1;
3311 else if (padding_slot_p
)
3313 pool
->literals
[entry
] = inst
.reloc
.exp
;
3314 pool
->literals
[entry
].X_md
= nbytes
;
3317 inst
.reloc
.exp
.X_op
= O_symbol
;
3318 inst
.reloc
.exp
.X_add_number
= pool_size
;
3319 inst
.reloc
.exp
.X_add_symbol
= pool
->symbol
;
3325 tc_start_label_without_colon (void)
3327 bfd_boolean ret
= TRUE
;
3329 if (codecomposer_syntax
&& asmfunc_state
== WAITING_ASMFUNC_NAME
)
3331 const char *label
= input_line_pointer
;
3333 while (!is_end_of_line
[(int) label
[-1]])
3338 as_bad (_("Invalid label '%s'"), label
);
3342 asmfunc_debug (label
);
3344 asmfunc_state
= WAITING_ENDASMFUNC
;
3350 /* Can't use symbol_new here, so have to create a symbol and then at
3351 a later date assign it a value. Thats what these functions do. */
3354 symbol_locate (symbolS
* symbolP
,
3355 const char * name
, /* It is copied, the caller can modify. */
3356 segT segment
, /* Segment identifier (SEG_<something>). */
3357 valueT valu
, /* Symbol value. */
3358 fragS
* frag
) /* Associated fragment. */
3361 char * preserved_copy_of_name
;
3363 name_length
= strlen (name
) + 1; /* +1 for \0. */
3364 obstack_grow (¬es
, name
, name_length
);
3365 preserved_copy_of_name
= (char *) obstack_finish (¬es
);
3367 #ifdef tc_canonicalize_symbol_name
3368 preserved_copy_of_name
=
3369 tc_canonicalize_symbol_name (preserved_copy_of_name
);
3372 S_SET_NAME (symbolP
, preserved_copy_of_name
);
3374 S_SET_SEGMENT (symbolP
, segment
);
3375 S_SET_VALUE (symbolP
, valu
);
3376 symbol_clear_list_pointers (symbolP
);
3378 symbol_set_frag (symbolP
, frag
);
3380 /* Link to end of symbol chain. */
3382 extern int symbol_table_frozen
;
3384 if (symbol_table_frozen
)
3388 symbol_append (symbolP
, symbol_lastP
, & symbol_rootP
, & symbol_lastP
);
3390 obj_symbol_new_hook (symbolP
);
3392 #ifdef tc_symbol_new_hook
3393 tc_symbol_new_hook (symbolP
);
3397 verify_symbol_chain (symbol_rootP
, symbol_lastP
);
3398 #endif /* DEBUG_SYMS */
3402 s_ltorg (int ignored ATTRIBUTE_UNUSED
)
3405 literal_pool
* pool
;
3408 pool
= find_literal_pool ();
3410 || pool
->symbol
== NULL
3411 || pool
->next_free_entry
== 0)
3414 /* Align pool as you have word accesses.
3415 Only make a frag if we have to. */
3417 frag_align (pool
->alignment
, 0, 0);
3419 record_alignment (now_seg
, 2);
3422 seg_info (now_seg
)->tc_segment_info_data
.mapstate
= MAP_DATA
;
3423 make_mapping_symbol (MAP_DATA
, (valueT
) frag_now_fix (), frag_now
);
3425 sprintf (sym_name
, "$$lit_\002%x", pool
->id
);
3427 symbol_locate (pool
->symbol
, sym_name
, now_seg
,
3428 (valueT
) frag_now_fix (), frag_now
);
3429 symbol_table_insert (pool
->symbol
);
3431 ARM_SET_THUMB (pool
->symbol
, thumb_mode
);
3433 #if defined OBJ_COFF || defined OBJ_ELF
3434 ARM_SET_INTERWORK (pool
->symbol
, support_interwork
);
3437 for (entry
= 0; entry
< pool
->next_free_entry
; entry
++)
3440 if (debug_type
== DEBUG_DWARF2
)
3441 dwarf2_gen_line_info (frag_now_fix (), pool
->locs
+ entry
);
3443 /* First output the expression in the instruction to the pool. */
3444 emit_expr (&(pool
->literals
[entry
]),
3445 pool
->literals
[entry
].X_md
& LIT_ENTRY_SIZE_MASK
);
3448 /* Mark the pool as empty. */
3449 pool
->next_free_entry
= 0;
3450 pool
->symbol
= NULL
;
3454 /* Forward declarations for functions below, in the MD interface
3456 static void fix_new_arm (fragS
*, int, short, expressionS
*, int, int);
3457 static valueT
create_unwind_entry (int);
3458 static void start_unwind_section (const segT
, int);
3459 static void add_unwind_opcode (valueT
, int);
3460 static void flush_pending_unwind (void);
3462 /* Directives: Data. */
3465 s_arm_elf_cons (int nbytes
)
3469 #ifdef md_flush_pending_output
3470 md_flush_pending_output ();
3473 if (is_it_end_of_statement ())
3475 demand_empty_rest_of_line ();
3479 #ifdef md_cons_align
3480 md_cons_align (nbytes
);
3483 mapping_state (MAP_DATA
);
3487 char *base
= input_line_pointer
;
3491 if (exp
.X_op
!= O_symbol
)
3492 emit_expr (&exp
, (unsigned int) nbytes
);
3495 char *before_reloc
= input_line_pointer
;
3496 reloc
= parse_reloc (&input_line_pointer
);
3499 as_bad (_("unrecognized relocation suffix"));
3500 ignore_rest_of_line ();
3503 else if (reloc
== BFD_RELOC_UNUSED
)
3504 emit_expr (&exp
, (unsigned int) nbytes
);
3507 reloc_howto_type
*howto
= (reloc_howto_type
*)
3508 bfd_reloc_type_lookup (stdoutput
,
3509 (bfd_reloc_code_real_type
) reloc
);
3510 int size
= bfd_get_reloc_size (howto
);
3512 if (reloc
== BFD_RELOC_ARM_PLT32
)
3514 as_bad (_("(plt) is only valid on branch targets"));
3515 reloc
= BFD_RELOC_UNUSED
;
3520 as_bad (_("%s relocations do not fit in %d bytes"),
3521 howto
->name
, nbytes
);
3524 /* We've parsed an expression stopping at O_symbol.
3525 But there may be more expression left now that we
3526 have parsed the relocation marker. Parse it again.
3527 XXX Surely there is a cleaner way to do this. */
3528 char *p
= input_line_pointer
;
3530 char *save_buf
= (char *) alloca (input_line_pointer
- base
);
3531 memcpy (save_buf
, base
, input_line_pointer
- base
);
3532 memmove (base
+ (input_line_pointer
- before_reloc
),
3533 base
, before_reloc
- base
);
3535 input_line_pointer
= base
+ (input_line_pointer
-before_reloc
);
3537 memcpy (base
, save_buf
, p
- base
);
3539 offset
= nbytes
- size
;
3540 p
= frag_more (nbytes
);
3541 memset (p
, 0, nbytes
);
3542 fix_new_exp (frag_now
, p
- frag_now
->fr_literal
+ offset
,
3543 size
, &exp
, 0, (enum bfd_reloc_code_real
) reloc
);
3548 while (*input_line_pointer
++ == ',');
3550 /* Put terminator back into stream. */
3551 input_line_pointer
--;
3552 demand_empty_rest_of_line ();
3555 /* Emit an expression containing a 32-bit thumb instruction.
3556 Implementation based on put_thumb32_insn. */
3559 emit_thumb32_expr (expressionS
* exp
)
3561 expressionS exp_high
= *exp
;
3563 exp_high
.X_add_number
= (unsigned long)exp_high
.X_add_number
>> 16;
3564 emit_expr (& exp_high
, (unsigned int) THUMB_SIZE
);
3565 exp
->X_add_number
&= 0xffff;
3566 emit_expr (exp
, (unsigned int) THUMB_SIZE
);
3569 /* Guess the instruction size based on the opcode. */
3572 thumb_insn_size (int opcode
)
3574 if ((unsigned int) opcode
< 0xe800u
)
3576 else if ((unsigned int) opcode
>= 0xe8000000u
)
3583 emit_insn (expressionS
*exp
, int nbytes
)
3587 if (exp
->X_op
== O_constant
)
3592 size
= thumb_insn_size (exp
->X_add_number
);
3596 if (size
== 2 && (unsigned int)exp
->X_add_number
> 0xffffu
)
3598 as_bad (_(".inst.n operand too big. "\
3599 "Use .inst.w instead"));
3604 if (now_it
.state
== AUTOMATIC_IT_BLOCK
)
3605 set_it_insn_type_nonvoid (OUTSIDE_IT_INSN
, 0);
3607 set_it_insn_type_nonvoid (NEUTRAL_IT_INSN
, 0);
3609 if (thumb_mode
&& (size
> THUMB_SIZE
) && !target_big_endian
)
3610 emit_thumb32_expr (exp
);
3612 emit_expr (exp
, (unsigned int) size
);
3614 it_fsm_post_encode ();
3618 as_bad (_("cannot determine Thumb instruction size. " \
3619 "Use .inst.n/.inst.w instead"));
3622 as_bad (_("constant expression required"));
3627 /* Like s_arm_elf_cons but do not use md_cons_align and
3628 set the mapping state to MAP_ARM/MAP_THUMB. */
3631 s_arm_elf_inst (int nbytes
)
3633 if (is_it_end_of_statement ())
3635 demand_empty_rest_of_line ();
3639 /* Calling mapping_state () here will not change ARM/THUMB,
3640 but will ensure not to be in DATA state. */
3643 mapping_state (MAP_THUMB
);
3648 as_bad (_("width suffixes are invalid in ARM mode"));
3649 ignore_rest_of_line ();
3655 mapping_state (MAP_ARM
);
3664 if (! emit_insn (& exp
, nbytes
))
3666 ignore_rest_of_line ();
3670 while (*input_line_pointer
++ == ',');
3672 /* Put terminator back into stream. */
3673 input_line_pointer
--;
3674 demand_empty_rest_of_line ();
3677 /* Parse a .rel31 directive. */
3680 s_arm_rel31 (int ignored ATTRIBUTE_UNUSED
)
3687 if (*input_line_pointer
== '1')
3688 highbit
= 0x80000000;
3689 else if (*input_line_pointer
!= '0')
3690 as_bad (_("expected 0 or 1"));
3692 input_line_pointer
++;
3693 if (*input_line_pointer
!= ',')
3694 as_bad (_("missing comma"));
3695 input_line_pointer
++;
3697 #ifdef md_flush_pending_output
3698 md_flush_pending_output ();
3701 #ifdef md_cons_align
3705 mapping_state (MAP_DATA
);
3710 md_number_to_chars (p
, highbit
, 4);
3711 fix_new_arm (frag_now
, p
- frag_now
->fr_literal
, 4, &exp
, 1,
3712 BFD_RELOC_ARM_PREL31
);
3714 demand_empty_rest_of_line ();
3717 /* Directives: AEABI stack-unwind tables. */
3719 /* Parse an unwind_fnstart directive. Simply records the current location. */
3722 s_arm_unwind_fnstart (int ignored ATTRIBUTE_UNUSED
)
3724 demand_empty_rest_of_line ();
3725 if (unwind
.proc_start
)
3727 as_bad (_("duplicate .fnstart directive"));
3731 /* Mark the start of the function. */
3732 unwind
.proc_start
= expr_build_dot ();
3734 /* Reset the rest of the unwind info. */
3735 unwind
.opcode_count
= 0;
3736 unwind
.table_entry
= NULL
;
3737 unwind
.personality_routine
= NULL
;
3738 unwind
.personality_index
= -1;
3739 unwind
.frame_size
= 0;
3740 unwind
.fp_offset
= 0;
3741 unwind
.fp_reg
= REG_SP
;
3743 unwind
.sp_restored
= 0;
3747 /* Parse a handlerdata directive. Creates the exception handling table entry
3748 for the function. */
3751 s_arm_unwind_handlerdata (int ignored ATTRIBUTE_UNUSED
)
3753 demand_empty_rest_of_line ();
3754 if (!unwind
.proc_start
)
3755 as_bad (MISSING_FNSTART
);
3757 if (unwind
.table_entry
)
3758 as_bad (_("duplicate .handlerdata directive"));
3760 create_unwind_entry (1);
3763 /* Parse an unwind_fnend directive. Generates the index table entry. */
3766 s_arm_unwind_fnend (int ignored ATTRIBUTE_UNUSED
)
3771 unsigned int marked_pr_dependency
;
3773 demand_empty_rest_of_line ();
3775 if (!unwind
.proc_start
)
3777 as_bad (_(".fnend directive without .fnstart"));
3781 /* Add eh table entry. */
3782 if (unwind
.table_entry
== NULL
)
3783 val
= create_unwind_entry (0);
3787 /* Add index table entry. This is two words. */
3788 start_unwind_section (unwind
.saved_seg
, 1);
3789 frag_align (2, 0, 0);
3790 record_alignment (now_seg
, 2);
3792 ptr
= frag_more (8);
3794 where
= frag_now_fix () - 8;
3796 /* Self relative offset of the function start. */
3797 fix_new (frag_now
, where
, 4, unwind
.proc_start
, 0, 1,
3798 BFD_RELOC_ARM_PREL31
);
3800 /* Indicate dependency on EHABI-defined personality routines to the
3801 linker, if it hasn't been done already. */
3802 marked_pr_dependency
3803 = seg_info (now_seg
)->tc_segment_info_data
.marked_pr_dependency
;
3804 if (unwind
.personality_index
>= 0 && unwind
.personality_index
< 3
3805 && !(marked_pr_dependency
& (1 << unwind
.personality_index
)))
3807 static const char *const name
[] =
3809 "__aeabi_unwind_cpp_pr0",
3810 "__aeabi_unwind_cpp_pr1",
3811 "__aeabi_unwind_cpp_pr2"
3813 symbolS
*pr
= symbol_find_or_make (name
[unwind
.personality_index
]);
3814 fix_new (frag_now
, where
, 0, pr
, 0, 1, BFD_RELOC_NONE
);
3815 seg_info (now_seg
)->tc_segment_info_data
.marked_pr_dependency
3816 |= 1 << unwind
.personality_index
;
3820 /* Inline exception table entry. */
3821 md_number_to_chars (ptr
+ 4, val
, 4);
3823 /* Self relative offset of the table entry. */
3824 fix_new (frag_now
, where
+ 4, 4, unwind
.table_entry
, 0, 1,
3825 BFD_RELOC_ARM_PREL31
);
3827 /* Restore the original section. */
3828 subseg_set (unwind
.saved_seg
, unwind
.saved_subseg
);
3830 unwind
.proc_start
= NULL
;
3834 /* Parse an unwind_cantunwind directive. */
3837 s_arm_unwind_cantunwind (int ignored ATTRIBUTE_UNUSED
)
3839 demand_empty_rest_of_line ();
3840 if (!unwind
.proc_start
)
3841 as_bad (MISSING_FNSTART
);
3843 if (unwind
.personality_routine
|| unwind
.personality_index
!= -1)
3844 as_bad (_("personality routine specified for cantunwind frame"));
3846 unwind
.personality_index
= -2;
3850 /* Parse a personalityindex directive. */
3853 s_arm_unwind_personalityindex (int ignored ATTRIBUTE_UNUSED
)
3857 if (!unwind
.proc_start
)
3858 as_bad (MISSING_FNSTART
);
3860 if (unwind
.personality_routine
|| unwind
.personality_index
!= -1)
3861 as_bad (_("duplicate .personalityindex directive"));
3865 if (exp
.X_op
!= O_constant
3866 || exp
.X_add_number
< 0 || exp
.X_add_number
> 15)
3868 as_bad (_("bad personality routine number"));
3869 ignore_rest_of_line ();
3873 unwind
.personality_index
= exp
.X_add_number
;
3875 demand_empty_rest_of_line ();
3879 /* Parse a personality directive. */
3882 s_arm_unwind_personality (int ignored ATTRIBUTE_UNUSED
)
3886 if (!unwind
.proc_start
)
3887 as_bad (MISSING_FNSTART
);
3889 if (unwind
.personality_routine
|| unwind
.personality_index
!= -1)
3890 as_bad (_("duplicate .personality directive"));
3892 c
= get_symbol_name (& name
);
3893 p
= input_line_pointer
;
3895 ++ input_line_pointer
;
3896 unwind
.personality_routine
= symbol_find_or_make (name
);
3898 demand_empty_rest_of_line ();
3902 /* Parse a directive saving core registers. */
3905 s_arm_unwind_save_core (void)
3911 range
= parse_reg_list (&input_line_pointer
);
3914 as_bad (_("expected register list"));
3915 ignore_rest_of_line ();
3919 demand_empty_rest_of_line ();
3921 /* Turn .unwind_movsp ip followed by .unwind_save {..., ip, ...}
3922 into .unwind_save {..., sp...}. We aren't bothered about the value of
3923 ip because it is clobbered by calls. */
3924 if (unwind
.sp_restored
&& unwind
.fp_reg
== 12
3925 && (range
& 0x3000) == 0x1000)
3927 unwind
.opcode_count
--;
3928 unwind
.sp_restored
= 0;
3929 range
= (range
| 0x2000) & ~0x1000;
3930 unwind
.pending_offset
= 0;
3936 /* See if we can use the short opcodes. These pop a block of up to 8
3937 registers starting with r4, plus maybe r14. */
3938 for (n
= 0; n
< 8; n
++)
3940 /* Break at the first non-saved register. */
3941 if ((range
& (1 << (n
+ 4))) == 0)
3944 /* See if there are any other bits set. */
3945 if (n
== 0 || (range
& (0xfff0 << n
) & 0xbff0) != 0)
3947 /* Use the long form. */
3948 op
= 0x8000 | ((range
>> 4) & 0xfff);
3949 add_unwind_opcode (op
, 2);
3953 /* Use the short form. */
3955 op
= 0xa8; /* Pop r14. */
3957 op
= 0xa0; /* Do not pop r14. */
3959 add_unwind_opcode (op
, 1);
3966 op
= 0xb100 | (range
& 0xf);
3967 add_unwind_opcode (op
, 2);
3970 /* Record the number of bytes pushed. */
3971 for (n
= 0; n
< 16; n
++)
3973 if (range
& (1 << n
))
3974 unwind
.frame_size
+= 4;
3979 /* Parse a directive saving FPA registers. */
3982 s_arm_unwind_save_fpa (int reg
)
3988 /* Get Number of registers to transfer. */
3989 if (skip_past_comma (&input_line_pointer
) != FAIL
)
3992 exp
.X_op
= O_illegal
;
3994 if (exp
.X_op
!= O_constant
)
3996 as_bad (_("expected , <constant>"));
3997 ignore_rest_of_line ();
4001 num_regs
= exp
.X_add_number
;
4003 if (num_regs
< 1 || num_regs
> 4)
4005 as_bad (_("number of registers must be in the range [1:4]"));
4006 ignore_rest_of_line ();
4010 demand_empty_rest_of_line ();
4015 op
= 0xb4 | (num_regs
- 1);
4016 add_unwind_opcode (op
, 1);
4021 op
= 0xc800 | (reg
<< 4) | (num_regs
- 1);
4022 add_unwind_opcode (op
, 2);
4024 unwind
.frame_size
+= num_regs
* 12;
4028 /* Parse a directive saving VFP registers for ARMv6 and above. */
4031 s_arm_unwind_save_vfp_armv6 (void)
4036 int num_vfpv3_regs
= 0;
4037 int num_regs_below_16
;
4039 count
= parse_vfp_reg_list (&input_line_pointer
, &start
, REGLIST_VFP_D
);
4042 as_bad (_("expected register list"));
4043 ignore_rest_of_line ();
4047 demand_empty_rest_of_line ();
4049 /* We always generate FSTMD/FLDMD-style unwinding opcodes (rather
4050 than FSTMX/FLDMX-style ones). */
4052 /* Generate opcode for (VFPv3) registers numbered in the range 16 .. 31. */
4054 num_vfpv3_regs
= count
;
4055 else if (start
+ count
> 16)
4056 num_vfpv3_regs
= start
+ count
- 16;
4058 if (num_vfpv3_regs
> 0)
4060 int start_offset
= start
> 16 ? start
- 16 : 0;
4061 op
= 0xc800 | (start_offset
<< 4) | (num_vfpv3_regs
- 1);
4062 add_unwind_opcode (op
, 2);
4065 /* Generate opcode for registers numbered in the range 0 .. 15. */
4066 num_regs_below_16
= num_vfpv3_regs
> 0 ? 16 - (int) start
: count
;
4067 gas_assert (num_regs_below_16
+ num_vfpv3_regs
== count
);
4068 if (num_regs_below_16
> 0)
4070 op
= 0xc900 | (start
<< 4) | (num_regs_below_16
- 1);
4071 add_unwind_opcode (op
, 2);
4074 unwind
.frame_size
+= count
* 8;
4078 /* Parse a directive saving VFP registers for pre-ARMv6. */
4081 s_arm_unwind_save_vfp (void)
4087 count
= parse_vfp_reg_list (&input_line_pointer
, ®
, REGLIST_VFP_D
);
4090 as_bad (_("expected register list"));
4091 ignore_rest_of_line ();
4095 demand_empty_rest_of_line ();
4100 op
= 0xb8 | (count
- 1);
4101 add_unwind_opcode (op
, 1);
4106 op
= 0xb300 | (reg
<< 4) | (count
- 1);
4107 add_unwind_opcode (op
, 2);
4109 unwind
.frame_size
+= count
* 8 + 4;
4113 /* Parse a directive saving iWMMXt data registers. */
4116 s_arm_unwind_save_mmxwr (void)
4124 if (*input_line_pointer
== '{')
4125 input_line_pointer
++;
4129 reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWR
);
4133 as_bad ("%s", _(reg_expected_msgs
[REG_TYPE_MMXWR
]));
4138 as_tsktsk (_("register list not in ascending order"));
4141 if (*input_line_pointer
== '-')
4143 input_line_pointer
++;
4144 hi_reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWR
);
4147 as_bad ("%s", _(reg_expected_msgs
[REG_TYPE_MMXWR
]));
4150 else if (reg
>= hi_reg
)
4152 as_bad (_("bad register range"));
4155 for (; reg
< hi_reg
; reg
++)
4159 while (skip_past_comma (&input_line_pointer
) != FAIL
);
4161 skip_past_char (&input_line_pointer
, '}');
4163 demand_empty_rest_of_line ();
4165 /* Generate any deferred opcodes because we're going to be looking at
4167 flush_pending_unwind ();
4169 for (i
= 0; i
< 16; i
++)
4171 if (mask
& (1 << i
))
4172 unwind
.frame_size
+= 8;
4175 /* Attempt to combine with a previous opcode. We do this because gcc
4176 likes to output separate unwind directives for a single block of
4178 if (unwind
.opcode_count
> 0)
4180 i
= unwind
.opcodes
[unwind
.opcode_count
- 1];
4181 if ((i
& 0xf8) == 0xc0)
4184 /* Only merge if the blocks are contiguous. */
4187 if ((mask
& 0xfe00) == (1 << 9))
4189 mask
|= ((1 << (i
+ 11)) - 1) & 0xfc00;
4190 unwind
.opcode_count
--;
4193 else if (i
== 6 && unwind
.opcode_count
>= 2)
4195 i
= unwind
.opcodes
[unwind
.opcode_count
- 2];
4199 op
= 0xffff << (reg
- 1);
4201 && ((mask
& op
) == (1u << (reg
- 1))))
4203 op
= (1 << (reg
+ i
+ 1)) - 1;
4204 op
&= ~((1 << reg
) - 1);
4206 unwind
.opcode_count
-= 2;
4213 /* We want to generate opcodes in the order the registers have been
4214 saved, ie. descending order. */
4215 for (reg
= 15; reg
>= -1; reg
--)
4217 /* Save registers in blocks. */
4219 || !(mask
& (1 << reg
)))
4221 /* We found an unsaved reg. Generate opcodes to save the
4228 op
= 0xc0 | (hi_reg
- 10);
4229 add_unwind_opcode (op
, 1);
4234 op
= 0xc600 | ((reg
+ 1) << 4) | ((hi_reg
- reg
) - 1);
4235 add_unwind_opcode (op
, 2);
4244 ignore_rest_of_line ();
4248 s_arm_unwind_save_mmxwcg (void)
4255 if (*input_line_pointer
== '{')
4256 input_line_pointer
++;
4258 skip_whitespace (input_line_pointer
);
4262 reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWCG
);
4266 as_bad ("%s", _(reg_expected_msgs
[REG_TYPE_MMXWCG
]));
4272 as_tsktsk (_("register list not in ascending order"));
4275 if (*input_line_pointer
== '-')
4277 input_line_pointer
++;
4278 hi_reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWCG
);
4281 as_bad ("%s", _(reg_expected_msgs
[REG_TYPE_MMXWCG
]));
4284 else if (reg
>= hi_reg
)
4286 as_bad (_("bad register range"));
4289 for (; reg
< hi_reg
; reg
++)
4293 while (skip_past_comma (&input_line_pointer
) != FAIL
);
4295 skip_past_char (&input_line_pointer
, '}');
4297 demand_empty_rest_of_line ();
4299 /* Generate any deferred opcodes because we're going to be looking at
4301 flush_pending_unwind ();
4303 for (reg
= 0; reg
< 16; reg
++)
4305 if (mask
& (1 << reg
))
4306 unwind
.frame_size
+= 4;
4309 add_unwind_opcode (op
, 2);
4312 ignore_rest_of_line ();
4316 /* Parse an unwind_save directive.
4317 If the argument is non-zero, this is a .vsave directive. */
4320 s_arm_unwind_save (int arch_v6
)
4323 struct reg_entry
*reg
;
4324 bfd_boolean had_brace
= FALSE
;
4326 if (!unwind
.proc_start
)
4327 as_bad (MISSING_FNSTART
);
4329 /* Figure out what sort of save we have. */
4330 peek
= input_line_pointer
;
4338 reg
= arm_reg_parse_multi (&peek
);
4342 as_bad (_("register expected"));
4343 ignore_rest_of_line ();
4352 as_bad (_("FPA .unwind_save does not take a register list"));
4353 ignore_rest_of_line ();
4356 input_line_pointer
= peek
;
4357 s_arm_unwind_save_fpa (reg
->number
);
4361 s_arm_unwind_save_core ();
4366 s_arm_unwind_save_vfp_armv6 ();
4368 s_arm_unwind_save_vfp ();
4371 case REG_TYPE_MMXWR
:
4372 s_arm_unwind_save_mmxwr ();
4375 case REG_TYPE_MMXWCG
:
4376 s_arm_unwind_save_mmxwcg ();
4380 as_bad (_(".unwind_save does not support this kind of register"));
4381 ignore_rest_of_line ();
4386 /* Parse an unwind_movsp directive. */
4389 s_arm_unwind_movsp (int ignored ATTRIBUTE_UNUSED
)
4395 if (!unwind
.proc_start
)
4396 as_bad (MISSING_FNSTART
);
4398 reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_RN
);
4401 as_bad ("%s", _(reg_expected_msgs
[REG_TYPE_RN
]));
4402 ignore_rest_of_line ();
4406 /* Optional constant. */
4407 if (skip_past_comma (&input_line_pointer
) != FAIL
)
4409 if (immediate_for_directive (&offset
) == FAIL
)
4415 demand_empty_rest_of_line ();
4417 if (reg
== REG_SP
|| reg
== REG_PC
)
4419 as_bad (_("SP and PC not permitted in .unwind_movsp directive"));
4423 if (unwind
.fp_reg
!= REG_SP
)
4424 as_bad (_("unexpected .unwind_movsp directive"));
4426 /* Generate opcode to restore the value. */
4428 add_unwind_opcode (op
, 1);
4430 /* Record the information for later. */
4431 unwind
.fp_reg
= reg
;
4432 unwind
.fp_offset
= unwind
.frame_size
- offset
;
4433 unwind
.sp_restored
= 1;
4436 /* Parse an unwind_pad directive. */
4439 s_arm_unwind_pad (int ignored ATTRIBUTE_UNUSED
)
4443 if (!unwind
.proc_start
)
4444 as_bad (MISSING_FNSTART
);
4446 if (immediate_for_directive (&offset
) == FAIL
)
4451 as_bad (_("stack increment must be multiple of 4"));
4452 ignore_rest_of_line ();
4456 /* Don't generate any opcodes, just record the details for later. */
4457 unwind
.frame_size
+= offset
;
4458 unwind
.pending_offset
+= offset
;
4460 demand_empty_rest_of_line ();
4463 /* Parse an unwind_setfp directive. */
4466 s_arm_unwind_setfp (int ignored ATTRIBUTE_UNUSED
)
4472 if (!unwind
.proc_start
)
4473 as_bad (MISSING_FNSTART
);
4475 fp_reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_RN
);
4476 if (skip_past_comma (&input_line_pointer
) == FAIL
)
4479 sp_reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_RN
);
4481 if (fp_reg
== FAIL
|| sp_reg
== FAIL
)
4483 as_bad (_("expected <reg>, <reg>"));
4484 ignore_rest_of_line ();
4488 /* Optional constant. */
4489 if (skip_past_comma (&input_line_pointer
) != FAIL
)
4491 if (immediate_for_directive (&offset
) == FAIL
)
4497 demand_empty_rest_of_line ();
4499 if (sp_reg
!= REG_SP
&& sp_reg
!= unwind
.fp_reg
)
4501 as_bad (_("register must be either sp or set by a previous"
4502 "unwind_movsp directive"));
4506 /* Don't generate any opcodes, just record the information for later. */
4507 unwind
.fp_reg
= fp_reg
;
4509 if (sp_reg
== REG_SP
)
4510 unwind
.fp_offset
= unwind
.frame_size
- offset
;
4512 unwind
.fp_offset
-= offset
;
4515 /* Parse an unwind_raw directive. */
4518 s_arm_unwind_raw (int ignored ATTRIBUTE_UNUSED
)
4521 /* This is an arbitrary limit. */
4522 unsigned char op
[16];
4525 if (!unwind
.proc_start
)
4526 as_bad (MISSING_FNSTART
);
4529 if (exp
.X_op
== O_constant
4530 && skip_past_comma (&input_line_pointer
) != FAIL
)
4532 unwind
.frame_size
+= exp
.X_add_number
;
4536 exp
.X_op
= O_illegal
;
4538 if (exp
.X_op
!= O_constant
)
4540 as_bad (_("expected <offset>, <opcode>"));
4541 ignore_rest_of_line ();
4547 /* Parse the opcode. */
4552 as_bad (_("unwind opcode too long"));
4553 ignore_rest_of_line ();
4555 if (exp
.X_op
!= O_constant
|| exp
.X_add_number
& ~0xff)
4557 as_bad (_("invalid unwind opcode"));
4558 ignore_rest_of_line ();
4561 op
[count
++] = exp
.X_add_number
;
4563 /* Parse the next byte. */
4564 if (skip_past_comma (&input_line_pointer
) == FAIL
)
4570 /* Add the opcode bytes in reverse order. */
4572 add_unwind_opcode (op
[count
], 1);
4574 demand_empty_rest_of_line ();
4578 /* Parse a .eabi_attribute directive. */
4581 s_arm_eabi_attribute (int ignored ATTRIBUTE_UNUSED
)
4583 int tag
= obj_elf_vendor_attribute (OBJ_ATTR_PROC
);
4585 if (tag
< NUM_KNOWN_OBJ_ATTRIBUTES
)
4586 attributes_set_explicitly
[tag
] = 1;
4589 /* Emit a tls fix for the symbol. */
4592 s_arm_tls_descseq (int ignored ATTRIBUTE_UNUSED
)
4596 #ifdef md_flush_pending_output
4597 md_flush_pending_output ();
4600 #ifdef md_cons_align
4604 /* Since we're just labelling the code, there's no need to define a
4607 p
= obstack_next_free (&frchain_now
->frch_obstack
);
4608 fix_new_arm (frag_now
, p
- frag_now
->fr_literal
, 4, &exp
, 0,
4609 thumb_mode
? BFD_RELOC_ARM_THM_TLS_DESCSEQ
4610 : BFD_RELOC_ARM_TLS_DESCSEQ
);
4612 #endif /* OBJ_ELF */
4614 static void s_arm_arch (int);
4615 static void s_arm_object_arch (int);
4616 static void s_arm_cpu (int);
4617 static void s_arm_fpu (int);
4618 static void s_arm_arch_extension (int);
4623 pe_directive_secrel (int dummy ATTRIBUTE_UNUSED
)
4630 if (exp
.X_op
== O_symbol
)
4631 exp
.X_op
= O_secrel
;
4633 emit_expr (&exp
, 4);
4635 while (*input_line_pointer
++ == ',');
4637 input_line_pointer
--;
4638 demand_empty_rest_of_line ();
4642 /* This table describes all the machine specific pseudo-ops the assembler
4643 has to support. The fields are:
4644 pseudo-op name without dot
4645 function to call to execute this pseudo-op
4646 Integer arg to pass to the function. */
4648 const pseudo_typeS md_pseudo_table
[] =
4650 /* Never called because '.req' does not start a line. */
4651 { "req", s_req
, 0 },
4652 /* Following two are likewise never called. */
4655 { "unreq", s_unreq
, 0 },
4656 { "bss", s_bss
, 0 },
4657 { "align", s_align_ptwo
, 2 },
4658 { "arm", s_arm
, 0 },
4659 { "thumb", s_thumb
, 0 },
4660 { "code", s_code
, 0 },
4661 { "force_thumb", s_force_thumb
, 0 },
4662 { "thumb_func", s_thumb_func
, 0 },
4663 { "thumb_set", s_thumb_set
, 0 },
4664 { "even", s_even
, 0 },
4665 { "ltorg", s_ltorg
, 0 },
4666 { "pool", s_ltorg
, 0 },
4667 { "syntax", s_syntax
, 0 },
4668 { "cpu", s_arm_cpu
, 0 },
4669 { "arch", s_arm_arch
, 0 },
4670 { "object_arch", s_arm_object_arch
, 0 },
4671 { "fpu", s_arm_fpu
, 0 },
4672 { "arch_extension", s_arm_arch_extension
, 0 },
4674 { "word", s_arm_elf_cons
, 4 },
4675 { "long", s_arm_elf_cons
, 4 },
4676 { "inst.n", s_arm_elf_inst
, 2 },
4677 { "inst.w", s_arm_elf_inst
, 4 },
4678 { "inst", s_arm_elf_inst
, 0 },
4679 { "rel31", s_arm_rel31
, 0 },
4680 { "fnstart", s_arm_unwind_fnstart
, 0 },
4681 { "fnend", s_arm_unwind_fnend
, 0 },
4682 { "cantunwind", s_arm_unwind_cantunwind
, 0 },
4683 { "personality", s_arm_unwind_personality
, 0 },
4684 { "personalityindex", s_arm_unwind_personalityindex
, 0 },
4685 { "handlerdata", s_arm_unwind_handlerdata
, 0 },
4686 { "save", s_arm_unwind_save
, 0 },
4687 { "vsave", s_arm_unwind_save
, 1 },
4688 { "movsp", s_arm_unwind_movsp
, 0 },
4689 { "pad", s_arm_unwind_pad
, 0 },
4690 { "setfp", s_arm_unwind_setfp
, 0 },
4691 { "unwind_raw", s_arm_unwind_raw
, 0 },
4692 { "eabi_attribute", s_arm_eabi_attribute
, 0 },
4693 { "tlsdescseq", s_arm_tls_descseq
, 0 },
4697 /* These are used for dwarf. */
4701 /* These are used for dwarf2. */
4702 { "file", (void (*) (int)) dwarf2_directive_file
, 0 },
4703 { "loc", dwarf2_directive_loc
, 0 },
4704 { "loc_mark_labels", dwarf2_directive_loc_mark_labels
, 0 },
4706 { "extend", float_cons
, 'x' },
4707 { "ldouble", float_cons
, 'x' },
4708 { "packed", float_cons
, 'p' },
4710 {"secrel32", pe_directive_secrel
, 0},
4713 /* These are for compatibility with CodeComposer Studio. */
4714 {"ref", s_ccs_ref
, 0},
4715 {"def", s_ccs_def
, 0},
4716 {"asmfunc", s_ccs_asmfunc
, 0},
4717 {"endasmfunc", s_ccs_endasmfunc
, 0},
4722 /* Parser functions used exclusively in instruction operands. */
4724 /* Generic immediate-value read function for use in insn parsing.
4725 STR points to the beginning of the immediate (the leading #);
4726 VAL receives the value; if the value is outside [MIN, MAX]
4727 issue an error. PREFIX_OPT is true if the immediate prefix is
4731 parse_immediate (char **str
, int *val
, int min
, int max
,
4732 bfd_boolean prefix_opt
)
4735 my_get_expression (&exp
, str
, prefix_opt
? GE_OPT_PREFIX
: GE_IMM_PREFIX
);
4736 if (exp
.X_op
!= O_constant
)
4738 inst
.error
= _("constant expression required");
4742 if (exp
.X_add_number
< min
|| exp
.X_add_number
> max
)
4744 inst
.error
= _("immediate value out of range");
4748 *val
= exp
.X_add_number
;
4752 /* Less-generic immediate-value read function with the possibility of loading a
4753 big (64-bit) immediate, as required by Neon VMOV, VMVN and logic immediate
4754 instructions. Puts the result directly in inst.operands[i]. */
4757 parse_big_immediate (char **str
, int i
, expressionS
*in_exp
,
4758 bfd_boolean allow_symbol_p
)
4761 expressionS
*exp_p
= in_exp
? in_exp
: &exp
;
4764 my_get_expression (exp_p
, &ptr
, GE_OPT_PREFIX_BIG
);
4766 if (exp_p
->X_op
== O_constant
)
4768 inst
.operands
[i
].imm
= exp_p
->X_add_number
& 0xffffffff;
4769 /* If we're on a 64-bit host, then a 64-bit number can be returned using
4770 O_constant. We have to be careful not to break compilation for
4771 32-bit X_add_number, though. */
4772 if ((exp_p
->X_add_number
& ~(offsetT
)(0xffffffffU
)) != 0)
4774 /* X >> 32 is illegal if sizeof (exp_p->X_add_number) == 4. */
4775 inst
.operands
[i
].reg
= (((exp_p
->X_add_number
>> 16) >> 16)
4777 inst
.operands
[i
].regisimm
= 1;
4780 else if (exp_p
->X_op
== O_big
4781 && LITTLENUM_NUMBER_OF_BITS
* exp_p
->X_add_number
> 32)
4783 unsigned parts
= 32 / LITTLENUM_NUMBER_OF_BITS
, j
, idx
= 0;
4785 /* Bignums have their least significant bits in
4786 generic_bignum[0]. Make sure we put 32 bits in imm and
4787 32 bits in reg, in a (hopefully) portable way. */
4788 gas_assert (parts
!= 0);
4790 /* Make sure that the number is not too big.
4791 PR 11972: Bignums can now be sign-extended to the
4792 size of a .octa so check that the out of range bits
4793 are all zero or all one. */
4794 if (LITTLENUM_NUMBER_OF_BITS
* exp_p
->X_add_number
> 64)
4796 LITTLENUM_TYPE m
= -1;
4798 if (generic_bignum
[parts
* 2] != 0
4799 && generic_bignum
[parts
* 2] != m
)
4802 for (j
= parts
* 2 + 1; j
< (unsigned) exp_p
->X_add_number
; j
++)
4803 if (generic_bignum
[j
] != generic_bignum
[j
-1])
4807 inst
.operands
[i
].imm
= 0;
4808 for (j
= 0; j
< parts
; j
++, idx
++)
4809 inst
.operands
[i
].imm
|= generic_bignum
[idx
]
4810 << (LITTLENUM_NUMBER_OF_BITS
* j
);
4811 inst
.operands
[i
].reg
= 0;
4812 for (j
= 0; j
< parts
; j
++, idx
++)
4813 inst
.operands
[i
].reg
|= generic_bignum
[idx
]
4814 << (LITTLENUM_NUMBER_OF_BITS
* j
);
4815 inst
.operands
[i
].regisimm
= 1;
4817 else if (!(exp_p
->X_op
== O_symbol
&& allow_symbol_p
))
4825 /* Returns the pseudo-register number of an FPA immediate constant,
4826 or FAIL if there isn't a valid constant here. */
4829 parse_fpa_immediate (char ** str
)
4831 LITTLENUM_TYPE words
[MAX_LITTLENUMS
];
4837 /* First try and match exact strings, this is to guarantee
4838 that some formats will work even for cross assembly. */
4840 for (i
= 0; fp_const
[i
]; i
++)
4842 if (strncmp (*str
, fp_const
[i
], strlen (fp_const
[i
])) == 0)
4846 *str
+= strlen (fp_const
[i
]);
4847 if (is_end_of_line
[(unsigned char) **str
])
4853 /* Just because we didn't get a match doesn't mean that the constant
4854 isn't valid, just that it is in a format that we don't
4855 automatically recognize. Try parsing it with the standard
4856 expression routines. */
4858 memset (words
, 0, MAX_LITTLENUMS
* sizeof (LITTLENUM_TYPE
));
4860 /* Look for a raw floating point number. */
4861 if ((save_in
= atof_ieee (*str
, 'x', words
)) != NULL
4862 && is_end_of_line
[(unsigned char) *save_in
])
4864 for (i
= 0; i
< NUM_FLOAT_VALS
; i
++)
4866 for (j
= 0; j
< MAX_LITTLENUMS
; j
++)
4868 if (words
[j
] != fp_values
[i
][j
])
4872 if (j
== MAX_LITTLENUMS
)
4880 /* Try and parse a more complex expression, this will probably fail
4881 unless the code uses a floating point prefix (eg "0f"). */
4882 save_in
= input_line_pointer
;
4883 input_line_pointer
= *str
;
4884 if (expression (&exp
) == absolute_section
4885 && exp
.X_op
== O_big
4886 && exp
.X_add_number
< 0)
4888 /* FIXME: 5 = X_PRECISION, should be #define'd where we can use it.
4890 #define X_PRECISION 5
4891 #define E_PRECISION 15L
4892 if (gen_to_words (words
, X_PRECISION
, E_PRECISION
) == 0)
4894 for (i
= 0; i
< NUM_FLOAT_VALS
; i
++)
4896 for (j
= 0; j
< MAX_LITTLENUMS
; j
++)
4898 if (words
[j
] != fp_values
[i
][j
])
4902 if (j
== MAX_LITTLENUMS
)
4904 *str
= input_line_pointer
;
4905 input_line_pointer
= save_in
;
4912 *str
= input_line_pointer
;
4913 input_line_pointer
= save_in
;
4914 inst
.error
= _("invalid FPA immediate expression");
/* Returns 1 if a number has "quarter-precision" float format
   0baBbbbbbc defgh000 00000000 00000000.  */

static int
is_quarter_float (unsigned imm)
{
  /* Bit 29 selects which of the two legal exponent-field patterns
     (0b0111110 or 0b1000000 in bits 30:24) we must match.  */
  int exp_pattern = (imm & 0x20000000) ? 0x3e000000 : 0x40000000;

  /* The low 19 bits must be zero and the exponent field must equal
     the selected pattern exactly.  */
  return (imm & 0x7ffff) == 0 && ((imm & 0x7e000000) ^ exp_pattern) == 0;
}
4929 /* Detect the presence of a floating point or integer zero constant,
4933 parse_ifimm_zero (char **in
)
4937 if (!is_immediate_prefix (**in
))
4942 /* Accept #0x0 as a synonym for #0. */
4943 if (strncmp (*in
, "0x", 2) == 0)
4946 if (parse_immediate (in
, &val
, 0, 0, TRUE
) == FAIL
)
4951 error_code
= atof_generic (in
, ".", EXP_CHARS
,
4952 &generic_floating_point_number
);
4955 && generic_floating_point_number
.sign
== '+'
4956 && (generic_floating_point_number
.low
4957 > generic_floating_point_number
.leader
))
4963 /* Parse an 8-bit "quarter-precision" floating point number of the form:
4964 0baBbbbbbc defgh000 00000000 00000000.
4965 The zero and minus-zero cases need special handling, since they can't be
4966 encoded in the "quarter-precision" float format, but can nonetheless be
4967 loaded as integer constants. */
4970 parse_qfloat_immediate (char **ccp
, int *immed
)
4974 LITTLENUM_TYPE words
[MAX_LITTLENUMS
];
4975 int found_fpchar
= 0;
4977 skip_past_char (&str
, '#');
4979 /* We must not accidentally parse an integer as a floating-point number. Make
4980 sure that the value we parse is not an integer by checking for special
4981 characters '.' or 'e'.
4982 FIXME: This is a horrible hack, but doing better is tricky because type
4983 information isn't in a very usable state at parse time. */
4985 skip_whitespace (fpnum
);
4987 if (strncmp (fpnum
, "0x", 2) == 0)
4991 for (; *fpnum
!= '\0' && *fpnum
!= ' ' && *fpnum
!= '\n'; fpnum
++)
4992 if (*fpnum
== '.' || *fpnum
== 'e' || *fpnum
== 'E')
5002 if ((str
= atof_ieee (str
, 's', words
)) != NULL
)
5004 unsigned fpword
= 0;
5007 /* Our FP word must be 32 bits (single-precision FP). */
5008 for (i
= 0; i
< 32 / LITTLENUM_NUMBER_OF_BITS
; i
++)
5010 fpword
<<= LITTLENUM_NUMBER_OF_BITS
;
5014 if (is_quarter_float (fpword
) || (fpword
& 0x7fffffff) == 0)
5027 /* Shift operands. */
5030 SHIFT_LSL
, SHIFT_LSR
, SHIFT_ASR
, SHIFT_ROR
, SHIFT_RRX
5033 struct asm_shift_name
5036 enum shift_kind kind
;
5039 /* Third argument to parse_shift. */
5040 enum parse_shift_mode
5042 NO_SHIFT_RESTRICT
, /* Any kind of shift is accepted. */
5043 SHIFT_IMMEDIATE
, /* Shift operand must be an immediate. */
5044 SHIFT_LSL_OR_ASR_IMMEDIATE
, /* Shift must be LSL or ASR immediate. */
5045 SHIFT_ASR_IMMEDIATE
, /* Shift must be ASR immediate. */
5046 SHIFT_LSL_IMMEDIATE
, /* Shift must be LSL immediate. */
5049 /* Parse a <shift> specifier on an ARM data processing instruction.
5050 This has three forms:
5052 (LSL|LSR|ASL|ASR|ROR) Rs
5053 (LSL|LSR|ASL|ASR|ROR) #imm
5056 Note that ASL is assimilated to LSL in the instruction encoding, and
5057 RRX to ROR #0 (which cannot be written as such). */
5060 parse_shift (char **str
, int i
, enum parse_shift_mode mode
)
5062 const struct asm_shift_name
*shift_name
;
5063 enum shift_kind shift
;
5068 for (p
= *str
; ISALPHA (*p
); p
++)
5073 inst
.error
= _("shift expression expected");
5077 shift_name
= (const struct asm_shift_name
*) hash_find_n (arm_shift_hsh
, *str
,
5080 if (shift_name
== NULL
)
5082 inst
.error
= _("shift expression expected");
5086 shift
= shift_name
->kind
;
5090 case NO_SHIFT_RESTRICT
:
5091 case SHIFT_IMMEDIATE
: break;
5093 case SHIFT_LSL_OR_ASR_IMMEDIATE
:
5094 if (shift
!= SHIFT_LSL
&& shift
!= SHIFT_ASR
)
5096 inst
.error
= _("'LSL' or 'ASR' required");
5101 case SHIFT_LSL_IMMEDIATE
:
5102 if (shift
!= SHIFT_LSL
)
5104 inst
.error
= _("'LSL' required");
5109 case SHIFT_ASR_IMMEDIATE
:
5110 if (shift
!= SHIFT_ASR
)
5112 inst
.error
= _("'ASR' required");
5120 if (shift
!= SHIFT_RRX
)
5122 /* Whitespace can appear here if the next thing is a bare digit. */
5123 skip_whitespace (p
);
5125 if (mode
== NO_SHIFT_RESTRICT
5126 && (reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) != FAIL
)
5128 inst
.operands
[i
].imm
= reg
;
5129 inst
.operands
[i
].immisreg
= 1;
5131 else if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_IMM_PREFIX
))
5134 inst
.operands
[i
].shift_kind
= shift
;
5135 inst
.operands
[i
].shifted
= 1;
5140 /* Parse a <shifter_operand> for an ARM data processing instruction:
5143 #<immediate>, <rotate>
5147 where <shift> is defined by parse_shift above, and <rotate> is a
5148 multiple of 2 between 0 and 30. Validation of immediate operands
5149 is deferred to md_apply_fix. */
5152 parse_shifter_operand (char **str
, int i
)
5157 if ((value
= arm_reg_parse (str
, REG_TYPE_RN
)) != FAIL
)
5159 inst
.operands
[i
].reg
= value
;
5160 inst
.operands
[i
].isreg
= 1;
5162 /* parse_shift will override this if appropriate */
5163 inst
.reloc
.exp
.X_op
= O_constant
;
5164 inst
.reloc
.exp
.X_add_number
= 0;
5166 if (skip_past_comma (str
) == FAIL
)
5169 /* Shift operation on register. */
5170 return parse_shift (str
, i
, NO_SHIFT_RESTRICT
);
5173 if (my_get_expression (&inst
.reloc
.exp
, str
, GE_IMM_PREFIX
))
5176 if (skip_past_comma (str
) == SUCCESS
)
5178 /* #x, y -- ie explicit rotation by Y. */
5179 if (my_get_expression (&exp
, str
, GE_NO_PREFIX
))
5182 if (exp
.X_op
!= O_constant
|| inst
.reloc
.exp
.X_op
!= O_constant
)
5184 inst
.error
= _("constant expression expected");
5188 value
= exp
.X_add_number
;
5189 if (value
< 0 || value
> 30 || value
% 2 != 0)
5191 inst
.error
= _("invalid rotation");
5194 if (inst
.reloc
.exp
.X_add_number
< 0 || inst
.reloc
.exp
.X_add_number
> 255)
5196 inst
.error
= _("invalid constant");
5200 /* Encode as specified. */
5201 inst
.operands
[i
].imm
= inst
.reloc
.exp
.X_add_number
| value
<< 7;
5205 inst
.reloc
.type
= BFD_RELOC_ARM_IMMEDIATE
;
5206 inst
.reloc
.pc_rel
= 0;
5210 /* Group relocation information. Each entry in the table contains the
5211 textual name of the relocation as may appear in assembler source
5212 and must end with a colon.
5213 Along with this textual name are the relocation codes to be used if
5214 the corresponding instruction is an ALU instruction (ADD or SUB only),
5215 an LDR, an LDRS, or an LDC. */
5217 struct group_reloc_table_entry
5228 /* Varieties of non-ALU group relocation. */
5235 static struct group_reloc_table_entry group_reloc_table
[] =
5236 { /* Program counter relative: */
5238 BFD_RELOC_ARM_ALU_PC_G0_NC
, /* ALU */
5243 BFD_RELOC_ARM_ALU_PC_G0
, /* ALU */
5244 BFD_RELOC_ARM_LDR_PC_G0
, /* LDR */
5245 BFD_RELOC_ARM_LDRS_PC_G0
, /* LDRS */
5246 BFD_RELOC_ARM_LDC_PC_G0
}, /* LDC */
5248 BFD_RELOC_ARM_ALU_PC_G1_NC
, /* ALU */
5253 BFD_RELOC_ARM_ALU_PC_G1
, /* ALU */
5254 BFD_RELOC_ARM_LDR_PC_G1
, /* LDR */
5255 BFD_RELOC_ARM_LDRS_PC_G1
, /* LDRS */
5256 BFD_RELOC_ARM_LDC_PC_G1
}, /* LDC */
5258 BFD_RELOC_ARM_ALU_PC_G2
, /* ALU */
5259 BFD_RELOC_ARM_LDR_PC_G2
, /* LDR */
5260 BFD_RELOC_ARM_LDRS_PC_G2
, /* LDRS */
5261 BFD_RELOC_ARM_LDC_PC_G2
}, /* LDC */
5262 /* Section base relative */
5264 BFD_RELOC_ARM_ALU_SB_G0_NC
, /* ALU */
5269 BFD_RELOC_ARM_ALU_SB_G0
, /* ALU */
5270 BFD_RELOC_ARM_LDR_SB_G0
, /* LDR */
5271 BFD_RELOC_ARM_LDRS_SB_G0
, /* LDRS */
5272 BFD_RELOC_ARM_LDC_SB_G0
}, /* LDC */
5274 BFD_RELOC_ARM_ALU_SB_G1_NC
, /* ALU */
5279 BFD_RELOC_ARM_ALU_SB_G1
, /* ALU */
5280 BFD_RELOC_ARM_LDR_SB_G1
, /* LDR */
5281 BFD_RELOC_ARM_LDRS_SB_G1
, /* LDRS */
5282 BFD_RELOC_ARM_LDC_SB_G1
}, /* LDC */
5284 BFD_RELOC_ARM_ALU_SB_G2
, /* ALU */
5285 BFD_RELOC_ARM_LDR_SB_G2
, /* LDR */
5286 BFD_RELOC_ARM_LDRS_SB_G2
, /* LDRS */
5287 BFD_RELOC_ARM_LDC_SB_G2
}, /* LDC */
5288 /* Absolute thumb alu relocations. */
5290 BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
,/* ALU. */
5295 BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC
,/* ALU. */
5300 BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC
,/* ALU. */
5305 BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
,/* ALU. */
5310 /* Given the address of a pointer pointing to the textual name of a group
5311 relocation as may appear in assembler source, attempt to find its details
5312 in group_reloc_table. The pointer will be updated to the character after
5313 the trailing colon. On failure, FAIL will be returned; SUCCESS
5314 otherwise. On success, *entry will be updated to point at the relevant
5315 group_reloc_table entry. */
5318 find_group_reloc_table_entry (char **str
, struct group_reloc_table_entry
**out
)
5321 for (i
= 0; i
< ARRAY_SIZE (group_reloc_table
); i
++)
5323 int length
= strlen (group_reloc_table
[i
].name
);
5325 if (strncasecmp (group_reloc_table
[i
].name
, *str
, length
) == 0
5326 && (*str
)[length
] == ':')
5328 *out
= &group_reloc_table
[i
];
5329 *str
+= (length
+ 1);
5337 /* Parse a <shifter_operand> for an ARM data processing instruction
5338 (as for parse_shifter_operand) where group relocations are allowed:
5341 #<immediate>, <rotate>
5342 #:<group_reloc>:<expression>
5346 where <group_reloc> is one of the strings defined in group_reloc_table.
5347 The hashes are optional.
5349 Everything else is as for parse_shifter_operand. */
5351 static parse_operand_result
5352 parse_shifter_operand_group_reloc (char **str
, int i
)
5354 /* Determine if we have the sequence of characters #: or just :
5355 coming next. If we do, then we check for a group relocation.
5356 If we don't, punt the whole lot to parse_shifter_operand. */
5358 if (((*str
)[0] == '#' && (*str
)[1] == ':')
5359 || (*str
)[0] == ':')
5361 struct group_reloc_table_entry
*entry
;
5363 if ((*str
)[0] == '#')
5368 /* Try to parse a group relocation. Anything else is an error. */
5369 if (find_group_reloc_table_entry (str
, &entry
) == FAIL
)
5371 inst
.error
= _("unknown group relocation");
5372 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
5375 /* We now have the group relocation table entry corresponding to
5376 the name in the assembler source. Next, we parse the expression. */
5377 if (my_get_expression (&inst
.reloc
.exp
, str
, GE_NO_PREFIX
))
5378 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
5380 /* Record the relocation type (always the ALU variant here). */
5381 inst
.reloc
.type
= (bfd_reloc_code_real_type
) entry
->alu_code
;
5382 gas_assert (inst
.reloc
.type
!= 0);
5384 return PARSE_OPERAND_SUCCESS
;
5387 return parse_shifter_operand (str
, i
) == SUCCESS
5388 ? PARSE_OPERAND_SUCCESS
: PARSE_OPERAND_FAIL
;
5390 /* Never reached. */
5393 /* Parse a Neon alignment expression. Information is written to
5394 inst.operands[i]. We assume the initial ':' has been skipped.
5396 align .imm = align << 8, .immisalign=1, .preind=0 */
5397 static parse_operand_result
5398 parse_neon_alignment (char **str
, int i
)
5403 my_get_expression (&exp
, &p
, GE_NO_PREFIX
);
5405 if (exp
.X_op
!= O_constant
)
5407 inst
.error
= _("alignment must be constant");
5408 return PARSE_OPERAND_FAIL
;
5411 inst
.operands
[i
].imm
= exp
.X_add_number
<< 8;
5412 inst
.operands
[i
].immisalign
= 1;
5413 /* Alignments are not pre-indexes. */
5414 inst
.operands
[i
].preind
= 0;
5417 return PARSE_OPERAND_SUCCESS
;
5420 /* Parse all forms of an ARM address expression. Information is written
5421 to inst.operands[i] and/or inst.reloc.
5423 Preindexed addressing (.preind=1):
5425 [Rn, #offset] .reg=Rn .reloc.exp=offset
5426 [Rn, +/-Rm] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5427 [Rn, +/-Rm, shift] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5428 .shift_kind=shift .reloc.exp=shift_imm
5430 These three may have a trailing ! which causes .writeback to be set also.
5432 Postindexed addressing (.postind=1, .writeback=1):
5434 [Rn], #offset .reg=Rn .reloc.exp=offset
5435 [Rn], +/-Rm .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5436 [Rn], +/-Rm, shift .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5437 .shift_kind=shift .reloc.exp=shift_imm
5439 Unindexed addressing (.preind=0, .postind=0):
5441 [Rn], {option} .reg=Rn .imm=option .immisreg=0
5445 [Rn]{!} shorthand for [Rn,#0]{!}
5446 =immediate .isreg=0 .reloc.exp=immediate
5447 label .reg=PC .reloc.pc_rel=1 .reloc.exp=label
5449 It is the caller's responsibility to check for addressing modes not
5450 supported by the instruction, and to set inst.reloc.type. */
5452 static parse_operand_result
5453 parse_address_main (char **str
, int i
, int group_relocations
,
5454 group_reloc_type group_type
)
5459 if (skip_past_char (&p
, '[') == FAIL
)
5461 if (skip_past_char (&p
, '=') == FAIL
)
5463 /* Bare address - translate to PC-relative offset. */
5464 inst
.reloc
.pc_rel
= 1;
5465 inst
.operands
[i
].reg
= REG_PC
;
5466 inst
.operands
[i
].isreg
= 1;
5467 inst
.operands
[i
].preind
= 1;
5469 if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_OPT_PREFIX_BIG
))
5470 return PARSE_OPERAND_FAIL
;
5472 else if (parse_big_immediate (&p
, i
, &inst
.reloc
.exp
,
5473 /*allow_symbol_p=*/TRUE
))
5474 return PARSE_OPERAND_FAIL
;
5477 return PARSE_OPERAND_SUCCESS
;
5480 /* PR gas/14887: Allow for whitespace after the opening bracket. */
5481 skip_whitespace (p
);
5483 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) == FAIL
)
5485 inst
.error
= _(reg_expected_msgs
[REG_TYPE_RN
]);
5486 return PARSE_OPERAND_FAIL
;
5488 inst
.operands
[i
].reg
= reg
;
5489 inst
.operands
[i
].isreg
= 1;
5491 if (skip_past_comma (&p
) == SUCCESS
)
5493 inst
.operands
[i
].preind
= 1;
5496 else if (*p
== '-') p
++, inst
.operands
[i
].negative
= 1;
5498 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) != FAIL
)
5500 inst
.operands
[i
].imm
= reg
;
5501 inst
.operands
[i
].immisreg
= 1;
5503 if (skip_past_comma (&p
) == SUCCESS
)
5504 if (parse_shift (&p
, i
, SHIFT_IMMEDIATE
) == FAIL
)
5505 return PARSE_OPERAND_FAIL
;
5507 else if (skip_past_char (&p
, ':') == SUCCESS
)
5509 /* FIXME: '@' should be used here, but it's filtered out by generic
5510 code before we get to see it here. This may be subject to
5512 parse_operand_result result
= parse_neon_alignment (&p
, i
);
5514 if (result
!= PARSE_OPERAND_SUCCESS
)
5519 if (inst
.operands
[i
].negative
)
5521 inst
.operands
[i
].negative
= 0;
5525 if (group_relocations
5526 && ((*p
== '#' && *(p
+ 1) == ':') || *p
== ':'))
5528 struct group_reloc_table_entry
*entry
;
5530 /* Skip over the #: or : sequence. */
5536 /* Try to parse a group relocation. Anything else is an
5538 if (find_group_reloc_table_entry (&p
, &entry
) == FAIL
)
5540 inst
.error
= _("unknown group relocation");
5541 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
5544 /* We now have the group relocation table entry corresponding to
5545 the name in the assembler source. Next, we parse the
5547 if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_NO_PREFIX
))
5548 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
5550 /* Record the relocation type. */
5554 inst
.reloc
.type
= (bfd_reloc_code_real_type
) entry
->ldr_code
;
5558 inst
.reloc
.type
= (bfd_reloc_code_real_type
) entry
->ldrs_code
;
5562 inst
.reloc
.type
= (bfd_reloc_code_real_type
) entry
->ldc_code
;
5569 if (inst
.reloc
.type
== 0)
5571 inst
.error
= _("this group relocation is not allowed on this instruction");
5572 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
5578 if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_IMM_PREFIX
))
5579 return PARSE_OPERAND_FAIL
;
5580 /* If the offset is 0, find out if it's a +0 or -0. */
5581 if (inst
.reloc
.exp
.X_op
== O_constant
5582 && inst
.reloc
.exp
.X_add_number
== 0)
5584 skip_whitespace (q
);
5588 skip_whitespace (q
);
5591 inst
.operands
[i
].negative
= 1;
5596 else if (skip_past_char (&p
, ':') == SUCCESS
)
5598 /* FIXME: '@' should be used here, but it's filtered out by generic code
5599 before we get to see it here. This may be subject to change. */
5600 parse_operand_result result
= parse_neon_alignment (&p
, i
);
5602 if (result
!= PARSE_OPERAND_SUCCESS
)
5606 if (skip_past_char (&p
, ']') == FAIL
)
5608 inst
.error
= _("']' expected");
5609 return PARSE_OPERAND_FAIL
;
5612 if (skip_past_char (&p
, '!') == SUCCESS
)
5613 inst
.operands
[i
].writeback
= 1;
5615 else if (skip_past_comma (&p
) == SUCCESS
)
5617 if (skip_past_char (&p
, '{') == SUCCESS
)
5619 /* [Rn], {expr} - unindexed, with option */
5620 if (parse_immediate (&p
, &inst
.operands
[i
].imm
,
5621 0, 255, TRUE
) == FAIL
)
5622 return PARSE_OPERAND_FAIL
;
5624 if (skip_past_char (&p
, '}') == FAIL
)
5626 inst
.error
= _("'}' expected at end of 'option' field");
5627 return PARSE_OPERAND_FAIL
;
5629 if (inst
.operands
[i
].preind
)
5631 inst
.error
= _("cannot combine index with option");
5632 return PARSE_OPERAND_FAIL
;
5635 return PARSE_OPERAND_SUCCESS
;
5639 inst
.operands
[i
].postind
= 1;
5640 inst
.operands
[i
].writeback
= 1;
5642 if (inst
.operands
[i
].preind
)
5644 inst
.error
= _("cannot combine pre- and post-indexing");
5645 return PARSE_OPERAND_FAIL
;
5649 else if (*p
== '-') p
++, inst
.operands
[i
].negative
= 1;
5651 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) != FAIL
)
5653 /* We might be using the immediate for alignment already. If we
5654 are, OR the register number into the low-order bits. */
5655 if (inst
.operands
[i
].immisalign
)
5656 inst
.operands
[i
].imm
|= reg
;
5658 inst
.operands
[i
].imm
= reg
;
5659 inst
.operands
[i
].immisreg
= 1;
5661 if (skip_past_comma (&p
) == SUCCESS
)
5662 if (parse_shift (&p
, i
, SHIFT_IMMEDIATE
) == FAIL
)
5663 return PARSE_OPERAND_FAIL
;
5668 if (inst
.operands
[i
].negative
)
5670 inst
.operands
[i
].negative
= 0;
5673 if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_IMM_PREFIX
))
5674 return PARSE_OPERAND_FAIL
;
5675 /* If the offset is 0, find out if it's a +0 or -0. */
5676 if (inst
.reloc
.exp
.X_op
== O_constant
5677 && inst
.reloc
.exp
.X_add_number
== 0)
5679 skip_whitespace (q
);
5683 skip_whitespace (q
);
5686 inst
.operands
[i
].negative
= 1;
5692 /* If at this point neither .preind nor .postind is set, we have a
5693 bare [Rn]{!}, which is shorthand for [Rn,#0]{!}. */
5694 if (inst
.operands
[i
].preind
== 0 && inst
.operands
[i
].postind
== 0)
5696 inst
.operands
[i
].preind
= 1;
5697 inst
.reloc
.exp
.X_op
= O_constant
;
5698 inst
.reloc
.exp
.X_add_number
= 0;
5701 return PARSE_OPERAND_SUCCESS
;
5705 parse_address (char **str
, int i
)
5707 return parse_address_main (str
, i
, 0, GROUP_LDR
) == PARSE_OPERAND_SUCCESS
5711 static parse_operand_result
5712 parse_address_group_reloc (char **str
, int i
, group_reloc_type type
)
5714 return parse_address_main (str
, i
, 1, type
);
5717 /* Parse an operand for a MOVW or MOVT instruction. */
5719 parse_half (char **str
)
5724 skip_past_char (&p
, '#');
5725 if (strncasecmp (p
, ":lower16:", 9) == 0)
5726 inst
.reloc
.type
= BFD_RELOC_ARM_MOVW
;
5727 else if (strncasecmp (p
, ":upper16:", 9) == 0)
5728 inst
.reloc
.type
= BFD_RELOC_ARM_MOVT
;
5730 if (inst
.reloc
.type
!= BFD_RELOC_UNUSED
)
5733 skip_whitespace (p
);
5736 if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_NO_PREFIX
))
5739 if (inst
.reloc
.type
== BFD_RELOC_UNUSED
)
5741 if (inst
.reloc
.exp
.X_op
!= O_constant
)
5743 inst
.error
= _("constant expression expected");
5746 if (inst
.reloc
.exp
.X_add_number
< 0
5747 || inst
.reloc
.exp
.X_add_number
> 0xffff)
5749 inst
.error
= _("immediate value out of range");
5757 /* Miscellaneous. */
5759 /* Parse a PSR flag operand. The value returned is FAIL on syntax error,
5760 or a bitmask suitable to be or-ed into the ARM msr instruction. */
5762 parse_psr (char **str
, bfd_boolean lhs
)
5765 unsigned long psr_field
;
5766 const struct asm_psr
*psr
;
5768 bfd_boolean is_apsr
= FALSE
;
5769 bfd_boolean m_profile
= ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_m
);
5771 /* PR gas/12698: If the user has specified -march=all then m_profile will
5772 be TRUE, but we want to ignore it in this case as we are building for any
5773 CPU type, including non-m variants. */
5774 if (ARM_FEATURE_CORE_EQUAL (selected_cpu
, arm_arch_any
))
5777 /* CPSR's and SPSR's can now be lowercase. This is just a convenience
5778 feature for ease of use and backwards compatibility. */
5780 if (strncasecmp (p
, "SPSR", 4) == 0)
5783 goto unsupported_psr
;
5785 psr_field
= SPSR_BIT
;
5787 else if (strncasecmp (p
, "CPSR", 4) == 0)
5790 goto unsupported_psr
;
5794 else if (strncasecmp (p
, "APSR", 4) == 0)
5796 /* APSR[_<bits>] can be used as a synonym for CPSR[_<flags>] on ARMv7-A
5797 and ARMv7-R architecture CPUs. */
5806 while (ISALNUM (*p
) || *p
== '_');
5808 if (strncasecmp (start
, "iapsr", 5) == 0
5809 || strncasecmp (start
, "eapsr", 5) == 0
5810 || strncasecmp (start
, "xpsr", 4) == 0
5811 || strncasecmp (start
, "psr", 3) == 0)
5812 p
= start
+ strcspn (start
, "rR") + 1;
5814 psr
= (const struct asm_psr
*) hash_find_n (arm_v7m_psr_hsh
, start
,
5820 /* If APSR is being written, a bitfield may be specified. Note that
5821 APSR itself is handled above. */
5822 if (psr
->field
<= 3)
5824 psr_field
= psr
->field
;
5830 /* M-profile MSR instructions have the mask field set to "10", except
5831 *PSR variants which modify APSR, which may use a different mask (and
5832 have been handled already). Do that by setting the PSR_f field
5834 return psr
->field
| (lhs
? PSR_f
: 0);
5837 goto unsupported_psr
;
5843 /* A suffix follows. */
5849 while (ISALNUM (*p
) || *p
== '_');
5853 /* APSR uses a notation for bits, rather than fields. */
5854 unsigned int nzcvq_bits
= 0;
5855 unsigned int g_bit
= 0;
5858 for (bit
= start
; bit
!= p
; bit
++)
5860 switch (TOLOWER (*bit
))
5863 nzcvq_bits
|= (nzcvq_bits
& 0x01) ? 0x20 : 0x01;
5867 nzcvq_bits
|= (nzcvq_bits
& 0x02) ? 0x20 : 0x02;
5871 nzcvq_bits
|= (nzcvq_bits
& 0x04) ? 0x20 : 0x04;
5875 nzcvq_bits
|= (nzcvq_bits
& 0x08) ? 0x20 : 0x08;
5879 nzcvq_bits
|= (nzcvq_bits
& 0x10) ? 0x20 : 0x10;
5883 g_bit
|= (g_bit
& 0x1) ? 0x2 : 0x1;
5887 inst
.error
= _("unexpected bit specified after APSR");
5892 if (nzcvq_bits
== 0x1f)
5897 if (!ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6_dsp
))
5899 inst
.error
= _("selected processor does not "
5900 "support DSP extension");
5907 if ((nzcvq_bits
& 0x20) != 0
5908 || (nzcvq_bits
!= 0x1f && nzcvq_bits
!= 0)
5909 || (g_bit
& 0x2) != 0)
5911 inst
.error
= _("bad bitmask specified after APSR");
5917 psr
= (const struct asm_psr
*) hash_find_n (arm_psr_hsh
, start
,
5922 psr_field
|= psr
->field
;
5928 goto error
; /* Garbage after "[CS]PSR". */
5930 /* Unadorned APSR is equivalent to APSR_nzcvq/CPSR_f (for writes). This
5931 is deprecated, but allow it anyway. */
5935 as_tsktsk (_("writing to APSR without specifying a bitmask is "
5938 else if (!m_profile
)
5939 /* These bits are never right for M-profile devices: don't set them
5940 (only code paths which read/write APSR reach here). */
5941 psr_field
|= (PSR_c
| PSR_f
);
5947 inst
.error
= _("selected processor does not support requested special "
5948 "purpose register");
5952 inst
.error
= _("flag for {c}psr instruction expected");
5956 /* Parse the flags argument to CPSI[ED]. Returns FAIL on error, or a
5957 value suitable for splatting into the AIF field of the instruction. */
5960 parse_cps_flags (char **str
)
5969 case '\0': case ',':
5972 case 'a': case 'A': saw_a_flag
= 1; val
|= 0x4; break;
5973 case 'i': case 'I': saw_a_flag
= 1; val
|= 0x2; break;
5974 case 'f': case 'F': saw_a_flag
= 1; val
|= 0x1; break;
5977 inst
.error
= _("unrecognized CPS flag");
5982 if (saw_a_flag
== 0)
5984 inst
.error
= _("missing CPS flags");
5992 /* Parse an endian specifier ("BE" or "LE", case insensitive);
5993 returns 0 for big-endian, 1 for little-endian, FAIL for an error. */
5996 parse_endian_specifier (char **str
)
6001 if (strncasecmp (s
, "BE", 2))
6003 else if (strncasecmp (s
, "LE", 2))
6007 inst
.error
= _("valid endian specifiers are be or le");
6011 if (ISALNUM (s
[2]) || s
[2] == '_')
6013 inst
.error
= _("valid endian specifiers are be or le");
6018 return little_endian
;
6021 /* Parse a rotation specifier: ROR #0, #8, #16, #24. *val receives a
6022 value suitable for poking into the rotate field of an sxt or sxta
6023 instruction, or FAIL on error. */
6026 parse_ror (char **str
)
6031 if (strncasecmp (s
, "ROR", 3) == 0)
6035 inst
.error
= _("missing rotation field after comma");
6039 if (parse_immediate (&s
, &rot
, 0, 24, FALSE
) == FAIL
)
6044 case 0: *str
= s
; return 0x0;
6045 case 8: *str
= s
; return 0x1;
6046 case 16: *str
= s
; return 0x2;
6047 case 24: *str
= s
; return 0x3;
6050 inst
.error
= _("rotation can only be 0, 8, 16, or 24");
6055 /* Parse a conditional code (from conds[] below). The value returned is in the
6056 range 0 .. 14, or FAIL. */
6058 parse_cond (char **str
)
6061 const struct asm_cond
*c
;
6063 /* Condition codes are always 2 characters, so matching up to
6064 3 characters is sufficient. */
6069 while (ISALPHA (*q
) && n
< 3)
6071 cond
[n
] = TOLOWER (*q
);
6076 c
= (const struct asm_cond
*) hash_find_n (arm_cond_hsh
, cond
, n
);
6079 inst
.error
= _("condition required");
6087 /* If the given feature available in the selected CPU, mark it as used.
6088 Returns TRUE iff feature is available. */
6090 mark_feature_used (const arm_feature_set
*feature
)
6092 /* Ensure the option is valid on the current architecture. */
6093 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, *feature
))
6096 /* Add the appropriate architecture feature for the barrier option used.
6099 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
, *feature
);
6101 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
, *feature
);
6106 /* Parse an option for a barrier instruction. Returns the encoding for the
6109 parse_barrier (char **str
)
6112 const struct asm_barrier_opt
*o
;
6115 while (ISALPHA (*q
))
6118 o
= (const struct asm_barrier_opt
*) hash_find_n (arm_barrier_opt_hsh
, p
,
6123 if (!mark_feature_used (&o
->arch
))
6130 /* Parse the operands of a table branch instruction. Similar to a memory
6133 parse_tb (char **str
)
6138 if (skip_past_char (&p
, '[') == FAIL
)
6140 inst
.error
= _("'[' expected");
6144 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) == FAIL
)
6146 inst
.error
= _(reg_expected_msgs
[REG_TYPE_RN
]);
6149 inst
.operands
[0].reg
= reg
;
6151 if (skip_past_comma (&p
) == FAIL
)
6153 inst
.error
= _("',' expected");
6157 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) == FAIL
)
6159 inst
.error
= _(reg_expected_msgs
[REG_TYPE_RN
]);
6162 inst
.operands
[0].imm
= reg
;
6164 if (skip_past_comma (&p
) == SUCCESS
)
6166 if (parse_shift (&p
, 0, SHIFT_LSL_IMMEDIATE
) == FAIL
)
6168 if (inst
.reloc
.exp
.X_add_number
!= 1)
6170 inst
.error
= _("invalid shift");
6173 inst
.operands
[0].shifted
= 1;
6176 if (skip_past_char (&p
, ']') == FAIL
)
6178 inst
.error
= _("']' expected");
6185 /* Parse the operands of a Neon VMOV instruction. See do_neon_mov for more
6186 information on the types the operands can take and how they are encoded.
6187 Up to four operands may be read; this function handles setting the
6188 ".present" field for each read operand itself.
6189 Updates STR and WHICH_OPERAND if parsing is successful and returns SUCCESS,
6190 else returns FAIL. */
6193 parse_neon_mov (char **str
, int *which_operand
)
6195 int i
= *which_operand
, val
;
6196 enum arm_reg_type rtype
;
6198 struct neon_type_el optype
;
6200 if ((val
= parse_scalar (&ptr
, 8, &optype
)) != FAIL
)
6202 /* Case 4: VMOV<c><q>.<size> <Dn[x]>, <Rd>. */
6203 inst
.operands
[i
].reg
= val
;
6204 inst
.operands
[i
].isscalar
= 1;
6205 inst
.operands
[i
].vectype
= optype
;
6206 inst
.operands
[i
++].present
= 1;
6208 if (skip_past_comma (&ptr
) == FAIL
)
6211 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) == FAIL
)
6214 inst
.operands
[i
].reg
= val
;
6215 inst
.operands
[i
].isreg
= 1;
6216 inst
.operands
[i
].present
= 1;
6218 else if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_NSDQ
, &rtype
, &optype
))
6221 /* Cases 0, 1, 2, 3, 5 (D only). */
6222 if (skip_past_comma (&ptr
) == FAIL
)
6225 inst
.operands
[i
].reg
= val
;
6226 inst
.operands
[i
].isreg
= 1;
6227 inst
.operands
[i
].isquad
= (rtype
== REG_TYPE_NQ
);
6228 inst
.operands
[i
].issingle
= (rtype
== REG_TYPE_VFS
);
6229 inst
.operands
[i
].isvec
= 1;
6230 inst
.operands
[i
].vectype
= optype
;
6231 inst
.operands
[i
++].present
= 1;
6233 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) != FAIL
)
6235 /* Case 5: VMOV<c><q> <Dm>, <Rd>, <Rn>.
6236 Case 13: VMOV <Sd>, <Rm> */
6237 inst
.operands
[i
].reg
= val
;
6238 inst
.operands
[i
].isreg
= 1;
6239 inst
.operands
[i
].present
= 1;
6241 if (rtype
== REG_TYPE_NQ
)
6243 first_error (_("can't use Neon quad register here"));
6246 else if (rtype
!= REG_TYPE_VFS
)
6249 if (skip_past_comma (&ptr
) == FAIL
)
6251 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) == FAIL
)
6253 inst
.operands
[i
].reg
= val
;
6254 inst
.operands
[i
].isreg
= 1;
6255 inst
.operands
[i
].present
= 1;
6258 else if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_NSDQ
, &rtype
,
6261 /* Case 0: VMOV<c><q> <Qd>, <Qm>
6262 Case 1: VMOV<c><q> <Dd>, <Dm>
6263 Case 8: VMOV.F32 <Sd>, <Sm>
6264 Case 15: VMOV <Sd>, <Se>, <Rn>, <Rm> */
6266 inst
.operands
[i
].reg
= val
;
6267 inst
.operands
[i
].isreg
= 1;
6268 inst
.operands
[i
].isquad
= (rtype
== REG_TYPE_NQ
);
6269 inst
.operands
[i
].issingle
= (rtype
== REG_TYPE_VFS
);
6270 inst
.operands
[i
].isvec
= 1;
6271 inst
.operands
[i
].vectype
= optype
;
6272 inst
.operands
[i
].present
= 1;
6274 if (skip_past_comma (&ptr
) == SUCCESS
)
6279 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) == FAIL
)
6282 inst
.operands
[i
].reg
= val
;
6283 inst
.operands
[i
].isreg
= 1;
6284 inst
.operands
[i
++].present
= 1;
6286 if (skip_past_comma (&ptr
) == FAIL
)
6289 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) == FAIL
)
6292 inst
.operands
[i
].reg
= val
;
6293 inst
.operands
[i
].isreg
= 1;
6294 inst
.operands
[i
].present
= 1;
6297 else if (parse_qfloat_immediate (&ptr
, &inst
.operands
[i
].imm
) == SUCCESS
)
6298 /* Case 2: VMOV<c><q>.<dt> <Qd>, #<float-imm>
6299 Case 3: VMOV<c><q>.<dt> <Dd>, #<float-imm>
6300 Case 10: VMOV.F32 <Sd>, #<imm>
6301 Case 11: VMOV.F64 <Dd>, #<imm> */
6302 inst
.operands
[i
].immisfloat
= 1;
6303 else if (parse_big_immediate (&ptr
, i
, NULL
, /*allow_symbol_p=*/FALSE
)
6305 /* Case 2: VMOV<c><q>.<dt> <Qd>, #<imm>
6306 Case 3: VMOV<c><q>.<dt> <Dd>, #<imm> */
6310 first_error (_("expected <Rm> or <Dm> or <Qm> operand"));
6314 else if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) != FAIL
)
6317 inst
.operands
[i
].reg
= val
;
6318 inst
.operands
[i
].isreg
= 1;
6319 inst
.operands
[i
++].present
= 1;
6321 if (skip_past_comma (&ptr
) == FAIL
)
6324 if ((val
= parse_scalar (&ptr
, 8, &optype
)) != FAIL
)
6326 /* Case 6: VMOV<c><q>.<dt> <Rd>, <Dn[x]> */
6327 inst
.operands
[i
].reg
= val
;
6328 inst
.operands
[i
].isscalar
= 1;
6329 inst
.operands
[i
].present
= 1;
6330 inst
.operands
[i
].vectype
= optype
;
6332 else if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) != FAIL
)
6334 /* Case 7: VMOV<c><q> <Rd>, <Rn>, <Dm> */
6335 inst
.operands
[i
].reg
= val
;
6336 inst
.operands
[i
].isreg
= 1;
6337 inst
.operands
[i
++].present
= 1;
6339 if (skip_past_comma (&ptr
) == FAIL
)
6342 if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_VFSD
, &rtype
, &optype
))
6345 first_error (_(reg_expected_msgs
[REG_TYPE_VFSD
]));
6349 inst
.operands
[i
].reg
= val
;
6350 inst
.operands
[i
].isreg
= 1;
6351 inst
.operands
[i
].isvec
= 1;
6352 inst
.operands
[i
].issingle
= (rtype
== REG_TYPE_VFS
);
6353 inst
.operands
[i
].vectype
= optype
;
6354 inst
.operands
[i
].present
= 1;
6356 if (rtype
== REG_TYPE_VFS
)
6360 if (skip_past_comma (&ptr
) == FAIL
)
6362 if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_VFS
, NULL
,
6365 first_error (_(reg_expected_msgs
[REG_TYPE_VFS
]));
6368 inst
.operands
[i
].reg
= val
;
6369 inst
.operands
[i
].isreg
= 1;
6370 inst
.operands
[i
].isvec
= 1;
6371 inst
.operands
[i
].issingle
= 1;
6372 inst
.operands
[i
].vectype
= optype
;
6373 inst
.operands
[i
].present
= 1;
6376 else if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_VFS
, NULL
, &optype
))
6380 inst
.operands
[i
].reg
= val
;
6381 inst
.operands
[i
].isreg
= 1;
6382 inst
.operands
[i
].isvec
= 1;
6383 inst
.operands
[i
].issingle
= 1;
6384 inst
.operands
[i
].vectype
= optype
;
6385 inst
.operands
[i
].present
= 1;
6390 first_error (_("parse error"));
6394 /* Successfully parsed the operands. Update args. */
6400 first_error (_("expected comma"));
6404 first_error (_(reg_expected_msgs
[REG_TYPE_RN
]));
6408 /* Use this macro when the operand constraints are different
6409 for ARM and THUMB (e.g. ldrd). */
6410 #define MIX_ARM_THUMB_OPERANDS(arm_operand, thumb_operand) \
6411 ((arm_operand) | ((thumb_operand) << 16))
6413 /* Matcher codes for parse_operands. */
6414 enum operand_parse_code
6416 OP_stop
, /* end of line */
6418 OP_RR
, /* ARM register */
6419 OP_RRnpc
, /* ARM register, not r15 */
6420 OP_RRnpcsp
, /* ARM register, neither r15 nor r13 (a.k.a. 'BadReg') */
6421 OP_RRnpcb
, /* ARM register, not r15, in square brackets */
6422 OP_RRnpctw
, /* ARM register, not r15 in Thumb-state or with writeback,
6423 optional trailing ! */
6424 OP_RRw
, /* ARM register, not r15, optional trailing ! */
6425 OP_RCP
, /* Coprocessor number */
6426 OP_RCN
, /* Coprocessor register */
6427 OP_RF
, /* FPA register */
6428 OP_RVS
, /* VFP single precision register */
6429 OP_RVD
, /* VFP double precision register (0..15) */
6430 OP_RND
, /* Neon double precision register (0..31) */
6431 OP_RNQ
, /* Neon quad precision register */
6432 OP_RVSD
, /* VFP single or double precision register */
6433 OP_RNDQ
, /* Neon double or quad precision register */
6434 OP_RNSDQ
, /* Neon single, double or quad precision register */
6435 OP_RNSC
, /* Neon scalar D[X] */
6436 OP_RVC
, /* VFP control register */
6437 OP_RMF
, /* Maverick F register */
6438 OP_RMD
, /* Maverick D register */
6439 OP_RMFX
, /* Maverick FX register */
6440 OP_RMDX
, /* Maverick DX register */
6441 OP_RMAX
, /* Maverick AX register */
6442 OP_RMDS
, /* Maverick DSPSC register */
6443 OP_RIWR
, /* iWMMXt wR register */
6444 OP_RIWC
, /* iWMMXt wC register */
6445 OP_RIWG
, /* iWMMXt wCG register */
6446 OP_RXA
, /* XScale accumulator register */
6448 OP_REGLST
, /* ARM register list */
6449 OP_VRSLST
, /* VFP single-precision register list */
6450 OP_VRDLST
, /* VFP double-precision register list */
6451 OP_VRSDLST
, /* VFP single or double-precision register list (& quad) */
6452 OP_NRDLST
, /* Neon double-precision register list (d0-d31, qN aliases) */
6453 OP_NSTRLST
, /* Neon element/structure list */
6455 OP_RNDQ_I0
, /* Neon D or Q reg, or immediate zero. */
6456 OP_RVSD_I0
, /* VFP S or D reg, or immediate zero. */
6457 OP_RSVD_FI0
, /* VFP S or D reg, or floating point immediate zero. */
6458 OP_RR_RNSC
, /* ARM reg or Neon scalar. */
6459 OP_RNSDQ_RNSC
, /* Vector S, D or Q reg, or Neon scalar. */
6460 OP_RNDQ_RNSC
, /* Neon D or Q reg, or Neon scalar. */
6461 OP_RND_RNSC
, /* Neon D reg, or Neon scalar. */
6462 OP_VMOV
, /* Neon VMOV operands. */
6463 OP_RNDQ_Ibig
, /* Neon D or Q reg, or big immediate for logic and VMVN. */
6464 OP_RNDQ_I63b
, /* Neon D or Q reg, or immediate for shift. */
6465 OP_RIWR_I32z
, /* iWMMXt wR register, or immediate 0 .. 32 for iWMMXt2. */
6467 OP_I0
, /* immediate zero */
6468 OP_I7
, /* immediate value 0 .. 7 */
6469 OP_I15
, /* 0 .. 15 */
6470 OP_I16
, /* 1 .. 16 */
6471 OP_I16z
, /* 0 .. 16 */
6472 OP_I31
, /* 0 .. 31 */
6473 OP_I31w
, /* 0 .. 31, optional trailing ! */
6474 OP_I32
, /* 1 .. 32 */
6475 OP_I32z
, /* 0 .. 32 */
6476 OP_I63
, /* 0 .. 63 */
6477 OP_I63s
, /* -64 .. 63 */
6478 OP_I64
, /* 1 .. 64 */
6479 OP_I64z
, /* 0 .. 64 */
6480 OP_I255
, /* 0 .. 255 */
6482 OP_I4b
, /* immediate, prefix optional, 1 .. 4 */
6483 OP_I7b
, /* 0 .. 7 */
6484 OP_I15b
, /* 0 .. 15 */
6485 OP_I31b
, /* 0 .. 31 */
6487 OP_SH
, /* shifter operand */
6488 OP_SHG
, /* shifter operand with possible group relocation */
6489 OP_ADDR
, /* Memory address expression (any mode) */
6490 OP_ADDRGLDR
, /* Mem addr expr (any mode) with possible LDR group reloc */
6491 OP_ADDRGLDRS
, /* Mem addr expr (any mode) with possible LDRS group reloc */
6492 OP_ADDRGLDC
, /* Mem addr expr (any mode) with possible LDC group reloc */
6493 OP_EXP
, /* arbitrary expression */
6494 OP_EXPi
, /* same, with optional immediate prefix */
6495 OP_EXPr
, /* same, with optional relocation suffix */
6496 OP_HALF
, /* 0 .. 65535 or low/high reloc. */
6498 OP_CPSF
, /* CPS flags */
6499 OP_ENDI
, /* Endianness specifier */
6500 OP_wPSR
, /* CPSR/SPSR/APSR mask for msr (writing). */
6501 OP_rPSR
, /* CPSR/SPSR/APSR mask for msr (reading). */
6502 OP_COND
, /* conditional code */
6503 OP_TB
, /* Table branch. */
6505 OP_APSR_RR
, /* ARM register or "APSR_nzcv". */
6507 OP_RRnpc_I0
, /* ARM register or literal 0 */
6508 OP_RR_EXr
, /* ARM register or expression with opt. reloc suff. */
6509 OP_RR_EXi
, /* ARM register or expression with imm prefix */
6510 OP_RF_IF
, /* FPA register or immediate */
6511 OP_RIWR_RIWC
, /* iWMMXt R or C reg */
6512 OP_RIWC_RIWG
, /* iWMMXt wC or wCG reg */
6514 /* Optional operands. */
6515 OP_oI7b
, /* immediate, prefix optional, 0 .. 7 */
6516 OP_oI31b
, /* 0 .. 31 */
6517 OP_oI32b
, /* 1 .. 32 */
6518 OP_oI32z
, /* 0 .. 32 */
6519 OP_oIffffb
, /* 0 .. 65535 */
6520 OP_oI255c
, /* curly-brace enclosed, 0 .. 255 */
6522 OP_oRR
, /* ARM register */
6523 OP_oRRnpc
, /* ARM register, not the PC */
6524 OP_oRRnpcsp
, /* ARM register, neither the PC nor the SP (a.k.a. BadReg) */
6525 OP_oRRw
, /* ARM register, not r15, optional trailing ! */
6526 OP_oRND
, /* Optional Neon double precision register */
6527 OP_oRNQ
, /* Optional Neon quad precision register */
6528 OP_oRNDQ
, /* Optional Neon double or quad precision register */
6529 OP_oRNSDQ
, /* Optional single, double or quad precision vector register */
6530 OP_oSHll
, /* LSL immediate */
6531 OP_oSHar
, /* ASR immediate */
6532 OP_oSHllar
, /* LSL or ASR immediate */
6533 OP_oROR
, /* ROR 0/8/16/24 */
6534 OP_oBARRIER_I15
, /* Option argument for a barrier instruction. */
6536 /* Some pre-defined mixed (ARM/THUMB) operands. */
6537 OP_RR_npcsp
= MIX_ARM_THUMB_OPERANDS (OP_RR
, OP_RRnpcsp
),
6538 OP_RRnpc_npcsp
= MIX_ARM_THUMB_OPERANDS (OP_RRnpc
, OP_RRnpcsp
),
6539 OP_oRRnpc_npcsp
= MIX_ARM_THUMB_OPERANDS (OP_oRRnpc
, OP_oRRnpcsp
),
6541 OP_FIRST_OPTIONAL
= OP_oI7b
6544 /* Generic instruction operand parser. This does no encoding and no
6545 semantic validation; it merely squirrels values away in the inst
6546 structure. Returns SUCCESS or FAIL depending on whether the
6547 specified grammar matched. */
6549 parse_operands (char *str
, const unsigned int *pattern
, bfd_boolean thumb
)
6551 unsigned const int *upat
= pattern
;
6552 char *backtrack_pos
= 0;
6553 const char *backtrack_error
= 0;
6554 int i
, val
= 0, backtrack_index
= 0;
6555 enum arm_reg_type rtype
;
6556 parse_operand_result result
;
6557 unsigned int op_parse_code
;
6559 #define po_char_or_fail(chr) \
6562 if (skip_past_char (&str, chr) == FAIL) \
6567 #define po_reg_or_fail(regtype) \
6570 val = arm_typed_reg_parse (& str, regtype, & rtype, \
6571 & inst.operands[i].vectype); \
6574 first_error (_(reg_expected_msgs[regtype])); \
6577 inst.operands[i].reg = val; \
6578 inst.operands[i].isreg = 1; \
6579 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
6580 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
6581 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
6582 || rtype == REG_TYPE_VFD \
6583 || rtype == REG_TYPE_NQ); \
6587 #define po_reg_or_goto(regtype, label) \
6590 val = arm_typed_reg_parse (& str, regtype, & rtype, \
6591 & inst.operands[i].vectype); \
6595 inst.operands[i].reg = val; \
6596 inst.operands[i].isreg = 1; \
6597 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
6598 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
6599 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
6600 || rtype == REG_TYPE_VFD \
6601 || rtype == REG_TYPE_NQ); \
6605 #define po_imm_or_fail(min, max, popt) \
6608 if (parse_immediate (&str, &val, min, max, popt) == FAIL) \
6610 inst.operands[i].imm = val; \
6614 #define po_scalar_or_goto(elsz, label) \
6617 val = parse_scalar (& str, elsz, & inst.operands[i].vectype); \
6620 inst.operands[i].reg = val; \
6621 inst.operands[i].isscalar = 1; \
6625 #define po_misc_or_fail(expr) \
6633 #define po_misc_or_fail_no_backtrack(expr) \
6637 if (result == PARSE_OPERAND_FAIL_NO_BACKTRACK) \
6638 backtrack_pos = 0; \
6639 if (result != PARSE_OPERAND_SUCCESS) \
6644 #define po_barrier_or_imm(str) \
6647 val = parse_barrier (&str); \
6648 if (val == FAIL && ! ISALPHA (*str)) \
6651 /* ISB can only take SY as an option. */ \
6652 || ((inst.instruction & 0xf0) == 0x60 \
6655 inst.error = _("invalid barrier type"); \
6656 backtrack_pos = 0; \
6662 skip_whitespace (str
);
6664 for (i
= 0; upat
[i
] != OP_stop
; i
++)
6666 op_parse_code
= upat
[i
];
6667 if (op_parse_code
>= 1<<16)
6668 op_parse_code
= thumb
? (op_parse_code
>> 16)
6669 : (op_parse_code
& ((1<<16)-1));
6671 if (op_parse_code
>= OP_FIRST_OPTIONAL
)
6673 /* Remember where we are in case we need to backtrack. */
6674 gas_assert (!backtrack_pos
);
6675 backtrack_pos
= str
;
6676 backtrack_error
= inst
.error
;
6677 backtrack_index
= i
;
6680 if (i
> 0 && (i
> 1 || inst
.operands
[0].present
))
6681 po_char_or_fail (',');
6683 switch (op_parse_code
)
6691 case OP_RR
: po_reg_or_fail (REG_TYPE_RN
); break;
6692 case OP_RCP
: po_reg_or_fail (REG_TYPE_CP
); break;
6693 case OP_RCN
: po_reg_or_fail (REG_TYPE_CN
); break;
6694 case OP_RF
: po_reg_or_fail (REG_TYPE_FN
); break;
6695 case OP_RVS
: po_reg_or_fail (REG_TYPE_VFS
); break;
6696 case OP_RVD
: po_reg_or_fail (REG_TYPE_VFD
); break;
6698 case OP_RND
: po_reg_or_fail (REG_TYPE_VFD
); break;
6700 po_reg_or_goto (REG_TYPE_VFC
, coproc_reg
);
6702 /* Also accept generic coprocessor regs for unknown registers. */
6704 po_reg_or_fail (REG_TYPE_CN
);
6706 case OP_RMF
: po_reg_or_fail (REG_TYPE_MVF
); break;
6707 case OP_RMD
: po_reg_or_fail (REG_TYPE_MVD
); break;
6708 case OP_RMFX
: po_reg_or_fail (REG_TYPE_MVFX
); break;
6709 case OP_RMDX
: po_reg_or_fail (REG_TYPE_MVDX
); break;
6710 case OP_RMAX
: po_reg_or_fail (REG_TYPE_MVAX
); break;
6711 case OP_RMDS
: po_reg_or_fail (REG_TYPE_DSPSC
); break;
6712 case OP_RIWR
: po_reg_or_fail (REG_TYPE_MMXWR
); break;
6713 case OP_RIWC
: po_reg_or_fail (REG_TYPE_MMXWC
); break;
6714 case OP_RIWG
: po_reg_or_fail (REG_TYPE_MMXWCG
); break;
6715 case OP_RXA
: po_reg_or_fail (REG_TYPE_XSCALE
); break;
6717 case OP_RNQ
: po_reg_or_fail (REG_TYPE_NQ
); break;
6719 case OP_RNDQ
: po_reg_or_fail (REG_TYPE_NDQ
); break;
6720 case OP_RVSD
: po_reg_or_fail (REG_TYPE_VFSD
); break;
6722 case OP_RNSDQ
: po_reg_or_fail (REG_TYPE_NSDQ
); break;
6724 /* Neon scalar. Using an element size of 8 means that some invalid
6725 scalars are accepted here, so deal with those in later code. */
6726 case OP_RNSC
: po_scalar_or_goto (8, failure
); break;
6730 po_reg_or_goto (REG_TYPE_NDQ
, try_imm0
);
6733 po_imm_or_fail (0, 0, TRUE
);
6738 po_reg_or_goto (REG_TYPE_VFSD
, try_imm0
);
6743 po_reg_or_goto (REG_TYPE_VFSD
, try_ifimm0
);
6746 if (parse_ifimm_zero (&str
))
6747 inst
.operands
[i
].imm
= 0;
6751 = _("only floating point zero is allowed as immediate value");
6759 po_scalar_or_goto (8, try_rr
);
6762 po_reg_or_fail (REG_TYPE_RN
);
6768 po_scalar_or_goto (8, try_nsdq
);
6771 po_reg_or_fail (REG_TYPE_NSDQ
);
6777 po_scalar_or_goto (8, try_ndq
);
6780 po_reg_or_fail (REG_TYPE_NDQ
);
6786 po_scalar_or_goto (8, try_vfd
);
6789 po_reg_or_fail (REG_TYPE_VFD
);
6794 /* WARNING: parse_neon_mov can move the operand counter, i. If we're
6795 not careful then bad things might happen. */
6796 po_misc_or_fail (parse_neon_mov (&str
, &i
) == FAIL
);
6801 po_reg_or_goto (REG_TYPE_NDQ
, try_immbig
);
6804 /* There's a possibility of getting a 64-bit immediate here, so
6805 we need special handling. */
6806 if (parse_big_immediate (&str
, i
, NULL
, /*allow_symbol_p=*/FALSE
)
6809 inst
.error
= _("immediate value is out of range");
6817 po_reg_or_goto (REG_TYPE_NDQ
, try_shimm
);
6820 po_imm_or_fail (0, 63, TRUE
);
6825 po_char_or_fail ('[');
6826 po_reg_or_fail (REG_TYPE_RN
);
6827 po_char_or_fail (']');
6833 po_reg_or_fail (REG_TYPE_RN
);
6834 if (skip_past_char (&str
, '!') == SUCCESS
)
6835 inst
.operands
[i
].writeback
= 1;
6839 case OP_I7
: po_imm_or_fail ( 0, 7, FALSE
); break;
6840 case OP_I15
: po_imm_or_fail ( 0, 15, FALSE
); break;
6841 case OP_I16
: po_imm_or_fail ( 1, 16, FALSE
); break;
6842 case OP_I16z
: po_imm_or_fail ( 0, 16, FALSE
); break;
6843 case OP_I31
: po_imm_or_fail ( 0, 31, FALSE
); break;
6844 case OP_I32
: po_imm_or_fail ( 1, 32, FALSE
); break;
6845 case OP_I32z
: po_imm_or_fail ( 0, 32, FALSE
); break;
6846 case OP_I63s
: po_imm_or_fail (-64, 63, FALSE
); break;
6847 case OP_I63
: po_imm_or_fail ( 0, 63, FALSE
); break;
6848 case OP_I64
: po_imm_or_fail ( 1, 64, FALSE
); break;
6849 case OP_I64z
: po_imm_or_fail ( 0, 64, FALSE
); break;
6850 case OP_I255
: po_imm_or_fail ( 0, 255, FALSE
); break;
6852 case OP_I4b
: po_imm_or_fail ( 1, 4, TRUE
); break;
6854 case OP_I7b
: po_imm_or_fail ( 0, 7, TRUE
); break;
6855 case OP_I15b
: po_imm_or_fail ( 0, 15, TRUE
); break;
6857 case OP_I31b
: po_imm_or_fail ( 0, 31, TRUE
); break;
6858 case OP_oI32b
: po_imm_or_fail ( 1, 32, TRUE
); break;
6859 case OP_oI32z
: po_imm_or_fail ( 0, 32, TRUE
); break;
6860 case OP_oIffffb
: po_imm_or_fail ( 0, 0xffff, TRUE
); break;
6862 /* Immediate variants */
6864 po_char_or_fail ('{');
6865 po_imm_or_fail (0, 255, TRUE
);
6866 po_char_or_fail ('}');
6870 /* The expression parser chokes on a trailing !, so we have
6871 to find it first and zap it. */
6874 while (*s
&& *s
!= ',')
6879 inst
.operands
[i
].writeback
= 1;
6881 po_imm_or_fail (0, 31, TRUE
);
6889 po_misc_or_fail (my_get_expression (&inst
.reloc
.exp
, &str
,
6894 po_misc_or_fail (my_get_expression (&inst
.reloc
.exp
, &str
,
6899 po_misc_or_fail (my_get_expression (&inst
.reloc
.exp
, &str
,
6901 if (inst
.reloc
.exp
.X_op
== O_symbol
)
6903 val
= parse_reloc (&str
);
6906 inst
.error
= _("unrecognized relocation suffix");
6909 else if (val
!= BFD_RELOC_UNUSED
)
6911 inst
.operands
[i
].imm
= val
;
6912 inst
.operands
[i
].hasreloc
= 1;
6917 /* Operand for MOVW or MOVT. */
6919 po_misc_or_fail (parse_half (&str
));
6922 /* Register or expression. */
6923 case OP_RR_EXr
: po_reg_or_goto (REG_TYPE_RN
, EXPr
); break;
6924 case OP_RR_EXi
: po_reg_or_goto (REG_TYPE_RN
, EXPi
); break;
6926 /* Register or immediate. */
6927 case OP_RRnpc_I0
: po_reg_or_goto (REG_TYPE_RN
, I0
); break;
6928 I0
: po_imm_or_fail (0, 0, FALSE
); break;
6930 case OP_RF_IF
: po_reg_or_goto (REG_TYPE_FN
, IF
); break;
6932 if (!is_immediate_prefix (*str
))
6935 val
= parse_fpa_immediate (&str
);
6938 /* FPA immediates are encoded as registers 8-15.
6939 parse_fpa_immediate has already applied the offset. */
6940 inst
.operands
[i
].reg
= val
;
6941 inst
.operands
[i
].isreg
= 1;
6944 case OP_RIWR_I32z
: po_reg_or_goto (REG_TYPE_MMXWR
, I32z
); break;
6945 I32z
: po_imm_or_fail (0, 32, FALSE
); break;
6947 /* Two kinds of register. */
6950 struct reg_entry
*rege
= arm_reg_parse_multi (&str
);
6952 || (rege
->type
!= REG_TYPE_MMXWR
6953 && rege
->type
!= REG_TYPE_MMXWC
6954 && rege
->type
!= REG_TYPE_MMXWCG
))
6956 inst
.error
= _("iWMMXt data or control register expected");
6959 inst
.operands
[i
].reg
= rege
->number
;
6960 inst
.operands
[i
].isreg
= (rege
->type
== REG_TYPE_MMXWR
);
6966 struct reg_entry
*rege
= arm_reg_parse_multi (&str
);
6968 || (rege
->type
!= REG_TYPE_MMXWC
6969 && rege
->type
!= REG_TYPE_MMXWCG
))
6971 inst
.error
= _("iWMMXt control register expected");
6974 inst
.operands
[i
].reg
= rege
->number
;
6975 inst
.operands
[i
].isreg
= 1;
6980 case OP_CPSF
: val
= parse_cps_flags (&str
); break;
6981 case OP_ENDI
: val
= parse_endian_specifier (&str
); break;
6982 case OP_oROR
: val
= parse_ror (&str
); break;
6983 case OP_COND
: val
= parse_cond (&str
); break;
6984 case OP_oBARRIER_I15
:
6985 po_barrier_or_imm (str
); break;
6987 if (parse_immediate (&str
, &val
, 0, 15, TRUE
) == FAIL
)
6993 po_reg_or_goto (REG_TYPE_RNB
, try_psr
);
6994 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_virt
))
6996 inst
.error
= _("Banked registers are not available with this "
7002 val
= parse_psr (&str
, op_parse_code
== OP_wPSR
);
7006 po_reg_or_goto (REG_TYPE_RN
, try_apsr
);
7009 /* Parse "APSR_nvzc" operand (for FMSTAT-equivalent MRS
7011 if (strncasecmp (str
, "APSR_", 5) == 0)
7018 case 'c': found
= (found
& 1) ? 16 : found
| 1; break;
7019 case 'n': found
= (found
& 2) ? 16 : found
| 2; break;
7020 case 'z': found
= (found
& 4) ? 16 : found
| 4; break;
7021 case 'v': found
= (found
& 8) ? 16 : found
| 8; break;
7022 default: found
= 16;
7026 inst
.operands
[i
].isvec
= 1;
7027 /* APSR_nzcv is encoded in instructions as if it were the REG_PC. */
7028 inst
.operands
[i
].reg
= REG_PC
;
7035 po_misc_or_fail (parse_tb (&str
));
7038 /* Register lists. */
7040 val
= parse_reg_list (&str
);
7043 inst
.operands
[i
].writeback
= 1;
7049 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
, REGLIST_VFP_S
);
7053 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
, REGLIST_VFP_D
);
7057 /* Allow Q registers too. */
7058 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
,
7063 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
,
7065 inst
.operands
[i
].issingle
= 1;
7070 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
,
7075 val
= parse_neon_el_struct_list (&str
, &inst
.operands
[i
].reg
,
7076 &inst
.operands
[i
].vectype
);
7079 /* Addressing modes */
7081 po_misc_or_fail (parse_address (&str
, i
));
7085 po_misc_or_fail_no_backtrack (
7086 parse_address_group_reloc (&str
, i
, GROUP_LDR
));
7090 po_misc_or_fail_no_backtrack (
7091 parse_address_group_reloc (&str
, i
, GROUP_LDRS
));
7095 po_misc_or_fail_no_backtrack (
7096 parse_address_group_reloc (&str
, i
, GROUP_LDC
));
7100 po_misc_or_fail (parse_shifter_operand (&str
, i
));
7104 po_misc_or_fail_no_backtrack (
7105 parse_shifter_operand_group_reloc (&str
, i
));
7109 po_misc_or_fail (parse_shift (&str
, i
, SHIFT_LSL_IMMEDIATE
));
7113 po_misc_or_fail (parse_shift (&str
, i
, SHIFT_ASR_IMMEDIATE
));
7117 po_misc_or_fail (parse_shift (&str
, i
, SHIFT_LSL_OR_ASR_IMMEDIATE
));
7121 as_fatal (_("unhandled operand code %d"), op_parse_code
);
7124 /* Various value-based sanity checks and shared operations. We
7125 do not signal immediate failures for the register constraints;
7126 this allows a syntax error to take precedence. */
7127 switch (op_parse_code
)
7135 if (inst
.operands
[i
].isreg
&& inst
.operands
[i
].reg
== REG_PC
)
7136 inst
.error
= BAD_PC
;
7141 if (inst
.operands
[i
].isreg
)
7143 if (inst
.operands
[i
].reg
== REG_PC
)
7144 inst
.error
= BAD_PC
;
7145 else if (inst
.operands
[i
].reg
== REG_SP
)
7146 inst
.error
= BAD_SP
;
7151 if (inst
.operands
[i
].isreg
7152 && inst
.operands
[i
].reg
== REG_PC
7153 && (inst
.operands
[i
].writeback
|| thumb
))
7154 inst
.error
= BAD_PC
;
7163 case OP_oBARRIER_I15
:
7172 inst
.operands
[i
].imm
= val
;
7179 /* If we get here, this operand was successfully parsed. */
7180 inst
.operands
[i
].present
= 1;
7184 inst
.error
= BAD_ARGS
;
7189 /* The parse routine should already have set inst.error, but set a
7190 default here just in case. */
7192 inst
.error
= _("syntax error");
7196 /* Do not backtrack over a trailing optional argument that
7197 absorbed some text. We will only fail again, with the
7198 'garbage following instruction' error message, which is
7199 probably less helpful than the current one. */
7200 if (backtrack_index
== i
&& backtrack_pos
!= str
7201 && upat
[i
+1] == OP_stop
)
7204 inst
.error
= _("syntax error");
7208 /* Try again, skipping the optional argument at backtrack_pos. */
7209 str
= backtrack_pos
;
7210 inst
.error
= backtrack_error
;
7211 inst
.operands
[backtrack_index
].present
= 0;
7212 i
= backtrack_index
;
7216 /* Check that we have parsed all the arguments. */
7217 if (*str
!= '\0' && !inst
.error
)
7218 inst
.error
= _("garbage following instruction");
7220 return inst
.error
? FAIL
: SUCCESS
;
7223 #undef po_char_or_fail
7224 #undef po_reg_or_fail
7225 #undef po_reg_or_goto
7226 #undef po_imm_or_fail
7227 #undef po_scalar_or_fail
7228 #undef po_barrier_or_imm
7230 /* Shorthand macro for instruction encoding functions issuing errors. */
7231 #define constraint(expr, err) \
7242 /* Reject "bad registers" for Thumb-2 instructions. Many Thumb-2
7243 instructions are unpredictable if these registers are used. This
7244 is the BadReg predicate in ARM's Thumb-2 documentation. */
7245 #define reject_bad_reg(reg) \
7247 if (reg == REG_SP || reg == REG_PC) \
7249 inst.error = (reg == REG_SP) ? BAD_SP : BAD_PC; \
7254 /* If REG is R13 (the stack pointer), warn that its use is
7256 #define warn_deprecated_sp(reg) \
7258 if (warn_on_deprecated && reg == REG_SP) \
7259 as_tsktsk (_("use of r13 is deprecated")); \
7262 /* Functions for operand encoding. ARM, then Thumb. */
7264 #define rotate_left(v, n) (v << (n & 31) | v >> ((32 - n) & 31))
7266 /* If VAL can be encoded in the immediate field of an ARM instruction,
7267 return the encoded form. Otherwise, return FAIL. */
7270 encode_arm_immediate (unsigned int val
)
7277 for (i
= 2; i
< 32; i
+= 2)
7278 if ((a
= rotate_left (val
, i
)) <= 0xff)
7279 return a
| (i
<< 7); /* 12-bit pack: [shift-cnt,const]. */
7284 /* If VAL can be encoded in the immediate field of a Thumb32 instruction,
7285 return the encoded form. Otherwise, return FAIL. */
7287 encode_thumb32_immediate (unsigned int val
)
7294 for (i
= 1; i
<= 24; i
++)
7297 if ((val
& ~(0xff << i
)) == 0)
7298 return ((val
>> i
) & 0x7f) | ((32 - i
) << 7);
7302 if (val
== ((a
<< 16) | a
))
7304 if (val
== ((a
<< 24) | (a
<< 16) | (a
<< 8) | a
))
7308 if (val
== ((a
<< 16) | a
))
7309 return 0x200 | (a
>> 8);
7313 /* Encode a VFP SP or DP register number into inst.instruction. */
7316 encode_arm_vfp_reg (int reg
, enum vfp_reg_pos pos
)
7318 if ((pos
== VFP_REG_Dd
|| pos
== VFP_REG_Dn
|| pos
== VFP_REG_Dm
)
7321 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_d32
))
7324 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
7327 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
,
7332 first_error (_("D register out of range for selected VFP version"));
7340 inst
.instruction
|= ((reg
>> 1) << 12) | ((reg
& 1) << 22);
7344 inst
.instruction
|= ((reg
>> 1) << 16) | ((reg
& 1) << 7);
7348 inst
.instruction
|= ((reg
>> 1) << 0) | ((reg
& 1) << 5);
7352 inst
.instruction
|= ((reg
& 15) << 12) | ((reg
>> 4) << 22);
7356 inst
.instruction
|= ((reg
& 15) << 16) | ((reg
>> 4) << 7);
7360 inst
.instruction
|= (reg
& 15) | ((reg
>> 4) << 5);
7368 /* Encode a <shift> in an ARM-format instruction. The immediate,
7369 if any, is handled by md_apply_fix. */
7371 encode_arm_shift (int i
)
7373 if (inst
.operands
[i
].shift_kind
== SHIFT_RRX
)
7374 inst
.instruction
|= SHIFT_ROR
<< 5;
7377 inst
.instruction
|= inst
.operands
[i
].shift_kind
<< 5;
7378 if (inst
.operands
[i
].immisreg
)
7380 inst
.instruction
|= SHIFT_BY_REG
;
7381 inst
.instruction
|= inst
.operands
[i
].imm
<< 8;
7384 inst
.reloc
.type
= BFD_RELOC_ARM_SHIFT_IMM
;
7389 encode_arm_shifter_operand (int i
)
7391 if (inst
.operands
[i
].isreg
)
7393 inst
.instruction
|= inst
.operands
[i
].reg
;
7394 encode_arm_shift (i
);
7398 inst
.instruction
|= INST_IMMEDIATE
;
7399 if (inst
.reloc
.type
!= BFD_RELOC_ARM_IMMEDIATE
)
7400 inst
.instruction
|= inst
.operands
[i
].imm
;
7404 /* Subroutine of encode_arm_addr_mode_2 and encode_arm_addr_mode_3. */
7406 encode_arm_addr_mode_common (int i
, bfd_boolean is_t
)
7409 Generate an error if the operand is not a register. */
7410 constraint (!inst
.operands
[i
].isreg
,
7411 _("Instruction does not support =N addresses"));
7413 inst
.instruction
|= inst
.operands
[i
].reg
<< 16;
7415 if (inst
.operands
[i
].preind
)
7419 inst
.error
= _("instruction does not accept preindexed addressing");
7422 inst
.instruction
|= PRE_INDEX
;
7423 if (inst
.operands
[i
].writeback
)
7424 inst
.instruction
|= WRITE_BACK
;
7427 else if (inst
.operands
[i
].postind
)
7429 gas_assert (inst
.operands
[i
].writeback
);
7431 inst
.instruction
|= WRITE_BACK
;
7433 else /* unindexed - only for coprocessor */
7435 inst
.error
= _("instruction does not accept unindexed addressing");
7439 if (((inst
.instruction
& WRITE_BACK
) || !(inst
.instruction
& PRE_INDEX
))
7440 && (((inst
.instruction
& 0x000f0000) >> 16)
7441 == ((inst
.instruction
& 0x0000f000) >> 12)))
7442 as_warn ((inst
.instruction
& LOAD_BIT
)
7443 ? _("destination register same as write-back base")
7444 : _("source register same as write-back base"));
7447 /* inst.operands[i] was set up by parse_address. Encode it into an
7448 ARM-format mode 2 load or store instruction. If is_t is true,
7449 reject forms that cannot be used with a T instruction (i.e. not
7452 encode_arm_addr_mode_2 (int i
, bfd_boolean is_t
)
7454 const bfd_boolean is_pc
= (inst
.operands
[i
].reg
== REG_PC
);
7456 encode_arm_addr_mode_common (i
, is_t
);
7458 if (inst
.operands
[i
].immisreg
)
7460 constraint ((inst
.operands
[i
].imm
== REG_PC
7461 || (is_pc
&& inst
.operands
[i
].writeback
)),
7463 inst
.instruction
|= INST_IMMEDIATE
; /* yes, this is backwards */
7464 inst
.instruction
|= inst
.operands
[i
].imm
;
7465 if (!inst
.operands
[i
].negative
)
7466 inst
.instruction
|= INDEX_UP
;
7467 if (inst
.operands
[i
].shifted
)
7469 if (inst
.operands
[i
].shift_kind
== SHIFT_RRX
)
7470 inst
.instruction
|= SHIFT_ROR
<< 5;
7473 inst
.instruction
|= inst
.operands
[i
].shift_kind
<< 5;
7474 inst
.reloc
.type
= BFD_RELOC_ARM_SHIFT_IMM
;
7478 else /* immediate offset in inst.reloc */
7480 if (is_pc
&& !inst
.reloc
.pc_rel
)
7482 const bfd_boolean is_load
= ((inst
.instruction
& LOAD_BIT
) != 0);
7484 /* If is_t is TRUE, it's called from do_ldstt. ldrt/strt
7485 cannot use PC in addressing.
7486 PC cannot be used in writeback addressing, either. */
7487 constraint ((is_t
|| inst
.operands
[i
].writeback
),
7490 /* Use of PC in str is deprecated for ARMv7. */
7491 if (warn_on_deprecated
7493 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v7
))
7494 as_tsktsk (_("use of PC in this instruction is deprecated"));
7497 if (inst
.reloc
.type
== BFD_RELOC_UNUSED
)
7499 /* Prefer + for zero encoded value. */
7500 if (!inst
.operands
[i
].negative
)
7501 inst
.instruction
|= INDEX_UP
;
7502 inst
.reloc
.type
= BFD_RELOC_ARM_OFFSET_IMM
;
7507 /* inst.operands[i] was set up by parse_address. Encode it into an
7508 ARM-format mode 3 load or store instruction. Reject forms that
7509 cannot be used with such instructions. If is_t is true, reject
7510 forms that cannot be used with a T instruction (i.e. not
7513 encode_arm_addr_mode_3 (int i
, bfd_boolean is_t
)
7515 if (inst
.operands
[i
].immisreg
&& inst
.operands
[i
].shifted
)
7517 inst
.error
= _("instruction does not accept scaled register index");
7521 encode_arm_addr_mode_common (i
, is_t
);
7523 if (inst
.operands
[i
].immisreg
)
7525 constraint ((inst
.operands
[i
].imm
== REG_PC
7526 || (is_t
&& inst
.operands
[i
].reg
== REG_PC
)),
7528 constraint (inst
.operands
[i
].reg
== REG_PC
&& inst
.operands
[i
].writeback
,
7530 inst
.instruction
|= inst
.operands
[i
].imm
;
7531 if (!inst
.operands
[i
].negative
)
7532 inst
.instruction
|= INDEX_UP
;
7534 else /* immediate offset in inst.reloc */
7536 constraint ((inst
.operands
[i
].reg
== REG_PC
&& !inst
.reloc
.pc_rel
7537 && inst
.operands
[i
].writeback
),
7539 inst
.instruction
|= HWOFFSET_IMM
;
7540 if (inst
.reloc
.type
== BFD_RELOC_UNUSED
)
7542 /* Prefer + for zero encoded value. */
7543 if (!inst
.operands
[i
].negative
)
7544 inst
.instruction
|= INDEX_UP
;
7546 inst
.reloc
.type
= BFD_RELOC_ARM_OFFSET_IMM8
;
7551 /* Write immediate bits [7:0] to the following locations:
7553 |28/24|23 19|18 16|15 4|3 0|
7554 | a |x x x x x|b c d|x x x x x x x x x x x x|e f g h|
7556 This function is used by VMOV/VMVN/VORR/VBIC. */
7559 neon_write_immbits (unsigned immbits
)
7561 inst
.instruction
|= immbits
& 0xf;
7562 inst
.instruction
|= ((immbits
>> 4) & 0x7) << 16;
7563 inst
.instruction
|= ((immbits
>> 7) & 0x1) << (thumb_mode
? 28 : 24);
/* Invert the low-order SIZE bits of the value pair XHI:XLO, in place.
   Either pointer may be NULL, in which case that half is ignored.
   SIZE must be 8, 16, 32 or 64; anything else aborts.  */

static void
neon_invert_size (unsigned *xlo, unsigned *xhi, int size)
{
  unsigned immlo = xlo ? *xlo : 0;
  unsigned immhi = xhi ? *xhi : 0;

  switch (size)
    {
    case 8:
      immlo = (~immlo) & 0xff;
      break;

    case 16:
      immlo = (~immlo) & 0xffff;
      break;

    case 64:
      immhi = (~immhi) & 0xffffffff;
      /* fall through.  */

    case 32:
      immlo = (~immlo) & 0xffffffff;
      break;

    default:
      abort ();
    }

  if (xlo)
    *xlo = immlo;

  if (xhi)
    *xhi = immhi;
}
/* True if IMM has form 0bAAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD, i.e. each
   byte is either all-zeros or all-ones.  */

static int
neon_bits_same_in_bytes (unsigned imm)
{
  return ((imm & 0x000000ff) == 0 || (imm & 0x000000ff) == 0x000000ff)
	 && ((imm & 0x0000ff00) == 0 || (imm & 0x0000ff00) == 0x0000ff00)
	 && ((imm & 0x00ff0000) == 0 || (imm & 0x00ff0000) == 0x00ff0000)
	 && ((imm & 0xff000000) == 0 || (imm & 0xff000000) == 0xff000000);
}
/* For an immediate of the above form (each byte all-zeros or all-ones),
   return 0bABCD: one bit per byte, taken from each byte's LSB.  */

static unsigned
neon_squash_bits (unsigned imm)
{
  return (imm & 0x01) | ((imm & 0x0100) >> 7) | ((imm & 0x010000) >> 14)
	 | ((imm & 0x01000000) >> 21);
}
/* Compress a quarter-float representation to 0b...000 abcdefgh:
   sign from bit 31, plus the seven bits below the top of the
   exponent/mantissa field at bits [25:19].  */

static unsigned
neon_qfloat_bits (unsigned imm)
{
  return ((imm >> 19) & 0x7f) | ((imm >> 24) & 0x80);
}
7632 /* Returns CMODE. IMMBITS [7:0] is set to bits suitable for inserting into
7633 the instruction. *OP is passed as the initial value of the op field, and
7634 may be set to a different value depending on the constant (i.e.
7635 "MOV I64, 0bAAAAAAAABBBB..." which uses OP = 1 despite being MOV not
7636 MVN). If the immediate looks like a repeated pattern then also
7637 try smaller element sizes. */
7640 neon_cmode_for_move_imm (unsigned immlo
, unsigned immhi
, int float_p
,
7641 unsigned *immbits
, int *op
, int size
,
7642 enum neon_el_type type
)
7644 /* Only permit float immediates (including 0.0/-0.0) if the operand type is
7646 if (type
== NT_float
&& !float_p
)
7649 if (type
== NT_float
&& is_quarter_float (immlo
) && immhi
== 0)
7651 if (size
!= 32 || *op
== 1)
7653 *immbits
= neon_qfloat_bits (immlo
);
7659 if (neon_bits_same_in_bytes (immhi
)
7660 && neon_bits_same_in_bytes (immlo
))
7664 *immbits
= (neon_squash_bits (immhi
) << 4)
7665 | neon_squash_bits (immlo
);
7676 if (immlo
== (immlo
& 0x000000ff))
7681 else if (immlo
== (immlo
& 0x0000ff00))
7683 *immbits
= immlo
>> 8;
7686 else if (immlo
== (immlo
& 0x00ff0000))
7688 *immbits
= immlo
>> 16;
7691 else if (immlo
== (immlo
& 0xff000000))
7693 *immbits
= immlo
>> 24;
7696 else if (immlo
== ((immlo
& 0x0000ff00) | 0x000000ff))
7698 *immbits
= (immlo
>> 8) & 0xff;
7701 else if (immlo
== ((immlo
& 0x00ff0000) | 0x0000ffff))
7703 *immbits
= (immlo
>> 16) & 0xff;
7707 if ((immlo
& 0xffff) != (immlo
>> 16))
7714 if (immlo
== (immlo
& 0x000000ff))
7719 else if (immlo
== (immlo
& 0x0000ff00))
7721 *immbits
= immlo
>> 8;
7725 if ((immlo
& 0xff) != (immlo
>> 8))
7730 if (immlo
== (immlo
& 0x000000ff))
7732 /* Don't allow MVN with 8-bit immediate. */
7742 #if defined BFD_HOST_64_BIT
7743 /* Returns TRUE if double precision value V may be cast
7744 to single precision without loss of accuracy. */
7747 is_double_a_single (bfd_int64_t v
)
7749 int exp
= (int)((v
>> 52) & 0x7FF);
7750 bfd_int64_t mantissa
= (v
& (bfd_int64_t
)0xFFFFFFFFFFFFFULL
);
7752 return (exp
== 0 || exp
== 0x7FF
7753 || (exp
>= 1023 - 126 && exp
<= 1023 + 127))
7754 && (mantissa
& 0x1FFFFFFFl
) == 0;
7757 /* Returns a double precision value casted to single precision
7758 (ignoring the least significant bits in exponent and mantissa). */
7761 double_to_single (bfd_int64_t v
)
7763 int sign
= (int) ((v
>> 63) & 1l);
7764 int exp
= (int) ((v
>> 52) & 0x7FF);
7765 bfd_int64_t mantissa
= (v
& (bfd_int64_t
)0xFFFFFFFFFFFFFULL
);
7771 exp
= exp
- 1023 + 127;
7780 /* No denormalized numbers. */
7786 return (sign
<< 31) | (exp
<< 23) | mantissa
;
7788 #endif /* BFD_HOST_64_BIT */
7797 static void do_vfp_nsyn_opcode (const char *);
7799 /* inst.reloc.exp describes an "=expr" load pseudo-operation.
7800 Determine whether it can be performed with a move instruction; if
7801 it can, convert inst.instruction to that move instruction and
7802 return TRUE; if it can't, convert inst.instruction to a literal-pool
7803 load and return FALSE. If this is not a valid thing to do in the
7804 current context, set inst.error and return TRUE.
7806 inst.operands[i] describes the destination register. */
7809 move_or_literal_pool (int i
, enum lit_type t
, bfd_boolean mode_3
)
7812 bfd_boolean thumb_p
= (t
== CONST_THUMB
);
7813 bfd_boolean arm_p
= (t
== CONST_ARM
);
7816 tbit
= (inst
.instruction
> 0xffff) ? THUMB2_LOAD_BIT
: THUMB_LOAD_BIT
;
7820 if ((inst
.instruction
& tbit
) == 0)
7822 inst
.error
= _("invalid pseudo operation");
7826 if (inst
.reloc
.exp
.X_op
!= O_constant
7827 && inst
.reloc
.exp
.X_op
!= O_symbol
7828 && inst
.reloc
.exp
.X_op
!= O_big
)
7830 inst
.error
= _("constant expression expected");
7834 if (inst
.reloc
.exp
.X_op
== O_constant
7835 || inst
.reloc
.exp
.X_op
== O_big
)
7837 #if defined BFD_HOST_64_BIT
7842 if (inst
.reloc
.exp
.X_op
== O_big
)
7844 LITTLENUM_TYPE w
[X_PRECISION
];
7847 if (inst
.reloc
.exp
.X_add_number
== -1)
7849 gen_to_words (w
, X_PRECISION
, E_PRECISION
);
7851 /* FIXME: Should we check words w[2..5] ? */
7856 #if defined BFD_HOST_64_BIT
7858 ((((((((bfd_int64_t
) l
[3] & LITTLENUM_MASK
)
7859 << LITTLENUM_NUMBER_OF_BITS
)
7860 | ((bfd_int64_t
) l
[2] & LITTLENUM_MASK
))
7861 << LITTLENUM_NUMBER_OF_BITS
)
7862 | ((bfd_int64_t
) l
[1] & LITTLENUM_MASK
))
7863 << LITTLENUM_NUMBER_OF_BITS
)
7864 | ((bfd_int64_t
) l
[0] & LITTLENUM_MASK
));
7866 v
= ((l
[1] & LITTLENUM_MASK
) << LITTLENUM_NUMBER_OF_BITS
)
7867 | (l
[0] & LITTLENUM_MASK
);
7871 v
= inst
.reloc
.exp
.X_add_number
;
7873 if (!inst
.operands
[i
].issingle
)
7877 /* This can be encoded only for a low register. */
7878 if ((v
& ~0xFF) == 0 && (inst
.operands
[i
].reg
< 8))
7880 /* This can be done with a mov(1) instruction. */
7881 inst
.instruction
= T_OPCODE_MOV_I8
| (inst
.operands
[i
].reg
<< 8);
7882 inst
.instruction
|= v
;
7886 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2
)
7887 || ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2_v8m
))
7889 /* Check if on thumb2 it can be done with a mov.w, mvn or
7890 movw instruction. */
7891 unsigned int newimm
;
7892 bfd_boolean isNegated
;
7894 newimm
= encode_thumb32_immediate (v
);
7895 if (newimm
!= (unsigned int) FAIL
)
7899 newimm
= encode_thumb32_immediate (~v
);
7900 if (newimm
!= (unsigned int) FAIL
)
7904 /* The number can be loaded with a mov.w or mvn
7906 if (newimm
!= (unsigned int) FAIL
7907 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2
))
7909 inst
.instruction
= (0xf04f0000 /* MOV.W. */
7910 | (inst
.operands
[i
].reg
<< 8));
7911 /* Change to MOVN. */
7912 inst
.instruction
|= (isNegated
? 0x200000 : 0);
7913 inst
.instruction
|= (newimm
& 0x800) << 15;
7914 inst
.instruction
|= (newimm
& 0x700) << 4;
7915 inst
.instruction
|= (newimm
& 0x0ff);
7918 /* The number can be loaded with a movw instruction. */
7919 else if ((v
& ~0xFFFF) == 0
7920 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2_v8m
))
7922 int imm
= v
& 0xFFFF;
7924 inst
.instruction
= 0xf2400000; /* MOVW. */
7925 inst
.instruction
|= (inst
.operands
[i
].reg
<< 8);
7926 inst
.instruction
|= (imm
& 0xf000) << 4;
7927 inst
.instruction
|= (imm
& 0x0800) << 15;
7928 inst
.instruction
|= (imm
& 0x0700) << 4;
7929 inst
.instruction
|= (imm
& 0x00ff);
7936 int value
= encode_arm_immediate (v
);
7940 /* This can be done with a mov instruction. */
7941 inst
.instruction
&= LITERAL_MASK
;
7942 inst
.instruction
|= INST_IMMEDIATE
| (OPCODE_MOV
<< DATA_OP_SHIFT
);
7943 inst
.instruction
|= value
& 0xfff;
7947 value
= encode_arm_immediate (~ v
);
7950 /* This can be done with a mvn instruction. */
7951 inst
.instruction
&= LITERAL_MASK
;
7952 inst
.instruction
|= INST_IMMEDIATE
| (OPCODE_MVN
<< DATA_OP_SHIFT
);
7953 inst
.instruction
|= value
& 0xfff;
7957 else if (t
== CONST_VEC
)
7960 unsigned immbits
= 0;
7961 unsigned immlo
= inst
.operands
[1].imm
;
7962 unsigned immhi
= inst
.operands
[1].regisimm
7963 ? inst
.operands
[1].reg
7964 : inst
.reloc
.exp
.X_unsigned
7966 : ((bfd_int64_t
)((int) immlo
)) >> 32;
7967 int cmode
= neon_cmode_for_move_imm (immlo
, immhi
, FALSE
, &immbits
,
7968 &op
, 64, NT_invtype
);
7972 neon_invert_size (&immlo
, &immhi
, 64);
7974 cmode
= neon_cmode_for_move_imm (immlo
, immhi
, FALSE
, &immbits
,
7975 &op
, 64, NT_invtype
);
7980 inst
.instruction
= (inst
.instruction
& VLDR_VMOV_SAME
)
7986 /* Fill other bits in vmov encoding for both thumb and arm. */
7988 inst
.instruction
|= (0x7U
<< 29) | (0xF << 24);
7990 inst
.instruction
|= (0xFU
<< 28) | (0x1 << 25);
7991 neon_write_immbits (immbits
);
7999 /* Check if vldr Rx, =constant could be optimized to vmov Rx, #constant. */
8000 if (inst
.operands
[i
].issingle
8001 && is_quarter_float (inst
.operands
[1].imm
)
8002 && ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v3xd
))
8004 inst
.operands
[1].imm
=
8005 neon_qfloat_bits (v
);
8006 do_vfp_nsyn_opcode ("fconsts");
8010 /* If our host does not support a 64-bit type then we cannot perform
8011 the following optimization. This mean that there will be a
8012 discrepancy between the output produced by an assembler built for
8013 a 32-bit-only host and the output produced from a 64-bit host, but
8014 this cannot be helped. */
8015 #if defined BFD_HOST_64_BIT
8016 else if (!inst
.operands
[1].issingle
8017 && ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v3
))
8019 if (is_double_a_single (v
)
8020 && is_quarter_float (double_to_single (v
)))
8022 inst
.operands
[1].imm
=
8023 neon_qfloat_bits (double_to_single (v
));
8024 do_vfp_nsyn_opcode ("fconstd");
8032 if (add_to_lit_pool ((!inst
.operands
[i
].isvec
8033 || inst
.operands
[i
].issingle
) ? 4 : 8) == FAIL
)
8036 inst
.operands
[1].reg
= REG_PC
;
8037 inst
.operands
[1].isreg
= 1;
8038 inst
.operands
[1].preind
= 1;
8039 inst
.reloc
.pc_rel
= 1;
8040 inst
.reloc
.type
= (thumb_p
8041 ? BFD_RELOC_ARM_THUMB_OFFSET
8043 ? BFD_RELOC_ARM_HWLITERAL
8044 : BFD_RELOC_ARM_LITERAL
));
8048 /* inst.operands[i] was set up by parse_address. Encode it into an
8049 ARM-format instruction. Reject all forms which cannot be encoded
8050 into a coprocessor load/store instruction. If wb_ok is false,
8051 reject use of writeback; if unind_ok is false, reject use of
8052 unindexed addressing. If reloc_override is not 0, use it instead
8053 of BFD_ARM_CP_OFF_IMM, unless the initial relocation is a group one
8054 (in which case it is preserved). */
8057 encode_arm_cp_address (int i
, int wb_ok
, int unind_ok
, int reloc_override
)
8059 if (!inst
.operands
[i
].isreg
)
8062 if (! inst
.operands
[0].isvec
)
8064 inst
.error
= _("invalid co-processor operand");
8067 if (move_or_literal_pool (0, CONST_VEC
, /*mode_3=*/FALSE
))
8071 inst
.instruction
|= inst
.operands
[i
].reg
<< 16;
8073 gas_assert (!(inst
.operands
[i
].preind
&& inst
.operands
[i
].postind
));
8075 if (!inst
.operands
[i
].preind
&& !inst
.operands
[i
].postind
) /* unindexed */
8077 gas_assert (!inst
.operands
[i
].writeback
);
8080 inst
.error
= _("instruction does not support unindexed addressing");
8083 inst
.instruction
|= inst
.operands
[i
].imm
;
8084 inst
.instruction
|= INDEX_UP
;
8088 if (inst
.operands
[i
].preind
)
8089 inst
.instruction
|= PRE_INDEX
;
8091 if (inst
.operands
[i
].writeback
)
8093 if (inst
.operands
[i
].reg
== REG_PC
)
8095 inst
.error
= _("pc may not be used with write-back");
8100 inst
.error
= _("instruction does not support writeback");
8103 inst
.instruction
|= WRITE_BACK
;
8107 inst
.reloc
.type
= (bfd_reloc_code_real_type
) reloc_override
;
8108 else if ((inst
.reloc
.type
< BFD_RELOC_ARM_ALU_PC_G0_NC
8109 || inst
.reloc
.type
> BFD_RELOC_ARM_LDC_SB_G2
)
8110 && inst
.reloc
.type
!= BFD_RELOC_ARM_LDR_PC_G0
)
8113 inst
.reloc
.type
= BFD_RELOC_ARM_T32_CP_OFF_IMM
;
8115 inst
.reloc
.type
= BFD_RELOC_ARM_CP_OFF_IMM
;
8118 /* Prefer + for zero encoded value. */
8119 if (!inst
.operands
[i
].negative
)
8120 inst
.instruction
|= INDEX_UP
;
8125 /* Functions for instruction encoding, sorted by sub-architecture.
8126 First some generics; their names are taken from the conventional
8127 bit positions for register arguments in ARM format instructions. */
8137 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8143 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8144 inst
.instruction
|= inst
.operands
[1].reg
;
8150 inst
.instruction
|= inst
.operands
[0].reg
;
8151 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8157 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8158 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8164 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
8165 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
8171 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8172 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8176 check_obsolete (const arm_feature_set
*feature
, const char *msg
)
8178 if (ARM_CPU_IS_ANY (cpu_variant
))
8180 as_tsktsk ("%s", msg
);
8183 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, *feature
))
8195 unsigned Rn
= inst
.operands
[2].reg
;
8196 /* Enforce restrictions on SWP instruction. */
8197 if ((inst
.instruction
& 0x0fbfffff) == 0x01000090)
8199 constraint (Rn
== inst
.operands
[0].reg
|| Rn
== inst
.operands
[1].reg
,
8200 _("Rn must not overlap other operands"));
8202 /* SWP{b} is obsolete for ARMv8-A, and deprecated for ARMv6* and ARMv7.
8204 if (!check_obsolete (&arm_ext_v8
,
8205 _("swp{b} use is obsoleted for ARMv8 and later"))
8206 && warn_on_deprecated
8207 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6
))
8208 as_tsktsk (_("swp{b} use is deprecated for ARMv6 and ARMv7"));
8211 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8212 inst
.instruction
|= inst
.operands
[1].reg
;
8213 inst
.instruction
|= Rn
<< 16;
8219 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8220 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8221 inst
.instruction
|= inst
.operands
[2].reg
;
8227 constraint ((inst
.operands
[2].reg
== REG_PC
), BAD_PC
);
8228 constraint (((inst
.reloc
.exp
.X_op
!= O_constant
8229 && inst
.reloc
.exp
.X_op
!= O_illegal
)
8230 || inst
.reloc
.exp
.X_add_number
!= 0),
8232 inst
.instruction
|= inst
.operands
[0].reg
;
8233 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
8234 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
8240 inst
.instruction
|= inst
.operands
[0].imm
;
8246 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8247 encode_arm_cp_address (1, TRUE
, TRUE
, 0);
8250 /* ARM instructions, in alphabetical order by function name (except
8251 that wrapper functions appear immediately after the function they
8254 /* This is a pseudo-op of the form "adr rd, label" to be converted
8255 into a relative address of the form "add rd, pc, #label-.-8". */
8260 inst
.instruction
|= (inst
.operands
[0].reg
<< 12); /* Rd */
8262 /* Frag hacking will turn this into a sub instruction if the offset turns
8263 out to be negative. */
8264 inst
.reloc
.type
= BFD_RELOC_ARM_IMMEDIATE
;
8265 inst
.reloc
.pc_rel
= 1;
8266 inst
.reloc
.exp
.X_add_number
-= 8;
8269 /* This is a pseudo-op of the form "adrl rd, label" to be converted
8270 into a relative address of the form:
8271 add rd, pc, #low(label-.-8)"
8272 add rd, rd, #high(label-.-8)" */
8277 inst
.instruction
|= (inst
.operands
[0].reg
<< 12); /* Rd */
8279 /* Frag hacking will turn this into a sub instruction if the offset turns
8280 out to be negative. */
8281 inst
.reloc
.type
= BFD_RELOC_ARM_ADRL_IMMEDIATE
;
8282 inst
.reloc
.pc_rel
= 1;
8283 inst
.size
= INSN_SIZE
* 2;
8284 inst
.reloc
.exp
.X_add_number
-= 8;
8290 if (!inst
.operands
[1].present
)
8291 inst
.operands
[1].reg
= inst
.operands
[0].reg
;
8292 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8293 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8294 encode_arm_shifter_operand (2);
8300 if (inst
.operands
[0].present
)
8301 inst
.instruction
|= inst
.operands
[0].imm
;
8303 inst
.instruction
|= 0xf;
8309 unsigned int msb
= inst
.operands
[1].imm
+ inst
.operands
[2].imm
;
8310 constraint (msb
> 32, _("bit-field extends past end of register"));
8311 /* The instruction encoding stores the LSB and MSB,
8312 not the LSB and width. */
8313 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8314 inst
.instruction
|= inst
.operands
[1].imm
<< 7;
8315 inst
.instruction
|= (msb
- 1) << 16;
8323 /* #0 in second position is alternative syntax for bfc, which is
8324 the same instruction but with REG_PC in the Rm field. */
8325 if (!inst
.operands
[1].isreg
)
8326 inst
.operands
[1].reg
= REG_PC
;
8328 msb
= inst
.operands
[2].imm
+ inst
.operands
[3].imm
;
8329 constraint (msb
> 32, _("bit-field extends past end of register"));
8330 /* The instruction encoding stores the LSB and MSB,
8331 not the LSB and width. */
8332 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8333 inst
.instruction
|= inst
.operands
[1].reg
;
8334 inst
.instruction
|= inst
.operands
[2].imm
<< 7;
8335 inst
.instruction
|= (msb
- 1) << 16;
8341 constraint (inst
.operands
[2].imm
+ inst
.operands
[3].imm
> 32,
8342 _("bit-field extends past end of register"));
8343 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8344 inst
.instruction
|= inst
.operands
[1].reg
;
8345 inst
.instruction
|= inst
.operands
[2].imm
<< 7;
8346 inst
.instruction
|= (inst
.operands
[3].imm
- 1) << 16;
8349 /* ARM V5 breakpoint instruction (argument parse)
8350 BKPT <16 bit unsigned immediate>
8351 Instruction is not conditional.
8352 The bit pattern given in insns[] has the COND_ALWAYS condition,
8353 and it is an error if the caller tried to override that. */
8358 /* Top 12 of 16 bits to bits 19:8. */
8359 inst
.instruction
|= (inst
.operands
[0].imm
& 0xfff0) << 4;
8361 /* Bottom 4 of 16 bits to bits 3:0. */
8362 inst
.instruction
|= inst
.operands
[0].imm
& 0xf;
8366 encode_branch (int default_reloc
)
8368 if (inst
.operands
[0].hasreloc
)
8370 constraint (inst
.operands
[0].imm
!= BFD_RELOC_ARM_PLT32
8371 && inst
.operands
[0].imm
!= BFD_RELOC_ARM_TLS_CALL
,
8372 _("the only valid suffixes here are '(plt)' and '(tlscall)'"));
8373 inst
.reloc
.type
= inst
.operands
[0].imm
== BFD_RELOC_ARM_PLT32
8374 ? BFD_RELOC_ARM_PLT32
8375 : thumb_mode
? BFD_RELOC_ARM_THM_TLS_CALL
: BFD_RELOC_ARM_TLS_CALL
;
8378 inst
.reloc
.type
= (bfd_reloc_code_real_type
) default_reloc
;
8379 inst
.reloc
.pc_rel
= 1;
8386 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
)
8387 encode_branch (BFD_RELOC_ARM_PCREL_JUMP
);
8390 encode_branch (BFD_RELOC_ARM_PCREL_BRANCH
);
8397 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
)
8399 if (inst
.cond
== COND_ALWAYS
)
8400 encode_branch (BFD_RELOC_ARM_PCREL_CALL
);
8402 encode_branch (BFD_RELOC_ARM_PCREL_JUMP
);
8406 encode_branch (BFD_RELOC_ARM_PCREL_BRANCH
);
8409 /* ARM V5 branch-link-exchange instruction (argument parse)
8410 BLX <target_addr> ie BLX(1)
8411 BLX{<condition>} <Rm> ie BLX(2)
8412 Unfortunately, there are two different opcodes for this mnemonic.
8413 So, the insns[].value is not used, and the code here zaps values
8414 into inst.instruction.
8415 Also, the <target_addr> can be 25 bits, hence has its own reloc. */
8420 if (inst
.operands
[0].isreg
)
8422 /* Arg is a register; the opcode provided by insns[] is correct.
8423 It is not illegal to do "blx pc", just useless. */
8424 if (inst
.operands
[0].reg
== REG_PC
)
8425 as_tsktsk (_("use of r15 in blx in ARM mode is not really useful"));
8427 inst
.instruction
|= inst
.operands
[0].reg
;
8431 /* Arg is an address; this instruction cannot be executed
8432 conditionally, and the opcode must be adjusted.
8433 We retain the BFD_RELOC_ARM_PCREL_BLX till the very end
8434 where we generate out a BFD_RELOC_ARM_PCREL_CALL instead. */
8435 constraint (inst
.cond
!= COND_ALWAYS
, BAD_COND
);
8436 inst
.instruction
= 0xfa000000;
8437 encode_branch (BFD_RELOC_ARM_PCREL_BLX
);
8444 bfd_boolean want_reloc
;
8446 if (inst
.operands
[0].reg
== REG_PC
)
8447 as_tsktsk (_("use of r15 in bx in ARM mode is not really useful"));
8449 inst
.instruction
|= inst
.operands
[0].reg
;
8450 /* Output R_ARM_V4BX relocations if is an EABI object that looks like
8451 it is for ARMv4t or earlier. */
8452 want_reloc
= !ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5
);
8453 if (object_arch
&& !ARM_CPU_HAS_FEATURE (*object_arch
, arm_ext_v5
))
8457 if (EF_ARM_EABI_VERSION (meabi_flags
) < EF_ARM_EABI_VER4
)
8462 inst
.reloc
.type
= BFD_RELOC_ARM_V4BX
;
8466 /* ARM v5TEJ. Jump to Jazelle code. */
8471 if (inst
.operands
[0].reg
== REG_PC
)
8472 as_tsktsk (_("use of r15 in bxj is not really useful"));
8474 inst
.instruction
|= inst
.operands
[0].reg
;
8477 /* Co-processor data operation:
8478 CDP{cond} <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}
8479 CDP2 <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>} */
8483 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8484 inst
.instruction
|= inst
.operands
[1].imm
<< 20;
8485 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
8486 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
8487 inst
.instruction
|= inst
.operands
[4].reg
;
8488 inst
.instruction
|= inst
.operands
[5].imm
<< 5;
8494 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
8495 encode_arm_shifter_operand (1);
8498 /* Transfer between coprocessor and ARM registers.
8499 MRC{cond} <coproc>, <opcode_1>, <Rd>, <CRn>, <CRm>{, <opcode_2>}
8504 No special properties. */
8506 struct deprecated_coproc_regs_s
8513 arm_feature_set deprecated
;
8514 arm_feature_set obsoleted
;
8515 const char *dep_msg
;
8516 const char *obs_msg
;
8519 #define DEPR_ACCESS_V8 \
8520 N_("This coprocessor register access is deprecated in ARMv8")
8522 /* Table of all deprecated coprocessor registers. */
8523 static struct deprecated_coproc_regs_s deprecated_coproc_regs
[] =
8525 {15, 0, 7, 10, 5, /* CP15DMB. */
8526 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
), ARM_ARCH_NONE
,
8527 DEPR_ACCESS_V8
, NULL
},
8528 {15, 0, 7, 10, 4, /* CP15DSB. */
8529 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
), ARM_ARCH_NONE
,
8530 DEPR_ACCESS_V8
, NULL
},
8531 {15, 0, 7, 5, 4, /* CP15ISB. */
8532 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
), ARM_ARCH_NONE
,
8533 DEPR_ACCESS_V8
, NULL
},
8534 {14, 6, 1, 0, 0, /* TEEHBR. */
8535 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
), ARM_ARCH_NONE
,
8536 DEPR_ACCESS_V8
, NULL
},
8537 {14, 6, 0, 0, 0, /* TEECR. */
8538 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
), ARM_ARCH_NONE
,
8539 DEPR_ACCESS_V8
, NULL
},
8542 #undef DEPR_ACCESS_V8
8544 static const size_t deprecated_coproc_reg_count
=
8545 sizeof (deprecated_coproc_regs
) / sizeof (deprecated_coproc_regs
[0]);
8553 Rd
= inst
.operands
[2].reg
;
8556 if (inst
.instruction
== 0xee000010
8557 || inst
.instruction
== 0xfe000010)
8559 reject_bad_reg (Rd
);
8562 constraint (Rd
== REG_SP
, BAD_SP
);
8567 if (inst
.instruction
== 0xe000010)
8568 constraint (Rd
== REG_PC
, BAD_PC
);
8571 for (i
= 0; i
< deprecated_coproc_reg_count
; ++i
)
8573 const struct deprecated_coproc_regs_s
*r
=
8574 deprecated_coproc_regs
+ i
;
8576 if (inst
.operands
[0].reg
== r
->cp
8577 && inst
.operands
[1].imm
== r
->opc1
8578 && inst
.operands
[3].reg
== r
->crn
8579 && inst
.operands
[4].reg
== r
->crm
8580 && inst
.operands
[5].imm
== r
->opc2
)
8582 if (! ARM_CPU_IS_ANY (cpu_variant
)
8583 && warn_on_deprecated
8584 && ARM_CPU_HAS_FEATURE (cpu_variant
, r
->deprecated
))
8585 as_tsktsk ("%s", r
->dep_msg
);
8589 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8590 inst
.instruction
|= inst
.operands
[1].imm
<< 21;
8591 inst
.instruction
|= Rd
<< 12;
8592 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
8593 inst
.instruction
|= inst
.operands
[4].reg
;
8594 inst
.instruction
|= inst
.operands
[5].imm
<< 5;
8597 /* Transfer between coprocessor register and pair of ARM registers.
8598 MCRR{cond} <coproc>, <opcode>, <Rd>, <Rn>, <CRm>.
8603 Two XScale instructions are special cases of these:
8605 MAR{cond} acc0, <RdLo>, <RdHi> == MCRR{cond} p0, #0, <RdLo>, <RdHi>, c0
8606 MRA{cond} acc0, <RdLo>, <RdHi> == MRRC{cond} p0, #0, <RdLo>, <RdHi>, c0
8608 Result unpredictable if Rd or Rn is R15. */
8615 Rd
= inst
.operands
[2].reg
;
8616 Rn
= inst
.operands
[3].reg
;
8620 reject_bad_reg (Rd
);
8621 reject_bad_reg (Rn
);
8625 constraint (Rd
== REG_PC
, BAD_PC
);
8626 constraint (Rn
== REG_PC
, BAD_PC
);
8629 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8630 inst
.instruction
|= inst
.operands
[1].imm
<< 4;
8631 inst
.instruction
|= Rd
<< 12;
8632 inst
.instruction
|= Rn
<< 16;
8633 inst
.instruction
|= inst
.operands
[4].reg
;
8639 inst
.instruction
|= inst
.operands
[0].imm
<< 6;
8640 if (inst
.operands
[1].present
)
8642 inst
.instruction
|= CPSI_MMOD
;
8643 inst
.instruction
|= inst
.operands
[1].imm
;
8650 inst
.instruction
|= inst
.operands
[0].imm
;
8656 unsigned Rd
, Rn
, Rm
;
8658 Rd
= inst
.operands
[0].reg
;
8659 Rn
= (inst
.operands
[1].present
8660 ? inst
.operands
[1].reg
: Rd
);
8661 Rm
= inst
.operands
[2].reg
;
8663 constraint ((Rd
== REG_PC
), BAD_PC
);
8664 constraint ((Rn
== REG_PC
), BAD_PC
);
8665 constraint ((Rm
== REG_PC
), BAD_PC
);
8667 inst
.instruction
|= Rd
<< 16;
8668 inst
.instruction
|= Rn
<< 0;
8669 inst
.instruction
|= Rm
<< 8;
8675 /* There is no IT instruction in ARM mode. We
8676 process it to do the validation as if in
8677 thumb mode, just in case the code gets
8678 assembled for thumb using the unified syntax. */
8683 set_it_insn_type (IT_INSN
);
8684 now_it
.mask
= (inst
.instruction
& 0xf) | 0x10;
8685 now_it
.cc
= inst
.operands
[0].imm
;
/* If there is only one register in the register list,
   then return its register number.  Otherwise return -1.  */
static int
only_one_reg_in_list (int range)
{
  int i;

  /* Find the index of the lowest set bit.  Scanning explicitly (rather
     than computing ffs (range) - 1) avoids the undefined shift
     (1 << -1) that the ffs-based form performed when RANGE was zero,
     and drops the POSIX-only ffs() dependency.  */
  for (i = 0; i <= 15; i++)
    if (range & (1 << i))
      break;

  /* A single-register list has exactly one bit set, and it must name
     one of r0-r15.  */
  return (i > 15 || range != (1 << i)) ? -1 : i;
}
8699 encode_ldmstm(int from_push_pop_mnem
)
8701 int base_reg
= inst
.operands
[0].reg
;
8702 int range
= inst
.operands
[1].imm
;
8705 inst
.instruction
|= base_reg
<< 16;
8706 inst
.instruction
|= range
;
8708 if (inst
.operands
[1].writeback
)
8709 inst
.instruction
|= LDM_TYPE_2_OR_3
;
8711 if (inst
.operands
[0].writeback
)
8713 inst
.instruction
|= WRITE_BACK
;
8714 /* Check for unpredictable uses of writeback. */
8715 if (inst
.instruction
& LOAD_BIT
)
8717 /* Not allowed in LDM type 2. */
8718 if ((inst
.instruction
& LDM_TYPE_2_OR_3
)
8719 && ((range
& (1 << REG_PC
)) == 0))
8720 as_warn (_("writeback of base register is UNPREDICTABLE"));
8721 /* Only allowed if base reg not in list for other types. */
8722 else if (range
& (1 << base_reg
))
8723 as_warn (_("writeback of base register when in register list is UNPREDICTABLE"));
8727 /* Not allowed for type 2. */
8728 if (inst
.instruction
& LDM_TYPE_2_OR_3
)
8729 as_warn (_("writeback of base register is UNPREDICTABLE"));
8730 /* Only allowed if base reg not in list, or first in list. */
8731 else if ((range
& (1 << base_reg
))
8732 && (range
& ((1 << base_reg
) - 1)))
8733 as_warn (_("if writeback register is in list, it must be the lowest reg in the list"));
8737 /* If PUSH/POP has only one register, then use the A2 encoding. */
8738 one_reg
= only_one_reg_in_list (range
);
8739 if (from_push_pop_mnem
&& one_reg
>= 0)
8741 int is_push
= (inst
.instruction
& A_PUSH_POP_OP_MASK
) == A1_OPCODE_PUSH
;
8743 inst
.instruction
&= A_COND_MASK
;
8744 inst
.instruction
|= is_push
? A2_OPCODE_PUSH
: A2_OPCODE_POP
;
8745 inst
.instruction
|= one_reg
<< 12;
8752 encode_ldmstm (/*from_push_pop_mnem=*/FALSE
);
8755 /* ARMv5TE load-consecutive (argument parse)
8764 constraint (inst
.operands
[0].reg
% 2 != 0,
8765 _("first transfer register must be even"));
8766 constraint (inst
.operands
[1].present
8767 && inst
.operands
[1].reg
!= inst
.operands
[0].reg
+ 1,
8768 _("can only transfer two consecutive registers"));
8769 constraint (inst
.operands
[0].reg
== REG_LR
, _("r14 not allowed here"));
8770 constraint (!inst
.operands
[2].isreg
, _("'[' expected"));
8772 if (!inst
.operands
[1].present
)
8773 inst
.operands
[1].reg
= inst
.operands
[0].reg
+ 1;
8775 /* encode_arm_addr_mode_3 will diagnose overlap between the base
8776 register and the first register written; we have to diagnose
8777 overlap between the base and the second register written here. */
8779 if (inst
.operands
[2].reg
== inst
.operands
[1].reg
8780 && (inst
.operands
[2].writeback
|| inst
.operands
[2].postind
))
8781 as_warn (_("base register written back, and overlaps "
8782 "second transfer register"));
8784 if (!(inst
.instruction
& V4_STR_BIT
))
8786 /* For an index-register load, the index register must not overlap the
8787 destination (even if not write-back). */
8788 if (inst
.operands
[2].immisreg
8789 && ((unsigned) inst
.operands
[2].imm
== inst
.operands
[0].reg
8790 || (unsigned) inst
.operands
[2].imm
== inst
.operands
[1].reg
))
8791 as_warn (_("index register overlaps transfer register"));
8793 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8794 encode_arm_addr_mode_3 (2, /*is_t=*/FALSE
);
8800 constraint (!inst
.operands
[1].isreg
|| !inst
.operands
[1].preind
8801 || inst
.operands
[1].postind
|| inst
.operands
[1].writeback
8802 || inst
.operands
[1].immisreg
|| inst
.operands
[1].shifted
8803 || inst
.operands
[1].negative
8804 /* This can arise if the programmer has written
8806 or if they have mistakenly used a register name as the last
8809 It is very difficult to distinguish between these two cases
8810 because "rX" might actually be a label. ie the register
8811 name has been occluded by a symbol of the same name. So we
8812 just generate a general 'bad addressing mode' type error
8813 message and leave it up to the programmer to discover the
8814 true cause and fix their mistake. */
8815 || (inst
.operands
[1].reg
== REG_PC
),
8818 constraint (inst
.reloc
.exp
.X_op
!= O_constant
8819 || inst
.reloc
.exp
.X_add_number
!= 0,
8820 _("offset must be zero in ARM encoding"));
8822 constraint ((inst
.operands
[1].reg
== REG_PC
), BAD_PC
);
8824 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8825 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8826 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
8832 constraint (inst
.operands
[0].reg
% 2 != 0,
8833 _("even register required"));
8834 constraint (inst
.operands
[1].present
8835 && inst
.operands
[1].reg
!= inst
.operands
[0].reg
+ 1,
8836 _("can only load two consecutive registers"));
8837 /* If op 1 were present and equal to PC, this function wouldn't
8838 have been called in the first place. */
8839 constraint (inst
.operands
[0].reg
== REG_LR
, _("r14 not allowed here"));
8841 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8842 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
8845 /* In both ARM and thumb state 'ldr pc, #imm' with an immediate
8846 which is not a multiple of four is UNPREDICTABLE. */
8848 check_ldr_r15_aligned (void)
8850 constraint (!(inst
.operands
[1].immisreg
)
8851 && (inst
.operands
[0].reg
== REG_PC
8852 && inst
.operands
[1].reg
== REG_PC
8853 && (inst
.reloc
.exp
.X_add_number
& 0x3)),
8854 _("ldr to register 15 must be 4-byte alligned"));
8860 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8861 if (!inst
.operands
[1].isreg
)
8862 if (move_or_literal_pool (0, CONST_ARM
, /*mode_3=*/FALSE
))
8864 encode_arm_addr_mode_2 (1, /*is_t=*/FALSE
);
8865 check_ldr_r15_aligned ();
8871 /* ldrt/strt always use post-indexed addressing. Turn [Rn] into [Rn]! and
8873 if (inst
.operands
[1].preind
)
8875 constraint (inst
.reloc
.exp
.X_op
!= O_constant
8876 || inst
.reloc
.exp
.X_add_number
!= 0,
8877 _("this instruction requires a post-indexed address"));
8879 inst
.operands
[1].preind
= 0;
8880 inst
.operands
[1].postind
= 1;
8881 inst
.operands
[1].writeback
= 1;
8883 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8884 encode_arm_addr_mode_2 (1, /*is_t=*/TRUE
);
8887 /* Halfword and signed-byte load/store operations. */
8892 constraint (inst
.operands
[0].reg
== REG_PC
, BAD_PC
);
8893 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8894 if (!inst
.operands
[1].isreg
)
8895 if (move_or_literal_pool (0, CONST_ARM
, /*mode_3=*/TRUE
))
8897 encode_arm_addr_mode_3 (1, /*is_t=*/FALSE
);
8903 /* ldrt/strt always use post-indexed addressing. Turn [Rn] into [Rn]! and
8905 if (inst
.operands
[1].preind
)
8907 constraint (inst
.reloc
.exp
.X_op
!= O_constant
8908 || inst
.reloc
.exp
.X_add_number
!= 0,
8909 _("this instruction requires a post-indexed address"));
8911 inst
.operands
[1].preind
= 0;
8912 inst
.operands
[1].postind
= 1;
8913 inst
.operands
[1].writeback
= 1;
8915 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8916 encode_arm_addr_mode_3 (1, /*is_t=*/TRUE
);
8919 /* Co-processor register load/store.
8920 Format: <LDC|STC>{cond}[L] CP#,CRd,<address> */
8924 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8925 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
8926 encode_arm_cp_address (2, TRUE
, TRUE
, 0);
8932 /* This restriction does not apply to mls (nor to mla in v6 or later). */
8933 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
8934 && !ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6
)
8935 && !(inst
.instruction
& 0x00400000))
8936 as_tsktsk (_("Rd and Rm should be different in mla"));
8938 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
8939 inst
.instruction
|= inst
.operands
[1].reg
;
8940 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
8941 inst
.instruction
|= inst
.operands
[3].reg
<< 12;
8947 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8948 encode_arm_shifter_operand (1);
8951 /* ARM V6T2 16-bit immediate register load: MOV[WT]{cond} Rd, #<imm16>. */
8958 top
= (inst
.instruction
& 0x00400000) != 0;
8959 constraint (top
&& inst
.reloc
.type
== BFD_RELOC_ARM_MOVW
,
8960 _(":lower16: not allowed this instruction"));
8961 constraint (!top
&& inst
.reloc
.type
== BFD_RELOC_ARM_MOVT
,
8962 _(":upper16: not allowed instruction"));
8963 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8964 if (inst
.reloc
.type
== BFD_RELOC_UNUSED
)
8966 imm
= inst
.reloc
.exp
.X_add_number
;
8967 /* The value is in two pieces: 0:11, 16:19. */
8968 inst
.instruction
|= (imm
& 0x00000fff);
8969 inst
.instruction
|= (imm
& 0x0000f000) << 4;
8974 do_vfp_nsyn_mrs (void)
8976 if (inst
.operands
[0].isvec
)
8978 if (inst
.operands
[1].reg
!= 1)
8979 first_error (_("operand 1 must be FPSCR"));
8980 memset (&inst
.operands
[0], '\0', sizeof (inst
.operands
[0]));
8981 memset (&inst
.operands
[1], '\0', sizeof (inst
.operands
[1]));
8982 do_vfp_nsyn_opcode ("fmstat");
8984 else if (inst
.operands
[1].isvec
)
8985 do_vfp_nsyn_opcode ("fmrx");
8993 do_vfp_nsyn_msr (void)
8995 if (inst
.operands
[0].isvec
)
8996 do_vfp_nsyn_opcode ("fmxr");
9006 unsigned Rt
= inst
.operands
[0].reg
;
9008 if (thumb_mode
&& Rt
== REG_SP
)
9010 inst
.error
= BAD_SP
;
9014 /* APSR_ sets isvec. All other refs to PC are illegal. */
9015 if (!inst
.operands
[0].isvec
&& Rt
== REG_PC
)
9017 inst
.error
= BAD_PC
;
9021 /* If we get through parsing the register name, we just insert the number
9022 generated into the instruction without further validation. */
9023 inst
.instruction
|= (inst
.operands
[1].reg
<< 16);
9024 inst
.instruction
|= (Rt
<< 12);
9030 unsigned Rt
= inst
.operands
[1].reg
;
9033 reject_bad_reg (Rt
);
9034 else if (Rt
== REG_PC
)
9036 inst
.error
= BAD_PC
;
9040 /* If we get through parsing the register name, we just insert the number
9041 generated into the instruction without further validation. */
9042 inst
.instruction
|= (inst
.operands
[0].reg
<< 16);
9043 inst
.instruction
|= (Rt
<< 12);
9051 if (do_vfp_nsyn_mrs () == SUCCESS
)
9054 constraint (inst
.operands
[0].reg
== REG_PC
, BAD_PC
);
9055 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9057 if (inst
.operands
[1].isreg
)
9059 br
= inst
.operands
[1].reg
;
9060 if (((br
& 0x200) == 0) && ((br
& 0xf0000) != 0xf000))
9061 as_bad (_("bad register for mrs"));
9065 /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all. */
9066 constraint ((inst
.operands
[1].imm
& (PSR_c
|PSR_x
|PSR_s
|PSR_f
))
9068 _("'APSR', 'CPSR' or 'SPSR' expected"));
9069 br
= (15<<16) | (inst
.operands
[1].imm
& SPSR_BIT
);
9072 inst
.instruction
|= br
;
9075 /* Two possible forms:
9076 "{C|S}PSR_<field>, Rm",
9077 "{C|S}PSR_f, #expression". */
9082 if (do_vfp_nsyn_msr () == SUCCESS
)
9085 inst
.instruction
|= inst
.operands
[0].imm
;
9086 if (inst
.operands
[1].isreg
)
9087 inst
.instruction
|= inst
.operands
[1].reg
;
9090 inst
.instruction
|= INST_IMMEDIATE
;
9091 inst
.reloc
.type
= BFD_RELOC_ARM_IMMEDIATE
;
9092 inst
.reloc
.pc_rel
= 0;
9099 constraint (inst
.operands
[2].reg
== REG_PC
, BAD_PC
);
9101 if (!inst
.operands
[2].present
)
9102 inst
.operands
[2].reg
= inst
.operands
[0].reg
;
9103 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9104 inst
.instruction
|= inst
.operands
[1].reg
;
9105 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
9107 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
9108 && !ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6
))
9109 as_tsktsk (_("Rd and Rm should be different in mul"));
9112 /* Long Multiply Parser
9113 UMULL RdLo, RdHi, Rm, Rs
9114 SMULL RdLo, RdHi, Rm, Rs
9115 UMLAL RdLo, RdHi, Rm, Rs
9116 SMLAL RdLo, RdHi, Rm, Rs. */
9121 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9122 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9123 inst
.instruction
|= inst
.operands
[2].reg
;
9124 inst
.instruction
|= inst
.operands
[3].reg
<< 8;
9126 /* rdhi and rdlo must be different. */
9127 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
)
9128 as_tsktsk (_("rdhi and rdlo must be different"));
9130 /* rdhi, rdlo and rm must all be different before armv6. */
9131 if ((inst
.operands
[0].reg
== inst
.operands
[2].reg
9132 || inst
.operands
[1].reg
== inst
.operands
[2].reg
)
9133 && !ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6
))
9134 as_tsktsk (_("rdhi, rdlo and rm must all be different"));
9140 if (inst
.operands
[0].present
9141 || ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6k
))
9143 /* Architectural NOP hints are CPSR sets with no bits selected. */
9144 inst
.instruction
&= 0xf0000000;
9145 inst
.instruction
|= 0x0320f000;
9146 if (inst
.operands
[0].present
)
9147 inst
.instruction
|= inst
.operands
[0].imm
;
9151 /* ARM V6 Pack Halfword Bottom Top instruction (argument parse).
9152 PKHBT {<cond>} <Rd>, <Rn>, <Rm> {, LSL #<shift_imm>}
9153 Condition defaults to COND_ALWAYS.
9154 Error if Rd, Rn or Rm are R15. */
9159 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9160 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9161 inst
.instruction
|= inst
.operands
[2].reg
;
9162 if (inst
.operands
[3].present
)
9163 encode_arm_shift (3);
9166 /* ARM V6 PKHTB (Argument Parse). */
9171 if (!inst
.operands
[3].present
)
9173 /* If the shift specifier is omitted, turn the instruction
9174 into pkhbt rd, rm, rn. */
9175 inst
.instruction
&= 0xfff00010;
9176 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9177 inst
.instruction
|= inst
.operands
[1].reg
;
9178 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
9182 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9183 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9184 inst
.instruction
|= inst
.operands
[2].reg
;
9185 encode_arm_shift (3);
9189 /* ARMv5TE: Preload-Cache
9190 MP Extensions: Preload for write
9194 Syntactically, like LDR with B=1, W=0, L=1. */
9199 constraint (!inst
.operands
[0].isreg
,
9200 _("'[' expected after PLD mnemonic"));
9201 constraint (inst
.operands
[0].postind
,
9202 _("post-indexed expression used in preload instruction"));
9203 constraint (inst
.operands
[0].writeback
,
9204 _("writeback used in preload instruction"));
9205 constraint (!inst
.operands
[0].preind
,
9206 _("unindexed addressing used in preload instruction"));
9207 encode_arm_addr_mode_2 (0, /*is_t=*/FALSE
);
9210 /* ARMv7: PLI <addr_mode> */
9214 constraint (!inst
.operands
[0].isreg
,
9215 _("'[' expected after PLI mnemonic"));
9216 constraint (inst
.operands
[0].postind
,
9217 _("post-indexed expression used in preload instruction"));
9218 constraint (inst
.operands
[0].writeback
,
9219 _("writeback used in preload instruction"));
9220 constraint (!inst
.operands
[0].preind
,
9221 _("unindexed addressing used in preload instruction"));
9222 encode_arm_addr_mode_2 (0, /*is_t=*/FALSE
);
9223 inst
.instruction
&= ~PRE_INDEX
;
9229 constraint (inst
.operands
[0].writeback
,
9230 _("push/pop do not support {reglist}^"));
9231 inst
.operands
[1] = inst
.operands
[0];
9232 memset (&inst
.operands
[0], 0, sizeof inst
.operands
[0]);
9233 inst
.operands
[0].isreg
= 1;
9234 inst
.operands
[0].writeback
= 1;
9235 inst
.operands
[0].reg
= REG_SP
;
9236 encode_ldmstm (/*from_push_pop_mnem=*/TRUE
);
9239 /* ARM V6 RFE (Return from Exception) loads the PC and CPSR from the
9240 word at the specified address and the following word
9242 Unconditionally executed.
9243 Error if Rn is R15. */
9248 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9249 if (inst
.operands
[0].writeback
)
9250 inst
.instruction
|= WRITE_BACK
;
9253 /* ARM V6 ssat (argument parse). */
9258 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9259 inst
.instruction
|= (inst
.operands
[1].imm
- 1) << 16;
9260 inst
.instruction
|= inst
.operands
[2].reg
;
9262 if (inst
.operands
[3].present
)
9263 encode_arm_shift (3);
9266 /* ARM V6 usat (argument parse). */
9271 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9272 inst
.instruction
|= inst
.operands
[1].imm
<< 16;
9273 inst
.instruction
|= inst
.operands
[2].reg
;
9275 if (inst
.operands
[3].present
)
9276 encode_arm_shift (3);
9279 /* ARM V6 ssat16 (argument parse). */
9284 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9285 inst
.instruction
|= ((inst
.operands
[1].imm
- 1) << 16);
9286 inst
.instruction
|= inst
.operands
[2].reg
;
9292 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9293 inst
.instruction
|= inst
.operands
[1].imm
<< 16;
9294 inst
.instruction
|= inst
.operands
[2].reg
;
9297 /* ARM V6 SETEND (argument parse). Sets the E bit in the CPSR while
9298 preserving the other bits.
9300 setend <endian_specifier>, where <endian_specifier> is either
9306 if (warn_on_deprecated
9307 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8
))
9308 as_tsktsk (_("setend use is deprecated for ARMv8"));
9310 if (inst
.operands
[0].imm
)
9311 inst
.instruction
|= 0x200;
9317 unsigned int Rm
= (inst
.operands
[1].present
9318 ? inst
.operands
[1].reg
9319 : inst
.operands
[0].reg
);
9321 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9322 inst
.instruction
|= Rm
;
9323 if (inst
.operands
[2].isreg
) /* Rd, {Rm,} Rs */
9325 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
9326 inst
.instruction
|= SHIFT_BY_REG
;
9327 /* PR 12854: Error on extraneous shifts. */
9328 constraint (inst
.operands
[2].shifted
,
9329 _("extraneous shift as part of operand to shift insn"));
9332 inst
.reloc
.type
= BFD_RELOC_ARM_SHIFT_IMM
;
9338 inst
.reloc
.type
= BFD_RELOC_ARM_SMC
;
9339 inst
.reloc
.pc_rel
= 0;
9345 inst
.reloc
.type
= BFD_RELOC_ARM_HVC
;
9346 inst
.reloc
.pc_rel
= 0;
9352 inst
.reloc
.type
= BFD_RELOC_ARM_SWI
;
9353 inst
.reloc
.pc_rel
= 0;
9359 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_pan
),
9360 _("selected processor does not support SETPAN instruction"));
9362 inst
.instruction
|= ((inst
.operands
[0].imm
& 1) << 9);
9368 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_pan
),
9369 _("selected processor does not support SETPAN instruction"));
9371 inst
.instruction
|= (inst
.operands
[0].imm
<< 3);
9374 /* ARM V5E (El Segundo) signed-multiply-accumulate (argument parse)
9375 SMLAxy{cond} Rd,Rm,Rs,Rn
9376 SMLAWy{cond} Rd,Rm,Rs,Rn
9377 Error if any register is R15. */
9382 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9383 inst
.instruction
|= inst
.operands
[1].reg
;
9384 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
9385 inst
.instruction
|= inst
.operands
[3].reg
<< 12;
9388 /* ARM V5E (El Segundo) signed-multiply-accumulate-long (argument parse)
9389 SMLALxy{cond} Rdlo,Rdhi,Rm,Rs
9390 Error if any register is R15.
9391 Warning if Rdlo == Rdhi. */
9396 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9397 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9398 inst
.instruction
|= inst
.operands
[2].reg
;
9399 inst
.instruction
|= inst
.operands
[3].reg
<< 8;
9401 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
)
9402 as_tsktsk (_("rdhi and rdlo must be different"));
9405 /* ARM V5E (El Segundo) signed-multiply (argument parse)
9406 SMULxy{cond} Rd,Rm,Rs
9407 Error if any register is R15. */
9412 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9413 inst
.instruction
|= inst
.operands
[1].reg
;
9414 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
9417 /* ARM V6 srs (argument parse). The variable fields in the encoding are
9418 the same for both ARM and Thumb-2. */
9425 if (inst
.operands
[0].present
)
9427 reg
= inst
.operands
[0].reg
;
9428 constraint (reg
!= REG_SP
, _("SRS base register must be r13"));
9433 inst
.instruction
|= reg
<< 16;
9434 inst
.instruction
|= inst
.operands
[1].imm
;
9435 if (inst
.operands
[0].writeback
|| inst
.operands
[1].writeback
)
9436 inst
.instruction
|= WRITE_BACK
;
9439 /* ARM V6 strex (argument parse). */
9444 constraint (!inst
.operands
[2].isreg
|| !inst
.operands
[2].preind
9445 || inst
.operands
[2].postind
|| inst
.operands
[2].writeback
9446 || inst
.operands
[2].immisreg
|| inst
.operands
[2].shifted
9447 || inst
.operands
[2].negative
9448 /* See comment in do_ldrex(). */
9449 || (inst
.operands
[2].reg
== REG_PC
),
9452 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
9453 || inst
.operands
[0].reg
== inst
.operands
[2].reg
, BAD_OVERLAP
);
9455 constraint (inst
.reloc
.exp
.X_op
!= O_constant
9456 || inst
.reloc
.exp
.X_add_number
!= 0,
9457 _("offset must be zero in ARM encoding"));
9459 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9460 inst
.instruction
|= inst
.operands
[1].reg
;
9461 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
9462 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
9468 constraint (!inst
.operands
[2].isreg
|| !inst
.operands
[2].preind
9469 || inst
.operands
[2].postind
|| inst
.operands
[2].writeback
9470 || inst
.operands
[2].immisreg
|| inst
.operands
[2].shifted
9471 || inst
.operands
[2].negative
,
9474 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
9475 || inst
.operands
[0].reg
== inst
.operands
[2].reg
, BAD_OVERLAP
);
9483 constraint (inst
.operands
[1].reg
% 2 != 0,
9484 _("even register required"));
9485 constraint (inst
.operands
[2].present
9486 && inst
.operands
[2].reg
!= inst
.operands
[1].reg
+ 1,
9487 _("can only store two consecutive registers"));
9488 /* If op 2 were present and equal to PC, this function wouldn't
9489 have been called in the first place. */
9490 constraint (inst
.operands
[1].reg
== REG_LR
, _("r14 not allowed here"));
9492 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
9493 || inst
.operands
[0].reg
== inst
.operands
[1].reg
+ 1
9494 || inst
.operands
[0].reg
== inst
.operands
[3].reg
,
9497 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9498 inst
.instruction
|= inst
.operands
[1].reg
;
9499 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
9506 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
9507 || inst
.operands
[0].reg
== inst
.operands
[2].reg
, BAD_OVERLAP
);
9515 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
9516 || inst
.operands
[0].reg
== inst
.operands
[2].reg
, BAD_OVERLAP
);
9521 /* ARM V6 SXTAH extracts a 16-bit value from a register, sign
9522 extends it to 32-bits, and adds the result to a value in another
9523 register. You can specify a rotation by 0, 8, 16, or 24 bits
9524 before extracting the 16-bit value.
9525 SXTAH{<cond>} <Rd>, <Rn>, <Rm>{, <rotation>}
9526 Condition defaults to COND_ALWAYS.
9527 Error if any register uses R15. */
9532 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9533 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9534 inst
.instruction
|= inst
.operands
[2].reg
;
9535 inst
.instruction
|= inst
.operands
[3].imm
<< 10;
9540 SXTH {<cond>} <Rd>, <Rm>{, <rotation>}
9541 Condition defaults to COND_ALWAYS.
9542 Error if any register uses R15. */
9547 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9548 inst
.instruction
|= inst
.operands
[1].reg
;
9549 inst
.instruction
|= inst
.operands
[2].imm
<< 10;
9552 /* VFP instructions. In a logical order: SP variant first, monad
9553 before dyad, arithmetic then move then load/store. */
9556 do_vfp_sp_monadic (void)
9558 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
9559 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sm
);
9563 do_vfp_sp_dyadic (void)
9565 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
9566 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sn
);
9567 encode_arm_vfp_reg (inst
.operands
[2].reg
, VFP_REG_Sm
);
9571 do_vfp_sp_compare_z (void)
9573 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
9577 do_vfp_dp_sp_cvt (void)
9579 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
9580 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sm
);
9584 do_vfp_sp_dp_cvt (void)
9586 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
9587 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dm
);
9591 do_vfp_reg_from_sp (void)
9593 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9594 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sn
);
9598 do_vfp_reg2_from_sp2 (void)
9600 constraint (inst
.operands
[2].imm
!= 2,
9601 _("only two consecutive VFP SP registers allowed here"));
9602 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9603 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9604 encode_arm_vfp_reg (inst
.operands
[2].reg
, VFP_REG_Sm
);
9608 do_vfp_sp_from_reg (void)
9610 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sn
);
9611 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
9615 do_vfp_sp2_from_reg2 (void)
9617 constraint (inst
.operands
[0].imm
!= 2,
9618 _("only two consecutive VFP SP registers allowed here"));
9619 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sm
);
9620 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
9621 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
9625 do_vfp_sp_ldst (void)
9627 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
9628 encode_arm_cp_address (1, FALSE
, TRUE
, 0);
9632 do_vfp_dp_ldst (void)
9634 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
9635 encode_arm_cp_address (1, FALSE
, TRUE
, 0);
9640 vfp_sp_ldstm (enum vfp_ldstm_type ldstm_type
)
9642 if (inst
.operands
[0].writeback
)
9643 inst
.instruction
|= WRITE_BACK
;
9645 constraint (ldstm_type
!= VFP_LDSTMIA
,
9646 _("this addressing mode requires base-register writeback"));
9647 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9648 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sd
);
9649 inst
.instruction
|= inst
.operands
[1].imm
;
9653 vfp_dp_ldstm (enum vfp_ldstm_type ldstm_type
)
9657 if (inst
.operands
[0].writeback
)
9658 inst
.instruction
|= WRITE_BACK
;
9660 constraint (ldstm_type
!= VFP_LDSTMIA
&& ldstm_type
!= VFP_LDSTMIAX
,
9661 _("this addressing mode requires base-register writeback"));
9663 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9664 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dd
);
9666 count
= inst
.operands
[1].imm
<< 1;
9667 if (ldstm_type
== VFP_LDSTMIAX
|| ldstm_type
== VFP_LDSTMDBX
)
9670 inst
.instruction
|= count
;
9674 do_vfp_sp_ldstmia (void)
9676 vfp_sp_ldstm (VFP_LDSTMIA
);
9680 do_vfp_sp_ldstmdb (void)
9682 vfp_sp_ldstm (VFP_LDSTMDB
);
9686 do_vfp_dp_ldstmia (void)
9688 vfp_dp_ldstm (VFP_LDSTMIA
);
9692 do_vfp_dp_ldstmdb (void)
9694 vfp_dp_ldstm (VFP_LDSTMDB
);
9698 do_vfp_xp_ldstmia (void)
9700 vfp_dp_ldstm (VFP_LDSTMIAX
);
9704 do_vfp_xp_ldstmdb (void)
9706 vfp_dp_ldstm (VFP_LDSTMDBX
);
9710 do_vfp_dp_rd_rm (void)
9712 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
9713 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dm
);
9717 do_vfp_dp_rn_rd (void)
9719 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dn
);
9720 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dd
);
9724 do_vfp_dp_rd_rn (void)
9726 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
9727 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dn
);
9731 do_vfp_dp_rd_rn_rm (void)
9733 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
9734 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dn
);
9735 encode_arm_vfp_reg (inst
.operands
[2].reg
, VFP_REG_Dm
);
9741 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
9745 do_vfp_dp_rm_rd_rn (void)
9747 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dm
);
9748 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dd
);
9749 encode_arm_vfp_reg (inst
.operands
[2].reg
, VFP_REG_Dn
);
9752 /* VFPv3 instructions. */
9754 do_vfp_sp_const (void)
9756 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
9757 inst
.instruction
|= (inst
.operands
[1].imm
& 0xf0) << 12;
9758 inst
.instruction
|= (inst
.operands
[1].imm
& 0x0f);
9762 do_vfp_dp_const (void)
9764 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
9765 inst
.instruction
|= (inst
.operands
[1].imm
& 0xf0) << 12;
9766 inst
.instruction
|= (inst
.operands
[1].imm
& 0x0f);
9770 vfp_conv (int srcsize
)
9772 int immbits
= srcsize
- inst
.operands
[1].imm
;
9774 if (srcsize
== 16 && !(immbits
>= 0 && immbits
<= srcsize
))
9776 /* If srcsize is 16, inst.operands[1].imm must be in the range 0-16.
9777 i.e. immbits must be in range 0 - 16. */
9778 inst
.error
= _("immediate value out of range, expected range [0, 16]");
9781 else if (srcsize
== 32 && !(immbits
>= 0 && immbits
< srcsize
))
9783 /* If srcsize is 32, inst.operands[1].imm must be in the range 1-32.
9784 i.e. immbits must be in range 0 - 31. */
9785 inst
.error
= _("immediate value out of range, expected range [1, 32]");
9789 inst
.instruction
|= (immbits
& 1) << 5;
9790 inst
.instruction
|= (immbits
>> 1);
9794 do_vfp_sp_conv_16 (void)
9796 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
9801 do_vfp_dp_conv_16 (void)
9803 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
9808 do_vfp_sp_conv_32 (void)
9810 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
9815 do_vfp_dp_conv_32 (void)
9817 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
9821 /* FPA instructions. Also in a logical order. */
9826 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9827 inst
.instruction
|= inst
.operands
[1].reg
;
/* FPA LFM/SFM (load/store multiple floating registers).  Operand 0 is
   the first register, operand 1 the register count (1-3 encoded via the
   CP_T_X/CP_T_Y bits; a count of 4 leaves both bits clear), operand 2
   the address.  */

static void
do_fpa_ldmstm (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  switch (inst.operands[1].imm)
    {
    case 1: inst.instruction |= CP_T_X;		 break;
    case 2: inst.instruction |= CP_T_Y;		 break;
    case 3: inst.instruction |= CP_T_Y | CP_T_X; break;
    default: abort ();	/* parser guarantees 1-3; anything else is a bug */
    }

  if (inst.instruction & (PRE_INDEX | INDEX_UP))
    {
      /* The instruction specified "ea" or "fd", so we can only accept
	 [Rn]{!}.  The instruction does not really support stacking or
	 unstacking, so we have to emulate these by setting appropriate
	 bits and offsets.  */
      constraint (inst.reloc.exp.X_op != O_constant
		  || inst.reloc.exp.X_add_number != 0,
		  _("this instruction does not support indexing"));

      /* Each register is 12 bytes; synthesize the stack adjustment.  */
      if ((inst.instruction & PRE_INDEX) || inst.operands[2].writeback)
	inst.reloc.exp.X_add_number = 12 * inst.operands[1].imm;

      if (!(inst.instruction & INDEX_UP))
	inst.reloc.exp.X_add_number = -inst.reloc.exp.X_add_number;

      /* Descending stacks become post-indexed addressing.  */
      if (!(inst.instruction & PRE_INDEX) && inst.operands[2].writeback)
	{
	  inst.operands[2].preind = 0;
	  inst.operands[2].postind = 1;
	}
    }

  encode_arm_cp_address (2, TRUE, TRUE, 0);
}
9869 /* iWMMXt instructions: strictly in alphabetical order. */
9872 do_iwmmxt_tandorc (void)
9874 constraint (inst
.operands
[0].reg
!= REG_PC
, _("only r15 allowed here"));
9878 do_iwmmxt_textrc (void)
9880 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9881 inst
.instruction
|= inst
.operands
[1].imm
;
9885 do_iwmmxt_textrm (void)
9887 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9888 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9889 inst
.instruction
|= inst
.operands
[2].imm
;
9893 do_iwmmxt_tinsr (void)
9895 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9896 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
9897 inst
.instruction
|= inst
.operands
[2].imm
;
9901 do_iwmmxt_tmia (void)
9903 inst
.instruction
|= inst
.operands
[0].reg
<< 5;
9904 inst
.instruction
|= inst
.operands
[1].reg
;
9905 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
9909 do_iwmmxt_waligni (void)
9911 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9912 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9913 inst
.instruction
|= inst
.operands
[2].reg
;
9914 inst
.instruction
|= inst
.operands
[3].imm
<< 20;
9918 do_iwmmxt_wmerge (void)
9920 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9921 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9922 inst
.instruction
|= inst
.operands
[2].reg
;
9923 inst
.instruction
|= inst
.operands
[3].imm
<< 21;
9927 do_iwmmxt_wmov (void)
9929 /* WMOV rD, rN is an alias for WOR rD, rN, rN. */
9930 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9931 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9932 inst
.instruction
|= inst
.operands
[1].reg
;
/* WLDRB/WLDRH/WSTRB/WSTRH: byte/halfword wMMX load/store.  The offset
   is scaled by 2, hence the _S2 relocations.  */
/* NOTE(review): the condition selecting between the two relocations was
   lost in extraction; restored as the thumb_mode test per upstream.  */

static void
do_iwmmxt_wldstbh (void)
{
  int reloc;
  inst.instruction |= inst.operands[0].reg << 12;
  if (thumb_mode)
    reloc = BFD_RELOC_ARM_T32_CP_OFF_IMM_S2;
  else
    reloc = BFD_RELOC_ARM_CP_OFF_IMM_S2;
  encode_arm_cp_address (1, TRUE, FALSE, reloc);
}
/* WLDRW/WSTRW: word wMMX/control-register load/store.  A control
   register destination forces the unconditional (0xF) encoding.  */

static void
do_iwmmxt_wldstw (void)
{
  /* RIWR_RIWC clears .isreg for a control register.  */
  if (!inst.operands[0].isreg)
    {
      constraint (inst.cond != COND_ALWAYS, BAD_COND);
      inst.instruction |= 0xf0000000;
    }

  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_cp_address (1, TRUE, TRUE, 0);
}
/* WLDRD/WSTRD: doubleword wMMX load/store.  iWMMXt2 additionally allows
   a register-offset form, which is rewritten in place into the 0xF
   (unconditional) encoding with explicit index/up/writeback bits.  */

static void
do_iwmmxt_wldstd (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2)
      && inst.operands[1].immisreg)
    {
      /* Clear the coprocessor-addressing fields, then rebuild them for
	 the register-offset form.  */
      inst.instruction &= ~0x1a000ff;
      inst.instruction |= (0xfU << 28);
      if (inst.operands[1].preind)
	inst.instruction |= PRE_INDEX;
      if (!inst.operands[1].negative)
	inst.instruction |= INDEX_UP;
      if (inst.operands[1].writeback)
	inst.instruction |= WRITE_BACK;
      inst.instruction |= inst.operands[1].reg << 16;
      inst.instruction |= inst.reloc.exp.X_add_number << 4;
      inst.instruction |= inst.operands[1].imm;
    }
  else
    encode_arm_cp_address (1, TRUE, FALSE, 0);
}
9985 do_iwmmxt_wshufh (void)
9987 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9988 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9989 inst
.instruction
|= ((inst
.operands
[2].imm
& 0xf0) << 16);
9990 inst
.instruction
|= (inst
.operands
[2].imm
& 0x0f);
9994 do_iwmmxt_wzero (void)
9996 /* WZERO reg is an alias for WANDN reg, reg, reg. */
9997 inst
.instruction
|= inst
.operands
[0].reg
;
9998 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9999 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
/* iWMMXt shift/rotate family: either three wMMX registers, or (iWMMXt2
   only) two registers and a 5-bit immediate.  A #0 immediate is not
   encodable directly, so it is rewritten into an equivalent full-width
   rotate (or a WOR for the doubleword forms).  */
/* NOTE(review): the do_rd_rn_rm/do_rd_rn helper calls and the decimal
   case labels were lost in extraction; restored per upstream.  */

static void
do_iwmmxt_wrwrwr_or_imm5 (void)
{
  if (inst.operands[2].isreg)
    do_rd_rn_rm ();
  else
    {
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2),
		  _("immediate operand requires iWMMXt2"));
      do_rd_rn ();
      if (inst.operands[2].imm == 0)
	{
	  switch ((inst.instruction >> 20) & 0xf)
	    {
	    case 4:
	    case 5:
	    case 6:
	    case 7:
	      /* w...h wrd, wrn, #0 -> wrorh wrd, wrn, #16.  */
	      inst.operands[2].imm = 16;
	      inst.instruction = (inst.instruction & 0xff0fffff) | (0x7 << 20);
	      break;

	    case 8:
	    case 9:
	    case 10:
	    case 11:
	      /* w...w wrd, wrn, #0 -> wrorw wrd, wrn, #32.  */
	      inst.operands[2].imm = 32;
	      inst.instruction = (inst.instruction & 0xff0fffff) | (0xb << 20);
	      break;

	    case 12:
	    case 13:
	    case 14:
	    case 15:
	      {
		/* w...d wrd, wrn, #0 -> wor wrd, wrn, wrn.  */
		unsigned long wrn;
		wrn = (inst.instruction >> 16) & 0xf;
		inst.instruction &= 0xff0fff0f;
		inst.instruction |= wrn;
		/* Bail out here; the instruction is now assembled.  */
		return;
	      }
	    }
	}
      /* Map 32 -> 0, etc.  */
      inst.operands[2].imm &= 0x1f;
      inst.instruction |= (0xfU << 28) | ((inst.operands[2].imm & 0x10) << 4)
			  | (inst.operands[2].imm & 0xf);
    }
}
10052 /* Cirrus Maverick instructions. Simple 2-, 3-, and 4-register
10053 operations first, then control, shift, and load/store. */
10055 /* Insns like "foo X,Y,Z". */
10058 do_mav_triple (void)
10060 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
10061 inst
.instruction
|= inst
.operands
[1].reg
;
10062 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
10065 /* Insns like "foo W,X,Y,Z".
10066 where W=MVAX[0:3] and X,Y,Z=MVFX[0:15]. */
10071 inst
.instruction
|= inst
.operands
[0].reg
<< 5;
10072 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
10073 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
10074 inst
.instruction
|= inst
.operands
[3].reg
;
10077 /* cfmvsc32<cond> DSPSC,MVDX[15:0]. */
10079 do_mav_dspsc (void)
10081 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
/* Maverick shift immediate instructions.
   cfsh32<cond> MVFX[15:0],MVFX[15:0],Shift[6:0].
   cfsh64<cond> MVDX[15:0],MVDX[15:0],Shift[6:0].  */

static void
do_mav_shift (void)
{
  int imm = inst.operands[2].imm;

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;

  /* Bits 0-3 of the insn should have bits 0-3 of the immediate.
     Bits 5-7 of the insn should have bits 4-6 of the immediate.
     Bit 4 should be 0.	 */
  imm = (imm & 0xf) | ((imm & 0x70) << 1);

  inst.instruction |= imm;
}
10104 /* XScale instructions. Also sorted arithmetic before move. */
10106 /* Xscale multiply-accumulate (argument parse)
10109 MIAxycc acc0,Rm,Rs. */
10114 inst
.instruction
|= inst
.operands
[1].reg
;
10115 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
10118 /* Xscale move-accumulator-register (argument parse)
10120 MARcc acc0,RdLo,RdHi. */
10125 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
10126 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
10129 /* Xscale move-register-accumulator (argument parse)
10131 MRAcc RdLo,RdHi,acc0. */
10136 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
, BAD_OVERLAP
);
10137 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10138 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
/* Encoding functions relevant only to Thumb.  */

/* inst.operands[i] is a shifted-register operand; encode
   it into inst.instruction in the format used by Thumb32.
   RRX is encoded as ROR #0; otherwise the constant shift amount is
   split across bits 12-14 (imm3) and 6-7 (imm2), with the shift type
   in bits 4-5.  Shift-by-register is rejected here.  */

static void
encode_thumb32_shifted_operand (int i)
{
  unsigned int value = inst.reloc.exp.X_add_number;
  unsigned int shift = inst.operands[i].shift_kind;

  constraint (inst.operands[i].immisreg,
	      _("shift by register not allowed in thumb mode"));
  inst.instruction |= inst.operands[i].reg;
  if (shift == SHIFT_RRX)
    inst.instruction |= SHIFT_ROR << 4;
  else
    {
      constraint (inst.reloc.exp.X_op != O_constant,
		  _("expression too complex"));

      constraint (value > 32
		  || (value == 32 && (shift == SHIFT_LSL
				      || shift == SHIFT_ROR)),
		  _("shift expression is too large"));

      /* Shift of zero is canonicalized to LSL; ASR/LSR #32 is encoded
	 with a zero amount field.  */
      if (value == 0)
	shift = SHIFT_LSL;
      else if (value == 32)
	value = 0;

      inst.instruction |= shift << 4;
      inst.instruction |= (value & 0x1c) << 10;
      inst.instruction |= (value & 0x03) << 6;
    }
}
/* inst.operands[i] was set up by parse_address.  Encode it into a
   Thumb32 format load or store instruction.  Reject forms that cannot
   be used with such instructions.  If is_t is true, reject forms that
   cannot be used with a T instruction; if is_d is true, reject forms
   that cannot be used with a D instruction.  If it is a store insn,
   reject PC in Rn.  */

static void
encode_thumb32_addr_mode (int i, bfd_boolean is_t, bfd_boolean is_d)
{
  const bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);

  constraint (!inst.operands[i].isreg,
	      _("Instruction does not support =N addresses"));

  inst.instruction |= inst.operands[i].reg << 16;
  if (inst.operands[i].immisreg)
    {
      /* [Rn, Rm {, LSL #shift}] — register-offset form.  */
      constraint (is_pc, BAD_PC_ADDRESSING);
      constraint (is_t || is_d, _("cannot use register index with this instruction"));
      constraint (inst.operands[i].negative,
		  _("Thumb does not support negative register indexing"));
      constraint (inst.operands[i].postind,
		  _("Thumb does not support register post-indexing"));
      constraint (inst.operands[i].writeback,
		  _("Thumb does not support register indexing with writeback"));
      constraint (inst.operands[i].shifted && inst.operands[i].shift_kind != SHIFT_LSL,
		  _("Thumb supports only LSL in shifted register indexing"));

      inst.instruction |= inst.operands[i].imm;
      if (inst.operands[i].shifted)
	{
	  constraint (inst.reloc.exp.X_op != O_constant,
		      _("expression too complex"));
	  constraint (inst.reloc.exp.X_add_number < 0
		      || inst.reloc.exp.X_add_number > 3,
		      _("shift out of range"));
	  inst.instruction |= inst.reloc.exp.X_add_number << 4;
	}
      inst.reloc.type = BFD_RELOC_UNUSED;
    }
  else if (inst.operands[i].preind)
    {
      /* [Rn, #off]{!} — immediate pre-indexed form.  */
      constraint (is_pc && inst.operands[i].writeback, BAD_PC_WRITEBACK);
      constraint (is_t && inst.operands[i].writeback,
		  _("cannot use writeback with this instruction"));
      constraint (is_pc && ((inst.instruction & THUMB2_LOAD_BIT) == 0),
		  BAD_PC_ADDRESSING);

      if (is_d)
	{
	  inst.instruction |= 0x01000000;
	  if (inst.operands[i].writeback)
	    inst.instruction |= 0x00200000;
	}
      else
	{
	  inst.instruction |= 0x00000c00;
	  if (inst.operands[i].writeback)
	    inst.instruction |= 0x00000100;
	}
      inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
    }
  else if (inst.operands[i].postind)
    {
      /* [Rn], #off — post-indexed form; writeback is implied.  */
      gas_assert (inst.operands[i].writeback);
      constraint (is_pc, _("cannot use post-indexing with PC-relative addressing"));
      constraint (is_t, _("cannot use post-indexing with this instruction"));

      if (is_d)
	inst.instruction |= 0x00200000;
      else
	inst.instruction |= 0x00000900;
      inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
    }
  else /* unindexed - only for coprocessor */
    inst.error = _("instruction does not accept unindexed addressing");
}
/* Table of Thumb instructions which exist in both 16- and 32-bit
   encodings (the latter only in post-V6T2 cores).  The index is the
   value used in the insns table below.  When there is more than one
   possible 16-bit encoding for the instruction, this table always
   holds variant (1).
   Also contains several pseudo-instructions used during relaxation.  */
#define T16_32_TAB				\
  X(_adc,   4140, eb400000),			\
  X(_adcs,  4140, eb500000),			\
  X(_add,   1c00, eb000000),			\
  X(_adds,  1c00, eb100000),			\
  X(_addi,  0000, f1000000),			\
  X(_addis, 0000, f1100000),			\
  X(_add_pc,000f, f20f0000),			\
  X(_add_sp,000d, f10d0000),			\
  X(_adr,   000f, f20f0000),			\
  X(_and,   4000, ea000000),			\
  X(_ands,  4000, ea100000),			\
  X(_asr,   1000, fa40f000),			\
  X(_asrs,  1000, fa50f000),			\
  X(_b,     e000, f000b000),			\
  X(_bcond, d000, f0008000),			\
  X(_bic,   4380, ea200000),			\
  X(_bics,  4380, ea300000),			\
  X(_cmn,   42c0, eb100f00),			\
  X(_cmp,   2800, ebb00f00),			\
  X(_cpsie, b660, f3af8400),			\
  X(_cpsid, b670, f3af8600),			\
  X(_cpy,   4600, ea4f0000),			\
  X(_dec_sp,80dd, f1ad0d00),			\
  X(_eor,   4040, ea800000),			\
  X(_eors,  4040, ea900000),			\
  X(_inc_sp,00dd, f10d0d00),			\
  X(_ldmia, c800, e8900000),			\
  X(_ldr,   6800, f8500000),			\
  X(_ldrb,  7800, f8100000),			\
  X(_ldrh,  8800, f8300000),			\
  X(_ldrsb, 5600, f9100000),			\
  X(_ldrsh, 5e00, f9300000),			\
  X(_ldr_pc,4800, f85f0000),			\
  X(_ldr_pc2,4800, f85f0000),			\
  X(_ldr_sp,9800, f85d0000),			\
  X(_lsl,   0000, fa00f000),			\
  X(_lsls,  0000, fa10f000),			\
  X(_lsr,   0800, fa20f000),			\
  X(_lsrs,  0800, fa30f000),			\
  X(_mov,   2000, ea4f0000),			\
  X(_movs,  2000, ea5f0000),			\
  X(_mul,   4340, fb00f000),			\
  X(_muls,  4340, ffffffff), /* no 32b muls */	\
  X(_mvn,   43c0, ea6f0000),			\
  X(_mvns,  43c0, ea7f0000),			\
  X(_neg,   4240, f1c00000), /* rsb #0 */	\
  X(_negs,  4240, f1d00000), /* rsbs #0 */	\
  X(_orr,   4300, ea400000),			\
  X(_orrs,  4300, ea500000),			\
  X(_pop,   bc00, e8bd0000), /* ldmia sp!,... */ \
  X(_push,  b400, e92d0000), /* stmdb sp!,... */ \
  X(_rev,   ba00, fa90f080),			\
  X(_rev16, ba40, fa90f090),			\
  X(_revsh, bac0, fa90f0b0),			\
  X(_ror,   41c0, fa60f000),			\
  X(_rors,  41c0, fa70f000),			\
  X(_sbc,   4180, eb600000),			\
  X(_sbcs,  4180, eb700000),			\
  X(_stmia, c000, e8800000),			\
  X(_str,   6000, f8400000),			\
  X(_strb,  7000, f8000000),			\
  X(_strh,  8000, f8200000),			\
  X(_str_sp,9000, f84d0000),			\
  X(_sub,   1e00, eba00000),			\
  X(_subs,  1e00, ebb00000),			\
  X(_subi,  8000, f1a00000),			\
  X(_subis, 8000, f1b00000),			\
  X(_sxtb,  b240, fa4ff080),			\
  X(_sxth,  b200, fa0ff080),			\
  X(_tst,   4200, ea100f00),			\
  X(_uxtb,  b2c0, fa5ff080),			\
  X(_uxth,  b280, fa1ff080),			\
  X(_nop,   bf00, f3af8000),			\
  X(_yield, bf10, f3af8001),			\
  X(_wfe,   bf20, f3af8002),			\
  X(_wfi,   bf30, f3af8003),			\
  X(_sev,   bf40, f3af8004),			\
  X(_sevl,  bf50, f3af8005),			\
  X(_udf,   de00, f7f0a000)

/* To catch errors in encoding functions, the codes are all offset by
   0xF800, putting them in one of the 32-bit prefix ranges, ergo undefined
   as 16-bit instructions.  */
#define X(a,b,c) T_MNEM##a
enum t16_32_codes { T16_32_OFFSET = 0xF7FF, T16_32_TAB };
#undef X

/* 16-bit opcode for each T_MNEM code; index relative to T16_32_OFFSET.  */
#define X(a,b,c) 0x##b
static const unsigned short thumb_op16[] = { T16_32_TAB };
#define THUMB_OP16(n) (thumb_op16[(n) - (T16_32_OFFSET + 1)])
#undef X

/* 32-bit opcode for each T_MNEM code; bit 20 is the S (flag-setting) bit.  */
#define X(a,b,c) 0x##c
static const unsigned int thumb_op32[] = { T16_32_TAB };
#define THUMB_OP32(n)        (thumb_op32[(n) - (T16_32_OFFSET + 1)])
#define THUMB_SETS_FLAGS(n)  (THUMB_OP32 (n) & 0x00100000)
#undef X
/* Thumb instruction encoders, in alphabetical order.  */

/* ADDW or SUBW.  */

static void
do_t_add_sub_w (void)
{
  int Rd, Rn;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[1].reg;

  /* If Rn is REG_PC, this is ADR; if Rn is REG_SP, then this
     is the SP-{plus,minus}-immediate form of the instruction.  */
  if (Rn == REG_SP)
    constraint (Rd == REG_PC, BAD_PC);
  else
    reject_bad_reg (Rd);

  inst.instruction |= (Rn << 16) | (Rd << 8);
  inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
}
/* Parse an add or subtract instruction.  We get here with inst.instruction
   equalling any of THUMB_OPCODE_add, adds, sub, or subs.  Chooses between
   the many 16-bit encodings, the relaxable forms, ADDW/SUBW, SUBS PC,LR
   and the generic 32-bit data-processing encoding.  */

static void
do_t_add_sub (void)
{
  int Rd, Rs, Rn;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg    /* Rd, Rs, foo */
	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */

  if (Rd == REG_PC)
    set_it_insn_type_last ();

  if (unified_syntax)
    {
      bfd_boolean flags;
      bfd_boolean narrow;
      int opcode;

      flags = (inst.instruction == T_MNEM_adds
	       || inst.instruction == T_MNEM_subs);
      if (flags)
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();
      if (!inst.operands[2].isreg)
	{
	  /* Immediate third operand.  */
	  int add;

	  constraint (Rd == REG_SP && Rs != REG_SP, BAD_SP);

	  add = (inst.instruction == T_MNEM_add
		 || inst.instruction == T_MNEM_adds);
	  opcode = 0;
	  if (inst.size_req != 4)
	    {
	      /* Attempt to use a narrow opcode, with relaxation if
		 appropriate.  */
	      if (Rd == REG_SP && Rs == REG_SP && !flags)
		opcode = add ? T_MNEM_inc_sp : T_MNEM_dec_sp;
	      else if (Rd <= 7 && Rs == REG_SP && add && !flags)
		opcode = T_MNEM_add_sp;
	      else if (Rd <= 7 && Rs == REG_PC && add && !flags)
		opcode = T_MNEM_add_pc;
	      else if (Rd <= 7 && Rs <= 7 && narrow)
		{
		  if (flags)
		    opcode = add ? T_MNEM_addis : T_MNEM_subis;
		  else
		    opcode = add ? T_MNEM_addi : T_MNEM_subi;
		}
	      if (opcode)
		{
		  inst.instruction = THUMB_OP16(opcode);
		  inst.instruction |= (Rd << 4) | Rs;
		  if (inst.reloc.type < BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
		      || inst.reloc.type > BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC)
		    inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
		  if (inst.size_req != 2)
		    inst.relax = opcode;
		}
	      else
		constraint (inst.size_req == 2, BAD_HIREG);
	    }
	  if (inst.size_req == 4
	      || (inst.size_req != 2 && !opcode))
	    {
	      if (Rd == REG_PC)
		{
		  constraint (add, BAD_PC);
		  constraint (Rs != REG_LR || inst.instruction != T_MNEM_subs,
			      _("only SUBS PC, LR, #const allowed"));
		  constraint (inst.reloc.exp.X_op != O_constant,
			      _("expression too complex"));
		  constraint (inst.reloc.exp.X_add_number < 0
			      || inst.reloc.exp.X_add_number > 0xff,
			      _("immediate value out of range"));
		  inst.instruction = T2_SUBS_PC_LR
				     | inst.reloc.exp.X_add_number;
		  inst.reloc.type = BFD_RELOC_UNUSED;
		  return;
		}
	      else if (Rs == REG_PC)
		{
		  /* Always use addw/subw.  */
		  inst.instruction = add ? 0xf20f0000 : 0xf2af0000;
		  inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
		}
	      else
		{
		  inst.instruction = THUMB_OP32 (inst.instruction);
		  inst.instruction = (inst.instruction & 0xe1ffffff)
				     | 0x10000000;
		  if (flags)
		    inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
		  else
		    inst.reloc.type = BFD_RELOC_ARM_T32_ADD_IMM;
		}
	      inst.instruction |= Rd << 8;
	      inst.instruction |= Rs << 16;
	    }
	}
      else
	{
	  /* Register third operand, possibly shifted.  */
	  unsigned int value = inst.reloc.exp.X_add_number;
	  unsigned int shift = inst.operands[2].shift_kind;

	  Rn = inst.operands[2].reg;
	  /* See if we can do this with a 16-bit instruction.  */
	  if (!inst.operands[2].shifted && inst.size_req != 4)
	    {
	      if (Rd > 7 || Rs > 7 || Rn > 7)
		narrow = FALSE;

	      if (narrow)
		{
		  inst.instruction = ((inst.instruction == T_MNEM_adds
				       || inst.instruction == T_MNEM_add)
				      ? T_OPCODE_ADD_R3
				      : T_OPCODE_SUB_R3);
		  inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
		  return;
		}

	      if (inst.instruction == T_MNEM_add && (Rd == Rs || Rd == Rn))
		{
		  /* Thumb-1 cores (except v6-M) require at least one high
		     register in a narrow non flag setting add.  */
		  if (Rd > 7 || Rn > 7
		      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2)
		      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_msr))
		    {
		      if (Rd == Rn)
			{
			  Rn = Rs;
			  Rs = Rd;
			}
		      inst.instruction = T_OPCODE_ADD_HI;
		      inst.instruction |= (Rd & 8) << 4;
		      inst.instruction |= (Rd & 7);
		      inst.instruction |= Rn << 3;
		      return;
		    }
		}
	    }

	  constraint (Rd == REG_PC, BAD_PC);
	  constraint (Rd == REG_SP && Rs != REG_SP, BAD_SP);
	  constraint (Rs == REG_PC, BAD_PC);
	  reject_bad_reg (Rn);

	  /* If we get here, it can't be done in 16 bits.  */
	  constraint (inst.operands[2].shifted && inst.operands[2].immisreg,
		      _("shift must be constant"));
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  constraint (Rd == REG_SP && Rs == REG_SP && value > 3,
		      _("shift value over 3 not allowed in thumb mode"));
	  constraint (Rd == REG_SP && Rs == REG_SP && shift != SHIFT_LSL,
		      _("only LSL shift allowed in thumb mode"));
	  encode_thumb32_shifted_operand (2);
	}
    }
  else
    {
      /* Non-unified (divided) syntax: 16-bit encodings only.  */
      constraint (inst.instruction == T_MNEM_adds
		  || inst.instruction == T_MNEM_subs,
		  BAD_THUMB32);

      if (!inst.operands[2].isreg) /* Rd, Rs, #imm */
	{
	  constraint ((Rd > 7 && (Rd != REG_SP || Rs != REG_SP))
		      || (Rs > 7 && Rs != REG_SP && Rs != REG_PC),
		      BAD_HIREG);

	  inst.instruction = (inst.instruction == T_MNEM_add
			      ? 0x0000 : 0x8000);
	  inst.instruction |= (Rd << 4) | Rs;
	  inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
	  return;
	}

      Rn = inst.operands[2].reg;
      constraint (inst.operands[2].shifted, _("unshifted register required"));

      /* We now have Rd, Rs, and Rn set to registers.  */
      if (Rd > 7 || Rs > 7 || Rn > 7)
	{
	  /* Can't do this for SUB.  */
	  constraint (inst.instruction == T_MNEM_sub, BAD_HIREG);
	  inst.instruction = T_OPCODE_ADD_HI;
	  inst.instruction |= (Rd & 8) << 4;
	  inst.instruction |= (Rd & 7);
	  if (Rs == Rd)
	    inst.instruction |= Rn << 3;
	  else if (Rn == Rd)
	    inst.instruction |= Rs << 3;
	  else
	    constraint (1, _("dest must overlap one source register"));
	}
      else
	{
	  inst.instruction = (inst.instruction == T_MNEM_add
			      ? T_OPCODE_ADD_R3 : T_OPCODE_SUB_R3);
	  inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
	}
    }
}
/* Thumb ADR: load a PC-relative address into Rd.  Picks the relaxable
   16-bit form, the 32-bit ADD PC form, or the fixed 16-bit form
   depending on syntax mode, size requirement and register number.  */

static void
do_t_adr (void)
{
  unsigned Rd;

  Rd = inst.operands[0].reg;
  reject_bad_reg (Rd);

  /* Result must be a low register when no explicit size was requested.  */
  if (unified_syntax && inst.size_req == 0 && Rd <= 7)
    {
      /* Defer to section relaxation.  */
      inst.relax = inst.instruction;
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd << 4;
    }
  else if (unified_syntax && inst.size_req != 2)
    {
      /* Generate a 32-bit opcode.  */
      inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= Rd << 8;
      inst.reloc.type = BFD_RELOC_ARM_T32_ADD_PC12;
      inst.reloc.pc_rel = 1;
    }
  else
    {
      /* Generate a 16-bit opcode.  */
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
      inst.reloc.exp.X_add_number -= 4; /* PC relative adjust.  */
      inst.reloc.pc_rel = 1;
      inst.instruction |= Rd << 4;
    }
}
/* Arithmetic instructions for which there is just one 16-bit
   instruction encoding, and it allows only two low registers.
   For maximal compatibility with ARM syntax, we allow three register
   operands even when Thumb-32 instructions are not available, as long
   as the first two are identical.  For instance, both "sbc r0,r1" and
   "sbc r0,r0,r1" are allowed.  */

static void
do_t_arit3 (void)
{
  int Rd, Rs, Rn;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg    /* Rd, Rs, foo */
	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */
  Rn = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rs);
  if (inst.operands[2].isreg)
    reject_bad_reg (Rn);

  if (unified_syntax)
    {
      if (!inst.operands[2].isreg)
	{
	  /* For an immediate, we always generate a 32-bit opcode;
	     section relaxation will shrink it later if possible.  */
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  bfd_boolean narrow;

	  /* See if we can do this with a 16-bit instruction.  */
	  if (THUMB_SETS_FLAGS (inst.instruction))
	    narrow = !in_it_block ();
	  else
	    narrow = in_it_block ();

	  if (Rd > 7 || Rn > 7 || Rs > 7)
	    narrow = FALSE;
	  if (inst.operands[2].shifted)
	    narrow = FALSE;
	  if (inst.size_req == 4)
	    narrow = FALSE;

	  if (narrow
	      && Rd == Rs)
	    {
	      inst.instruction = THUMB_OP16 (inst.instruction);
	      inst.instruction |= Rd;
	      inst.instruction |= Rn << 3;
	      return;
	    }

	  /* If we get here, it can't be done in 16 bits.  */
	  constraint (inst.operands[2].shifted
		      && inst.operands[2].immisreg,
		      _("shift must be constant"));
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  encode_thumb32_shifted_operand (2);
	}
    }
  else
    {
      /* On its face this is a lie - the instruction does set the
	 flags.  However, the only supported mnemonic in this mode
	 says it doesn't.  */
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
		  _("unshifted register required"));
      constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);
      constraint (Rd != Rs,
		  _("dest and source1 must be the same register"));

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;
      inst.instruction |= Rn << 3;
    }
}
/* Similarly, but for instructions where the arithmetic operation is
   commutative, so we can allow either of them to be different from
   the destination operand in a 16-bit instruction.  For instance, all
   three of "adc r0,r1", "adc r0,r0,r1", and "adc r0,r1,r0" are
   accepted.  */

static void
do_t_arit3c (void)
{
  int Rd, Rs, Rn;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg    /* Rd, Rs, foo */
	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */
  Rn = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rs);
  if (inst.operands[2].isreg)
    reject_bad_reg (Rn);

  if (unified_syntax)
    {
      if (!inst.operands[2].isreg)
	{
	  /* For an immediate, we always generate a 32-bit opcode;
	     section relaxation will shrink it later if possible.  */
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  bfd_boolean narrow;

	  /* See if we can do this with a 16-bit instruction.  */
	  if (THUMB_SETS_FLAGS (inst.instruction))
	    narrow = !in_it_block ();
	  else
	    narrow = in_it_block ();

	  if (Rd > 7 || Rn > 7 || Rs > 7)
	    narrow = FALSE;
	  if (inst.operands[2].shifted)
	    narrow = FALSE;
	  if (inst.size_req == 4)
	    narrow = FALSE;

	  if (narrow)
	    {
	      /* Commutativity: either source may coincide with Rd.  */
	      if (Rd == Rs)
		{
		  inst.instruction = THUMB_OP16 (inst.instruction);
		  inst.instruction |= Rd;
		  inst.instruction |= Rn << 3;
		  return;
		}
	      if (Rd == Rn)
		{
		  inst.instruction = THUMB_OP16 (inst.instruction);
		  inst.instruction |= Rd;
		  inst.instruction |= Rs << 3;
		  return;
		}
	    }

	  /* If we get here, it can't be done in 16 bits.  */
	  constraint (inst.operands[2].shifted
		      && inst.operands[2].immisreg,
		      _("shift must be constant"));
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  encode_thumb32_shifted_operand (2);
	}
    }
  else
    {
      /* On its face this is a lie - the instruction does set the
	 flags.  However, the only supported mnemonic in this mode
	 says it doesn't.  */
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
		  _("unshifted register required"));
      constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;

      if (Rd == Rs)
	inst.instruction |= Rn << 3;
      else if (Rd == Rn)
	inst.instruction |= Rs << 3;
      else
	constraint (1, _("dest must overlap one source register"));
    }
}
/* Thumb BFC: clear a bit-field.  Operand 1 is the lsb, operand 2 the
   width; the encoding wants lsb and msb.  */

static void
do_t_bfc (void)
{
  unsigned Rd;
  unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
  constraint (msb > 32, _("bit-field extends past end of register"));
  /* The instruction encoding stores the LSB and MSB,
     not the LSB and width.  */
  Rd = inst.operands[0].reg;
  reject_bad_reg (Rd);
  inst.instruction |= Rd << 8;
  /* lsb is split: imm3 in bits 12-14, imm2 in bits 6-7.  */
  inst.instruction |= (inst.operands[1].imm & 0x1c) << 10;
  inst.instruction |= (inst.operands[1].imm & 0x03) << 6;
  inst.instruction |= msb - 1;
}
/* Thumb BFI: insert a bit-field from Rn into Rd.  A #0 second operand
   degenerates to BFC.  */

static void
do_t_bfi (void)
{
  int Rd, Rn;
  unsigned int msb;

  Rd = inst.operands[0].reg;
  reject_bad_reg (Rd);

  /* #0 in second position is alternative syntax for bfc, which is
     the same instruction but with REG_PC in the Rm field.  */
  if (!inst.operands[1].isreg)
    Rn = REG_PC;
  else
    {
      Rn = inst.operands[1].reg;
      reject_bad_reg (Rn);
    }

  msb = inst.operands[2].imm + inst.operands[3].imm;
  constraint (msb > 32, _("bit-field extends past end of register"));
  /* The instruction encoding stores the LSB and MSB,
     not the LSB and width.  */
  inst.instruction |= Rd << 8;
  inst.instruction |= Rn << 16;
  inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
  inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
  inst.instruction |= msb - 1;
}
10876 Rd
= inst
.operands
[0].reg
;
10877 Rn
= inst
.operands
[1].reg
;
10879 reject_bad_reg (Rd
);
10880 reject_bad_reg (Rn
);
10882 constraint (inst
.operands
[2].imm
+ inst
.operands
[3].imm
> 32,
10883 _("bit-field extends past end of register"));
10884 inst
.instruction
|= Rd
<< 8;
10885 inst
.instruction
|= Rn
<< 16;
10886 inst
.instruction
|= (inst
.operands
[2].imm
& 0x1c) << 10;
10887 inst
.instruction
|= (inst
.operands
[2].imm
& 0x03) << 6;
10888 inst
.instruction
|= inst
.operands
[3].imm
- 1;
/* ARM V5 Thumb BLX (argument parse)
	BLX <target_addr>	which is BLX(1)
	BLX <Rm>		which is BLX(2)
   Unfortunately, there are two different opcodes for this mnemonic.
   So, the insns[].value is not used, and the code here zaps values
	into inst.instruction.

   ??? How to take advantage of the additional two bits of displacement
   available in Thumb32 mode?  Need new relocation?  */

static void
do_t_blx (void)
{
  set_it_insn_type_last ();

  if (inst.operands[0].isreg)
    {
      constraint (inst.operands[0].reg == REG_PC, BAD_PC);
      /* We have a register, so this is BLX(2).  */
      inst.instruction |= inst.operands[0].reg << 3;
    }
  else
    {
      /* No register.  This must be BLX(1).  */
      inst.instruction = 0xf000e800;
      encode_branch (BFD_RELOC_THUMB_PCREL_BLX);
    }
}
/* Thumb B/Bcc: choose between the 16-bit and 32-bit branch encodings
   and pick the matching PC-relative relocation.  Conditional branches
   inside an IT block lose their own condition (the IT block supplies
   it) and are encoded as unconditional.  */

static void
do_t_branch (void)
{
  int opcode;
  int cond;
  int reloc;

  set_it_insn_type (IF_INSIDE_IT_LAST_INSN);

  if (in_it_block ())
    {
      /* Conditional branches inside IT blocks are encoded as unconditional
	 branches.  */
      cond = COND_ALWAYS;
    }
  else
    cond = inst.cond;

  if (cond != COND_ALWAYS)
    opcode = T_MNEM_bcond;
  else
    opcode = inst.instruction;

  if (unified_syntax
      && (inst.size_req == 4
	  || (inst.size_req != 2
	      && (inst.operands[0].hasreloc
		  || inst.reloc.exp.X_op == O_constant))))
    {
      inst.instruction = THUMB_OP32(opcode);
      if (cond == COND_ALWAYS)
	reloc = BFD_RELOC_THUMB_PCREL_BRANCH25;
      else
	{
	  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2),
		      _("selected architecture does not support "
			"wide conditional branch instruction"));

	  gas_assert (cond != 0xF);
	  inst.instruction |= cond << 22;
	  reloc = BFD_RELOC_THUMB_PCREL_BRANCH20;
	}
    }
  else
    {
      inst.instruction = THUMB_OP16(opcode);
      if (cond == COND_ALWAYS)
	reloc = BFD_RELOC_THUMB_PCREL_BRANCH12;
      else
	{
	  inst.instruction |= cond << 8;
	  reloc = BFD_RELOC_THUMB_PCREL_BRANCH9;
	}
      /* Allow section relaxation.  */
      if (unified_syntax && inst.size_req != 2)
	inst.relax = opcode;
    }
  inst.reloc.type = reloc;
  inst.reloc.pc_rel = 1;
}
10982 /* Actually do the work for Thumb state bkpt and hlt. The only difference
10983 between the two is the maximum immediate allowed - which is passed in
10986 do_t_bkpt_hlt1 (int range
)
10988 constraint (inst
.cond
!= COND_ALWAYS
,
10989 _("instruction is always unconditional"));
10990 if (inst
.operands
[0].present
)
10992 constraint (inst
.operands
[0].imm
> range
,
10993 _("immediate value out of range"));
10994 inst
.instruction
|= inst
.operands
[0].imm
;
10997 set_it_insn_type (NEUTRAL_IT_INSN
);
/* Thumb HLT: immediate limited to 63.  */

static void
do_t_hlt (void)
{
  do_t_bkpt_hlt1 (63);
}
/* Thumb BKPT: immediate limited to 255.  */

static void
do_t_bkpt (void)
{
  do_t_bkpt_hlt1 (255);
}
/* Thumb BL: 23-bit PC-relative branch-and-link.  */

static void
do_t_branch23 (void)
{
  set_it_insn_type_last ();
  encode_branch (BFD_RELOC_THUMB_PCREL_BRANCH23);

  /* md_apply_fix blows up with 'bl foo(PLT)' where foo is defined in
     this file.  We used to simply ignore the PLT reloc type here --
     the branch encoding is now needed to deal with TLSCALL relocs.
     So if we see a PLT reloc now, put it back to how it used to be to
     keep the preexisting behaviour.  */
  if (inst.reloc.type == BFD_RELOC_ARM_PLT32)
    inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH23;

#if defined(OBJ_COFF)
  /* If the destination of the branch is a defined symbol which does not have
     the THUMB_FUNC attribute, then we must be calling a function which has
     the (interfacearm) attribute.  We look for the Thumb entry point to that
     function and change the branch to refer to that function instead.	 */
  if (	inst.reloc.exp.X_op == O_symbol
      && inst.reloc.exp.X_add_symbol != NULL
      && S_IS_DEFINED (inst.reloc.exp.X_add_symbol)
      && ! THUMB_IS_FUNC (inst.reloc.exp.X_add_symbol))
    inst.reloc.exp.X_add_symbol =
      find_real_start (inst.reloc.exp.X_add_symbol);
#endif
}
11043 set_it_insn_type_last ();
11044 inst
.instruction
|= inst
.operands
[0].reg
<< 3;
11045 /* ??? FIXME: Should add a hacky reloc here if reg is REG_PC. The reloc
11046 should cause the alignment to be checked once it is known. This is
11047 because BX PC only works if the instruction is word aligned. */
11055 set_it_insn_type_last ();
11056 Rm
= inst
.operands
[0].reg
;
11057 reject_bad_reg (Rm
);
11058 inst
.instruction
|= Rm
<< 16;
11067 Rd
= inst
.operands
[0].reg
;
11068 Rm
= inst
.operands
[1].reg
;
11070 reject_bad_reg (Rd
);
11071 reject_bad_reg (Rm
);
11073 inst
.instruction
|= Rd
<< 8;
11074 inst
.instruction
|= Rm
<< 16;
11075 inst
.instruction
|= Rm
;
11081 set_it_insn_type (OUTSIDE_IT_INSN
);
11082 inst
.instruction
|= inst
.operands
[0].imm
;
11088 set_it_insn_type (OUTSIDE_IT_INSN
);
11090 && (inst
.operands
[1].present
|| inst
.size_req
== 4)
11091 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6_notm
))
11093 unsigned int imod
= (inst
.instruction
& 0x0030) >> 4;
11094 inst
.instruction
= 0xf3af8000;
11095 inst
.instruction
|= imod
<< 9;
11096 inst
.instruction
|= inst
.operands
[0].imm
<< 5;
11097 if (inst
.operands
[1].present
)
11098 inst
.instruction
|= 0x100 | inst
.operands
[1].imm
;
11102 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
)
11103 && (inst
.operands
[0].imm
& 4),
11104 _("selected processor does not support 'A' form "
11105 "of this instruction"));
11106 constraint (inst
.operands
[1].present
|| inst
.size_req
== 4,
11107 _("Thumb does not support the 2-argument "
11108 "form of this instruction"));
11109 inst
.instruction
|= inst
.operands
[0].imm
;
11113 /* THUMB CPY instruction (argument parse). */
11118 if (inst
.size_req
== 4)
11120 inst
.instruction
= THUMB_OP32 (T_MNEM_mov
);
11121 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
11122 inst
.instruction
|= inst
.operands
[1].reg
;
11126 inst
.instruction
|= (inst
.operands
[0].reg
& 0x8) << 4;
11127 inst
.instruction
|= (inst
.operands
[0].reg
& 0x7);
11128 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
11135 set_it_insn_type (OUTSIDE_IT_INSN
);
11136 constraint (inst
.operands
[0].reg
> 7, BAD_HIREG
);
11137 inst
.instruction
|= inst
.operands
[0].reg
;
11138 inst
.reloc
.pc_rel
= 1;
11139 inst
.reloc
.type
= BFD_RELOC_THUMB_PCREL_BRANCH7
;
11145 inst
.instruction
|= inst
.operands
[0].imm
;
11151 unsigned Rd
, Rn
, Rm
;
11153 Rd
= inst
.operands
[0].reg
;
11154 Rn
= (inst
.operands
[1].present
11155 ? inst
.operands
[1].reg
: Rd
);
11156 Rm
= inst
.operands
[2].reg
;
11158 reject_bad_reg (Rd
);
11159 reject_bad_reg (Rn
);
11160 reject_bad_reg (Rm
);
11162 inst
.instruction
|= Rd
<< 8;
11163 inst
.instruction
|= Rn
<< 16;
11164 inst
.instruction
|= Rm
;
11170 if (unified_syntax
&& inst
.size_req
== 4)
11171 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
11173 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11179 unsigned int cond
= inst
.operands
[0].imm
;
11181 set_it_insn_type (IT_INSN
);
11182 now_it
.mask
= (inst
.instruction
& 0xf) | 0x10;
11184 now_it
.warn_deprecated
= FALSE
;
11186 /* If the condition is a negative condition, invert the mask. */
11187 if ((cond
& 0x1) == 0x0)
11189 unsigned int mask
= inst
.instruction
& 0x000f;
11191 if ((mask
& 0x7) == 0)
11193 /* No conversion needed. */
11194 now_it
.block_length
= 1;
11196 else if ((mask
& 0x3) == 0)
11199 now_it
.block_length
= 2;
11201 else if ((mask
& 0x1) == 0)
11204 now_it
.block_length
= 3;
11209 now_it
.block_length
= 4;
11212 inst
.instruction
&= 0xfff0;
11213 inst
.instruction
|= mask
;
11216 inst
.instruction
|= cond
<< 4;
11219 /* Helper function used for both push/pop and ldm/stm. */
11221 encode_thumb2_ldmstm (int base
, unsigned mask
, bfd_boolean writeback
)
11225 load
= (inst
.instruction
& (1 << 20)) != 0;
11227 if (mask
& (1 << 13))
11228 inst
.error
= _("SP not allowed in register list");
11230 if ((mask
& (1 << base
)) != 0
11232 inst
.error
= _("having the base register in the register list when "
11233 "using write back is UNPREDICTABLE");
11237 if (mask
& (1 << 15))
11239 if (mask
& (1 << 14))
11240 inst
.error
= _("LR and PC should not both be in register list");
11242 set_it_insn_type_last ();
11247 if (mask
& (1 << 15))
11248 inst
.error
= _("PC not allowed in register list");
11251 if ((mask
& (mask
- 1)) == 0)
11253 /* Single register transfers implemented as str/ldr. */
11256 if (inst
.instruction
& (1 << 23))
11257 inst
.instruction
= 0x00000b04; /* ia! -> [base], #4 */
11259 inst
.instruction
= 0x00000d04; /* db! -> [base, #-4]! */
11263 if (inst
.instruction
& (1 << 23))
11264 inst
.instruction
= 0x00800000; /* ia -> [base] */
11266 inst
.instruction
= 0x00000c04; /* db -> [base, #-4] */
11269 inst
.instruction
|= 0xf8400000;
11271 inst
.instruction
|= 0x00100000;
11273 mask
= ffs (mask
) - 1;
11276 else if (writeback
)
11277 inst
.instruction
|= WRITE_BACK
;
11279 inst
.instruction
|= mask
;
11280 inst
.instruction
|= base
<< 16;
11286 /* This really doesn't seem worth it. */
11287 constraint (inst
.reloc
.type
!= BFD_RELOC_UNUSED
,
11288 _("expression too complex"));
11289 constraint (inst
.operands
[1].writeback
,
11290 _("Thumb load/store multiple does not support {reglist}^"));
11292 if (unified_syntax
)
11294 bfd_boolean narrow
;
11298 /* See if we can use a 16-bit instruction. */
11299 if (inst
.instruction
< 0xffff /* not ldmdb/stmdb */
11300 && inst
.size_req
!= 4
11301 && !(inst
.operands
[1].imm
& ~0xff))
11303 mask
= 1 << inst
.operands
[0].reg
;
11305 if (inst
.operands
[0].reg
<= 7)
11307 if (inst
.instruction
== T_MNEM_stmia
11308 ? inst
.operands
[0].writeback
11309 : (inst
.operands
[0].writeback
11310 == !(inst
.operands
[1].imm
& mask
)))
11312 if (inst
.instruction
== T_MNEM_stmia
11313 && (inst
.operands
[1].imm
& mask
)
11314 && (inst
.operands
[1].imm
& (mask
- 1)))
11315 as_warn (_("value stored for r%d is UNKNOWN"),
11316 inst
.operands
[0].reg
);
11318 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11319 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
11320 inst
.instruction
|= inst
.operands
[1].imm
;
11323 else if ((inst
.operands
[1].imm
& (inst
.operands
[1].imm
-1)) == 0)
11325 /* This means 1 register in reg list one of 3 situations:
11326 1. Instruction is stmia, but without writeback.
11327 2. lmdia without writeback, but with Rn not in
11329 3. ldmia with writeback, but with Rn in reglist.
11330 Case 3 is UNPREDICTABLE behaviour, so we handle
11331 case 1 and 2 which can be converted into a 16-bit
11332 str or ldr. The SP cases are handled below. */
11333 unsigned long opcode
;
11334 /* First, record an error for Case 3. */
11335 if (inst
.operands
[1].imm
& mask
11336 && inst
.operands
[0].writeback
)
11338 _("having the base register in the register list when "
11339 "using write back is UNPREDICTABLE");
11341 opcode
= (inst
.instruction
== T_MNEM_stmia
? T_MNEM_str
11343 inst
.instruction
= THUMB_OP16 (opcode
);
11344 inst
.instruction
|= inst
.operands
[0].reg
<< 3;
11345 inst
.instruction
|= (ffs (inst
.operands
[1].imm
)-1);
11349 else if (inst
.operands
[0] .reg
== REG_SP
)
11351 if (inst
.operands
[0].writeback
)
11354 THUMB_OP16 (inst
.instruction
== T_MNEM_stmia
11355 ? T_MNEM_push
: T_MNEM_pop
);
11356 inst
.instruction
|= inst
.operands
[1].imm
;
11359 else if ((inst
.operands
[1].imm
& (inst
.operands
[1].imm
-1)) == 0)
11362 THUMB_OP16 (inst
.instruction
== T_MNEM_stmia
11363 ? T_MNEM_str_sp
: T_MNEM_ldr_sp
);
11364 inst
.instruction
|= ((ffs (inst
.operands
[1].imm
)-1) << 8);
11372 if (inst
.instruction
< 0xffff)
11373 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
11375 encode_thumb2_ldmstm (inst
.operands
[0].reg
, inst
.operands
[1].imm
,
11376 inst
.operands
[0].writeback
);
11381 constraint (inst
.operands
[0].reg
> 7
11382 || (inst
.operands
[1].imm
& ~0xff), BAD_HIREG
);
11383 constraint (inst
.instruction
!= T_MNEM_ldmia
11384 && inst
.instruction
!= T_MNEM_stmia
,
11385 _("Thumb-2 instruction only valid in unified syntax"));
11386 if (inst
.instruction
== T_MNEM_stmia
)
11388 if (!inst
.operands
[0].writeback
)
11389 as_warn (_("this instruction will write back the base register"));
11390 if ((inst
.operands
[1].imm
& (1 << inst
.operands
[0].reg
))
11391 && (inst
.operands
[1].imm
& ((1 << inst
.operands
[0].reg
) - 1)))
11392 as_warn (_("value stored for r%d is UNKNOWN"),
11393 inst
.operands
[0].reg
);
11397 if (!inst
.operands
[0].writeback
11398 && !(inst
.operands
[1].imm
& (1 << inst
.operands
[0].reg
)))
11399 as_warn (_("this instruction will write back the base register"));
11400 else if (inst
.operands
[0].writeback
11401 && (inst
.operands
[1].imm
& (1 << inst
.operands
[0].reg
)))
11402 as_warn (_("this instruction will not write back the base register"));
11405 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11406 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
11407 inst
.instruction
|= inst
.operands
[1].imm
;
11414 constraint (!inst
.operands
[1].isreg
|| !inst
.operands
[1].preind
11415 || inst
.operands
[1].postind
|| inst
.operands
[1].writeback
11416 || inst
.operands
[1].immisreg
|| inst
.operands
[1].shifted
11417 || inst
.operands
[1].negative
,
11420 constraint ((inst
.operands
[1].reg
== REG_PC
), BAD_PC
);
11422 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
11423 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
11424 inst
.reloc
.type
= BFD_RELOC_ARM_T32_OFFSET_U8
;
11430 if (!inst
.operands
[1].present
)
11432 constraint (inst
.operands
[0].reg
== REG_LR
,
11433 _("r14 not allowed as first register "
11434 "when second register is omitted"));
11435 inst
.operands
[1].reg
= inst
.operands
[0].reg
+ 1;
11437 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
,
11440 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
11441 inst
.instruction
|= inst
.operands
[1].reg
<< 8;
11442 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
11448 unsigned long opcode
;
11451 if (inst
.operands
[0].isreg
11452 && !inst
.operands
[0].preind
11453 && inst
.operands
[0].reg
== REG_PC
)
11454 set_it_insn_type_last ();
11456 opcode
= inst
.instruction
;
11457 if (unified_syntax
)
11459 if (!inst
.operands
[1].isreg
)
11461 if (opcode
<= 0xffff)
11462 inst
.instruction
= THUMB_OP32 (opcode
);
11463 if (move_or_literal_pool (0, CONST_THUMB
, /*mode_3=*/FALSE
))
11466 if (inst
.operands
[1].isreg
11467 && !inst
.operands
[1].writeback
11468 && !inst
.operands
[1].shifted
&& !inst
.operands
[1].postind
11469 && !inst
.operands
[1].negative
&& inst
.operands
[0].reg
<= 7
11470 && opcode
<= 0xffff
11471 && inst
.size_req
!= 4)
11473 /* Insn may have a 16-bit form. */
11474 Rn
= inst
.operands
[1].reg
;
11475 if (inst
.operands
[1].immisreg
)
11477 inst
.instruction
= THUMB_OP16 (opcode
);
11479 if (Rn
<= 7 && inst
.operands
[1].imm
<= 7)
11481 else if (opcode
!= T_MNEM_ldr
&& opcode
!= T_MNEM_str
)
11482 reject_bad_reg (inst
.operands
[1].imm
);
11484 else if ((Rn
<= 7 && opcode
!= T_MNEM_ldrsh
11485 && opcode
!= T_MNEM_ldrsb
)
11486 || ((Rn
== REG_PC
|| Rn
== REG_SP
) && opcode
== T_MNEM_ldr
)
11487 || (Rn
== REG_SP
&& opcode
== T_MNEM_str
))
11494 if (inst
.reloc
.pc_rel
)
11495 opcode
= T_MNEM_ldr_pc2
;
11497 opcode
= T_MNEM_ldr_pc
;
11501 if (opcode
== T_MNEM_ldr
)
11502 opcode
= T_MNEM_ldr_sp
;
11504 opcode
= T_MNEM_str_sp
;
11506 inst
.instruction
= inst
.operands
[0].reg
<< 8;
11510 inst
.instruction
= inst
.operands
[0].reg
;
11511 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
11513 inst
.instruction
|= THUMB_OP16 (opcode
);
11514 if (inst
.size_req
== 2)
11515 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_OFFSET
;
11517 inst
.relax
= opcode
;
11521 /* Definitely a 32-bit variant. */
11523 /* Warning for Erratum 752419. */
11524 if (opcode
== T_MNEM_ldr
11525 && inst
.operands
[0].reg
== REG_SP
11526 && inst
.operands
[1].writeback
== 1
11527 && !inst
.operands
[1].immisreg
)
11529 if (no_cpu_selected ()
11530 || (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v7
)
11531 && !ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v7a
)
11532 && !ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v7r
)))
11533 as_warn (_("This instruction may be unpredictable "
11534 "if executed on M-profile cores "
11535 "with interrupts enabled."));
11538 /* Do some validations regarding addressing modes. */
11539 if (inst
.operands
[1].immisreg
)
11540 reject_bad_reg (inst
.operands
[1].imm
);
11542 constraint (inst
.operands
[1].writeback
== 1
11543 && inst
.operands
[0].reg
== inst
.operands
[1].reg
,
11546 inst
.instruction
= THUMB_OP32 (opcode
);
11547 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
11548 encode_thumb32_addr_mode (1, /*is_t=*/FALSE
, /*is_d=*/FALSE
);
11549 check_ldr_r15_aligned ();
11553 constraint (inst
.operands
[0].reg
> 7, BAD_HIREG
);
11555 if (inst
.instruction
== T_MNEM_ldrsh
|| inst
.instruction
== T_MNEM_ldrsb
)
11557 /* Only [Rn,Rm] is acceptable. */
11558 constraint (inst
.operands
[1].reg
> 7 || inst
.operands
[1].imm
> 7, BAD_HIREG
);
11559 constraint (!inst
.operands
[1].isreg
|| !inst
.operands
[1].immisreg
11560 || inst
.operands
[1].postind
|| inst
.operands
[1].shifted
11561 || inst
.operands
[1].negative
,
11562 _("Thumb does not support this addressing mode"));
11563 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11567 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11568 if (!inst
.operands
[1].isreg
)
11569 if (move_or_literal_pool (0, CONST_THUMB
, /*mode_3=*/FALSE
))
11572 constraint (!inst
.operands
[1].preind
11573 || inst
.operands
[1].shifted
11574 || inst
.operands
[1].writeback
,
11575 _("Thumb does not support this addressing mode"));
11576 if (inst
.operands
[1].reg
== REG_PC
|| inst
.operands
[1].reg
== REG_SP
)
11578 constraint (inst
.instruction
& 0x0600,
11579 _("byte or halfword not valid for base register"));
11580 constraint (inst
.operands
[1].reg
== REG_PC
11581 && !(inst
.instruction
& THUMB_LOAD_BIT
),
11582 _("r15 based store not allowed"));
11583 constraint (inst
.operands
[1].immisreg
,
11584 _("invalid base register for register offset"));
11586 if (inst
.operands
[1].reg
== REG_PC
)
11587 inst
.instruction
= T_OPCODE_LDR_PC
;
11588 else if (inst
.instruction
& THUMB_LOAD_BIT
)
11589 inst
.instruction
= T_OPCODE_LDR_SP
;
11591 inst
.instruction
= T_OPCODE_STR_SP
;
11593 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
11594 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_OFFSET
;
11598 constraint (inst
.operands
[1].reg
> 7, BAD_HIREG
);
11599 if (!inst
.operands
[1].immisreg
)
11601 /* Immediate offset. */
11602 inst
.instruction
|= inst
.operands
[0].reg
;
11603 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
11604 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_OFFSET
;
11608 /* Register offset. */
11609 constraint (inst
.operands
[1].imm
> 7, BAD_HIREG
);
11610 constraint (inst
.operands
[1].negative
,
11611 _("Thumb does not support this addressing mode"));
11614 switch (inst
.instruction
)
11616 case T_OPCODE_STR_IW
: inst
.instruction
= T_OPCODE_STR_RW
; break;
11617 case T_OPCODE_STR_IH
: inst
.instruction
= T_OPCODE_STR_RH
; break;
11618 case T_OPCODE_STR_IB
: inst
.instruction
= T_OPCODE_STR_RB
; break;
11619 case T_OPCODE_LDR_IW
: inst
.instruction
= T_OPCODE_LDR_RW
; break;
11620 case T_OPCODE_LDR_IH
: inst
.instruction
= T_OPCODE_LDR_RH
; break;
11621 case T_OPCODE_LDR_IB
: inst
.instruction
= T_OPCODE_LDR_RB
; break;
11622 case 0x5600 /* ldrsb */:
11623 case 0x5e00 /* ldrsh */: break;
11627 inst
.instruction
|= inst
.operands
[0].reg
;
11628 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
11629 inst
.instruction
|= inst
.operands
[1].imm
<< 6;
11635 if (!inst
.operands
[1].present
)
11637 inst
.operands
[1].reg
= inst
.operands
[0].reg
+ 1;
11638 constraint (inst
.operands
[0].reg
== REG_LR
,
11639 _("r14 not allowed here"));
11640 constraint (inst
.operands
[0].reg
== REG_R12
,
11641 _("r12 not allowed here"));
11644 if (inst
.operands
[2].writeback
11645 && (inst
.operands
[0].reg
== inst
.operands
[2].reg
11646 || inst
.operands
[1].reg
== inst
.operands
[2].reg
))
11647 as_warn (_("base register written back, and overlaps "
11648 "one of transfer registers"));
11650 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
11651 inst
.instruction
|= inst
.operands
[1].reg
<< 8;
11652 encode_thumb32_addr_mode (2, /*is_t=*/FALSE
, /*is_d=*/TRUE
);
11658 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
11659 encode_thumb32_addr_mode (1, /*is_t=*/TRUE
, /*is_d=*/FALSE
);
11665 unsigned Rd
, Rn
, Rm
, Ra
;
11667 Rd
= inst
.operands
[0].reg
;
11668 Rn
= inst
.operands
[1].reg
;
11669 Rm
= inst
.operands
[2].reg
;
11670 Ra
= inst
.operands
[3].reg
;
11672 reject_bad_reg (Rd
);
11673 reject_bad_reg (Rn
);
11674 reject_bad_reg (Rm
);
11675 reject_bad_reg (Ra
);
11677 inst
.instruction
|= Rd
<< 8;
11678 inst
.instruction
|= Rn
<< 16;
11679 inst
.instruction
|= Rm
;
11680 inst
.instruction
|= Ra
<< 12;
11686 unsigned RdLo
, RdHi
, Rn
, Rm
;
11688 RdLo
= inst
.operands
[0].reg
;
11689 RdHi
= inst
.operands
[1].reg
;
11690 Rn
= inst
.operands
[2].reg
;
11691 Rm
= inst
.operands
[3].reg
;
11693 reject_bad_reg (RdLo
);
11694 reject_bad_reg (RdHi
);
11695 reject_bad_reg (Rn
);
11696 reject_bad_reg (Rm
);
11698 inst
.instruction
|= RdLo
<< 12;
11699 inst
.instruction
|= RdHi
<< 8;
11700 inst
.instruction
|= Rn
<< 16;
11701 inst
.instruction
|= Rm
;
11705 do_t_mov_cmp (void)
11709 Rn
= inst
.operands
[0].reg
;
11710 Rm
= inst
.operands
[1].reg
;
11713 set_it_insn_type_last ();
11715 if (unified_syntax
)
11717 int r0off
= (inst
.instruction
== T_MNEM_mov
11718 || inst
.instruction
== T_MNEM_movs
) ? 8 : 16;
11719 unsigned long opcode
;
11720 bfd_boolean narrow
;
11721 bfd_boolean low_regs
;
11723 low_regs
= (Rn
<= 7 && Rm
<= 7);
11724 opcode
= inst
.instruction
;
11725 if (in_it_block ())
11726 narrow
= opcode
!= T_MNEM_movs
;
11728 narrow
= opcode
!= T_MNEM_movs
|| low_regs
;
11729 if (inst
.size_req
== 4
11730 || inst
.operands
[1].shifted
)
11733 /* MOVS PC, LR is encoded as SUBS PC, LR, #0. */
11734 if (opcode
== T_MNEM_movs
&& inst
.operands
[1].isreg
11735 && !inst
.operands
[1].shifted
11739 inst
.instruction
= T2_SUBS_PC_LR
;
11743 if (opcode
== T_MNEM_cmp
)
11745 constraint (Rn
== REG_PC
, BAD_PC
);
11748 /* In the Thumb-2 ISA, use of R13 as Rm is deprecated,
11750 warn_deprecated_sp (Rm
);
11751 /* R15 was documented as a valid choice for Rm in ARMv6,
11752 but as UNPREDICTABLE in ARMv7. ARM's proprietary
11753 tools reject R15, so we do too. */
11754 constraint (Rm
== REG_PC
, BAD_PC
);
11757 reject_bad_reg (Rm
);
11759 else if (opcode
== T_MNEM_mov
11760 || opcode
== T_MNEM_movs
)
11762 if (inst
.operands
[1].isreg
)
11764 if (opcode
== T_MNEM_movs
)
11766 reject_bad_reg (Rn
);
11767 reject_bad_reg (Rm
);
11771 /* This is mov.n. */
11772 if ((Rn
== REG_SP
|| Rn
== REG_PC
)
11773 && (Rm
== REG_SP
|| Rm
== REG_PC
))
11775 as_tsktsk (_("Use of r%u as a source register is "
11776 "deprecated when r%u is the destination "
11777 "register."), Rm
, Rn
);
11782 /* This is mov.w. */
11783 constraint (Rn
== REG_PC
, BAD_PC
);
11784 constraint (Rm
== REG_PC
, BAD_PC
);
11785 constraint (Rn
== REG_SP
&& Rm
== REG_SP
, BAD_SP
);
11789 reject_bad_reg (Rn
);
11792 if (!inst
.operands
[1].isreg
)
11794 /* Immediate operand. */
11795 if (!in_it_block () && opcode
== T_MNEM_mov
)
11797 if (low_regs
&& narrow
)
11799 inst
.instruction
= THUMB_OP16 (opcode
);
11800 inst
.instruction
|= Rn
<< 8;
11801 if (inst
.size_req
== 2)
11803 if (inst
.reloc
.type
< BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
11804 || inst
.reloc
.type
> BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
)
11805 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_IMM
;
11808 inst
.relax
= opcode
;
11812 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
11813 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
11814 inst
.instruction
|= Rn
<< r0off
;
11815 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
11818 else if (inst
.operands
[1].shifted
&& inst
.operands
[1].immisreg
11819 && (inst
.instruction
== T_MNEM_mov
11820 || inst
.instruction
== T_MNEM_movs
))
11822 /* Register shifts are encoded as separate shift instructions. */
11823 bfd_boolean flags
= (inst
.instruction
== T_MNEM_movs
);
11825 if (in_it_block ())
11830 if (inst
.size_req
== 4)
11833 if (!low_regs
|| inst
.operands
[1].imm
> 7)
11839 switch (inst
.operands
[1].shift_kind
)
11842 opcode
= narrow
? T_OPCODE_LSL_R
: THUMB_OP32 (T_MNEM_lsl
);
11845 opcode
= narrow
? T_OPCODE_ASR_R
: THUMB_OP32 (T_MNEM_asr
);
11848 opcode
= narrow
? T_OPCODE_LSR_R
: THUMB_OP32 (T_MNEM_lsr
);
11851 opcode
= narrow
? T_OPCODE_ROR_R
: THUMB_OP32 (T_MNEM_ror
);
11857 inst
.instruction
= opcode
;
11860 inst
.instruction
|= Rn
;
11861 inst
.instruction
|= inst
.operands
[1].imm
<< 3;
11866 inst
.instruction
|= CONDS_BIT
;
11868 inst
.instruction
|= Rn
<< 8;
11869 inst
.instruction
|= Rm
<< 16;
11870 inst
.instruction
|= inst
.operands
[1].imm
;
11875 /* Some mov with immediate shift have narrow variants.
11876 Register shifts are handled above. */
11877 if (low_regs
&& inst
.operands
[1].shifted
11878 && (inst
.instruction
== T_MNEM_mov
11879 || inst
.instruction
== T_MNEM_movs
))
11881 if (in_it_block ())
11882 narrow
= (inst
.instruction
== T_MNEM_mov
);
11884 narrow
= (inst
.instruction
== T_MNEM_movs
);
11889 switch (inst
.operands
[1].shift_kind
)
11891 case SHIFT_LSL
: inst
.instruction
= T_OPCODE_LSL_I
; break;
11892 case SHIFT_LSR
: inst
.instruction
= T_OPCODE_LSR_I
; break;
11893 case SHIFT_ASR
: inst
.instruction
= T_OPCODE_ASR_I
; break;
11894 default: narrow
= FALSE
; break;
11900 inst
.instruction
|= Rn
;
11901 inst
.instruction
|= Rm
<< 3;
11902 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_SHIFT
;
11906 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
11907 inst
.instruction
|= Rn
<< r0off
;
11908 encode_thumb32_shifted_operand (1);
11912 switch (inst
.instruction
)
11915 /* In v4t or v5t a move of two lowregs produces unpredictable
11916 results. Don't allow this. */
11919 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6
),
11920 "MOV Rd, Rs with two low registers is not "
11921 "permitted on this architecture");
11922 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
11926 inst
.instruction
= T_OPCODE_MOV_HR
;
11927 inst
.instruction
|= (Rn
& 0x8) << 4;
11928 inst
.instruction
|= (Rn
& 0x7);
11929 inst
.instruction
|= Rm
<< 3;
11933 /* We know we have low registers at this point.
11934 Generate LSLS Rd, Rs, #0. */
11935 inst
.instruction
= T_OPCODE_LSL_I
;
11936 inst
.instruction
|= Rn
;
11937 inst
.instruction
|= Rm
<< 3;
11943 inst
.instruction
= T_OPCODE_CMP_LR
;
11944 inst
.instruction
|= Rn
;
11945 inst
.instruction
|= Rm
<< 3;
11949 inst
.instruction
= T_OPCODE_CMP_HR
;
11950 inst
.instruction
|= (Rn
& 0x8) << 4;
11951 inst
.instruction
|= (Rn
& 0x7);
11952 inst
.instruction
|= Rm
<< 3;
11959 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11961 /* PR 10443: Do not silently ignore shifted operands. */
11962 constraint (inst
.operands
[1].shifted
,
11963 _("shifts in CMP/MOV instructions are only supported in unified syntax"));
11965 if (inst
.operands
[1].isreg
)
11967 if (Rn
< 8 && Rm
< 8)
11969 /* A move of two lowregs is encoded as ADD Rd, Rs, #0
11970 since a MOV instruction produces unpredictable results. */
11971 if (inst
.instruction
== T_OPCODE_MOV_I8
)
11972 inst
.instruction
= T_OPCODE_ADD_I3
;
11974 inst
.instruction
= T_OPCODE_CMP_LR
;
11976 inst
.instruction
|= Rn
;
11977 inst
.instruction
|= Rm
<< 3;
11981 if (inst
.instruction
== T_OPCODE_MOV_I8
)
11982 inst
.instruction
= T_OPCODE_MOV_HR
;
11984 inst
.instruction
= T_OPCODE_CMP_HR
;
11990 constraint (Rn
> 7,
11991 _("only lo regs allowed with immediate"));
11992 inst
.instruction
|= Rn
<< 8;
11993 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_IMM
;
12004 top
= (inst
.instruction
& 0x00800000) != 0;
12005 if (inst
.reloc
.type
== BFD_RELOC_ARM_MOVW
)
12007 constraint (top
, _(":lower16: not allowed this instruction"));
12008 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_MOVW
;
12010 else if (inst
.reloc
.type
== BFD_RELOC_ARM_MOVT
)
12012 constraint (!top
, _(":upper16: not allowed this instruction"));
12013 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_MOVT
;
12016 Rd
= inst
.operands
[0].reg
;
12017 reject_bad_reg (Rd
);
12019 inst
.instruction
|= Rd
<< 8;
12020 if (inst
.reloc
.type
== BFD_RELOC_UNUSED
)
12022 imm
= inst
.reloc
.exp
.X_add_number
;
12023 inst
.instruction
|= (imm
& 0xf000) << 4;
12024 inst
.instruction
|= (imm
& 0x0800) << 15;
12025 inst
.instruction
|= (imm
& 0x0700) << 4;
12026 inst
.instruction
|= (imm
& 0x00ff);
12031 do_t_mvn_tst (void)
12035 Rn
= inst
.operands
[0].reg
;
12036 Rm
= inst
.operands
[1].reg
;
12038 if (inst
.instruction
== T_MNEM_cmp
12039 || inst
.instruction
== T_MNEM_cmn
)
12040 constraint (Rn
== REG_PC
, BAD_PC
);
12042 reject_bad_reg (Rn
);
12043 reject_bad_reg (Rm
);
12045 if (unified_syntax
)
12047 int r0off
= (inst
.instruction
== T_MNEM_mvn
12048 || inst
.instruction
== T_MNEM_mvns
) ? 8 : 16;
12049 bfd_boolean narrow
;
12051 if (inst
.size_req
== 4
12052 || inst
.instruction
> 0xffff
12053 || inst
.operands
[1].shifted
12054 || Rn
> 7 || Rm
> 7)
12056 else if (inst
.instruction
== T_MNEM_cmn
12057 || inst
.instruction
== T_MNEM_tst
)
12059 else if (THUMB_SETS_FLAGS (inst
.instruction
))
12060 narrow
= !in_it_block ();
12062 narrow
= in_it_block ();
12064 if (!inst
.operands
[1].isreg
)
12066 /* For an immediate, we always generate a 32-bit opcode;
12067 section relaxation will shrink it later if possible. */
12068 if (inst
.instruction
< 0xffff)
12069 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12070 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
12071 inst
.instruction
|= Rn
<< r0off
;
12072 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
12076 /* See if we can do this with a 16-bit instruction. */
12079 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12080 inst
.instruction
|= Rn
;
12081 inst
.instruction
|= Rm
<< 3;
12085 constraint (inst
.operands
[1].shifted
12086 && inst
.operands
[1].immisreg
,
12087 _("shift must be constant"));
12088 if (inst
.instruction
< 0xffff)
12089 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12090 inst
.instruction
|= Rn
<< r0off
;
12091 encode_thumb32_shifted_operand (1);
12097 constraint (inst
.instruction
> 0xffff
12098 || inst
.instruction
== T_MNEM_mvns
, BAD_THUMB32
);
12099 constraint (!inst
.operands
[1].isreg
|| inst
.operands
[1].shifted
,
12100 _("unshifted register required"));
12101 constraint (Rn
> 7 || Rm
> 7,
12104 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12105 inst
.instruction
|= Rn
;
12106 inst
.instruction
|= Rm
<< 3;
12115 if (do_vfp_nsyn_mrs () == SUCCESS
)
12118 Rd
= inst
.operands
[0].reg
;
12119 reject_bad_reg (Rd
);
12120 inst
.instruction
|= Rd
<< 8;
12122 if (inst
.operands
[1].isreg
)
12124 unsigned br
= inst
.operands
[1].reg
;
12125 if (((br
& 0x200) == 0) && ((br
& 0xf000) != 0xf000))
12126 as_bad (_("bad register for mrs"));
12128 inst
.instruction
|= br
& (0xf << 16);
12129 inst
.instruction
|= (br
& 0x300) >> 4;
12130 inst
.instruction
|= (br
& SPSR_BIT
) >> 2;
12134 int flags
= inst
.operands
[1].imm
& (PSR_c
|PSR_x
|PSR_s
|PSR_f
|SPSR_BIT
);
12136 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_m
))
12138 /* PR gas/12698: The constraint is only applied for m_profile.
12139 If the user has specified -march=all, we want to ignore it as
12140 we are building for any CPU type, including non-m variants. */
12141 bfd_boolean m_profile
=
12142 !ARM_FEATURE_CORE_EQUAL (selected_cpu
, arm_arch_any
);
12143 constraint ((flags
!= 0) && m_profile
, _("selected processor does "
12144 "not support requested special purpose register"));
12147 /* mrs only accepts APSR/CPSR/SPSR/CPSR_all/SPSR_all (for non-M profile
12149 constraint ((flags
& ~SPSR_BIT
) != (PSR_c
|PSR_f
),
12150 _("'APSR', 'CPSR' or 'SPSR' expected"));
12152 inst
.instruction
|= (flags
& SPSR_BIT
) >> 2;
12153 inst
.instruction
|= inst
.operands
[1].imm
& 0xff;
12154 inst
.instruction
|= 0xf0000;
12164 if (do_vfp_nsyn_msr () == SUCCESS
)
12167 constraint (!inst
.operands
[1].isreg
,
12168 _("Thumb encoding does not support an immediate here"));
12170 if (inst
.operands
[0].isreg
)
12171 flags
= (int)(inst
.operands
[0].reg
);
12173 flags
= inst
.operands
[0].imm
;
12175 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_m
))
12177 int bits
= inst
.operands
[0].imm
& (PSR_c
|PSR_x
|PSR_s
|PSR_f
|SPSR_BIT
);
12179 /* PR gas/12698: The constraint is only applied for m_profile.
12180 If the user has specified -march=all, we want to ignore it as
12181 we are building for any CPU type, including non-m variants. */
12182 bfd_boolean m_profile
=
12183 !ARM_FEATURE_CORE_EQUAL (selected_cpu
, arm_arch_any
);
12184 constraint (((ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6_dsp
)
12185 && (bits
& ~(PSR_s
| PSR_f
)) != 0)
12186 || (!ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6_dsp
)
12187 && bits
!= PSR_f
)) && m_profile
,
12188 _("selected processor does not support requested special "
12189 "purpose register"));
12192 constraint ((flags
& 0xff) != 0, _("selected processor does not support "
12193 "requested special purpose register"));
12195 Rn
= inst
.operands
[1].reg
;
12196 reject_bad_reg (Rn
);
12198 inst
.instruction
|= (flags
& SPSR_BIT
) >> 2;
12199 inst
.instruction
|= (flags
& 0xf0000) >> 8;
12200 inst
.instruction
|= (flags
& 0x300) >> 4;
12201 inst
.instruction
|= (flags
& 0xff);
12202 inst
.instruction
|= Rn
<< 16;
12208 bfd_boolean narrow
;
12209 unsigned Rd
, Rn
, Rm
;
12211 if (!inst
.operands
[2].present
)
12212 inst
.operands
[2].reg
= inst
.operands
[0].reg
;
12214 Rd
= inst
.operands
[0].reg
;
12215 Rn
= inst
.operands
[1].reg
;
12216 Rm
= inst
.operands
[2].reg
;
12218 if (unified_syntax
)
12220 if (inst
.size_req
== 4
12226 else if (inst
.instruction
== T_MNEM_muls
)
12227 narrow
= !in_it_block ();
12229 narrow
= in_it_block ();
12233 constraint (inst
.instruction
== T_MNEM_muls
, BAD_THUMB32
);
12234 constraint (Rn
> 7 || Rm
> 7,
12241 /* 16-bit MULS/Conditional MUL. */
12242 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12243 inst
.instruction
|= Rd
;
12246 inst
.instruction
|= Rm
<< 3;
12248 inst
.instruction
|= Rn
<< 3;
12250 constraint (1, _("dest must overlap one source register"));
12254 constraint (inst
.instruction
!= T_MNEM_mul
,
12255 _("Thumb-2 MUL must not set flags"));
12257 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12258 inst
.instruction
|= Rd
<< 8;
12259 inst
.instruction
|= Rn
<< 16;
12260 inst
.instruction
|= Rm
<< 0;
12262 reject_bad_reg (Rd
);
12263 reject_bad_reg (Rn
);
12264 reject_bad_reg (Rm
);
12271 unsigned RdLo
, RdHi
, Rn
, Rm
;
12273 RdLo
= inst
.operands
[0].reg
;
12274 RdHi
= inst
.operands
[1].reg
;
12275 Rn
= inst
.operands
[2].reg
;
12276 Rm
= inst
.operands
[3].reg
;
12278 reject_bad_reg (RdLo
);
12279 reject_bad_reg (RdHi
);
12280 reject_bad_reg (Rn
);
12281 reject_bad_reg (Rm
);
12283 inst
.instruction
|= RdLo
<< 12;
12284 inst
.instruction
|= RdHi
<< 8;
12285 inst
.instruction
|= Rn
<< 16;
12286 inst
.instruction
|= Rm
;
12289 as_tsktsk (_("rdhi and rdlo must be different"));
12295 set_it_insn_type (NEUTRAL_IT_INSN
);
12297 if (unified_syntax
)
12299 if (inst
.size_req
== 4 || inst
.operands
[0].imm
> 15)
12301 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12302 inst
.instruction
|= inst
.operands
[0].imm
;
12306 /* PR9722: Check for Thumb2 availability before
12307 generating a thumb2 nop instruction. */
12308 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6t2
))
12310 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12311 inst
.instruction
|= inst
.operands
[0].imm
<< 4;
12314 inst
.instruction
= 0x46c0;
12319 constraint (inst
.operands
[0].present
,
12320 _("Thumb does not support NOP with hints"));
12321 inst
.instruction
= 0x46c0;
12328 if (unified_syntax
)
12330 bfd_boolean narrow
;
12332 if (THUMB_SETS_FLAGS (inst
.instruction
))
12333 narrow
= !in_it_block ();
12335 narrow
= in_it_block ();
12336 if (inst
.operands
[0].reg
> 7 || inst
.operands
[1].reg
> 7)
12338 if (inst
.size_req
== 4)
12343 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12344 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
12345 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
12349 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12350 inst
.instruction
|= inst
.operands
[0].reg
;
12351 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
12356 constraint (inst
.operands
[0].reg
> 7 || inst
.operands
[1].reg
> 7,
12358 constraint (THUMB_SETS_FLAGS (inst
.instruction
), BAD_THUMB32
);
12360 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12361 inst
.instruction
|= inst
.operands
[0].reg
;
12362 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
12371 Rd
= inst
.operands
[0].reg
;
12372 Rn
= inst
.operands
[1].present
? inst
.operands
[1].reg
: Rd
;
12374 reject_bad_reg (Rd
);
12375 /* Rn == REG_SP is unpredictable; Rn == REG_PC is MVN. */
12376 reject_bad_reg (Rn
);
12378 inst
.instruction
|= Rd
<< 8;
12379 inst
.instruction
|= Rn
<< 16;
12381 if (!inst
.operands
[2].isreg
)
12383 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
12384 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
12390 Rm
= inst
.operands
[2].reg
;
12391 reject_bad_reg (Rm
);
12393 constraint (inst
.operands
[2].shifted
12394 && inst
.operands
[2].immisreg
,
12395 _("shift must be constant"));
12396 encode_thumb32_shifted_operand (2);
12403 unsigned Rd
, Rn
, Rm
;
12405 Rd
= inst
.operands
[0].reg
;
12406 Rn
= inst
.operands
[1].reg
;
12407 Rm
= inst
.operands
[2].reg
;
12409 reject_bad_reg (Rd
);
12410 reject_bad_reg (Rn
);
12411 reject_bad_reg (Rm
);
12413 inst
.instruction
|= Rd
<< 8;
12414 inst
.instruction
|= Rn
<< 16;
12415 inst
.instruction
|= Rm
;
12416 if (inst
.operands
[3].present
)
12418 unsigned int val
= inst
.reloc
.exp
.X_add_number
;
12419 constraint (inst
.reloc
.exp
.X_op
!= O_constant
,
12420 _("expression too complex"));
12421 inst
.instruction
|= (val
& 0x1c) << 10;
12422 inst
.instruction
|= (val
& 0x03) << 6;
12429 if (!inst
.operands
[3].present
)
12433 inst
.instruction
&= ~0x00000020;
12435 /* PR 10168. Swap the Rm and Rn registers. */
12436 Rtmp
= inst
.operands
[1].reg
;
12437 inst
.operands
[1].reg
= inst
.operands
[2].reg
;
12438 inst
.operands
[2].reg
= Rtmp
;
12446 if (inst
.operands
[0].immisreg
)
12447 reject_bad_reg (inst
.operands
[0].imm
);
12449 encode_thumb32_addr_mode (0, /*is_t=*/FALSE
, /*is_d=*/FALSE
);
12453 do_t_push_pop (void)
12457 constraint (inst
.operands
[0].writeback
,
12458 _("push/pop do not support {reglist}^"));
12459 constraint (inst
.reloc
.type
!= BFD_RELOC_UNUSED
,
12460 _("expression too complex"));
12462 mask
= inst
.operands
[0].imm
;
12463 if (inst
.size_req
!= 4 && (mask
& ~0xff) == 0)
12464 inst
.instruction
= THUMB_OP16 (inst
.instruction
) | mask
;
12465 else if (inst
.size_req
!= 4
12466 && (mask
& ~0xff) == (1 << (inst
.instruction
== T_MNEM_push
12467 ? REG_LR
: REG_PC
)))
12469 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12470 inst
.instruction
|= THUMB_PP_PC_LR
;
12471 inst
.instruction
|= mask
& 0xff;
12473 else if (unified_syntax
)
12475 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12476 encode_thumb2_ldmstm (13, mask
, TRUE
);
12480 inst
.error
= _("invalid register list to push/pop instruction");
12490 Rd
= inst
.operands
[0].reg
;
12491 Rm
= inst
.operands
[1].reg
;
12493 reject_bad_reg (Rd
);
12494 reject_bad_reg (Rm
);
12496 inst
.instruction
|= Rd
<< 8;
12497 inst
.instruction
|= Rm
<< 16;
12498 inst
.instruction
|= Rm
;
12506 Rd
= inst
.operands
[0].reg
;
12507 Rm
= inst
.operands
[1].reg
;
12509 reject_bad_reg (Rd
);
12510 reject_bad_reg (Rm
);
12512 if (Rd
<= 7 && Rm
<= 7
12513 && inst
.size_req
!= 4)
12515 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12516 inst
.instruction
|= Rd
;
12517 inst
.instruction
|= Rm
<< 3;
12519 else if (unified_syntax
)
12521 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12522 inst
.instruction
|= Rd
<< 8;
12523 inst
.instruction
|= Rm
<< 16;
12524 inst
.instruction
|= Rm
;
12527 inst
.error
= BAD_HIREG
;
12535 Rd
= inst
.operands
[0].reg
;
12536 Rm
= inst
.operands
[1].reg
;
12538 reject_bad_reg (Rd
);
12539 reject_bad_reg (Rm
);
12541 inst
.instruction
|= Rd
<< 8;
12542 inst
.instruction
|= Rm
;
12550 Rd
= inst
.operands
[0].reg
;
12551 Rs
= (inst
.operands
[1].present
12552 ? inst
.operands
[1].reg
/* Rd, Rs, foo */
12553 : inst
.operands
[0].reg
); /* Rd, foo -> Rd, Rd, foo */
12555 reject_bad_reg (Rd
);
12556 reject_bad_reg (Rs
);
12557 if (inst
.operands
[2].isreg
)
12558 reject_bad_reg (inst
.operands
[2].reg
);
12560 inst
.instruction
|= Rd
<< 8;
12561 inst
.instruction
|= Rs
<< 16;
12562 if (!inst
.operands
[2].isreg
)
12564 bfd_boolean narrow
;
12566 if ((inst
.instruction
& 0x00100000) != 0)
12567 narrow
= !in_it_block ();
12569 narrow
= in_it_block ();
12571 if (Rd
> 7 || Rs
> 7)
12574 if (inst
.size_req
== 4 || !unified_syntax
)
12577 if (inst
.reloc
.exp
.X_op
!= O_constant
12578 || inst
.reloc
.exp
.X_add_number
!= 0)
12581 /* Turn rsb #0 into 16-bit neg. We should probably do this via
12582 relaxation, but it doesn't seem worth the hassle. */
12585 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
12586 inst
.instruction
= THUMB_OP16 (T_MNEM_negs
);
12587 inst
.instruction
|= Rs
<< 3;
12588 inst
.instruction
|= Rd
;
12592 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
12593 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
12597 encode_thumb32_shifted_operand (2);
12603 if (warn_on_deprecated
12604 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8
))
12605 as_tsktsk (_("setend use is deprecated for ARMv8"));
12607 set_it_insn_type (OUTSIDE_IT_INSN
);
12608 if (inst
.operands
[0].imm
)
12609 inst
.instruction
|= 0x8;
12615 if (!inst
.operands
[1].present
)
12616 inst
.operands
[1].reg
= inst
.operands
[0].reg
;
12618 if (unified_syntax
)
12620 bfd_boolean narrow
;
12623 switch (inst
.instruction
)
12626 case T_MNEM_asrs
: shift_kind
= SHIFT_ASR
; break;
12628 case T_MNEM_lsls
: shift_kind
= SHIFT_LSL
; break;
12630 case T_MNEM_lsrs
: shift_kind
= SHIFT_LSR
; break;
12632 case T_MNEM_rors
: shift_kind
= SHIFT_ROR
; break;
12636 if (THUMB_SETS_FLAGS (inst
.instruction
))
12637 narrow
= !in_it_block ();
12639 narrow
= in_it_block ();
12640 if (inst
.operands
[0].reg
> 7 || inst
.operands
[1].reg
> 7)
12642 if (!inst
.operands
[2].isreg
&& shift_kind
== SHIFT_ROR
)
12644 if (inst
.operands
[2].isreg
12645 && (inst
.operands
[1].reg
!= inst
.operands
[0].reg
12646 || inst
.operands
[2].reg
> 7))
12648 if (inst
.size_req
== 4)
12651 reject_bad_reg (inst
.operands
[0].reg
);
12652 reject_bad_reg (inst
.operands
[1].reg
);
12656 if (inst
.operands
[2].isreg
)
12658 reject_bad_reg (inst
.operands
[2].reg
);
12659 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12660 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
12661 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
12662 inst
.instruction
|= inst
.operands
[2].reg
;
12664 /* PR 12854: Error on extraneous shifts. */
12665 constraint (inst
.operands
[2].shifted
,
12666 _("extraneous shift as part of operand to shift insn"));
12670 inst
.operands
[1].shifted
= 1;
12671 inst
.operands
[1].shift_kind
= shift_kind
;
12672 inst
.instruction
= THUMB_OP32 (THUMB_SETS_FLAGS (inst
.instruction
)
12673 ? T_MNEM_movs
: T_MNEM_mov
);
12674 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
12675 encode_thumb32_shifted_operand (1);
12676 /* Prevent the incorrect generation of an ARM_IMMEDIATE fixup. */
12677 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
12682 if (inst
.operands
[2].isreg
)
12684 switch (shift_kind
)
12686 case SHIFT_ASR
: inst
.instruction
= T_OPCODE_ASR_R
; break;
12687 case SHIFT_LSL
: inst
.instruction
= T_OPCODE_LSL_R
; break;
12688 case SHIFT_LSR
: inst
.instruction
= T_OPCODE_LSR_R
; break;
12689 case SHIFT_ROR
: inst
.instruction
= T_OPCODE_ROR_R
; break;
12693 inst
.instruction
|= inst
.operands
[0].reg
;
12694 inst
.instruction
|= inst
.operands
[2].reg
<< 3;
12696 /* PR 12854: Error on extraneous shifts. */
12697 constraint (inst
.operands
[2].shifted
,
12698 _("extraneous shift as part of operand to shift insn"));
12702 switch (shift_kind
)
12704 case SHIFT_ASR
: inst
.instruction
= T_OPCODE_ASR_I
; break;
12705 case SHIFT_LSL
: inst
.instruction
= T_OPCODE_LSL_I
; break;
12706 case SHIFT_LSR
: inst
.instruction
= T_OPCODE_LSR_I
; break;
12709 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_SHIFT
;
12710 inst
.instruction
|= inst
.operands
[0].reg
;
12711 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
12717 constraint (inst
.operands
[0].reg
> 7
12718 || inst
.operands
[1].reg
> 7, BAD_HIREG
);
12719 constraint (THUMB_SETS_FLAGS (inst
.instruction
), BAD_THUMB32
);
12721 if (inst
.operands
[2].isreg
) /* Rd, {Rs,} Rn */
12723 constraint (inst
.operands
[2].reg
> 7, BAD_HIREG
);
12724 constraint (inst
.operands
[0].reg
!= inst
.operands
[1].reg
,
12725 _("source1 and dest must be same register"));
12727 switch (inst
.instruction
)
12729 case T_MNEM_asr
: inst
.instruction
= T_OPCODE_ASR_R
; break;
12730 case T_MNEM_lsl
: inst
.instruction
= T_OPCODE_LSL_R
; break;
12731 case T_MNEM_lsr
: inst
.instruction
= T_OPCODE_LSR_R
; break;
12732 case T_MNEM_ror
: inst
.instruction
= T_OPCODE_ROR_R
; break;
12736 inst
.instruction
|= inst
.operands
[0].reg
;
12737 inst
.instruction
|= inst
.operands
[2].reg
<< 3;
12739 /* PR 12854: Error on extraneous shifts. */
12740 constraint (inst
.operands
[2].shifted
,
12741 _("extraneous shift as part of operand to shift insn"));
12745 switch (inst
.instruction
)
12747 case T_MNEM_asr
: inst
.instruction
= T_OPCODE_ASR_I
; break;
12748 case T_MNEM_lsl
: inst
.instruction
= T_OPCODE_LSL_I
; break;
12749 case T_MNEM_lsr
: inst
.instruction
= T_OPCODE_LSR_I
; break;
12750 case T_MNEM_ror
: inst
.error
= _("ror #imm not supported"); return;
12753 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_SHIFT
;
12754 inst
.instruction
|= inst
.operands
[0].reg
;
12755 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
12763 unsigned Rd
, Rn
, Rm
;
12765 Rd
= inst
.operands
[0].reg
;
12766 Rn
= inst
.operands
[1].reg
;
12767 Rm
= inst
.operands
[2].reg
;
12769 reject_bad_reg (Rd
);
12770 reject_bad_reg (Rn
);
12771 reject_bad_reg (Rm
);
12773 inst
.instruction
|= Rd
<< 8;
12774 inst
.instruction
|= Rn
<< 16;
12775 inst
.instruction
|= Rm
;
12781 unsigned Rd
, Rn
, Rm
;
12783 Rd
= inst
.operands
[0].reg
;
12784 Rm
= inst
.operands
[1].reg
;
12785 Rn
= inst
.operands
[2].reg
;
12787 reject_bad_reg (Rd
);
12788 reject_bad_reg (Rn
);
12789 reject_bad_reg (Rm
);
12791 inst
.instruction
|= Rd
<< 8;
12792 inst
.instruction
|= Rn
<< 16;
12793 inst
.instruction
|= Rm
;
12799 unsigned int value
= inst
.reloc
.exp
.X_add_number
;
12800 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v7a
),
12801 _("SMC is not permitted on this architecture"));
12802 constraint (inst
.reloc
.exp
.X_op
!= O_constant
,
12803 _("expression too complex"));
12804 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
12805 inst
.instruction
|= (value
& 0xf000) >> 12;
12806 inst
.instruction
|= (value
& 0x0ff0);
12807 inst
.instruction
|= (value
& 0x000f) << 16;
12808 /* PR gas/15623: SMC instructions must be last in an IT block. */
12809 set_it_insn_type_last ();
12815 unsigned int value
= inst
.reloc
.exp
.X_add_number
;
12817 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
12818 inst
.instruction
|= (value
& 0x0fff);
12819 inst
.instruction
|= (value
& 0xf000) << 4;
12823 do_t_ssat_usat (int bias
)
12827 Rd
= inst
.operands
[0].reg
;
12828 Rn
= inst
.operands
[2].reg
;
12830 reject_bad_reg (Rd
);
12831 reject_bad_reg (Rn
);
12833 inst
.instruction
|= Rd
<< 8;
12834 inst
.instruction
|= inst
.operands
[1].imm
- bias
;
12835 inst
.instruction
|= Rn
<< 16;
12837 if (inst
.operands
[3].present
)
12839 offsetT shift_amount
= inst
.reloc
.exp
.X_add_number
;
12841 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
12843 constraint (inst
.reloc
.exp
.X_op
!= O_constant
,
12844 _("expression too complex"));
12846 if (shift_amount
!= 0)
12848 constraint (shift_amount
> 31,
12849 _("shift expression is too large"));
12851 if (inst
.operands
[3].shift_kind
== SHIFT_ASR
)
12852 inst
.instruction
|= 0x00200000; /* sh bit. */
12854 inst
.instruction
|= (shift_amount
& 0x1c) << 10;
12855 inst
.instruction
|= (shift_amount
& 0x03) << 6;
/* Thumb-2 SSAT: shared encoder with a bias of 1.  */
static void
do_t_ssat (void)
{
  do_t_ssat_usat (1);
}
12871 Rd
= inst
.operands
[0].reg
;
12872 Rn
= inst
.operands
[2].reg
;
12874 reject_bad_reg (Rd
);
12875 reject_bad_reg (Rn
);
12877 inst
.instruction
|= Rd
<< 8;
12878 inst
.instruction
|= inst
.operands
[1].imm
- 1;
12879 inst
.instruction
|= Rn
<< 16;
12885 constraint (!inst
.operands
[2].isreg
|| !inst
.operands
[2].preind
12886 || inst
.operands
[2].postind
|| inst
.operands
[2].writeback
12887 || inst
.operands
[2].immisreg
|| inst
.operands
[2].shifted
12888 || inst
.operands
[2].negative
,
12891 constraint (inst
.operands
[2].reg
== REG_PC
, BAD_PC
);
12893 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
12894 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
12895 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
12896 inst
.reloc
.type
= BFD_RELOC_ARM_T32_OFFSET_U8
;
12902 if (!inst
.operands
[2].present
)
12903 inst
.operands
[2].reg
= inst
.operands
[1].reg
+ 1;
12905 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
12906 || inst
.operands
[0].reg
== inst
.operands
[2].reg
12907 || inst
.operands
[0].reg
== inst
.operands
[3].reg
,
12910 inst
.instruction
|= inst
.operands
[0].reg
;
12911 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
12912 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
12913 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
12919 unsigned Rd
, Rn
, Rm
;
12921 Rd
= inst
.operands
[0].reg
;
12922 Rn
= inst
.operands
[1].reg
;
12923 Rm
= inst
.operands
[2].reg
;
12925 reject_bad_reg (Rd
);
12926 reject_bad_reg (Rn
);
12927 reject_bad_reg (Rm
);
12929 inst
.instruction
|= Rd
<< 8;
12930 inst
.instruction
|= Rn
<< 16;
12931 inst
.instruction
|= Rm
;
12932 inst
.instruction
|= inst
.operands
[3].imm
<< 4;
12940 Rd
= inst
.operands
[0].reg
;
12941 Rm
= inst
.operands
[1].reg
;
12943 reject_bad_reg (Rd
);
12944 reject_bad_reg (Rm
);
12946 if (inst
.instruction
<= 0xffff
12947 && inst
.size_req
!= 4
12948 && Rd
<= 7 && Rm
<= 7
12949 && (!inst
.operands
[2].present
|| inst
.operands
[2].imm
== 0))
12951 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12952 inst
.instruction
|= Rd
;
12953 inst
.instruction
|= Rm
<< 3;
12955 else if (unified_syntax
)
12957 if (inst
.instruction
<= 0xffff)
12958 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12959 inst
.instruction
|= Rd
<< 8;
12960 inst
.instruction
|= Rm
;
12961 inst
.instruction
|= inst
.operands
[2].imm
<< 4;
12965 constraint (inst
.operands
[2].present
&& inst
.operands
[2].imm
!= 0,
12966 _("Thumb encoding does not support rotation"));
12967 constraint (1, BAD_HIREG
);
12974 /* We have to do the following check manually as ARM_EXT_OS only applies
12976 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6m
))
12978 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_os
)
12979 /* This only applies to the v6m howver, not later architectures. */
12980 && ! ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v7
))
12981 as_bad (_("SVC is not permitted on this architecture"));
12982 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
, arm_ext_os
);
12985 inst
.reloc
.type
= BFD_RELOC_ARM_SWI
;
12994 half
= (inst
.instruction
& 0x10) != 0;
12995 set_it_insn_type_last ();
12996 constraint (inst
.operands
[0].immisreg
,
12997 _("instruction requires register index"));
12999 Rn
= inst
.operands
[0].reg
;
13000 Rm
= inst
.operands
[0].imm
;
13002 constraint (Rn
== REG_SP
, BAD_SP
);
13003 reject_bad_reg (Rm
);
13005 constraint (!half
&& inst
.operands
[0].shifted
,
13006 _("instruction does not allow shifted index"));
13007 inst
.instruction
|= (Rn
<< 16) | Rm
;
13013 if (!inst
.operands
[0].present
)
13014 inst
.operands
[0].imm
= 0;
13016 if ((unsigned int) inst
.operands
[0].imm
> 255 || inst
.size_req
== 4)
13018 constraint (inst
.size_req
== 2,
13019 _("immediate value out of range"));
13020 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
13021 inst
.instruction
|= (inst
.operands
[0].imm
& 0xf000u
) << 4;
13022 inst
.instruction
|= (inst
.operands
[0].imm
& 0x0fffu
) << 0;
13026 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
13027 inst
.instruction
|= inst
.operands
[0].imm
;
13030 set_it_insn_type (NEUTRAL_IT_INSN
);
/* Thumb-2 USAT: shared encoder with a bias of 0.  */
static void
do_t_usat (void)
{
  do_t_ssat_usat (0);
}
13045 Rd
= inst
.operands
[0].reg
;
13046 Rn
= inst
.operands
[2].reg
;
13048 reject_bad_reg (Rd
);
13049 reject_bad_reg (Rn
);
13051 inst
.instruction
|= Rd
<< 8;
13052 inst
.instruction
|= inst
.operands
[1].imm
;
13053 inst
.instruction
|= Rn
<< 16;
13056 /* Neon instruction encoder helpers. */
13058 /* Encodings for the different types for various Neon opcodes. */
13060 /* An "invalid" code for the following tables. */
13063 struct neon_tab_entry
13066 unsigned float_or_poly
;
13067 unsigned scalar_or_imm
;
13070 /* Map overloaded Neon opcodes to their respective encodings. */
13071 #define NEON_ENC_TAB \
13072 X(vabd, 0x0000700, 0x1200d00, N_INV), \
13073 X(vmax, 0x0000600, 0x0000f00, N_INV), \
13074 X(vmin, 0x0000610, 0x0200f00, N_INV), \
13075 X(vpadd, 0x0000b10, 0x1000d00, N_INV), \
13076 X(vpmax, 0x0000a00, 0x1000f00, N_INV), \
13077 X(vpmin, 0x0000a10, 0x1200f00, N_INV), \
13078 X(vadd, 0x0000800, 0x0000d00, N_INV), \
13079 X(vsub, 0x1000800, 0x0200d00, N_INV), \
13080 X(vceq, 0x1000810, 0x0000e00, 0x1b10100), \
13081 X(vcge, 0x0000310, 0x1000e00, 0x1b10080), \
13082 X(vcgt, 0x0000300, 0x1200e00, 0x1b10000), \
13083 /* Register variants of the following two instructions are encoded as
13084 vcge / vcgt with the operands reversed. */ \
13085 X(vclt, 0x0000300, 0x1200e00, 0x1b10200), \
13086 X(vcle, 0x0000310, 0x1000e00, 0x1b10180), \
13087 X(vfma, N_INV, 0x0000c10, N_INV), \
13088 X(vfms, N_INV, 0x0200c10, N_INV), \
13089 X(vmla, 0x0000900, 0x0000d10, 0x0800040), \
13090 X(vmls, 0x1000900, 0x0200d10, 0x0800440), \
13091 X(vmul, 0x0000910, 0x1000d10, 0x0800840), \
13092 X(vmull, 0x0800c00, 0x0800e00, 0x0800a40), /* polynomial not float. */ \
13093 X(vmlal, 0x0800800, N_INV, 0x0800240), \
13094 X(vmlsl, 0x0800a00, N_INV, 0x0800640), \
13095 X(vqdmlal, 0x0800900, N_INV, 0x0800340), \
13096 X(vqdmlsl, 0x0800b00, N_INV, 0x0800740), \
13097 X(vqdmull, 0x0800d00, N_INV, 0x0800b40), \
13098 X(vqdmulh, 0x0000b00, N_INV, 0x0800c40), \
13099 X(vqrdmulh, 0x1000b00, N_INV, 0x0800d40), \
13100 X(vqrdmlah, 0x3000b10, N_INV, 0x0800e40), \
13101 X(vqrdmlsh, 0x3000c10, N_INV, 0x0800f40), \
13102 X(vshl, 0x0000400, N_INV, 0x0800510), \
13103 X(vqshl, 0x0000410, N_INV, 0x0800710), \
13104 X(vand, 0x0000110, N_INV, 0x0800030), \
13105 X(vbic, 0x0100110, N_INV, 0x0800030), \
13106 X(veor, 0x1000110, N_INV, N_INV), \
13107 X(vorn, 0x0300110, N_INV, 0x0800010), \
13108 X(vorr, 0x0200110, N_INV, 0x0800010), \
13109 X(vmvn, 0x1b00580, N_INV, 0x0800030), \
13110 X(vshll, 0x1b20300, N_INV, 0x0800a10), /* max shift, immediate. */ \
13111 X(vcvt, 0x1b30600, N_INV, 0x0800e10), /* integer, fixed-point. */ \
13112 X(vdup, 0xe800b10, N_INV, 0x1b00c00), /* arm, scalar. */ \
13113 X(vld1, 0x0200000, 0x0a00000, 0x0a00c00), /* interlv, lane, dup. */ \
13114 X(vst1, 0x0000000, 0x0800000, N_INV), \
13115 X(vld2, 0x0200100, 0x0a00100, 0x0a00d00), \
13116 X(vst2, 0x0000100, 0x0800100, N_INV), \
13117 X(vld3, 0x0200200, 0x0a00200, 0x0a00e00), \
13118 X(vst3, 0x0000200, 0x0800200, N_INV), \
13119 X(vld4, 0x0200300, 0x0a00300, 0x0a00f00), \
13120 X(vst4, 0x0000300, 0x0800300, N_INV), \
13121 X(vmovn, 0x1b20200, N_INV, N_INV), \
13122 X(vtrn, 0x1b20080, N_INV, N_INV), \
13123 X(vqmovn, 0x1b20200, N_INV, N_INV), \
13124 X(vqmovun, 0x1b20240, N_INV, N_INV), \
13125 X(vnmul, 0xe200a40, 0xe200b40, N_INV), \
13126 X(vnmla, 0xe100a40, 0xe100b40, N_INV), \
13127 X(vnmls, 0xe100a00, 0xe100b00, N_INV), \
13128 X(vfnma, 0xe900a40, 0xe900b40, N_INV), \
13129 X(vfnms, 0xe900a00, 0xe900b00, N_INV), \
13130 X(vcmp, 0xeb40a40, 0xeb40b40, N_INV), \
13131 X(vcmpz, 0xeb50a40, 0xeb50b40, N_INV), \
13132 X(vcmpe, 0xeb40ac0, 0xeb40bc0, N_INV), \
13133 X(vcmpez, 0xeb50ac0, 0xeb50bc0, N_INV), \
13134 X(vseleq, 0xe000a00, N_INV, N_INV), \
13135 X(vselvs, 0xe100a00, N_INV, N_INV), \
13136 X(vselge, 0xe200a00, N_INV, N_INV), \
13137 X(vselgt, 0xe300a00, N_INV, N_INV), \
13138 X(vmaxnm, 0xe800a00, 0x3000f10, N_INV), \
13139 X(vminnm, 0xe800a40, 0x3200f10, N_INV), \
13140 X(vcvta, 0xebc0a40, 0x3bb0000, N_INV), \
13141 X(vrintr, 0xeb60a40, 0x3ba0400, N_INV), \
13142 X(vrinta, 0xeb80a40, 0x3ba0400, N_INV), \
13143 X(aes, 0x3b00300, N_INV, N_INV), \
13144 X(sha3op, 0x2000c00, N_INV, N_INV), \
13145 X(sha1h, 0x3b902c0, N_INV, N_INV), \
13146 X(sha2op, 0x3ba0380, N_INV, N_INV)
13150 #define X(OPC,I,F,S) N_MNEM_##OPC
13155 static const struct neon_tab_entry neon_enc_tab
[] =
13157 #define X(OPC,I,F,S) { (I), (F), (S) }
13162 /* Do not use these macros; instead, use NEON_ENCODE defined below. */
13163 #define NEON_ENC_INTEGER_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
13164 #define NEON_ENC_ARMREG_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
13165 #define NEON_ENC_POLY_(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
13166 #define NEON_ENC_FLOAT_(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
13167 #define NEON_ENC_SCALAR_(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
13168 #define NEON_ENC_IMMED_(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
13169 #define NEON_ENC_INTERLV_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
13170 #define NEON_ENC_LANE_(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
13171 #define NEON_ENC_DUP_(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
13172 #define NEON_ENC_SINGLE_(X) \
13173 ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf0000000))
13174 #define NEON_ENC_DOUBLE_(X) \
13175 ((neon_enc_tab[(X) & 0x0fffffff].float_or_poly) | ((X) & 0xf0000000))
13176 #define NEON_ENC_FPV8_(X) \
13177 ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf000000))
13179 #define NEON_ENCODE(type, inst) \
13182 inst.instruction = NEON_ENC_##type##_ (inst.instruction); \
13183 inst.is_neon = 1; \
13187 #define check_neon_suffixes \
13190 if (!inst.error && inst.vectype.elems > 0 && !inst.is_neon) \
13192 as_bad (_("invalid neon suffix for non neon instruction")); \
13198 /* Define shapes for instruction operands. The following mnemonic characters
13199 are used in this table:
13201 F - VFP S<n> register
13202 D - Neon D<n> register
13203 Q - Neon Q<n> register
13207 L - D<n> register list
13209 This table is used to generate various data:
13210 - enumerations of the form NS_DDR to be used as arguments to
13212 - a table classifying shapes into single, double, quad, mixed.
13213 - a table used to drive neon_select_shape. */
13215 #define NEON_SHAPE_DEF \
13216 X(3, (D, D, D), DOUBLE), \
13217 X(3, (Q, Q, Q), QUAD), \
13218 X(3, (D, D, I), DOUBLE), \
13219 X(3, (Q, Q, I), QUAD), \
13220 X(3, (D, D, S), DOUBLE), \
13221 X(3, (Q, Q, S), QUAD), \
13222 X(2, (D, D), DOUBLE), \
13223 X(2, (Q, Q), QUAD), \
13224 X(2, (D, S), DOUBLE), \
13225 X(2, (Q, S), QUAD), \
13226 X(2, (D, R), DOUBLE), \
13227 X(2, (Q, R), QUAD), \
13228 X(2, (D, I), DOUBLE), \
13229 X(2, (Q, I), QUAD), \
13230 X(3, (D, L, D), DOUBLE), \
13231 X(2, (D, Q), MIXED), \
13232 X(2, (Q, D), MIXED), \
13233 X(3, (D, Q, I), MIXED), \
13234 X(3, (Q, D, I), MIXED), \
13235 X(3, (Q, D, D), MIXED), \
13236 X(3, (D, Q, Q), MIXED), \
13237 X(3, (Q, Q, D), MIXED), \
13238 X(3, (Q, D, S), MIXED), \
13239 X(3, (D, Q, S), MIXED), \
13240 X(4, (D, D, D, I), DOUBLE), \
13241 X(4, (Q, Q, Q, I), QUAD), \
13242 X(2, (F, F), SINGLE), \
13243 X(3, (F, F, F), SINGLE), \
13244 X(2, (F, I), SINGLE), \
13245 X(2, (F, D), MIXED), \
13246 X(2, (D, F), MIXED), \
13247 X(3, (F, F, I), MIXED), \
13248 X(4, (R, R, F, F), SINGLE), \
13249 X(4, (F, F, R, R), SINGLE), \
13250 X(3, (D, R, R), DOUBLE), \
13251 X(3, (R, R, D), DOUBLE), \
13252 X(2, (S, R), SINGLE), \
13253 X(2, (R, S), SINGLE), \
13254 X(2, (F, R), SINGLE), \
13255 X(2, (R, F), SINGLE)
13257 #define S2(A,B) NS_##A##B
13258 #define S3(A,B,C) NS_##A##B##C
13259 #define S4(A,B,C,D) NS_##A##B##C##D
13261 #define X(N, L, C) S##N L
13274 enum neon_shape_class
13282 #define X(N, L, C) SC_##C
13284 static enum neon_shape_class neon_shape_class
[] =
13302 /* Register widths of above. */
13303 static unsigned neon_shape_el_size
[] =
13314 struct neon_shape_info
13317 enum neon_shape_el el
[NEON_MAX_TYPE_ELS
];
13320 #define S2(A,B) { SE_##A, SE_##B }
13321 #define S3(A,B,C) { SE_##A, SE_##B, SE_##C }
13322 #define S4(A,B,C,D) { SE_##A, SE_##B, SE_##C, SE_##D }
13324 #define X(N, L, C) { N, S##N L }
13326 static struct neon_shape_info neon_shape_tab
[] =
13336 /* Bit masks used in type checking given instructions.
13337 'N_EQK' means the type must be the same as (or based on in some way) the key
13338 type, which itself is marked with the 'N_KEY' bit. If the 'N_EQK' bit is
13339 set, various other bits can be set as well in order to modify the meaning of
13340 the type constraint. */
13342 enum neon_type_mask
13366 N_KEY
= 0x1000000, /* Key element (main type specifier). */
13367 N_EQK
= 0x2000000, /* Given operand has the same type & size as the key. */
13368 N_VFP
= 0x4000000, /* VFP mode: operand size must match register width. */
13369 N_UNT
= 0x8000000, /* Must be explicitly untyped. */
13370 N_DBL
= 0x0000001, /* If N_EQK, this operand is twice the size. */
13371 N_HLF
= 0x0000002, /* If N_EQK, this operand is half the size. */
13372 N_SGN
= 0x0000004, /* If N_EQK, this operand is forced to be signed. */
13373 N_UNS
= 0x0000008, /* If N_EQK, this operand is forced to be unsigned. */
13374 N_INT
= 0x0000010, /* If N_EQK, this operand is forced to be integer. */
13375 N_FLT
= 0x0000020, /* If N_EQK, this operand is forced to be float. */
13376 N_SIZ
= 0x0000040, /* If N_EQK, this operand is forced to be size-only. */
13378 N_MAX_NONSPECIAL
= N_P64
13381 #define N_ALLMODS (N_DBL | N_HLF | N_SGN | N_UNS | N_INT | N_FLT | N_SIZ)
13383 #define N_SU_ALL (N_S8 | N_S16 | N_S32 | N_S64 | N_U8 | N_U16 | N_U32 | N_U64)
13384 #define N_SU_32 (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)
13385 #define N_SU_16_64 (N_S16 | N_S32 | N_S64 | N_U16 | N_U32 | N_U64)
13386 #define N_SUF_32 (N_SU_32 | N_F32)
13387 #define N_I_ALL (N_I8 | N_I16 | N_I32 | N_I64)
13388 #define N_IF_32 (N_I8 | N_I16 | N_I32 | N_F32)
13390 /* Pass this as the first type argument to neon_check_type to ignore types
13392 #define N_IGNORE_TYPE (N_KEY | N_EQK)
13394 /* Select a "shape" for the current instruction (describing register types or
13395 sizes) from a list of alternatives. Return NS_NULL if the current instruction
13396 doesn't fit. For non-polymorphic shapes, checking is usually done as a
13397 function of operand parsing, so this function doesn't need to be called.
13398 Shapes should be listed in order of decreasing length. */
13400 static enum neon_shape
13401 neon_select_shape (enum neon_shape shape
, ...)
13404 enum neon_shape first_shape
= shape
;
13406 /* Fix missing optional operands. FIXME: we don't know at this point how
13407 many arguments we should have, so this makes the assumption that we have
13408 > 1. This is true of all current Neon opcodes, I think, but may not be
13409 true in the future. */
13410 if (!inst
.operands
[1].present
)
13411 inst
.operands
[1] = inst
.operands
[0];
13413 va_start (ap
, shape
);
13415 for (; shape
!= NS_NULL
; shape
= (enum neon_shape
) va_arg (ap
, int))
13420 for (j
= 0; j
< neon_shape_tab
[shape
].els
; j
++)
13422 if (!inst
.operands
[j
].present
)
13428 switch (neon_shape_tab
[shape
].el
[j
])
13431 if (!(inst
.operands
[j
].isreg
13432 && inst
.operands
[j
].isvec
13433 && inst
.operands
[j
].issingle
13434 && !inst
.operands
[j
].isquad
))
13439 if (!(inst
.operands
[j
].isreg
13440 && inst
.operands
[j
].isvec
13441 && !inst
.operands
[j
].isquad
13442 && !inst
.operands
[j
].issingle
))
13447 if (!(inst
.operands
[j
].isreg
13448 && !inst
.operands
[j
].isvec
))
13453 if (!(inst
.operands
[j
].isreg
13454 && inst
.operands
[j
].isvec
13455 && inst
.operands
[j
].isquad
13456 && !inst
.operands
[j
].issingle
))
13461 if (!(!inst
.operands
[j
].isreg
13462 && !inst
.operands
[j
].isscalar
))
13467 if (!(!inst
.operands
[j
].isreg
13468 && inst
.operands
[j
].isscalar
))
13478 if (matches
&& (j
>= ARM_IT_MAX_OPERANDS
|| !inst
.operands
[j
].present
))
13479 /* We've matched all the entries in the shape table, and we don't
13480 have any left over operands which have not been matched. */
13486 if (shape
== NS_NULL
&& first_shape
!= NS_NULL
)
13487 first_error (_("invalid instruction shape"));
13492 /* True if SHAPE is predominantly a quadword operation (most of the time, this
13493 means the Q bit should be set). */
13496 neon_quad (enum neon_shape shape
)
13498 return neon_shape_class
[shape
] == SC_QUAD
;
13502 neon_modify_type_size (unsigned typebits
, enum neon_el_type
*g_type
,
13505 /* Allow modification to be made to types which are constrained to be
13506 based on the key element, based on bits set alongside N_EQK. */
13507 if ((typebits
& N_EQK
) != 0)
13509 if ((typebits
& N_HLF
) != 0)
13511 else if ((typebits
& N_DBL
) != 0)
13513 if ((typebits
& N_SGN
) != 0)
13514 *g_type
= NT_signed
;
13515 else if ((typebits
& N_UNS
) != 0)
13516 *g_type
= NT_unsigned
;
13517 else if ((typebits
& N_INT
) != 0)
13518 *g_type
= NT_integer
;
13519 else if ((typebits
& N_FLT
) != 0)
13520 *g_type
= NT_float
;
13521 else if ((typebits
& N_SIZ
) != 0)
13522 *g_type
= NT_untyped
;
13526 /* Return operand OPNO promoted by bits set in THISARG. KEY should be the "key"
13527 operand type, i.e. the single type specified in a Neon instruction when it
13528 is the only one given. */
13530 static struct neon_type_el
13531 neon_type_promote (struct neon_type_el
*key
, unsigned thisarg
)
13533 struct neon_type_el dest
= *key
;
13535 gas_assert ((thisarg
& N_EQK
) != 0);
13537 neon_modify_type_size (thisarg
, &dest
.type
, &dest
.size
);
13542 /* Convert Neon type and size into compact bitmask representation. */
13544 static enum neon_type_mask
13545 type_chk_of_el_type (enum neon_el_type type
, unsigned size
)
13552 case 8: return N_8
;
13553 case 16: return N_16
;
13554 case 32: return N_32
;
13555 case 64: return N_64
;
13563 case 8: return N_I8
;
13564 case 16: return N_I16
;
13565 case 32: return N_I32
;
13566 case 64: return N_I64
;
13574 case 16: return N_F16
;
13575 case 32: return N_F32
;
13576 case 64: return N_F64
;
13584 case 8: return N_P8
;
13585 case 16: return N_P16
;
13586 case 64: return N_P64
;
13594 case 8: return N_S8
;
13595 case 16: return N_S16
;
13596 case 32: return N_S32
;
13597 case 64: return N_S64
;
13605 case 8: return N_U8
;
13606 case 16: return N_U16
;
13607 case 32: return N_U32
;
13608 case 64: return N_U64
;
13619 /* Convert compact Neon bitmask type representation to a type and size. Only
13620 handles the case where a single bit is set in the mask. */
13623 el_type_of_type_chk (enum neon_el_type
*type
, unsigned *size
,
13624 enum neon_type_mask mask
)
13626 if ((mask
& N_EQK
) != 0)
13629 if ((mask
& (N_S8
| N_U8
| N_I8
| N_8
| N_P8
)) != 0)
13631 else if ((mask
& (N_S16
| N_U16
| N_I16
| N_16
| N_F16
| N_P16
)) != 0)
13633 else if ((mask
& (N_S32
| N_U32
| N_I32
| N_32
| N_F32
)) != 0)
13635 else if ((mask
& (N_S64
| N_U64
| N_I64
| N_64
| N_F64
| N_P64
)) != 0)
13640 if ((mask
& (N_S8
| N_S16
| N_S32
| N_S64
)) != 0)
13642 else if ((mask
& (N_U8
| N_U16
| N_U32
| N_U64
)) != 0)
13643 *type
= NT_unsigned
;
13644 else if ((mask
& (N_I8
| N_I16
| N_I32
| N_I64
)) != 0)
13645 *type
= NT_integer
;
13646 else if ((mask
& (N_8
| N_16
| N_32
| N_64
)) != 0)
13647 *type
= NT_untyped
;
13648 else if ((mask
& (N_P8
| N_P16
| N_P64
)) != 0)
13650 else if ((mask
& (N_F16
| N_F32
| N_F64
)) != 0)
13658 /* Modify a bitmask of allowed types. This is only needed for type
13662 modify_types_allowed (unsigned allowed
, unsigned mods
)
13665 enum neon_el_type type
;
13671 for (i
= 1; i
<= N_MAX_NONSPECIAL
; i
<<= 1)
13673 if (el_type_of_type_chk (&type
, &size
,
13674 (enum neon_type_mask
) (allowed
& i
)) == SUCCESS
)
13676 neon_modify_type_size (mods
, &type
, &size
);
13677 destmask
|= type_chk_of_el_type (type
, size
);
13684 /* Check type and return type classification.
13685 The manual states (paraphrase): If one datatype is given, it indicates the
13687 - the second operand, if there is one
13688 - the operand, if there is no second operand
13689 - the result, if there are no operands.
13690 This isn't quite good enough though, so we use a concept of a "key" datatype
13691 which is set on a per-instruction basis, which is the one which matters when
13692 only one data type is written.
13693 Note: this function has side-effects (e.g. filling in missing operands). All
13694 Neon instructions should call it before performing bit encoding. */
13696 static struct neon_type_el
13697 neon_check_type (unsigned els
, enum neon_shape ns
, ...)
13700 unsigned i
, pass
, key_el
= 0;
13701 unsigned types
[NEON_MAX_TYPE_ELS
];
13702 enum neon_el_type k_type
= NT_invtype
;
13703 unsigned k_size
= -1u;
13704 struct neon_type_el badtype
= {NT_invtype
, -1};
13705 unsigned key_allowed
= 0;
13707 /* Optional registers in Neon instructions are always (not) in operand 1.
13708 Fill in the missing operand here, if it was omitted. */
13709 if (els
> 1 && !inst
.operands
[1].present
)
13710 inst
.operands
[1] = inst
.operands
[0];
13712 /* Suck up all the varargs. */
13714 for (i
= 0; i
< els
; i
++)
13716 unsigned thisarg
= va_arg (ap
, unsigned);
13717 if (thisarg
== N_IGNORE_TYPE
)
13722 types
[i
] = thisarg
;
13723 if ((thisarg
& N_KEY
) != 0)
13728 if (inst
.vectype
.elems
> 0)
13729 for (i
= 0; i
< els
; i
++)
13730 if (inst
.operands
[i
].vectype
.type
!= NT_invtype
)
13732 first_error (_("types specified in both the mnemonic and operands"));
13736 /* Duplicate inst.vectype elements here as necessary.
13737 FIXME: No idea if this is exactly the same as the ARM assembler,
13738 particularly when an insn takes one register and one non-register
13740 if (inst
.vectype
.elems
== 1 && els
> 1)
13743 inst
.vectype
.elems
= els
;
13744 inst
.vectype
.el
[key_el
] = inst
.vectype
.el
[0];
13745 for (j
= 0; j
< els
; j
++)
13747 inst
.vectype
.el
[j
] = neon_type_promote (&inst
.vectype
.el
[key_el
],
13750 else if (inst
.vectype
.elems
== 0 && els
> 0)
13753 /* No types were given after the mnemonic, so look for types specified
13754 after each operand. We allow some flexibility here; as long as the
13755 "key" operand has a type, we can infer the others. */
13756 for (j
= 0; j
< els
; j
++)
13757 if (inst
.operands
[j
].vectype
.type
!= NT_invtype
)
13758 inst
.vectype
.el
[j
] = inst
.operands
[j
].vectype
;
13760 if (inst
.operands
[key_el
].vectype
.type
!= NT_invtype
)
13762 for (j
= 0; j
< els
; j
++)
13763 if (inst
.operands
[j
].vectype
.type
== NT_invtype
)
13764 inst
.vectype
.el
[j
] = neon_type_promote (&inst
.vectype
.el
[key_el
],
13769 first_error (_("operand types can't be inferred"));
13773 else if (inst
.vectype
.elems
!= els
)
13775 first_error (_("type specifier has the wrong number of parts"));
13779 for (pass
= 0; pass
< 2; pass
++)
13781 for (i
= 0; i
< els
; i
++)
13783 unsigned thisarg
= types
[i
];
13784 unsigned types_allowed
= ((thisarg
& N_EQK
) != 0 && pass
!= 0)
13785 ? modify_types_allowed (key_allowed
, thisarg
) : thisarg
;
13786 enum neon_el_type g_type
= inst
.vectype
.el
[i
].type
;
13787 unsigned g_size
= inst
.vectype
.el
[i
].size
;
13789 /* Decay more-specific signed & unsigned types to sign-insensitive
13790 integer types if sign-specific variants are unavailable. */
13791 if ((g_type
== NT_signed
|| g_type
== NT_unsigned
)
13792 && (types_allowed
& N_SU_ALL
) == 0)
13793 g_type
= NT_integer
;
13795 /* If only untyped args are allowed, decay any more specific types to
13796 them. Some instructions only care about signs for some element
13797 sizes, so handle that properly. */
13798 if (((types_allowed
& N_UNT
) == 0)
13799 && ((g_size
== 8 && (types_allowed
& N_8
) != 0)
13800 || (g_size
== 16 && (types_allowed
& N_16
) != 0)
13801 || (g_size
== 32 && (types_allowed
& N_32
) != 0)
13802 || (g_size
== 64 && (types_allowed
& N_64
) != 0)))
13803 g_type
= NT_untyped
;
13807 if ((thisarg
& N_KEY
) != 0)
13811 key_allowed
= thisarg
& ~N_KEY
;
13816 if ((thisarg
& N_VFP
) != 0)
13818 enum neon_shape_el regshape
;
13819 unsigned regwidth
, match
;
13821 /* PR 11136: Catch the case where we are passed a shape of NS_NULL. */
13824 first_error (_("invalid instruction shape"));
13827 regshape
= neon_shape_tab
[ns
].el
[i
];
13828 regwidth
= neon_shape_el_size
[regshape
];
13830 /* In VFP mode, operands must match register widths. If we
13831 have a key operand, use its width, else use the width of
13832 the current operand. */
13838 if (regwidth
!= match
)
13840 first_error (_("operand size must match register width"));
13845 if ((thisarg
& N_EQK
) == 0)
13847 unsigned given_type
= type_chk_of_el_type (g_type
, g_size
);
13849 if ((given_type
& types_allowed
) == 0)
13851 first_error (_("bad type in Neon instruction"));
13857 enum neon_el_type mod_k_type
= k_type
;
13858 unsigned mod_k_size
= k_size
;
13859 neon_modify_type_size (thisarg
, &mod_k_type
, &mod_k_size
);
13860 if (g_type
!= mod_k_type
|| g_size
!= mod_k_size
)
13862 first_error (_("inconsistent types in Neon instruction"));
13870 return inst
.vectype
.el
[key_el
];
13873 /* Neon-style VFP instruction forwarding. */
13875 /* Thumb VFP instructions have 0xE in the condition field. */
13878 do_vfp_cond_or_thumb (void)
13883 inst
.instruction
|= 0xe0000000;
13885 inst
.instruction
|= inst
.cond
<< 28;
13888 /* Look up and encode a simple mnemonic, for use as a helper function for the
13889 Neon-style VFP syntax. This avoids duplication of bits of the insns table,
13890 etc. It is assumed that operand parsing has already been done, and that the
13891 operands are in the form expected by the given opcode (this isn't necessarily
13892 the same as the form in which they were parsed, hence some massaging must
13893 take place before this function is called).
13894 Checks current arch version against that in the looked-up opcode. */
13897 do_vfp_nsyn_opcode (const char *opname
)
13899 const struct asm_opcode
*opcode
;
13901 opcode
= (const struct asm_opcode
*) hash_find (arm_ops_hsh
, opname
);
13906 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
,
13907 thumb_mode
? *opcode
->tvariant
: *opcode
->avariant
),
13914 inst
.instruction
= opcode
->tvalue
;
13915 opcode
->tencode ();
13919 inst
.instruction
= (inst
.cond
<< 28) | opcode
->avalue
;
13920 opcode
->aencode ();
13925 do_vfp_nsyn_add_sub (enum neon_shape rs
)
13927 int is_add
= (inst
.instruction
& 0x0fffffff) == N_MNEM_vadd
;
13932 do_vfp_nsyn_opcode ("fadds");
13934 do_vfp_nsyn_opcode ("fsubs");
13939 do_vfp_nsyn_opcode ("faddd");
13941 do_vfp_nsyn_opcode ("fsubd");
13945 /* Check operand types to see if this is a VFP instruction, and if so call
13949 try_vfp_nsyn (int args
, void (*pfn
) (enum neon_shape
))
13951 enum neon_shape rs
;
13952 struct neon_type_el et
;
13957 rs
= neon_select_shape (NS_FF
, NS_DD
, NS_NULL
);
13958 et
= neon_check_type (2, rs
,
13959 N_EQK
| N_VFP
, N_F32
| N_F64
| N_KEY
| N_VFP
);
13963 rs
= neon_select_shape (NS_FFF
, NS_DDD
, NS_NULL
);
13964 et
= neon_check_type (3, rs
,
13965 N_EQK
| N_VFP
, N_EQK
| N_VFP
, N_F32
| N_F64
| N_KEY
| N_VFP
);
13972 if (et
.type
!= NT_invtype
)
13983 do_vfp_nsyn_mla_mls (enum neon_shape rs
)
13985 int is_mla
= (inst
.instruction
& 0x0fffffff) == N_MNEM_vmla
;
13990 do_vfp_nsyn_opcode ("fmacs");
13992 do_vfp_nsyn_opcode ("fnmacs");
13997 do_vfp_nsyn_opcode ("fmacd");
13999 do_vfp_nsyn_opcode ("fnmacd");
14004 do_vfp_nsyn_fma_fms (enum neon_shape rs
)
14006 int is_fma
= (inst
.instruction
& 0x0fffffff) == N_MNEM_vfma
;
14011 do_vfp_nsyn_opcode ("ffmas");
14013 do_vfp_nsyn_opcode ("ffnmas");
14018 do_vfp_nsyn_opcode ("ffmad");
14020 do_vfp_nsyn_opcode ("ffnmad");
14025 do_vfp_nsyn_mul (enum neon_shape rs
)
14028 do_vfp_nsyn_opcode ("fmuls");
14030 do_vfp_nsyn_opcode ("fmuld");
14034 do_vfp_nsyn_abs_neg (enum neon_shape rs
)
14036 int is_neg
= (inst
.instruction
& 0x80) != 0;
14037 neon_check_type (2, rs
, N_EQK
| N_VFP
, N_F32
| N_F64
| N_VFP
| N_KEY
);
14042 do_vfp_nsyn_opcode ("fnegs");
14044 do_vfp_nsyn_opcode ("fabss");
14049 do_vfp_nsyn_opcode ("fnegd");
14051 do_vfp_nsyn_opcode ("fabsd");
14055 /* Encode single-precision (only!) VFP fldm/fstm instructions. Double precision
14056 insns belong to Neon, and are handled elsewhere. */
14059 do_vfp_nsyn_ldm_stm (int is_dbmode
)
14061 int is_ldm
= (inst
.instruction
& (1 << 20)) != 0;
14065 do_vfp_nsyn_opcode ("fldmdbs");
14067 do_vfp_nsyn_opcode ("fldmias");
14072 do_vfp_nsyn_opcode ("fstmdbs");
14074 do_vfp_nsyn_opcode ("fstmias");
14079 do_vfp_nsyn_sqrt (void)
14081 enum neon_shape rs
= neon_select_shape (NS_FF
, NS_DD
, NS_NULL
);
14082 neon_check_type (2, rs
, N_EQK
| N_VFP
, N_F32
| N_F64
| N_KEY
| N_VFP
);
14085 do_vfp_nsyn_opcode ("fsqrts");
14087 do_vfp_nsyn_opcode ("fsqrtd");
14091 do_vfp_nsyn_div (void)
14093 enum neon_shape rs
= neon_select_shape (NS_FFF
, NS_DDD
, NS_NULL
);
14094 neon_check_type (3, rs
, N_EQK
| N_VFP
, N_EQK
| N_VFP
,
14095 N_F32
| N_F64
| N_KEY
| N_VFP
);
14098 do_vfp_nsyn_opcode ("fdivs");
14100 do_vfp_nsyn_opcode ("fdivd");
14104 do_vfp_nsyn_nmul (void)
14106 enum neon_shape rs
= neon_select_shape (NS_FFF
, NS_DDD
, NS_NULL
);
14107 neon_check_type (3, rs
, N_EQK
| N_VFP
, N_EQK
| N_VFP
,
14108 N_F32
| N_F64
| N_KEY
| N_VFP
);
14112 NEON_ENCODE (SINGLE
, inst
);
14113 do_vfp_sp_dyadic ();
14117 NEON_ENCODE (DOUBLE
, inst
);
14118 do_vfp_dp_rd_rn_rm ();
14120 do_vfp_cond_or_thumb ();
14124 do_vfp_nsyn_cmp (void)
14126 if (inst
.operands
[1].isreg
)
14128 enum neon_shape rs
= neon_select_shape (NS_FF
, NS_DD
, NS_NULL
);
14129 neon_check_type (2, rs
, N_EQK
| N_VFP
, N_F32
| N_F64
| N_KEY
| N_VFP
);
14133 NEON_ENCODE (SINGLE
, inst
);
14134 do_vfp_sp_monadic ();
14138 NEON_ENCODE (DOUBLE
, inst
);
14139 do_vfp_dp_rd_rm ();
14144 enum neon_shape rs
= neon_select_shape (NS_FI
, NS_DI
, NS_NULL
);
14145 neon_check_type (2, rs
, N_F32
| N_F64
| N_KEY
| N_VFP
, N_EQK
);
14147 switch (inst
.instruction
& 0x0fffffff)
14150 inst
.instruction
+= N_MNEM_vcmpz
- N_MNEM_vcmp
;
14153 inst
.instruction
+= N_MNEM_vcmpez
- N_MNEM_vcmpe
;
14161 NEON_ENCODE (SINGLE
, inst
);
14162 do_vfp_sp_compare_z ();
14166 NEON_ENCODE (DOUBLE
, inst
);
14170 do_vfp_cond_or_thumb ();
14174 nsyn_insert_sp (void)
14176 inst
.operands
[1] = inst
.operands
[0];
14177 memset (&inst
.operands
[0], '\0', sizeof (inst
.operands
[0]));
14178 inst
.operands
[0].reg
= REG_SP
;
14179 inst
.operands
[0].isreg
= 1;
14180 inst
.operands
[0].writeback
= 1;
14181 inst
.operands
[0].present
= 1;
14185 do_vfp_nsyn_push (void)
14188 if (inst
.operands
[1].issingle
)
14189 do_vfp_nsyn_opcode ("fstmdbs");
14191 do_vfp_nsyn_opcode ("fstmdbd");
14195 do_vfp_nsyn_pop (void)
14198 if (inst
.operands
[1].issingle
)
14199 do_vfp_nsyn_opcode ("fldmias");
14201 do_vfp_nsyn_opcode ("fldmiad");
14204 /* Fix up Neon data-processing instructions, ORing in the correct bits for
14205 ARM mode or Thumb mode and moving the encoded bit 24 to bit 28. */
14208 neon_dp_fixup (struct arm_it
* insn
)
14210 unsigned int i
= insn
->instruction
;
14215 /* The U bit is at bit 24 by default. Move to bit 28 in Thumb mode. */
14226 insn
->instruction
= i
;
/* Turn a size (8, 16, 32, 64) into the respective bit number minus 3
   (0, 1, 2, 3 respectively) in the form used by Neon size fields.
   X must be a power of two with its set bit at position >= 3.  */

static unsigned
neon_logbits (unsigned x)
{
  /* ffs returns the 1-based index of the lowest set bit.  */
  return ffs (x) - 4;
}
14238 #define LOW4(R) ((R) & 0xf)
14239 #define HI1(R) (((R) >> 4) & 1)
14241 /* Encode insns with bit pattern:
14243 |28/24|23|22 |21 20|19 16|15 12|11 8|7|6|5|4|3 0|
14244 | U |x |D |size | Rn | Rd |x x x x|N|Q|M|x| Rm |
14246 SIZE is passed in bits. -1 means size field isn't changed, in case it has a
14247 different meaning for some instruction. */
14250 neon_three_same (int isquad
, int ubit
, int size
)
14252 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
14253 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
14254 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
14255 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
14256 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
14257 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
14258 inst
.instruction
|= (isquad
!= 0) << 6;
14259 inst
.instruction
|= (ubit
!= 0) << 24;
14261 inst
.instruction
|= neon_logbits (size
) << 20;
14263 neon_dp_fixup (&inst
);
14266 /* Encode instructions of the form:
14268 |28/24|23|22|21 20|19 18|17 16|15 12|11 7|6|5|4|3 0|
14269 | U |x |D |x x |size |x x | Rd |x x x x x|Q|M|x| Rm |
14271 Don't write size if SIZE == -1. */
14274 neon_two_same (int qbit
, int ubit
, int size
)
14276 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
14277 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
14278 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
14279 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
14280 inst
.instruction
|= (qbit
!= 0) << 6;
14281 inst
.instruction
|= (ubit
!= 0) << 24;
14284 inst
.instruction
|= neon_logbits (size
) << 18;
14286 neon_dp_fixup (&inst
);
14289 /* Neon instruction encoders, in approximate order of appearance. */
14292 do_neon_dyadic_i_su (void)
14294 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
14295 struct neon_type_el et
= neon_check_type (3, rs
,
14296 N_EQK
, N_EQK
, N_SU_32
| N_KEY
);
14297 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
14301 do_neon_dyadic_i64_su (void)
14303 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
14304 struct neon_type_el et
= neon_check_type (3, rs
,
14305 N_EQK
, N_EQK
, N_SU_ALL
| N_KEY
);
14306 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
14310 neon_imm_shift (int write_ubit
, int uval
, int isquad
, struct neon_type_el et
,
14313 unsigned size
= et
.size
>> 3;
14314 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
14315 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
14316 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
14317 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
14318 inst
.instruction
|= (isquad
!= 0) << 6;
14319 inst
.instruction
|= immbits
<< 16;
14320 inst
.instruction
|= (size
>> 3) << 7;
14321 inst
.instruction
|= (size
& 0x7) << 19;
14323 inst
.instruction
|= (uval
!= 0) << 24;
14325 neon_dp_fixup (&inst
);
14329 do_neon_shl_imm (void)
14331 if (!inst
.operands
[2].isreg
)
14333 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
14334 struct neon_type_el et
= neon_check_type (2, rs
, N_EQK
, N_KEY
| N_I_ALL
);
14335 int imm
= inst
.operands
[2].imm
;
14337 constraint (imm
< 0 || (unsigned)imm
>= et
.size
,
14338 _("immediate out of range for shift"));
14339 NEON_ENCODE (IMMED
, inst
);
14340 neon_imm_shift (FALSE
, 0, neon_quad (rs
), et
, imm
);
14344 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
14345 struct neon_type_el et
= neon_check_type (3, rs
,
14346 N_EQK
, N_SU_ALL
| N_KEY
, N_EQK
| N_SGN
);
14349 /* VSHL/VQSHL 3-register variants have syntax such as:
14351 whereas other 3-register operations encoded by neon_three_same have
14354 (i.e. with Dn & Dm reversed). Swap operands[1].reg and operands[2].reg
14356 tmp
= inst
.operands
[2].reg
;
14357 inst
.operands
[2].reg
= inst
.operands
[1].reg
;
14358 inst
.operands
[1].reg
= tmp
;
14359 NEON_ENCODE (INTEGER
, inst
);
14360 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
14365 do_neon_qshl_imm (void)
14367 if (!inst
.operands
[2].isreg
)
14369 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
14370 struct neon_type_el et
= neon_check_type (2, rs
, N_EQK
, N_SU_ALL
| N_KEY
);
14371 int imm
= inst
.operands
[2].imm
;
14373 constraint (imm
< 0 || (unsigned)imm
>= et
.size
,
14374 _("immediate out of range for shift"));
14375 NEON_ENCODE (IMMED
, inst
);
14376 neon_imm_shift (TRUE
, et
.type
== NT_unsigned
, neon_quad (rs
), et
, imm
);
14380 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
14381 struct neon_type_el et
= neon_check_type (3, rs
,
14382 N_EQK
, N_SU_ALL
| N_KEY
, N_EQK
| N_SGN
);
14385 /* See note in do_neon_shl_imm. */
14386 tmp
= inst
.operands
[2].reg
;
14387 inst
.operands
[2].reg
= inst
.operands
[1].reg
;
14388 inst
.operands
[1].reg
= tmp
;
14389 NEON_ENCODE (INTEGER
, inst
);
14390 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
14395 do_neon_rshl (void)
14397 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
14398 struct neon_type_el et
= neon_check_type (3, rs
,
14399 N_EQK
, N_EQK
, N_SU_ALL
| N_KEY
);
14402 tmp
= inst
.operands
[2].reg
;
14403 inst
.operands
[2].reg
= inst
.operands
[1].reg
;
14404 inst
.operands
[1].reg
= tmp
;
14405 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
14409 neon_cmode_for_logic_imm (unsigned immediate
, unsigned *immbits
, int size
)
14411 /* Handle .I8 pseudo-instructions. */
14414 /* Unfortunately, this will make everything apart from zero out-of-range.
14415 FIXME is this the intended semantics? There doesn't seem much point in
14416 accepting .I8 if so. */
14417 immediate
|= immediate
<< 8;
14423 if (immediate
== (immediate
& 0x000000ff))
14425 *immbits
= immediate
;
14428 else if (immediate
== (immediate
& 0x0000ff00))
14430 *immbits
= immediate
>> 8;
14433 else if (immediate
== (immediate
& 0x00ff0000))
14435 *immbits
= immediate
>> 16;
14438 else if (immediate
== (immediate
& 0xff000000))
14440 *immbits
= immediate
>> 24;
14443 if ((immediate
& 0xffff) != (immediate
>> 16))
14444 goto bad_immediate
;
14445 immediate
&= 0xffff;
14448 if (immediate
== (immediate
& 0x000000ff))
14450 *immbits
= immediate
;
14453 else if (immediate
== (immediate
& 0x0000ff00))
14455 *immbits
= immediate
>> 8;
14460 first_error (_("immediate value out of range"));
14465 do_neon_logic (void)
14467 if (inst
.operands
[2].present
&& inst
.operands
[2].isreg
)
14469 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
14470 neon_check_type (3, rs
, N_IGNORE_TYPE
);
14471 /* U bit and size field were set as part of the bitmask. */
14472 NEON_ENCODE (INTEGER
, inst
);
14473 neon_three_same (neon_quad (rs
), 0, -1);
14477 const int three_ops_form
= (inst
.operands
[2].present
14478 && !inst
.operands
[2].isreg
);
14479 const int immoperand
= (three_ops_form
? 2 : 1);
14480 enum neon_shape rs
= (three_ops_form
14481 ? neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
)
14482 : neon_select_shape (NS_DI
, NS_QI
, NS_NULL
));
14483 struct neon_type_el et
= neon_check_type (2, rs
,
14484 N_I8
| N_I16
| N_I32
| N_I64
| N_F32
| N_KEY
, N_EQK
);
14485 enum neon_opc opcode
= (enum neon_opc
) inst
.instruction
& 0x0fffffff;
14489 if (et
.type
== NT_invtype
)
14492 if (three_ops_form
)
14493 constraint (inst
.operands
[0].reg
!= inst
.operands
[1].reg
,
14494 _("first and second operands shall be the same register"));
14496 NEON_ENCODE (IMMED
, inst
);
14498 immbits
= inst
.operands
[immoperand
].imm
;
14501 /* .i64 is a pseudo-op, so the immediate must be a repeating
14503 if (immbits
!= (inst
.operands
[immoperand
].regisimm
?
14504 inst
.operands
[immoperand
].reg
: 0))
14506 /* Set immbits to an invalid constant. */
14507 immbits
= 0xdeadbeef;
14514 cmode
= neon_cmode_for_logic_imm (immbits
, &immbits
, et
.size
);
14518 cmode
= neon_cmode_for_logic_imm (immbits
, &immbits
, et
.size
);
14522 /* Pseudo-instruction for VBIC. */
14523 neon_invert_size (&immbits
, 0, et
.size
);
14524 cmode
= neon_cmode_for_logic_imm (immbits
, &immbits
, et
.size
);
14528 /* Pseudo-instruction for VORR. */
14529 neon_invert_size (&immbits
, 0, et
.size
);
14530 cmode
= neon_cmode_for_logic_imm (immbits
, &immbits
, et
.size
);
14540 inst
.instruction
|= neon_quad (rs
) << 6;
14541 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
14542 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
14543 inst
.instruction
|= cmode
<< 8;
14544 neon_write_immbits (immbits
);
14546 neon_dp_fixup (&inst
);
14551 do_neon_bitfield (void)
14553 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
14554 neon_check_type (3, rs
, N_IGNORE_TYPE
);
14555 neon_three_same (neon_quad (rs
), 0, -1);
14559 neon_dyadic_misc (enum neon_el_type ubit_meaning
, unsigned types
,
14562 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
14563 struct neon_type_el et
= neon_check_type (3, rs
, N_EQK
| destbits
, N_EQK
,
14565 if (et
.type
== NT_float
)
14567 NEON_ENCODE (FLOAT
, inst
);
14568 neon_three_same (neon_quad (rs
), 0, -1);
14572 NEON_ENCODE (INTEGER
, inst
);
14573 neon_three_same (neon_quad (rs
), et
.type
== ubit_meaning
, et
.size
);
14578 do_neon_dyadic_if_su (void)
14580 neon_dyadic_misc (NT_unsigned
, N_SUF_32
, 0);
14584 do_neon_dyadic_if_su_d (void)
14586 /* This version only allow D registers, but that constraint is enforced during
14587 operand parsing so we don't need to do anything extra here. */
14588 neon_dyadic_misc (NT_unsigned
, N_SUF_32
, 0);
14592 do_neon_dyadic_if_i_d (void)
14594 /* The "untyped" case can't happen. Do this to stop the "U" bit being
14595 affected if we specify unsigned args. */
14596 neon_dyadic_misc (NT_untyped
, N_IF_32
, 0);
/* Flag bits selecting which checks vfp_or_neon_is_neon performs.  */
enum vfp_or_neon_is_neon_bits
{
  NEON_CHECK_CC = 1,
  NEON_CHECK_ARCH = 2,
  NEON_CHECK_ARCH8 = 4
};
14606 /* Call this function if an instruction which may have belonged to the VFP or
14607 Neon instruction sets, but turned out to be a Neon instruction (due to the
14608 operand types involved, etc.). We have to check and/or fix-up a couple of
14611 - Make sure the user hasn't attempted to make a Neon instruction
14613 - Alter the value in the condition code field if necessary.
14614 - Make sure that the arch supports Neon instructions.
14616 Which of these operations take place depends on bits from enum
14617 vfp_or_neon_is_neon_bits.
14619 WARNING: This function has side effects! If NEON_CHECK_CC is used and the
14620 current instruction's condition is COND_ALWAYS, the condition field is
14621 changed to inst.uncond_value. This is necessary because instructions shared
14622 between VFP and Neon may be conditional for the VFP variants only, and the
14623 unconditional Neon version must have, e.g., 0xF in the condition field. */
14626 vfp_or_neon_is_neon (unsigned check
)
14628 /* Conditions are always legal in Thumb mode (IT blocks). */
14629 if (!thumb_mode
&& (check
& NEON_CHECK_CC
))
14631 if (inst
.cond
!= COND_ALWAYS
)
14633 first_error (_(BAD_COND
));
14636 if (inst
.uncond_value
!= -1)
14637 inst
.instruction
|= inst
.uncond_value
<< 28;
14640 if ((check
& NEON_CHECK_ARCH
)
14641 && !mark_feature_used (&fpu_neon_ext_v1
))
14643 first_error (_(BAD_FPU
));
14647 if ((check
& NEON_CHECK_ARCH8
)
14648 && !mark_feature_used (&fpu_neon_ext_armv8
))
14650 first_error (_(BAD_FPU
));
14658 do_neon_addsub_if_i (void)
14660 if (try_vfp_nsyn (3, do_vfp_nsyn_add_sub
) == SUCCESS
)
14663 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
14666 /* The "untyped" case can't happen. Do this to stop the "U" bit being
14667 affected if we specify unsigned args. */
14668 neon_dyadic_misc (NT_untyped
, N_IF_32
| N_I64
, 0);
14671 /* Swaps operands 1 and 2. If operand 1 (optional arg) was omitted, we want the
14673 V<op> A,B (A is operand 0, B is operand 2)
14678 so handle that case specially. */
14681 neon_exchange_operands (void)
14683 void *scratch
= alloca (sizeof (inst
.operands
[0]));
14684 if (inst
.operands
[1].present
)
14686 /* Swap operands[1] and operands[2]. */
14687 memcpy (scratch
, &inst
.operands
[1], sizeof (inst
.operands
[0]));
14688 inst
.operands
[1] = inst
.operands
[2];
14689 memcpy (&inst
.operands
[2], scratch
, sizeof (inst
.operands
[0]));
14693 inst
.operands
[1] = inst
.operands
[2];
14694 inst
.operands
[2] = inst
.operands
[0];
14699 neon_compare (unsigned regtypes
, unsigned immtypes
, int invert
)
14701 if (inst
.operands
[2].isreg
)
14704 neon_exchange_operands ();
14705 neon_dyadic_misc (NT_unsigned
, regtypes
, N_SIZ
);
14709 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
14710 struct neon_type_el et
= neon_check_type (2, rs
,
14711 N_EQK
| N_SIZ
, immtypes
| N_KEY
);
14713 NEON_ENCODE (IMMED
, inst
);
14714 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
14715 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
14716 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
14717 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
14718 inst
.instruction
|= neon_quad (rs
) << 6;
14719 inst
.instruction
|= (et
.type
== NT_float
) << 10;
14720 inst
.instruction
|= neon_logbits (et
.size
) << 18;
14722 neon_dp_fixup (&inst
);
14729 neon_compare (N_SUF_32
, N_S8
| N_S16
| N_S32
| N_F32
, FALSE
);
14733 do_neon_cmp_inv (void)
14735 neon_compare (N_SUF_32
, N_S8
| N_S16
| N_S32
| N_F32
, TRUE
);
14741 neon_compare (N_IF_32
, N_IF_32
, FALSE
);
14744 /* For multiply instructions, we have the possibility of 16-bit or 32-bit
14745 scalars, which are encoded in 5 bits, M : Rm.
14746 For 16-bit scalars, the register is encoded in Rm[2:0] and the index in
14747 M:Rm[3], and for 32-bit scalars, the register is encoded in Rm[3:0] and the
14751 neon_scalar_for_mul (unsigned scalar
, unsigned elsize
)
14753 unsigned regno
= NEON_SCALAR_REG (scalar
);
14754 unsigned elno
= NEON_SCALAR_INDEX (scalar
);
14759 if (regno
> 7 || elno
> 3)
14761 return regno
| (elno
<< 3);
14764 if (regno
> 15 || elno
> 1)
14766 return regno
| (elno
<< 4);
14770 first_error (_("scalar out of range for multiply instruction"));
14776 /* Encode multiply / multiply-accumulate scalar instructions. */
14779 neon_mul_mac (struct neon_type_el et
, int ubit
)
14783 /* Give a more helpful error message if we have an invalid type. */
14784 if (et
.type
== NT_invtype
)
14787 scalar
= neon_scalar_for_mul (inst
.operands
[2].reg
, et
.size
);
14788 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
14789 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
14790 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
14791 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
14792 inst
.instruction
|= LOW4 (scalar
);
14793 inst
.instruction
|= HI1 (scalar
) << 5;
14794 inst
.instruction
|= (et
.type
== NT_float
) << 8;
14795 inst
.instruction
|= neon_logbits (et
.size
) << 20;
14796 inst
.instruction
|= (ubit
!= 0) << 24;
14798 neon_dp_fixup (&inst
);
14802 do_neon_mac_maybe_scalar (void)
14804 if (try_vfp_nsyn (3, do_vfp_nsyn_mla_mls
) == SUCCESS
)
14807 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
14810 if (inst
.operands
[2].isscalar
)
14812 enum neon_shape rs
= neon_select_shape (NS_DDS
, NS_QQS
, NS_NULL
);
14813 struct neon_type_el et
= neon_check_type (3, rs
,
14814 N_EQK
, N_EQK
, N_I16
| N_I32
| N_F32
| N_KEY
);
14815 NEON_ENCODE (SCALAR
, inst
);
14816 neon_mul_mac (et
, neon_quad (rs
));
14820 /* The "untyped" case can't happen. Do this to stop the "U" bit being
14821 affected if we specify unsigned args. */
14822 neon_dyadic_misc (NT_untyped
, N_IF_32
, 0);
14827 do_neon_fmac (void)
14829 if (try_vfp_nsyn (3, do_vfp_nsyn_fma_fms
) == SUCCESS
)
14832 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
14835 neon_dyadic_misc (NT_untyped
, N_IF_32
, 0);
14841 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
14842 struct neon_type_el et
= neon_check_type (3, rs
,
14843 N_EQK
, N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
14844 neon_three_same (neon_quad (rs
), 0, et
.size
);
14847 /* VMUL with 3 registers allows the P8 type. The scalar version supports the
14848 same types as the MAC equivalents. The polynomial type for this instruction
14849 is encoded the same as the integer type. */
14854 if (try_vfp_nsyn (3, do_vfp_nsyn_mul
) == SUCCESS
)
14857 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
14860 if (inst
.operands
[2].isscalar
)
14861 do_neon_mac_maybe_scalar ();
14863 neon_dyadic_misc (NT_poly
, N_I8
| N_I16
| N_I32
| N_F32
| N_P8
, 0);
14867 do_neon_qdmulh (void)
14869 if (inst
.operands
[2].isscalar
)
14871 enum neon_shape rs
= neon_select_shape (NS_DDS
, NS_QQS
, NS_NULL
);
14872 struct neon_type_el et
= neon_check_type (3, rs
,
14873 N_EQK
, N_EQK
, N_S16
| N_S32
| N_KEY
);
14874 NEON_ENCODE (SCALAR
, inst
);
14875 neon_mul_mac (et
, neon_quad (rs
));
14879 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
14880 struct neon_type_el et
= neon_check_type (3, rs
,
14881 N_EQK
, N_EQK
, N_S16
| N_S32
| N_KEY
);
14882 NEON_ENCODE (INTEGER
, inst
);
14883 /* The U bit (rounding) comes from bit mask. */
14884 neon_three_same (neon_quad (rs
), 0, et
.size
);
14889 do_neon_fcmp_absolute (void)
14891 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
14892 neon_check_type (3, rs
, N_EQK
, N_EQK
, N_F32
| N_KEY
);
14893 /* Size field comes from bit mask. */
14894 neon_three_same (neon_quad (rs
), 1, -1);
14898 do_neon_fcmp_absolute_inv (void)
14900 neon_exchange_operands ();
14901 do_neon_fcmp_absolute ();
14905 do_neon_step (void)
14907 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
14908 neon_check_type (3, rs
, N_EQK
, N_EQK
, N_F32
| N_KEY
);
14909 neon_three_same (neon_quad (rs
), 0, -1);
14913 do_neon_abs_neg (void)
14915 enum neon_shape rs
;
14916 struct neon_type_el et
;
14918 if (try_vfp_nsyn (2, do_vfp_nsyn_abs_neg
) == SUCCESS
)
14921 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
14924 rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
14925 et
= neon_check_type (2, rs
, N_EQK
, N_S8
| N_S16
| N_S32
| N_F32
| N_KEY
);
14927 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
14928 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
14929 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
14930 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
14931 inst
.instruction
|= neon_quad (rs
) << 6;
14932 inst
.instruction
|= (et
.type
== NT_float
) << 10;
14933 inst
.instruction
|= neon_logbits (et
.size
) << 18;
14935 neon_dp_fixup (&inst
);
14941 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
14942 struct neon_type_el et
= neon_check_type (2, rs
,
14943 N_EQK
, N_8
| N_16
| N_32
| N_64
| N_KEY
);
14944 int imm
= inst
.operands
[2].imm
;
14945 constraint (imm
< 0 || (unsigned)imm
>= et
.size
,
14946 _("immediate out of range for insert"));
14947 neon_imm_shift (FALSE
, 0, neon_quad (rs
), et
, imm
);
14953 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
14954 struct neon_type_el et
= neon_check_type (2, rs
,
14955 N_EQK
, N_8
| N_16
| N_32
| N_64
| N_KEY
);
14956 int imm
= inst
.operands
[2].imm
;
14957 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
14958 _("immediate out of range for insert"));
14959 neon_imm_shift (FALSE
, 0, neon_quad (rs
), et
, et
.size
- imm
);
14963 do_neon_qshlu_imm (void)
14965 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
14966 struct neon_type_el et
= neon_check_type (2, rs
,
14967 N_EQK
| N_UNS
, N_S8
| N_S16
| N_S32
| N_S64
| N_KEY
);
14968 int imm
= inst
.operands
[2].imm
;
14969 constraint (imm
< 0 || (unsigned)imm
>= et
.size
,
14970 _("immediate out of range for shift"));
14971 /* Only encodes the 'U present' variant of the instruction.
14972 In this case, signed types have OP (bit 8) set to 0.
14973 Unsigned types have OP set to 1. */
14974 inst
.instruction
|= (et
.type
== NT_unsigned
) << 8;
14975 /* The rest of the bits are the same as other immediate shifts. */
14976 neon_imm_shift (FALSE
, 0, neon_quad (rs
), et
, imm
);
14980 do_neon_qmovn (void)
14982 struct neon_type_el et
= neon_check_type (2, NS_DQ
,
14983 N_EQK
| N_HLF
, N_SU_16_64
| N_KEY
);
14984 /* Saturating move where operands can be signed or unsigned, and the
14985 destination has the same signedness. */
14986 NEON_ENCODE (INTEGER
, inst
);
14987 if (et
.type
== NT_unsigned
)
14988 inst
.instruction
|= 0xc0;
14990 inst
.instruction
|= 0x80;
14991 neon_two_same (0, 1, et
.size
/ 2);
14995 do_neon_qmovun (void)
14997 struct neon_type_el et
= neon_check_type (2, NS_DQ
,
14998 N_EQK
| N_HLF
| N_UNS
, N_S16
| N_S32
| N_S64
| N_KEY
);
14999 /* Saturating move with unsigned results. Operands must be signed. */
15000 NEON_ENCODE (INTEGER
, inst
);
15001 neon_two_same (0, 1, et
.size
/ 2);
15005 do_neon_rshift_sat_narrow (void)
15007 /* FIXME: Types for narrowing. If operands are signed, results can be signed
15008 or unsigned. If operands are unsigned, results must also be unsigned. */
15009 struct neon_type_el et
= neon_check_type (2, NS_DQI
,
15010 N_EQK
| N_HLF
, N_SU_16_64
| N_KEY
);
15011 int imm
= inst
.operands
[2].imm
;
15012 /* This gets the bounds check, size encoding and immediate bits calculation
15016 /* VQ{R}SHRN.I<size> <Dd>, <Qm>, #0 is a synonym for
15017 VQMOVN.I<size> <Dd>, <Qm>. */
15020 inst
.operands
[2].present
= 0;
15021 inst
.instruction
= N_MNEM_vqmovn
;
15026 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
15027 _("immediate out of range"));
15028 neon_imm_shift (TRUE
, et
.type
== NT_unsigned
, 0, et
, et
.size
- imm
);
15032 do_neon_rshift_sat_narrow_u (void)
15034 /* FIXME: Types for narrowing. If operands are signed, results can be signed
15035 or unsigned. If operands are unsigned, results must also be unsigned. */
15036 struct neon_type_el et
= neon_check_type (2, NS_DQI
,
15037 N_EQK
| N_HLF
| N_UNS
, N_S16
| N_S32
| N_S64
| N_KEY
);
15038 int imm
= inst
.operands
[2].imm
;
15039 /* This gets the bounds check, size encoding and immediate bits calculation
15043 /* VQSHRUN.I<size> <Dd>, <Qm>, #0 is a synonym for
15044 VQMOVUN.I<size> <Dd>, <Qm>. */
15047 inst
.operands
[2].present
= 0;
15048 inst
.instruction
= N_MNEM_vqmovun
;
15053 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
15054 _("immediate out of range"));
15055 /* FIXME: The manual is kind of unclear about what value U should have in
15056 VQ{R}SHRUN instructions, but U=0, op=0 definitely encodes VRSHR, so it
15058 neon_imm_shift (TRUE
, 1, 0, et
, et
.size
- imm
);
15062 do_neon_movn (void)
15064 struct neon_type_el et
= neon_check_type (2, NS_DQ
,
15065 N_EQK
| N_HLF
, N_I16
| N_I32
| N_I64
| N_KEY
);
15066 NEON_ENCODE (INTEGER
, inst
);
15067 neon_two_same (0, 1, et
.size
/ 2);
15071 do_neon_rshift_narrow (void)
15073 struct neon_type_el et
= neon_check_type (2, NS_DQI
,
15074 N_EQK
| N_HLF
, N_I16
| N_I32
| N_I64
| N_KEY
);
15075 int imm
= inst
.operands
[2].imm
;
15076 /* This gets the bounds check, size encoding and immediate bits calculation
15080 /* If immediate is zero then we are a pseudo-instruction for
15081 VMOVN.I<size> <Dd>, <Qm> */
15084 inst
.operands
[2].present
= 0;
15085 inst
.instruction
= N_MNEM_vmovn
;
15090 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
15091 _("immediate out of range for narrowing operation"));
15092 neon_imm_shift (FALSE
, 0, 0, et
, et
.size
- imm
);
15096 do_neon_shll (void)
15098 /* FIXME: Type checking when lengthening. */
15099 struct neon_type_el et
= neon_check_type (2, NS_QDI
,
15100 N_EQK
| N_DBL
, N_I8
| N_I16
| N_I32
| N_KEY
);
15101 unsigned imm
= inst
.operands
[2].imm
;
15103 if (imm
== et
.size
)
15105 /* Maximum shift variant. */
15106 NEON_ENCODE (INTEGER
, inst
);
15107 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15108 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15109 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
15110 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
15111 inst
.instruction
|= neon_logbits (et
.size
) << 18;
15113 neon_dp_fixup (&inst
);
15117 /* A more-specific type check for non-max versions. */
15118 et
= neon_check_type (2, NS_QDI
,
15119 N_EQK
| N_DBL
, N_SU_32
| N_KEY
);
15120 NEON_ENCODE (IMMED
, inst
);
15121 neon_imm_shift (TRUE
, et
.type
== NT_unsigned
, 0, et
, imm
);
15125 /* Check the various types for the VCVT instruction, and return which version
15126 the current instruction is. */
15128 #define CVT_FLAVOUR_VAR \
15129 CVT_VAR (s32_f32, N_S32, N_F32, whole_reg, "ftosls", "ftosis", "ftosizs") \
15130 CVT_VAR (u32_f32, N_U32, N_F32, whole_reg, "ftouls", "ftouis", "ftouizs") \
15131 CVT_VAR (f32_s32, N_F32, N_S32, whole_reg, "fsltos", "fsitos", NULL) \
15132 CVT_VAR (f32_u32, N_F32, N_U32, whole_reg, "fultos", "fuitos", NULL) \
15133 /* Half-precision conversions. */ \
15134 CVT_VAR (f32_f16, N_F32, N_F16, whole_reg, NULL, NULL, NULL) \
15135 CVT_VAR (f16_f32, N_F16, N_F32, whole_reg, NULL, NULL, NULL) \
15136 /* VFP instructions. */ \
15137 CVT_VAR (f32_f64, N_F32, N_F64, N_VFP, NULL, "fcvtsd", NULL) \
15138 CVT_VAR (f64_f32, N_F64, N_F32, N_VFP, NULL, "fcvtds", NULL) \
15139 CVT_VAR (s32_f64, N_S32, N_F64 | key, N_VFP, "ftosld", "ftosid", "ftosizd") \
15140 CVT_VAR (u32_f64, N_U32, N_F64 | key, N_VFP, "ftould", "ftouid", "ftouizd") \
15141 CVT_VAR (f64_s32, N_F64 | key, N_S32, N_VFP, "fsltod", "fsitod", NULL) \
15142 CVT_VAR (f64_u32, N_F64 | key, N_U32, N_VFP, "fultod", "fuitod", NULL) \
15143 /* VFP instructions with bitshift. */ \
15144 CVT_VAR (f32_s16, N_F32 | key, N_S16, N_VFP, "fshtos", NULL, NULL) \
15145 CVT_VAR (f32_u16, N_F32 | key, N_U16, N_VFP, "fuhtos", NULL, NULL) \
15146 CVT_VAR (f64_s16, N_F64 | key, N_S16, N_VFP, "fshtod", NULL, NULL) \
15147 CVT_VAR (f64_u16, N_F64 | key, N_U16, N_VFP, "fuhtod", NULL, NULL) \
15148 CVT_VAR (s16_f32, N_S16, N_F32 | key, N_VFP, "ftoshs", NULL, NULL) \
15149 CVT_VAR (u16_f32, N_U16, N_F32 | key, N_VFP, "ftouhs", NULL, NULL) \
15150 CVT_VAR (s16_f64, N_S16, N_F64 | key, N_VFP, "ftoshd", NULL, NULL) \
15151 CVT_VAR (u16_f64, N_U16, N_F64 | key, N_VFP, "ftouhd", NULL, NULL)
15153 #define CVT_VAR(C, X, Y, R, BSN, CN, ZN) \
15154 neon_cvt_flavour_##C,
15156 /* The different types of conversions we can do. */
15157 enum neon_cvt_flavour
15160 neon_cvt_flavour_invalid
,
15161 neon_cvt_flavour_first_fp
= neon_cvt_flavour_f32_f64
15166 static enum neon_cvt_flavour
15167 get_neon_cvt_flavour (enum neon_shape rs
)
15169 #define CVT_VAR(C,X,Y,R,BSN,CN,ZN) \
15170 et = neon_check_type (2, rs, (R) | (X), (R) | (Y)); \
15171 if (et.type != NT_invtype) \
15173 inst.error = NULL; \
15174 return (neon_cvt_flavour_##C); \
15177 struct neon_type_el et
;
15178 unsigned whole_reg
= (rs
== NS_FFI
|| rs
== NS_FD
|| rs
== NS_DF
15179 || rs
== NS_FF
) ? N_VFP
: 0;
15180 /* The instruction versions which take an immediate take one register
15181 argument, which is extended to the width of the full register. Thus the
15182 "source" and "destination" registers must have the same width. Hack that
15183 here by making the size equal to the key (wider, in this case) operand. */
15184 unsigned key
= (rs
== NS_QQI
|| rs
== NS_DDI
|| rs
== NS_FFI
) ? N_KEY
: 0;
15188 return neon_cvt_flavour_invalid
;
15203 /* Neon-syntax VFP conversions. */
15206 do_vfp_nsyn_cvt (enum neon_shape rs
, enum neon_cvt_flavour flavour
)
15208 const char *opname
= 0;
15210 if (rs
== NS_DDI
|| rs
== NS_QQI
|| rs
== NS_FFI
)
15212 /* Conversions with immediate bitshift. */
15213 const char *enc
[] =
15215 #define CVT_VAR(C,A,B,R,BSN,CN,ZN) BSN,
15221 if (flavour
< (int) ARRAY_SIZE (enc
))
15223 opname
= enc
[flavour
];
15224 constraint (inst
.operands
[0].reg
!= inst
.operands
[1].reg
,
15225 _("operands 0 and 1 must be the same register"));
15226 inst
.operands
[1] = inst
.operands
[2];
15227 memset (&inst
.operands
[2], '\0', sizeof (inst
.operands
[2]));
15232 /* Conversions without bitshift. */
15233 const char *enc
[] =
15235 #define CVT_VAR(C,A,B,R,BSN,CN,ZN) CN,
15241 if (flavour
< (int) ARRAY_SIZE (enc
))
15242 opname
= enc
[flavour
];
15246 do_vfp_nsyn_opcode (opname
);
15250 do_vfp_nsyn_cvtz (void)
15252 enum neon_shape rs
= neon_select_shape (NS_FF
, NS_FD
, NS_NULL
);
15253 enum neon_cvt_flavour flavour
= get_neon_cvt_flavour (rs
);
15254 const char *enc
[] =
15256 #define CVT_VAR(C,A,B,R,BSN,CN,ZN) ZN,
15262 if (flavour
< (int) ARRAY_SIZE (enc
) && enc
[flavour
])
15263 do_vfp_nsyn_opcode (enc
[flavour
]);
15267 do_vfp_nsyn_cvt_fpv8 (enum neon_cvt_flavour flavour
,
15268 enum neon_cvt_mode mode
)
15273 /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
15274 D register operands. */
15275 if (flavour
== neon_cvt_flavour_s32_f64
15276 || flavour
== neon_cvt_flavour_u32_f64
)
15277 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
15280 set_it_insn_type (OUTSIDE_IT_INSN
);
15284 case neon_cvt_flavour_s32_f64
:
15288 case neon_cvt_flavour_s32_f32
:
15292 case neon_cvt_flavour_u32_f64
:
15296 case neon_cvt_flavour_u32_f32
:
15301 first_error (_("invalid instruction shape"));
15307 case neon_cvt_mode_a
: rm
= 0; break;
15308 case neon_cvt_mode_n
: rm
= 1; break;
15309 case neon_cvt_mode_p
: rm
= 2; break;
15310 case neon_cvt_mode_m
: rm
= 3; break;
15311 default: first_error (_("invalid rounding mode")); return;
15314 NEON_ENCODE (FPV8
, inst
);
15315 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
15316 encode_arm_vfp_reg (inst
.operands
[1].reg
, sz
== 1 ? VFP_REG_Dm
: VFP_REG_Sm
);
15317 inst
.instruction
|= sz
<< 8;
15318 inst
.instruction
|= op
<< 7;
15319 inst
.instruction
|= rm
<< 16;
15320 inst
.instruction
|= 0xf0000000;
15321 inst
.is_neon
= TRUE
;
15325 do_neon_cvt_1 (enum neon_cvt_mode mode
)
15327 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_FFI
, NS_DD
, NS_QQ
,
15328 NS_FD
, NS_DF
, NS_FF
, NS_QD
, NS_DQ
, NS_NULL
);
15329 enum neon_cvt_flavour flavour
= get_neon_cvt_flavour (rs
);
15331 /* PR11109: Handle round-to-zero for VCVT conversions. */
15332 if (mode
== neon_cvt_mode_z
15333 && ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_arch_vfp_v2
)
15334 && (flavour
== neon_cvt_flavour_s32_f32
15335 || flavour
== neon_cvt_flavour_u32_f32
15336 || flavour
== neon_cvt_flavour_s32_f64
15337 || flavour
== neon_cvt_flavour_u32_f64
)
15338 && (rs
== NS_FD
|| rs
== NS_FF
))
15340 do_vfp_nsyn_cvtz ();
15344 /* VFP rather than Neon conversions. */
15345 if (flavour
>= neon_cvt_flavour_first_fp
)
15347 if (mode
== neon_cvt_mode_x
|| mode
== neon_cvt_mode_z
)
15348 do_vfp_nsyn_cvt (rs
, flavour
);
15350 do_vfp_nsyn_cvt_fpv8 (flavour
, mode
);
15361 unsigned enctab
[] = { 0x0000100, 0x1000100, 0x0, 0x1000000 };
15363 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
15366 /* Fixed-point conversion with #0 immediate is encoded as an
15367 integer conversion. */
15368 if (inst
.operands
[2].present
&& inst
.operands
[2].imm
== 0)
15370 immbits
= 32 - inst
.operands
[2].imm
;
15371 NEON_ENCODE (IMMED
, inst
);
15372 if (flavour
!= neon_cvt_flavour_invalid
)
15373 inst
.instruction
|= enctab
[flavour
];
15374 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15375 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15376 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
15377 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
15378 inst
.instruction
|= neon_quad (rs
) << 6;
15379 inst
.instruction
|= 1 << 21;
15380 inst
.instruction
|= immbits
<< 16;
15382 neon_dp_fixup (&inst
);
15388 if (mode
!= neon_cvt_mode_x
&& mode
!= neon_cvt_mode_z
)
15390 NEON_ENCODE (FLOAT
, inst
);
15391 set_it_insn_type (OUTSIDE_IT_INSN
);
15393 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH8
) == FAIL
)
15396 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15397 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15398 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
15399 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
15400 inst
.instruction
|= neon_quad (rs
) << 6;
15401 inst
.instruction
|= (flavour
== neon_cvt_flavour_u32_f32
) << 7;
15402 inst
.instruction
|= mode
<< 8;
15404 inst
.instruction
|= 0xfc000000;
15406 inst
.instruction
|= 0xf0000000;
15412 unsigned enctab
[] = { 0x100, 0x180, 0x0, 0x080 };
15414 NEON_ENCODE (INTEGER
, inst
);
15416 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
15419 if (flavour
!= neon_cvt_flavour_invalid
)
15420 inst
.instruction
|= enctab
[flavour
];
15422 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15423 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15424 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
15425 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
15426 inst
.instruction
|= neon_quad (rs
) << 6;
15427 inst
.instruction
|= 2 << 18;
15429 neon_dp_fixup (&inst
);
15434 /* Half-precision conversions for Advanced SIMD -- neon. */
15439 && (inst
.vectype
.el
[0].size
!= 16 || inst
.vectype
.el
[1].size
!= 32))
15441 as_bad (_("operand size must match register width"));
15446 && ((inst
.vectype
.el
[0].size
!= 32 || inst
.vectype
.el
[1].size
!= 16)))
15448 as_bad (_("operand size must match register width"));
15453 inst
.instruction
= 0x3b60600;
15455 inst
.instruction
= 0x3b60700;
15457 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15458 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15459 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
15460 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
15461 neon_dp_fixup (&inst
);
15465 /* Some VFP conversions go here (s32 <-> f32, u32 <-> f32). */
15466 if (mode
== neon_cvt_mode_x
|| mode
== neon_cvt_mode_z
)
15467 do_vfp_nsyn_cvt (rs
, flavour
);
15469 do_vfp_nsyn_cvt_fpv8 (flavour
, mode
);
15474 do_neon_cvtr (void)
15476 do_neon_cvt_1 (neon_cvt_mode_x
);
15482 do_neon_cvt_1 (neon_cvt_mode_z
);
15486 do_neon_cvta (void)
15488 do_neon_cvt_1 (neon_cvt_mode_a
);
15492 do_neon_cvtn (void)
15494 do_neon_cvt_1 (neon_cvt_mode_n
);
15498 do_neon_cvtp (void)
15500 do_neon_cvt_1 (neon_cvt_mode_p
);
15504 do_neon_cvtm (void)
15506 do_neon_cvt_1 (neon_cvt_mode_m
);
15510 do_neon_cvttb_2 (bfd_boolean t
, bfd_boolean to
, bfd_boolean is_double
)
15513 mark_feature_used (&fpu_vfp_ext_armv8
);
15515 encode_arm_vfp_reg (inst
.operands
[0].reg
,
15516 (is_double
&& !to
) ? VFP_REG_Dd
: VFP_REG_Sd
);
15517 encode_arm_vfp_reg (inst
.operands
[1].reg
,
15518 (is_double
&& to
) ? VFP_REG_Dm
: VFP_REG_Sm
);
15519 inst
.instruction
|= to
? 0x10000 : 0;
15520 inst
.instruction
|= t
? 0x80 : 0;
15521 inst
.instruction
|= is_double
? 0x100 : 0;
15522 do_vfp_cond_or_thumb ();
15526 do_neon_cvttb_1 (bfd_boolean t
)
15528 enum neon_shape rs
= neon_select_shape (NS_FF
, NS_FD
, NS_DF
, NS_NULL
);
15532 else if (neon_check_type (2, rs
, N_F16
, N_F32
| N_VFP
).type
!= NT_invtype
)
15535 do_neon_cvttb_2 (t
, /*to=*/TRUE
, /*is_double=*/FALSE
);
15537 else if (neon_check_type (2, rs
, N_F32
| N_VFP
, N_F16
).type
!= NT_invtype
)
15540 do_neon_cvttb_2 (t
, /*to=*/FALSE
, /*is_double=*/FALSE
);
15542 else if (neon_check_type (2, rs
, N_F16
, N_F64
| N_VFP
).type
!= NT_invtype
)
15544 /* The VCVTB and VCVTT instructions with D-register operands
15545 don't work for SP only targets. */
15546 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
15550 do_neon_cvttb_2 (t
, /*to=*/TRUE
, /*is_double=*/TRUE
);
15552 else if (neon_check_type (2, rs
, N_F64
| N_VFP
, N_F16
).type
!= NT_invtype
)
15554 /* The VCVTB and VCVTT instructions with D-register operands
15555 don't work for SP only targets. */
15556 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
15560 do_neon_cvttb_2 (t
, /*to=*/FALSE
, /*is_double=*/TRUE
);
15567 do_neon_cvtb (void)
15569 do_neon_cvttb_1 (FALSE
);
15574 do_neon_cvtt (void)
15576 do_neon_cvttb_1 (TRUE
);
15580 neon_move_immediate (void)
15582 enum neon_shape rs
= neon_select_shape (NS_DI
, NS_QI
, NS_NULL
);
15583 struct neon_type_el et
= neon_check_type (2, rs
,
15584 N_I8
| N_I16
| N_I32
| N_I64
| N_F32
| N_KEY
, N_EQK
);
15585 unsigned immlo
, immhi
= 0, immbits
;
15586 int op
, cmode
, float_p
;
15588 constraint (et
.type
== NT_invtype
,
15589 _("operand size must be specified for immediate VMOV"));
15591 /* We start out as an MVN instruction if OP = 1, MOV otherwise. */
15592 op
= (inst
.instruction
& (1 << 5)) != 0;
15594 immlo
= inst
.operands
[1].imm
;
15595 if (inst
.operands
[1].regisimm
)
15596 immhi
= inst
.operands
[1].reg
;
15598 constraint (et
.size
< 32 && (immlo
& ~((1 << et
.size
) - 1)) != 0,
15599 _("immediate has bits set outside the operand size"));
15601 float_p
= inst
.operands
[1].immisfloat
;
15603 if ((cmode
= neon_cmode_for_move_imm (immlo
, immhi
, float_p
, &immbits
, &op
,
15604 et
.size
, et
.type
)) == FAIL
)
15606 /* Invert relevant bits only. */
15607 neon_invert_size (&immlo
, &immhi
, et
.size
);
15608 /* Flip from VMOV/VMVN to VMVN/VMOV. Some immediate types are unavailable
15609 with one or the other; those cases are caught by
15610 neon_cmode_for_move_imm. */
15612 if ((cmode
= neon_cmode_for_move_imm (immlo
, immhi
, float_p
, &immbits
,
15613 &op
, et
.size
, et
.type
)) == FAIL
)
15615 first_error (_("immediate out of range"));
15620 inst
.instruction
&= ~(1 << 5);
15621 inst
.instruction
|= op
<< 5;
15623 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15624 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15625 inst
.instruction
|= neon_quad (rs
) << 6;
15626 inst
.instruction
|= cmode
<< 8;
15628 neon_write_immbits (immbits
);
15634 if (inst
.operands
[1].isreg
)
15636 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
15638 NEON_ENCODE (INTEGER
, inst
);
15639 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15640 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15641 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
15642 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
15643 inst
.instruction
|= neon_quad (rs
) << 6;
15647 NEON_ENCODE (IMMED
, inst
);
15648 neon_move_immediate ();
15651 neon_dp_fixup (&inst
);
15654 /* Encode instructions of form:
15656 |28/24|23|22|21 20|19 16|15 12|11 8|7|6|5|4|3 0|
15657 | U |x |D |size | Rn | Rd |x x x x|N|x|M|x| Rm | */
15660 neon_mixed_length (struct neon_type_el et
, unsigned size
)
15662 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15663 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15664 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
15665 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
15666 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
15667 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
15668 inst
.instruction
|= (et
.type
== NT_unsigned
) << 24;
15669 inst
.instruction
|= neon_logbits (size
) << 20;
15671 neon_dp_fixup (&inst
);
15675 do_neon_dyadic_long (void)
15677 /* FIXME: Type checking for lengthening op. */
15678 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
15679 N_EQK
| N_DBL
, N_EQK
, N_SU_32
| N_KEY
);
15680 neon_mixed_length (et
, et
.size
);
15684 do_neon_abal (void)
15686 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
15687 N_EQK
| N_INT
| N_DBL
, N_EQK
, N_SU_32
| N_KEY
);
15688 neon_mixed_length (et
, et
.size
);
15692 neon_mac_reg_scalar_long (unsigned regtypes
, unsigned scalartypes
)
15694 if (inst
.operands
[2].isscalar
)
15696 struct neon_type_el et
= neon_check_type (3, NS_QDS
,
15697 N_EQK
| N_DBL
, N_EQK
, regtypes
| N_KEY
);
15698 NEON_ENCODE (SCALAR
, inst
);
15699 neon_mul_mac (et
, et
.type
== NT_unsigned
);
15703 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
15704 N_EQK
| N_DBL
, N_EQK
, scalartypes
| N_KEY
);
15705 NEON_ENCODE (INTEGER
, inst
);
15706 neon_mixed_length (et
, et
.size
);
15711 do_neon_mac_maybe_scalar_long (void)
15713 neon_mac_reg_scalar_long (N_S16
| N_S32
| N_U16
| N_U32
, N_SU_32
);
15717 do_neon_dyadic_wide (void)
15719 struct neon_type_el et
= neon_check_type (3, NS_QQD
,
15720 N_EQK
| N_DBL
, N_EQK
| N_DBL
, N_SU_32
| N_KEY
);
15721 neon_mixed_length (et
, et
.size
);
15725 do_neon_dyadic_narrow (void)
15727 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
15728 N_EQK
| N_DBL
, N_EQK
, N_I16
| N_I32
| N_I64
| N_KEY
);
15729 /* Operand sign is unimportant, and the U bit is part of the opcode,
15730 so force the operand type to integer. */
15731 et
.type
= NT_integer
;
15732 neon_mixed_length (et
, et
.size
/ 2);
15736 do_neon_mul_sat_scalar_long (void)
15738 neon_mac_reg_scalar_long (N_S16
| N_S32
, N_S16
| N_S32
);
15742 do_neon_vmull (void)
15744 if (inst
.operands
[2].isscalar
)
15745 do_neon_mac_maybe_scalar_long ();
15748 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
15749 N_EQK
| N_DBL
, N_EQK
, N_SU_32
| N_P8
| N_P64
| N_KEY
);
15751 if (et
.type
== NT_poly
)
15752 NEON_ENCODE (POLY
, inst
);
15754 NEON_ENCODE (INTEGER
, inst
);
15756 /* For polynomial encoding the U bit must be zero, and the size must
15757 be 8 (encoded as 0b00) or, on ARMv8 or later 64 (encoded, non
15758 obviously, as 0b10). */
15761 /* Check we're on the correct architecture. */
15762 if (!mark_feature_used (&fpu_crypto_ext_armv8
))
15764 _("Instruction form not available on this architecture.");
15769 neon_mixed_length (et
, et
.size
);
15776 enum neon_shape rs
= neon_select_shape (NS_DDDI
, NS_QQQI
, NS_NULL
);
15777 struct neon_type_el et
= neon_check_type (3, rs
,
15778 N_EQK
, N_EQK
, N_8
| N_16
| N_32
| N_64
| N_KEY
);
15779 unsigned imm
= (inst
.operands
[3].imm
* et
.size
) / 8;
15781 constraint (imm
>= (unsigned) (neon_quad (rs
) ? 16 : 8),
15782 _("shift out of range"));
15783 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15784 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15785 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
15786 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
15787 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
15788 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
15789 inst
.instruction
|= neon_quad (rs
) << 6;
15790 inst
.instruction
|= imm
<< 8;
15792 neon_dp_fixup (&inst
);
15798 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
15799 struct neon_type_el et
= neon_check_type (2, rs
,
15800 N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
15801 unsigned op
= (inst
.instruction
>> 7) & 3;
15802 /* N (width of reversed regions) is encoded as part of the bitmask. We
15803 extract it here to check the elements to be reversed are smaller.
15804 Otherwise we'd get a reserved instruction. */
15805 unsigned elsize
= (op
== 2) ? 16 : (op
== 1) ? 32 : (op
== 0) ? 64 : 0;
15806 gas_assert (elsize
!= 0);
15807 constraint (et
.size
>= elsize
,
15808 _("elements must be smaller than reversal region"));
15809 neon_two_same (neon_quad (rs
), 1, et
.size
);
15815 if (inst
.operands
[1].isscalar
)
15817 enum neon_shape rs
= neon_select_shape (NS_DS
, NS_QS
, NS_NULL
);
15818 struct neon_type_el et
= neon_check_type (2, rs
,
15819 N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
15820 unsigned sizebits
= et
.size
>> 3;
15821 unsigned dm
= NEON_SCALAR_REG (inst
.operands
[1].reg
);
15822 int logsize
= neon_logbits (et
.size
);
15823 unsigned x
= NEON_SCALAR_INDEX (inst
.operands
[1].reg
) << logsize
;
15825 if (vfp_or_neon_is_neon (NEON_CHECK_CC
) == FAIL
)
15828 NEON_ENCODE (SCALAR
, inst
);
15829 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15830 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15831 inst
.instruction
|= LOW4 (dm
);
15832 inst
.instruction
|= HI1 (dm
) << 5;
15833 inst
.instruction
|= neon_quad (rs
) << 6;
15834 inst
.instruction
|= x
<< 17;
15835 inst
.instruction
|= sizebits
<< 16;
15837 neon_dp_fixup (&inst
);
15841 enum neon_shape rs
= neon_select_shape (NS_DR
, NS_QR
, NS_NULL
);
15842 struct neon_type_el et
= neon_check_type (2, rs
,
15843 N_8
| N_16
| N_32
| N_KEY
, N_EQK
);
15844 /* Duplicate ARM register to lanes of vector. */
15845 NEON_ENCODE (ARMREG
, inst
);
15848 case 8: inst
.instruction
|= 0x400000; break;
15849 case 16: inst
.instruction
|= 0x000020; break;
15850 case 32: inst
.instruction
|= 0x000000; break;
15853 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 12;
15854 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 16;
15855 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 7;
15856 inst
.instruction
|= neon_quad (rs
) << 21;
15857 /* The encoding for this instruction is identical for the ARM and Thumb
15858 variants, except for the condition field. */
15859 do_vfp_cond_or_thumb ();
15863 /* VMOV has particularly many variations. It can be one of:
15864 0. VMOV<c><q> <Qd>, <Qm>
15865 1. VMOV<c><q> <Dd>, <Dm>
15866 (Register operations, which are VORR with Rm = Rn.)
15867 2. VMOV<c><q>.<dt> <Qd>, #<imm>
15868 3. VMOV<c><q>.<dt> <Dd>, #<imm>
15870 4. VMOV<c><q>.<size> <Dn[x]>, <Rd>
15871 (ARM register to scalar.)
15872 5. VMOV<c><q> <Dm>, <Rd>, <Rn>
15873 (Two ARM registers to vector.)
15874 6. VMOV<c><q>.<dt> <Rd>, <Dn[x]>
15875 (Scalar to ARM register.)
15876 7. VMOV<c><q> <Rd>, <Rn>, <Dm>
15877 (Vector to two ARM registers.)
15878 8. VMOV.F32 <Sd>, <Sm>
15879 9. VMOV.F64 <Dd>, <Dm>
15880 (VFP register moves.)
15881 10. VMOV.F32 <Sd>, #imm
15882 11. VMOV.F64 <Dd>, #imm
15883 (VFP float immediate load.)
15884 12. VMOV <Rd>, <Sm>
15885 (VFP single to ARM reg.)
15886 13. VMOV <Sd>, <Rm>
15887 (ARM reg to VFP single.)
15888 14. VMOV <Rd>, <Re>, <Sn>, <Sm>
15889 (Two ARM regs to two VFP singles.)
15890 15. VMOV <Sd>, <Se>, <Rn>, <Rm>
15891 (Two VFP singles to two ARM regs.)
15893 These cases can be disambiguated using neon_select_shape, except cases 1/9
15894 and 3/11 which depend on the operand type too.
15896 All the encoded bits are hardcoded by this function.
15898 Cases 4, 6 may be used with VFPv1 and above (only 32-bit transfers!).
15899 Cases 5, 7 may be used with VFPv2 and above.
15901 FIXME: Some of the checking may be a bit sloppy (in a couple of cases you
15902 can specify a type where it doesn't make sense to, and is ignored). */
15907 enum neon_shape rs
= neon_select_shape (NS_RRFF
, NS_FFRR
, NS_DRR
, NS_RRD
,
15908 NS_QQ
, NS_DD
, NS_QI
, NS_DI
, NS_SR
, NS_RS
, NS_FF
, NS_FI
, NS_RF
, NS_FR
,
15910 struct neon_type_el et
;
15911 const char *ldconst
= 0;
15915 case NS_DD
: /* case 1/9. */
15916 et
= neon_check_type (2, rs
, N_EQK
, N_F64
| N_KEY
);
15917 /* It is not an error here if no type is given. */
15919 if (et
.type
== NT_float
&& et
.size
== 64)
15921 do_vfp_nsyn_opcode ("fcpyd");
15924 /* fall through. */
15926 case NS_QQ
: /* case 0/1. */
15928 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
15930 /* The architecture manual I have doesn't explicitly state which
15931 value the U bit should have for register->register moves, but
15932 the equivalent VORR instruction has U = 0, so do that. */
15933 inst
.instruction
= 0x0200110;
15934 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15935 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15936 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
15937 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
15938 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
15939 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
15940 inst
.instruction
|= neon_quad (rs
) << 6;
15942 neon_dp_fixup (&inst
);
15946 case NS_DI
: /* case 3/11. */
15947 et
= neon_check_type (2, rs
, N_EQK
, N_F64
| N_KEY
);
15949 if (et
.type
== NT_float
&& et
.size
== 64)
15951 /* case 11 (fconstd). */
15952 ldconst
= "fconstd";
15953 goto encode_fconstd
;
15955 /* fall through. */
15957 case NS_QI
: /* case 2/3. */
15958 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
15960 inst
.instruction
= 0x0800010;
15961 neon_move_immediate ();
15962 neon_dp_fixup (&inst
);
15965 case NS_SR
: /* case 4. */
15967 unsigned bcdebits
= 0;
15969 unsigned dn
= NEON_SCALAR_REG (inst
.operands
[0].reg
);
15970 unsigned x
= NEON_SCALAR_INDEX (inst
.operands
[0].reg
);
15972 /* .<size> is optional here, defaulting to .32. */
15973 if (inst
.vectype
.elems
== 0
15974 && inst
.operands
[0].vectype
.type
== NT_invtype
15975 && inst
.operands
[1].vectype
.type
== NT_invtype
)
15977 inst
.vectype
.el
[0].type
= NT_untyped
;
15978 inst
.vectype
.el
[0].size
= 32;
15979 inst
.vectype
.elems
= 1;
15982 et
= neon_check_type (2, NS_NULL
, N_8
| N_16
| N_32
| N_KEY
, N_EQK
);
15983 logsize
= neon_logbits (et
.size
);
15985 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v1
),
15987 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_v1
)
15988 && et
.size
!= 32, _(BAD_FPU
));
15989 constraint (et
.type
== NT_invtype
, _("bad type for scalar"));
15990 constraint (x
>= 64 / et
.size
, _("scalar index out of range"));
15994 case 8: bcdebits
= 0x8; break;
15995 case 16: bcdebits
= 0x1; break;
15996 case 32: bcdebits
= 0x0; break;
16000 bcdebits
|= x
<< logsize
;
16002 inst
.instruction
= 0xe000b10;
16003 do_vfp_cond_or_thumb ();
16004 inst
.instruction
|= LOW4 (dn
) << 16;
16005 inst
.instruction
|= HI1 (dn
) << 7;
16006 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
16007 inst
.instruction
|= (bcdebits
& 3) << 5;
16008 inst
.instruction
|= (bcdebits
>> 2) << 21;
16012 case NS_DRR
: /* case 5 (fmdrr). */
16013 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v2
),
16016 inst
.instruction
= 0xc400b10;
16017 do_vfp_cond_or_thumb ();
16018 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
);
16019 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 5;
16020 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
16021 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
16024 case NS_RS
: /* case 6. */
16027 unsigned dn
= NEON_SCALAR_REG (inst
.operands
[1].reg
);
16028 unsigned x
= NEON_SCALAR_INDEX (inst
.operands
[1].reg
);
16029 unsigned abcdebits
= 0;
16031 /* .<dt> is optional here, defaulting to .32. */
16032 if (inst
.vectype
.elems
== 0
16033 && inst
.operands
[0].vectype
.type
== NT_invtype
16034 && inst
.operands
[1].vectype
.type
== NT_invtype
)
16036 inst
.vectype
.el
[0].type
= NT_untyped
;
16037 inst
.vectype
.el
[0].size
= 32;
16038 inst
.vectype
.elems
= 1;
16041 et
= neon_check_type (2, NS_NULL
,
16042 N_EQK
, N_S8
| N_S16
| N_U8
| N_U16
| N_32
| N_KEY
);
16043 logsize
= neon_logbits (et
.size
);
16045 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v1
),
16047 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_v1
)
16048 && et
.size
!= 32, _(BAD_FPU
));
16049 constraint (et
.type
== NT_invtype
, _("bad type for scalar"));
16050 constraint (x
>= 64 / et
.size
, _("scalar index out of range"));
16054 case 8: abcdebits
= (et
.type
== NT_signed
) ? 0x08 : 0x18; break;
16055 case 16: abcdebits
= (et
.type
== NT_signed
) ? 0x01 : 0x11; break;
16056 case 32: abcdebits
= 0x00; break;
16060 abcdebits
|= x
<< logsize
;
16061 inst
.instruction
= 0xe100b10;
16062 do_vfp_cond_or_thumb ();
16063 inst
.instruction
|= LOW4 (dn
) << 16;
16064 inst
.instruction
|= HI1 (dn
) << 7;
16065 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
16066 inst
.instruction
|= (abcdebits
& 3) << 5;
16067 inst
.instruction
|= (abcdebits
>> 2) << 21;
16071 case NS_RRD
: /* case 7 (fmrrd). */
16072 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v2
),
16075 inst
.instruction
= 0xc500b10;
16076 do_vfp_cond_or_thumb ();
16077 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
16078 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
16079 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
16080 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
16083 case NS_FF
: /* case 8 (fcpys). */
16084 do_vfp_nsyn_opcode ("fcpys");
16087 case NS_FI
: /* case 10 (fconsts). */
16088 ldconst
= "fconsts";
16090 if (is_quarter_float (inst
.operands
[1].imm
))
16092 inst
.operands
[1].imm
= neon_qfloat_bits (inst
.operands
[1].imm
);
16093 do_vfp_nsyn_opcode (ldconst
);
16096 first_error (_("immediate out of range"));
16099 case NS_RF
: /* case 12 (fmrs). */
16100 do_vfp_nsyn_opcode ("fmrs");
16103 case NS_FR
: /* case 13 (fmsr). */
16104 do_vfp_nsyn_opcode ("fmsr");
16107 /* The encoders for the fmrrs and fmsrr instructions expect three operands
16108 (one of which is a list), but we have parsed four. Do some fiddling to
16109 make the operands what do_vfp_reg2_from_sp2 and do_vfp_sp2_from_reg2
16111 case NS_RRFF
: /* case 14 (fmrrs). */
16112 constraint (inst
.operands
[3].reg
!= inst
.operands
[2].reg
+ 1,
16113 _("VFP registers must be adjacent"));
16114 inst
.operands
[2].imm
= 2;
16115 memset (&inst
.operands
[3], '\0', sizeof (inst
.operands
[3]));
16116 do_vfp_nsyn_opcode ("fmrrs");
16119 case NS_FFRR
: /* case 15 (fmsrr). */
16120 constraint (inst
.operands
[1].reg
!= inst
.operands
[0].reg
+ 1,
16121 _("VFP registers must be adjacent"));
16122 inst
.operands
[1] = inst
.operands
[2];
16123 inst
.operands
[2] = inst
.operands
[3];
16124 inst
.operands
[0].imm
= 2;
16125 memset (&inst
.operands
[3], '\0', sizeof (inst
.operands
[3]));
16126 do_vfp_nsyn_opcode ("fmsrr");
16130 /* neon_select_shape has determined that the instruction
16131 shape is wrong and has already set the error message. */
/* Encode V{R}SHR (rounding right shift by immediate).  The shift amount is
   encoded as et.size - imm; a shift of 0 is special-cased as a plain VMOV.  */
static void
do_neon_rshift_round_imm (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
  int imm = inst.operands[2].imm;

  /* imm == 0 case is encoded as VMOV for V{R}SHR.  */
  if (imm == 0)
    {
      /* Drop the immediate operand and re-dispatch as a register move.  */
      inst.operands[2].present = 0;
      do_neon_mov ();
      return;
    }

  /* The architectural shift range is 1..element size.  */
  constraint (imm < 1 || (unsigned)imm > et.size,
              _("immediate out of range for shift"));
  /* Shift amount is encoded inverted (size - imm) in the immediate field.  */
  neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et,
                  et.size - imm);
}
/* Encode VMOVL (widening move, Q destination from D source).  The source
   element size in bytes is placed in bits [21:19] of the encoding.  */
static void
do_neon_movl (void)
{
  struct neon_type_el et = neon_check_type (2, NS_QD,
    N_EQK | N_DBL, N_SU_32 | N_KEY);
  unsigned sizebits = et.size >> 3;	/* element size in bytes */
  inst.instruction |= sizebits << 19;
  /* U bit selected by signedness; -1 size: element size already encoded.  */
  neon_two_same (0, et.type == NT_unsigned, -1);
}
16173 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
16174 struct neon_type_el et
= neon_check_type (2, rs
,
16175 N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
16176 NEON_ENCODE (INTEGER
, inst
);
16177 neon_two_same (neon_quad (rs
), 1, et
.size
);
/* Encode VZIP / VUZP.  The D-register 32-bit case has no distinct encoding
   and is expressed as VTRN.32 instead.  */
static void
do_neon_zip_uzp (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_8 | N_16 | N_32 | N_KEY);
  if (rs == NS_DD && et.size == 32)
    {
      /* Special case: encode as VTRN.32 <Dd>, <Dm>.  */
      inst.instruction = N_MNEM_vtrn;
      do_neon_trn ();
      return;
    }
  neon_two_same (neon_quad (rs), 1, et.size);
}
/* Encode VQABS / VQNEG (saturating absolute value / negate).  Only signed
   8/16/32-bit element types are valid.  */
static void
do_neon_sat_abs_neg (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
  neon_two_same (neon_quad (rs), 1, et.size);
}
/* Encode pairwise-long instructions (VPADDL / VPADAL).  */
static void
do_neon_pair_long (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_32 | N_KEY);
  /* Unsigned is encoded in OP field (bit 7) for these instructions.  */
  inst.instruction |= (et.type == NT_unsigned) << 7;
  neon_two_same (neon_quad (rs), 1, et.size);
}
/* Encode reciprocal (square-root) estimate instructions (VRECPE / VRSQRTE).
   Bit 8 selects the floating-point (as opposed to unsigned-integer) form.  */
static void
do_neon_recip_est (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK | N_FLT, N_F32 | N_U32 | N_KEY);
  inst.instruction |= (et.type == NT_float) << 8;
  neon_two_same (neon_quad (rs), 1, et.size);
}
16228 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
16229 struct neon_type_el et
= neon_check_type (2, rs
,
16230 N_EQK
, N_S8
| N_S16
| N_S32
| N_KEY
);
16231 neon_two_same (neon_quad (rs
), 1, et
.size
);
16237 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
16238 struct neon_type_el et
= neon_check_type (2, rs
,
16239 N_EQK
, N_I8
| N_I16
| N_I32
| N_KEY
);
16240 neon_two_same (neon_quad (rs
), 1, et
.size
);
16246 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
16247 struct neon_type_el et
= neon_check_type (2, rs
,
16248 N_EQK
| N_INT
, N_8
| N_KEY
);
16249 neon_two_same (neon_quad (rs
), 1, et
.size
);
16255 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
16256 neon_two_same (neon_quad (rs
), 1, -1);
/* Encode VTBL / VTBX (vector table lookup).  Operand 1 is a register list of
   1-4 D registers; its length minus one goes in bits [9:8].  */
static void
do_neon_tbl_tbx (void)
{
  unsigned listlenbits;
  neon_check_type (3, NS_DLD, N_EQK, N_EQK, N_8 | N_KEY);

  if (inst.operands[1].imm < 1 || inst.operands[1].imm > 4)
    {
      first_error (_("bad list length for table lookup"));
      return;
    }

  listlenbits = inst.operands[1].imm - 1;
  /* Dd in bits [15:12]/[22], list base Dn in [19:16]/[7], index Dm in
     [3:0]/[5].  */
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  inst.instruction |= listlenbits << 8;

  neon_dp_fixup (&inst);
}
/* Encode VLDM / VSTM (multiple D registers).  Single-precision lists are
   delegated to the VFP nsyn handler.  */
static void
do_neon_ldm_stm (void)
{
  /* P, U and L bits are part of bitmask.  */
  int is_dbmode = (inst.instruction & (1 << 24)) != 0;
  /* Each D register occupies two words in the offset field.  */
  unsigned offsetbits = inst.operands[1].imm * 2;

  if (inst.operands[1].issingle)
    {
      do_vfp_nsyn_ldm_stm (is_dbmode);
      return;
    }

  constraint (is_dbmode && !inst.operands[0].writeback,
              _("writeback (!) must be used for VLDMDB and VSTMDB"));

  constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
              _("register list must contain at least 1 and at most 16 "
                "registers"));

  /* Base register Rn in [19:16], W bit 21, first D register in
     [15:12]/[22].  */
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[0].writeback << 21;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
  inst.instruction |= HI1 (inst.operands[1].reg) << 22;

  inst.instruction |= offsetbits;

  do_vfp_cond_or_thumb ();
}
/* Encode VLDR / VSTR by dispatching to the equivalent VFP mnemonic
   (flds/fsts for single precision, fldd/fstd for double).  */
static void
do_neon_ldr_str (void)
{
  int is_ldr = (inst.instruction & (1 << 20)) != 0;	/* L bit */

  /* Use of PC in vstr in ARM mode is deprecated in ARMv7.
     And is UNPREDICTABLE in thumb mode.  */
  if (!is_ldr
      && inst.operands[1].reg == REG_PC
      && (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v7) || thumb_mode))
    {
      if (thumb_mode)
        inst.error = _("Use of PC here is UNPREDICTABLE");
      else if (warn_on_deprecated)
        as_tsktsk (_("Use of PC here is deprecated"));
    }

  if (inst.operands[0].issingle)
    {
      if (is_ldr)
        do_vfp_nsyn_opcode ("flds");
      else
        do_vfp_nsyn_opcode ("fsts");
    }
  else
    {
      if (is_ldr)
        do_vfp_nsyn_opcode ("fldd");
      else
        do_vfp_nsyn_opcode ("fstd");
    }
}
16346 /* "interleave" version also handles non-interleaving register VLD1/VST1
/* Encode the interleaved forms of VLD<n>/VST<n>.  Looks up the "type" field
   from a table indexed by register stride/list length and the <n> stashed in
   bits [9:8] of the initial bitmask, and encodes the alignment specifier.  */
static void
do_neon_ld_st_interleave (void)
{
  struct neon_type_el et = neon_check_type (1, NS_NULL,
                                            N_8 | N_16 | N_32 | N_64);
  unsigned alignbits = 0;
  unsigned idx;
  int typebits;
  /* The bits in this table go:
     0: register stride of one (0) or two (1)
     1,2: register list length, minus one (1, 2, 3, 4).
     3,4: <n> in instruction type, minus one (VLD<n> / VST<n>).
     We use -1 for invalid entries.  */
  const int typetable[] =
    {
      0x7,  -1, 0xa,  -1, 0x6,  -1, 0x2,  -1, /* VLD1 / VST1.  */
       -1,  -1, 0x8, 0x9,  -1,  -1, 0x3,  -1, /* VLD2 / VST2.  */
       -1,  -1,  -1,  -1, 0x4, 0x5,  -1,  -1, /* VLD3 / VST3.  */
       -1,  -1,  -1,  -1,  -1,  -1, 0x0, 0x1  /* VLD4 / VST4.  */
    };

  if (et.type == NT_invtype)
    return;

  if (inst.operands[1].immisalign)
    switch (inst.operands[1].imm >> 8)
      {
      case 64: alignbits = 1; break;
      case 128:
        /* 128-bit alignment requires a 2- or 4-register list.  */
        if (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2
            && NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4)
          goto bad_alignment;
        alignbits = 2;
        break;
      case 256:
        /* 256-bit alignment requires a 4-register list.  */
        if (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4)
          goto bad_alignment;
        alignbits = 3;
        break;
      default:
      bad_alignment:
        first_error (_("bad alignment"));
        return;
      }

  inst.instruction |= alignbits << 4;
  inst.instruction |= neon_logbits (et.size) << 6;

  /* Bits [4:6] of the immediate in a list specifier encode register stride
     (minus 1) in bit 4, and list length in bits [5:6].  We put the <n> of
     VLD<n>/VST<n> in bits [9:8] of the initial bitmask.  Suck it out here,
     look up the right value for "type" in a table based on this value and the
     given list style, then stick it back.  */
  idx = ((inst.operands[0].imm >> 4) & 7)
        | (((inst.instruction >> 8) & 3) << 3);

  typebits = typetable[idx];

  constraint (typebits == -1, _("bad list type for instruction"));
  constraint (((inst.instruction >> 8) & 3) && et.size == 64,
              _("bad element type for instruction"));

  /* Replace the scratch <n> in bits [11:8] with the real type field.  */
  inst.instruction &= ~0xf00;
  inst.instruction |= typebits << 8;
}
16415 /* Check alignment is valid for do_neon_ld_st_lane and do_neon_ld_dup.
16416 *DO_ALIGN is set to 1 if the relevant alignment bit should be set, 0
16417 otherwise. The variable arguments are a list of pairs of legal (size, align)
16418 values, terminated with -1. */
/* Check alignment is valid for do_neon_ld_st_lane and do_neon_ld_dup.
   *DO_ALIGN is set to 1 if the relevant alignment bit should be set, 0
   otherwise.  The variable arguments are a list of pairs of legal
   (size, align) values, terminated with -1.  Returns SUCCESS or FAIL,
   reporting an error via first_error on failure.  */
static int
neon_alignment_bit (int size, int align, int *do_align, ...)
{
  va_list ap;
  int result = FAIL, thissize, thisalign;

  /* No alignment specifier given: nothing to check, don't set the bit.  */
  if (!inst.operands[1].immisalign)
    {
      *do_align = 0;
      return SUCCESS;
    }

  va_start (ap, do_align);

  do
    {
      thissize = va_arg (ap, int);
      if (thissize == -1)		/* end-of-list sentinel */
        break;
      thisalign = va_arg (ap, int);

      if (size == thissize && align == thisalign)
        result = SUCCESS;
    }
  while (result != SUCCESS);

  va_end (ap);

  if (result == SUCCESS)
    *do_align = 1;
  else
    first_error (_("unsupported alignment for instruction"));

  return result;
}
/* Encode single-lane VLD<n>/VST<n> (load/store one element to/from one lane).
   <n> minus one is held in bits [9:8] of the initial bitmask.  */
static void
do_neon_ld_st_lane (void)
{
  struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
  int align_good, do_align = 0;
  int logsize = neon_logbits (et.size);
  int align = inst.operands[1].imm >> 8;	/* alignment in bits */
  int n = (inst.instruction >> 8) & 3;		/* <n> minus one */
  int max_el = 64 / et.size;			/* lanes per D register */

  if (et.type == NT_invtype)
    return;

  constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != n + 1,
              _("bad list length"));
  constraint (NEON_LANE (inst.operands[0].imm) >= max_el,
              _("scalar index out of range"));
  constraint (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2
              && et.size == 8,
              _("stride of 2 unavailable when element size is 8"));

  switch (n)
    {
    case 0:  /* VLD1 / VST1.  */
      align_good = neon_alignment_bit (et.size, align, &do_align, 16, 16,
                                       32, 32, -1);
      if (align_good == FAIL)
        return;
      if (do_align)
        {
          unsigned alignbits = 0;
          switch (et.size)
            {
            case 16: alignbits = 0x1; break;
            case 32: alignbits = 0x3; break;
            default: ;
            }
          inst.instruction |= alignbits << 4;
        }
      break;

    case 1:  /* VLD2 / VST2.  */
      align_good = neon_alignment_bit (et.size, align, &do_align, 8, 16,
                                       16, 32, 32, 64, -1);
      if (align_good == FAIL)
        return;
      if (do_align)
        inst.instruction |= 1 << 4;
      break;

    case 2:  /* VLD3 / VST3.  */
      constraint (inst.operands[1].immisalign,
                  _("can't use alignment with this instruction"));
      break;

    case 3:  /* VLD4 / VST4.  */
      align_good = neon_alignment_bit (et.size, align, &do_align, 8, 32,
                                       16, 64, 32, 64, 32, 128, -1);
      if (align_good == FAIL)
        return;
      if (do_align)
        {
          unsigned alignbits = 0;
          switch (et.size)
            {
            case 8:  alignbits = 0x1; break;
            case 16: alignbits = 0x1; break;
            case 32: alignbits = (align == 64) ? 0x1 : 0x2; break;
            default: ;
            }
          inst.instruction |= alignbits << 4;
        }
      break;

    default: ;
    }

  /* Reg stride of 2 is encoded in bit 5 when size==16, bit 6 when size==32.  */
  if (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2)
    inst.instruction |= 1 << (4 + logsize);

  /* Lane index above the alignment/stride bits; element size in [11:10].  */
  inst.instruction |= NEON_LANE (inst.operands[0].imm) << (logsize + 5);
  inst.instruction |= logsize << 10;
}
16541 /* Encode single n-element structure to all lanes VLD<n> instructions. */
/* Encode single n-element structure to all lanes VLD<n> instructions.  */
static void
do_neon_ld_dup (void)
{
  struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
  int align_good, do_align = 0;

  if (et.type == NT_invtype)
    return;

  switch ((inst.instruction >> 8) & 3)	/* <n> minus one */
    {
    case 0:  /* VLD1.  */
      gas_assert (NEON_REG_STRIDE (inst.operands[0].imm) != 2);
      align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
                                       &do_align, 16, 16, 32, 32, -1);
      if (align_good == FAIL)
        return;
      switch (NEON_REGLIST_LENGTH (inst.operands[0].imm))
        {
        case 1: break;
        case 2: inst.instruction |= 1 << 5; break;	/* T bit: 2 regs */
        default: first_error (_("bad list length")); return;
        }
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 1:  /* VLD2.  */
      align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
                                       &do_align, 8, 16, 16, 32, 32, 64, -1);
      if (align_good == FAIL)
        return;
      constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2,
                  _("bad list length"));
      if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
        inst.instruction |= 1 << 5;
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 2:  /* VLD3.  */
      constraint (inst.operands[1].immisalign,
                  _("can't use alignment with this instruction"));
      constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 3,
                  _("bad list length"));
      if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
        inst.instruction |= 1 << 5;
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 3:  /* VLD4.  */
      {
        int align = inst.operands[1].imm >> 8;
        align_good = neon_alignment_bit (et.size, align, &do_align, 8, 32,
                                         16, 64, 32, 64, 32, 128, -1);
        if (align_good == FAIL)
          return;
        constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4,
                    _("bad list length"));
        if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
          inst.instruction |= 1 << 5;
        /* 32-bit elements with 128-bit alignment use the reserved size
           encoding 0x3.  */
        if (et.size == 32 && align == 128)
          inst.instruction |= 0x3 << 6;
        else
          inst.instruction |= neon_logbits (et.size) << 6;
      }
      break;

    default: ;
    }

  /* Alignment flag (a bit) goes in bit 4.  */
  inst.instruction |= do_align << 4;
}
16615 /* Disambiguate VLD<n> and VST<n> instructions, and fill in common bits (those
16616 apart from bits [11:4]. */
/* Disambiguate VLD<n> and VST<n> instructions, and fill in common bits (those
   apart from bits [11:4]).  Dispatches to the interleave, all-lanes (dup) or
   single-lane encoder based on the lane specifier, then encodes registers
   and addressing mode.  */
static void
do_neon_ldx_stx (void)
{
  if (inst.operands[1].isreg)
    constraint (inst.operands[1].reg == REG_PC, BAD_PC);

  switch (NEON_LANE (inst.operands[0].imm))
    {
    case NEON_INTERLEAVE_LANES:
      NEON_ENCODE (INTERLV, inst);
      do_neon_ld_st_interleave ();
      break;

    case NEON_ALL_LANES:
      NEON_ENCODE (DUP, inst);
      /* Stores have no all-lanes (dup) form.  */
      if (inst.instruction == N_INV)
        {
          first_error ("only loads support such operands");
          break;
        }
      do_neon_ld_dup ();
      break;

    default:
      NEON_ENCODE (LANE, inst);
      do_neon_ld_st_lane ();
    }

  /* L bit comes from bit mask.  */
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= inst.operands[1].reg << 16;

  if (inst.operands[1].postind)
    {
      /* Post-indexed by register Rm (0xd and 0xf are reserved encodings
         meaning write-back and no-write-back respectively).  */
      int postreg = inst.operands[1].imm & 0xf;
      constraint (!inst.operands[1].immisreg,
                  _("post-index must be a register"));
      constraint (postreg == 0xd || postreg == 0xf,
                  _("bad register for post-index"));
      inst.instruction |= postreg;
    }
  else
    {
      constraint (inst.operands[1].immisreg, BAD_ADDR_MODE);
      constraint (inst.reloc.exp.X_op != O_constant
                  || inst.reloc.exp.X_add_number != 0,
                  BAD_ADDR_MODE);

      if (inst.operands[1].writeback)
        {
          inst.instruction |= 0xd;	/* Rm = 0xd: post-inc by size.  */
        }
      else
        inst.instruction |= 0xf;	/* Rm = 0xf: no write-back.  */
    }

  if (thumb_mode)
    inst.instruction |= 0xf9000000;
  else
    inst.instruction |= 0xf4000000;
}
/* Common encoder for the VFP ARMv8 (FP v8) three-operand instructions,
   single or double precision according to RS.  */
static void
do_vfp_nsyn_fpv8 (enum neon_shape rs)
{
  /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
     D register operands.  */
  if (neon_shape_class[rs] == SC_DOUBLE)
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
                _(BAD_FPU));

  NEON_ENCODE (FPV8, inst);
  if (rs == NS_FFF)
    do_vfp_sp_dyadic ();
  else
    do_vfp_dp_rd_rn_rm ();

  /* sz bit selects double precision.  */
  if (rs == NS_DDD)
    inst.instruction |= 0x100;

  inst.instruction |= 0xf0000000;	/* Unconditional encoding space.  */
}
16707 set_it_insn_type (OUTSIDE_IT_INSN
);
16709 if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8
) != SUCCESS
)
16710 first_error (_("invalid instruction shape"));
16716 set_it_insn_type (OUTSIDE_IT_INSN
);
16718 if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8
) == SUCCESS
)
16721 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH8
) == FAIL
)
16724 neon_dyadic_misc (NT_untyped
, N_F32
, 0);
/* Common worker for the VRINT family.  MODE selects the rounding mode
   (r/z/x/a/n/p/m).  Tries the VFP (scalar) encoding first; falls back to the
   Neon (vector) encoding when the type check selects F32 vectors.  */
static void
do_vrint_1 (enum neon_cvt_mode mode)
{
  enum neon_shape rs = neon_select_shape (NS_FF, NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et;

  if (rs == NS_NULL)
    return;

  /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
     D register operands.  */
  if (neon_shape_class[rs] == SC_DOUBLE)
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
                _(BAD_FPU));

  et = neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
  if (et.type != NT_invtype)
    {
      /* VFP encodings.  */
      /* The a/n/p/m modes are unconditional and may not appear in an IT
         block.  */
      if (mode == neon_cvt_mode_a || mode == neon_cvt_mode_n
          || mode == neon_cvt_mode_p || mode == neon_cvt_mode_m)
        set_it_insn_type (OUTSIDE_IT_INSN);

      NEON_ENCODE (FPV8, inst);
      if (rs == NS_FF)
        do_vfp_sp_monadic ();
      else
        do_vfp_dp_rd_rm ();

      switch (mode)
        {
        case neon_cvt_mode_r: inst.instruction |= 0x00000000; break;
        case neon_cvt_mode_z: inst.instruction |= 0x00000080; break;
        case neon_cvt_mode_x: inst.instruction |= 0x00010000; break;
        case neon_cvt_mode_a: inst.instruction |= 0xf0000000; break;
        case neon_cvt_mode_n: inst.instruction |= 0xf0010000; break;
        case neon_cvt_mode_p: inst.instruction |= 0xf0020000; break;
        case neon_cvt_mode_m: inst.instruction |= 0xf0030000; break;
        default: abort ();
        }

      /* sz bit selects double precision.  */
      inst.instruction |= (rs == NS_DD) << 8;
      do_vfp_cond_or_thumb ();
    }
  else
    {
      /* Neon encodings (or something broken...).  */
      inst.error = NULL;
      et = neon_check_type (2, rs, N_EQK, N_F32 | N_KEY);

      if (et.type == NT_invtype)
        return;

      set_it_insn_type (OUTSIDE_IT_INSN);
      NEON_ENCODE (FLOAT, inst);

      if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH8) == FAIL)
        return;

      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      /* Rounding mode in bits [9:7]; VRINTR has no Neon form.  */
      switch (mode)
        {
        case neon_cvt_mode_z: inst.instruction |= 3 << 7; break;
        case neon_cvt_mode_x: inst.instruction |= 1 << 7; break;
        case neon_cvt_mode_a: inst.instruction |= 2 << 7; break;
        case neon_cvt_mode_n: inst.instruction |= 0 << 7; break;
        case neon_cvt_mode_p: inst.instruction |= 7 << 7; break;
        case neon_cvt_mode_m: inst.instruction |= 5 << 7; break;
        case neon_cvt_mode_r: inst.error = _("invalid rounding mode"); break;
        default: abort ();
        }

      if (thumb_mode)
        inst.instruction |= 0xfc000000;
      else
        inst.instruction |= 0xf0000000;
    }
}
16813 do_vrint_1 (neon_cvt_mode_x
);
16819 do_vrint_1 (neon_cvt_mode_z
);
16825 do_vrint_1 (neon_cvt_mode_r
);
16831 do_vrint_1 (neon_cvt_mode_a
);
16837 do_vrint_1 (neon_cvt_mode_n
);
16843 do_vrint_1 (neon_cvt_mode_p
);
16849 do_vrint_1 (neon_cvt_mode_m
);
16852 /* Crypto v1 instructions. */
/* Common worker for the two-operand Crypto v1 instructions (AES*, SHA1H,
   SHA*su0/1).  ELTTYPE is the required element type; OP selects the
   sub-operation encoded in bits [7:6], or -1 when no OP field applies.  */
static void
do_crypto_2op_1 (unsigned elttype, int op)
{
  set_it_insn_type (OUTSIDE_IT_INSN);

  if (neon_check_type (2, NS_QQ, N_EQK | N_UNT, elttype | N_UNT | N_KEY).type
      == NT_invtype)
    return;

  inst.error = NULL;

  NEON_ENCODE (INTEGER, inst);
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  if (op != -1)
    inst.instruction |= op << 6;

  if (thumb_mode)
    inst.instruction |= 0xfc000000;
  else
    inst.instruction |= 0xf0000000;
}
/* Common worker for the three-operand Crypto v1 instructions (SHA1C/P/M,
   SHA1SU0, SHA256H/H2, SHA256SU1).  U selects the encoding variant and OP
   the sub-operation (size field becomes 8 << op).  */
static void
do_crypto_3op_1 (int u, int op)
{
  set_it_insn_type (OUTSIDE_IT_INSN);

  if (neon_check_type (3, NS_QQQ, N_EQK | N_UNT, N_EQK | N_UNT,
                       N_32 | N_UNT | N_KEY).type == NT_invtype)
    return;

  inst.error = NULL;

  NEON_ENCODE (INTEGER, inst);
  neon_three_same (1, u, 8 << op);
}
16896 do_crypto_2op_1 (N_8
, 0);
16902 do_crypto_2op_1 (N_8
, 1);
16908 do_crypto_2op_1 (N_8
, 2);
16914 do_crypto_2op_1 (N_8
, 3);
16920 do_crypto_3op_1 (0, 0);
16926 do_crypto_3op_1 (0, 1);
16932 do_crypto_3op_1 (0, 2);
16938 do_crypto_3op_1 (0, 3);
16944 do_crypto_3op_1 (1, 0);
16950 do_crypto_3op_1 (1, 1);
/* Encode SHA256SU1.  */
static void
do_sha256su1 (void)
{
  do_crypto_3op_1 (1, 2);
}
16962 do_crypto_2op_1 (N_32
, -1);
16968 do_crypto_2op_1 (N_32
, 0);
/* Encode SHA256SU0.  */
static void
do_sha256su0 (void)
{
  do_crypto_2op_1 (N_32, 1);
}
/* Common worker for the CRC32 instructions.  POLY selects CRC32C (Castagnoli)
   versus CRC32, SZ the operand size (0=B, 1=H, 2=W).  Field positions differ
   between the ARM and Thumb encodings.  */
static void
do_crc32_1 (unsigned int poly, unsigned int sz)
{
  unsigned int Rd = inst.operands[0].reg;
  unsigned int Rn = inst.operands[1].reg;
  unsigned int Rm = inst.operands[2].reg;

  set_it_insn_type (OUTSIDE_IT_INSN);
  inst.instruction |= LOW4 (Rd) << (thumb_mode ? 8 : 12);
  inst.instruction |= LOW4 (Rn) << 16;
  inst.instruction |= LOW4 (Rm);
  inst.instruction |= sz << (thumb_mode ? 4 : 21);
  inst.instruction |= poly << (thumb_mode ? 20 : 9);

  /* PC (and SP in Thumb) operands are UNPREDICTABLE; warn but encode.  */
  if (Rd == REG_PC || Rn == REG_PC || Rm == REG_PC)
    as_warn (UNPRED_REG ("r15"));
  if (thumb_mode && (Rd == REG_SP || Rn == REG_SP || Rm == REG_SP))
    as_warn (UNPRED_REG ("r13"));
}
17034 /* Overall per-instruction processing. */
17036 /* We need to be able to fix up arbitrary expressions in some statements.
17037 This is so that we can handle symbols that are an arbitrary distance from
17038 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
17039 which returns part of an address in a form which will be valid for
17040 a data instruction. We do this by pushing the expression into a symbol
17041 in the expr_section, and creating a fix for that. */
/* Create a fixup for the SIZE bytes at FRAG+WHERE against expression EXP,
   with relocation type RELOC; PC_REL is non-zero for pc-relative fixes.
   Constant expressions are turned into absolute symbols first so there is
   something concrete to refer to in the object file.  */
static void
fix_new_arm (fragS *       frag,
             int           where,
             short int     size,
             expressionS * exp,
             int           pc_rel,
             int           reloc)
{
  fixS * new_fix;

  switch (exp->X_op)
    {
    case O_constant:
      if (pc_rel)
        {
          /* Create an absolute valued symbol, so we have something to
             refer to in the object file.  Unfortunately for us, gas's
             generic expression parsing will already have folded out
             any use of .set foo/.type foo %function that may have
             been used to set type information of the target location,
             that's being specified symbolically.  We have to presume
             the user knows what they are doing.  */
          char name[16 + 8];
          symbolS * symbol;

          sprintf (name, "*ABS*0x%lx", (unsigned long)exp->X_add_number);

          symbol = symbol_find_or_make (name);
          S_SET_SEGMENT (symbol, absolute_section);
          symbol_set_frag (symbol, &zero_address_frag);
          S_SET_VALUE (symbol, exp->X_add_number);
          exp->X_op = O_symbol;
          exp->X_add_symbol = symbol;
          exp->X_add_number = 0;
        }
      /* FALLTHROUGH */
    case O_symbol:
    case O_add:
    case O_subtract:
      new_fix = fix_new_exp (frag, where, size, exp, pc_rel,
                             (enum bfd_reloc_code_real) reloc);
      break;

    default:
      /* Arbitrary expression: park it in a symbol and fix against that.  */
      new_fix = (fixS *) fix_new (frag, where, size, make_expr_symbol (exp), 0,
                                  pc_rel, (enum bfd_reloc_code_real) reloc);
      break;
    }

  /* Mark whether the fix is to a THUMB instruction, or an ARM
     instruction.  */
  new_fix->tc_fix_data = thumb_mode;
}
17097 /* Create a frag for an instruction requiring relaxation. */
/* Emit the current instruction as a variant frag so the relaxation machinery
   can later pick its final size/encoding.  */
static void
output_relax_insn (void)
{
  char * to;
  symbolS *sym;
  int offset;

  /* The size of the instruction is unknown, so tie the debug info to the
     start of the instruction.  */
  dwarf2_emit_insn (0);

  /* Decompose the relocation expression into a symbol + offset that
     frag_var can carry.  */
  switch (inst.reloc.exp.X_op)
    {
    case O_symbol:
      sym = inst.reloc.exp.X_add_symbol;
      offset = inst.reloc.exp.X_add_number;
      break;
    case O_constant:
      sym = NULL;
      offset = inst.reloc.exp.X_add_number;
      break;
    default:
      sym = make_expr_symbol (&inst.reloc.exp);
      offset = 0;
      break;
    }
  to = frag_var (rs_machine_dependent, INSN_SIZE, THUMB_SIZE,
                 inst.relax, sym, offset, NULL/*offset, opcode*/);
  md_number_to_chars (to, inst.instruction, THUMB_SIZE);
}
17129 /* Write a 32-bit thumb instruction to buf. */
/* Write a 32-bit thumb instruction to buf, as two 16-bit halfwords with the
   high halfword first.  */
static void
put_thumb32_insn (char * buf, unsigned long insn)
{
  md_number_to_chars (buf, insn >> 16, THUMB_SIZE);
  md_number_to_chars (buf + THUMB_SIZE, insn, THUMB_SIZE);
}
/* Emit the fully-encoded instruction in INST to the output frag, reporting
   any pending diagnostic against the source line STR.  Handles relaxable
   instructions, Thumb-2 halfword ordering, relocations and DWARF line info.  */
static void
output_inst (const char * str)
{
  char * to = NULL;

  if (inst.error)
    {
      as_bad ("%s -- `%s'", inst.error, str);
      return;
    }
  if (inst.relax)
    {
      output_relax_insn ();
      return;
    }
  if (inst.size == 0)
    return;

  to = frag_more (inst.size);
  /* PR 9814: Record the thumb mode into the current frag so that we know
     what type of NOP padding to use, if necessary.  We override any previous
     setting so that if the mode has changed then the NOPS that we use will
     match the encoding of the last instruction in the frag.  */
  frag_now->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;

  if (thumb_mode && (inst.size > THUMB_SIZE))
    {
      gas_assert (inst.size == (2 * THUMB_SIZE));
      put_thumb32_insn (to, inst.instruction);
    }
  else if (inst.size > INSN_SIZE)
    {
      /* Double-word ARM pattern: the same word emitted twice.  */
      gas_assert (inst.size == (2 * INSN_SIZE));
      md_number_to_chars (to, inst.instruction, INSN_SIZE);
      md_number_to_chars (to + INSN_SIZE, inst.instruction, INSN_SIZE);
    }
  else
    md_number_to_chars (to, inst.instruction, inst.size);

  if (inst.reloc.type != BFD_RELOC_UNUSED)
    fix_new_arm (frag_now, to - frag_now->fr_literal,
                 inst.size, & inst.reloc.exp, inst.reloc.pc_rel,
                 inst.reloc.type);

  dwarf2_emit_insn (inst.size);
}
/* Emit (or patch in place) a Thumb IT instruction with condition COND and
   mask MASK.  If TO is NULL, fresh space is taken from the current frag;
   otherwise the instruction at TO is overwritten (used to widen an open
   automatic IT block).  Returns the address written.  */
static char *
output_it_inst (int cond, int mask, char * to)
{
  unsigned long instruction = 0xbf00;	/* IT opcode skeleton.  */

  mask &= 0xf;
  instruction |= mask;
  instruction |= cond << 4;

  if (to == NULL)
    {
      to = frag_more (2);
#ifdef OBJ_ELF
      dwarf2_emit_insn (2);
#endif
    }

  md_number_to_chars (to, instruction, 2);

  return to;
}
17206 /* Tag values used in struct asm_opcode's tag field. */
17209 OT_unconditional
, /* Instruction cannot be conditionalized.
17210 The ARM condition field is still 0xE. */
17211 OT_unconditionalF
, /* Instruction cannot be conditionalized
17212 and carries 0xF in its ARM condition field. */
17213 OT_csuffix
, /* Instruction takes a conditional suffix. */
17214 OT_csuffixF
, /* Some forms of the instruction take a conditional
17215 suffix, others place 0xF where the condition field
17217 OT_cinfix3
, /* Instruction takes a conditional infix,
17218 beginning at character index 3. (In
17219 unified mode, it becomes a suffix.) */
17220 OT_cinfix3_deprecated
, /* The same as OT_cinfix3. This is used for
17221 tsts, cmps, cmns, and teqs. */
17222 OT_cinfix3_legacy
, /* Legacy instruction takes a conditional infix at
17223 character index 3, even in unified mode. Used for
17224 legacy instructions where suffix and infix forms
17225 may be ambiguous. */
17226 OT_csuf_or_in3
, /* Instruction takes either a conditional
17227 suffix or an infix at character index 3. */
17228 OT_odd_infix_unc
, /* This is the unconditional variant of an
17229 instruction that takes a conditional infix
17230 at an unusual position. In unified mode,
17231 this variant will accept a suffix. */
17232 OT_odd_infix_0
/* Values greater than or equal to OT_odd_infix_0
17233 are the conditional variants of instructions that
17234 take conditional infixes in unusual positions.
17235 The infix appears at character index
17236 (tag - OT_odd_infix_0). These are not accepted
17237 in unified mode. */
17240 /* Subroutine of md_assemble, responsible for looking up the primary
17241 opcode from the mnemonic the user wrote. STR points to the
17242 beginning of the mnemonic.
17244 This is not simply a hash table lookup, because of conditional
17245 variants. Most instructions have conditional variants, which are
17246 expressed with a _conditional affix_ to the mnemonic. If we were
17247 to encode each conditional variant as a literal string in the opcode
17248 table, it would have approximately 20,000 entries.
17250 Most mnemonics take this affix as a suffix, and in unified syntax,
17251 'most' is upgraded to 'all'. However, in the divided syntax, some
17252 instructions take the affix as an infix, notably the s-variants of
17253 the arithmetic instructions. Of those instructions, all but six
17254 have the infix appear after the third character of the mnemonic.
17256 Accordingly, the algorithm for looking up primary opcodes given
17259 1. Look up the identifier in the opcode table.
17260 If we find a match, go to step U.
17262 2. Look up the last two characters of the identifier in the
17263 conditions table. If we find a match, look up the first N-2
17264 characters of the identifier in the opcode table. If we
17265 find a match, go to step CE.
17267 3. Look up the fourth and fifth characters of the identifier in
17268 the conditions table. If we find a match, extract those
17269 characters from the identifier, and look up the remaining
17270 characters in the opcode table. If we find a match, go
17275 U. Examine the tag field of the opcode structure, in case this is
17276 one of the six instructions with its conditional infix in an
17277 unusual place. If it is, the tag tells us where to find the
17278 infix; look it up in the conditions table and set inst.cond
17279 accordingly. Otherwise, this is an unconditional instruction.
17280 Again set inst.cond accordingly. Return the opcode structure.
17282 CE. Examine the tag field to make sure this is an instruction that
17283 should receive a conditional suffix. If it is not, fail.
17284 Otherwise, set inst.cond from the suffix we already looked up,
17285 and return the opcode structure.
17287 CM. Examine the tag field to make sure this is an instruction that
17288 should receive a conditional infix after the third character.
17289 If it is not, fail. Otherwise, undo the edits to the current
17290 line of input and proceed as for case CE. */
/* Look up the primary opcode for the mnemonic at *STR, handling the
   conditional suffix/infix variants described in the block comment above
   (steps 1-3 / U / CE / CM).  Sets inst.cond (and possibly inst.size_req,
   inst.vectype, inst.error) as a side effect, advances *STR past the
   mnemonic, and returns the opcode entry or NULL.  */
static const struct asm_opcode *
opcode_lookup (char **str)
{
  char *end, *base;
  char *affix;
  const struct asm_opcode *opcode;
  const struct asm_cond *cond;
  char save[2];

  /* Scan up to the end of the mnemonic, which must end in white space,
     '.' (in unified mode, or for Neon/VFP instructions), or end of string.  */
  for (base = end = *str; *end != '\0'; end++)
    if (*end == ' ' || *end == '.')
      break;

  if (end == base)
    return NULL;

  /* Handle a possible width suffix and/or Neon type suffix.  */
  if (end[0] == '.')
    {
      int offset = 2;

      /* The .w and .n suffixes are only valid if the unified syntax is in
	 use.  */
      if (unified_syntax && end[1] == 'w')
	inst.size_req = 4;
      else if (unified_syntax && end[1] == 'n')
	inst.size_req = 2;
      else
	offset = 0;

      inst.vectype.elems = 0;

      *str = end + offset;

      if (end[offset] == '.')
	{
	  /* See if we have a Neon type suffix (possible in either unified or
	     non-unified ARM syntax mode).  */
	  if (parse_neon_type (&inst.vectype, str) == FAIL)
	    return NULL;
	}
      else if (end[offset] != '\0' && end[offset] != ' ')
	return NULL;
    }
  else
    *str = end;

  /* Look for unaffixed or special-case affixed mnemonic.  */
  opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
						    end - base);
  if (opcode)
    {
      /* Step U.  */
      if (opcode->tag < OT_odd_infix_0)
	{
	  inst.cond = COND_ALWAYS;
	  return opcode;
	}

      if (warn_on_deprecated && unified_syntax)
	as_tsktsk (_("conditional infixes are deprecated in unified syntax"));
      /* Unusual-position infix: the tag encodes the infix offset.  */
      affix = base + (opcode->tag - OT_odd_infix_0);
      cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
      gas_assert (cond);

      inst.cond = cond->value;
      return opcode;
    }

  /* Cannot have a conditional suffix on a mnemonic of less than two
     characters.  */
  if (end - base < 3)
    return NULL;

  /* Look for suffixed mnemonic.  */
  affix = end - 2;
  cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
  opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
						    affix - base);
  if (opcode && cond)
    {
      /* Step CE.  */
      switch (opcode->tag)
	{
	case OT_cinfix3_legacy:
	  /* Ignore conditional suffixes matched on infix only mnemonics.  */
	  break;

	case OT_cinfix3:
	case OT_cinfix3_deprecated:
	case OT_odd_infix_unc:
	  if (!unified_syntax)
	    return 0;
	  /* else fall through */

	case OT_csuffix:
	case OT_csuffixF:
	case OT_csuf_or_in3:
	  inst.cond = cond->value;
	  return opcode;

	case OT_unconditional:
	case OT_unconditionalF:
	  if (thumb_mode)
	    inst.cond = cond->value;
	  else
	    {
	      /* Delayed diagnostic.  */
	      inst.error = BAD_COND;
	      inst.cond = COND_ALWAYS;
	    }
	  return opcode;

	default:
	  return NULL;
	}
    }

  /* Cannot have a usual-position infix on a mnemonic of less than
     six characters (five would be a suffix).  */
  if (end - base < 6)
    return NULL;

  /* Look for infixed mnemonic in the usual position.  */
  affix = base + 3;
  cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
  if (!cond)
    return NULL;

  /* Temporarily splice the two infix characters out of the mnemonic,
     look up the remainder, then restore the original text.  */
  memcpy (save, affix, 2);
  memmove (affix, affix + 2, (end - affix) - 2);
  opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
						    (end - base) - 2);
  memmove (affix + 2, affix, (end - affix) - 2);
  memcpy (affix, save, 2);

  if (opcode
      && (opcode->tag == OT_cinfix3
	  || opcode->tag == OT_cinfix3_deprecated
	  || opcode->tag == OT_csuf_or_in3
	  || opcode->tag == OT_cinfix3_legacy))
    {
      /* Step CM.  */
      if (warn_on_deprecated && unified_syntax
	  && (opcode->tag == OT_cinfix3
	      || opcode->tag == OT_cinfix3_deprecated))
	as_tsktsk (_("conditional infixes are deprecated in unified syntax"));

      inst.cond = cond->value;
      return opcode;
    }

  return NULL;
}
17449 /* This function generates an initial IT instruction, leaving its block
17450 virtually open for the new instructions. Eventually,
17451 the mask will be updated by now_it_add_mask () each time
17452 a new instruction needs to be included in the IT block.
17453 Finally, the block is closed with close_automatic_it_block ().
17454 The block closure can be requested either from md_assemble (),
17455 a tencode (), or due to a label hook. */
17458 new_automatic_it_block (int cond
)
17460 now_it
.state
= AUTOMATIC_IT_BLOCK
;
17461 now_it
.mask
= 0x18;
17463 now_it
.block_length
= 1;
17464 mapping_state (MAP_THUMB
);
17465 now_it
.insn
= output_it_inst (cond
, now_it
.mask
, NULL
);
17466 now_it
.warn_deprecated
= FALSE
;
17467 now_it
.insn_cond
= TRUE
;
17470 /* Close an automatic IT block.
17471 See comments in new_automatic_it_block (). */
17474 close_automatic_it_block (void)
17476 now_it
.mask
= 0x10;
17477 now_it
.block_length
= 0;
17480 /* Update the mask of the current automatically-generated IT
17481 instruction. See comments in new_automatic_it_block (). */
17484 now_it_add_mask (int cond
)
17486 #define CLEAR_BIT(value, nbit) ((value) & ~(1 << (nbit)))
17487 #define SET_BIT_VALUE(value, bitvalue, nbit) (CLEAR_BIT (value, nbit) \
17488 | ((bitvalue) << (nbit)))
17489 const int resulting_bit
= (cond
& 1);
17491 now_it
.mask
&= 0xf;
17492 now_it
.mask
= SET_BIT_VALUE (now_it
.mask
,
17494 (5 - now_it
.block_length
));
17495 now_it
.mask
= SET_BIT_VALUE (now_it
.mask
,
17497 ((5 - now_it
.block_length
) - 1) );
17498 output_it_inst (now_it
.cc
, now_it
.mask
, now_it
.insn
);
17501 #undef SET_BIT_VALUE
17504 /* The IT blocks handling machinery is accessed through the these functions:
17505 it_fsm_pre_encode () from md_assemble ()
17506 set_it_insn_type () optional, from the tencode functions
17507 set_it_insn_type_last () ditto
17508 in_it_block () ditto
17509 it_fsm_post_encode () from md_assemble ()
17510 force_automatic_it_block_close () from label handling functions
17513 1) md_assemble () calls it_fsm_pre_encode () before calling tencode (),
17514 initializing the IT insn type with a generic initial value depending
17515 on the inst.condition.
17516 2) During the tencode function, two things may happen:
17517 a) The tencode function overrides the IT insn type by
17518 calling either set_it_insn_type (type) or set_it_insn_type_last ().
17519 b) The tencode function queries the IT block state by
17520 calling in_it_block () (i.e. to determine narrow/not narrow mode).
17522 Both set_it_insn_type and in_it_block run the internal FSM state
17523 handling function (handle_it_state), because: a) setting the IT insn
17524 type may incur in an invalid state (exiting the function),
17525 and b) querying the state requires the FSM to be updated.
17526 Specifically we want to avoid creating an IT block for conditional
17527 branches, so it_fsm_pre_encode is actually a guess and we can't
17528 determine whether an IT block is required until the tencode () routine
17529 has decided what type of instruction this actually is.
17530 Because of this, if set_it_insn_type and in_it_block have to be used,
17531 set_it_insn_type has to be called first.
17533 set_it_insn_type_last () is a wrapper of set_it_insn_type (type), that
17534 determines the insn IT type depending on the inst.cond code.
17535 When a tencode () routine encodes an instruction that can be
17536 either outside an IT block, or, in the case of being inside, has to be
17537 the last one, set_it_insn_type_last () will determine the proper
17538 IT instruction type based on the inst.cond code. Otherwise,
17539 set_it_insn_type can be called for overriding that logic or
17540 for covering other cases.
17542 Calling handle_it_state () may not transition the IT block state to
17543 OUTSIDE_IT_BLOCK immediately, since the (current) state could be
17544 still queried. Instead, if the FSM determines that the state should
17545 be transitioned to OUTSIDE_IT_BLOCK, a flag is marked to be closed
17546 after the tencode () function: that's what it_fsm_post_encode () does.
17548 Since in_it_block () calls the state handling function to get an
17549 updated state, an error may occur (due to invalid insns combination).
17550 In that case, inst.error is set.
17551 Therefore, inst.error has to be checked after the execution of
17552 the tencode () routine.
17554 3) Back in md_assemble(), it_fsm_post_encode () is called to commit
17555 any pending state change (if any) that didn't take place in
17556 handle_it_state () as explained above. */
17559 it_fsm_pre_encode (void)
17561 if (inst
.cond
!= COND_ALWAYS
)
17562 inst
.it_insn_type
= INSIDE_IT_INSN
;
17564 inst
.it_insn_type
= OUTSIDE_IT_INSN
;
17566 now_it
.state_handled
= 0;
17569 /* IT state FSM handling function. */
17572 handle_it_state (void)
17574 now_it
.state_handled
= 1;
17575 now_it
.insn_cond
= FALSE
;
17577 switch (now_it
.state
)
17579 case OUTSIDE_IT_BLOCK
:
17580 switch (inst
.it_insn_type
)
17582 case OUTSIDE_IT_INSN
:
17585 case INSIDE_IT_INSN
:
17586 case INSIDE_IT_LAST_INSN
:
17587 if (thumb_mode
== 0)
17590 && !(implicit_it_mode
& IMPLICIT_IT_MODE_ARM
))
17591 as_tsktsk (_("Warning: conditional outside an IT block"\
17596 if ((implicit_it_mode
& IMPLICIT_IT_MODE_THUMB
)
17597 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2
))
17599 /* Automatically generate the IT instruction. */
17600 new_automatic_it_block (inst
.cond
);
17601 if (inst
.it_insn_type
== INSIDE_IT_LAST_INSN
)
17602 close_automatic_it_block ();
17606 inst
.error
= BAD_OUT_IT
;
17612 case IF_INSIDE_IT_LAST_INSN
:
17613 case NEUTRAL_IT_INSN
:
17617 now_it
.state
= MANUAL_IT_BLOCK
;
17618 now_it
.block_length
= 0;
17623 case AUTOMATIC_IT_BLOCK
:
17624 /* Three things may happen now:
17625 a) We should increment current it block size;
17626 b) We should close current it block (closing insn or 4 insns);
17627 c) We should close current it block and start a new one (due
17628 to incompatible conditions or
17629 4 insns-length block reached). */
17631 switch (inst
.it_insn_type
)
17633 case OUTSIDE_IT_INSN
:
17634 /* The closure of the block shall happen immediatelly,
17635 so any in_it_block () call reports the block as closed. */
17636 force_automatic_it_block_close ();
17639 case INSIDE_IT_INSN
:
17640 case INSIDE_IT_LAST_INSN
:
17641 case IF_INSIDE_IT_LAST_INSN
:
17642 now_it
.block_length
++;
17644 if (now_it
.block_length
> 4
17645 || !now_it_compatible (inst
.cond
))
17647 force_automatic_it_block_close ();
17648 if (inst
.it_insn_type
!= IF_INSIDE_IT_LAST_INSN
)
17649 new_automatic_it_block (inst
.cond
);
17653 now_it
.insn_cond
= TRUE
;
17654 now_it_add_mask (inst
.cond
);
17657 if (now_it
.state
== AUTOMATIC_IT_BLOCK
17658 && (inst
.it_insn_type
== INSIDE_IT_LAST_INSN
17659 || inst
.it_insn_type
== IF_INSIDE_IT_LAST_INSN
))
17660 close_automatic_it_block ();
17663 case NEUTRAL_IT_INSN
:
17664 now_it
.block_length
++;
17665 now_it
.insn_cond
= TRUE
;
17667 if (now_it
.block_length
> 4)
17668 force_automatic_it_block_close ();
17670 now_it_add_mask (now_it
.cc
& 1);
17674 close_automatic_it_block ();
17675 now_it
.state
= MANUAL_IT_BLOCK
;
17680 case MANUAL_IT_BLOCK
:
17682 /* Check conditional suffixes. */
17683 const int cond
= now_it
.cc
^ ((now_it
.mask
>> 4) & 1) ^ 1;
17686 now_it
.mask
&= 0x1f;
17687 is_last
= (now_it
.mask
== 0x10);
17688 now_it
.insn_cond
= TRUE
;
17690 switch (inst
.it_insn_type
)
17692 case OUTSIDE_IT_INSN
:
17693 inst
.error
= BAD_NOT_IT
;
17696 case INSIDE_IT_INSN
:
17697 if (cond
!= inst
.cond
)
17699 inst
.error
= BAD_IT_COND
;
17704 case INSIDE_IT_LAST_INSN
:
17705 case IF_INSIDE_IT_LAST_INSN
:
17706 if (cond
!= inst
.cond
)
17708 inst
.error
= BAD_IT_COND
;
17713 inst
.error
= BAD_BRANCH
;
17718 case NEUTRAL_IT_INSN
:
17719 /* The BKPT instruction is unconditional even in an IT block. */
17723 inst
.error
= BAD_IT_IT
;
17733 struct depr_insn_mask
17735 unsigned long pattern
;
17736 unsigned long mask
;
17737 const char* description
;
17740 /* List of 16-bit instruction patterns deprecated in an IT block in
17742 static const struct depr_insn_mask depr_it_insns
[] = {
17743 { 0xc000, 0xc000, N_("Short branches, Undefined, SVC, LDM/STM") },
17744 { 0xb000, 0xb000, N_("Miscellaneous 16-bit instructions") },
17745 { 0xa000, 0xb800, N_("ADR") },
17746 { 0x4800, 0xf800, N_("Literal loads") },
17747 { 0x4478, 0xf478, N_("Hi-register ADD, MOV, CMP, BX, BLX using pc") },
17748 { 0x4487, 0xfc87, N_("Hi-register ADD, MOV, CMP using pc") },
17749 /* NOTE: 0x00dd is not the real encoding, instead, it is the 'tvalue'
17750 field in asm_opcode. 'tvalue' is used at the stage this check happen. */
17751 { 0x00dd, 0x7fff, N_("ADD/SUB sp, sp #imm") },
17756 it_fsm_post_encode (void)
17760 if (!now_it
.state_handled
)
17761 handle_it_state ();
17763 if (now_it
.insn_cond
17764 && !now_it
.warn_deprecated
17765 && warn_on_deprecated
17766 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8
))
17768 if (inst
.instruction
>= 0x10000)
17770 as_tsktsk (_("IT blocks containing 32-bit Thumb instructions are "
17771 "deprecated in ARMv8"));
17772 now_it
.warn_deprecated
= TRUE
;
17776 const struct depr_insn_mask
*p
= depr_it_insns
;
17778 while (p
->mask
!= 0)
17780 if ((inst
.instruction
& p
->mask
) == p
->pattern
)
17782 as_tsktsk (_("IT blocks containing 16-bit Thumb instructions "
17783 "of the following class are deprecated in ARMv8: "
17784 "%s"), p
->description
);
17785 now_it
.warn_deprecated
= TRUE
;
17793 if (now_it
.block_length
> 1)
17795 as_tsktsk (_("IT blocks containing more than one conditional "
17796 "instruction are deprecated in ARMv8"));
17797 now_it
.warn_deprecated
= TRUE
;
17801 is_last
= (now_it
.mask
== 0x10);
17804 now_it
.state
= OUTSIDE_IT_BLOCK
;
17810 force_automatic_it_block_close (void)
17812 if (now_it
.state
== AUTOMATIC_IT_BLOCK
)
17814 close_automatic_it_block ();
17815 now_it
.state
= OUTSIDE_IT_BLOCK
;
17823 if (!now_it
.state_handled
)
17824 handle_it_state ();
17826 return now_it
.state
!= OUTSIDE_IT_BLOCK
;
17829 /* Whether OPCODE only has T32 encoding. Since this function is only used by
17830 t32_insn_ok, OPCODE enabled by v6t2 extension bit do not need to be listed
17831 here, hence the "known" in the function name. */
17834 known_t32_only_insn (const struct asm_opcode
*opcode
)
17836 /* Original Thumb-1 wide instruction. */
17837 if (opcode
->tencode
== do_t_blx
17838 || opcode
->tencode
== do_t_branch23
17839 || ARM_CPU_HAS_FEATURE (*opcode
->tvariant
, arm_ext_msr
)
17840 || ARM_CPU_HAS_FEATURE (*opcode
->tvariant
, arm_ext_barrier
))
17843 /* Wide-only instruction added to ARMv8-M. */
17844 if (ARM_CPU_HAS_FEATURE (*opcode
->tvariant
, arm_ext_v8m
)
17845 || ARM_CPU_HAS_FEATURE (*opcode
->tvariant
, arm_ext_atomics
)
17846 || ARM_CPU_HAS_FEATURE (*opcode
->tvariant
, arm_ext_v6t2_v8m
)
17847 || ARM_CPU_HAS_FEATURE (*opcode
->tvariant
, arm_ext_div
))
17853 /* Whether wide instruction variant can be used if available for a valid OPCODE
17857 t32_insn_ok (arm_feature_set arch
, const struct asm_opcode
*opcode
)
17859 if (known_t32_only_insn (opcode
))
17862 /* Instruction with narrow and wide encoding added to ARMv8-M. Availability
17863 of variant T3 of B.W is checked in do_t_branch. */
17864 if (ARM_CPU_HAS_FEATURE (arch
, arm_ext_v8m
)
17865 && opcode
->tencode
== do_t_branch
)
17868 /* Wide instruction variants of all instructions with narrow *and* wide
17869 variants become available with ARMv6t2. Other opcodes are either
17870 narrow-only or wide-only and are thus available if OPCODE is valid. */
17871 if (ARM_CPU_HAS_FEATURE (arch
, arm_ext_v6t2
))
17874 /* OPCODE with narrow only instruction variant or wide variant not
17880 md_assemble (char *str
)
17883 const struct asm_opcode
* opcode
;
17885 /* Align the previous label if needed. */
17886 if (last_label_seen
!= NULL
)
17888 symbol_set_frag (last_label_seen
, frag_now
);
17889 S_SET_VALUE (last_label_seen
, (valueT
) frag_now_fix ());
17890 S_SET_SEGMENT (last_label_seen
, now_seg
);
17893 memset (&inst
, '\0', sizeof (inst
));
17894 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
17896 opcode
= opcode_lookup (&p
);
17899 /* It wasn't an instruction, but it might be a register alias of
17900 the form alias .req reg, or a Neon .dn/.qn directive. */
17901 if (! create_register_alias (str
, p
)
17902 && ! create_neon_reg_alias (str
, p
))
17903 as_bad (_("bad instruction `%s'"), str
);
17908 if (warn_on_deprecated
&& opcode
->tag
== OT_cinfix3_deprecated
)
17909 as_tsktsk (_("s suffix on comparison instruction is deprecated"));
17911 /* The value which unconditional instructions should have in place of the
17912 condition field. */
17913 inst
.uncond_value
= (opcode
->tag
== OT_csuffixF
) ? 0xf : -1;
17917 arm_feature_set variant
;
17919 variant
= cpu_variant
;
17920 /* Only allow coprocessor instructions on Thumb-2 capable devices. */
17921 if (!ARM_CPU_HAS_FEATURE (variant
, arm_arch_t2
))
17922 ARM_CLEAR_FEATURE (variant
, variant
, fpu_any_hard
);
17923 /* Check that this instruction is supported for this CPU. */
17924 if (!opcode
->tvariant
17925 || (thumb_mode
== 1
17926 && !ARM_CPU_HAS_FEATURE (variant
, *opcode
->tvariant
)))
17928 as_bad (_("selected processor does not support `%s' in Thumb mode"), str
);
17931 if (inst
.cond
!= COND_ALWAYS
&& !unified_syntax
17932 && opcode
->tencode
!= do_t_branch
)
17934 as_bad (_("Thumb does not support conditional execution"));
17938 /* Two things are addressed here:
17939 1) Implicit require narrow instructions on Thumb-1.
17940 This avoids relaxation accidentally introducing Thumb-2
17942 2) Reject wide instructions in non Thumb-2 cores.
17944 Only instructions with narrow and wide variants need to be handled
17945 but selecting all non wide-only instructions is easier. */
17946 if (!ARM_CPU_HAS_FEATURE (variant
, arm_ext_v6t2
)
17947 && !t32_insn_ok (variant
, opcode
))
17949 if (inst
.size_req
== 0)
17951 else if (inst
.size_req
== 4)
17953 if (ARM_CPU_HAS_FEATURE (variant
, arm_ext_v8m
))
17954 as_bad (_("selected processor does not support 32bit wide "
17955 "variant of instruction `%s'"), str
);
17957 as_bad (_("selected processor does not support `%s' in "
17958 "Thumb-2 mode"), str
);
17963 inst
.instruction
= opcode
->tvalue
;
17965 if (!parse_operands (p
, opcode
->operands
, /*thumb=*/TRUE
))
17967 /* Prepare the it_insn_type for those encodings that don't set
17969 it_fsm_pre_encode ();
17971 opcode
->tencode ();
17973 it_fsm_post_encode ();
17976 if (!(inst
.error
|| inst
.relax
))
17978 gas_assert (inst
.instruction
< 0xe800 || inst
.instruction
> 0xffff);
17979 inst
.size
= (inst
.instruction
> 0xffff ? 4 : 2);
17980 if (inst
.size_req
&& inst
.size_req
!= inst
.size
)
17982 as_bad (_("cannot honor width suffix -- `%s'"), str
);
17987 /* Something has gone badly wrong if we try to relax a fixed size
17989 gas_assert (inst
.size_req
== 0 || !inst
.relax
);
17991 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
17992 *opcode
->tvariant
);
17993 /* Many Thumb-2 instructions also have Thumb-1 variants, so explicitly
17994 set those bits when Thumb-2 32-bit instructions are seen. The impact
17995 of relaxable instructions will be considered later after we finish all
17997 if (ARM_FEATURE_CORE_EQUAL (cpu_variant
, arm_arch_any
))
17998 variant
= arm_arch_none
;
18000 variant
= cpu_variant
;
18001 if (inst
.size
== 4 && !t32_insn_ok (variant
, opcode
))
18002 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
18005 check_neon_suffixes
;
18009 mapping_state (MAP_THUMB
);
18012 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
))
18016 /* bx is allowed on v5 cores, and sometimes on v4 cores. */
18017 is_bx
= (opcode
->aencode
== do_bx
);
18019 /* Check that this instruction is supported for this CPU. */
18020 if (!(is_bx
&& fix_v4bx
)
18021 && !(opcode
->avariant
&&
18022 ARM_CPU_HAS_FEATURE (cpu_variant
, *opcode
->avariant
)))
18024 as_bad (_("selected processor does not support `%s' in ARM mode"), str
);
18029 as_bad (_("width suffixes are invalid in ARM mode -- `%s'"), str
);
18033 inst
.instruction
= opcode
->avalue
;
18034 if (opcode
->tag
== OT_unconditionalF
)
18035 inst
.instruction
|= 0xFU
<< 28;
18037 inst
.instruction
|= inst
.cond
<< 28;
18038 inst
.size
= INSN_SIZE
;
18039 if (!parse_operands (p
, opcode
->operands
, /*thumb=*/FALSE
))
18041 it_fsm_pre_encode ();
18042 opcode
->aencode ();
18043 it_fsm_post_encode ();
18045 /* Arm mode bx is marked as both v4T and v5 because it's still required
18046 on a hypothetical non-thumb v5 core. */
18048 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
, arm_ext_v4t
);
18050 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
,
18051 *opcode
->avariant
);
18053 check_neon_suffixes
;
18057 mapping_state (MAP_ARM
);
18062 as_bad (_("attempt to use an ARM instruction on a Thumb-only processor "
18070 check_it_blocks_finished (void)
18075 for (sect
= stdoutput
->sections
; sect
!= NULL
; sect
= sect
->next
)
18076 if (seg_info (sect
)->tc_segment_info_data
.current_it
.state
18077 == MANUAL_IT_BLOCK
)
18079 as_warn (_("section '%s' finished with an open IT block."),
18083 if (now_it
.state
== MANUAL_IT_BLOCK
)
18084 as_warn (_("file finished with an open IT block."));
18088 /* Various frobbings of labels and their addresses. */
18091 arm_start_line_hook (void)
18093 last_label_seen
= NULL
;
18097 arm_frob_label (symbolS
* sym
)
18099 last_label_seen
= sym
;
18101 ARM_SET_THUMB (sym
, thumb_mode
);
18103 #if defined OBJ_COFF || defined OBJ_ELF
18104 ARM_SET_INTERWORK (sym
, support_interwork
);
18107 force_automatic_it_block_close ();
18109 /* Note - do not allow local symbols (.Lxxx) to be labelled
18110 as Thumb functions. This is because these labels, whilst
18111 they exist inside Thumb code, are not the entry points for
18112 possible ARM->Thumb calls. Also, these labels can be used
18113 as part of a computed goto or switch statement. eg gcc
18114 can generate code that looks like this:
18116 ldr r2, [pc, .Laaa]
18126 The first instruction loads the address of the jump table.
18127 The second instruction converts a table index into a byte offset.
18128 The third instruction gets the jump address out of the table.
18129 The fourth instruction performs the jump.
18131 If the address stored at .Laaa is that of a symbol which has the
18132 Thumb_Func bit set, then the linker will arrange for this address
18133 to have the bottom bit set, which in turn would mean that the
18134 address computation performed by the third instruction would end
18135 up with the bottom bit set. Since the ARM is capable of unaligned
18136 word loads, the instruction would then load the incorrect address
18137 out of the jump table, and chaos would ensue. */
18138 if (label_is_thumb_function_name
18139 && (S_GET_NAME (sym
)[0] != '.' || S_GET_NAME (sym
)[1] != 'L')
18140 && (bfd_get_section_flags (stdoutput
, now_seg
) & SEC_CODE
) != 0)
18142 /* When the address of a Thumb function is taken the bottom
18143 bit of that address should be set. This will allow
18144 interworking between Arm and Thumb functions to work
18147 THUMB_SET_FUNC (sym
, 1);
18149 label_is_thumb_function_name
= FALSE
;
18152 dwarf2_emit_label (sym
);
18156 arm_data_in_code (void)
18158 if (thumb_mode
&& ! strncmp (input_line_pointer
+ 1, "data:", 5))
18160 *input_line_pointer
= '/';
18161 input_line_pointer
+= 5;
18162 *input_line_pointer
= 0;
18170 arm_canonicalize_symbol_name (char * name
)
18174 if (thumb_mode
&& (len
= strlen (name
)) > 5
18175 && streq (name
+ len
- 5, "/data"))
18176 *(name
+ len
- 5) = 0;
18181 /* Table of all register names defined by default. The user can
18182 define additional names with .req. Note that all register names
18183 should appear in both upper and lowercase variants. Some registers
18184 also have mixed-case names. */
18186 #define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE, 0 }
18187 #define REGNUM(p,n,t) REGDEF(p##n, n, t)
18188 #define REGNUM2(p,n,t) REGDEF(p##n, 2 * n, t)
18189 #define REGSET(p,t) \
18190 REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
18191 REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
18192 REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
18193 REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
18194 #define REGSETH(p,t) \
18195 REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
18196 REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
18197 REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
18198 REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t), REGNUM(p,31,t)
18199 #define REGSET2(p,t) \
18200 REGNUM2(p, 0,t), REGNUM2(p, 1,t), REGNUM2(p, 2,t), REGNUM2(p, 3,t), \
18201 REGNUM2(p, 4,t), REGNUM2(p, 5,t), REGNUM2(p, 6,t), REGNUM2(p, 7,t), \
18202 REGNUM2(p, 8,t), REGNUM2(p, 9,t), REGNUM2(p,10,t), REGNUM2(p,11,t), \
18203 REGNUM2(p,12,t), REGNUM2(p,13,t), REGNUM2(p,14,t), REGNUM2(p,15,t)
18204 #define SPLRBANK(base,bank,t) \
18205 REGDEF(lr_##bank, 768|((base+0)<<16), t), \
18206 REGDEF(sp_##bank, 768|((base+1)<<16), t), \
18207 REGDEF(spsr_##bank, 768|(base<<16)|SPSR_BIT, t), \
18208 REGDEF(LR_##bank, 768|((base+0)<<16), t), \
18209 REGDEF(SP_##bank, 768|((base+1)<<16), t), \
18210 REGDEF(SPSR_##bank, 768|(base<<16)|SPSR_BIT, t)
18212 static const struct reg_entry reg_names
[] =
18214 /* ARM integer registers. */
18215 REGSET(r
, RN
), REGSET(R
, RN
),
18217 /* ATPCS synonyms. */
18218 REGDEF(a1
,0,RN
), REGDEF(a2
,1,RN
), REGDEF(a3
, 2,RN
), REGDEF(a4
, 3,RN
),
18219 REGDEF(v1
,4,RN
), REGDEF(v2
,5,RN
), REGDEF(v3
, 6,RN
), REGDEF(v4
, 7,RN
),
18220 REGDEF(v5
,8,RN
), REGDEF(v6
,9,RN
), REGDEF(v7
,10,RN
), REGDEF(v8
,11,RN
),
18222 REGDEF(A1
,0,RN
), REGDEF(A2
,1,RN
), REGDEF(A3
, 2,RN
), REGDEF(A4
, 3,RN
),
18223 REGDEF(V1
,4,RN
), REGDEF(V2
,5,RN
), REGDEF(V3
, 6,RN
), REGDEF(V4
, 7,RN
),
18224 REGDEF(V5
,8,RN
), REGDEF(V6
,9,RN
), REGDEF(V7
,10,RN
), REGDEF(V8
,11,RN
),
18226 /* Well-known aliases. */
18227 REGDEF(wr
, 7,RN
), REGDEF(sb
, 9,RN
), REGDEF(sl
,10,RN
), REGDEF(fp
,11,RN
),
18228 REGDEF(ip
,12,RN
), REGDEF(sp
,13,RN
), REGDEF(lr
,14,RN
), REGDEF(pc
,15,RN
),
18230 REGDEF(WR
, 7,RN
), REGDEF(SB
, 9,RN
), REGDEF(SL
,10,RN
), REGDEF(FP
,11,RN
),
18231 REGDEF(IP
,12,RN
), REGDEF(SP
,13,RN
), REGDEF(LR
,14,RN
), REGDEF(PC
,15,RN
),
18233 /* Coprocessor numbers. */
18234 REGSET(p
, CP
), REGSET(P
, CP
),
18236 /* Coprocessor register numbers. The "cr" variants are for backward
18238 REGSET(c
, CN
), REGSET(C
, CN
),
18239 REGSET(cr
, CN
), REGSET(CR
, CN
),
18241 /* ARM banked registers. */
18242 REGDEF(R8_usr
,512|(0<<16),RNB
), REGDEF(r8_usr
,512|(0<<16),RNB
),
18243 REGDEF(R9_usr
,512|(1<<16),RNB
), REGDEF(r9_usr
,512|(1<<16),RNB
),
18244 REGDEF(R10_usr
,512|(2<<16),RNB
), REGDEF(r10_usr
,512|(2<<16),RNB
),
18245 REGDEF(R11_usr
,512|(3<<16),RNB
), REGDEF(r11_usr
,512|(3<<16),RNB
),
18246 REGDEF(R12_usr
,512|(4<<16),RNB
), REGDEF(r12_usr
,512|(4<<16),RNB
),
18247 REGDEF(SP_usr
,512|(5<<16),RNB
), REGDEF(sp_usr
,512|(5<<16),RNB
),
18248 REGDEF(LR_usr
,512|(6<<16),RNB
), REGDEF(lr_usr
,512|(6<<16),RNB
),
18250 REGDEF(R8_fiq
,512|(8<<16),RNB
), REGDEF(r8_fiq
,512|(8<<16),RNB
),
18251 REGDEF(R9_fiq
,512|(9<<16),RNB
), REGDEF(r9_fiq
,512|(9<<16),RNB
),
18252 REGDEF(R10_fiq
,512|(10<<16),RNB
), REGDEF(r10_fiq
,512|(10<<16),RNB
),
18253 REGDEF(R11_fiq
,512|(11<<16),RNB
), REGDEF(r11_fiq
,512|(11<<16),RNB
),
18254 REGDEF(R12_fiq
,512|(12<<16),RNB
), REGDEF(r12_fiq
,512|(12<<16),RNB
),
18255 REGDEF(SP_fiq
,512|(13<<16),RNB
), REGDEF(sp_fiq
,512|(13<<16),RNB
),
18256 REGDEF(LR_fiq
,512|(14<<16),RNB
), REGDEF(lr_fiq
,512|(14<<16),RNB
),
18257 REGDEF(SPSR_fiq
,512|(14<<16)|SPSR_BIT
,RNB
), REGDEF(spsr_fiq
,512|(14<<16)|SPSR_BIT
,RNB
),
18259 SPLRBANK(0,IRQ
,RNB
), SPLRBANK(0,irq
,RNB
),
18260 SPLRBANK(2,SVC
,RNB
), SPLRBANK(2,svc
,RNB
),
18261 SPLRBANK(4,ABT
,RNB
), SPLRBANK(4,abt
,RNB
),
18262 SPLRBANK(6,UND
,RNB
), SPLRBANK(6,und
,RNB
),
18263 SPLRBANK(12,MON
,RNB
), SPLRBANK(12,mon
,RNB
),
18264 REGDEF(elr_hyp
,768|(14<<16),RNB
), REGDEF(ELR_hyp
,768|(14<<16),RNB
),
18265 REGDEF(sp_hyp
,768|(15<<16),RNB
), REGDEF(SP_hyp
,768|(15<<16),RNB
),
18266 REGDEF(spsr_hyp
,768|(14<<16)|SPSR_BIT
,RNB
),
18267 REGDEF(SPSR_hyp
,768|(14<<16)|SPSR_BIT
,RNB
),
18269 /* FPA registers. */
18270 REGNUM(f
,0,FN
), REGNUM(f
,1,FN
), REGNUM(f
,2,FN
), REGNUM(f
,3,FN
),
18271 REGNUM(f
,4,FN
), REGNUM(f
,5,FN
), REGNUM(f
,6,FN
), REGNUM(f
,7, FN
),
18273 REGNUM(F
,0,FN
), REGNUM(F
,1,FN
), REGNUM(F
,2,FN
), REGNUM(F
,3,FN
),
18274 REGNUM(F
,4,FN
), REGNUM(F
,5,FN
), REGNUM(F
,6,FN
), REGNUM(F
,7, FN
),
18276 /* VFP SP registers. */
18277 REGSET(s
,VFS
), REGSET(S
,VFS
),
18278 REGSETH(s
,VFS
), REGSETH(S
,VFS
),
18280 /* VFP DP Registers. */
18281 REGSET(d
,VFD
), REGSET(D
,VFD
),
18282 /* Extra Neon DP registers. */
18283 REGSETH(d
,VFD
), REGSETH(D
,VFD
),
18285 /* Neon QP registers. */
18286 REGSET2(q
,NQ
), REGSET2(Q
,NQ
),
18288 /* VFP control registers. */
18289 REGDEF(fpsid
,0,VFC
), REGDEF(fpscr
,1,VFC
), REGDEF(fpexc
,8,VFC
),
18290 REGDEF(FPSID
,0,VFC
), REGDEF(FPSCR
,1,VFC
), REGDEF(FPEXC
,8,VFC
),
18291 REGDEF(fpinst
,9,VFC
), REGDEF(fpinst2
,10,VFC
),
18292 REGDEF(FPINST
,9,VFC
), REGDEF(FPINST2
,10,VFC
),
18293 REGDEF(mvfr0
,7,VFC
), REGDEF(mvfr1
,6,VFC
),
18294 REGDEF(MVFR0
,7,VFC
), REGDEF(MVFR1
,6,VFC
),
18296 /* Maverick DSP coprocessor registers. */
18297 REGSET(mvf
,MVF
), REGSET(mvd
,MVD
), REGSET(mvfx
,MVFX
), REGSET(mvdx
,MVDX
),
18298 REGSET(MVF
,MVF
), REGSET(MVD
,MVD
), REGSET(MVFX
,MVFX
), REGSET(MVDX
,MVDX
),
18300 REGNUM(mvax
,0,MVAX
), REGNUM(mvax
,1,MVAX
),
18301 REGNUM(mvax
,2,MVAX
), REGNUM(mvax
,3,MVAX
),
18302 REGDEF(dspsc
,0,DSPSC
),
18304 REGNUM(MVAX
,0,MVAX
), REGNUM(MVAX
,1,MVAX
),
18305 REGNUM(MVAX
,2,MVAX
), REGNUM(MVAX
,3,MVAX
),
18306 REGDEF(DSPSC
,0,DSPSC
),
18308 /* iWMMXt data registers - p0, c0-15. */
18309 REGSET(wr
,MMXWR
), REGSET(wR
,MMXWR
), REGSET(WR
, MMXWR
),
18311 /* iWMMXt control registers - p1, c0-3. */
18312 REGDEF(wcid
, 0,MMXWC
), REGDEF(wCID
, 0,MMXWC
), REGDEF(WCID
, 0,MMXWC
),
18313 REGDEF(wcon
, 1,MMXWC
), REGDEF(wCon
, 1,MMXWC
), REGDEF(WCON
, 1,MMXWC
),
18314 REGDEF(wcssf
, 2,MMXWC
), REGDEF(wCSSF
, 2,MMXWC
), REGDEF(WCSSF
, 2,MMXWC
),
18315 REGDEF(wcasf
, 3,MMXWC
), REGDEF(wCASF
, 3,MMXWC
), REGDEF(WCASF
, 3,MMXWC
),
18317 /* iWMMXt scalar (constant/offset) registers - p1, c8-11. */
18318 REGDEF(wcgr0
, 8,MMXWCG
), REGDEF(wCGR0
, 8,MMXWCG
), REGDEF(WCGR0
, 8,MMXWCG
),
18319 REGDEF(wcgr1
, 9,MMXWCG
), REGDEF(wCGR1
, 9,MMXWCG
), REGDEF(WCGR1
, 9,MMXWCG
),
18320 REGDEF(wcgr2
,10,MMXWCG
), REGDEF(wCGR2
,10,MMXWCG
), REGDEF(WCGR2
,10,MMXWCG
),
18321 REGDEF(wcgr3
,11,MMXWCG
), REGDEF(wCGR3
,11,MMXWCG
), REGDEF(WCGR3
,11,MMXWCG
),
18323 /* XScale accumulator registers. */
18324 REGNUM(acc
,0,XSCALE
), REGNUM(ACC
,0,XSCALE
),
18330 /* Table of all PSR suffixes. Bare "CPSR" and "SPSR" are handled
18331 within psr_required_here. */
18332 static const struct asm_psr psrs
[] =
18334 /* Backward compatibility notation. Note that "all" is no longer
18335 truly all possible PSR bits. */
18336 {"all", PSR_c
| PSR_f
},
18340 /* Individual flags. */
18346 /* Combinations of flags. */
18347 {"fs", PSR_f
| PSR_s
},
18348 {"fx", PSR_f
| PSR_x
},
18349 {"fc", PSR_f
| PSR_c
},
18350 {"sf", PSR_s
| PSR_f
},
18351 {"sx", PSR_s
| PSR_x
},
18352 {"sc", PSR_s
| PSR_c
},
18353 {"xf", PSR_x
| PSR_f
},
18354 {"xs", PSR_x
| PSR_s
},
18355 {"xc", PSR_x
| PSR_c
},
18356 {"cf", PSR_c
| PSR_f
},
18357 {"cs", PSR_c
| PSR_s
},
18358 {"cx", PSR_c
| PSR_x
},
18359 {"fsx", PSR_f
| PSR_s
| PSR_x
},
18360 {"fsc", PSR_f
| PSR_s
| PSR_c
},
18361 {"fxs", PSR_f
| PSR_x
| PSR_s
},
18362 {"fxc", PSR_f
| PSR_x
| PSR_c
},
18363 {"fcs", PSR_f
| PSR_c
| PSR_s
},
18364 {"fcx", PSR_f
| PSR_c
| PSR_x
},
18365 {"sfx", PSR_s
| PSR_f
| PSR_x
},
18366 {"sfc", PSR_s
| PSR_f
| PSR_c
},
18367 {"sxf", PSR_s
| PSR_x
| PSR_f
},
18368 {"sxc", PSR_s
| PSR_x
| PSR_c
},
18369 {"scf", PSR_s
| PSR_c
| PSR_f
},
18370 {"scx", PSR_s
| PSR_c
| PSR_x
},
18371 {"xfs", PSR_x
| PSR_f
| PSR_s
},
18372 {"xfc", PSR_x
| PSR_f
| PSR_c
},
18373 {"xsf", PSR_x
| PSR_s
| PSR_f
},
18374 {"xsc", PSR_x
| PSR_s
| PSR_c
},
18375 {"xcf", PSR_x
| PSR_c
| PSR_f
},
18376 {"xcs", PSR_x
| PSR_c
| PSR_s
},
18377 {"cfs", PSR_c
| PSR_f
| PSR_s
},
18378 {"cfx", PSR_c
| PSR_f
| PSR_x
},
18379 {"csf", PSR_c
| PSR_s
| PSR_f
},
18380 {"csx", PSR_c
| PSR_s
| PSR_x
},
18381 {"cxf", PSR_c
| PSR_x
| PSR_f
},
18382 {"cxs", PSR_c
| PSR_x
| PSR_s
},
18383 {"fsxc", PSR_f
| PSR_s
| PSR_x
| PSR_c
},
18384 {"fscx", PSR_f
| PSR_s
| PSR_c
| PSR_x
},
18385 {"fxsc", PSR_f
| PSR_x
| PSR_s
| PSR_c
},
18386 {"fxcs", PSR_f
| PSR_x
| PSR_c
| PSR_s
},
18387 {"fcsx", PSR_f
| PSR_c
| PSR_s
| PSR_x
},
18388 {"fcxs", PSR_f
| PSR_c
| PSR_x
| PSR_s
},
18389 {"sfxc", PSR_s
| PSR_f
| PSR_x
| PSR_c
},
18390 {"sfcx", PSR_s
| PSR_f
| PSR_c
| PSR_x
},
18391 {"sxfc", PSR_s
| PSR_x
| PSR_f
| PSR_c
},
18392 {"sxcf", PSR_s
| PSR_x
| PSR_c
| PSR_f
},
18393 {"scfx", PSR_s
| PSR_c
| PSR_f
| PSR_x
},
18394 {"scxf", PSR_s
| PSR_c
| PSR_x
| PSR_f
},
18395 {"xfsc", PSR_x
| PSR_f
| PSR_s
| PSR_c
},
18396 {"xfcs", PSR_x
| PSR_f
| PSR_c
| PSR_s
},
18397 {"xsfc", PSR_x
| PSR_s
| PSR_f
| PSR_c
},
18398 {"xscf", PSR_x
| PSR_s
| PSR_c
| PSR_f
},
18399 {"xcfs", PSR_x
| PSR_c
| PSR_f
| PSR_s
},
18400 {"xcsf", PSR_x
| PSR_c
| PSR_s
| PSR_f
},
18401 {"cfsx", PSR_c
| PSR_f
| PSR_s
| PSR_x
},
18402 {"cfxs", PSR_c
| PSR_f
| PSR_x
| PSR_s
},
18403 {"csfx", PSR_c
| PSR_s
| PSR_f
| PSR_x
},
18404 {"csxf", PSR_c
| PSR_s
| PSR_x
| PSR_f
},
18405 {"cxfs", PSR_c
| PSR_x
| PSR_f
| PSR_s
},
18406 {"cxsf", PSR_c
| PSR_x
| PSR_s
| PSR_f
},
18409 /* Table of V7M psr names. */
18410 static const struct asm_psr v7m_psrs
[] =
18412 {"apsr", 0 }, {"APSR", 0 },
18413 {"iapsr", 1 }, {"IAPSR", 1 },
18414 {"eapsr", 2 }, {"EAPSR", 2 },
18415 {"psr", 3 }, {"PSR", 3 },
18416 {"xpsr", 3 }, {"XPSR", 3 }, {"xPSR", 3 },
18417 {"ipsr", 5 }, {"IPSR", 5 },
18418 {"epsr", 6 }, {"EPSR", 6 },
18419 {"iepsr", 7 }, {"IEPSR", 7 },
18420 {"msp", 8 }, {"MSP", 8 },
18421 {"psp", 9 }, {"PSP", 9 },
18422 {"primask", 16}, {"PRIMASK", 16},
18423 {"basepri", 17}, {"BASEPRI", 17},
18424 {"basepri_max", 18}, {"BASEPRI_MAX", 18},
18425 {"basepri_max", 18}, {"BASEPRI_MASK", 18}, /* Typo, preserved for backwards compatibility. */
18426 {"faultmask", 19}, {"FAULTMASK", 19},
18427 {"control", 20}, {"CONTROL", 20}
18430 /* Table of all shift-in-operand names. */
18431 static const struct asm_shift_name shift_names
[] =
18433 { "asl", SHIFT_LSL
}, { "ASL", SHIFT_LSL
},
18434 { "lsl", SHIFT_LSL
}, { "LSL", SHIFT_LSL
},
18435 { "lsr", SHIFT_LSR
}, { "LSR", SHIFT_LSR
},
18436 { "asr", SHIFT_ASR
}, { "ASR", SHIFT_ASR
},
18437 { "ror", SHIFT_ROR
}, { "ROR", SHIFT_ROR
},
18438 { "rrx", SHIFT_RRX
}, { "RRX", SHIFT_RRX
}
/* Table of all explicit relocation names.  */
#ifdef OBJ_ELF
static struct reloc_entry reloc_names[] =
{
  { "got",     BFD_RELOC_ARM_GOT32   },	 { "GOT",     BFD_RELOC_ARM_GOT32   },
  { "gotoff",  BFD_RELOC_ARM_GOTOFF  },	 { "GOTOFF",  BFD_RELOC_ARM_GOTOFF  },
  { "plt",     BFD_RELOC_ARM_PLT32   },	 { "PLT",     BFD_RELOC_ARM_PLT32   },
  { "target1", BFD_RELOC_ARM_TARGET1 },	 { "TARGET1", BFD_RELOC_ARM_TARGET1 },
  { "target2", BFD_RELOC_ARM_TARGET2 },	 { "TARGET2", BFD_RELOC_ARM_TARGET2 },
  { "sbrel",   BFD_RELOC_ARM_SBREL32 },	 { "SBREL",   BFD_RELOC_ARM_SBREL32 },
  { "tlsgd",   BFD_RELOC_ARM_TLS_GD32},	 { "TLSGD",   BFD_RELOC_ARM_TLS_GD32},
  { "tlsldm",  BFD_RELOC_ARM_TLS_LDM32}, { "TLSLDM",  BFD_RELOC_ARM_TLS_LDM32},
  { "tlsldo",  BFD_RELOC_ARM_TLS_LDO32}, { "TLSLDO",  BFD_RELOC_ARM_TLS_LDO32},
  { "gottpoff",BFD_RELOC_ARM_TLS_IE32},	 { "GOTTPOFF",BFD_RELOC_ARM_TLS_IE32},
  { "tpoff",   BFD_RELOC_ARM_TLS_LE32},	 { "TPOFF",   BFD_RELOC_ARM_TLS_LE32},
  { "got_prel", BFD_RELOC_ARM_GOT_PREL}, { "GOT_PREL", BFD_RELOC_ARM_GOT_PREL},
  { "tlsdesc", BFD_RELOC_ARM_TLS_GOTDESC},
	{ "TLSDESC", BFD_RELOC_ARM_TLS_GOTDESC},
  { "tlscall", BFD_RELOC_ARM_TLS_CALL},
	{ "TLSCALL", BFD_RELOC_ARM_TLS_CALL},
  { "tlsdescseq", BFD_RELOC_ARM_TLS_DESCSEQ},
	{ "TLSDESCSEQ", BFD_RELOC_ARM_TLS_DESCSEQ}
};
#endif
18466 /* Table of all conditional affixes. 0xF is not defined as a condition code. */
18467 static const struct asm_cond conds
[] =
18471 {"cs", 0x2}, {"hs", 0x2},
18472 {"cc", 0x3}, {"ul", 0x3}, {"lo", 0x3},
18486 #define UL_BARRIER(L,U,CODE,FEAT) \
18487 { L, CODE, ARM_FEATURE_CORE_LOW (FEAT) }, \
18488 { U, CODE, ARM_FEATURE_CORE_LOW (FEAT) }
18490 static struct asm_barrier_opt barrier_opt_names
[] =
18492 UL_BARRIER ("sy", "SY", 0xf, ARM_EXT_BARRIER
),
18493 UL_BARRIER ("st", "ST", 0xe, ARM_EXT_BARRIER
),
18494 UL_BARRIER ("ld", "LD", 0xd, ARM_EXT_V8
),
18495 UL_BARRIER ("ish", "ISH", 0xb, ARM_EXT_BARRIER
),
18496 UL_BARRIER ("sh", "SH", 0xb, ARM_EXT_BARRIER
),
18497 UL_BARRIER ("ishst", "ISHST", 0xa, ARM_EXT_BARRIER
),
18498 UL_BARRIER ("shst", "SHST", 0xa, ARM_EXT_BARRIER
),
18499 UL_BARRIER ("ishld", "ISHLD", 0x9, ARM_EXT_V8
),
18500 UL_BARRIER ("un", "UN", 0x7, ARM_EXT_BARRIER
),
18501 UL_BARRIER ("nsh", "NSH", 0x7, ARM_EXT_BARRIER
),
18502 UL_BARRIER ("unst", "UNST", 0x6, ARM_EXT_BARRIER
),
18503 UL_BARRIER ("nshst", "NSHST", 0x6, ARM_EXT_BARRIER
),
18504 UL_BARRIER ("nshld", "NSHLD", 0x5, ARM_EXT_V8
),
18505 UL_BARRIER ("osh", "OSH", 0x3, ARM_EXT_BARRIER
),
18506 UL_BARRIER ("oshst", "OSHST", 0x2, ARM_EXT_BARRIER
),
18507 UL_BARRIER ("oshld", "OSHLD", 0x1, ARM_EXT_V8
)
/* Table of ARM-format instructions.  */

/* Macros for gluing together operand strings.  N.B. In all cases
   other than OPS0, the trailing OP_stop comes from default
   zero-initialization of the unspecified elements of the array.  */
#define OPS0()            { OP_stop, }
#define OPS1(a)           { OP_##a, }
#define OPS2(a,b)         { OP_##a,OP_##b, }
#define OPS3(a,b,c)       { OP_##a,OP_##b,OP_##c, }
#define OPS4(a,b,c,d)     { OP_##a,OP_##b,OP_##c,OP_##d, }
#define OPS5(a,b,c,d,e)   { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e, }
#define OPS6(a,b,c,d,e,f) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e,OP_##f, }

/* These macros are similar to the OPSn, but do not prepend the OP_ prefix.
   This is useful when mixing operands for ARM and THUMB, i.e. using the
   MIX_ARM_THUMB_OPERANDS macro.
   In order to use these macros, prefix the number of operands with _
   e.g. _3.  */
#define OPS_1(a)           { a, }
#define OPS_2(a,b)         { a,b, }
#define OPS_3(a,b,c)       { a,b,c, }
#define OPS_4(a,b,c,d)     { a,b,c,d, }
#define OPS_5(a,b,c,d,e)   { a,b,c,d,e, }
#define OPS_6(a,b,c,d,e,f) { a,b,c,d,e,f, }
/* These macros abstract out the exact format of the mnemonic table and
   save some repeated characters.  */

/* The normal sort of mnemonic; has a Thumb variant; takes a conditional suffix.  */
#define TxCE(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }

/* Two variants of the above - TCE for a numeric Thumb opcode, tCE for
   a T_MNEM_xyz enumerator.  */
#define TCE(mnem, aop, top, nops, ops, ae, te) \
  TxCE (mnem, aop, 0x##top, nops, ops, ae, te)
#define tCE(mnem, aop, top, nops, ops, ae, te) \
  TxCE (mnem, aop, T_MNEM##top, nops, ops, ae, te)

/* Second most common sort of mnemonic: has a Thumb variant, takes a conditional
   infix after the third character.  */
#define TxC3(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_cinfix3, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }
/* As TxC3, but the infix-in-position-3 spelling is deprecated.  */
#define TxC3w(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_cinfix3_deprecated, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }
/* Numeric-Thumb-opcode and T_MNEM_xyz flavours of TxC3/TxC3w, parallel
   to TCE/tCE above.  */
#define TC3(mnem, aop, top, nops, ops, ae, te) \
  TxC3 (mnem, aop, 0x##top, nops, ops, ae, te)
#define TC3w(mnem, aop, top, nops, ops, ae, te) \
  TxC3w (mnem, aop, 0x##top, nops, ops, ae, te)
#define tC3(mnem, aop, top, nops, ops, ae, te) \
  TxC3 (mnem, aop, T_MNEM##top, nops, ops, ae, te)
#define tC3w(mnem, aop, top, nops, ops, ae, te) \
  TxC3w (mnem, aop, T_MNEM##top, nops, ops, ae, te)
/* Mnemonic that cannot be conditionalized.  The ARM condition-code
   field is still 0xE.  Many of the Thumb variants can be executed
   conditionally, so this is checked separately.  */
#define TUE(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }

/* Same as TUE but the encoding function for ARM and Thumb modes is the same.
   Used by mnemonics that have very minimal differences in the encoding for
   ARM and Thumb variants and can be handled in a common function.  */
#define TUEc(mnem, op, top, nops, ops, en) \
  { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##en, do_##en }

/* Mnemonic that cannot be conditionalized, and bears 0xF in its ARM
   condition code field.  */
#define TUF(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }
/* ARM-only variants of all the above.  Note that CE takes a string
   literal for the mnemonic while C3 stringizes a bare token.  */
#define CE(mnem, op, nops, ops, ae) \
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

#define C3(mnem, op, nops, ops, ae) \
  { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

/* Legacy mnemonics that always have conditional infix after the third
   character.  */
#define CL(mnem, op, nops, ops, ae) \
  { mnem, OPS##nops ops, OT_cinfix3_legacy, \
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

/* Coprocessor instructions.  Isomorphic between Arm and Thumb-2: the
   Thumb opcode is the ARM opcode with an 0xe condition prefixed.  */
#define cCE(mnem, op, nops, ops, ae) \
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

/* Legacy coprocessor instructions where conditional infix and conditional
   suffix are ambiguous.  For consistency this includes all FPA instructions,
   not just the potentially ambiguous ones.  */
#define cCL(mnem, op, nops, ops, ae) \
  { mnem, OPS##nops ops, OT_cinfix3_legacy, \
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

/* Coprocessor, takes either a suffix or a position-3 infix
   (for an FPA corner case).  */
#define C3E(mnem, op, nops, ops, ae) \
  { mnem, OPS##nops ops, OT_csuf_or_in3, \
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
/* Build one table entry whose mnemonic is m1 + condition m2 + m3, with
   the condition code m2 stringized into the name.  When m2 is empty
   (sizeof (#m2) == 1, i.e. just the NUL) the entry is unconditional;
   otherwise the infix sits at offset sizeof (m1) - 1 in the name.  */
#define xCM_(m1, m2, m3, op, nops, ops, ae) \
  { m1 #m2 m3, OPS##nops ops, \
    sizeof (#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof (m1) - 1, \
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

/* Expand to one xCM_ entry per condition code (plus the bare form),
   covering every valid ARM condition including the cs/hs, cc/ul/lo
   aliases.  */
#define CM(m1, m2, op, nops, ops, ae) \
  xCM_ (m1,   , m2, op, nops, ops, ae), \
  xCM_ (m1, eq, m2, op, nops, ops, ae), \
  xCM_ (m1, ne, m2, op, nops, ops, ae), \
  xCM_ (m1, cs, m2, op, nops, ops, ae), \
  xCM_ (m1, hs, m2, op, nops, ops, ae), \
  xCM_ (m1, cc, m2, op, nops, ops, ae), \
  xCM_ (m1, ul, m2, op, nops, ops, ae), \
  xCM_ (m1, lo, m2, op, nops, ops, ae), \
  xCM_ (m1, mi, m2, op, nops, ops, ae), \
  xCM_ (m1, pl, m2, op, nops, ops, ae), \
  xCM_ (m1, vs, m2, op, nops, ops, ae), \
  xCM_ (m1, vc, m2, op, nops, ops, ae), \
  xCM_ (m1, hi, m2, op, nops, ops, ae), \
  xCM_ (m1, ls, m2, op, nops, ops, ae), \
  xCM_ (m1, ge, m2, op, nops, ops, ae), \
  xCM_ (m1, lt, m2, op, nops, ops, ae), \
  xCM_ (m1, gt, m2, op, nops, ops, ae), \
  xCM_ (m1, le, m2, op, nops, ops, ae), \
  xCM_ (m1, al, m2, op, nops, ops, ae)
/* Unconditional ARM-only entry; UF additionally bears 0xF in the ARM
   condition code field.  Both stringize a bare mnemonic token.  */
#define UE(mnem, op, nops, ops, ae) \
  { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }

#define UF(mnem, op, nops, ops, ae) \
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }

/* Neon data-processing.  ARM versions are unconditional with cond=0xf.
   The Thumb and ARM variants are mostly the same (bits 0-23 and 24/28), so we
   use the same encoding function for each.  */
#define NUF(mnem, op, nops, ops, enc) \
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op, \
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

/* Neon data processing, version which indirects through neon_enc_tab for
   the various overloaded versions of opcodes.  */
#define nUF(mnem, op, nops, ops, enc) \
  { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM##op, N_MNEM##op, \
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

/* Neon insn with conditional suffix for the ARM version, non-overloaded
   types.  */
#define NCE_tag(mnem, op, nops, ops, enc, tag) \
  { #mnem, OPS##nops ops, tag, 0x##op, 0x##op, ARM_VARIANT, \
    THUMB_VARIANT, do_##enc, do_##enc }

#define NCE(mnem, op, nops, ops, enc) \
  NCE_tag (mnem, op, nops, ops, enc, OT_csuffix)

#define NCEF(mnem, op, nops, ops, enc) \
  NCE_tag (mnem, op, nops, ops, enc, OT_csuffixF)

/* Neon insn with conditional suffix for the ARM version, overloaded types.  */
#define nCE_tag(mnem, op, nops, ops, enc, tag) \
  { #mnem, OPS##nops ops, tag, N_MNEM##op, N_MNEM##op, \
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

#define nCE(mnem, op, nops, ops, enc) \
  nCE_tag (mnem, op, nops, ops, enc, OT_csuffix)

#define nCEF(mnem, op, nops, ops, enc) \
  nCE_tag (mnem, op, nops, ops, enc, OT_csuffixF)
18689 static const struct asm_opcode insns
[] =
18691 #define ARM_VARIANT & arm_ext_v1 /* Core ARM Instructions. */
18692 #define THUMB_VARIANT & arm_ext_v4t
18693 tCE("and", 0000000, _and
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
18694 tC3("ands", 0100000, _ands
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
18695 tCE("eor", 0200000, _eor
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
18696 tC3("eors", 0300000, _eors
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
18697 tCE("sub", 0400000, _sub
, 3, (RR
, oRR
, SH
), arit
, t_add_sub
),
18698 tC3("subs", 0500000, _subs
, 3, (RR
, oRR
, SH
), arit
, t_add_sub
),
18699 tCE("add", 0800000, _add
, 3, (RR
, oRR
, SHG
), arit
, t_add_sub
),
18700 tC3("adds", 0900000, _adds
, 3, (RR
, oRR
, SHG
), arit
, t_add_sub
),
18701 tCE("adc", 0a00000
, _adc
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
18702 tC3("adcs", 0b00000, _adcs
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
18703 tCE("sbc", 0c00000
, _sbc
, 3, (RR
, oRR
, SH
), arit
, t_arit3
),
18704 tC3("sbcs", 0d00000
, _sbcs
, 3, (RR
, oRR
, SH
), arit
, t_arit3
),
18705 tCE("orr", 1800000, _orr
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
18706 tC3("orrs", 1900000, _orrs
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
18707 tCE("bic", 1c00000
, _bic
, 3, (RR
, oRR
, SH
), arit
, t_arit3
),
18708 tC3("bics", 1d00000
, _bics
, 3, (RR
, oRR
, SH
), arit
, t_arit3
),
18710 /* The p-variants of tst/cmp/cmn/teq (below) are the pre-V6 mechanism
18711 for setting PSR flag bits. They are obsolete in V6 and do not
18712 have Thumb equivalents. */
18713 tCE("tst", 1100000, _tst
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
18714 tC3w("tsts", 1100000, _tst
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
18715 CL("tstp", 110f000
, 2, (RR
, SH
), cmp
),
18716 tCE("cmp", 1500000, _cmp
, 2, (RR
, SH
), cmp
, t_mov_cmp
),
18717 tC3w("cmps", 1500000, _cmp
, 2, (RR
, SH
), cmp
, t_mov_cmp
),
18718 CL("cmpp", 150f000
, 2, (RR
, SH
), cmp
),
18719 tCE("cmn", 1700000, _cmn
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
18720 tC3w("cmns", 1700000, _cmn
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
18721 CL("cmnp", 170f000
, 2, (RR
, SH
), cmp
),
18723 tCE("mov", 1a00000
, _mov
, 2, (RR
, SH
), mov
, t_mov_cmp
),
18724 tC3("movs", 1b00000
, _movs
, 2, (RR
, SHG
), mov
, t_mov_cmp
),
18725 tCE("mvn", 1e00000
, _mvn
, 2, (RR
, SH
), mov
, t_mvn_tst
),
18726 tC3("mvns", 1f00000
, _mvns
, 2, (RR
, SH
), mov
, t_mvn_tst
),
18728 tCE("ldr", 4100000, _ldr
, 2, (RR
, ADDRGLDR
),ldst
, t_ldst
),
18729 tC3("ldrb", 4500000, _ldrb
, 2, (RRnpc_npcsp
, ADDRGLDR
),ldst
, t_ldst
),
18730 tCE("str", 4000000, _str
, _2
, (MIX_ARM_THUMB_OPERANDS (OP_RR
,
18732 OP_ADDRGLDR
),ldst
, t_ldst
),
18733 tC3("strb", 4400000, _strb
, 2, (RRnpc_npcsp
, ADDRGLDR
),ldst
, t_ldst
),
18735 tCE("stm", 8800000, _stmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
18736 tC3("stmia", 8800000, _stmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
18737 tC3("stmea", 8800000, _stmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
18738 tCE("ldm", 8900000, _ldmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
18739 tC3("ldmia", 8900000, _ldmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
18740 tC3("ldmfd", 8900000, _ldmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
18742 TCE("swi", f000000
, df00
, 1, (EXPi
), swi
, t_swi
),
18743 TCE("svc", f000000
, df00
, 1, (EXPi
), swi
, t_swi
),
18744 tCE("b", a000000
, _b
, 1, (EXPr
), branch
, t_branch
),
18745 TCE("bl", b000000
, f000f800
, 1, (EXPr
), bl
, t_branch23
),
18748 tCE("adr", 28f0000
, _adr
, 2, (RR
, EXP
), adr
, t_adr
),
18749 C3(adrl
, 28f0000
, 2, (RR
, EXP
), adrl
),
18750 tCE("nop", 1a00000
, _nop
, 1, (oI255c
), nop
, t_nop
),
18751 tCE("udf", 7f000f0
, _udf
, 1, (oIffffb
), bkpt
, t_udf
),
18753 /* Thumb-compatibility pseudo ops. */
18754 tCE("lsl", 1a00000
, _lsl
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
18755 tC3("lsls", 1b00000
, _lsls
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
18756 tCE("lsr", 1a00020
, _lsr
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
18757 tC3("lsrs", 1b00020
, _lsrs
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
18758 tCE("asr", 1a00040
, _asr
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
18759 tC3("asrs", 1b00040
, _asrs
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
18760 tCE("ror", 1a00060
, _ror
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
18761 tC3("rors", 1b00060
, _rors
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
18762 tCE("neg", 2600000, _neg
, 2, (RR
, RR
), rd_rn
, t_neg
),
18763 tC3("negs", 2700000, _negs
, 2, (RR
, RR
), rd_rn
, t_neg
),
18764 tCE("push", 92d0000
, _push
, 1, (REGLST
), push_pop
, t_push_pop
),
18765 tCE("pop", 8bd0000
, _pop
, 1, (REGLST
), push_pop
, t_push_pop
),
18767 /* These may simplify to neg. */
18768 TCE("rsb", 0600000, ebc00000
, 3, (RR
, oRR
, SH
), arit
, t_rsb
),
18769 TC3("rsbs", 0700000, ebd00000
, 3, (RR
, oRR
, SH
), arit
, t_rsb
),
18771 #undef THUMB_VARIANT
18772 #define THUMB_VARIANT & arm_ext_v6
18774 TCE("cpy", 1a00000
, 4600, 2, (RR
, RR
), rd_rm
, t_cpy
),
18776 /* V1 instructions with no Thumb analogue prior to V6T2. */
18777 #undef THUMB_VARIANT
18778 #define THUMB_VARIANT & arm_ext_v6t2
18780 TCE("teq", 1300000, ea900f00
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
18781 TC3w("teqs", 1300000, ea900f00
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
18782 CL("teqp", 130f000
, 2, (RR
, SH
), cmp
),
18784 TC3("ldrt", 4300000, f8500e00
, 2, (RRnpc_npcsp
, ADDR
),ldstt
, t_ldstt
),
18785 TC3("ldrbt", 4700000, f8100e00
, 2, (RRnpc_npcsp
, ADDR
),ldstt
, t_ldstt
),
18786 TC3("strt", 4200000, f8400e00
, 2, (RR_npcsp
, ADDR
), ldstt
, t_ldstt
),
18787 TC3("strbt", 4600000, f8000e00
, 2, (RRnpc_npcsp
, ADDR
),ldstt
, t_ldstt
),
18789 TC3("stmdb", 9000000, e9000000
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
18790 TC3("stmfd", 9000000, e9000000
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
18792 TC3("ldmdb", 9100000, e9100000
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
18793 TC3("ldmea", 9100000, e9100000
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
18795 /* V1 instructions with no Thumb analogue at all. */
18796 CE("rsc", 0e00000
, 3, (RR
, oRR
, SH
), arit
),
18797 C3(rscs
, 0f00000
, 3, (RR
, oRR
, SH
), arit
),
18799 C3(stmib
, 9800000, 2, (RRw
, REGLST
), ldmstm
),
18800 C3(stmfa
, 9800000, 2, (RRw
, REGLST
), ldmstm
),
18801 C3(stmda
, 8000000, 2, (RRw
, REGLST
), ldmstm
),
18802 C3(stmed
, 8000000, 2, (RRw
, REGLST
), ldmstm
),
18803 C3(ldmib
, 9900000, 2, (RRw
, REGLST
), ldmstm
),
18804 C3(ldmed
, 9900000, 2, (RRw
, REGLST
), ldmstm
),
18805 C3(ldmda
, 8100000, 2, (RRw
, REGLST
), ldmstm
),
18806 C3(ldmfa
, 8100000, 2, (RRw
, REGLST
), ldmstm
),
18809 #define ARM_VARIANT & arm_ext_v2 /* ARM 2 - multiplies. */
18810 #undef THUMB_VARIANT
18811 #define THUMB_VARIANT & arm_ext_v4t
18813 tCE("mul", 0000090, _mul
, 3, (RRnpc
, RRnpc
, oRR
), mul
, t_mul
),
18814 tC3("muls", 0100090, _muls
, 3, (RRnpc
, RRnpc
, oRR
), mul
, t_mul
),
18816 #undef THUMB_VARIANT
18817 #define THUMB_VARIANT & arm_ext_v6t2
18819 TCE("mla", 0200090, fb000000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mlas
, t_mla
),
18820 C3(mlas
, 0300090, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mlas
),
18822 /* Generic coprocessor instructions. */
18823 TCE("cdp", e000000
, ee000000
, 6, (RCP
, I15b
, RCN
, RCN
, RCN
, oI7b
), cdp
, cdp
),
18824 TCE("ldc", c100000
, ec100000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
18825 TC3("ldcl", c500000
, ec500000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
18826 TCE("stc", c000000
, ec000000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
18827 TC3("stcl", c400000
, ec400000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
18828 TCE("mcr", e000010
, ee000010
, 6, (RCP
, I7b
, RR
, RCN
, RCN
, oI7b
), co_reg
, co_reg
),
18829 TCE("mrc", e100010
, ee100010
, 6, (RCP
, I7b
, APSR_RR
, RCN
, RCN
, oI7b
), co_reg
, co_reg
),
18832 #define ARM_VARIANT & arm_ext_v2s /* ARM 3 - swp instructions. */
18834 CE("swp", 1000090, 3, (RRnpc
, RRnpc
, RRnpcb
), rd_rm_rn
),
18835 C3(swpb
, 1400090, 3, (RRnpc
, RRnpc
, RRnpcb
), rd_rm_rn
),
18838 #define ARM_VARIANT & arm_ext_v3 /* ARM 6 Status register instructions. */
18839 #undef THUMB_VARIANT
18840 #define THUMB_VARIANT & arm_ext_msr
18842 TCE("mrs", 1000000, f3e08000
, 2, (RRnpc
, rPSR
), mrs
, t_mrs
),
18843 TCE("msr", 120f000
, f3808000
, 2, (wPSR
, RR_EXi
), msr
, t_msr
),
18846 #define ARM_VARIANT & arm_ext_v3m /* ARM 7M long multiplies. */
18847 #undef THUMB_VARIANT
18848 #define THUMB_VARIANT & arm_ext_v6t2
18850 TCE("smull", 0c00090
, fb800000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
, t_mull
),
18851 CM("smull","s", 0d00090
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
),
18852 TCE("umull", 0800090, fba00000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
, t_mull
),
18853 CM("umull","s", 0900090, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
),
18854 TCE("smlal", 0e00090
, fbc00000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
, t_mull
),
18855 CM("smlal","s", 0f00090
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
),
18856 TCE("umlal", 0a00090
, fbe00000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
, t_mull
),
18857 CM("umlal","s", 0b00090, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
),
18860 #define ARM_VARIANT & arm_ext_v4 /* ARM Architecture 4. */
18861 #undef THUMB_VARIANT
18862 #define THUMB_VARIANT & arm_ext_v4t
18864 tC3("ldrh", 01000b0
, _ldrh
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
18865 tC3("strh", 00000b0
, _strh
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
18866 tC3("ldrsh", 01000f0
, _ldrsh
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
18867 tC3("ldrsb", 01000d0
, _ldrsb
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
18868 tC3("ldsh", 01000f0
, _ldrsh
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
18869 tC3("ldsb", 01000d0
, _ldrsb
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
18872 #define ARM_VARIANT & arm_ext_v4t_5
18874 /* ARM Architecture 4T. */
18875 /* Note: bx (and blx) are required on V5, even if the processor does
18876 not support Thumb. */
18877 TCE("bx", 12fff10
, 4700, 1, (RR
), bx
, t_bx
),
18880 #define ARM_VARIANT & arm_ext_v5 /* ARM Architecture 5T. */
18881 #undef THUMB_VARIANT
18882 #define THUMB_VARIANT & arm_ext_v5t
18884 /* Note: blx has 2 variants; the .value coded here is for
18885 BLX(2). Only this variant has conditional execution. */
18886 TCE("blx", 12fff30
, 4780, 1, (RR_EXr
), blx
, t_blx
),
18887 TUE("bkpt", 1200070, be00
, 1, (oIffffb
), bkpt
, t_bkpt
),
18889 #undef THUMB_VARIANT
18890 #define THUMB_VARIANT & arm_ext_v6t2
18892 TCE("clz", 16f0f10
, fab0f080
, 2, (RRnpc
, RRnpc
), rd_rm
, t_clz
),
18893 TUF("ldc2", c100000
, fc100000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
18894 TUF("ldc2l", c500000
, fc500000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
18895 TUF("stc2", c000000
, fc000000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
18896 TUF("stc2l", c400000
, fc400000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
18897 TUF("cdp2", e000000
, fe000000
, 6, (RCP
, I15b
, RCN
, RCN
, RCN
, oI7b
), cdp
, cdp
),
18898 TUF("mcr2", e000010
, fe000010
, 6, (RCP
, I7b
, RR
, RCN
, RCN
, oI7b
), co_reg
, co_reg
),
18899 TUF("mrc2", e100010
, fe100010
, 6, (RCP
, I7b
, RR
, RCN
, RCN
, oI7b
), co_reg
, co_reg
),
18902 #define ARM_VARIANT & arm_ext_v5exp /* ARM Architecture 5TExP. */
18903 #undef THUMB_VARIANT
18904 #define THUMB_VARIANT & arm_ext_v5exp
18906 TCE("smlabb", 1000080, fb100000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
18907 TCE("smlatb", 10000a0
, fb100020
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
18908 TCE("smlabt", 10000c0
, fb100010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
18909 TCE("smlatt", 10000e0
, fb100030
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
18911 TCE("smlawb", 1200080, fb300000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
18912 TCE("smlawt", 12000c0
, fb300010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
18914 TCE("smlalbb", 1400080, fbc00080
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smlal
, t_mlal
),
18915 TCE("smlaltb", 14000a0
, fbc000a0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smlal
, t_mlal
),
18916 TCE("smlalbt", 14000c0
, fbc00090
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smlal
, t_mlal
),
18917 TCE("smlaltt", 14000e0
, fbc000b0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smlal
, t_mlal
),
18919 TCE("smulbb", 1600080, fb10f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
18920 TCE("smultb", 16000a0
, fb10f020
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
18921 TCE("smulbt", 16000c0
, fb10f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
18922 TCE("smultt", 16000e0
, fb10f030
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
18924 TCE("smulwb", 12000a0
, fb30f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
18925 TCE("smulwt", 12000e0
, fb30f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
18927 TCE("qadd", 1000050, fa80f080
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rm_rn
, t_simd2
),
18928 TCE("qdadd", 1400050, fa80f090
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rm_rn
, t_simd2
),
18929 TCE("qsub", 1200050, fa80f0a0
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rm_rn
, t_simd2
),
18930 TCE("qdsub", 1600050, fa80f0b0
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rm_rn
, t_simd2
),
18933 #define ARM_VARIANT & arm_ext_v5e /* ARM Architecture 5TE. */
18934 #undef THUMB_VARIANT
18935 #define THUMB_VARIANT & arm_ext_v6t2
18937 TUF("pld", 450f000
, f810f000
, 1, (ADDR
), pld
, t_pld
),
18938 TC3("ldrd", 00000d0
, e8500000
, 3, (RRnpc_npcsp
, oRRnpc_npcsp
, ADDRGLDRS
),
18940 TC3("strd", 00000f0
, e8400000
, 3, (RRnpc_npcsp
, oRRnpc_npcsp
,
18941 ADDRGLDRS
), ldrd
, t_ldstd
),
18943 TCE("mcrr", c400000
, ec400000
, 5, (RCP
, I15b
, RRnpc
, RRnpc
, RCN
), co_reg2c
, co_reg2c
),
18944 TCE("mrrc", c500000
, ec500000
, 5, (RCP
, I15b
, RRnpc
, RRnpc
, RCN
), co_reg2c
, co_reg2c
),
18947 #define ARM_VARIANT & arm_ext_v5j /* ARM Architecture 5TEJ. */
18949 TCE("bxj", 12fff20
, f3c08f00
, 1, (RR
), bxj
, t_bxj
),
18952 #define ARM_VARIANT & arm_ext_v6 /* ARM V6. */
18953 #undef THUMB_VARIANT
18954 #define THUMB_VARIANT & arm_ext_v6
18956 TUF("cpsie", 1080000, b660
, 2, (CPSF
, oI31b
), cpsi
, t_cpsi
),
18957 TUF("cpsid", 10c0000
, b670
, 2, (CPSF
, oI31b
), cpsi
, t_cpsi
),
18958 tCE("rev", 6bf0f30
, _rev
, 2, (RRnpc
, RRnpc
), rd_rm
, t_rev
),
18959 tCE("rev16", 6bf0fb0
, _rev16
, 2, (RRnpc
, RRnpc
), rd_rm
, t_rev
),
18960 tCE("revsh", 6ff0fb0
, _revsh
, 2, (RRnpc
, RRnpc
), rd_rm
, t_rev
),
18961 tCE("sxth", 6bf0070
, _sxth
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
18962 tCE("uxth", 6ff0070
, _uxth
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
18963 tCE("sxtb", 6af0070
, _sxtb
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
18964 tCE("uxtb", 6ef0070
, _uxtb
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
18965 TUF("setend", 1010000, b650
, 1, (ENDI
), setend
, t_setend
),
18967 #undef THUMB_VARIANT
18968 #define THUMB_VARIANT & arm_ext_v6t2_v8m
18970 TCE("ldrex", 1900f9f
, e8500f00
, 2, (RRnpc_npcsp
, ADDR
), ldrex
, t_ldrex
),
18971 TCE("strex", 1800f90
, e8400000
, 3, (RRnpc_npcsp
, RRnpc_npcsp
, ADDR
),
18973 #undef THUMB_VARIANT
18974 #define THUMB_VARIANT & arm_ext_v6t2
18976 TUF("mcrr2", c400000
, fc400000
, 5, (RCP
, I15b
, RRnpc
, RRnpc
, RCN
), co_reg2c
, co_reg2c
),
18977 TUF("mrrc2", c500000
, fc500000
, 5, (RCP
, I15b
, RRnpc
, RRnpc
, RCN
), co_reg2c
, co_reg2c
),
18979 TCE("ssat", 6a00010
, f3000000
, 4, (RRnpc
, I32
, RRnpc
, oSHllar
),ssat
, t_ssat
),
18980 TCE("usat", 6e00010
, f3800000
, 4, (RRnpc
, I31
, RRnpc
, oSHllar
),usat
, t_usat
),
18982 /* ARM V6 not included in V7M. */
18983 #undef THUMB_VARIANT
18984 #define THUMB_VARIANT & arm_ext_v6_notm
18985 TUF("rfeia", 8900a00
, e990c000
, 1, (RRw
), rfe
, rfe
),
18986 TUF("rfe", 8900a00
, e990c000
, 1, (RRw
), rfe
, rfe
),
18987 UF(rfeib
, 9900a00
, 1, (RRw
), rfe
),
18988 UF(rfeda
, 8100a00
, 1, (RRw
), rfe
),
18989 TUF("rfedb", 9100a00
, e810c000
, 1, (RRw
), rfe
, rfe
),
18990 TUF("rfefd", 8900a00
, e990c000
, 1, (RRw
), rfe
, rfe
),
18991 UF(rfefa
, 8100a00
, 1, (RRw
), rfe
),
18992 TUF("rfeea", 9100a00
, e810c000
, 1, (RRw
), rfe
, rfe
),
18993 UF(rfeed
, 9900a00
, 1, (RRw
), rfe
),
18994 TUF("srsia", 8c00500
, e980c000
, 2, (oRRw
, I31w
), srs
, srs
),
18995 TUF("srs", 8c00500
, e980c000
, 2, (oRRw
, I31w
), srs
, srs
),
18996 TUF("srsea", 8c00500
, e980c000
, 2, (oRRw
, I31w
), srs
, srs
),
18997 UF(srsib
, 9c00500
, 2, (oRRw
, I31w
), srs
),
18998 UF(srsfa
, 9c00500
, 2, (oRRw
, I31w
), srs
),
18999 UF(srsda
, 8400500, 2, (oRRw
, I31w
), srs
),
19000 UF(srsed
, 8400500, 2, (oRRw
, I31w
), srs
),
19001 TUF("srsdb", 9400500, e800c000
, 2, (oRRw
, I31w
), srs
, srs
),
19002 TUF("srsfd", 9400500, e800c000
, 2, (oRRw
, I31w
), srs
, srs
),
19003 TUF("cps", 1020000, f3af8100
, 1, (I31b
), imm0
, t_cps
),
19005 /* ARM V6 not included in V7M (eg. integer SIMD). */
19006 #undef THUMB_VARIANT
19007 #define THUMB_VARIANT & arm_ext_v6_dsp
19008 TCE("pkhbt", 6800010, eac00000
, 4, (RRnpc
, RRnpc
, RRnpc
, oSHll
), pkhbt
, t_pkhbt
),
19009 TCE("pkhtb", 6800050, eac00020
, 4, (RRnpc
, RRnpc
, RRnpc
, oSHar
), pkhtb
, t_pkhtb
),
19010 TCE("qadd16", 6200f10
, fa90f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19011 TCE("qadd8", 6200f90
, fa80f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19012 TCE("qasx", 6200f30
, faa0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19013 /* Old name for QASX. */
19014 TCE("qaddsubx",6200f30
, faa0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19015 TCE("qsax", 6200f50
, fae0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19016 /* Old name for QSAX. */
19017 TCE("qsubaddx",6200f50
, fae0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19018 TCE("qsub16", 6200f70
, fad0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19019 TCE("qsub8", 6200ff0
, fac0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19020 TCE("sadd16", 6100f10
, fa90f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19021 TCE("sadd8", 6100f90
, fa80f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19022 TCE("sasx", 6100f30
, faa0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19023 /* Old name for SASX. */
19024 TCE("saddsubx",6100f30
, faa0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19025 TCE("shadd16", 6300f10
, fa90f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19026 TCE("shadd8", 6300f90
, fa80f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19027 TCE("shasx", 6300f30
, faa0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19028 /* Old name for SHASX. */
19029 TCE("shaddsubx", 6300f30
, faa0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19030 TCE("shsax", 6300f50
, fae0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19031 /* Old name for SHSAX. */
19032 TCE("shsubaddx", 6300f50
, fae0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19033 TCE("shsub16", 6300f70
, fad0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19034 TCE("shsub8", 6300ff0
, fac0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19035 TCE("ssax", 6100f50
, fae0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19036 /* Old name for SSAX. */
19037 TCE("ssubaddx",6100f50
, fae0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19038 TCE("ssub16", 6100f70
, fad0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19039 TCE("ssub8", 6100ff0
, fac0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19040 TCE("uadd16", 6500f10
, fa90f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19041 TCE("uadd8", 6500f90
, fa80f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19042 TCE("uasx", 6500f30
, faa0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19043 /* Old name for UASX. */
19044 TCE("uaddsubx",6500f30
, faa0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19045 TCE("uhadd16", 6700f10
, fa90f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19046 TCE("uhadd8", 6700f90
, fa80f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19047 TCE("uhasx", 6700f30
, faa0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19048 /* Old name for UHASX. */
19049 TCE("uhaddsubx", 6700f30
, faa0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19050 TCE("uhsax", 6700f50
, fae0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19051 /* Old name for UHSAX. */
19052 TCE("uhsubaddx", 6700f50
, fae0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19053 TCE("uhsub16", 6700f70
, fad0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19054 TCE("uhsub8", 6700ff0
, fac0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19055 TCE("uqadd16", 6600f10
, fa90f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19056 TCE("uqadd8", 6600f90
, fa80f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19057 TCE("uqasx", 6600f30
, faa0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19058 /* Old name for UQASX. */
19059 TCE("uqaddsubx", 6600f30
, faa0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19060 TCE("uqsax", 6600f50
, fae0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19061 /* Old name for UQSAX. */
19062 TCE("uqsubaddx", 6600f50
, fae0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19063 TCE("uqsub16", 6600f70
, fad0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19064 TCE("uqsub8", 6600ff0
, fac0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19065 TCE("usub16", 6500f70
, fad0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19066 TCE("usax", 6500f50
, fae0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19067 /* Old name for USAX. */
19068 TCE("usubaddx",6500f50
, fae0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19069 TCE("usub8", 6500ff0
, fac0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19070 TCE("sxtah", 6b00070
, fa00f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
19071 TCE("sxtab16", 6800070, fa20f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
19072 TCE("sxtab", 6a00070
, fa40f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
19073 TCE("sxtb16", 68f0070
, fa2ff080
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
19074 TCE("uxtah", 6f00070
, fa10f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
19075 TCE("uxtab16", 6c00070
, fa30f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
19076 TCE("uxtab", 6e00070
, fa50f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
19077 TCE("uxtb16", 6cf0070
, fa3ff080
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
19078 TCE("sel", 6800fb0
, faa0f080
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19079 TCE("smlad", 7000010, fb200000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
19080 TCE("smladx", 7000030, fb200010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
19081 TCE("smlald", 7400010, fbc000c0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
,t_mlal
),
19082 TCE("smlaldx", 7400030, fbc000d0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
,t_mlal
),
19083 TCE("smlsd", 7000050, fb400000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
19084 TCE("smlsdx", 7000070, fb400010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
19085 TCE("smlsld", 7400050, fbd000c0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
,t_mlal
),
19086 TCE("smlsldx", 7400070, fbd000d0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
,t_mlal
),
19087 TCE("smmla", 7500010, fb500000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
19088 TCE("smmlar", 7500030, fb500010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
19089 TCE("smmls", 75000d0
, fb600000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
19090 TCE("smmlsr", 75000f0
, fb600010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
19091 TCE("smmul", 750f010
, fb50f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
19092 TCE("smmulr", 750f030
, fb50f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
19093 TCE("smuad", 700f010
, fb20f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
19094 TCE("smuadx", 700f030
, fb20f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
19095 TCE("smusd", 700f050
, fb40f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
19096 TCE("smusdx", 700f070
, fb40f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
19097 TCE("ssat16", 6a00f30
, f3200000
, 3, (RRnpc
, I16
, RRnpc
), ssat16
, t_ssat16
),
19098 TCE("umaal", 0400090, fbe00060
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
, t_mlal
),
19099 TCE("usad8", 780f010
, fb70f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
19100 TCE("usada8", 7800010, fb700000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
19101 TCE("usat16", 6e00f30
, f3a00000
, 3, (RRnpc
, I15
, RRnpc
), usat16
, t_usat16
),
19104 #define ARM_VARIANT & arm_ext_v6k
19105 #undef THUMB_VARIANT
19106 #define THUMB_VARIANT & arm_ext_v6k
19108 tCE("yield", 320f001
, _yield
, 0, (), noargs
, t_hint
),
19109 tCE("wfe", 320f002
, _wfe
, 0, (), noargs
, t_hint
),
19110 tCE("wfi", 320f003
, _wfi
, 0, (), noargs
, t_hint
),
19111 tCE("sev", 320f004
, _sev
, 0, (), noargs
, t_hint
),
19113 #undef THUMB_VARIANT
19114 #define THUMB_VARIANT & arm_ext_v6_notm
19115 TCE("ldrexd", 1b00f9f
, e8d0007f
, 3, (RRnpc_npcsp
, oRRnpc_npcsp
, RRnpcb
),
19117 TCE("strexd", 1a00f90
, e8c00070
, 4, (RRnpc_npcsp
, RRnpc_npcsp
, oRRnpc_npcsp
,
19118 RRnpcb
), strexd
, t_strexd
),
19120 #undef THUMB_VARIANT
19121 #define THUMB_VARIANT & arm_ext_v6t2_v8m
19122 TCE("ldrexb", 1d00f9f
, e8d00f4f
, 2, (RRnpc_npcsp
,RRnpcb
),
19124 TCE("ldrexh", 1f00f9f
, e8d00f5f
, 2, (RRnpc_npcsp
, RRnpcb
),
19126 TCE("strexb", 1c00f90
, e8c00f40
, 3, (RRnpc_npcsp
, RRnpc_npcsp
, ADDR
),
19128 TCE("strexh", 1e00f90
, e8c00f50
, 3, (RRnpc_npcsp
, RRnpc_npcsp
, ADDR
),
19130 TUF("clrex", 57ff01f
, f3bf8f2f
, 0, (), noargs
, noargs
),
19133 #define ARM_VARIANT & arm_ext_sec
19134 #undef THUMB_VARIANT
19135 #define THUMB_VARIANT & arm_ext_sec
19137 TCE("smc", 1600070, f7f08000
, 1, (EXPi
), smc
, t_smc
),
19140 #define ARM_VARIANT & arm_ext_virt
19141 #undef THUMB_VARIANT
19142 #define THUMB_VARIANT & arm_ext_virt
19144 TCE("hvc", 1400070, f7e08000
, 1, (EXPi
), hvc
, t_hvc
),
19145 TCE("eret", 160006e
, f3de8f00
, 0, (), noargs
, noargs
),
19148 #define ARM_VARIANT & arm_ext_pan
19149 #undef THUMB_VARIANT
19150 #define THUMB_VARIANT & arm_ext_pan
19152 TUF("setpan", 1100000, b610
, 1, (I7
), setpan
, t_setpan
),
19155 #define ARM_VARIANT & arm_ext_v6t2
19156 #undef THUMB_VARIANT
19157 #define THUMB_VARIANT & arm_ext_v6t2
19159 TCE("bfc", 7c0001f
, f36f0000
, 3, (RRnpc
, I31
, I32
), bfc
, t_bfc
),
19160 TCE("bfi", 7c00010
, f3600000
, 4, (RRnpc
, RRnpc_I0
, I31
, I32
), bfi
, t_bfi
),
19161 TCE("sbfx", 7a00050
, f3400000
, 4, (RR
, RR
, I31
, I32
), bfx
, t_bfx
),
19162 TCE("ubfx", 7e00050
, f3c00000
, 4, (RR
, RR
, I31
, I32
), bfx
, t_bfx
),
19164 TCE("mls", 0600090, fb000010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mlas
, t_mla
),
19165 TCE("rbit", 6ff0f30
, fa90f0a0
, 2, (RR
, RR
), rd_rm
, t_rbit
),
19167 TC3("ldrht", 03000b0
, f8300e00
, 2, (RRnpc_npcsp
, ADDR
), ldsttv4
, t_ldstt
),
19168 TC3("ldrsht", 03000f0
, f9300e00
, 2, (RRnpc_npcsp
, ADDR
), ldsttv4
, t_ldstt
),
19169 TC3("ldrsbt", 03000d0
, f9100e00
, 2, (RRnpc_npcsp
, ADDR
), ldsttv4
, t_ldstt
),
19170 TC3("strht", 02000b0
, f8200e00
, 2, (RRnpc_npcsp
, ADDR
), ldsttv4
, t_ldstt
),
19172 #undef THUMB_VARIANT
19173 #define THUMB_VARIANT & arm_ext_v6t2_v8m
19174 TCE("movw", 3000000, f2400000
, 2, (RRnpc
, HALF
), mov16
, t_mov16
),
19175 TCE("movt", 3400000, f2c00000
, 2, (RRnpc
, HALF
), mov16
, t_mov16
),
19177 /* Thumb-only instructions. */
19179 #define ARM_VARIANT NULL
19180 TUE("cbnz", 0, b900
, 2, (RR
, EXP
), 0, t_cbz
),
19181 TUE("cbz", 0, b100
, 2, (RR
, EXP
), 0, t_cbz
),
19183 /* ARM does not really have an IT instruction, so always allow it.
19184 The opcode is copied from Thumb in order to allow warnings in
19185 -mimplicit-it=[never | arm] modes. */
19187 #define ARM_VARIANT & arm_ext_v1
19188 #undef THUMB_VARIANT
19189 #define THUMB_VARIANT & arm_ext_v6t2
19191 TUE("it", bf08
, bf08
, 1, (COND
), it
, t_it
),
19192 TUE("itt", bf0c
, bf0c
, 1, (COND
), it
, t_it
),
19193 TUE("ite", bf04
, bf04
, 1, (COND
), it
, t_it
),
19194 TUE("ittt", bf0e
, bf0e
, 1, (COND
), it
, t_it
),
19195 TUE("itet", bf06
, bf06
, 1, (COND
), it
, t_it
),
19196 TUE("itte", bf0a
, bf0a
, 1, (COND
), it
, t_it
),
19197 TUE("itee", bf02
, bf02
, 1, (COND
), it
, t_it
),
19198 TUE("itttt", bf0f
, bf0f
, 1, (COND
), it
, t_it
),
19199 TUE("itett", bf07
, bf07
, 1, (COND
), it
, t_it
),
19200 TUE("ittet", bf0b
, bf0b
, 1, (COND
), it
, t_it
),
19201 TUE("iteet", bf03
, bf03
, 1, (COND
), it
, t_it
),
19202 TUE("ittte", bf0d
, bf0d
, 1, (COND
), it
, t_it
),
19203 TUE("itete", bf05
, bf05
, 1, (COND
), it
, t_it
),
19204 TUE("ittee", bf09
, bf09
, 1, (COND
), it
, t_it
),
19205 TUE("iteee", bf01
, bf01
, 1, (COND
), it
, t_it
),
19206 /* ARM/Thumb-2 instructions with no Thumb-1 equivalent. */
19207 TC3("rrx", 01a00060
, ea4f0030
, 2, (RR
, RR
), rd_rm
, t_rrx
),
19208 TC3("rrxs", 01b00060
, ea5f0030
, 2, (RR
, RR
), rd_rm
, t_rrx
),
19210 /* Thumb2 only instructions. */
19212 #define ARM_VARIANT NULL
19214 TCE("addw", 0, f2000000
, 3, (RR
, RR
, EXPi
), 0, t_add_sub_w
),
19215 TCE("subw", 0, f2a00000
, 3, (RR
, RR
, EXPi
), 0, t_add_sub_w
),
19216 TCE("orn", 0, ea600000
, 3, (RR
, oRR
, SH
), 0, t_orn
),
19217 TCE("orns", 0, ea700000
, 3, (RR
, oRR
, SH
), 0, t_orn
),
19218 TCE("tbb", 0, e8d0f000
, 1, (TB
), 0, t_tb
),
19219 TCE("tbh", 0, e8d0f010
, 1, (TB
), 0, t_tb
),
19221 /* Hardware division instructions. */
19223 #define ARM_VARIANT & arm_ext_adiv
19224 #undef THUMB_VARIANT
19225 #define THUMB_VARIANT & arm_ext_div
19227 TCE("sdiv", 710f010
, fb90f0f0
, 3, (RR
, oRR
, RR
), div
, t_div
),
19228 TCE("udiv", 730f010
, fbb0f0f0
, 3, (RR
, oRR
, RR
), div
, t_div
),
19230 /* ARM V6M/V7 instructions. */
19232 #define ARM_VARIANT & arm_ext_barrier
19233 #undef THUMB_VARIANT
19234 #define THUMB_VARIANT & arm_ext_barrier
19236 TUF("dmb", 57ff050
, f3bf8f50
, 1, (oBARRIER_I15
), barrier
, barrier
),
19237 TUF("dsb", 57ff040
, f3bf8f40
, 1, (oBARRIER_I15
), barrier
, barrier
),
19238 TUF("isb", 57ff060
, f3bf8f60
, 1, (oBARRIER_I15
), barrier
, barrier
),
19240 /* ARM V7 instructions. */
19242 #define ARM_VARIANT & arm_ext_v7
19243 #undef THUMB_VARIANT
19244 #define THUMB_VARIANT & arm_ext_v7
19246 TUF("pli", 450f000
, f910f000
, 1, (ADDR
), pli
, t_pld
),
19247 TCE("dbg", 320f0f0
, f3af80f0
, 1, (I15
), dbg
, t_dbg
),
19250 #define ARM_VARIANT & arm_ext_mp
19251 #undef THUMB_VARIANT
19252 #define THUMB_VARIANT & arm_ext_mp
19254 TUF("pldw", 410f000
, f830f000
, 1, (ADDR
), pld
, t_pld
),
19256 /* AArchv8 instructions. */
19258 #define ARM_VARIANT & arm_ext_v8
19260 /* Instructions shared between armv8-a and armv8-m. */
19261 #undef THUMB_VARIANT
19262 #define THUMB_VARIANT & arm_ext_atomics
19264 TCE("lda", 1900c9f
, e8d00faf
, 2, (RRnpc
, RRnpcb
), rd_rn
, rd_rn
),
19265 TCE("ldab", 1d00c9f
, e8d00f8f
, 2, (RRnpc
, RRnpcb
), rd_rn
, rd_rn
),
19266 TCE("ldah", 1f00c9f
, e8d00f9f
, 2, (RRnpc
, RRnpcb
), rd_rn
, rd_rn
),
19267 TCE("stl", 180fc90
, e8c00faf
, 2, (RRnpc
, RRnpcb
), rm_rn
, rd_rn
),
19268 TCE("stlb", 1c0fc90
, e8c00f8f
, 2, (RRnpc
, RRnpcb
), rm_rn
, rd_rn
),
19269 TCE("stlh", 1e0fc90
, e8c00f9f
, 2, (RRnpc
, RRnpcb
), rm_rn
, rd_rn
),
19270 TCE("ldaex", 1900e9f
, e8d00fef
, 2, (RRnpc
, RRnpcb
), rd_rn
, rd_rn
),
19271 TCE("ldaexb", 1d00e9f
, e8d00fcf
, 2, (RRnpc
,RRnpcb
), rd_rn
, rd_rn
),
19272 TCE("ldaexh", 1f00e9f
, e8d00fdf
, 2, (RRnpc
, RRnpcb
), rd_rn
, rd_rn
),
19273 TCE("stlex", 1800e90
, e8c00fe0
, 3, (RRnpc
, RRnpc
, RRnpcb
),
19275 TCE("stlexb", 1c00e90
, e8c00fc0
, 3, (RRnpc
, RRnpc
, RRnpcb
),
19277 TCE("stlexh", 1e00e90
, e8c00fd0
, 3, (RRnpc
, RRnpc
, RRnpcb
),
19279 #undef THUMB_VARIANT
19280 #define THUMB_VARIANT & arm_ext_v8
19282 tCE("sevl", 320f005
, _sevl
, 0, (), noargs
, t_hint
),
19283 TUE("hlt", 1000070, ba80
, 1, (oIffffb
), bkpt
, t_hlt
),
19284 TCE("ldaexd", 1b00e9f
, e8d000ff
, 3, (RRnpc
, oRRnpc
, RRnpcb
),
19286 TCE("stlexd", 1a00e90
, e8c000f0
, 4, (RRnpc
, RRnpc
, oRRnpc
, RRnpcb
),
19288 /* ARMv8 T32 only. */
19290 #define ARM_VARIANT NULL
19291 TUF("dcps1", 0, f78f8001
, 0, (), noargs
, noargs
),
19292 TUF("dcps2", 0, f78f8002
, 0, (), noargs
, noargs
),
19293 TUF("dcps3", 0, f78f8003
, 0, (), noargs
, noargs
),
19295 /* FP for ARMv8. */
19297 #define ARM_VARIANT & fpu_vfp_ext_armv8xd
19298 #undef THUMB_VARIANT
19299 #define THUMB_VARIANT & fpu_vfp_ext_armv8xd
19301 nUF(vseleq
, _vseleq
, 3, (RVSD
, RVSD
, RVSD
), vsel
),
19302 nUF(vselvs
, _vselvs
, 3, (RVSD
, RVSD
, RVSD
), vsel
),
19303 nUF(vselge
, _vselge
, 3, (RVSD
, RVSD
, RVSD
), vsel
),
19304 nUF(vselgt
, _vselgt
, 3, (RVSD
, RVSD
, RVSD
), vsel
),
19305 nUF(vmaxnm
, _vmaxnm
, 3, (RNSDQ
, oRNSDQ
, RNSDQ
), vmaxnm
),
19306 nUF(vminnm
, _vminnm
, 3, (RNSDQ
, oRNSDQ
, RNSDQ
), vmaxnm
),
19307 nUF(vcvta
, _vcvta
, 2, (RNSDQ
, oRNSDQ
), neon_cvta
),
19308 nUF(vcvtn
, _vcvta
, 2, (RNSDQ
, oRNSDQ
), neon_cvtn
),
19309 nUF(vcvtp
, _vcvta
, 2, (RNSDQ
, oRNSDQ
), neon_cvtp
),
19310 nUF(vcvtm
, _vcvta
, 2, (RNSDQ
, oRNSDQ
), neon_cvtm
),
19311 nCE(vrintr
, _vrintr
, 2, (RNSDQ
, oRNSDQ
), vrintr
),
19312 nCE(vrintz
, _vrintr
, 2, (RNSDQ
, oRNSDQ
), vrintz
),
19313 nCE(vrintx
, _vrintr
, 2, (RNSDQ
, oRNSDQ
), vrintx
),
19314 nUF(vrinta
, _vrinta
, 2, (RNSDQ
, oRNSDQ
), vrinta
),
19315 nUF(vrintn
, _vrinta
, 2, (RNSDQ
, oRNSDQ
), vrintn
),
19316 nUF(vrintp
, _vrinta
, 2, (RNSDQ
, oRNSDQ
), vrintp
),
19317 nUF(vrintm
, _vrinta
, 2, (RNSDQ
, oRNSDQ
), vrintm
),
19319 /* Crypto v1 extensions. */
19321 #define ARM_VARIANT & fpu_crypto_ext_armv8
19322 #undef THUMB_VARIANT
19323 #define THUMB_VARIANT & fpu_crypto_ext_armv8
19325 nUF(aese
, _aes
, 2, (RNQ
, RNQ
), aese
),
19326 nUF(aesd
, _aes
, 2, (RNQ
, RNQ
), aesd
),
19327 nUF(aesmc
, _aes
, 2, (RNQ
, RNQ
), aesmc
),
19328 nUF(aesimc
, _aes
, 2, (RNQ
, RNQ
), aesimc
),
19329 nUF(sha1c
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha1c
),
19330 nUF(sha1p
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha1p
),
19331 nUF(sha1m
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha1m
),
19332 nUF(sha1su0
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha1su0
),
19333 nUF(sha256h
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha256h
),
19334 nUF(sha256h2
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha256h2
),
19335 nUF(sha256su1
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha256su1
),
19336 nUF(sha1h
, _sha1h
, 2, (RNQ
, RNQ
), sha1h
),
19337 nUF(sha1su1
, _sha2op
, 2, (RNQ
, RNQ
), sha1su1
),
19338 nUF(sha256su0
, _sha2op
, 2, (RNQ
, RNQ
), sha256su0
),
19341 #define ARM_VARIANT & crc_ext_armv8
19342 #undef THUMB_VARIANT
19343 #define THUMB_VARIANT & crc_ext_armv8
19344 TUEc("crc32b", 1000040, fac0f080
, 3, (RR
, oRR
, RR
), crc32b
),
19345 TUEc("crc32h", 1200040, fac0f090
, 3, (RR
, oRR
, RR
), crc32h
),
19346 TUEc("crc32w", 1400040, fac0f0a0
, 3, (RR
, oRR
, RR
), crc32w
),
19347 TUEc("crc32cb",1000240, fad0f080
, 3, (RR
, oRR
, RR
), crc32cb
),
19348 TUEc("crc32ch",1200240, fad0f090
, 3, (RR
, oRR
, RR
), crc32ch
),
19349 TUEc("crc32cw",1400240, fad0f0a0
, 3, (RR
, oRR
, RR
), crc32cw
),
19351 /* ARMv8.2 RAS extension. */
19353 #define ARM_VARIANT & arm_ext_v8_2
19354 #undef THUMB_VARIANT
19355 #define THUMB_VARIANT & arm_ext_v8_2
19356 TUE ("esb", 320f010
, f3af8010
, 0, (), noargs
, noargs
),
19359 #define ARM_VARIANT & fpu_fpa_ext_v1 /* Core FPA instruction set (V1). */
19360 #undef THUMB_VARIANT
19361 #define THUMB_VARIANT NULL
19363 cCE("wfs", e200110
, 1, (RR
), rd
),
19364 cCE("rfs", e300110
, 1, (RR
), rd
),
19365 cCE("wfc", e400110
, 1, (RR
), rd
),
19366 cCE("rfc", e500110
, 1, (RR
), rd
),
19368 cCL("ldfs", c100100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
19369 cCL("ldfd", c108100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
19370 cCL("ldfe", c500100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
19371 cCL("ldfp", c508100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
19373 cCL("stfs", c000100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
19374 cCL("stfd", c008100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
19375 cCL("stfe", c400100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
19376 cCL("stfp", c408100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
19378 cCL("mvfs", e008100
, 2, (RF
, RF_IF
), rd_rm
),
19379 cCL("mvfsp", e008120
, 2, (RF
, RF_IF
), rd_rm
),
19380 cCL("mvfsm", e008140
, 2, (RF
, RF_IF
), rd_rm
),
19381 cCL("mvfsz", e008160
, 2, (RF
, RF_IF
), rd_rm
),
19382 cCL("mvfd", e008180
, 2, (RF
, RF_IF
), rd_rm
),
19383 cCL("mvfdp", e0081a0
, 2, (RF
, RF_IF
), rd_rm
),
19384 cCL("mvfdm", e0081c0
, 2, (RF
, RF_IF
), rd_rm
),
19385 cCL("mvfdz", e0081e0
, 2, (RF
, RF_IF
), rd_rm
),
19386 cCL("mvfe", e088100
, 2, (RF
, RF_IF
), rd_rm
),
19387 cCL("mvfep", e088120
, 2, (RF
, RF_IF
), rd_rm
),
19388 cCL("mvfem", e088140
, 2, (RF
, RF_IF
), rd_rm
),
19389 cCL("mvfez", e088160
, 2, (RF
, RF_IF
), rd_rm
),
19391 cCL("mnfs", e108100
, 2, (RF
, RF_IF
), rd_rm
),
19392 cCL("mnfsp", e108120
, 2, (RF
, RF_IF
), rd_rm
),
19393 cCL("mnfsm", e108140
, 2, (RF
, RF_IF
), rd_rm
),
19394 cCL("mnfsz", e108160
, 2, (RF
, RF_IF
), rd_rm
),
19395 cCL("mnfd", e108180
, 2, (RF
, RF_IF
), rd_rm
),
19396 cCL("mnfdp", e1081a0
, 2, (RF
, RF_IF
), rd_rm
),
19397 cCL("mnfdm", e1081c0
, 2, (RF
, RF_IF
), rd_rm
),
19398 cCL("mnfdz", e1081e0
, 2, (RF
, RF_IF
), rd_rm
),
19399 cCL("mnfe", e188100
, 2, (RF
, RF_IF
), rd_rm
),
19400 cCL("mnfep", e188120
, 2, (RF
, RF_IF
), rd_rm
),
19401 cCL("mnfem", e188140
, 2, (RF
, RF_IF
), rd_rm
),
19402 cCL("mnfez", e188160
, 2, (RF
, RF_IF
), rd_rm
),
19404 cCL("abss", e208100
, 2, (RF
, RF_IF
), rd_rm
),
19405 cCL("abssp", e208120
, 2, (RF
, RF_IF
), rd_rm
),
19406 cCL("abssm", e208140
, 2, (RF
, RF_IF
), rd_rm
),
19407 cCL("abssz", e208160
, 2, (RF
, RF_IF
), rd_rm
),
19408 cCL("absd", e208180
, 2, (RF
, RF_IF
), rd_rm
),
19409 cCL("absdp", e2081a0
, 2, (RF
, RF_IF
), rd_rm
),
19410 cCL("absdm", e2081c0
, 2, (RF
, RF_IF
), rd_rm
),
19411 cCL("absdz", e2081e0
, 2, (RF
, RF_IF
), rd_rm
),
19412 cCL("abse", e288100
, 2, (RF
, RF_IF
), rd_rm
),
19413 cCL("absep", e288120
, 2, (RF
, RF_IF
), rd_rm
),
19414 cCL("absem", e288140
, 2, (RF
, RF_IF
), rd_rm
),
19415 cCL("absez", e288160
, 2, (RF
, RF_IF
), rd_rm
),
19417 cCL("rnds", e308100
, 2, (RF
, RF_IF
), rd_rm
),
19418 cCL("rndsp", e308120
, 2, (RF
, RF_IF
), rd_rm
),
19419 cCL("rndsm", e308140
, 2, (RF
, RF_IF
), rd_rm
),
19420 cCL("rndsz", e308160
, 2, (RF
, RF_IF
), rd_rm
),
19421 cCL("rndd", e308180
, 2, (RF
, RF_IF
), rd_rm
),
19422 cCL("rnddp", e3081a0
, 2, (RF
, RF_IF
), rd_rm
),
19423 cCL("rnddm", e3081c0
, 2, (RF
, RF_IF
), rd_rm
),
19424 cCL("rnddz", e3081e0
, 2, (RF
, RF_IF
), rd_rm
),
19425 cCL("rnde", e388100
, 2, (RF
, RF_IF
), rd_rm
),
19426 cCL("rndep", e388120
, 2, (RF
, RF_IF
), rd_rm
),
19427 cCL("rndem", e388140
, 2, (RF
, RF_IF
), rd_rm
),
19428 cCL("rndez", e388160
, 2, (RF
, RF_IF
), rd_rm
),
19430 cCL("sqts", e408100
, 2, (RF
, RF_IF
), rd_rm
),
19431 cCL("sqtsp", e408120
, 2, (RF
, RF_IF
), rd_rm
),
19432 cCL("sqtsm", e408140
, 2, (RF
, RF_IF
), rd_rm
),
19433 cCL("sqtsz", e408160
, 2, (RF
, RF_IF
), rd_rm
),
19434 cCL("sqtd", e408180
, 2, (RF
, RF_IF
), rd_rm
),
19435 cCL("sqtdp", e4081a0
, 2, (RF
, RF_IF
), rd_rm
),
19436 cCL("sqtdm", e4081c0
, 2, (RF
, RF_IF
), rd_rm
),
19437 cCL("sqtdz", e4081e0
, 2, (RF
, RF_IF
), rd_rm
),
19438 cCL("sqte", e488100
, 2, (RF
, RF_IF
), rd_rm
),
19439 cCL("sqtep", e488120
, 2, (RF
, RF_IF
), rd_rm
),
19440 cCL("sqtem", e488140
, 2, (RF
, RF_IF
), rd_rm
),
19441 cCL("sqtez", e488160
, 2, (RF
, RF_IF
), rd_rm
),
19443 cCL("logs", e508100
, 2, (RF
, RF_IF
), rd_rm
),
19444 cCL("logsp", e508120
, 2, (RF
, RF_IF
), rd_rm
),
19445 cCL("logsm", e508140
, 2, (RF
, RF_IF
), rd_rm
),
19446 cCL("logsz", e508160
, 2, (RF
, RF_IF
), rd_rm
),
19447 cCL("logd", e508180
, 2, (RF
, RF_IF
), rd_rm
),
19448 cCL("logdp", e5081a0
, 2, (RF
, RF_IF
), rd_rm
),
19449 cCL("logdm", e5081c0
, 2, (RF
, RF_IF
), rd_rm
),
19450 cCL("logdz", e5081e0
, 2, (RF
, RF_IF
), rd_rm
),
19451 cCL("loge", e588100
, 2, (RF
, RF_IF
), rd_rm
),
19452 cCL("logep", e588120
, 2, (RF
, RF_IF
), rd_rm
),
19453 cCL("logem", e588140
, 2, (RF
, RF_IF
), rd_rm
),
19454 cCL("logez", e588160
, 2, (RF
, RF_IF
), rd_rm
),
19456 cCL("lgns", e608100
, 2, (RF
, RF_IF
), rd_rm
),
19457 cCL("lgnsp", e608120
, 2, (RF
, RF_IF
), rd_rm
),
19458 cCL("lgnsm", e608140
, 2, (RF
, RF_IF
), rd_rm
),
19459 cCL("lgnsz", e608160
, 2, (RF
, RF_IF
), rd_rm
),
19460 cCL("lgnd", e608180
, 2, (RF
, RF_IF
), rd_rm
),
19461 cCL("lgndp", e6081a0
, 2, (RF
, RF_IF
), rd_rm
),
19462 cCL("lgndm", e6081c0
, 2, (RF
, RF_IF
), rd_rm
),
19463 cCL("lgndz", e6081e0
, 2, (RF
, RF_IF
), rd_rm
),
19464 cCL("lgne", e688100
, 2, (RF
, RF_IF
), rd_rm
),
19465 cCL("lgnep", e688120
, 2, (RF
, RF_IF
), rd_rm
),
19466 cCL("lgnem", e688140
, 2, (RF
, RF_IF
), rd_rm
),
19467 cCL("lgnez", e688160
, 2, (RF
, RF_IF
), rd_rm
),
19469 cCL("exps", e708100
, 2, (RF
, RF_IF
), rd_rm
),
19470 cCL("expsp", e708120
, 2, (RF
, RF_IF
), rd_rm
),
19471 cCL("expsm", e708140
, 2, (RF
, RF_IF
), rd_rm
),
19472 cCL("expsz", e708160
, 2, (RF
, RF_IF
), rd_rm
),
19473 cCL("expd", e708180
, 2, (RF
, RF_IF
), rd_rm
),
19474 cCL("expdp", e7081a0
, 2, (RF
, RF_IF
), rd_rm
),
19475 cCL("expdm", e7081c0
, 2, (RF
, RF_IF
), rd_rm
),
19476 cCL("expdz", e7081e0
, 2, (RF
, RF_IF
), rd_rm
),
19477 cCL("expe", e788100
, 2, (RF
, RF_IF
), rd_rm
),
19478 cCL("expep", e788120
, 2, (RF
, RF_IF
), rd_rm
),
19479 cCL("expem", e788140
, 2, (RF
, RF_IF
), rd_rm
),
19480 cCL("expdz", e788160
, 2, (RF
, RF_IF
), rd_rm
),
19482 cCL("sins", e808100
, 2, (RF
, RF_IF
), rd_rm
),
19483 cCL("sinsp", e808120
, 2, (RF
, RF_IF
), rd_rm
),
19484 cCL("sinsm", e808140
, 2, (RF
, RF_IF
), rd_rm
),
19485 cCL("sinsz", e808160
, 2, (RF
, RF_IF
), rd_rm
),
19486 cCL("sind", e808180
, 2, (RF
, RF_IF
), rd_rm
),
19487 cCL("sindp", e8081a0
, 2, (RF
, RF_IF
), rd_rm
),
19488 cCL("sindm", e8081c0
, 2, (RF
, RF_IF
), rd_rm
),
19489 cCL("sindz", e8081e0
, 2, (RF
, RF_IF
), rd_rm
),
19490 cCL("sine", e888100
, 2, (RF
, RF_IF
), rd_rm
),
19491 cCL("sinep", e888120
, 2, (RF
, RF_IF
), rd_rm
),
19492 cCL("sinem", e888140
, 2, (RF
, RF_IF
), rd_rm
),
19493 cCL("sinez", e888160
, 2, (RF
, RF_IF
), rd_rm
),
19495 cCL("coss", e908100
, 2, (RF
, RF_IF
), rd_rm
),
19496 cCL("cossp", e908120
, 2, (RF
, RF_IF
), rd_rm
),
19497 cCL("cossm", e908140
, 2, (RF
, RF_IF
), rd_rm
),
19498 cCL("cossz", e908160
, 2, (RF
, RF_IF
), rd_rm
),
19499 cCL("cosd", e908180
, 2, (RF
, RF_IF
), rd_rm
),
19500 cCL("cosdp", e9081a0
, 2, (RF
, RF_IF
), rd_rm
),
19501 cCL("cosdm", e9081c0
, 2, (RF
, RF_IF
), rd_rm
),
19502 cCL("cosdz", e9081e0
, 2, (RF
, RF_IF
), rd_rm
),
19503 cCL("cose", e988100
, 2, (RF
, RF_IF
), rd_rm
),
19504 cCL("cosep", e988120
, 2, (RF
, RF_IF
), rd_rm
),
19505 cCL("cosem", e988140
, 2, (RF
, RF_IF
), rd_rm
),
19506 cCL("cosez", e988160
, 2, (RF
, RF_IF
), rd_rm
),
19508 cCL("tans", ea08100
, 2, (RF
, RF_IF
), rd_rm
),
19509 cCL("tansp", ea08120
, 2, (RF
, RF_IF
), rd_rm
),
19510 cCL("tansm", ea08140
, 2, (RF
, RF_IF
), rd_rm
),
19511 cCL("tansz", ea08160
, 2, (RF
, RF_IF
), rd_rm
),
19512 cCL("tand", ea08180
, 2, (RF
, RF_IF
), rd_rm
),
19513 cCL("tandp", ea081a0
, 2, (RF
, RF_IF
), rd_rm
),
19514 cCL("tandm", ea081c0
, 2, (RF
, RF_IF
), rd_rm
),
19515 cCL("tandz", ea081e0
, 2, (RF
, RF_IF
), rd_rm
),
19516 cCL("tane", ea88100
, 2, (RF
, RF_IF
), rd_rm
),
19517 cCL("tanep", ea88120
, 2, (RF
, RF_IF
), rd_rm
),
19518 cCL("tanem", ea88140
, 2, (RF
, RF_IF
), rd_rm
),
19519 cCL("tanez", ea88160
, 2, (RF
, RF_IF
), rd_rm
),
19521 cCL("asns", eb08100
, 2, (RF
, RF_IF
), rd_rm
),
19522 cCL("asnsp", eb08120
, 2, (RF
, RF_IF
), rd_rm
),
19523 cCL("asnsm", eb08140
, 2, (RF
, RF_IF
), rd_rm
),
19524 cCL("asnsz", eb08160
, 2, (RF
, RF_IF
), rd_rm
),
19525 cCL("asnd", eb08180
, 2, (RF
, RF_IF
), rd_rm
),
19526 cCL("asndp", eb081a0
, 2, (RF
, RF_IF
), rd_rm
),
19527 cCL("asndm", eb081c0
, 2, (RF
, RF_IF
), rd_rm
),
19528 cCL("asndz", eb081e0
, 2, (RF
, RF_IF
), rd_rm
),
19529 cCL("asne", eb88100
, 2, (RF
, RF_IF
), rd_rm
),
19530 cCL("asnep", eb88120
, 2, (RF
, RF_IF
), rd_rm
),
19531 cCL("asnem", eb88140
, 2, (RF
, RF_IF
), rd_rm
),
19532 cCL("asnez", eb88160
, 2, (RF
, RF_IF
), rd_rm
),
19534 cCL("acss", ec08100
, 2, (RF
, RF_IF
), rd_rm
),
19535 cCL("acssp", ec08120
, 2, (RF
, RF_IF
), rd_rm
),
19536 cCL("acssm", ec08140
, 2, (RF
, RF_IF
), rd_rm
),
19537 cCL("acssz", ec08160
, 2, (RF
, RF_IF
), rd_rm
),
19538 cCL("acsd", ec08180
, 2, (RF
, RF_IF
), rd_rm
),
19539 cCL("acsdp", ec081a0
, 2, (RF
, RF_IF
), rd_rm
),
19540 cCL("acsdm", ec081c0
, 2, (RF
, RF_IF
), rd_rm
),
19541 cCL("acsdz", ec081e0
, 2, (RF
, RF_IF
), rd_rm
),
19542 cCL("acse", ec88100
, 2, (RF
, RF_IF
), rd_rm
),
19543 cCL("acsep", ec88120
, 2, (RF
, RF_IF
), rd_rm
),
19544 cCL("acsem", ec88140
, 2, (RF
, RF_IF
), rd_rm
),
19545 cCL("acsez", ec88160
, 2, (RF
, RF_IF
), rd_rm
),
19547 cCL("atns", ed08100
, 2, (RF
, RF_IF
), rd_rm
),
19548 cCL("atnsp", ed08120
, 2, (RF
, RF_IF
), rd_rm
),
19549 cCL("atnsm", ed08140
, 2, (RF
, RF_IF
), rd_rm
),
19550 cCL("atnsz", ed08160
, 2, (RF
, RF_IF
), rd_rm
),
19551 cCL("atnd", ed08180
, 2, (RF
, RF_IF
), rd_rm
),
19552 cCL("atndp", ed081a0
, 2, (RF
, RF_IF
), rd_rm
),
19553 cCL("atndm", ed081c0
, 2, (RF
, RF_IF
), rd_rm
),
19554 cCL("atndz", ed081e0
, 2, (RF
, RF_IF
), rd_rm
),
19555 cCL("atne", ed88100
, 2, (RF
, RF_IF
), rd_rm
),
19556 cCL("atnep", ed88120
, 2, (RF
, RF_IF
), rd_rm
),
19557 cCL("atnem", ed88140
, 2, (RF
, RF_IF
), rd_rm
),
19558 cCL("atnez", ed88160
, 2, (RF
, RF_IF
), rd_rm
),
19560 cCL("urds", ee08100
, 2, (RF
, RF_IF
), rd_rm
),
19561 cCL("urdsp", ee08120
, 2, (RF
, RF_IF
), rd_rm
),
19562 cCL("urdsm", ee08140
, 2, (RF
, RF_IF
), rd_rm
),
19563 cCL("urdsz", ee08160
, 2, (RF
, RF_IF
), rd_rm
),
19564 cCL("urdd", ee08180
, 2, (RF
, RF_IF
), rd_rm
),
19565 cCL("urddp", ee081a0
, 2, (RF
, RF_IF
), rd_rm
),
19566 cCL("urddm", ee081c0
, 2, (RF
, RF_IF
), rd_rm
),
19567 cCL("urddz", ee081e0
, 2, (RF
, RF_IF
), rd_rm
),
19568 cCL("urde", ee88100
, 2, (RF
, RF_IF
), rd_rm
),
19569 cCL("urdep", ee88120
, 2, (RF
, RF_IF
), rd_rm
),
19570 cCL("urdem", ee88140
, 2, (RF
, RF_IF
), rd_rm
),
19571 cCL("urdez", ee88160
, 2, (RF
, RF_IF
), rd_rm
),
19573 cCL("nrms", ef08100
, 2, (RF
, RF_IF
), rd_rm
),
19574 cCL("nrmsp", ef08120
, 2, (RF
, RF_IF
), rd_rm
),
19575 cCL("nrmsm", ef08140
, 2, (RF
, RF_IF
), rd_rm
),
19576 cCL("nrmsz", ef08160
, 2, (RF
, RF_IF
), rd_rm
),
19577 cCL("nrmd", ef08180
, 2, (RF
, RF_IF
), rd_rm
),
19578 cCL("nrmdp", ef081a0
, 2, (RF
, RF_IF
), rd_rm
),
19579 cCL("nrmdm", ef081c0
, 2, (RF
, RF_IF
), rd_rm
),
19580 cCL("nrmdz", ef081e0
, 2, (RF
, RF_IF
), rd_rm
),
19581 cCL("nrme", ef88100
, 2, (RF
, RF_IF
), rd_rm
),
19582 cCL("nrmep", ef88120
, 2, (RF
, RF_IF
), rd_rm
),
19583 cCL("nrmem", ef88140
, 2, (RF
, RF_IF
), rd_rm
),
19584 cCL("nrmez", ef88160
, 2, (RF
, RF_IF
), rd_rm
),
19586 cCL("adfs", e000100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19587 cCL("adfsp", e000120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19588 cCL("adfsm", e000140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19589 cCL("adfsz", e000160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19590 cCL("adfd", e000180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19591 cCL("adfdp", e0001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19592 cCL("adfdm", e0001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19593 cCL("adfdz", e0001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19594 cCL("adfe", e080100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19595 cCL("adfep", e080120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19596 cCL("adfem", e080140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19597 cCL("adfez", e080160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19599 cCL("sufs", e200100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19600 cCL("sufsp", e200120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19601 cCL("sufsm", e200140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19602 cCL("sufsz", e200160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19603 cCL("sufd", e200180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19604 cCL("sufdp", e2001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19605 cCL("sufdm", e2001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19606 cCL("sufdz", e2001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19607 cCL("sufe", e280100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19608 cCL("sufep", e280120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19609 cCL("sufem", e280140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19610 cCL("sufez", e280160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19612 cCL("rsfs", e300100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19613 cCL("rsfsp", e300120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19614 cCL("rsfsm", e300140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19615 cCL("rsfsz", e300160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19616 cCL("rsfd", e300180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19617 cCL("rsfdp", e3001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19618 cCL("rsfdm", e3001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19619 cCL("rsfdz", e3001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19620 cCL("rsfe", e380100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19621 cCL("rsfep", e380120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19622 cCL("rsfem", e380140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19623 cCL("rsfez", e380160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19625 cCL("mufs", e100100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19626 cCL("mufsp", e100120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19627 cCL("mufsm", e100140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19628 cCL("mufsz", e100160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19629 cCL("mufd", e100180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19630 cCL("mufdp", e1001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19631 cCL("mufdm", e1001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19632 cCL("mufdz", e1001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19633 cCL("mufe", e180100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19634 cCL("mufep", e180120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19635 cCL("mufem", e180140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19636 cCL("mufez", e180160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19638 cCL("dvfs", e400100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19639 cCL("dvfsp", e400120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19640 cCL("dvfsm", e400140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19641 cCL("dvfsz", e400160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19642 cCL("dvfd", e400180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19643 cCL("dvfdp", e4001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19644 cCL("dvfdm", e4001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19645 cCL("dvfdz", e4001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19646 cCL("dvfe", e480100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19647 cCL("dvfep", e480120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19648 cCL("dvfem", e480140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19649 cCL("dvfez", e480160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19651 cCL("rdfs", e500100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19652 cCL("rdfsp", e500120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19653 cCL("rdfsm", e500140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19654 cCL("rdfsz", e500160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19655 cCL("rdfd", e500180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19656 cCL("rdfdp", e5001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19657 cCL("rdfdm", e5001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19658 cCL("rdfdz", e5001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19659 cCL("rdfe", e580100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19660 cCL("rdfep", e580120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19661 cCL("rdfem", e580140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19662 cCL("rdfez", e580160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19664 cCL("pows", e600100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19665 cCL("powsp", e600120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19666 cCL("powsm", e600140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19667 cCL("powsz", e600160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19668 cCL("powd", e600180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19669 cCL("powdp", e6001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19670 cCL("powdm", e6001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19671 cCL("powdz", e6001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19672 cCL("powe", e680100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19673 cCL("powep", e680120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19674 cCL("powem", e680140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19675 cCL("powez", e680160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19677 cCL("rpws", e700100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19678 cCL("rpwsp", e700120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19679 cCL("rpwsm", e700140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19680 cCL("rpwsz", e700160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19681 cCL("rpwd", e700180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19682 cCL("rpwdp", e7001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19683 cCL("rpwdm", e7001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19684 cCL("rpwdz", e7001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19685 cCL("rpwe", e780100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19686 cCL("rpwep", e780120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19687 cCL("rpwem", e780140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19688 cCL("rpwez", e780160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19690 cCL("rmfs", e800100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19691 cCL("rmfsp", e800120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19692 cCL("rmfsm", e800140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19693 cCL("rmfsz", e800160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19694 cCL("rmfd", e800180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19695 cCL("rmfdp", e8001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19696 cCL("rmfdm", e8001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19697 cCL("rmfdz", e8001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19698 cCL("rmfe", e880100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19699 cCL("rmfep", e880120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19700 cCL("rmfem", e880140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19701 cCL("rmfez", e880160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19703 cCL("fmls", e900100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19704 cCL("fmlsp", e900120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19705 cCL("fmlsm", e900140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19706 cCL("fmlsz", e900160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19707 cCL("fmld", e900180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19708 cCL("fmldp", e9001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19709 cCL("fmldm", e9001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19710 cCL("fmldz", e9001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19711 cCL("fmle", e980100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19712 cCL("fmlep", e980120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19713 cCL("fmlem", e980140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19714 cCL("fmlez", e980160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19716 cCL("fdvs", ea00100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19717 cCL("fdvsp", ea00120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19718 cCL("fdvsm", ea00140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19719 cCL("fdvsz", ea00160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19720 cCL("fdvd", ea00180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19721 cCL("fdvdp", ea001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19722 cCL("fdvdm", ea001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19723 cCL("fdvdz", ea001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19724 cCL("fdve", ea80100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19725 cCL("fdvep", ea80120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19726 cCL("fdvem", ea80140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19727 cCL("fdvez", ea80160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19729 cCL("frds", eb00100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19730 cCL("frdsp", eb00120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19731 cCL("frdsm", eb00140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19732 cCL("frdsz", eb00160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19733 cCL("frdd", eb00180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19734 cCL("frddp", eb001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19735 cCL("frddm", eb001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19736 cCL("frddz", eb001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19737 cCL("frde", eb80100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19738 cCL("frdep", eb80120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19739 cCL("frdem", eb80140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19740 cCL("frdez", eb80160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19742 cCL("pols", ec00100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19743 cCL("polsp", ec00120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19744 cCL("polsm", ec00140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19745 cCL("polsz", ec00160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19746 cCL("pold", ec00180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19747 cCL("poldp", ec001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19748 cCL("poldm", ec001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19749 cCL("poldz", ec001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19750 cCL("pole", ec80100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19751 cCL("polep", ec80120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19752 cCL("polem", ec80140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19753 cCL("polez", ec80160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19755 cCE("cmf", e90f110
, 2, (RF
, RF_IF
), fpa_cmp
),
19756 C3E("cmfe", ed0f110
, 2, (RF
, RF_IF
), fpa_cmp
),
19757 cCE("cnf", eb0f110
, 2, (RF
, RF_IF
), fpa_cmp
),
19758 C3E("cnfe", ef0f110
, 2, (RF
, RF_IF
), fpa_cmp
),
19760 cCL("flts", e000110
, 2, (RF
, RR
), rn_rd
),
19761 cCL("fltsp", e000130
, 2, (RF
, RR
), rn_rd
),
19762 cCL("fltsm", e000150
, 2, (RF
, RR
), rn_rd
),
19763 cCL("fltsz", e000170
, 2, (RF
, RR
), rn_rd
),
19764 cCL("fltd", e000190
, 2, (RF
, RR
), rn_rd
),
19765 cCL("fltdp", e0001b0
, 2, (RF
, RR
), rn_rd
),
19766 cCL("fltdm", e0001d0
, 2, (RF
, RR
), rn_rd
),
19767 cCL("fltdz", e0001f0
, 2, (RF
, RR
), rn_rd
),
19768 cCL("flte", e080110
, 2, (RF
, RR
), rn_rd
),
19769 cCL("fltep", e080130
, 2, (RF
, RR
), rn_rd
),
19770 cCL("fltem", e080150
, 2, (RF
, RR
), rn_rd
),
19771 cCL("fltez", e080170
, 2, (RF
, RR
), rn_rd
),
19773 /* The implementation of the FIX instruction is broken on some
19774 assemblers, in that it accepts a precision specifier as well as a
19775 rounding specifier, despite the fact that this is meaningless.
19776 To be more compatible, we accept it as well, though of course it
19777 does not set any bits. */
19778 cCE("fix", e100110
, 2, (RR
, RF
), rd_rm
),
19779 cCL("fixp", e100130
, 2, (RR
, RF
), rd_rm
),
19780 cCL("fixm", e100150
, 2, (RR
, RF
), rd_rm
),
19781 cCL("fixz", e100170
, 2, (RR
, RF
), rd_rm
),
19782 cCL("fixsp", e100130
, 2, (RR
, RF
), rd_rm
),
19783 cCL("fixsm", e100150
, 2, (RR
, RF
), rd_rm
),
19784 cCL("fixsz", e100170
, 2, (RR
, RF
), rd_rm
),
19785 cCL("fixdp", e100130
, 2, (RR
, RF
), rd_rm
),
19786 cCL("fixdm", e100150
, 2, (RR
, RF
), rd_rm
),
19787 cCL("fixdz", e100170
, 2, (RR
, RF
), rd_rm
),
19788 cCL("fixep", e100130
, 2, (RR
, RF
), rd_rm
),
19789 cCL("fixem", e100150
, 2, (RR
, RF
), rd_rm
),
19790 cCL("fixez", e100170
, 2, (RR
, RF
), rd_rm
),
19792 /* Instructions that were new with the real FPA, call them V2. */
19794 #define ARM_VARIANT & fpu_fpa_ext_v2
19796 cCE("lfm", c100200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
19797 cCL("lfmfd", c900200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
19798 cCL("lfmea", d100200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
19799 cCE("sfm", c000200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
19800 cCL("sfmfd", d000200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
19801 cCL("sfmea", c800200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
19804 #define ARM_VARIANT & fpu_vfp_ext_v1xd /* VFP V1xD (single precision). */
19806 /* Moves and type conversions. */
19807 cCE("fcpys", eb00a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
19808 cCE("fmrs", e100a10
, 2, (RR
, RVS
), vfp_reg_from_sp
),
19809 cCE("fmsr", e000a10
, 2, (RVS
, RR
), vfp_sp_from_reg
),
19810 cCE("fmstat", ef1fa10
, 0, (), noargs
),
19811 cCE("vmrs", ef00a10
, 2, (APSR_RR
, RVC
), vmrs
),
19812 cCE("vmsr", ee00a10
, 2, (RVC
, RR
), vmsr
),
19813 cCE("fsitos", eb80ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
19814 cCE("fuitos", eb80a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
19815 cCE("ftosis", ebd0a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
19816 cCE("ftosizs", ebd0ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
19817 cCE("ftouis", ebc0a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
19818 cCE("ftouizs", ebc0ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
19819 cCE("fmrx", ef00a10
, 2, (RR
, RVC
), rd_rn
),
19820 cCE("fmxr", ee00a10
, 2, (RVC
, RR
), rn_rd
),
19822 /* Memory operations. */
19823 cCE("flds", d100a00
, 2, (RVS
, ADDRGLDC
), vfp_sp_ldst
),
19824 cCE("fsts", d000a00
, 2, (RVS
, ADDRGLDC
), vfp_sp_ldst
),
19825 cCE("fldmias", c900a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmia
),
19826 cCE("fldmfds", c900a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmia
),
19827 cCE("fldmdbs", d300a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmdb
),
19828 cCE("fldmeas", d300a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmdb
),
19829 cCE("fldmiax", c900b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmia
),
19830 cCE("fldmfdx", c900b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmia
),
19831 cCE("fldmdbx", d300b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmdb
),
19832 cCE("fldmeax", d300b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmdb
),
19833 cCE("fstmias", c800a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmia
),
19834 cCE("fstmeas", c800a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmia
),
19835 cCE("fstmdbs", d200a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmdb
),
19836 cCE("fstmfds", d200a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmdb
),
19837 cCE("fstmiax", c800b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmia
),
19838 cCE("fstmeax", c800b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmia
),
19839 cCE("fstmdbx", d200b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmdb
),
19840 cCE("fstmfdx", d200b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmdb
),
19842 /* Monadic operations. */
19843 cCE("fabss", eb00ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
19844 cCE("fnegs", eb10a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
19845 cCE("fsqrts", eb10ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
19847 /* Dyadic operations. */
19848 cCE("fadds", e300a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
19849 cCE("fsubs", e300a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
19850 cCE("fmuls", e200a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
19851 cCE("fdivs", e800a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
19852 cCE("fmacs", e000a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
19853 cCE("fmscs", e100a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
19854 cCE("fnmuls", e200a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
19855 cCE("fnmacs", e000a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
19856 cCE("fnmscs", e100a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
19859 cCE("fcmps", eb40a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
19860 cCE("fcmpzs", eb50a40
, 1, (RVS
), vfp_sp_compare_z
),
19861 cCE("fcmpes", eb40ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
19862 cCE("fcmpezs", eb50ac0
, 1, (RVS
), vfp_sp_compare_z
),
19864 /* Double precision load/store are still present on single precision
19865 implementations. */
19866 cCE("fldd", d100b00
, 2, (RVD
, ADDRGLDC
), vfp_dp_ldst
),
19867 cCE("fstd", d000b00
, 2, (RVD
, ADDRGLDC
), vfp_dp_ldst
),
19868 cCE("fldmiad", c900b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmia
),
19869 cCE("fldmfdd", c900b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmia
),
19870 cCE("fldmdbd", d300b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmdb
),
19871 cCE("fldmead", d300b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmdb
),
19872 cCE("fstmiad", c800b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmia
),
19873 cCE("fstmead", c800b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmia
),
19874 cCE("fstmdbd", d200b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmdb
),
19875 cCE("fstmfdd", d200b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmdb
),
19878 #define ARM_VARIANT & fpu_vfp_ext_v1 /* VFP V1 (Double precision). */
19880 /* Moves and type conversions. */
19881 cCE("fcpyd", eb00b40
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
19882 cCE("fcvtds", eb70ac0
, 2, (RVD
, RVS
), vfp_dp_sp_cvt
),
19883 cCE("fcvtsd", eb70bc0
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
19884 cCE("fmdhr", e200b10
, 2, (RVD
, RR
), vfp_dp_rn_rd
),
19885 cCE("fmdlr", e000b10
, 2, (RVD
, RR
), vfp_dp_rn_rd
),
19886 cCE("fmrdh", e300b10
, 2, (RR
, RVD
), vfp_dp_rd_rn
),
19887 cCE("fmrdl", e100b10
, 2, (RR
, RVD
), vfp_dp_rd_rn
),
19888 cCE("fsitod", eb80bc0
, 2, (RVD
, RVS
), vfp_dp_sp_cvt
),
19889 cCE("fuitod", eb80b40
, 2, (RVD
, RVS
), vfp_dp_sp_cvt
),
19890 cCE("ftosid", ebd0b40
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
19891 cCE("ftosizd", ebd0bc0
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
19892 cCE("ftouid", ebc0b40
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
19893 cCE("ftouizd", ebc0bc0
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
19895 /* Monadic operations. */
19896 cCE("fabsd", eb00bc0
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
19897 cCE("fnegd", eb10b40
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
19898 cCE("fsqrtd", eb10bc0
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
19900 /* Dyadic operations. */
19901 cCE("faddd", e300b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
19902 cCE("fsubd", e300b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
19903 cCE("fmuld", e200b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
19904 cCE("fdivd", e800b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
19905 cCE("fmacd", e000b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
19906 cCE("fmscd", e100b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
19907 cCE("fnmuld", e200b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
19908 cCE("fnmacd", e000b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
19909 cCE("fnmscd", e100b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
19912 cCE("fcmpd", eb40b40
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
19913 cCE("fcmpzd", eb50b40
, 1, (RVD
), vfp_dp_rd
),
19914 cCE("fcmped", eb40bc0
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
19915 cCE("fcmpezd", eb50bc0
, 1, (RVD
), vfp_dp_rd
),
19918 #define ARM_VARIANT & fpu_vfp_ext_v2
19920 cCE("fmsrr", c400a10
, 3, (VRSLST
, RR
, RR
), vfp_sp2_from_reg2
),
19921 cCE("fmrrs", c500a10
, 3, (RR
, RR
, VRSLST
), vfp_reg2_from_sp2
),
19922 cCE("fmdrr", c400b10
, 3, (RVD
, RR
, RR
), vfp_dp_rm_rd_rn
),
19923 cCE("fmrrd", c500b10
, 3, (RR
, RR
, RVD
), vfp_dp_rd_rn_rm
),
19925 /* Instructions which may belong to either the Neon or VFP instruction sets.
19926 Individual encoder functions perform additional architecture checks. */
19928 #define ARM_VARIANT & fpu_vfp_ext_v1xd
19929 #undef THUMB_VARIANT
19930 #define THUMB_VARIANT & fpu_vfp_ext_v1xd
19932 /* These mnemonics are unique to VFP. */
19933 NCE(vsqrt
, 0, 2, (RVSD
, RVSD
), vfp_nsyn_sqrt
),
19934 NCE(vdiv
, 0, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_div
),
19935 nCE(vnmul
, _vnmul
, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_nmul
),
19936 nCE(vnmla
, _vnmla
, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_nmul
),
19937 nCE(vnmls
, _vnmls
, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_nmul
),
19938 nCE(vcmp
, _vcmp
, 2, (RVSD
, RSVD_FI0
), vfp_nsyn_cmp
),
19939 nCE(vcmpe
, _vcmpe
, 2, (RVSD
, RSVD_FI0
), vfp_nsyn_cmp
),
19940 NCE(vpush
, 0, 1, (VRSDLST
), vfp_nsyn_push
),
19941 NCE(vpop
, 0, 1, (VRSDLST
), vfp_nsyn_pop
),
19942 NCE(vcvtz
, 0, 2, (RVSD
, RVSD
), vfp_nsyn_cvtz
),
19944 /* Mnemonics shared by Neon and VFP. */
19945 nCEF(vmul
, _vmul
, 3, (RNSDQ
, oRNSDQ
, RNSDQ_RNSC
), neon_mul
),
19946 nCEF(vmla
, _vmla
, 3, (RNSDQ
, oRNSDQ
, RNSDQ_RNSC
), neon_mac_maybe_scalar
),
19947 nCEF(vmls
, _vmls
, 3, (RNSDQ
, oRNSDQ
, RNSDQ_RNSC
), neon_mac_maybe_scalar
),
19949 nCEF(vadd
, _vadd
, 3, (RNSDQ
, oRNSDQ
, RNSDQ
), neon_addsub_if_i
),
19950 nCEF(vsub
, _vsub
, 3, (RNSDQ
, oRNSDQ
, RNSDQ
), neon_addsub_if_i
),
19952 NCEF(vabs
, 1b10300
, 2, (RNSDQ
, RNSDQ
), neon_abs_neg
),
19953 NCEF(vneg
, 1b10380
, 2, (RNSDQ
, RNSDQ
), neon_abs_neg
),
19955 NCE(vldm
, c900b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
19956 NCE(vldmia
, c900b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
19957 NCE(vldmdb
, d100b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
19958 NCE(vstm
, c800b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
19959 NCE(vstmia
, c800b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
19960 NCE(vstmdb
, d000b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
19961 NCE(vldr
, d100b00
, 2, (RVSD
, ADDRGLDC
), neon_ldr_str
),
19962 NCE(vstr
, d000b00
, 2, (RVSD
, ADDRGLDC
), neon_ldr_str
),
19964 nCEF(vcvt
, _vcvt
, 3, (RNSDQ
, RNSDQ
, oI32z
), neon_cvt
),
19965 nCEF(vcvtr
, _vcvt
, 2, (RNSDQ
, RNSDQ
), neon_cvtr
),
19966 NCEF(vcvtb
, eb20a40
, 2, (RVSD
, RVSD
), neon_cvtb
),
19967 NCEF(vcvtt
, eb20a40
, 2, (RVSD
, RVSD
), neon_cvtt
),
19970 /* NOTE: All VMOV encoding is special-cased! */
19971 NCE(vmov
, 0, 1, (VMOV
), neon_mov
),
19972 NCE(vmovq
, 0, 1, (VMOV
), neon_mov
),
19974 #undef THUMB_VARIANT
19975 #define THUMB_VARIANT & fpu_neon_ext_v1
19977 #define ARM_VARIANT & fpu_neon_ext_v1
19979 /* Data processing with three registers of the same length. */
19980 /* integer ops, valid types S8 S16 S32 U8 U16 U32. */
19981 NUF(vaba
, 0000710, 3, (RNDQ
, RNDQ
, RNDQ
), neon_dyadic_i_su
),
19982 NUF(vabaq
, 0000710, 3, (RNQ
, RNQ
, RNQ
), neon_dyadic_i_su
),
19983 NUF(vhadd
, 0000000, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i_su
),
19984 NUF(vhaddq
, 0000000, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i_su
),
19985 NUF(vrhadd
, 0000100, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i_su
),
19986 NUF(vrhaddq
, 0000100, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i_su
),
19987 NUF(vhsub
, 0000200, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i_su
),
19988 NUF(vhsubq
, 0000200, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i_su
),
19989 /* integer ops, valid types S8 S16 S32 S64 U8 U16 U32 U64. */
19990 NUF(vqadd
, 0000010, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i64_su
),
19991 NUF(vqaddq
, 0000010, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i64_su
),
19992 NUF(vqsub
, 0000210, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i64_su
),
19993 NUF(vqsubq
, 0000210, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i64_su
),
19994 NUF(vrshl
, 0000500, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_rshl
),
19995 NUF(vrshlq
, 0000500, 3, (RNQ
, oRNQ
, RNQ
), neon_rshl
),
19996 NUF(vqrshl
, 0000510, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_rshl
),
19997 NUF(vqrshlq
, 0000510, 3, (RNQ
, oRNQ
, RNQ
), neon_rshl
),
19998 /* If not immediate, fall back to neon_dyadic_i64_su.
19999 shl_imm should accept I8 I16 I32 I64,
20000 qshl_imm should accept S8 S16 S32 S64 U8 U16 U32 U64. */
20001 nUF(vshl
, _vshl
, 3, (RNDQ
, oRNDQ
, RNDQ_I63b
), neon_shl_imm
),
20002 nUF(vshlq
, _vshl
, 3, (RNQ
, oRNQ
, RNDQ_I63b
), neon_shl_imm
),
20003 nUF(vqshl
, _vqshl
, 3, (RNDQ
, oRNDQ
, RNDQ_I63b
), neon_qshl_imm
),
20004 nUF(vqshlq
, _vqshl
, 3, (RNQ
, oRNQ
, RNDQ_I63b
), neon_qshl_imm
),
20005 /* Logic ops, types optional & ignored. */
20006 nUF(vand
, _vand
, 3, (RNDQ
, oRNDQ
, RNDQ_Ibig
), neon_logic
),
20007 nUF(vandq
, _vand
, 3, (RNQ
, oRNQ
, RNDQ_Ibig
), neon_logic
),
20008 nUF(vbic
, _vbic
, 3, (RNDQ
, oRNDQ
, RNDQ_Ibig
), neon_logic
),
20009 nUF(vbicq
, _vbic
, 3, (RNQ
, oRNQ
, RNDQ_Ibig
), neon_logic
),
20010 nUF(vorr
, _vorr
, 3, (RNDQ
, oRNDQ
, RNDQ_Ibig
), neon_logic
),
20011 nUF(vorrq
, _vorr
, 3, (RNQ
, oRNQ
, RNDQ_Ibig
), neon_logic
),
20012 nUF(vorn
, _vorn
, 3, (RNDQ
, oRNDQ
, RNDQ_Ibig
), neon_logic
),
20013 nUF(vornq
, _vorn
, 3, (RNQ
, oRNQ
, RNDQ_Ibig
), neon_logic
),
20014 nUF(veor
, _veor
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_logic
),
20015 nUF(veorq
, _veor
, 3, (RNQ
, oRNQ
, RNQ
), neon_logic
),
20016 /* Bitfield ops, untyped. */
20017 NUF(vbsl
, 1100110, 3, (RNDQ
, RNDQ
, RNDQ
), neon_bitfield
),
20018 NUF(vbslq
, 1100110, 3, (RNQ
, RNQ
, RNQ
), neon_bitfield
),
20019 NUF(vbit
, 1200110, 3, (RNDQ
, RNDQ
, RNDQ
), neon_bitfield
),
20020 NUF(vbitq
, 1200110, 3, (RNQ
, RNQ
, RNQ
), neon_bitfield
),
20021 NUF(vbif
, 1300110, 3, (RNDQ
, RNDQ
, RNDQ
), neon_bitfield
),
20022 NUF(vbifq
, 1300110, 3, (RNQ
, RNQ
, RNQ
), neon_bitfield
),
20023 /* Int and float variants, types S8 S16 S32 U8 U16 U32 F32. */
20024 nUF(vabd
, _vabd
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_if_su
),
20025 nUF(vabdq
, _vabd
, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_if_su
),
20026 nUF(vmax
, _vmax
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_if_su
),
20027 nUF(vmaxq
, _vmax
, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_if_su
),
20028 nUF(vmin
, _vmin
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_if_su
),
20029 nUF(vminq
, _vmin
, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_if_su
),
20030 /* Comparisons. Types S8 S16 S32 U8 U16 U32 F32. Non-immediate versions fall
20031 back to neon_dyadic_if_su. */
20032 nUF(vcge
, _vcge
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_cmp
),
20033 nUF(vcgeq
, _vcge
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_cmp
),
20034 nUF(vcgt
, _vcgt
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_cmp
),
20035 nUF(vcgtq
, _vcgt
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_cmp
),
20036 nUF(vclt
, _vclt
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_cmp_inv
),
20037 nUF(vcltq
, _vclt
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_cmp_inv
),
20038 nUF(vcle
, _vcle
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_cmp_inv
),
20039 nUF(vcleq
, _vcle
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_cmp_inv
),
20040 /* Comparison. Type I8 I16 I32 F32. */
20041 nUF(vceq
, _vceq
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_ceq
),
20042 nUF(vceqq
, _vceq
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_ceq
),
20043 /* As above, D registers only. */
20044 nUF(vpmax
, _vpmax
, 3, (RND
, oRND
, RND
), neon_dyadic_if_su_d
),
20045 nUF(vpmin
, _vpmin
, 3, (RND
, oRND
, RND
), neon_dyadic_if_su_d
),
20046 /* Int and float variants, signedness unimportant. */
20047 nUF(vmlaq
, _vmla
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_mac_maybe_scalar
),
20048 nUF(vmlsq
, _vmls
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_mac_maybe_scalar
),
20049 nUF(vpadd
, _vpadd
, 3, (RND
, oRND
, RND
), neon_dyadic_if_i_d
),
20050 /* Add/sub take types I8 I16 I32 I64 F32. */
20051 nUF(vaddq
, _vadd
, 3, (RNQ
, oRNQ
, RNQ
), neon_addsub_if_i
),
20052 nUF(vsubq
, _vsub
, 3, (RNQ
, oRNQ
, RNQ
), neon_addsub_if_i
),
20053 /* vtst takes sizes 8, 16, 32. */
20054 NUF(vtst
, 0000810, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_tst
),
20055 NUF(vtstq
, 0000810, 3, (RNQ
, oRNQ
, RNQ
), neon_tst
),
20056 /* VMUL takes I8 I16 I32 F32 P8. */
20057 nUF(vmulq
, _vmul
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_mul
),
20058 /* VQD{R}MULH takes S16 S32. */
20059 nUF(vqdmulh
, _vqdmulh
, 3, (RNDQ
, oRNDQ
, RNDQ_RNSC
), neon_qdmulh
),
20060 nUF(vqdmulhq
, _vqdmulh
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_qdmulh
),
20061 nUF(vqrdmulh
, _vqrdmulh
, 3, (RNDQ
, oRNDQ
, RNDQ_RNSC
), neon_qdmulh
),
20062 nUF(vqrdmulhq
, _vqrdmulh
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_qdmulh
),
20063 NUF(vacge
, 0000e10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_fcmp_absolute
),
20064 NUF(vacgeq
, 0000e10
, 3, (RNQ
, oRNQ
, RNQ
), neon_fcmp_absolute
),
20065 NUF(vacgt
, 0200e10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_fcmp_absolute
),
20066 NUF(vacgtq
, 0200e10
, 3, (RNQ
, oRNQ
, RNQ
), neon_fcmp_absolute
),
20067 NUF(vaclt
, 0200e10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_fcmp_absolute_inv
),
20068 NUF(vacltq
, 0200e10
, 3, (RNQ
, oRNQ
, RNQ
), neon_fcmp_absolute_inv
),
20069 NUF(vacle
, 0000e10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_fcmp_absolute_inv
),
20070 NUF(vacleq
, 0000e10
, 3, (RNQ
, oRNQ
, RNQ
), neon_fcmp_absolute_inv
),
20071 NUF(vrecps
, 0000f10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_step
),
20072 NUF(vrecpsq
, 0000f10
, 3, (RNQ
, oRNQ
, RNQ
), neon_step
),
20073 NUF(vrsqrts
, 0200f10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_step
),
20074 NUF(vrsqrtsq
, 0200f10
, 3, (RNQ
, oRNQ
, RNQ
), neon_step
),
20075 /* ARM v8.1 extension. */
20076 nUF(vqrdmlah
, _vqrdmlah
, 3, (RNDQ
, oRNDQ
, RNDQ_RNSC
), neon_qdmulh
),
20077 nUF(vqrdmlahq
, _vqrdmlah
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_qdmulh
),
20078 nUF(vqrdmlsh
, _vqrdmlsh
, 3, (RNDQ
, oRNDQ
, RNDQ_RNSC
), neon_qdmulh
),
20079 nUF(vqrdmlshq
, _vqrdmlsh
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_qdmulh
),
20081 /* Two address, int/float. Types S8 S16 S32 F32. */
20082 NUF(vabsq
, 1b10300
, 2, (RNQ
, RNQ
), neon_abs_neg
),
20083 NUF(vnegq
, 1b10380
, 2, (RNQ
, RNQ
), neon_abs_neg
),
20085 /* Data processing with two registers and a shift amount. */
20086 /* Right shifts, and variants with rounding.
20087 Types accepted S8 S16 S32 S64 U8 U16 U32 U64. */
20088 NUF(vshr
, 0800010, 3, (RNDQ
, oRNDQ
, I64z
), neon_rshift_round_imm
),
20089 NUF(vshrq
, 0800010, 3, (RNQ
, oRNQ
, I64z
), neon_rshift_round_imm
),
20090 NUF(vrshr
, 0800210, 3, (RNDQ
, oRNDQ
, I64z
), neon_rshift_round_imm
),
20091 NUF(vrshrq
, 0800210, 3, (RNQ
, oRNQ
, I64z
), neon_rshift_round_imm
),
20092 NUF(vsra
, 0800110, 3, (RNDQ
, oRNDQ
, I64
), neon_rshift_round_imm
),
20093 NUF(vsraq
, 0800110, 3, (RNQ
, oRNQ
, I64
), neon_rshift_round_imm
),
20094 NUF(vrsra
, 0800310, 3, (RNDQ
, oRNDQ
, I64
), neon_rshift_round_imm
),
20095 NUF(vrsraq
, 0800310, 3, (RNQ
, oRNQ
, I64
), neon_rshift_round_imm
),
20096 /* Shift and insert. Sizes accepted 8 16 32 64. */
20097 NUF(vsli
, 1800510, 3, (RNDQ
, oRNDQ
, I63
), neon_sli
),
20098 NUF(vsliq
, 1800510, 3, (RNQ
, oRNQ
, I63
), neon_sli
),
20099 NUF(vsri
, 1800410, 3, (RNDQ
, oRNDQ
, I64
), neon_sri
),
20100 NUF(vsriq
, 1800410, 3, (RNQ
, oRNQ
, I64
), neon_sri
),
20101 /* QSHL{U} immediate accepts S8 S16 S32 S64 U8 U16 U32 U64. */
20102 NUF(vqshlu
, 1800610, 3, (RNDQ
, oRNDQ
, I63
), neon_qshlu_imm
),
20103 NUF(vqshluq
, 1800610, 3, (RNQ
, oRNQ
, I63
), neon_qshlu_imm
),
20104 /* Right shift immediate, saturating & narrowing, with rounding variants.
20105 Types accepted S16 S32 S64 U16 U32 U64. */
20106 NUF(vqshrn
, 0800910, 3, (RND
, RNQ
, I32z
), neon_rshift_sat_narrow
),
20107 NUF(vqrshrn
, 0800950, 3, (RND
, RNQ
, I32z
), neon_rshift_sat_narrow
),
20108 /* As above, unsigned. Types accepted S16 S32 S64. */
20109 NUF(vqshrun
, 0800810, 3, (RND
, RNQ
, I32z
), neon_rshift_sat_narrow_u
),
20110 NUF(vqrshrun
, 0800850, 3, (RND
, RNQ
, I32z
), neon_rshift_sat_narrow_u
),
20111 /* Right shift narrowing. Types accepted I16 I32 I64. */
20112 NUF(vshrn
, 0800810, 3, (RND
, RNQ
, I32z
), neon_rshift_narrow
),
20113 NUF(vrshrn
, 0800850, 3, (RND
, RNQ
, I32z
), neon_rshift_narrow
),
20114 /* Special case. Types S8 S16 S32 U8 U16 U32. Handles max shift variant. */
20115 nUF(vshll
, _vshll
, 3, (RNQ
, RND
, I32
), neon_shll
),
20116 /* CVT with optional immediate for fixed-point variant. */
20117 nUF(vcvtq
, _vcvt
, 3, (RNQ
, RNQ
, oI32b
), neon_cvt
),
20119 nUF(vmvn
, _vmvn
, 2, (RNDQ
, RNDQ_Ibig
), neon_mvn
),
20120 nUF(vmvnq
, _vmvn
, 2, (RNQ
, RNDQ_Ibig
), neon_mvn
),
20122 /* Data processing, three registers of different lengths. */
20123 /* Dyadic, long insns. Types S8 S16 S32 U8 U16 U32. */
20124 NUF(vabal
, 0800500, 3, (RNQ
, RND
, RND
), neon_abal
),
20125 NUF(vabdl
, 0800700, 3, (RNQ
, RND
, RND
), neon_dyadic_long
),
20126 NUF(vaddl
, 0800000, 3, (RNQ
, RND
, RND
), neon_dyadic_long
),
20127 NUF(vsubl
, 0800200, 3, (RNQ
, RND
, RND
), neon_dyadic_long
),
20128 /* If not scalar, fall back to neon_dyadic_long.
20129 Vector types as above, scalar types S16 S32 U16 U32. */
20130 nUF(vmlal
, _vmlal
, 3, (RNQ
, RND
, RND_RNSC
), neon_mac_maybe_scalar_long
),
20131 nUF(vmlsl
, _vmlsl
, 3, (RNQ
, RND
, RND_RNSC
), neon_mac_maybe_scalar_long
),
20132 /* Dyadic, widening insns. Types S8 S16 S32 U8 U16 U32. */
20133 NUF(vaddw
, 0800100, 3, (RNQ
, oRNQ
, RND
), neon_dyadic_wide
),
20134 NUF(vsubw
, 0800300, 3, (RNQ
, oRNQ
, RND
), neon_dyadic_wide
),
20135 /* Dyadic, narrowing insns. Types I16 I32 I64. */
20136 NUF(vaddhn
, 0800400, 3, (RND
, RNQ
, RNQ
), neon_dyadic_narrow
),
20137 NUF(vraddhn
, 1800400, 3, (RND
, RNQ
, RNQ
), neon_dyadic_narrow
),
20138 NUF(vsubhn
, 0800600, 3, (RND
, RNQ
, RNQ
), neon_dyadic_narrow
),
20139 NUF(vrsubhn
, 1800600, 3, (RND
, RNQ
, RNQ
), neon_dyadic_narrow
),
20140 /* Saturating doubling multiplies. Types S16 S32. */
20141 nUF(vqdmlal
, _vqdmlal
, 3, (RNQ
, RND
, RND_RNSC
), neon_mul_sat_scalar_long
),
20142 nUF(vqdmlsl
, _vqdmlsl
, 3, (RNQ
, RND
, RND_RNSC
), neon_mul_sat_scalar_long
),
20143 nUF(vqdmull
, _vqdmull
, 3, (RNQ
, RND
, RND_RNSC
), neon_mul_sat_scalar_long
),
20144 /* VMULL. Vector types S8 S16 S32 U8 U16 U32 P8, scalar types
20145 S16 S32 U16 U32. */
20146 nUF(vmull
, _vmull
, 3, (RNQ
, RND
, RND_RNSC
), neon_vmull
),
20148 /* Extract. Size 8. */
20149 NUF(vext
, 0b00000, 4, (RNDQ
, oRNDQ
, RNDQ
, I15
), neon_ext
),
20150 NUF(vextq
, 0b00000, 4, (RNQ
, oRNQ
, RNQ
, I15
), neon_ext
),
20152 /* Two registers, miscellaneous. */
20153 /* Reverse. Sizes 8 16 32 (must be < size in opcode). */
20154 NUF(vrev64
, 1b00000
, 2, (RNDQ
, RNDQ
), neon_rev
),
20155 NUF(vrev64q
, 1b00000
, 2, (RNQ
, RNQ
), neon_rev
),
20156 NUF(vrev32
, 1b00080
, 2, (RNDQ
, RNDQ
), neon_rev
),
20157 NUF(vrev32q
, 1b00080
, 2, (RNQ
, RNQ
), neon_rev
),
20158 NUF(vrev16
, 1b00100
, 2, (RNDQ
, RNDQ
), neon_rev
),
20159 NUF(vrev16q
, 1b00100
, 2, (RNQ
, RNQ
), neon_rev
),
20160 /* Vector replicate. Sizes 8 16 32. */
20161 nCE(vdup
, _vdup
, 2, (RNDQ
, RR_RNSC
), neon_dup
),
20162 nCE(vdupq
, _vdup
, 2, (RNQ
, RR_RNSC
), neon_dup
),
20163 /* VMOVL. Types S8 S16 S32 U8 U16 U32. */
20164 NUF(vmovl
, 0800a10
, 2, (RNQ
, RND
), neon_movl
),
20165 /* VMOVN. Types I16 I32 I64. */
20166 nUF(vmovn
, _vmovn
, 2, (RND
, RNQ
), neon_movn
),
20167 /* VQMOVN. Types S16 S32 S64 U16 U32 U64. */
20168 nUF(vqmovn
, _vqmovn
, 2, (RND
, RNQ
), neon_qmovn
),
20169 /* VQMOVUN. Types S16 S32 S64. */
20170 nUF(vqmovun
, _vqmovun
, 2, (RND
, RNQ
), neon_qmovun
),
20171 /* VZIP / VUZP. Sizes 8 16 32. */
20172 NUF(vzip
, 1b20180
, 2, (RNDQ
, RNDQ
), neon_zip_uzp
),
20173 NUF(vzipq
, 1b20180
, 2, (RNQ
, RNQ
), neon_zip_uzp
),
20174 NUF(vuzp
, 1b20100
, 2, (RNDQ
, RNDQ
), neon_zip_uzp
),
20175 NUF(vuzpq
, 1b20100
, 2, (RNQ
, RNQ
), neon_zip_uzp
),
20176 /* VQABS / VQNEG. Types S8 S16 S32. */
20177 NUF(vqabs
, 1b00700
, 2, (RNDQ
, RNDQ
), neon_sat_abs_neg
),
20178 NUF(vqabsq
, 1b00700
, 2, (RNQ
, RNQ
), neon_sat_abs_neg
),
20179 NUF(vqneg
, 1b00780
, 2, (RNDQ
, RNDQ
), neon_sat_abs_neg
),
20180 NUF(vqnegq
, 1b00780
, 2, (RNQ
, RNQ
), neon_sat_abs_neg
),
20181 /* Pairwise, lengthening. Types S8 S16 S32 U8 U16 U32. */
20182 NUF(vpadal
, 1b00600
, 2, (RNDQ
, RNDQ
), neon_pair_long
),
20183 NUF(vpadalq
, 1b00600
, 2, (RNQ
, RNQ
), neon_pair_long
),
20184 NUF(vpaddl
, 1b00200
, 2, (RNDQ
, RNDQ
), neon_pair_long
),
20185 NUF(vpaddlq
, 1b00200
, 2, (RNQ
, RNQ
), neon_pair_long
),
20186 /* Reciprocal estimates. Types U32 F32. */
20187 NUF(vrecpe
, 1b30400
, 2, (RNDQ
, RNDQ
), neon_recip_est
),
20188 NUF(vrecpeq
, 1b30400
, 2, (RNQ
, RNQ
), neon_recip_est
),
20189 NUF(vrsqrte
, 1b30480
, 2, (RNDQ
, RNDQ
), neon_recip_est
),
20190 NUF(vrsqrteq
, 1b30480
, 2, (RNQ
, RNQ
), neon_recip_est
),
20191 /* VCLS. Types S8 S16 S32. */
20192 NUF(vcls
, 1b00400
, 2, (RNDQ
, RNDQ
), neon_cls
),
20193 NUF(vclsq
, 1b00400
, 2, (RNQ
, RNQ
), neon_cls
),
20194 /* VCLZ. Types I8 I16 I32. */
20195 NUF(vclz
, 1b00480
, 2, (RNDQ
, RNDQ
), neon_clz
),
20196 NUF(vclzq
, 1b00480
, 2, (RNQ
, RNQ
), neon_clz
),
20197 /* VCNT. Size 8. */
20198 NUF(vcnt
, 1b00500
, 2, (RNDQ
, RNDQ
), neon_cnt
),
20199 NUF(vcntq
, 1b00500
, 2, (RNQ
, RNQ
), neon_cnt
),
20200 /* Two address, untyped. */
20201 NUF(vswp
, 1b20000
, 2, (RNDQ
, RNDQ
), neon_swp
),
20202 NUF(vswpq
, 1b20000
, 2, (RNQ
, RNQ
), neon_swp
),
20203 /* VTRN. Sizes 8 16 32. */
20204 nUF(vtrn
, _vtrn
, 2, (RNDQ
, RNDQ
), neon_trn
),
20205 nUF(vtrnq
, _vtrn
, 2, (RNQ
, RNQ
), neon_trn
),
20207 /* Table lookup. Size 8. */
20208 NUF(vtbl
, 1b00800
, 3, (RND
, NRDLST
, RND
), neon_tbl_tbx
),
20209 NUF(vtbx
, 1b00840
, 3, (RND
, NRDLST
, RND
), neon_tbl_tbx
),
20211 #undef THUMB_VARIANT
20212 #define THUMB_VARIANT & fpu_vfp_v3_or_neon_ext
20214 #define ARM_VARIANT & fpu_vfp_v3_or_neon_ext
20216 /* Neon element/structure load/store. */
20217 nUF(vld1
, _vld1
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
20218 nUF(vst1
, _vst1
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
20219 nUF(vld2
, _vld2
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
20220 nUF(vst2
, _vst2
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
20221 nUF(vld3
, _vld3
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
20222 nUF(vst3
, _vst3
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
20223 nUF(vld4
, _vld4
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
20224 nUF(vst4
, _vst4
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
20226 #undef THUMB_VARIANT
20227 #define THUMB_VARIANT & fpu_vfp_ext_v3xd
20229 #define ARM_VARIANT & fpu_vfp_ext_v3xd
20230 cCE("fconsts", eb00a00
, 2, (RVS
, I255
), vfp_sp_const
),
20231 cCE("fshtos", eba0a40
, 2, (RVS
, I16z
), vfp_sp_conv_16
),
20232 cCE("fsltos", eba0ac0
, 2, (RVS
, I32
), vfp_sp_conv_32
),
20233 cCE("fuhtos", ebb0a40
, 2, (RVS
, I16z
), vfp_sp_conv_16
),
20234 cCE("fultos", ebb0ac0
, 2, (RVS
, I32
), vfp_sp_conv_32
),
20235 cCE("ftoshs", ebe0a40
, 2, (RVS
, I16z
), vfp_sp_conv_16
),
20236 cCE("ftosls", ebe0ac0
, 2, (RVS
, I32
), vfp_sp_conv_32
),
20237 cCE("ftouhs", ebf0a40
, 2, (RVS
, I16z
), vfp_sp_conv_16
),
20238 cCE("ftouls", ebf0ac0
, 2, (RVS
, I32
), vfp_sp_conv_32
),
20240 #undef THUMB_VARIANT
20241 #define THUMB_VARIANT & fpu_vfp_ext_v3
20243 #define ARM_VARIANT & fpu_vfp_ext_v3
20245 cCE("fconstd", eb00b00
, 2, (RVD
, I255
), vfp_dp_const
),
20246 cCE("fshtod", eba0b40
, 2, (RVD
, I16z
), vfp_dp_conv_16
),
20247 cCE("fsltod", eba0bc0
, 2, (RVD
, I32
), vfp_dp_conv_32
),
20248 cCE("fuhtod", ebb0b40
, 2, (RVD
, I16z
), vfp_dp_conv_16
),
20249 cCE("fultod", ebb0bc0
, 2, (RVD
, I32
), vfp_dp_conv_32
),
20250 cCE("ftoshd", ebe0b40
, 2, (RVD
, I16z
), vfp_dp_conv_16
),
20251 cCE("ftosld", ebe0bc0
, 2, (RVD
, I32
), vfp_dp_conv_32
),
20252 cCE("ftouhd", ebf0b40
, 2, (RVD
, I16z
), vfp_dp_conv_16
),
20253 cCE("ftould", ebf0bc0
, 2, (RVD
, I32
), vfp_dp_conv_32
),
20256 #define ARM_VARIANT & fpu_vfp_ext_fma
20257 #undef THUMB_VARIANT
20258 #define THUMB_VARIANT & fpu_vfp_ext_fma
20259 /* Mnemonics shared by Neon and VFP. These are included in the
20260 VFP FMA variant; NEON and VFP FMA always includes the NEON
20261 FMA instructions. */
20262 nCEF(vfma
, _vfma
, 3, (RNSDQ
, oRNSDQ
, RNSDQ
), neon_fmac
),
20263 nCEF(vfms
, _vfms
, 3, (RNSDQ
, oRNSDQ
, RNSDQ
), neon_fmac
),
20264 /* ffmas/ffmad/ffmss/ffmsd are dummy mnemonics to satisfy gas;
20265 the v form should always be used. */
20266 cCE("ffmas", ea00a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
20267 cCE("ffnmas", ea00a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
20268 cCE("ffmad", ea00b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
20269 cCE("ffnmad", ea00b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
20270 nCE(vfnma
, _vfnma
, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_nmul
),
20271 nCE(vfnms
, _vfnms
, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_nmul
),
20273 #undef THUMB_VARIANT
20275 #define ARM_VARIANT & arm_cext_xscale /* Intel XScale extensions. */
20277 cCE("mia", e200010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
20278 cCE("miaph", e280010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
20279 cCE("miabb", e2c0010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
20280 cCE("miabt", e2d0010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
20281 cCE("miatb", e2e0010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
20282 cCE("miatt", e2f0010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
20283 cCE("mar", c400000
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mar
),
20284 cCE("mra", c500000
, 3, (RRnpc
, RRnpc
, RXA
), xsc_mra
),
20287 #define ARM_VARIANT & arm_cext_iwmmxt /* Intel Wireless MMX technology. */
20289 cCE("tandcb", e13f130
, 1, (RR
), iwmmxt_tandorc
),
20290 cCE("tandch", e53f130
, 1, (RR
), iwmmxt_tandorc
),
20291 cCE("tandcw", e93f130
, 1, (RR
), iwmmxt_tandorc
),
20292 cCE("tbcstb", e400010
, 2, (RIWR
, RR
), rn_rd
),
20293 cCE("tbcsth", e400050
, 2, (RIWR
, RR
), rn_rd
),
20294 cCE("tbcstw", e400090
, 2, (RIWR
, RR
), rn_rd
),
20295 cCE("textrcb", e130170
, 2, (RR
, I7
), iwmmxt_textrc
),
20296 cCE("textrch", e530170
, 2, (RR
, I7
), iwmmxt_textrc
),
20297 cCE("textrcw", e930170
, 2, (RR
, I7
), iwmmxt_textrc
),
20298 cCE("textrmub",e100070
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
20299 cCE("textrmuh",e500070
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
20300 cCE("textrmuw",e900070
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
20301 cCE("textrmsb",e100078
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
20302 cCE("textrmsh",e500078
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
20303 cCE("textrmsw",e900078
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
20304 cCE("tinsrb", e600010
, 3, (RIWR
, RR
, I7
), iwmmxt_tinsr
),
20305 cCE("tinsrh", e600050
, 3, (RIWR
, RR
, I7
), iwmmxt_tinsr
),
20306 cCE("tinsrw", e600090
, 3, (RIWR
, RR
, I7
), iwmmxt_tinsr
),
20307 cCE("tmcr", e000110
, 2, (RIWC_RIWG
, RR
), rn_rd
),
20308 cCE("tmcrr", c400000
, 3, (RIWR
, RR
, RR
), rm_rd_rn
),
20309 cCE("tmia", e200010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
20310 cCE("tmiaph", e280010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
20311 cCE("tmiabb", e2c0010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
20312 cCE("tmiabt", e2d0010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
20313 cCE("tmiatb", e2e0010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
20314 cCE("tmiatt", e2f0010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
20315 cCE("tmovmskb",e100030
, 2, (RR
, RIWR
), rd_rn
),
20316 cCE("tmovmskh",e500030
, 2, (RR
, RIWR
), rd_rn
),
20317 cCE("tmovmskw",e900030
, 2, (RR
, RIWR
), rd_rn
),
20318 cCE("tmrc", e100110
, 2, (RR
, RIWC_RIWG
), rd_rn
),
20319 cCE("tmrrc", c500000
, 3, (RR
, RR
, RIWR
), rd_rn_rm
),
20320 cCE("torcb", e13f150
, 1, (RR
), iwmmxt_tandorc
),
20321 cCE("torch", e53f150
, 1, (RR
), iwmmxt_tandorc
),
20322 cCE("torcw", e93f150
, 1, (RR
), iwmmxt_tandorc
),
20323 cCE("waccb", e0001c0
, 2, (RIWR
, RIWR
), rd_rn
),
20324 cCE("wacch", e4001c0
, 2, (RIWR
, RIWR
), rd_rn
),
20325 cCE("waccw", e8001c0
, 2, (RIWR
, RIWR
), rd_rn
),
20326 cCE("waddbss", e300180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20327 cCE("waddb", e000180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20328 cCE("waddbus", e100180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20329 cCE("waddhss", e700180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20330 cCE("waddh", e400180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20331 cCE("waddhus", e500180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20332 cCE("waddwss", eb00180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20333 cCE("waddw", e800180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20334 cCE("waddwus", e900180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20335 cCE("waligni", e000020
, 4, (RIWR
, RIWR
, RIWR
, I7
), iwmmxt_waligni
),
20336 cCE("walignr0",e800020
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20337 cCE("walignr1",e900020
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20338 cCE("walignr2",ea00020
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20339 cCE("walignr3",eb00020
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20340 cCE("wand", e200000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20341 cCE("wandn", e300000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20342 cCE("wavg2b", e800000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20343 cCE("wavg2br", e900000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20344 cCE("wavg2h", ec00000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20345 cCE("wavg2hr", ed00000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20346 cCE("wcmpeqb", e000060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20347 cCE("wcmpeqh", e400060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20348 cCE("wcmpeqw", e800060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20349 cCE("wcmpgtub",e100060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20350 cCE("wcmpgtuh",e500060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20351 cCE("wcmpgtuw",e900060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20352 cCE("wcmpgtsb",e300060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20353 cCE("wcmpgtsh",e700060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20354 cCE("wcmpgtsw",eb00060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20355 cCE("wldrb", c100000
, 2, (RIWR
, ADDR
), iwmmxt_wldstbh
),
20356 cCE("wldrh", c500000
, 2, (RIWR
, ADDR
), iwmmxt_wldstbh
),
20357 cCE("wldrw", c100100
, 2, (RIWR_RIWC
, ADDR
), iwmmxt_wldstw
),
20358 cCE("wldrd", c500100
, 2, (RIWR
, ADDR
), iwmmxt_wldstd
),
20359 cCE("wmacs", e600100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20360 cCE("wmacsz", e700100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20361 cCE("wmacu", e400100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20362 cCE("wmacuz", e500100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20363 cCE("wmadds", ea00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20364 cCE("wmaddu", e800100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20365 cCE("wmaxsb", e200160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20366 cCE("wmaxsh", e600160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20367 cCE("wmaxsw", ea00160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20368 cCE("wmaxub", e000160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20369 cCE("wmaxuh", e400160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20370 cCE("wmaxuw", e800160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20371 cCE("wminsb", e300160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20372 cCE("wminsh", e700160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20373 cCE("wminsw", eb00160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20374 cCE("wminub", e100160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20375 cCE("wminuh", e500160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20376 cCE("wminuw", e900160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20377 cCE("wmov", e000000
, 2, (RIWR
, RIWR
), iwmmxt_wmov
),
20378 cCE("wmulsm", e300100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20379 cCE("wmulsl", e200100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20380 cCE("wmulum", e100100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20381 cCE("wmulul", e000100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20382 cCE("wor", e000000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20383 cCE("wpackhss",e700080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20384 cCE("wpackhus",e500080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20385 cCE("wpackwss",eb00080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20386 cCE("wpackwus",e900080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20387 cCE("wpackdss",ef00080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20388 cCE("wpackdus",ed00080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20389 cCE("wrorh", e700040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
20390 cCE("wrorhg", e700148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
20391 cCE("wrorw", eb00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
20392 cCE("wrorwg", eb00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
20393 cCE("wrord", ef00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
20394 cCE("wrordg", ef00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
20395 cCE("wsadb", e000120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20396 cCE("wsadbz", e100120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20397 cCE("wsadh", e400120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20398 cCE("wsadhz", e500120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20399 cCE("wshufh", e0001e0
, 3, (RIWR
, RIWR
, I255
), iwmmxt_wshufh
),
20400 cCE("wsllh", e500040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
20401 cCE("wsllhg", e500148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
20402 cCE("wsllw", e900040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
20403 cCE("wsllwg", e900148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
20404 cCE("wslld", ed00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
20405 cCE("wslldg", ed00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
20406 cCE("wsrah", e400040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
20407 cCE("wsrahg", e400148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
20408 cCE("wsraw", e800040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
20409 cCE("wsrawg", e800148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
20410 cCE("wsrad", ec00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
20411 cCE("wsradg", ec00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
20412 cCE("wsrlh", e600040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
20413 cCE("wsrlhg", e600148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
20414 cCE("wsrlw", ea00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
20415 cCE("wsrlwg", ea00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
20416 cCE("wsrld", ee00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
20417 cCE("wsrldg", ee00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
20418 cCE("wstrb", c000000
, 2, (RIWR
, ADDR
), iwmmxt_wldstbh
),
20419 cCE("wstrh", c400000
, 2, (RIWR
, ADDR
), iwmmxt_wldstbh
),
20420 cCE("wstrw", c000100
, 2, (RIWR_RIWC
, ADDR
), iwmmxt_wldstw
),
20421 cCE("wstrd", c400100
, 2, (RIWR
, ADDR
), iwmmxt_wldstd
),
20422 cCE("wsubbss", e3001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20423 cCE("wsubb", e0001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20424 cCE("wsubbus", e1001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20425 cCE("wsubhss", e7001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20426 cCE("wsubh", e4001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20427 cCE("wsubhus", e5001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20428 cCE("wsubwss", eb001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20429 cCE("wsubw", e8001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20430 cCE("wsubwus", e9001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20431 cCE("wunpckehub",e0000c0
, 2, (RIWR
, RIWR
), rd_rn
),
20432 cCE("wunpckehuh",e4000c0
, 2, (RIWR
, RIWR
), rd_rn
),
20433 cCE("wunpckehuw",e8000c0
, 2, (RIWR
, RIWR
), rd_rn
),
20434 cCE("wunpckehsb",e2000c0
, 2, (RIWR
, RIWR
), rd_rn
),
20435 cCE("wunpckehsh",e6000c0
, 2, (RIWR
, RIWR
), rd_rn
),
20436 cCE("wunpckehsw",ea000c0
, 2, (RIWR
, RIWR
), rd_rn
),
20437 cCE("wunpckihb", e1000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20438 cCE("wunpckihh", e5000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20439 cCE("wunpckihw", e9000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20440 cCE("wunpckelub",e0000e0
, 2, (RIWR
, RIWR
), rd_rn
),
20441 cCE("wunpckeluh",e4000e0
, 2, (RIWR
, RIWR
), rd_rn
),
20442 cCE("wunpckeluw",e8000e0
, 2, (RIWR
, RIWR
), rd_rn
),
20443 cCE("wunpckelsb",e2000e0
, 2, (RIWR
, RIWR
), rd_rn
),
20444 cCE("wunpckelsh",e6000e0
, 2, (RIWR
, RIWR
), rd_rn
),
20445 cCE("wunpckelsw",ea000e0
, 2, (RIWR
, RIWR
), rd_rn
),
20446 cCE("wunpckilb", e1000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20447 cCE("wunpckilh", e5000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20448 cCE("wunpckilw", e9000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20449 cCE("wxor", e100000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20450 cCE("wzero", e300000
, 1, (RIWR
), iwmmxt_wzero
),
20453 #define ARM_VARIANT & arm_cext_iwmmxt2 /* Intel Wireless MMX technology, version 2. */
20455 cCE("torvscb", e12f190
, 1, (RR
), iwmmxt_tandorc
),
20456 cCE("torvsch", e52f190
, 1, (RR
), iwmmxt_tandorc
),
20457 cCE("torvscw", e92f190
, 1, (RR
), iwmmxt_tandorc
),
20458 cCE("wabsb", e2001c0
, 2, (RIWR
, RIWR
), rd_rn
),
20459 cCE("wabsh", e6001c0
, 2, (RIWR
, RIWR
), rd_rn
),
20460 cCE("wabsw", ea001c0
, 2, (RIWR
, RIWR
), rd_rn
),
20461 cCE("wabsdiffb", e1001c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20462 cCE("wabsdiffh", e5001c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20463 cCE("wabsdiffw", e9001c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20464 cCE("waddbhusl", e2001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20465 cCE("waddbhusm", e6001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20466 cCE("waddhc", e600180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20467 cCE("waddwc", ea00180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20468 cCE("waddsubhx", ea001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20469 cCE("wavg4", e400000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20470 cCE("wavg4r", e500000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20471 cCE("wmaddsn", ee00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20472 cCE("wmaddsx", eb00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20473 cCE("wmaddun", ec00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20474 cCE("wmaddux", e900100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20475 cCE("wmerge", e000080
, 4, (RIWR
, RIWR
, RIWR
, I7
), iwmmxt_wmerge
),
20476 cCE("wmiabb", e0000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20477 cCE("wmiabt", e1000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20478 cCE("wmiatb", e2000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20479 cCE("wmiatt", e3000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20480 cCE("wmiabbn", e4000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20481 cCE("wmiabtn", e5000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20482 cCE("wmiatbn", e6000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20483 cCE("wmiattn", e7000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20484 cCE("wmiawbb", e800120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20485 cCE("wmiawbt", e900120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20486 cCE("wmiawtb", ea00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20487 cCE("wmiawtt", eb00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20488 cCE("wmiawbbn", ec00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20489 cCE("wmiawbtn", ed00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20490 cCE("wmiawtbn", ee00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20491 cCE("wmiawttn", ef00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20492 cCE("wmulsmr", ef00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20493 cCE("wmulumr", ed00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20494 cCE("wmulwumr", ec000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20495 cCE("wmulwsmr", ee000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20496 cCE("wmulwum", ed000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20497 cCE("wmulwsm", ef000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20498 cCE("wmulwl", eb000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20499 cCE("wqmiabb", e8000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20500 cCE("wqmiabt", e9000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20501 cCE("wqmiatb", ea000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20502 cCE("wqmiatt", eb000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20503 cCE("wqmiabbn", ec000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20504 cCE("wqmiabtn", ed000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20505 cCE("wqmiatbn", ee000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20506 cCE("wqmiattn", ef000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20507 cCE("wqmulm", e100080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20508 cCE("wqmulmr", e300080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20509 cCE("wqmulwm", ec000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20510 cCE("wqmulwmr", ee000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20511 cCE("wsubaddhx", ed001c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20514 #define ARM_VARIANT & arm_cext_maverick /* Cirrus Maverick instructions. */
20516 cCE("cfldrs", c100400
, 2, (RMF
, ADDRGLDC
), rd_cpaddr
),
20517 cCE("cfldrd", c500400
, 2, (RMD
, ADDRGLDC
), rd_cpaddr
),
20518 cCE("cfldr32", c100500
, 2, (RMFX
, ADDRGLDC
), rd_cpaddr
),
20519 cCE("cfldr64", c500500
, 2, (RMDX
, ADDRGLDC
), rd_cpaddr
),
20520 cCE("cfstrs", c000400
, 2, (RMF
, ADDRGLDC
), rd_cpaddr
),
20521 cCE("cfstrd", c400400
, 2, (RMD
, ADDRGLDC
), rd_cpaddr
),
20522 cCE("cfstr32", c000500
, 2, (RMFX
, ADDRGLDC
), rd_cpaddr
),
20523 cCE("cfstr64", c400500
, 2, (RMDX
, ADDRGLDC
), rd_cpaddr
),
20524 cCE("cfmvsr", e000450
, 2, (RMF
, RR
), rn_rd
),
20525 cCE("cfmvrs", e100450
, 2, (RR
, RMF
), rd_rn
),
20526 cCE("cfmvdlr", e000410
, 2, (RMD
, RR
), rn_rd
),
20527 cCE("cfmvrdl", e100410
, 2, (RR
, RMD
), rd_rn
),
20528 cCE("cfmvdhr", e000430
, 2, (RMD
, RR
), rn_rd
),
20529 cCE("cfmvrdh", e100430
, 2, (RR
, RMD
), rd_rn
),
20530 cCE("cfmv64lr",e000510
, 2, (RMDX
, RR
), rn_rd
),
20531 cCE("cfmvr64l",e100510
, 2, (RR
, RMDX
), rd_rn
),
20532 cCE("cfmv64hr",e000530
, 2, (RMDX
, RR
), rn_rd
),
20533 cCE("cfmvr64h",e100530
, 2, (RR
, RMDX
), rd_rn
),
20534 cCE("cfmval32",e200440
, 2, (RMAX
, RMFX
), rd_rn
),
20535 cCE("cfmv32al",e100440
, 2, (RMFX
, RMAX
), rd_rn
),
20536 cCE("cfmvam32",e200460
, 2, (RMAX
, RMFX
), rd_rn
),
20537 cCE("cfmv32am",e100460
, 2, (RMFX
, RMAX
), rd_rn
),
20538 cCE("cfmvah32",e200480
, 2, (RMAX
, RMFX
), rd_rn
),
20539 cCE("cfmv32ah",e100480
, 2, (RMFX
, RMAX
), rd_rn
),
20540 cCE("cfmva32", e2004a0
, 2, (RMAX
, RMFX
), rd_rn
),
20541 cCE("cfmv32a", e1004a0
, 2, (RMFX
, RMAX
), rd_rn
),
20542 cCE("cfmva64", e2004c0
, 2, (RMAX
, RMDX
), rd_rn
),
20543 cCE("cfmv64a", e1004c0
, 2, (RMDX
, RMAX
), rd_rn
),
20544 cCE("cfmvsc32",e2004e0
, 2, (RMDS
, RMDX
), mav_dspsc
),
20545 cCE("cfmv32sc",e1004e0
, 2, (RMDX
, RMDS
), rd
),
20546 cCE("cfcpys", e000400
, 2, (RMF
, RMF
), rd_rn
),
20547 cCE("cfcpyd", e000420
, 2, (RMD
, RMD
), rd_rn
),
20548 cCE("cfcvtsd", e000460
, 2, (RMD
, RMF
), rd_rn
),
20549 cCE("cfcvtds", e000440
, 2, (RMF
, RMD
), rd_rn
),
20550 cCE("cfcvt32s",e000480
, 2, (RMF
, RMFX
), rd_rn
),
20551 cCE("cfcvt32d",e0004a0
, 2, (RMD
, RMFX
), rd_rn
),
20552 cCE("cfcvt64s",e0004c0
, 2, (RMF
, RMDX
), rd_rn
),
20553 cCE("cfcvt64d",e0004e0
, 2, (RMD
, RMDX
), rd_rn
),
20554 cCE("cfcvts32",e100580
, 2, (RMFX
, RMF
), rd_rn
),
20555 cCE("cfcvtd32",e1005a0
, 2, (RMFX
, RMD
), rd_rn
),
20556 cCE("cftruncs32",e1005c0
, 2, (RMFX
, RMF
), rd_rn
),
20557 cCE("cftruncd32",e1005e0
, 2, (RMFX
, RMD
), rd_rn
),
20558 cCE("cfrshl32",e000550
, 3, (RMFX
, RMFX
, RR
), mav_triple
),
20559 cCE("cfrshl64",e000570
, 3, (RMDX
, RMDX
, RR
), mav_triple
),
20560 cCE("cfsh32", e000500
, 3, (RMFX
, RMFX
, I63s
), mav_shift
),
20561 cCE("cfsh64", e200500
, 3, (RMDX
, RMDX
, I63s
), mav_shift
),
20562 cCE("cfcmps", e100490
, 3, (RR
, RMF
, RMF
), rd_rn_rm
),
20563 cCE("cfcmpd", e1004b0
, 3, (RR
, RMD
, RMD
), rd_rn_rm
),
20564 cCE("cfcmp32", e100590
, 3, (RR
, RMFX
, RMFX
), rd_rn_rm
),
20565 cCE("cfcmp64", e1005b0
, 3, (RR
, RMDX
, RMDX
), rd_rn_rm
),
20566 cCE("cfabss", e300400
, 2, (RMF
, RMF
), rd_rn
),
20567 cCE("cfabsd", e300420
, 2, (RMD
, RMD
), rd_rn
),
20568 cCE("cfnegs", e300440
, 2, (RMF
, RMF
), rd_rn
),
20569 cCE("cfnegd", e300460
, 2, (RMD
, RMD
), rd_rn
),
20570 cCE("cfadds", e300480
, 3, (RMF
, RMF
, RMF
), rd_rn_rm
),
20571 cCE("cfaddd", e3004a0
, 3, (RMD
, RMD
, RMD
), rd_rn_rm
),
20572 cCE("cfsubs", e3004c0
, 3, (RMF
, RMF
, RMF
), rd_rn_rm
),
20573 cCE("cfsubd", e3004e0
, 3, (RMD
, RMD
, RMD
), rd_rn_rm
),
20574 cCE("cfmuls", e100400
, 3, (RMF
, RMF
, RMF
), rd_rn_rm
),
20575 cCE("cfmuld", e100420
, 3, (RMD
, RMD
, RMD
), rd_rn_rm
),
20576 cCE("cfabs32", e300500
, 2, (RMFX
, RMFX
), rd_rn
),
20577 cCE("cfabs64", e300520
, 2, (RMDX
, RMDX
), rd_rn
),
20578 cCE("cfneg32", e300540
, 2, (RMFX
, RMFX
), rd_rn
),
20579 cCE("cfneg64", e300560
, 2, (RMDX
, RMDX
), rd_rn
),
20580 cCE("cfadd32", e300580
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
20581 cCE("cfadd64", e3005a0
, 3, (RMDX
, RMDX
, RMDX
), rd_rn_rm
),
20582 cCE("cfsub32", e3005c0
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
20583 cCE("cfsub64", e3005e0
, 3, (RMDX
, RMDX
, RMDX
), rd_rn_rm
),
20584 cCE("cfmul32", e100500
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
20585 cCE("cfmul64", e100520
, 3, (RMDX
, RMDX
, RMDX
), rd_rn_rm
),
20586 cCE("cfmac32", e100540
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
20587 cCE("cfmsc32", e100560
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
20588 cCE("cfmadd32",e000600
, 4, (RMAX
, RMFX
, RMFX
, RMFX
), mav_quad
),
20589 cCE("cfmsub32",e100600
, 4, (RMAX
, RMFX
, RMFX
, RMFX
), mav_quad
),
20590 cCE("cfmadda32", e200600
, 4, (RMAX
, RMAX
, RMFX
, RMFX
), mav_quad
),
20591 cCE("cfmsuba32", e300600
, 4, (RMAX
, RMAX
, RMFX
, RMFX
), mav_quad
),
20594 #define ARM_VARIANT NULL
20595 #undef THUMB_VARIANT
20596 #define THUMB_VARIANT & arm_ext_v8m
20597 TUE("tt", 0, e840f000
, 2, (RRnpc
, RRnpc
), 0, tt
),
20598 TUE("ttt", 0, e840f040
, 2, (RRnpc
, RRnpc
), 0, tt
),
20601 #undef THUMB_VARIANT
20627 /* MD interface: bits in the object file. */
20629 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
20630 for use in the a.out file, and stores them in the array pointed to by buf.
20631 This knows about the endian-ness of the target machine and does
20632 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
20633 2 (short) and 4 (long) Floating numbers are put out as a series of
20634 LITTLENUMS (shorts, here at least). */
20637 md_number_to_chars (char * buf
, valueT val
, int n
)
20639 if (target_big_endian
)
20640 number_to_chars_bigendian (buf
, val
, n
);
20642 number_to_chars_littleendian (buf
, val
, n
);
20646 md_chars_to_number (char * buf
, int n
)
20649 unsigned char * where
= (unsigned char *) buf
;
20651 if (target_big_endian
)
20656 result
|= (*where
++ & 255);
20664 result
|= (where
[n
] & 255);
20671 /* MD interface: Sections. */
20673 /* Calculate the maximum variable size (i.e., excluding fr_fix)
20674 that an rs_machine_dependent frag may reach. */
20677 arm_frag_max_var (fragS
*fragp
)
20679 /* We only use rs_machine_dependent for variable-size Thumb instructions,
20680 which are either THUMB_SIZE (2) or INSN_SIZE (4).
20682 Note that we generate relaxable instructions even for cases that don't
20683 really need it, like an immediate that's a trivial constant. So we're
20684 overestimating the instruction size for some of those cases. Rather
20685 than putting more intelligence here, it would probably be better to
20686 avoid generating a relaxation frag in the first place when it can be
20687 determined up front that a short instruction will suffice. */
20689 gas_assert (fragp
->fr_type
== rs_machine_dependent
);
20693 /* Estimate the size of a frag before relaxing. Assume everything fits in
20697 md_estimate_size_before_relax (fragS
* fragp
,
20698 segT segtype ATTRIBUTE_UNUSED
)
20704 /* Convert a machine dependent frag. */
20707 md_convert_frag (bfd
*abfd
, segT asec ATTRIBUTE_UNUSED
, fragS
*fragp
)
20709 unsigned long insn
;
20710 unsigned long old_op
;
20718 buf
= fragp
->fr_literal
+ fragp
->fr_fix
;
20720 old_op
= bfd_get_16(abfd
, buf
);
20721 if (fragp
->fr_symbol
)
20723 exp
.X_op
= O_symbol
;
20724 exp
.X_add_symbol
= fragp
->fr_symbol
;
20728 exp
.X_op
= O_constant
;
20730 exp
.X_add_number
= fragp
->fr_offset
;
20731 opcode
= fragp
->fr_subtype
;
20734 case T_MNEM_ldr_pc
:
20735 case T_MNEM_ldr_pc2
:
20736 case T_MNEM_ldr_sp
:
20737 case T_MNEM_str_sp
:
20744 if (fragp
->fr_var
== 4)
20746 insn
= THUMB_OP32 (opcode
);
20747 if ((old_op
>> 12) == 4 || (old_op
>> 12) == 9)
20749 insn
|= (old_op
& 0x700) << 4;
20753 insn
|= (old_op
& 7) << 12;
20754 insn
|= (old_op
& 0x38) << 13;
20756 insn
|= 0x00000c00;
20757 put_thumb32_insn (buf
, insn
);
20758 reloc_type
= BFD_RELOC_ARM_T32_OFFSET_IMM
;
20762 reloc_type
= BFD_RELOC_ARM_THUMB_OFFSET
;
20764 pc_rel
= (opcode
== T_MNEM_ldr_pc2
);
20767 if (fragp
->fr_var
== 4)
20769 insn
= THUMB_OP32 (opcode
);
20770 insn
|= (old_op
& 0xf0) << 4;
20771 put_thumb32_insn (buf
, insn
);
20772 reloc_type
= BFD_RELOC_ARM_T32_ADD_PC12
;
20776 reloc_type
= BFD_RELOC_ARM_THUMB_ADD
;
20777 exp
.X_add_number
-= 4;
20785 if (fragp
->fr_var
== 4)
20787 int r0off
= (opcode
== T_MNEM_mov
20788 || opcode
== T_MNEM_movs
) ? 0 : 8;
20789 insn
= THUMB_OP32 (opcode
);
20790 insn
= (insn
& 0xe1ffffff) | 0x10000000;
20791 insn
|= (old_op
& 0x700) << r0off
;
20792 put_thumb32_insn (buf
, insn
);
20793 reloc_type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
20797 reloc_type
= BFD_RELOC_ARM_THUMB_IMM
;
20802 if (fragp
->fr_var
== 4)
20804 insn
= THUMB_OP32(opcode
);
20805 put_thumb32_insn (buf
, insn
);
20806 reloc_type
= BFD_RELOC_THUMB_PCREL_BRANCH25
;
20809 reloc_type
= BFD_RELOC_THUMB_PCREL_BRANCH12
;
20813 if (fragp
->fr_var
== 4)
20815 insn
= THUMB_OP32(opcode
);
20816 insn
|= (old_op
& 0xf00) << 14;
20817 put_thumb32_insn (buf
, insn
);
20818 reloc_type
= BFD_RELOC_THUMB_PCREL_BRANCH20
;
20821 reloc_type
= BFD_RELOC_THUMB_PCREL_BRANCH9
;
20824 case T_MNEM_add_sp
:
20825 case T_MNEM_add_pc
:
20826 case T_MNEM_inc_sp
:
20827 case T_MNEM_dec_sp
:
20828 if (fragp
->fr_var
== 4)
20830 /* ??? Choose between add and addw. */
20831 insn
= THUMB_OP32 (opcode
);
20832 insn
|= (old_op
& 0xf0) << 4;
20833 put_thumb32_insn (buf
, insn
);
20834 if (opcode
== T_MNEM_add_pc
)
20835 reloc_type
= BFD_RELOC_ARM_T32_IMM12
;
20837 reloc_type
= BFD_RELOC_ARM_T32_ADD_IMM
;
20840 reloc_type
= BFD_RELOC_ARM_THUMB_ADD
;
20848 if (fragp
->fr_var
== 4)
20850 insn
= THUMB_OP32 (opcode
);
20851 insn
|= (old_op
& 0xf0) << 4;
20852 insn
|= (old_op
& 0xf) << 16;
20853 put_thumb32_insn (buf
, insn
);
20854 if (insn
& (1 << 20))
20855 reloc_type
= BFD_RELOC_ARM_T32_ADD_IMM
;
20857 reloc_type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
20860 reloc_type
= BFD_RELOC_ARM_THUMB_ADD
;
20866 fixp
= fix_new_exp (fragp
, fragp
->fr_fix
, fragp
->fr_var
, &exp
, pc_rel
,
20867 (enum bfd_reloc_code_real
) reloc_type
);
20868 fixp
->fx_file
= fragp
->fr_file
;
20869 fixp
->fx_line
= fragp
->fr_line
;
20870 fragp
->fr_fix
+= fragp
->fr_var
;
20872 /* Set whether we use thumb-2 ISA based on final relaxation results. */
20873 if (thumb_mode
&& fragp
->fr_var
== 4 && no_cpu_selected ()
20874 && !ARM_CPU_HAS_FEATURE (thumb_arch_used
, arm_arch_t2
))
20875 ARM_MERGE_FEATURE_SETS (arm_arch_used
, thumb_arch_used
, arm_ext_v6t2
);
20878 /* Return the size of a relaxable immediate operand instruction.
20879 SHIFT and SIZE specify the form of the allowable immediate. */
20881 relax_immediate (fragS
*fragp
, int size
, int shift
)
20887 /* ??? Should be able to do better than this. */
20888 if (fragp
->fr_symbol
)
20891 low
= (1 << shift
) - 1;
20892 mask
= (1 << (shift
+ size
)) - (1 << shift
);
20893 offset
= fragp
->fr_offset
;
20894 /* Force misaligned offsets to 32-bit variant. */
20897 if (offset
& ~mask
)
20902 /* Get the address of a symbol during relaxation. */
20904 relaxed_symbol_addr (fragS
*fragp
, long stretch
)
20910 sym
= fragp
->fr_symbol
;
20911 sym_frag
= symbol_get_frag (sym
);
20912 know (S_GET_SEGMENT (sym
) != absolute_section
20913 || sym_frag
== &zero_address_frag
);
20914 addr
= S_GET_VALUE (sym
) + fragp
->fr_offset
;
20916 /* If frag has yet to be reached on this pass, assume it will
20917 move by STRETCH just as we did. If this is not so, it will
20918 be because some frag between grows, and that will force
20922 && sym_frag
->relax_marker
!= fragp
->relax_marker
)
20926 /* Adjust stretch for any alignment frag. Note that if have
20927 been expanding the earlier code, the symbol may be
20928 defined in what appears to be an earlier frag. FIXME:
20929 This doesn't handle the fr_subtype field, which specifies
20930 a maximum number of bytes to skip when doing an
20932 for (f
= fragp
; f
!= NULL
&& f
!= sym_frag
; f
= f
->fr_next
)
20934 if (f
->fr_type
== rs_align
|| f
->fr_type
== rs_align_code
)
20937 stretch
= - ((- stretch
)
20938 & ~ ((1 << (int) f
->fr_offset
) - 1));
20940 stretch
&= ~ ((1 << (int) f
->fr_offset
) - 1);
20952 /* Return the size of a relaxable adr pseudo-instruction or PC-relative
20955 relax_adr (fragS
*fragp
, asection
*sec
, long stretch
)
20960 /* Assume worst case for symbols not known to be in the same section. */
20961 if (fragp
->fr_symbol
== NULL
20962 || !S_IS_DEFINED (fragp
->fr_symbol
)
20963 || sec
!= S_GET_SEGMENT (fragp
->fr_symbol
)
20964 || S_IS_WEAK (fragp
->fr_symbol
))
20967 val
= relaxed_symbol_addr (fragp
, stretch
);
20968 addr
= fragp
->fr_address
+ fragp
->fr_fix
;
20969 addr
= (addr
+ 4) & ~3;
20970 /* Force misaligned targets to 32-bit variant. */
20974 if (val
< 0 || val
> 1020)
20979 /* Return the size of a relaxable add/sub immediate instruction. */
20981 relax_addsub (fragS
*fragp
, asection
*sec
)
20986 buf
= fragp
->fr_literal
+ fragp
->fr_fix
;
20987 op
= bfd_get_16(sec
->owner
, buf
);
20988 if ((op
& 0xf) == ((op
>> 4) & 0xf))
20989 return relax_immediate (fragp
, 8, 0);
20991 return relax_immediate (fragp
, 3, 0);
20994 /* Return TRUE iff the definition of symbol S could be pre-empted
20995 (overridden) at link or load time. */
20997 symbol_preemptible (symbolS
*s
)
20999 /* Weak symbols can always be pre-empted. */
21003 /* Non-global symbols cannot be pre-empted. */
21004 if (! S_IS_EXTERNAL (s
))
21008 /* In ELF, a global symbol can be marked protected, or private. In that
21009 case it can't be pre-empted (other definitions in the same link unit
21010 would violate the ODR). */
21011 if (ELF_ST_VISIBILITY (S_GET_OTHER (s
)) > STV_DEFAULT
)
21015 /* Other global symbols might be pre-empted. */
21019 /* Return the size of a relaxable branch instruction. BITS is the
21020 size of the offset field in the narrow instruction. */
21023 relax_branch (fragS
*fragp
, asection
*sec
, int bits
, long stretch
)
21029 /* Assume worst case for symbols not known to be in the same section. */
21030 if (!S_IS_DEFINED (fragp
->fr_symbol
)
21031 || sec
!= S_GET_SEGMENT (fragp
->fr_symbol
)
21032 || S_IS_WEAK (fragp
->fr_symbol
))
21036 /* A branch to a function in ARM state will require interworking. */
21037 if (S_IS_DEFINED (fragp
->fr_symbol
)
21038 && ARM_IS_FUNC (fragp
->fr_symbol
))
21042 if (symbol_preemptible (fragp
->fr_symbol
))
21045 val
= relaxed_symbol_addr (fragp
, stretch
);
21046 addr
= fragp
->fr_address
+ fragp
->fr_fix
+ 4;
21049 /* Offset is a signed value *2 */
21051 if (val
>= limit
|| val
< -limit
)
21057 /* Relax a machine dependent frag. This returns the amount by which
21058 the current size of the frag should change. */
21061 arm_relax_frag (asection
*sec
, fragS
*fragp
, long stretch
)
21066 oldsize
= fragp
->fr_var
;
21067 switch (fragp
->fr_subtype
)
21069 case T_MNEM_ldr_pc2
:
21070 newsize
= relax_adr (fragp
, sec
, stretch
);
21072 case T_MNEM_ldr_pc
:
21073 case T_MNEM_ldr_sp
:
21074 case T_MNEM_str_sp
:
21075 newsize
= relax_immediate (fragp
, 8, 2);
21079 newsize
= relax_immediate (fragp
, 5, 2);
21083 newsize
= relax_immediate (fragp
, 5, 1);
21087 newsize
= relax_immediate (fragp
, 5, 0);
21090 newsize
= relax_adr (fragp
, sec
, stretch
);
21096 newsize
= relax_immediate (fragp
, 8, 0);
21099 newsize
= relax_branch (fragp
, sec
, 11, stretch
);
21102 newsize
= relax_branch (fragp
, sec
, 8, stretch
);
21104 case T_MNEM_add_sp
:
21105 case T_MNEM_add_pc
:
21106 newsize
= relax_immediate (fragp
, 8, 2);
21108 case T_MNEM_inc_sp
:
21109 case T_MNEM_dec_sp
:
21110 newsize
= relax_immediate (fragp
, 7, 2);
21116 newsize
= relax_addsub (fragp
, sec
);
21122 fragp
->fr_var
= newsize
;
21123 /* Freeze wide instructions that are at or before the same location as
21124 in the previous pass. This avoids infinite loops.
21125 Don't freeze them unconditionally because targets may be artificially
21126 misaligned by the expansion of preceding frags. */
21127 if (stretch
<= 0 && newsize
> 2)
21129 md_convert_frag (sec
->owner
, sec
, fragp
);
21133 return newsize
- oldsize
;
21136 /* Round up a section size to the appropriate boundary. */
21139 md_section_align (segT segment ATTRIBUTE_UNUSED
,
21142 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
21143 if (OUTPUT_FLAVOR
== bfd_target_aout_flavour
)
21145 /* For a.out, force the section size to be aligned. If we don't do
21146 this, BFD will align it for us, but it will not write out the
21147 final bytes of the section. This may be a bug in BFD, but it is
21148 easier to fix it here since that is how the other a.out targets
21152 align
= bfd_get_section_alignment (stdoutput
, segment
);
21153 size
= ((size
+ (1 << align
) - 1) & (-((valueT
) 1 << align
)));
21160 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
21161 of an rs_align_code fragment. */
21164 arm_handle_align (fragS
* fragP
)
21166 static char const arm_noop
[2][2][4] =
21169 {0x00, 0x00, 0xa0, 0xe1}, /* LE */
21170 {0xe1, 0xa0, 0x00, 0x00}, /* BE */
21173 {0x00, 0xf0, 0x20, 0xe3}, /* LE */
21174 {0xe3, 0x20, 0xf0, 0x00}, /* BE */
21177 static char const thumb_noop
[2][2][2] =
21180 {0xc0, 0x46}, /* LE */
21181 {0x46, 0xc0}, /* BE */
21184 {0x00, 0xbf}, /* LE */
21185 {0xbf, 0x00} /* BE */
21188 static char const wide_thumb_noop
[2][4] =
21189 { /* Wide Thumb-2 */
21190 {0xaf, 0xf3, 0x00, 0x80}, /* LE */
21191 {0xf3, 0xaf, 0x80, 0x00}, /* BE */
21194 unsigned bytes
, fix
, noop_size
;
21197 const char *narrow_noop
= NULL
;
21202 if (fragP
->fr_type
!= rs_align_code
)
21205 bytes
= fragP
->fr_next
->fr_address
- fragP
->fr_address
- fragP
->fr_fix
;
21206 p
= fragP
->fr_literal
+ fragP
->fr_fix
;
21209 if (bytes
> MAX_MEM_FOR_RS_ALIGN_CODE
)
21210 bytes
&= MAX_MEM_FOR_RS_ALIGN_CODE
;
21212 gas_assert ((fragP
->tc_frag_data
.thumb_mode
& MODE_RECORDED
) != 0);
21214 if (fragP
->tc_frag_data
.thumb_mode
& (~ MODE_RECORDED
))
21216 if (ARM_CPU_HAS_FEATURE (selected_cpu_name
[0]
21217 ? selected_cpu
: arm_arch_none
, arm_ext_v6t2
))
21219 narrow_noop
= thumb_noop
[1][target_big_endian
];
21220 noop
= wide_thumb_noop
[target_big_endian
];
21223 noop
= thumb_noop
[0][target_big_endian
];
21231 noop
= arm_noop
[ARM_CPU_HAS_FEATURE (selected_cpu_name
[0]
21232 ? selected_cpu
: arm_arch_none
,
21234 [target_big_endian
];
21241 fragP
->fr_var
= noop_size
;
21243 if (bytes
& (noop_size
- 1))
21245 fix
= bytes
& (noop_size
- 1);
21247 insert_data_mapping_symbol (state
, fragP
->fr_fix
, fragP
, fix
);
21249 memset (p
, 0, fix
);
21256 if (bytes
& noop_size
)
21258 /* Insert a narrow noop. */
21259 memcpy (p
, narrow_noop
, noop_size
);
21261 bytes
-= noop_size
;
21265 /* Use wide noops for the remainder */
21269 while (bytes
>= noop_size
)
21271 memcpy (p
, noop
, noop_size
);
21273 bytes
-= noop_size
;
21277 fragP
->fr_fix
+= fix
;
21280 /* Called from md_do_align. Used to create an alignment
21281 frag in a code section. */
21284 arm_frag_align_code (int n
, int max
)
21288 /* We assume that there will never be a requirement
21289 to support alignments greater than MAX_MEM_FOR_RS_ALIGN_CODE bytes. */
21290 if (max
> MAX_MEM_FOR_RS_ALIGN_CODE
)
21295 _("alignments greater than %d bytes not supported in .text sections."),
21296 MAX_MEM_FOR_RS_ALIGN_CODE
+ 1);
21297 as_fatal ("%s", err_msg
);
21300 p
= frag_var (rs_align_code
,
21301 MAX_MEM_FOR_RS_ALIGN_CODE
,
21303 (relax_substateT
) max
,
21310 /* Perform target specific initialisation of a frag.
21311 Note - despite the name this initialisation is not done when the frag
21312 is created, but only when its type is assigned. A frag can be created
21313 and used a long time before its type is set, so beware of assuming that
21314 this initialisationis performed first. */
21318 arm_init_frag (fragS
* fragP
, int max_chars ATTRIBUTE_UNUSED
)
21320 /* Record whether this frag is in an ARM or a THUMB area. */
21321 fragP
->tc_frag_data
.thumb_mode
= thumb_mode
| MODE_RECORDED
;
21324 #else /* OBJ_ELF is defined. */
21326 arm_init_frag (fragS
* fragP
, int max_chars
)
21328 int frag_thumb_mode
;
21330 /* If the current ARM vs THUMB mode has not already
21331 been recorded into this frag then do so now. */
21332 if ((fragP
->tc_frag_data
.thumb_mode
& MODE_RECORDED
) == 0)
21333 fragP
->tc_frag_data
.thumb_mode
= thumb_mode
| MODE_RECORDED
;
21335 frag_thumb_mode
= fragP
->tc_frag_data
.thumb_mode
^ MODE_RECORDED
;
21337 /* Record a mapping symbol for alignment frags. We will delete this
21338 later if the alignment ends up empty. */
21339 switch (fragP
->fr_type
)
21342 case rs_align_test
:
21344 mapping_state_2 (MAP_DATA
, max_chars
);
21346 case rs_align_code
:
21347 mapping_state_2 (frag_thumb_mode
? MAP_THUMB
: MAP_ARM
, max_chars
);
21354 /* When we change sections we need to issue a new mapping symbol. */
21357 arm_elf_change_section (void)
21359 /* Link an unlinked unwind index table section to the .text section. */
21360 if (elf_section_type (now_seg
) == SHT_ARM_EXIDX
21361 && elf_linked_to_section (now_seg
) == NULL
)
21362 elf_linked_to_section (now_seg
) = text_section
;
21366 arm_elf_section_type (const char * str
, size_t len
)
21368 if (len
== 5 && strncmp (str
, "exidx", 5) == 0)
21369 return SHT_ARM_EXIDX
;
21374 /* Code to deal with unwinding tables. */
21376 static void add_unwind_adjustsp (offsetT
);
21378 /* Generate any deferred unwind frame offset. */
21381 flush_pending_unwind (void)
21385 offset
= unwind
.pending_offset
;
21386 unwind
.pending_offset
= 0;
21388 add_unwind_adjustsp (offset
);
21391 /* Add an opcode to this list for this function. Two-byte opcodes should
21392 be passed as op[0] << 8 | op[1]. The list of opcodes is built in reverse
21396 add_unwind_opcode (valueT op
, int length
)
21398 /* Add any deferred stack adjustment. */
21399 if (unwind
.pending_offset
)
21400 flush_pending_unwind ();
21402 unwind
.sp_restored
= 0;
21404 if (unwind
.opcode_count
+ length
> unwind
.opcode_alloc
)
21406 unwind
.opcode_alloc
+= ARM_OPCODE_CHUNK_SIZE
;
21407 if (unwind
.opcodes
)
21408 unwind
.opcodes
= (unsigned char *) xrealloc (unwind
.opcodes
,
21409 unwind
.opcode_alloc
);
21411 unwind
.opcodes
= (unsigned char *) xmalloc (unwind
.opcode_alloc
);
21416 unwind
.opcodes
[unwind
.opcode_count
] = op
& 0xff;
21418 unwind
.opcode_count
++;
21422 /* Add unwind opcodes to adjust the stack pointer. */
21425 add_unwind_adjustsp (offsetT offset
)
21429 if (offset
> 0x200)
21431 /* We need at most 5 bytes to hold a 32-bit value in a uleb128. */
21436 /* Long form: 0xb2, uleb128. */
21437 /* This might not fit in a word so add the individual bytes,
21438 remembering the list is built in reverse order. */
21439 o
= (valueT
) ((offset
- 0x204) >> 2);
21441 add_unwind_opcode (0, 1);
21443 /* Calculate the uleb128 encoding of the offset. */
21447 bytes
[n
] = o
& 0x7f;
21453 /* Add the insn. */
21455 add_unwind_opcode (bytes
[n
- 1], 1);
21456 add_unwind_opcode (0xb2, 1);
21458 else if (offset
> 0x100)
21460 /* Two short opcodes. */
21461 add_unwind_opcode (0x3f, 1);
21462 op
= (offset
- 0x104) >> 2;
21463 add_unwind_opcode (op
, 1);
21465 else if (offset
> 0)
21467 /* Short opcode. */
21468 op
= (offset
- 4) >> 2;
21469 add_unwind_opcode (op
, 1);
21471 else if (offset
< 0)
21474 while (offset
> 0x100)
21476 add_unwind_opcode (0x7f, 1);
21479 op
= ((offset
- 4) >> 2) | 0x40;
21480 add_unwind_opcode (op
, 1);
21484 /* Finish the list of unwind opcodes for this function. */
21486 finish_unwind_opcodes (void)
21490 if (unwind
.fp_used
)
21492 /* Adjust sp as necessary. */
21493 unwind
.pending_offset
+= unwind
.fp_offset
- unwind
.frame_size
;
21494 flush_pending_unwind ();
21496 /* After restoring sp from the frame pointer. */
21497 op
= 0x90 | unwind
.fp_reg
;
21498 add_unwind_opcode (op
, 1);
21501 flush_pending_unwind ();
21505 /* Start an exception table entry. If idx is nonzero this is an index table
21509 start_unwind_section (const segT text_seg
, int idx
)
21511 const char * text_name
;
21512 const char * prefix
;
21513 const char * prefix_once
;
21514 const char * group_name
;
21518 size_t sec_name_len
;
21525 prefix
= ELF_STRING_ARM_unwind
;
21526 prefix_once
= ELF_STRING_ARM_unwind_once
;
21527 type
= SHT_ARM_EXIDX
;
21531 prefix
= ELF_STRING_ARM_unwind_info
;
21532 prefix_once
= ELF_STRING_ARM_unwind_info_once
;
21533 type
= SHT_PROGBITS
;
21536 text_name
= segment_name (text_seg
);
21537 if (streq (text_name
, ".text"))
21540 if (strncmp (text_name
, ".gnu.linkonce.t.",
21541 strlen (".gnu.linkonce.t.")) == 0)
21543 prefix
= prefix_once
;
21544 text_name
+= strlen (".gnu.linkonce.t.");
21547 prefix_len
= strlen (prefix
);
21548 text_len
= strlen (text_name
);
21549 sec_name_len
= prefix_len
+ text_len
;
21550 sec_name
= (char *) xmalloc (sec_name_len
+ 1);
21551 memcpy (sec_name
, prefix
, prefix_len
);
21552 memcpy (sec_name
+ prefix_len
, text_name
, text_len
);
21553 sec_name
[prefix_len
+ text_len
] = '\0';
21559 /* Handle COMDAT group. */
21560 if (prefix
!= prefix_once
&& (text_seg
->flags
& SEC_LINK_ONCE
) != 0)
21562 group_name
= elf_group_name (text_seg
);
21563 if (group_name
== NULL
)
21565 as_bad (_("Group section `%s' has no group signature"),
21566 segment_name (text_seg
));
21567 ignore_rest_of_line ();
21570 flags
|= SHF_GROUP
;
21574 obj_elf_change_section (sec_name
, type
, flags
, 0, group_name
, linkonce
, 0);
21576 /* Set the section link for index tables. */
21578 elf_linked_to_section (now_seg
) = text_seg
;
21582 /* Start an unwind table entry. HAVE_DATA is nonzero if we have additional
21583 personality routine data. Returns zero, or the index table value for
21584 an inline entry. */
21587 create_unwind_entry (int have_data
)
21592 /* The current word of data. */
21594 /* The number of bytes left in this word. */
21597 finish_unwind_opcodes ();
21599 /* Remember the current text section. */
21600 unwind
.saved_seg
= now_seg
;
21601 unwind
.saved_subseg
= now_subseg
;
21603 start_unwind_section (now_seg
, 0);
21605 if (unwind
.personality_routine
== NULL
)
21607 if (unwind
.personality_index
== -2)
21610 as_bad (_("handlerdata in cantunwind frame"));
21611 return 1; /* EXIDX_CANTUNWIND. */
21614 /* Use a default personality routine if none is specified. */
21615 if (unwind
.personality_index
== -1)
21617 if (unwind
.opcode_count
> 3)
21618 unwind
.personality_index
= 1;
21620 unwind
.personality_index
= 0;
21623 /* Space for the personality routine entry. */
21624 if (unwind
.personality_index
== 0)
21626 if (unwind
.opcode_count
> 3)
21627 as_bad (_("too many unwind opcodes for personality routine 0"));
21631 /* All the data is inline in the index table. */
21634 while (unwind
.opcode_count
> 0)
21636 unwind
.opcode_count
--;
21637 data
= (data
<< 8) | unwind
.opcodes
[unwind
.opcode_count
];
21641 /* Pad with "finish" opcodes. */
21643 data
= (data
<< 8) | 0xb0;
21650 /* We get two opcodes "free" in the first word. */
21651 size
= unwind
.opcode_count
- 2;
21655 /* PR 16765: Missing or misplaced unwind directives can trigger this. */
21656 if (unwind
.personality_index
!= -1)
21658 as_bad (_("attempt to recreate an unwind entry"));
21662 /* An extra byte is required for the opcode count. */
21663 size
= unwind
.opcode_count
+ 1;
21666 size
= (size
+ 3) >> 2;
21668 as_bad (_("too many unwind opcodes"));
21670 frag_align (2, 0, 0);
21671 record_alignment (now_seg
, 2);
21672 unwind
.table_entry
= expr_build_dot ();
21674 /* Allocate the table entry. */
21675 ptr
= frag_more ((size
<< 2) + 4);
21676 /* PR 13449: Zero the table entries in case some of them are not used. */
21677 memset (ptr
, 0, (size
<< 2) + 4);
21678 where
= frag_now_fix () - ((size
<< 2) + 4);
21680 switch (unwind
.personality_index
)
21683 /* ??? Should this be a PLT generating relocation? */
21684 /* Custom personality routine. */
21685 fix_new (frag_now
, where
, 4, unwind
.personality_routine
, 0, 1,
21686 BFD_RELOC_ARM_PREL31
);
21691 /* Set the first byte to the number of additional words. */
21692 data
= size
> 0 ? size
- 1 : 0;
21696 /* ABI defined personality routines. */
21698 /* Three opcodes bytes are packed into the first word. */
21705 /* The size and first two opcode bytes go in the first word. */
21706 data
= ((0x80 + unwind
.personality_index
) << 8) | size
;
21711 /* Should never happen. */
21715 /* Pack the opcodes into words (MSB first), reversing the list at the same
21717 while (unwind
.opcode_count
> 0)
21721 md_number_to_chars (ptr
, data
, 4);
21726 unwind
.opcode_count
--;
21728 data
= (data
<< 8) | unwind
.opcodes
[unwind
.opcode_count
];
21731 /* Finish off the last word. */
21734 /* Pad with "finish" opcodes. */
21736 data
= (data
<< 8) | 0xb0;
21738 md_number_to_chars (ptr
, data
, 4);
21743 /* Add an empty descriptor if there is no user-specified data. */
21744 ptr
= frag_more (4);
21745 md_number_to_chars (ptr
, 0, 4);
21752 /* Initialize the DWARF-2 unwind information for this procedure. */
21755 tc_arm_frame_initial_instructions (void)
21757 cfi_add_CFA_def_cfa (REG_SP
, 0);
21759 #endif /* OBJ_ELF */
21761 /* Convert REGNAME to a DWARF-2 register number. */
21764 tc_arm_regname_to_dw2regnum (char *regname
)
21766 int reg
= arm_reg_parse (®name
, REG_TYPE_RN
);
21770 /* PR 16694: Allow VFP registers as well. */
21771 reg
= arm_reg_parse (®name
, REG_TYPE_VFS
);
21775 reg
= arm_reg_parse (®name
, REG_TYPE_VFD
);
21784 tc_pe_dwarf2_emit_offset (symbolS
*symbol
, unsigned int size
)
21788 exp
.X_op
= O_secrel
;
21789 exp
.X_add_symbol
= symbol
;
21790 exp
.X_add_number
= 0;
21791 emit_expr (&exp
, size
);
21795 /* MD interface: Symbol and relocation handling. */
21797 /* Return the address within the segment that a PC-relative fixup is
21798 relative to. For ARM, PC-relative fixups applied to instructions
21799 are generally relative to the location of the fixup plus 8 bytes.
21800 Thumb branches are offset by 4, and Thumb loads relative to PC
21801 require special handling. */
21804 md_pcrel_from_section (fixS
* fixP
, segT seg
)
21806 offsetT base
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
21808 /* If this is pc-relative and we are going to emit a relocation
21809 then we just want to put out any pipeline compensation that the linker
21810 will need. Otherwise we want to use the calculated base.
21811 For WinCE we skip the bias for externals as well, since this
21812 is how the MS ARM-CE assembler behaves and we want to be compatible. */
21814 && ((fixP
->fx_addsy
&& S_GET_SEGMENT (fixP
->fx_addsy
) != seg
)
21815 || (arm_force_relocation (fixP
)
21817 && !S_IS_EXTERNAL (fixP
->fx_addsy
)
21823 switch (fixP
->fx_r_type
)
21825 /* PC relative addressing on the Thumb is slightly odd as the
21826 bottom two bits of the PC are forced to zero for the
21827 calculation. This happens *after* application of the
21828 pipeline offset. However, Thumb adrl already adjusts for
21829 this, so we need not do it again. */
21830 case BFD_RELOC_ARM_THUMB_ADD
:
21833 case BFD_RELOC_ARM_THUMB_OFFSET
:
21834 case BFD_RELOC_ARM_T32_OFFSET_IMM
:
21835 case BFD_RELOC_ARM_T32_ADD_PC12
:
21836 case BFD_RELOC_ARM_T32_CP_OFF_IMM
:
21837 return (base
+ 4) & ~3;
21839 /* Thumb branches are simply offset by +4. */
21840 case BFD_RELOC_THUMB_PCREL_BRANCH7
:
21841 case BFD_RELOC_THUMB_PCREL_BRANCH9
:
21842 case BFD_RELOC_THUMB_PCREL_BRANCH12
:
21843 case BFD_RELOC_THUMB_PCREL_BRANCH20
:
21844 case BFD_RELOC_THUMB_PCREL_BRANCH25
:
21847 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
21849 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
21850 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
21851 && ARM_IS_FUNC (fixP
->fx_addsy
)
21852 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
21853 base
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
21856 /* BLX is like branches above, but forces the low two bits of PC to
21858 case BFD_RELOC_THUMB_PCREL_BLX
:
21860 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
21861 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
21862 && THUMB_IS_FUNC (fixP
->fx_addsy
)
21863 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
21864 base
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
21865 return (base
+ 4) & ~3;
21867 /* ARM mode branches are offset by +8. However, the Windows CE
21868 loader expects the relocation not to take this into account. */
21869 case BFD_RELOC_ARM_PCREL_BLX
:
21871 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
21872 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
21873 && ARM_IS_FUNC (fixP
->fx_addsy
)
21874 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
21875 base
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
21878 case BFD_RELOC_ARM_PCREL_CALL
:
21880 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
21881 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
21882 && THUMB_IS_FUNC (fixP
->fx_addsy
)
21883 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
21884 base
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
21887 case BFD_RELOC_ARM_PCREL_BRANCH
:
21888 case BFD_RELOC_ARM_PCREL_JUMP
:
21889 case BFD_RELOC_ARM_PLT32
:
21891 /* When handling fixups immediately, because we have already
21892 discovered the value of a symbol, or the address of the frag involved
21893 we must account for the offset by +8, as the OS loader will never see the reloc.
21894 see fixup_segment() in write.c
21895 The S_IS_EXTERNAL test handles the case of global symbols.
21896 Those need the calculated base, not just the pipe compensation the linker will need. */
21898 && fixP
->fx_addsy
!= NULL
21899 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
21900 && (S_IS_EXTERNAL (fixP
->fx_addsy
) || !arm_force_relocation (fixP
)))
21908 /* ARM mode loads relative to PC are also offset by +8. Unlike
21909 branches, the Windows CE loader *does* expect the relocation
21910 to take this into account. */
21911 case BFD_RELOC_ARM_OFFSET_IMM
:
21912 case BFD_RELOC_ARM_OFFSET_IMM8
:
21913 case BFD_RELOC_ARM_HWLITERAL
:
21914 case BFD_RELOC_ARM_LITERAL
:
21915 case BFD_RELOC_ARM_CP_OFF_IMM
:
21919 /* Other PC-relative relocations are un-offset. */
21925 static bfd_boolean flag_warn_syms
= TRUE
;
21928 arm_tc_equal_in_insn (int c ATTRIBUTE_UNUSED
, char * name
)
21930 /* PR 18347 - Warn if the user attempts to create a symbol with the same
21931 name as an ARM instruction. Whilst strictly speaking it is allowed, it
21932 does mean that the resulting code might be very confusing to the reader.
21933 Also this warning can be triggered if the user omits an operand before
21934 an immediate address, eg:
21938 GAS treats this as an assignment of the value of the symbol foo to a
21939 symbol LDR, and so (without this code) it will not issue any kind of
21940 warning or error message.
21942 Note - ARM instructions are case-insensitive but the strings in the hash
21943 table are all stored in lower case, so we must first ensure that name is
21945 if (flag_warn_syms
&& arm_ops_hsh
)
21947 char * nbuf
= strdup (name
);
21950 for (p
= nbuf
; *p
; p
++)
21952 if (hash_find (arm_ops_hsh
, nbuf
) != NULL
)
21954 static struct hash_control
* already_warned
= NULL
;
21956 if (already_warned
== NULL
)
21957 already_warned
= hash_new ();
21958 /* Only warn about the symbol once. To keep the code
21959 simple we let hash_insert do the lookup for us. */
21960 if (hash_insert (already_warned
, name
, NULL
) == NULL
)
21961 as_warn (_("[-mwarn-syms]: Assignment makes a symbol match an ARM instruction: %s"), name
);
21970 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
21971 Otherwise we have no need to default values of symbols. */
21974 md_undefined_symbol (char * name ATTRIBUTE_UNUSED
)
21977 if (name
[0] == '_' && name
[1] == 'G'
21978 && streq (name
, GLOBAL_OFFSET_TABLE_NAME
))
21982 if (symbol_find (name
))
21983 as_bad (_("GOT already in the symbol table"));
21985 GOT_symbol
= symbol_new (name
, undefined_section
,
21986 (valueT
) 0, & zero_address_frag
);
21996 /* Subroutine of md_apply_fix. Check to see if an immediate can be
21997 computed as two separate immediate values, added together. We
21998 already know that this value cannot be computed by just one ARM
22001 static unsigned int
22002 validate_immediate_twopart (unsigned int val
,
22003 unsigned int * highpart
)
22008 for (i
= 0; i
< 32; i
+= 2)
22009 if (((a
= rotate_left (val
, i
)) & 0xff) != 0)
22015 * highpart
= (a
>> 8) | ((i
+ 24) << 7);
22017 else if (a
& 0xff0000)
22019 if (a
& 0xff000000)
22021 * highpart
= (a
>> 16) | ((i
+ 16) << 7);
22025 gas_assert (a
& 0xff000000);
22026 * highpart
= (a
>> 24) | ((i
+ 8) << 7);
22029 return (a
& 0xff) | (i
<< 7);
22036 validate_offset_imm (unsigned int val
, int hwse
)
22038 if ((hwse
&& val
> 255) || val
> 4095)
22043 /* Subroutine of md_apply_fix. Do those data_ops which can take a
22044 negative immediate constant by altering the instruction. A bit of
22049 by inverting the second operand, and
22052 by negating the second operand. */
22055 negate_data_op (unsigned long * instruction
,
22056 unsigned long value
)
22059 unsigned long negated
, inverted
;
22061 negated
= encode_arm_immediate (-value
);
22062 inverted
= encode_arm_immediate (~value
);
22064 op
= (*instruction
>> DATA_OP_SHIFT
) & 0xf;
22067 /* First negates. */
22068 case OPCODE_SUB
: /* ADD <-> SUB */
22069 new_inst
= OPCODE_ADD
;
22074 new_inst
= OPCODE_SUB
;
22078 case OPCODE_CMP
: /* CMP <-> CMN */
22079 new_inst
= OPCODE_CMN
;
22084 new_inst
= OPCODE_CMP
;
22088 /* Now Inverted ops. */
22089 case OPCODE_MOV
: /* MOV <-> MVN */
22090 new_inst
= OPCODE_MVN
;
22095 new_inst
= OPCODE_MOV
;
22099 case OPCODE_AND
: /* AND <-> BIC */
22100 new_inst
= OPCODE_BIC
;
22105 new_inst
= OPCODE_AND
;
22109 case OPCODE_ADC
: /* ADC <-> SBC */
22110 new_inst
= OPCODE_SBC
;
22115 new_inst
= OPCODE_ADC
;
22119 /* We cannot do anything. */
22124 if (value
== (unsigned) FAIL
)
22127 *instruction
&= OPCODE_MASK
;
22128 *instruction
|= new_inst
<< DATA_OP_SHIFT
;
22132 /* Like negate_data_op, but for Thumb-2. */
22134 static unsigned int
22135 thumb32_negate_data_op (offsetT
*instruction
, unsigned int value
)
22139 unsigned int negated
, inverted
;
22141 negated
= encode_thumb32_immediate (-value
);
22142 inverted
= encode_thumb32_immediate (~value
);
22144 rd
= (*instruction
>> 8) & 0xf;
22145 op
= (*instruction
>> T2_DATA_OP_SHIFT
) & 0xf;
22148 /* ADD <-> SUB. Includes CMP <-> CMN. */
22149 case T2_OPCODE_SUB
:
22150 new_inst
= T2_OPCODE_ADD
;
22154 case T2_OPCODE_ADD
:
22155 new_inst
= T2_OPCODE_SUB
;
22159 /* ORR <-> ORN. Includes MOV <-> MVN. */
22160 case T2_OPCODE_ORR
:
22161 new_inst
= T2_OPCODE_ORN
;
22165 case T2_OPCODE_ORN
:
22166 new_inst
= T2_OPCODE_ORR
;
22170 /* AND <-> BIC. TST has no inverted equivalent. */
22171 case T2_OPCODE_AND
:
22172 new_inst
= T2_OPCODE_BIC
;
22179 case T2_OPCODE_BIC
:
22180 new_inst
= T2_OPCODE_AND
;
22185 case T2_OPCODE_ADC
:
22186 new_inst
= T2_OPCODE_SBC
;
22190 case T2_OPCODE_SBC
:
22191 new_inst
= T2_OPCODE_ADC
;
22195 /* We cannot do anything. */
22200 if (value
== (unsigned int)FAIL
)
22203 *instruction
&= T2_OPCODE_MASK
;
22204 *instruction
|= new_inst
<< T2_DATA_OP_SHIFT
;
22208 /* Read a 32-bit thumb instruction from buf. */
22209 static unsigned long
22210 get_thumb32_insn (char * buf
)
22212 unsigned long insn
;
22213 insn
= md_chars_to_number (buf
, THUMB_SIZE
) << 16;
22214 insn
|= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
22220 /* We usually want to set the low bit on the address of thumb function
22221 symbols. In particular .word foo - . should have the low bit set.
22222 Generic code tries to fold the difference of two symbols to
22223 a constant. Prevent this and force a relocation when the first symbols
22224 is a thumb function. */
22227 arm_optimize_expr (expressionS
*l
, operatorT op
, expressionS
*r
)
22229 if (op
== O_subtract
22230 && l
->X_op
== O_symbol
22231 && r
->X_op
== O_symbol
22232 && THUMB_IS_FUNC (l
->X_add_symbol
))
22234 l
->X_op
= O_subtract
;
22235 l
->X_op_symbol
= r
->X_add_symbol
;
22236 l
->X_add_number
-= r
->X_add_number
;
22240 /* Process as normal. */
22244 /* Encode Thumb2 unconditional branches and calls. The encoding
22245 for the 2 are identical for the immediate values. */
22248 encode_thumb2_b_bl_offset (char * buf
, offsetT value
)
22250 #define T2I1I2MASK ((1 << 13) | (1 << 11))
22253 addressT S
, I1
, I2
, lo
, hi
;
22255 S
= (value
>> 24) & 0x01;
22256 I1
= (value
>> 23) & 0x01;
22257 I2
= (value
>> 22) & 0x01;
22258 hi
= (value
>> 12) & 0x3ff;
22259 lo
= (value
>> 1) & 0x7ff;
22260 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
22261 newval2
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
22262 newval
|= (S
<< 10) | hi
;
22263 newval2
&= ~T2I1I2MASK
;
22264 newval2
|= (((I1
^ S
) << 13) | ((I2
^ S
) << 11) | lo
) ^ T2I1I2MASK
;
22265 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
22266 md_number_to_chars (buf
+ THUMB_SIZE
, newval2
, THUMB_SIZE
);
22270 md_apply_fix (fixS
* fixP
,
22274 offsetT value
= * valP
;
22276 unsigned int newimm
;
22277 unsigned long temp
;
22279 char * buf
= fixP
->fx_where
+ fixP
->fx_frag
->fr_literal
;
22281 gas_assert (fixP
->fx_r_type
<= BFD_RELOC_UNUSED
);
22283 /* Note whether this will delete the relocation. */
22285 if (fixP
->fx_addsy
== 0 && !fixP
->fx_pcrel
)
22288 /* On a 64-bit host, silently truncate 'value' to 32 bits for
22289 consistency with the behaviour on 32-bit hosts. Remember value
22291 value
&= 0xffffffff;
22292 value
^= 0x80000000;
22293 value
-= 0x80000000;
22296 fixP
->fx_addnumber
= value
;
22298 /* Same treatment for fixP->fx_offset. */
22299 fixP
->fx_offset
&= 0xffffffff;
22300 fixP
->fx_offset
^= 0x80000000;
22301 fixP
->fx_offset
-= 0x80000000;
22303 switch (fixP
->fx_r_type
)
22305 case BFD_RELOC_NONE
:
22306 /* This will need to go in the object file. */
22310 case BFD_RELOC_ARM_IMMEDIATE
:
22311 /* We claim that this fixup has been processed here,
22312 even if in fact we generate an error because we do
22313 not have a reloc for it, so tc_gen_reloc will reject it. */
22316 if (fixP
->fx_addsy
)
22318 const char *msg
= 0;
22320 if (! S_IS_DEFINED (fixP
->fx_addsy
))
22321 msg
= _("undefined symbol %s used as an immediate value");
22322 else if (S_GET_SEGMENT (fixP
->fx_addsy
) != seg
)
22323 msg
= _("symbol %s is in a different section");
22324 else if (S_IS_WEAK (fixP
->fx_addsy
))
22325 msg
= _("symbol %s is weak and may be overridden later");
22329 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22330 msg
, S_GET_NAME (fixP
->fx_addsy
));
22335 temp
= md_chars_to_number (buf
, INSN_SIZE
);
22337 /* If the offset is negative, we should use encoding A2 for ADR. */
22338 if ((temp
& 0xfff0000) == 0x28f0000 && value
< 0)
22339 newimm
= negate_data_op (&temp
, value
);
22342 newimm
= encode_arm_immediate (value
);
22344 /* If the instruction will fail, see if we can fix things up by
22345 changing the opcode. */
22346 if (newimm
== (unsigned int) FAIL
)
22347 newimm
= negate_data_op (&temp
, value
);
22350 if (newimm
== (unsigned int) FAIL
)
22352 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22353 _("invalid constant (%lx) after fixup"),
22354 (unsigned long) value
);
22358 newimm
|= (temp
& 0xfffff000);
22359 md_number_to_chars (buf
, (valueT
) newimm
, INSN_SIZE
);
22362 case BFD_RELOC_ARM_ADRL_IMMEDIATE
:
22364 unsigned int highpart
= 0;
22365 unsigned int newinsn
= 0xe1a00000; /* nop. */
22367 if (fixP
->fx_addsy
)
22369 const char *msg
= 0;
22371 if (! S_IS_DEFINED (fixP
->fx_addsy
))
22372 msg
= _("undefined symbol %s used as an immediate value");
22373 else if (S_GET_SEGMENT (fixP
->fx_addsy
) != seg
)
22374 msg
= _("symbol %s is in a different section");
22375 else if (S_IS_WEAK (fixP
->fx_addsy
))
22376 msg
= _("symbol %s is weak and may be overridden later");
22380 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22381 msg
, S_GET_NAME (fixP
->fx_addsy
));
22386 newimm
= encode_arm_immediate (value
);
22387 temp
= md_chars_to_number (buf
, INSN_SIZE
);
22389 /* If the instruction will fail, see if we can fix things up by
22390 changing the opcode. */
22391 if (newimm
== (unsigned int) FAIL
22392 && (newimm
= negate_data_op (& temp
, value
)) == (unsigned int) FAIL
)
22394 /* No ? OK - try using two ADD instructions to generate
22396 newimm
= validate_immediate_twopart (value
, & highpart
);
22398 /* Yes - then make sure that the second instruction is
22400 if (newimm
!= (unsigned int) FAIL
)
22402 /* Still No ? Try using a negated value. */
22403 else if ((newimm
= validate_immediate_twopart (- value
, & highpart
)) != (unsigned int) FAIL
)
22404 temp
= newinsn
= (temp
& OPCODE_MASK
) | OPCODE_SUB
<< DATA_OP_SHIFT
;
22405 /* Otherwise - give up. */
22408 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22409 _("unable to compute ADRL instructions for PC offset of 0x%lx"),
22414 /* Replace the first operand in the 2nd instruction (which
22415 is the PC) with the destination register. We have
22416 already added in the PC in the first instruction and we
22417 do not want to do it again. */
22418 newinsn
&= ~ 0xf0000;
22419 newinsn
|= ((newinsn
& 0x0f000) << 4);
22422 newimm
|= (temp
& 0xfffff000);
22423 md_number_to_chars (buf
, (valueT
) newimm
, INSN_SIZE
);
22425 highpart
|= (newinsn
& 0xfffff000);
22426 md_number_to_chars (buf
+ INSN_SIZE
, (valueT
) highpart
, INSN_SIZE
);
22430 case BFD_RELOC_ARM_OFFSET_IMM
:
22431 if (!fixP
->fx_done
&& seg
->use_rela_p
)
22434 case BFD_RELOC_ARM_LITERAL
:
22440 if (validate_offset_imm (value
, 0) == FAIL
)
22442 if (fixP
->fx_r_type
== BFD_RELOC_ARM_LITERAL
)
22443 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22444 _("invalid literal constant: pool needs to be closer"));
22446 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22447 _("bad immediate value for offset (%ld)"),
22452 newval
= md_chars_to_number (buf
, INSN_SIZE
);
22454 newval
&= 0xfffff000;
22457 newval
&= 0xff7ff000;
22458 newval
|= value
| (sign
? INDEX_UP
: 0);
22460 md_number_to_chars (buf
, newval
, INSN_SIZE
);
22463 case BFD_RELOC_ARM_OFFSET_IMM8
:
22464 case BFD_RELOC_ARM_HWLITERAL
:
22470 if (validate_offset_imm (value
, 1) == FAIL
)
22472 if (fixP
->fx_r_type
== BFD_RELOC_ARM_HWLITERAL
)
22473 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22474 _("invalid literal constant: pool needs to be closer"));
22476 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22477 _("bad immediate value for 8-bit offset (%ld)"),
22482 newval
= md_chars_to_number (buf
, INSN_SIZE
);
22484 newval
&= 0xfffff0f0;
22487 newval
&= 0xff7ff0f0;
22488 newval
|= ((value
>> 4) << 8) | (value
& 0xf) | (sign
? INDEX_UP
: 0);
22490 md_number_to_chars (buf
, newval
, INSN_SIZE
);
22493 case BFD_RELOC_ARM_T32_OFFSET_U8
:
22494 if (value
< 0 || value
> 1020 || value
% 4 != 0)
22495 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22496 _("bad immediate value for offset (%ld)"), (long) value
);
22499 newval
= md_chars_to_number (buf
+2, THUMB_SIZE
);
22501 md_number_to_chars (buf
+2, newval
, THUMB_SIZE
);
22504 case BFD_RELOC_ARM_T32_OFFSET_IMM
:
22505 /* This is a complicated relocation used for all varieties of Thumb32
22506 load/store instruction with immediate offset:
22508 1110 100P u1WL NNNN XXXX YYYY iiii iiii - +/-(U) pre/post(P) 8-bit,
22509 *4, optional writeback(W)
22510 (doubleword load/store)
22512 1111 100S uTTL 1111 XXXX iiii iiii iiii - +/-(U) 12-bit PC-rel
22513 1111 100S 0TTL NNNN XXXX 1Pu1 iiii iiii - +/-(U) pre/post(P) 8-bit
22514 1111 100S 0TTL NNNN XXXX 1110 iiii iiii - positive 8-bit (T instruction)
22515 1111 100S 1TTL NNNN XXXX iiii iiii iiii - positive 12-bit
22516 1111 100S 0TTL NNNN XXXX 1100 iiii iiii - negative 8-bit
22518 Uppercase letters indicate bits that are already encoded at
22519 this point. Lowercase letters are our problem. For the
22520 second block of instructions, the secondary opcode nybble
22521 (bits 8..11) is present, and bit 23 is zero, even if this is
22522 a PC-relative operation. */
22523 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
22525 newval
|= md_chars_to_number (buf
+THUMB_SIZE
, THUMB_SIZE
);
22527 if ((newval
& 0xf0000000) == 0xe0000000)
22529 /* Doubleword load/store: 8-bit offset, scaled by 4. */
22531 newval
|= (1 << 23);
22534 if (value
% 4 != 0)
22536 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22537 _("offset not a multiple of 4"));
22543 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22544 _("offset out of range"));
22549 else if ((newval
& 0x000f0000) == 0x000f0000)
22551 /* PC-relative, 12-bit offset. */
22553 newval
|= (1 << 23);
22558 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22559 _("offset out of range"));
22564 else if ((newval
& 0x00000100) == 0x00000100)
22566 /* Writeback: 8-bit, +/- offset. */
22568 newval
|= (1 << 9);
22573 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22574 _("offset out of range"));
22579 else if ((newval
& 0x00000f00) == 0x00000e00)
22581 /* T-instruction: positive 8-bit offset. */
22582 if (value
< 0 || value
> 0xff)
22584 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22585 _("offset out of range"));
22593 /* Positive 12-bit or negative 8-bit offset. */
22597 newval
|= (1 << 23);
22607 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22608 _("offset out of range"));
22615 md_number_to_chars (buf
, (newval
>> 16) & 0xffff, THUMB_SIZE
);
22616 md_number_to_chars (buf
+ THUMB_SIZE
, newval
& 0xffff, THUMB_SIZE
);
22619 case BFD_RELOC_ARM_SHIFT_IMM
:
22620 newval
= md_chars_to_number (buf
, INSN_SIZE
);
22621 if (((unsigned long) value
) > 32
22623 && (((newval
& 0x60) == 0) || (newval
& 0x60) == 0x60)))
22625 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22626 _("shift expression is too large"));
22631 /* Shifts of zero must be done as lsl. */
22633 else if (value
== 32)
22635 newval
&= 0xfffff07f;
22636 newval
|= (value
& 0x1f) << 7;
22637 md_number_to_chars (buf
, newval
, INSN_SIZE
);
22640 case BFD_RELOC_ARM_T32_IMMEDIATE
:
22641 case BFD_RELOC_ARM_T32_ADD_IMM
:
22642 case BFD_RELOC_ARM_T32_IMM12
:
22643 case BFD_RELOC_ARM_T32_ADD_PC12
:
22644 /* We claim that this fixup has been processed here,
22645 even if in fact we generate an error because we do
22646 not have a reloc for it, so tc_gen_reloc will reject it. */
22650 && ! S_IS_DEFINED (fixP
->fx_addsy
))
22652 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22653 _("undefined symbol %s used as an immediate value"),
22654 S_GET_NAME (fixP
->fx_addsy
));
22658 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
22660 newval
|= md_chars_to_number (buf
+2, THUMB_SIZE
);
22663 if (fixP
->fx_r_type
== BFD_RELOC_ARM_T32_IMMEDIATE
22664 || fixP
->fx_r_type
== BFD_RELOC_ARM_T32_ADD_IMM
)
22666 newimm
= encode_thumb32_immediate (value
);
22667 if (newimm
== (unsigned int) FAIL
)
22668 newimm
= thumb32_negate_data_op (&newval
, value
);
22670 if (fixP
->fx_r_type
!= BFD_RELOC_ARM_T32_IMMEDIATE
22671 && newimm
== (unsigned int) FAIL
)
22673 /* Turn add/sum into addw/subw. */
22674 if (fixP
->fx_r_type
== BFD_RELOC_ARM_T32_ADD_IMM
)
22675 newval
= (newval
& 0xfeffffff) | 0x02000000;
22676 /* No flat 12-bit imm encoding for addsw/subsw. */
22677 if ((newval
& 0x00100000) == 0)
22679 /* 12 bit immediate for addw/subw. */
22683 newval
^= 0x00a00000;
22686 newimm
= (unsigned int) FAIL
;
22692 if (newimm
== (unsigned int)FAIL
)
22694 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22695 _("invalid constant (%lx) after fixup"),
22696 (unsigned long) value
);
22700 newval
|= (newimm
& 0x800) << 15;
22701 newval
|= (newimm
& 0x700) << 4;
22702 newval
|= (newimm
& 0x0ff);
22704 md_number_to_chars (buf
, (valueT
) ((newval
>> 16) & 0xffff), THUMB_SIZE
);
22705 md_number_to_chars (buf
+2, (valueT
) (newval
& 0xffff), THUMB_SIZE
);
22708 case BFD_RELOC_ARM_SMC
:
22709 if (((unsigned long) value
) > 0xffff)
22710 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22711 _("invalid smc expression"));
22712 newval
= md_chars_to_number (buf
, INSN_SIZE
);
22713 newval
|= (value
& 0xf) | ((value
& 0xfff0) << 4);
22714 md_number_to_chars (buf
, newval
, INSN_SIZE
);
22717 case BFD_RELOC_ARM_HVC
:
22718 if (((unsigned long) value
) > 0xffff)
22719 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22720 _("invalid hvc expression"));
22721 newval
= md_chars_to_number (buf
, INSN_SIZE
);
22722 newval
|= (value
& 0xf) | ((value
& 0xfff0) << 4);
22723 md_number_to_chars (buf
, newval
, INSN_SIZE
);
22726 case BFD_RELOC_ARM_SWI
:
22727 if (fixP
->tc_fix_data
!= 0)
22729 if (((unsigned long) value
) > 0xff)
22730 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22731 _("invalid swi expression"));
22732 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
22734 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
22738 if (((unsigned long) value
) > 0x00ffffff)
22739 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22740 _("invalid swi expression"));
22741 newval
= md_chars_to_number (buf
, INSN_SIZE
);
22743 md_number_to_chars (buf
, newval
, INSN_SIZE
);
22747 case BFD_RELOC_ARM_MULTI
:
22748 if (((unsigned long) value
) > 0xffff)
22749 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22750 _("invalid expression in load/store multiple"));
22751 newval
= value
| md_chars_to_number (buf
, INSN_SIZE
);
22752 md_number_to_chars (buf
, newval
, INSN_SIZE
);
22756 case BFD_RELOC_ARM_PCREL_CALL
:
22758 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
)
22760 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
22761 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
22762 && THUMB_IS_FUNC (fixP
->fx_addsy
))
22763 /* Flip the bl to blx. This is a simple flip
22764 bit here because we generate PCREL_CALL for
22765 unconditional bls. */
22767 newval
= md_chars_to_number (buf
, INSN_SIZE
);
22768 newval
= newval
| 0x10000000;
22769 md_number_to_chars (buf
, newval
, INSN_SIZE
);
22775 goto arm_branch_common
;
22777 case BFD_RELOC_ARM_PCREL_JUMP
:
22778 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
)
22780 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
22781 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
22782 && THUMB_IS_FUNC (fixP
->fx_addsy
))
22784 /* This would map to a bl<cond>, b<cond>,
22785 b<always> to a Thumb function. We
22786 need to force a relocation for this particular
22788 newval
= md_chars_to_number (buf
, INSN_SIZE
);
22792 case BFD_RELOC_ARM_PLT32
:
22794 case BFD_RELOC_ARM_PCREL_BRANCH
:
22796 goto arm_branch_common
;
22798 case BFD_RELOC_ARM_PCREL_BLX
:
22801 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
)
22803 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
22804 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
22805 && ARM_IS_FUNC (fixP
->fx_addsy
))
22807 /* Flip the blx to a bl and warn. */
22808 const char *name
= S_GET_NAME (fixP
->fx_addsy
);
22809 newval
= 0xeb000000;
22810 as_warn_where (fixP
->fx_file
, fixP
->fx_line
,
22811 _("blx to '%s' an ARM ISA state function changed to bl"),
22813 md_number_to_chars (buf
, newval
, INSN_SIZE
);
22819 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
)
22820 fixP
->fx_r_type
= BFD_RELOC_ARM_PCREL_CALL
;
22824 /* We are going to store value (shifted right by two) in the
22825 instruction, in a 24 bit, signed field. Bits 26 through 32 either
22826 all clear or all set and bit 0 must be clear. For B/BL bit 1 must
22827 also be be clear. */
22829 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22830 _("misaligned branch destination"));
22831 if ((value
& (offsetT
)0xfe000000) != (offsetT
)0
22832 && (value
& (offsetT
)0xfe000000) != (offsetT
)0xfe000000)
22833 as_bad_where (fixP
->fx_file
, fixP
->fx_line
, BAD_RANGE
);
22835 if (fixP
->fx_done
|| !seg
->use_rela_p
)
22837 newval
= md_chars_to_number (buf
, INSN_SIZE
);
22838 newval
|= (value
>> 2) & 0x00ffffff;
22839 /* Set the H bit on BLX instructions. */
22843 newval
|= 0x01000000;
22845 newval
&= ~0x01000000;
22847 md_number_to_chars (buf
, newval
, INSN_SIZE
);
22851 case BFD_RELOC_THUMB_PCREL_BRANCH7
: /* CBZ */
22852 /* CBZ can only branch forward. */
22854 /* Attempts to use CBZ to branch to the next instruction
22855 (which, strictly speaking, are prohibited) will be turned into
22858 FIXME: It may be better to remove the instruction completely and
22859 perform relaxation. */
22862 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
22863 newval
= 0xbf00; /* NOP encoding T1 */
22864 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
22869 as_bad_where (fixP
->fx_file
, fixP
->fx_line
, BAD_RANGE
);
22871 if (fixP
->fx_done
|| !seg
->use_rela_p
)
22873 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
22874 newval
|= ((value
& 0x3e) << 2) | ((value
& 0x40) << 3);
22875 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
22880 case BFD_RELOC_THUMB_PCREL_BRANCH9
: /* Conditional branch. */
22881 if ((value
& ~0xff) && ((value
& ~0xff) != ~0xff))
22882 as_bad_where (fixP
->fx_file
, fixP
->fx_line
, BAD_RANGE
);
22884 if (fixP
->fx_done
|| !seg
->use_rela_p
)
22886 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
22887 newval
|= (value
& 0x1ff) >> 1;
22888 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
22892 case BFD_RELOC_THUMB_PCREL_BRANCH12
: /* Unconditional branch. */
22893 if ((value
& ~0x7ff) && ((value
& ~0x7ff) != ~0x7ff))
22894 as_bad_where (fixP
->fx_file
, fixP
->fx_line
, BAD_RANGE
);
22896 if (fixP
->fx_done
|| !seg
->use_rela_p
)
22898 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
22899 newval
|= (value
& 0xfff) >> 1;
22900 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
22904 case BFD_RELOC_THUMB_PCREL_BRANCH20
:
22906 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
22907 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
22908 && ARM_IS_FUNC (fixP
->fx_addsy
)
22909 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
22911 /* Force a relocation for a branch 20 bits wide. */
22914 if ((value
& ~0x1fffff) && ((value
& ~0x0fffff) != ~0x0fffff))
22915 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22916 _("conditional branch out of range"));
22918 if (fixP
->fx_done
|| !seg
->use_rela_p
)
22921 addressT S
, J1
, J2
, lo
, hi
;
22923 S
= (value
& 0x00100000) >> 20;
22924 J2
= (value
& 0x00080000) >> 19;
22925 J1
= (value
& 0x00040000) >> 18;
22926 hi
= (value
& 0x0003f000) >> 12;
22927 lo
= (value
& 0x00000ffe) >> 1;
22929 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
22930 newval2
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
22931 newval
|= (S
<< 10) | hi
;
22932 newval2
|= (J1
<< 13) | (J2
<< 11) | lo
;
22933 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
22934 md_number_to_chars (buf
+ THUMB_SIZE
, newval2
, THUMB_SIZE
);
22938 case BFD_RELOC_THUMB_PCREL_BLX
:
22939 /* If there is a blx from a thumb state function to
22940 another thumb function flip this to a bl and warn
22944 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
22945 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
22946 && THUMB_IS_FUNC (fixP
->fx_addsy
))
22948 const char *name
= S_GET_NAME (fixP
->fx_addsy
);
22949 as_warn_where (fixP
->fx_file
, fixP
->fx_line
,
22950 _("blx to Thumb func '%s' from Thumb ISA state changed to bl"),
22952 newval
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
22953 newval
= newval
| 0x1000;
22954 md_number_to_chars (buf
+THUMB_SIZE
, newval
, THUMB_SIZE
);
22955 fixP
->fx_r_type
= BFD_RELOC_THUMB_PCREL_BRANCH23
;
22960 goto thumb_bl_common
;
22962 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
22963 /* A bl from Thumb state ISA to an internal ARM state function
22964 is converted to a blx. */
22966 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
22967 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
22968 && ARM_IS_FUNC (fixP
->fx_addsy
)
22969 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
22971 newval
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
22972 newval
= newval
& ~0x1000;
22973 md_number_to_chars (buf
+THUMB_SIZE
, newval
, THUMB_SIZE
);
22974 fixP
->fx_r_type
= BFD_RELOC_THUMB_PCREL_BLX
;
22980 if (fixP
->fx_r_type
== BFD_RELOC_THUMB_PCREL_BLX
)
22981 /* For a BLX instruction, make sure that the relocation is rounded up
22982 to a word boundary. This follows the semantics of the instruction
22983 which specifies that bit 1 of the target address will come from bit
22984 1 of the base address. */
22985 value
= (value
+ 3) & ~ 3;
22988 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
22989 && fixP
->fx_r_type
== BFD_RELOC_THUMB_PCREL_BLX
)
22990 fixP
->fx_r_type
= BFD_RELOC_THUMB_PCREL_BRANCH23
;
22993 if ((value
& ~0x3fffff) && ((value
& ~0x3fffff) != ~0x3fffff))
22995 if (!(ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2
)))
22996 as_bad_where (fixP
->fx_file
, fixP
->fx_line
, BAD_RANGE
);
22997 else if ((value
& ~0x1ffffff)
22998 && ((value
& ~0x1ffffff) != ~0x1ffffff))
22999 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23000 _("Thumb2 branch out of range"));
23003 if (fixP
->fx_done
|| !seg
->use_rela_p
)
23004 encode_thumb2_b_bl_offset (buf
, value
);
23008 case BFD_RELOC_THUMB_PCREL_BRANCH25
:
23009 if ((value
& ~0x0ffffff) && ((value
& ~0x0ffffff) != ~0x0ffffff))
23010 as_bad_where (fixP
->fx_file
, fixP
->fx_line
, BAD_RANGE
);
23012 if (fixP
->fx_done
|| !seg
->use_rela_p
)
23013 encode_thumb2_b_bl_offset (buf
, value
);
23018 if (fixP
->fx_done
|| !seg
->use_rela_p
)
23023 if (fixP
->fx_done
|| !seg
->use_rela_p
)
23024 md_number_to_chars (buf
, value
, 2);
23028 case BFD_RELOC_ARM_TLS_CALL
:
23029 case BFD_RELOC_ARM_THM_TLS_CALL
:
23030 case BFD_RELOC_ARM_TLS_DESCSEQ
:
23031 case BFD_RELOC_ARM_THM_TLS_DESCSEQ
:
23032 case BFD_RELOC_ARM_TLS_GOTDESC
:
23033 case BFD_RELOC_ARM_TLS_GD32
:
23034 case BFD_RELOC_ARM_TLS_LE32
:
23035 case BFD_RELOC_ARM_TLS_IE32
:
23036 case BFD_RELOC_ARM_TLS_LDM32
:
23037 case BFD_RELOC_ARM_TLS_LDO32
:
23038 S_SET_THREAD_LOCAL (fixP
->fx_addsy
);
23041 case BFD_RELOC_ARM_GOT32
:
23042 case BFD_RELOC_ARM_GOTOFF
:
23045 case BFD_RELOC_ARM_GOT_PREL
:
23046 if (fixP
->fx_done
|| !seg
->use_rela_p
)
23047 md_number_to_chars (buf
, value
, 4);
23050 case BFD_RELOC_ARM_TARGET2
:
23051 /* TARGET2 is not partial-inplace, so we need to write the
23052 addend here for REL targets, because it won't be written out
23053 during reloc processing later. */
23054 if (fixP
->fx_done
|| !seg
->use_rela_p
)
23055 md_number_to_chars (buf
, fixP
->fx_offset
, 4);
23059 case BFD_RELOC_RVA
:
23061 case BFD_RELOC_ARM_TARGET1
:
23062 case BFD_RELOC_ARM_ROSEGREL32
:
23063 case BFD_RELOC_ARM_SBREL32
:
23064 case BFD_RELOC_32_PCREL
:
23066 case BFD_RELOC_32_SECREL
:
23068 if (fixP
->fx_done
|| !seg
->use_rela_p
)
23070 /* For WinCE we only do this for pcrel fixups. */
23071 if (fixP
->fx_done
|| fixP
->fx_pcrel
)
23073 md_number_to_chars (buf
, value
, 4);
23077 case BFD_RELOC_ARM_PREL31
:
23078 if (fixP
->fx_done
|| !seg
->use_rela_p
)
23080 newval
= md_chars_to_number (buf
, 4) & 0x80000000;
23081 if ((value
^ (value
>> 1)) & 0x40000000)
23083 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23084 _("rel31 relocation overflow"));
23086 newval
|= value
& 0x7fffffff;
23087 md_number_to_chars (buf
, newval
, 4);
23092 case BFD_RELOC_ARM_CP_OFF_IMM
:
23093 case BFD_RELOC_ARM_T32_CP_OFF_IMM
:
23094 if (value
< -1023 || value
> 1023 || (value
& 3))
23095 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23096 _("co-processor offset out of range"));
23101 if (fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM
23102 || fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM_S2
)
23103 newval
= md_chars_to_number (buf
, INSN_SIZE
);
23105 newval
= get_thumb32_insn (buf
);
23107 newval
&= 0xffffff00;
23110 newval
&= 0xff7fff00;
23111 newval
|= (value
>> 2) | (sign
? INDEX_UP
: 0);
23113 if (fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM
23114 || fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM_S2
)
23115 md_number_to_chars (buf
, newval
, INSN_SIZE
);
23117 put_thumb32_insn (buf
, newval
);
23120 case BFD_RELOC_ARM_CP_OFF_IMM_S2
:
23121 case BFD_RELOC_ARM_T32_CP_OFF_IMM_S2
:
23122 if (value
< -255 || value
> 255)
23123 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23124 _("co-processor offset out of range"));
23126 goto cp_off_common
;
23128 case BFD_RELOC_ARM_THUMB_OFFSET
:
23129 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
23130 /* Exactly what ranges, and where the offset is inserted depends
23131 on the type of instruction, we can establish this from the
23133 switch (newval
>> 12)
23135 case 4: /* PC load. */
23136 /* Thumb PC loads are somewhat odd, bit 1 of the PC is
23137 forced to zero for these loads; md_pcrel_from has already
23138 compensated for this. */
23140 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23141 _("invalid offset, target not word aligned (0x%08lX)"),
23142 (((unsigned long) fixP
->fx_frag
->fr_address
23143 + (unsigned long) fixP
->fx_where
) & ~3)
23144 + (unsigned long) value
);
23146 if (value
& ~0x3fc)
23147 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23148 _("invalid offset, value too big (0x%08lX)"),
23151 newval
|= value
>> 2;
23154 case 9: /* SP load/store. */
23155 if (value
& ~0x3fc)
23156 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23157 _("invalid offset, value too big (0x%08lX)"),
23159 newval
|= value
>> 2;
23162 case 6: /* Word load/store. */
23164 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23165 _("invalid offset, value too big (0x%08lX)"),
23167 newval
|= value
<< 4; /* 6 - 2. */
23170 case 7: /* Byte load/store. */
23172 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23173 _("invalid offset, value too big (0x%08lX)"),
23175 newval
|= value
<< 6;
23178 case 8: /* Halfword load/store. */
23180 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23181 _("invalid offset, value too big (0x%08lX)"),
23183 newval
|= value
<< 5; /* 6 - 1. */
23187 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23188 "Unable to process relocation for thumb opcode: %lx",
23189 (unsigned long) newval
);
23192 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
23195 case BFD_RELOC_ARM_THUMB_ADD
:
23196 /* This is a complicated relocation, since we use it for all of
23197 the following immediate relocations:
23201 9bit ADD/SUB SP word-aligned
23202 10bit ADD PC/SP word-aligned
23204 The type of instruction being processed is encoded in the
23211 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
23213 int rd
= (newval
>> 4) & 0xf;
23214 int rs
= newval
& 0xf;
23215 int subtract
= !!(newval
& 0x8000);
23217 /* Check for HI regs, only very restricted cases allowed:
23218 Adjusting SP, and using PC or SP to get an address. */
23219 if ((rd
> 7 && (rd
!= REG_SP
|| rs
!= REG_SP
))
23220 || (rs
> 7 && rs
!= REG_SP
&& rs
!= REG_PC
))
23221 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23222 _("invalid Hi register with immediate"));
23224 /* If value is negative, choose the opposite instruction. */
23228 subtract
= !subtract
;
23230 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23231 _("immediate value out of range"));
23236 if (value
& ~0x1fc)
23237 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23238 _("invalid immediate for stack address calculation"));
23239 newval
= subtract
? T_OPCODE_SUB_ST
: T_OPCODE_ADD_ST
;
23240 newval
|= value
>> 2;
23242 else if (rs
== REG_PC
|| rs
== REG_SP
)
23244 /* PR gas/18541. If the addition is for a defined symbol
23245 within range of an ADR instruction then accept it. */
23248 && fixP
->fx_addsy
!= NULL
)
23252 if (! S_IS_DEFINED (fixP
->fx_addsy
)
23253 || S_GET_SEGMENT (fixP
->fx_addsy
) != seg
23254 || S_IS_WEAK (fixP
->fx_addsy
))
23256 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23257 _("address calculation needs a strongly defined nearby symbol"));
23261 offsetT v
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
23263 /* Round up to the next 4-byte boundary. */
23268 v
= S_GET_VALUE (fixP
->fx_addsy
) - v
;
23272 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23273 _("symbol too far away"));
23283 if (subtract
|| value
& ~0x3fc)
23284 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23285 _("invalid immediate for address calculation (value = 0x%08lX)"),
23286 (unsigned long) (subtract
? - value
: value
));
23287 newval
= (rs
== REG_PC
? T_OPCODE_ADD_PC
: T_OPCODE_ADD_SP
);
23289 newval
|= value
>> 2;
23294 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23295 _("immediate value out of range"));
23296 newval
= subtract
? T_OPCODE_SUB_I8
: T_OPCODE_ADD_I8
;
23297 newval
|= (rd
<< 8) | value
;
23302 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23303 _("immediate value out of range"));
23304 newval
= subtract
? T_OPCODE_SUB_I3
: T_OPCODE_ADD_I3
;
23305 newval
|= rd
| (rs
<< 3) | (value
<< 6);
23308 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
23311 case BFD_RELOC_ARM_THUMB_IMM
:
23312 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
23313 if (value
< 0 || value
> 255)
23314 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23315 _("invalid immediate: %ld is out of range"),
23318 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
23321 case BFD_RELOC_ARM_THUMB_SHIFT
:
23322 /* 5bit shift value (0..32). LSL cannot take 32. */
23323 newval
= md_chars_to_number (buf
, THUMB_SIZE
) & 0xf83f;
23324 temp
= newval
& 0xf800;
23325 if (value
< 0 || value
> 32 || (value
== 32 && temp
== T_OPCODE_LSL_I
))
23326 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23327 _("invalid shift value: %ld"), (long) value
);
23328 /* Shifts of zero must be encoded as LSL. */
23330 newval
= (newval
& 0x003f) | T_OPCODE_LSL_I
;
23331 /* Shifts of 32 are encoded as zero. */
23332 else if (value
== 32)
23334 newval
|= value
<< 6;
23335 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
23338 case BFD_RELOC_VTABLE_INHERIT
:
23339 case BFD_RELOC_VTABLE_ENTRY
:
23343 case BFD_RELOC_ARM_MOVW
:
23344 case BFD_RELOC_ARM_MOVT
:
23345 case BFD_RELOC_ARM_THUMB_MOVW
:
23346 case BFD_RELOC_ARM_THUMB_MOVT
:
23347 if (fixP
->fx_done
|| !seg
->use_rela_p
)
23349 /* REL format relocations are limited to a 16-bit addend. */
23350 if (!fixP
->fx_done
)
23352 if (value
< -0x8000 || value
> 0x7fff)
23353 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23354 _("offset out of range"));
23356 else if (fixP
->fx_r_type
== BFD_RELOC_ARM_MOVT
23357 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVT
)
23362 if (fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVW
23363 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVT
)
23365 newval
= get_thumb32_insn (buf
);
23366 newval
&= 0xfbf08f00;
23367 newval
|= (value
& 0xf000) << 4;
23368 newval
|= (value
& 0x0800) << 15;
23369 newval
|= (value
& 0x0700) << 4;
23370 newval
|= (value
& 0x00ff);
23371 put_thumb32_insn (buf
, newval
);
23375 newval
= md_chars_to_number (buf
, 4);
23376 newval
&= 0xfff0f000;
23377 newval
|= value
& 0x0fff;
23378 newval
|= (value
& 0xf000) << 4;
23379 md_number_to_chars (buf
, newval
, 4);
23384 case BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
:
23385 case BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC
:
23386 case BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC
:
23387 case BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
:
23388 gas_assert (!fixP
->fx_done
);
23391 bfd_boolean is_mov
;
23392 bfd_vma encoded_addend
= value
;
23394 /* Check that addend can be encoded in instruction. */
23395 if (!seg
->use_rela_p
&& (value
< 0 || value
> 255))
23396 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23397 _("the offset 0x%08lX is not representable"),
23398 (unsigned long) encoded_addend
);
23400 /* Extract the instruction. */
23401 insn
= md_chars_to_number (buf
, THUMB_SIZE
);
23402 is_mov
= (insn
& 0xf800) == 0x2000;
23407 if (!seg
->use_rela_p
)
23408 insn
|= encoded_addend
;
23414 /* Extract the instruction. */
23415 /* Encoding is the following
23420 /* The following conditions must be true :
23425 rd
= (insn
>> 4) & 0xf;
23427 if ((insn
& 0x8000) || (rd
!= rs
) || rd
> 7)
23428 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23429 _("Unable to process relocation for thumb opcode: %lx"),
23430 (unsigned long) insn
);
23432 /* Encode as ADD immediate8 thumb 1 code. */
23433 insn
= 0x3000 | (rd
<< 8);
23435 /* Place the encoded addend into the first 8 bits of the
23437 if (!seg
->use_rela_p
)
23438 insn
|= encoded_addend
;
23441 /* Update the instruction. */
23442 md_number_to_chars (buf
, insn
, THUMB_SIZE
);
23446 case BFD_RELOC_ARM_ALU_PC_G0_NC
:
23447 case BFD_RELOC_ARM_ALU_PC_G0
:
23448 case BFD_RELOC_ARM_ALU_PC_G1_NC
:
23449 case BFD_RELOC_ARM_ALU_PC_G1
:
23450 case BFD_RELOC_ARM_ALU_PC_G2
:
23451 case BFD_RELOC_ARM_ALU_SB_G0_NC
:
23452 case BFD_RELOC_ARM_ALU_SB_G0
:
23453 case BFD_RELOC_ARM_ALU_SB_G1_NC
:
23454 case BFD_RELOC_ARM_ALU_SB_G1
:
23455 case BFD_RELOC_ARM_ALU_SB_G2
:
23456 gas_assert (!fixP
->fx_done
);
23457 if (!seg
->use_rela_p
)
23460 bfd_vma encoded_addend
;
23461 bfd_vma addend_abs
= abs (value
);
23463 /* Check that the absolute value of the addend can be
23464 expressed as an 8-bit constant plus a rotation. */
23465 encoded_addend
= encode_arm_immediate (addend_abs
);
23466 if (encoded_addend
== (unsigned int) FAIL
)
23467 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23468 _("the offset 0x%08lX is not representable"),
23469 (unsigned long) addend_abs
);
23471 /* Extract the instruction. */
23472 insn
= md_chars_to_number (buf
, INSN_SIZE
);
23474 /* If the addend is positive, use an ADD instruction.
23475 Otherwise use a SUB. Take care not to destroy the S bit. */
23476 insn
&= 0xff1fffff;
23482 /* Place the encoded addend into the first 12 bits of the
23484 insn
&= 0xfffff000;
23485 insn
|= encoded_addend
;
23487 /* Update the instruction. */
23488 md_number_to_chars (buf
, insn
, INSN_SIZE
);
23492 case BFD_RELOC_ARM_LDR_PC_G0
:
23493 case BFD_RELOC_ARM_LDR_PC_G1
:
23494 case BFD_RELOC_ARM_LDR_PC_G2
:
23495 case BFD_RELOC_ARM_LDR_SB_G0
:
23496 case BFD_RELOC_ARM_LDR_SB_G1
:
23497 case BFD_RELOC_ARM_LDR_SB_G2
:
23498 gas_assert (!fixP
->fx_done
);
23499 if (!seg
->use_rela_p
)
23502 bfd_vma addend_abs
= abs (value
);
23504 /* Check that the absolute value of the addend can be
23505 encoded in 12 bits. */
23506 if (addend_abs
>= 0x1000)
23507 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23508 _("bad offset 0x%08lX (only 12 bits available for the magnitude)"),
23509 (unsigned long) addend_abs
);
23511 /* Extract the instruction. */
23512 insn
= md_chars_to_number (buf
, INSN_SIZE
);
23514 /* If the addend is negative, clear bit 23 of the instruction.
23515 Otherwise set it. */
23517 insn
&= ~(1 << 23);
23521 /* Place the absolute value of the addend into the first 12 bits
23522 of the instruction. */
23523 insn
&= 0xfffff000;
23524 insn
|= addend_abs
;
23526 /* Update the instruction. */
23527 md_number_to_chars (buf
, insn
, INSN_SIZE
);
23531 case BFD_RELOC_ARM_LDRS_PC_G0
:
23532 case BFD_RELOC_ARM_LDRS_PC_G1
:
23533 case BFD_RELOC_ARM_LDRS_PC_G2
:
23534 case BFD_RELOC_ARM_LDRS_SB_G0
:
23535 case BFD_RELOC_ARM_LDRS_SB_G1
:
23536 case BFD_RELOC_ARM_LDRS_SB_G2
:
23537 gas_assert (!fixP
->fx_done
);
23538 if (!seg
->use_rela_p
)
23541 bfd_vma addend_abs
= abs (value
);
23543 /* Check that the absolute value of the addend can be
23544 encoded in 8 bits. */
23545 if (addend_abs
>= 0x100)
23546 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23547 _("bad offset 0x%08lX (only 8 bits available for the magnitude)"),
23548 (unsigned long) addend_abs
);
23550 /* Extract the instruction. */
23551 insn
= md_chars_to_number (buf
, INSN_SIZE
);
23553 /* If the addend is negative, clear bit 23 of the instruction.
23554 Otherwise set it. */
23556 insn
&= ~(1 << 23);
23560 /* Place the first four bits of the absolute value of the addend
23561 into the first 4 bits of the instruction, and the remaining
23562 four into bits 8 .. 11. */
23563 insn
&= 0xfffff0f0;
23564 insn
|= (addend_abs
& 0xf) | ((addend_abs
& 0xf0) << 4);
23566 /* Update the instruction. */
23567 md_number_to_chars (buf
, insn
, INSN_SIZE
);
23571 case BFD_RELOC_ARM_LDC_PC_G0
:
23572 case BFD_RELOC_ARM_LDC_PC_G1
:
23573 case BFD_RELOC_ARM_LDC_PC_G2
:
23574 case BFD_RELOC_ARM_LDC_SB_G0
:
23575 case BFD_RELOC_ARM_LDC_SB_G1
:
23576 case BFD_RELOC_ARM_LDC_SB_G2
:
23577 gas_assert (!fixP
->fx_done
);
23578 if (!seg
->use_rela_p
)
23581 bfd_vma addend_abs
= abs (value
);
23583 /* Check that the absolute value of the addend is a multiple of
23584 four and, when divided by four, fits in 8 bits. */
23585 if (addend_abs
& 0x3)
23586 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23587 _("bad offset 0x%08lX (must be word-aligned)"),
23588 (unsigned long) addend_abs
);
23590 if ((addend_abs
>> 2) > 0xff)
23591 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23592 _("bad offset 0x%08lX (must be an 8-bit number of words)"),
23593 (unsigned long) addend_abs
);
23595 /* Extract the instruction. */
23596 insn
= md_chars_to_number (buf
, INSN_SIZE
);
23598 /* If the addend is negative, clear bit 23 of the instruction.
23599 Otherwise set it. */
23601 insn
&= ~(1 << 23);
23605 /* Place the addend (divided by four) into the first eight
23606 bits of the instruction. */
23607 insn
&= 0xfffffff0;
23608 insn
|= addend_abs
>> 2;
23610 /* Update the instruction. */
23611 md_number_to_chars (buf
, insn
, INSN_SIZE
);
23615 case BFD_RELOC_ARM_V4BX
:
23616 /* This will need to go in the object file. */
23620 case BFD_RELOC_UNUSED
:
23622 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23623 _("bad relocation fixup type (%d)"), fixP
->fx_r_type
);
23627 /* Translate internal representation of relocation info to BFD target
23631 tc_gen_reloc (asection
*section
, fixS
*fixp
)
23634 bfd_reloc_code_real_type code
;
23636 reloc
= (arelent
*) xmalloc (sizeof (arelent
));
23638 reloc
->sym_ptr_ptr
= (asymbol
**) xmalloc (sizeof (asymbol
*));
23639 *reloc
->sym_ptr_ptr
= symbol_get_bfdsym (fixp
->fx_addsy
);
23640 reloc
->address
= fixp
->fx_frag
->fr_address
+ fixp
->fx_where
;
23642 if (fixp
->fx_pcrel
)
23644 if (section
->use_rela_p
)
23645 fixp
->fx_offset
-= md_pcrel_from_section (fixp
, section
);
23647 fixp
->fx_offset
= reloc
->address
;
23649 reloc
->addend
= fixp
->fx_offset
;
23651 switch (fixp
->fx_r_type
)
23654 if (fixp
->fx_pcrel
)
23656 code
= BFD_RELOC_8_PCREL
;
23661 if (fixp
->fx_pcrel
)
23663 code
= BFD_RELOC_16_PCREL
;
23668 if (fixp
->fx_pcrel
)
23670 code
= BFD_RELOC_32_PCREL
;
23674 case BFD_RELOC_ARM_MOVW
:
23675 if (fixp
->fx_pcrel
)
23677 code
= BFD_RELOC_ARM_MOVW_PCREL
;
23681 case BFD_RELOC_ARM_MOVT
:
23682 if (fixp
->fx_pcrel
)
23684 code
= BFD_RELOC_ARM_MOVT_PCREL
;
23688 case BFD_RELOC_ARM_THUMB_MOVW
:
23689 if (fixp
->fx_pcrel
)
23691 code
= BFD_RELOC_ARM_THUMB_MOVW_PCREL
;
23695 case BFD_RELOC_ARM_THUMB_MOVT
:
23696 if (fixp
->fx_pcrel
)
23698 code
= BFD_RELOC_ARM_THUMB_MOVT_PCREL
;
23702 case BFD_RELOC_NONE
:
23703 case BFD_RELOC_ARM_PCREL_BRANCH
:
23704 case BFD_RELOC_ARM_PCREL_BLX
:
23705 case BFD_RELOC_RVA
:
23706 case BFD_RELOC_THUMB_PCREL_BRANCH7
:
23707 case BFD_RELOC_THUMB_PCREL_BRANCH9
:
23708 case BFD_RELOC_THUMB_PCREL_BRANCH12
:
23709 case BFD_RELOC_THUMB_PCREL_BRANCH20
:
23710 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
23711 case BFD_RELOC_THUMB_PCREL_BRANCH25
:
23712 case BFD_RELOC_VTABLE_ENTRY
:
23713 case BFD_RELOC_VTABLE_INHERIT
:
23715 case BFD_RELOC_32_SECREL
:
23717 code
= fixp
->fx_r_type
;
23720 case BFD_RELOC_THUMB_PCREL_BLX
:
23722 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
)
23723 code
= BFD_RELOC_THUMB_PCREL_BRANCH23
;
23726 code
= BFD_RELOC_THUMB_PCREL_BLX
;
23729 case BFD_RELOC_ARM_LITERAL
:
23730 case BFD_RELOC_ARM_HWLITERAL
:
23731 /* If this is called then the a literal has
23732 been referenced across a section boundary. */
23733 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
23734 _("literal referenced across section boundary"));
23738 case BFD_RELOC_ARM_TLS_CALL
:
23739 case BFD_RELOC_ARM_THM_TLS_CALL
:
23740 case BFD_RELOC_ARM_TLS_DESCSEQ
:
23741 case BFD_RELOC_ARM_THM_TLS_DESCSEQ
:
23742 case BFD_RELOC_ARM_GOT32
:
23743 case BFD_RELOC_ARM_GOTOFF
:
23744 case BFD_RELOC_ARM_GOT_PREL
:
23745 case BFD_RELOC_ARM_PLT32
:
23746 case BFD_RELOC_ARM_TARGET1
:
23747 case BFD_RELOC_ARM_ROSEGREL32
:
23748 case BFD_RELOC_ARM_SBREL32
:
23749 case BFD_RELOC_ARM_PREL31
:
23750 case BFD_RELOC_ARM_TARGET2
:
23751 case BFD_RELOC_ARM_TLS_LDO32
:
23752 case BFD_RELOC_ARM_PCREL_CALL
:
23753 case BFD_RELOC_ARM_PCREL_JUMP
:
23754 case BFD_RELOC_ARM_ALU_PC_G0_NC
:
23755 case BFD_RELOC_ARM_ALU_PC_G0
:
23756 case BFD_RELOC_ARM_ALU_PC_G1_NC
:
23757 case BFD_RELOC_ARM_ALU_PC_G1
:
23758 case BFD_RELOC_ARM_ALU_PC_G2
:
23759 case BFD_RELOC_ARM_LDR_PC_G0
:
23760 case BFD_RELOC_ARM_LDR_PC_G1
:
23761 case BFD_RELOC_ARM_LDR_PC_G2
:
23762 case BFD_RELOC_ARM_LDRS_PC_G0
:
23763 case BFD_RELOC_ARM_LDRS_PC_G1
:
23764 case BFD_RELOC_ARM_LDRS_PC_G2
:
23765 case BFD_RELOC_ARM_LDC_PC_G0
:
23766 case BFD_RELOC_ARM_LDC_PC_G1
:
23767 case BFD_RELOC_ARM_LDC_PC_G2
:
23768 case BFD_RELOC_ARM_ALU_SB_G0_NC
:
23769 case BFD_RELOC_ARM_ALU_SB_G0
:
23770 case BFD_RELOC_ARM_ALU_SB_G1_NC
:
23771 case BFD_RELOC_ARM_ALU_SB_G1
:
23772 case BFD_RELOC_ARM_ALU_SB_G2
:
23773 case BFD_RELOC_ARM_LDR_SB_G0
:
23774 case BFD_RELOC_ARM_LDR_SB_G1
:
23775 case BFD_RELOC_ARM_LDR_SB_G2
:
23776 case BFD_RELOC_ARM_LDRS_SB_G0
:
23777 case BFD_RELOC_ARM_LDRS_SB_G1
:
23778 case BFD_RELOC_ARM_LDRS_SB_G2
:
23779 case BFD_RELOC_ARM_LDC_SB_G0
:
23780 case BFD_RELOC_ARM_LDC_SB_G1
:
23781 case BFD_RELOC_ARM_LDC_SB_G2
:
23782 case BFD_RELOC_ARM_V4BX
:
23783 case BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
:
23784 case BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC
:
23785 case BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC
:
23786 case BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
:
23787 code
= fixp
->fx_r_type
;
23790 case BFD_RELOC_ARM_TLS_GOTDESC
:
23791 case BFD_RELOC_ARM_TLS_GD32
:
23792 case BFD_RELOC_ARM_TLS_LE32
:
23793 case BFD_RELOC_ARM_TLS_IE32
:
23794 case BFD_RELOC_ARM_TLS_LDM32
:
23795 /* BFD will include the symbol's address in the addend.
23796 But we don't want that, so subtract it out again here. */
23797 if (!S_IS_COMMON (fixp
->fx_addsy
))
23798 reloc
->addend
-= (*reloc
->sym_ptr_ptr
)->value
;
23799 code
= fixp
->fx_r_type
;
23803 case BFD_RELOC_ARM_IMMEDIATE
:
23804 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
23805 _("internal relocation (type: IMMEDIATE) not fixed up"));
23808 case BFD_RELOC_ARM_ADRL_IMMEDIATE
:
23809 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
23810 _("ADRL used for a symbol not defined in the same file"));
23813 case BFD_RELOC_ARM_OFFSET_IMM
:
23814 if (section
->use_rela_p
)
23816 code
= fixp
->fx_r_type
;
23820 if (fixp
->fx_addsy
!= NULL
23821 && !S_IS_DEFINED (fixp
->fx_addsy
)
23822 && S_IS_LOCAL (fixp
->fx_addsy
))
23824 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
23825 _("undefined local label `%s'"),
23826 S_GET_NAME (fixp
->fx_addsy
));
23830 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
23831 _("internal_relocation (type: OFFSET_IMM) not fixed up"));
23838 switch (fixp
->fx_r_type
)
23840 case BFD_RELOC_NONE
: type
= "NONE"; break;
23841 case BFD_RELOC_ARM_OFFSET_IMM8
: type
= "OFFSET_IMM8"; break;
23842 case BFD_RELOC_ARM_SHIFT_IMM
: type
= "SHIFT_IMM"; break;
23843 case BFD_RELOC_ARM_SMC
: type
= "SMC"; break;
23844 case BFD_RELOC_ARM_SWI
: type
= "SWI"; break;
23845 case BFD_RELOC_ARM_MULTI
: type
= "MULTI"; break;
23846 case BFD_RELOC_ARM_CP_OFF_IMM
: type
= "CP_OFF_IMM"; break;
23847 case BFD_RELOC_ARM_T32_OFFSET_IMM
: type
= "T32_OFFSET_IMM"; break;
23848 case BFD_RELOC_ARM_T32_CP_OFF_IMM
: type
= "T32_CP_OFF_IMM"; break;
23849 case BFD_RELOC_ARM_THUMB_ADD
: type
= "THUMB_ADD"; break;
23850 case BFD_RELOC_ARM_THUMB_SHIFT
: type
= "THUMB_SHIFT"; break;
23851 case BFD_RELOC_ARM_THUMB_IMM
: type
= "THUMB_IMM"; break;
23852 case BFD_RELOC_ARM_THUMB_OFFSET
: type
= "THUMB_OFFSET"; break;
23853 default: type
= _("<unknown>"); break;
23855 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
23856 _("cannot represent %s relocation in this object file format"),
23863 if ((code
== BFD_RELOC_32_PCREL
|| code
== BFD_RELOC_32
)
23865 && fixp
->fx_addsy
== GOT_symbol
)
23867 code
= BFD_RELOC_ARM_GOTPC
;
23868 reloc
->addend
= fixp
->fx_offset
= reloc
->address
;
23872 reloc
->howto
= bfd_reloc_type_lookup (stdoutput
, code
);
23874 if (reloc
->howto
== NULL
)
23876 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
23877 _("cannot represent %s relocation in this object file format"),
23878 bfd_get_reloc_code_name (code
));
23882 /* HACK: Since arm ELF uses Rel instead of Rela, encode the
23883 vtable entry to be used in the relocation's section offset. */
23884 if (fixp
->fx_r_type
== BFD_RELOC_VTABLE_ENTRY
)
23885 reloc
->address
= fixp
->fx_offset
;
23890 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
23893 cons_fix_new_arm (fragS
* frag
,
23897 bfd_reloc_code_real_type reloc
)
23902 FIXME: @@ Should look at CPU word size. */
23906 reloc
= BFD_RELOC_8
;
23909 reloc
= BFD_RELOC_16
;
23913 reloc
= BFD_RELOC_32
;
23916 reloc
= BFD_RELOC_64
;
23921 if (exp
->X_op
== O_secrel
)
23923 exp
->X_op
= O_symbol
;
23924 reloc
= BFD_RELOC_32_SECREL
;
23928 fix_new_exp (frag
, where
, size
, exp
, pcrel
, reloc
);
23931 #if defined (OBJ_COFF)
23933 arm_validate_fix (fixS
* fixP
)
23935 /* If the destination of the branch is a defined symbol which does not have
23936 the THUMB_FUNC attribute, then we must be calling a function which has
23937 the (interfacearm) attribute. We look for the Thumb entry point to that
23938 function and change the branch to refer to that function instead. */
23939 if (fixP
->fx_r_type
== BFD_RELOC_THUMB_PCREL_BRANCH23
23940 && fixP
->fx_addsy
!= NULL
23941 && S_IS_DEFINED (fixP
->fx_addsy
)
23942 && ! THUMB_IS_FUNC (fixP
->fx_addsy
))
23944 fixP
->fx_addsy
= find_real_start (fixP
->fx_addsy
);
23951 arm_force_relocation (struct fix
* fixp
)
23953 #if defined (OBJ_COFF) && defined (TE_PE)
23954 if (fixp
->fx_r_type
== BFD_RELOC_RVA
)
23958 /* In case we have a call or a branch to a function in ARM ISA mode from
23959 a thumb function or vice-versa force the relocation. These relocations
23960 are cleared off for some cores that might have blx and simple transformations
23964 switch (fixp
->fx_r_type
)
23966 case BFD_RELOC_ARM_PCREL_JUMP
:
23967 case BFD_RELOC_ARM_PCREL_CALL
:
23968 case BFD_RELOC_THUMB_PCREL_BLX
:
23969 if (THUMB_IS_FUNC (fixp
->fx_addsy
))
23973 case BFD_RELOC_ARM_PCREL_BLX
:
23974 case BFD_RELOC_THUMB_PCREL_BRANCH25
:
23975 case BFD_RELOC_THUMB_PCREL_BRANCH20
:
23976 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
23977 if (ARM_IS_FUNC (fixp
->fx_addsy
))
23986 /* Resolve these relocations even if the symbol is extern or weak.
23987 Technically this is probably wrong due to symbol preemption.
23988 In practice these relocations do not have enough range to be useful
23989 at dynamic link time, and some code (e.g. in the Linux kernel)
23990 expects these references to be resolved. */
23991 if (fixp
->fx_r_type
== BFD_RELOC_ARM_IMMEDIATE
23992 || fixp
->fx_r_type
== BFD_RELOC_ARM_OFFSET_IMM
23993 || fixp
->fx_r_type
== BFD_RELOC_ARM_OFFSET_IMM8
23994 || fixp
->fx_r_type
== BFD_RELOC_ARM_ADRL_IMMEDIATE
23995 || fixp
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM
23996 || fixp
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM_S2
23997 || fixp
->fx_r_type
== BFD_RELOC_ARM_THUMB_OFFSET
23998 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_ADD_IMM
23999 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_IMMEDIATE
24000 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_IMM12
24001 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_OFFSET_IMM
24002 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_ADD_PC12
24003 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_CP_OFF_IMM
24004 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_CP_OFF_IMM_S2
)
24007 /* Always leave these relocations for the linker. */
24008 if ((fixp
->fx_r_type
>= BFD_RELOC_ARM_ALU_PC_G0_NC
24009 && fixp
->fx_r_type
<= BFD_RELOC_ARM_LDC_SB_G2
)
24010 || fixp
->fx_r_type
== BFD_RELOC_ARM_LDR_PC_G0
)
24013 /* Always generate relocations against function symbols. */
24014 if (fixp
->fx_r_type
== BFD_RELOC_32
24016 && (symbol_get_bfdsym (fixp
->fx_addsy
)->flags
& BSF_FUNCTION
))
24019 return generic_force_reloc (fixp
);
24022 #if defined (OBJ_ELF) || defined (OBJ_COFF)
24023 /* Relocations against function names must be left unadjusted,
24024 so that the linker can use this information to generate interworking
24025 stubs. The MIPS version of this function
24026 also prevents relocations that are mips-16 specific, but I do not
24027 know why it does this.
24030 There is one other problem that ought to be addressed here, but
24031 which currently is not: Taking the address of a label (rather
24032 than a function) and then later jumping to that address. Such
24033 addresses also ought to have their bottom bit set (assuming that
24034 they reside in Thumb code), but at the moment they will not. */
24037 arm_fix_adjustable (fixS
* fixP
)
24039 if (fixP
->fx_addsy
== NULL
)
24042 /* Preserve relocations against symbols with function type. */
24043 if (symbol_get_bfdsym (fixP
->fx_addsy
)->flags
& BSF_FUNCTION
)
24046 if (THUMB_IS_FUNC (fixP
->fx_addsy
)
24047 && fixP
->fx_subsy
== NULL
)
24050 /* We need the symbol name for the VTABLE entries. */
24051 if ( fixP
->fx_r_type
== BFD_RELOC_VTABLE_INHERIT
24052 || fixP
->fx_r_type
== BFD_RELOC_VTABLE_ENTRY
)
24055 /* Don't allow symbols to be discarded on GOT related relocs. */
24056 if (fixP
->fx_r_type
== BFD_RELOC_ARM_PLT32
24057 || fixP
->fx_r_type
== BFD_RELOC_ARM_GOT32
24058 || fixP
->fx_r_type
== BFD_RELOC_ARM_GOTOFF
24059 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_GD32
24060 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_LE32
24061 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_IE32
24062 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_LDM32
24063 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_LDO32
24064 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_GOTDESC
24065 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_CALL
24066 || fixP
->fx_r_type
== BFD_RELOC_ARM_THM_TLS_CALL
24067 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_DESCSEQ
24068 || fixP
->fx_r_type
== BFD_RELOC_ARM_THM_TLS_DESCSEQ
24069 || fixP
->fx_r_type
== BFD_RELOC_ARM_TARGET2
)
24072 /* Similarly for group relocations. */
24073 if ((fixP
->fx_r_type
>= BFD_RELOC_ARM_ALU_PC_G0_NC
24074 && fixP
->fx_r_type
<= BFD_RELOC_ARM_LDC_SB_G2
)
24075 || fixP
->fx_r_type
== BFD_RELOC_ARM_LDR_PC_G0
)
24078 /* MOVW/MOVT REL relocations have limited offsets, so keep the symbols. */
24079 if (fixP
->fx_r_type
== BFD_RELOC_ARM_MOVW
24080 || fixP
->fx_r_type
== BFD_RELOC_ARM_MOVT
24081 || fixP
->fx_r_type
== BFD_RELOC_ARM_MOVW_PCREL
24082 || fixP
->fx_r_type
== BFD_RELOC_ARM_MOVT_PCREL
24083 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVW
24084 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVT
24085 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVW_PCREL
24086 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVT_PCREL
)
24089 /* BFD_RELOC_ARM_THUMB_ALU_ABS_Gx_NC relocations have VERY limited
24090 offsets, so keep these symbols. */
24091 if (fixP
->fx_r_type
>= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
24092 && fixP
->fx_r_type
<= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
)
24097 #endif /* defined (OBJ_ELF) || defined (OBJ_COFF) */
24101 elf32_arm_target_format (void)
24104 return (target_big_endian
24105 ? "elf32-bigarm-symbian"
24106 : "elf32-littlearm-symbian");
24107 #elif defined (TE_VXWORKS)
24108 return (target_big_endian
24109 ? "elf32-bigarm-vxworks"
24110 : "elf32-littlearm-vxworks");
24111 #elif defined (TE_NACL)
24112 return (target_big_endian
24113 ? "elf32-bigarm-nacl"
24114 : "elf32-littlearm-nacl");
24116 if (target_big_endian
)
24117 return "elf32-bigarm";
24119 return "elf32-littlearm";
24124 armelf_frob_symbol (symbolS
* symp
,
24127 elf_frob_symbol (symp
, puntp
);
24131 /* MD interface: Finalization. */
24136 literal_pool
* pool
;
24138 /* Ensure that all the IT blocks are properly closed. */
24139 check_it_blocks_finished ();
24141 for (pool
= list_of_pools
; pool
; pool
= pool
->next
)
24143 /* Put it at the end of the relevant section. */
24144 subseg_set (pool
->section
, pool
->sub_section
);
24146 arm_elf_change_section ();
24153 /* Remove any excess mapping symbols generated for alignment frags in
24154 SEC. We may have created a mapping symbol before a zero byte
24155 alignment; remove it if there's a mapping symbol after the
24158 check_mapping_symbols (bfd
*abfd ATTRIBUTE_UNUSED
, asection
*sec
,
24159 void *dummy ATTRIBUTE_UNUSED
)
24161 segment_info_type
*seginfo
= seg_info (sec
);
24164 if (seginfo
== NULL
|| seginfo
->frchainP
== NULL
)
24167 for (fragp
= seginfo
->frchainP
->frch_root
;
24169 fragp
= fragp
->fr_next
)
24171 symbolS
*sym
= fragp
->tc_frag_data
.last_map
;
24172 fragS
*next
= fragp
->fr_next
;
24174 /* Variable-sized frags have been converted to fixed size by
24175 this point. But if this was variable-sized to start with,
24176 there will be a fixed-size frag after it. So don't handle
24178 if (sym
== NULL
|| next
== NULL
)
24181 if (S_GET_VALUE (sym
) < next
->fr_address
)
24182 /* Not at the end of this frag. */
24184 know (S_GET_VALUE (sym
) == next
->fr_address
);
24188 if (next
->tc_frag_data
.first_map
!= NULL
)
24190 /* Next frag starts with a mapping symbol. Discard this
24192 symbol_remove (sym
, &symbol_rootP
, &symbol_lastP
);
24196 if (next
->fr_next
== NULL
)
24198 /* This mapping symbol is at the end of the section. Discard
24200 know (next
->fr_fix
== 0 && next
->fr_var
== 0);
24201 symbol_remove (sym
, &symbol_rootP
, &symbol_lastP
);
24205 /* As long as we have empty frags without any mapping symbols,
24207 /* If the next frag is non-empty and does not start with a
24208 mapping symbol, then this mapping symbol is required. */
24209 if (next
->fr_address
!= next
->fr_next
->fr_address
)
24212 next
= next
->fr_next
;
24214 while (next
!= NULL
);
24219 /* Adjust the symbol table. This marks Thumb symbols as distinct from
24223 arm_adjust_symtab (void)
24228 for (sym
= symbol_rootP
; sym
!= NULL
; sym
= symbol_next (sym
))
24230 if (ARM_IS_THUMB (sym
))
24232 if (THUMB_IS_FUNC (sym
))
24234 /* Mark the symbol as a Thumb function. */
24235 if ( S_GET_STORAGE_CLASS (sym
) == C_STAT
24236 || S_GET_STORAGE_CLASS (sym
) == C_LABEL
) /* This can happen! */
24237 S_SET_STORAGE_CLASS (sym
, C_THUMBSTATFUNC
);
24239 else if (S_GET_STORAGE_CLASS (sym
) == C_EXT
)
24240 S_SET_STORAGE_CLASS (sym
, C_THUMBEXTFUNC
);
24242 as_bad (_("%s: unexpected function type: %d"),
24243 S_GET_NAME (sym
), S_GET_STORAGE_CLASS (sym
));
24245 else switch (S_GET_STORAGE_CLASS (sym
))
24248 S_SET_STORAGE_CLASS (sym
, C_THUMBEXT
);
24251 S_SET_STORAGE_CLASS (sym
, C_THUMBSTAT
);
24254 S_SET_STORAGE_CLASS (sym
, C_THUMBLABEL
);
24262 if (ARM_IS_INTERWORK (sym
))
24263 coffsymbol (symbol_get_bfdsym (sym
))->native
->u
.syment
.n_flags
= 0xFF;
24270 for (sym
= symbol_rootP
; sym
!= NULL
; sym
= symbol_next (sym
))
24272 if (ARM_IS_THUMB (sym
))
24274 elf_symbol_type
* elf_sym
;
24276 elf_sym
= elf_symbol (symbol_get_bfdsym (sym
));
24277 bind
= ELF_ST_BIND (elf_sym
->internal_elf_sym
.st_info
);
24279 if (! bfd_is_arm_special_symbol_name (elf_sym
->symbol
.name
,
24280 BFD_ARM_SPECIAL_SYM_TYPE_ANY
))
24282 /* If it's a .thumb_func, declare it as so,
24283 otherwise tag label as .code 16. */
24284 if (THUMB_IS_FUNC (sym
))
24285 elf_sym
->internal_elf_sym
.st_target_internal
24286 = ST_BRANCH_TO_THUMB
;
24287 else if (EF_ARM_EABI_VERSION (meabi_flags
) < EF_ARM_EABI_VER4
)
24288 elf_sym
->internal_elf_sym
.st_info
=
24289 ELF_ST_INFO (bind
, STT_ARM_16BIT
);
24294 /* Remove any overlapping mapping symbols generated by alignment frags. */
24295 bfd_map_over_sections (stdoutput
, check_mapping_symbols
, (char *) 0);
24296 /* Now do generic ELF adjustments. */
24297 elf_adjust_symtab ();
24301 /* MD interface: Initialization. */
24304 set_constant_flonums (void)
24308 for (i
= 0; i
< NUM_FLOAT_VALS
; i
++)
24309 if (atof_ieee ((char *) fp_const
[i
], 'x', fp_values
[i
]) == NULL
)
24313 /* Auto-select Thumb mode if it's the only available instruction set for the
24314 given architecture. */
24317 autoselect_thumb_from_cpu_variant (void)
24319 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
))
24320 opcode_select (16);
24329 if ( (arm_ops_hsh
= hash_new ()) == NULL
24330 || (arm_cond_hsh
= hash_new ()) == NULL
24331 || (arm_shift_hsh
= hash_new ()) == NULL
24332 || (arm_psr_hsh
= hash_new ()) == NULL
24333 || (arm_v7m_psr_hsh
= hash_new ()) == NULL
24334 || (arm_reg_hsh
= hash_new ()) == NULL
24335 || (arm_reloc_hsh
= hash_new ()) == NULL
24336 || (arm_barrier_opt_hsh
= hash_new ()) == NULL
)
24337 as_fatal (_("virtual memory exhausted"));
24339 for (i
= 0; i
< sizeof (insns
) / sizeof (struct asm_opcode
); i
++)
24340 hash_insert (arm_ops_hsh
, insns
[i
].template_name
, (void *) (insns
+ i
));
24341 for (i
= 0; i
< sizeof (conds
) / sizeof (struct asm_cond
); i
++)
24342 hash_insert (arm_cond_hsh
, conds
[i
].template_name
, (void *) (conds
+ i
));
24343 for (i
= 0; i
< sizeof (shift_names
) / sizeof (struct asm_shift_name
); i
++)
24344 hash_insert (arm_shift_hsh
, shift_names
[i
].name
, (void *) (shift_names
+ i
));
24345 for (i
= 0; i
< sizeof (psrs
) / sizeof (struct asm_psr
); i
++)
24346 hash_insert (arm_psr_hsh
, psrs
[i
].template_name
, (void *) (psrs
+ i
));
24347 for (i
= 0; i
< sizeof (v7m_psrs
) / sizeof (struct asm_psr
); i
++)
24348 hash_insert (arm_v7m_psr_hsh
, v7m_psrs
[i
].template_name
,
24349 (void *) (v7m_psrs
+ i
));
24350 for (i
= 0; i
< sizeof (reg_names
) / sizeof (struct reg_entry
); i
++)
24351 hash_insert (arm_reg_hsh
, reg_names
[i
].name
, (void *) (reg_names
+ i
));
24353 i
< sizeof (barrier_opt_names
) / sizeof (struct asm_barrier_opt
);
24355 hash_insert (arm_barrier_opt_hsh
, barrier_opt_names
[i
].template_name
,
24356 (void *) (barrier_opt_names
+ i
));
24358 for (i
= 0; i
< ARRAY_SIZE (reloc_names
); i
++)
24360 struct reloc_entry
* entry
= reloc_names
+ i
;
24362 if (arm_is_eabi() && entry
->reloc
== BFD_RELOC_ARM_PLT32
)
24363 /* This makes encode_branch() use the EABI versions of this relocation. */
24364 entry
->reloc
= BFD_RELOC_UNUSED
;
24366 hash_insert (arm_reloc_hsh
, entry
->name
, (void *) entry
);
24370 set_constant_flonums ();
24372 /* Set the cpu variant based on the command-line options. We prefer
24373 -mcpu= over -march= if both are set (as for GCC); and we prefer
24374 -mfpu= over any other way of setting the floating point unit.
24375 Use of legacy options with new options are faulted. */
24378 if (mcpu_cpu_opt
|| march_cpu_opt
)
24379 as_bad (_("use of old and new-style options to set CPU type"));
24381 mcpu_cpu_opt
= legacy_cpu
;
24383 else if (!mcpu_cpu_opt
)
24384 mcpu_cpu_opt
= march_cpu_opt
;
24389 as_bad (_("use of old and new-style options to set FPU type"));
24391 mfpu_opt
= legacy_fpu
;
24393 else if (!mfpu_opt
)
24395 #if !(defined (EABI_DEFAULT) || defined (TE_LINUX) \
24396 || defined (TE_NetBSD) || defined (TE_VXWORKS))
24397 /* Some environments specify a default FPU. If they don't, infer it
24398 from the processor. */
24400 mfpu_opt
= mcpu_fpu_opt
;
24402 mfpu_opt
= march_fpu_opt
;
24404 mfpu_opt
= &fpu_default
;
24410 if (mcpu_cpu_opt
!= NULL
)
24411 mfpu_opt
= &fpu_default
;
24412 else if (mcpu_fpu_opt
!= NULL
&& ARM_CPU_HAS_FEATURE (*mcpu_fpu_opt
, arm_ext_v5
))
24413 mfpu_opt
= &fpu_arch_vfp_v2
;
24415 mfpu_opt
= &fpu_arch_fpa
;
24421 mcpu_cpu_opt
= &cpu_default
;
24422 selected_cpu
= cpu_default
;
24424 else if (no_cpu_selected ())
24425 selected_cpu
= cpu_default
;
24428 selected_cpu
= *mcpu_cpu_opt
;
24430 mcpu_cpu_opt
= &arm_arch_any
;
24433 ARM_MERGE_FEATURE_SETS (cpu_variant
, *mcpu_cpu_opt
, *mfpu_opt
);
24435 autoselect_thumb_from_cpu_variant ();
24437 arm_arch_used
= thumb_arch_used
= arm_arch_none
;
24439 #if defined OBJ_COFF || defined OBJ_ELF
24441 unsigned int flags
= 0;
24443 #if defined OBJ_ELF
24444 flags
= meabi_flags
;
24446 switch (meabi_flags
)
24448 case EF_ARM_EABI_UNKNOWN
:
24450 /* Set the flags in the private structure. */
24451 if (uses_apcs_26
) flags
|= F_APCS26
;
24452 if (support_interwork
) flags
|= F_INTERWORK
;
24453 if (uses_apcs_float
) flags
|= F_APCS_FLOAT
;
24454 if (pic_code
) flags
|= F_PIC
;
24455 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_any_hard
))
24456 flags
|= F_SOFT_FLOAT
;
24458 switch (mfloat_abi_opt
)
24460 case ARM_FLOAT_ABI_SOFT
:
24461 case ARM_FLOAT_ABI_SOFTFP
:
24462 flags
|= F_SOFT_FLOAT
;
24465 case ARM_FLOAT_ABI_HARD
:
24466 if (flags
& F_SOFT_FLOAT
)
24467 as_bad (_("hard-float conflicts with specified fpu"));
24471 /* Using pure-endian doubles (even if soft-float). */
24472 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_endian_pure
))
24473 flags
|= F_VFP_FLOAT
;
24475 #if defined OBJ_ELF
24476 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_arch_maverick
))
24477 flags
|= EF_ARM_MAVERICK_FLOAT
;
24480 case EF_ARM_EABI_VER4
:
24481 case EF_ARM_EABI_VER5
:
24482 /* No additional flags to set. */
24489 bfd_set_private_flags (stdoutput
, flags
);
24491 /* We have run out of flags in the COFF header to encode the
24492 status of ATPCS support, so instead we create a dummy,
24493 empty, debug section called .arm.atpcs. */
24498 sec
= bfd_make_section (stdoutput
, ".arm.atpcs");
24502 bfd_set_section_flags
24503 (stdoutput
, sec
, SEC_READONLY
| SEC_DEBUGGING
/* | SEC_HAS_CONTENTS */);
24504 bfd_set_section_size (stdoutput
, sec
, 0);
24505 bfd_set_section_contents (stdoutput
, sec
, NULL
, 0, 0);
24511 /* Record the CPU type as well. */
24512 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_iwmmxt2
))
24513 mach
= bfd_mach_arm_iWMMXt2
;
24514 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_iwmmxt
))
24515 mach
= bfd_mach_arm_iWMMXt
;
24516 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_xscale
))
24517 mach
= bfd_mach_arm_XScale
;
24518 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_maverick
))
24519 mach
= bfd_mach_arm_ep9312
;
24520 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v5e
))
24521 mach
= bfd_mach_arm_5TE
;
24522 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v5
))
24524 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v4t
))
24525 mach
= bfd_mach_arm_5T
;
24527 mach
= bfd_mach_arm_5
;
24529 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v4
))
24531 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v4t
))
24532 mach
= bfd_mach_arm_4T
;
24534 mach
= bfd_mach_arm_4
;
24536 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v3m
))
24537 mach
= bfd_mach_arm_3M
;
24538 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v3
))
24539 mach
= bfd_mach_arm_3
;
24540 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v2s
))
24541 mach
= bfd_mach_arm_2a
;
24542 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v2
))
24543 mach
= bfd_mach_arm_2
;
24545 mach
= bfd_mach_arm_unknown
;
24547 bfd_set_arch_mach (stdoutput
, TARGET_ARCH
, mach
);
24550 /* Command line processing. */
24553 Invocation line includes a switch not recognized by the base assembler.
24554 See if it's a processor-specific option.
24556 This routine is somewhat complicated by the need for backwards
24557 compatibility (since older releases of gcc can't be changed).
24558 The new options try to make the interface as compatible as
24561 New options (supported) are:
24563 -mcpu=<cpu name> Assemble for selected processor
24564 -march=<architecture name> Assemble for selected architecture
24565 -mfpu=<fpu architecture> Assemble for selected FPU.
24566 -EB/-mbig-endian Big-endian
24567 -EL/-mlittle-endian Little-endian
24568 -k Generate PIC code
24569 -mthumb Start in Thumb mode
24570 -mthumb-interwork Code supports ARM/Thumb interworking
24572 -m[no-]warn-deprecated Warn about deprecated features
24573 -m[no-]warn-syms Warn when symbols match instructions
24575 For now we will also provide support for:
24577 -mapcs-32 32-bit Program counter
24578 -mapcs-26 26-bit Program counter
24579 -mapcs-float Floats passed in FP registers
24580 -mapcs-reentrant Reentrant code
24582 (sometime these will probably be replaced with -mapcs=<list of options>
24583 and -matpcs=<list of options>)
24585 The remaining options are only supported for back-wards compatibility.
24586 Cpu variants, the arm part is optional:
24587 -m[arm]1 Currently not supported.
24588 -m[arm]2, -m[arm]250 Arm 2 and Arm 250 processor
24589 -m[arm]3 Arm 3 processor
24590 -m[arm]6[xx], Arm 6 processors
24591 -m[arm]7[xx][t][[d]m] Arm 7 processors
24592 -m[arm]8[10] Arm 8 processors
24593 -m[arm]9[20][tdmi] Arm 9 processors
24594 -mstrongarm[110[0]] StrongARM processors
24595 -mxscale XScale processors
24596 -m[arm]v[2345[t[e]]] Arm architectures
24597 -mall All (except the ARM1)
24599 -mfpa10, -mfpa11 FPA10 and 11 co-processor instructions
24600 -mfpe-old (No float load/store multiples)
24601 -mvfpxd VFP Single precision
24603 -mno-fpu Disable all floating point instructions
24605 The following CPU names are recognized:
24606 arm1, arm2, arm250, arm3, arm6, arm600, arm610, arm620,
24607 arm7, arm7m, arm7d, arm7dm, arm7di, arm7dmi, arm70, arm700,
24608 arm700i, arm710 arm710t, arm720, arm720t, arm740t, arm710c,
24609 arm7100, arm7500, arm7500fe, arm7tdmi, arm8, arm810, arm9,
24610 arm920, arm920t, arm940t, arm946, arm966, arm9tdmi, arm9e,
24611 arm10t arm10e, arm1020t, arm1020e, arm10200e,
24612 strongarm, strongarm110, strongarm1100, strongarm1110, xscale.
24616 const char * md_shortopts
= "m:k";
24618 #ifdef ARM_BI_ENDIAN
24619 #define OPTION_EB (OPTION_MD_BASE + 0)
24620 #define OPTION_EL (OPTION_MD_BASE + 1)
24622 #if TARGET_BYTES_BIG_ENDIAN
24623 #define OPTION_EB (OPTION_MD_BASE + 0)
24625 #define OPTION_EL (OPTION_MD_BASE + 1)
24628 #define OPTION_FIX_V4BX (OPTION_MD_BASE + 2)
24630 struct option md_longopts
[] =
24633 {"EB", no_argument
, NULL
, OPTION_EB
},
24636 {"EL", no_argument
, NULL
, OPTION_EL
},
24638 {"fix-v4bx", no_argument
, NULL
, OPTION_FIX_V4BX
},
24639 {NULL
, no_argument
, NULL
, 0}
24643 size_t md_longopts_size
= sizeof (md_longopts
);
24645 struct arm_option_table
24647 char *option
; /* Option name to match. */
24648 char *help
; /* Help information. */
24649 int *var
; /* Variable to change. */
24650 int value
; /* What to change it to. */
24651 char *deprecated
; /* If non-null, print this message. */
24654 struct arm_option_table arm_opts
[] =
24656 {"k", N_("generate PIC code"), &pic_code
, 1, NULL
},
24657 {"mthumb", N_("assemble Thumb code"), &thumb_mode
, 1, NULL
},
24658 {"mthumb-interwork", N_("support ARM/Thumb interworking"),
24659 &support_interwork
, 1, NULL
},
24660 {"mapcs-32", N_("code uses 32-bit program counter"), &uses_apcs_26
, 0, NULL
},
24661 {"mapcs-26", N_("code uses 26-bit program counter"), &uses_apcs_26
, 1, NULL
},
24662 {"mapcs-float", N_("floating point args are in fp regs"), &uses_apcs_float
,
24664 {"mapcs-reentrant", N_("re-entrant code"), &pic_code
, 1, NULL
},
24665 {"matpcs", N_("code is ATPCS conformant"), &atpcs
, 1, NULL
},
24666 {"mbig-endian", N_("assemble for big-endian"), &target_big_endian
, 1, NULL
},
24667 {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian
, 0,
24670 /* These are recognized by the assembler, but have no effect on code. */
24671 {"mapcs-frame", N_("use frame pointer"), NULL
, 0, NULL
},
24672 {"mapcs-stack-check", N_("use stack size checking"), NULL
, 0, NULL
},
24674 {"mwarn-deprecated", NULL
, &warn_on_deprecated
, 1, NULL
},
24675 {"mno-warn-deprecated", N_("do not warn on use of deprecated feature"),
24676 &warn_on_deprecated
, 0, NULL
},
24677 {"mwarn-syms", N_("warn about symbols that match instruction names [default]"), (int *) (& flag_warn_syms
), TRUE
, NULL
},
24678 {"mno-warn-syms", N_("disable warnings about symobls that match instructions"), (int *) (& flag_warn_syms
), FALSE
, NULL
},
24679 {NULL
, NULL
, NULL
, 0, NULL
}
24682 struct arm_legacy_option_table
24684 char *option
; /* Option name to match. */
24685 const arm_feature_set
**var
; /* Variable to change. */
24686 const arm_feature_set value
; /* What to change it to. */
24687 char *deprecated
; /* If non-null, print this message. */
24690 const struct arm_legacy_option_table arm_legacy_opts
[] =
24692 /* DON'T add any new processors to this list -- we want the whole list
24693 to go away... Add them to the processors table instead. */
24694 {"marm1", &legacy_cpu
, ARM_ARCH_V1
, N_("use -mcpu=arm1")},
24695 {"m1", &legacy_cpu
, ARM_ARCH_V1
, N_("use -mcpu=arm1")},
24696 {"marm2", &legacy_cpu
, ARM_ARCH_V2
, N_("use -mcpu=arm2")},
24697 {"m2", &legacy_cpu
, ARM_ARCH_V2
, N_("use -mcpu=arm2")},
24698 {"marm250", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -mcpu=arm250")},
24699 {"m250", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -mcpu=arm250")},
24700 {"marm3", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -mcpu=arm3")},
24701 {"m3", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -mcpu=arm3")},
24702 {"marm6", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm6")},
24703 {"m6", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm6")},
24704 {"marm600", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm600")},
24705 {"m600", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm600")},
24706 {"marm610", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm610")},
24707 {"m610", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm610")},
24708 {"marm620", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm620")},
24709 {"m620", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm620")},
24710 {"marm7", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7")},
24711 {"m7", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7")},
24712 {"marm70", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm70")},
24713 {"m70", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm70")},
24714 {"marm700", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm700")},
24715 {"m700", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm700")},
24716 {"marm700i", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm700i")},
24717 {"m700i", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm700i")},
24718 {"marm710", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm710")},
24719 {"m710", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm710")},
24720 {"marm710c", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm710c")},
24721 {"m710c", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm710c")},
24722 {"marm720", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm720")},
24723 {"m720", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm720")},
24724 {"marm7d", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7d")},
24725 {"m7d", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7d")},
24726 {"marm7di", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7di")},
24727 {"m7di", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7di")},
24728 {"marm7m", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7m")},
24729 {"m7m", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7m")},
24730 {"marm7dm", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7dm")},
24731 {"m7dm", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7dm")},
24732 {"marm7dmi", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7dmi")},
24733 {"m7dmi", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7dmi")},
24734 {"marm7100", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7100")},
24735 {"m7100", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7100")},
24736 {"marm7500", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7500")},
24737 {"m7500", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7500")},
24738 {"marm7500fe", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7500fe")},
24739 {"m7500fe", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7500fe")},
24740 {"marm7t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm7tdmi")},
24741 {"m7t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm7tdmi")},
24742 {"marm7tdmi", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm7tdmi")},
24743 {"m7tdmi", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm7tdmi")},
24744 {"marm710t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm710t")},
24745 {"m710t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm710t")},
24746 {"marm720t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm720t")},
24747 {"m720t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm720t")},
24748 {"marm740t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm740t")},
24749 {"m740t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm740t")},
24750 {"marm8", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=arm8")},
24751 {"m8", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=arm8")},
24752 {"marm810", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=arm810")},
24753 {"m810", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=arm810")},
24754 {"marm9", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm9")},
24755 {"m9", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm9")},
24756 {"marm9tdmi", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm9tdmi")},
24757 {"m9tdmi", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm9tdmi")},
24758 {"marm920", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm920")},
24759 {"m920", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm920")},
24760 {"marm940", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm940")},
24761 {"m940", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm940")},
24762 {"mstrongarm", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=strongarm")},
24763 {"mstrongarm110", &legacy_cpu
, ARM_ARCH_V4
,
24764 N_("use -mcpu=strongarm110")},
24765 {"mstrongarm1100", &legacy_cpu
, ARM_ARCH_V4
,
24766 N_("use -mcpu=strongarm1100")},
24767 {"mstrongarm1110", &legacy_cpu
, ARM_ARCH_V4
,
24768 N_("use -mcpu=strongarm1110")},
24769 {"mxscale", &legacy_cpu
, ARM_ARCH_XSCALE
, N_("use -mcpu=xscale")},
24770 {"miwmmxt", &legacy_cpu
, ARM_ARCH_IWMMXT
, N_("use -mcpu=iwmmxt")},
24771 {"mall", &legacy_cpu
, ARM_ANY
, N_("use -mcpu=all")},
24773 /* Architecture variants -- don't add any more to this list either. */
24774 {"mv2", &legacy_cpu
, ARM_ARCH_V2
, N_("use -march=armv2")},
24775 {"marmv2", &legacy_cpu
, ARM_ARCH_V2
, N_("use -march=armv2")},
24776 {"mv2a", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -march=armv2a")},
24777 {"marmv2a", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -march=armv2a")},
24778 {"mv3", &legacy_cpu
, ARM_ARCH_V3
, N_("use -march=armv3")},
24779 {"marmv3", &legacy_cpu
, ARM_ARCH_V3
, N_("use -march=armv3")},
24780 {"mv3m", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -march=armv3m")},
24781 {"marmv3m", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -march=armv3m")},
24782 {"mv4", &legacy_cpu
, ARM_ARCH_V4
, N_("use -march=armv4")},
24783 {"marmv4", &legacy_cpu
, ARM_ARCH_V4
, N_("use -march=armv4")},
24784 {"mv4t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -march=armv4t")},
24785 {"marmv4t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -march=armv4t")},
24786 {"mv5", &legacy_cpu
, ARM_ARCH_V5
, N_("use -march=armv5")},
24787 {"marmv5", &legacy_cpu
, ARM_ARCH_V5
, N_("use -march=armv5")},
24788 {"mv5t", &legacy_cpu
, ARM_ARCH_V5T
, N_("use -march=armv5t")},
24789 {"marmv5t", &legacy_cpu
, ARM_ARCH_V5T
, N_("use -march=armv5t")},
24790 {"mv5e", &legacy_cpu
, ARM_ARCH_V5TE
, N_("use -march=armv5te")},
24791 {"marmv5e", &legacy_cpu
, ARM_ARCH_V5TE
, N_("use -march=armv5te")},
24793 /* Floating point variants -- don't add any more to this list either. */
24794 {"mfpe-old", &legacy_fpu
, FPU_ARCH_FPE
, N_("use -mfpu=fpe")},
24795 {"mfpa10", &legacy_fpu
, FPU_ARCH_FPA
, N_("use -mfpu=fpa10")},
24796 {"mfpa11", &legacy_fpu
, FPU_ARCH_FPA
, N_("use -mfpu=fpa11")},
24797 {"mno-fpu", &legacy_fpu
, ARM_ARCH_NONE
,
24798 N_("use either -mfpu=softfpa or -mfpu=softvfp")},
24800 {NULL
, NULL
, ARM_ARCH_NONE
, NULL
}
24803 struct arm_cpu_option_table
24807 const arm_feature_set value
;
24808 /* For some CPUs we assume an FPU unless the user explicitly sets
24810 const arm_feature_set default_fpu
;
24811 /* The canonical name of the CPU, or NULL to use NAME converted to upper
24813 const char *canonical_name
;
24816 /* This list should, at a minimum, contain all the cpu names
24817 recognized by GCC. */
24818 #define ARM_CPU_OPT(N, V, DF, CN) { N, sizeof (N) - 1, V, DF, CN }
24819 static const struct arm_cpu_option_table arm_cpus
[] =
24821 ARM_CPU_OPT ("all", ARM_ANY
, FPU_ARCH_FPA
, NULL
),
24822 ARM_CPU_OPT ("arm1", ARM_ARCH_V1
, FPU_ARCH_FPA
, NULL
),
24823 ARM_CPU_OPT ("arm2", ARM_ARCH_V2
, FPU_ARCH_FPA
, NULL
),
24824 ARM_CPU_OPT ("arm250", ARM_ARCH_V2S
, FPU_ARCH_FPA
, NULL
),
24825 ARM_CPU_OPT ("arm3", ARM_ARCH_V2S
, FPU_ARCH_FPA
, NULL
),
24826 ARM_CPU_OPT ("arm6", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
24827 ARM_CPU_OPT ("arm60", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
24828 ARM_CPU_OPT ("arm600", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
24829 ARM_CPU_OPT ("arm610", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
24830 ARM_CPU_OPT ("arm620", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
24831 ARM_CPU_OPT ("arm7", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
24832 ARM_CPU_OPT ("arm7m", ARM_ARCH_V3M
, FPU_ARCH_FPA
, NULL
),
24833 ARM_CPU_OPT ("arm7d", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
24834 ARM_CPU_OPT ("arm7dm", ARM_ARCH_V3M
, FPU_ARCH_FPA
, NULL
),
24835 ARM_CPU_OPT ("arm7di", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
24836 ARM_CPU_OPT ("arm7dmi", ARM_ARCH_V3M
, FPU_ARCH_FPA
, NULL
),
24837 ARM_CPU_OPT ("arm70", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
24838 ARM_CPU_OPT ("arm700", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
24839 ARM_CPU_OPT ("arm700i", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
24840 ARM_CPU_OPT ("arm710", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
24841 ARM_CPU_OPT ("arm710t", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
),
24842 ARM_CPU_OPT ("arm720", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
24843 ARM_CPU_OPT ("arm720t", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
),
24844 ARM_CPU_OPT ("arm740t", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
),
24845 ARM_CPU_OPT ("arm710c", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
24846 ARM_CPU_OPT ("arm7100", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
24847 ARM_CPU_OPT ("arm7500", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
24848 ARM_CPU_OPT ("arm7500fe", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
24849 ARM_CPU_OPT ("arm7t", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
),
24850 ARM_CPU_OPT ("arm7tdmi", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
),
24851 ARM_CPU_OPT ("arm7tdmi-s", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
),
24852 ARM_CPU_OPT ("arm8", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
),
24853 ARM_CPU_OPT ("arm810", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
),
24854 ARM_CPU_OPT ("strongarm", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
),
24855 ARM_CPU_OPT ("strongarm1", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
),
24856 ARM_CPU_OPT ("strongarm110", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
),
24857 ARM_CPU_OPT ("strongarm1100", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
),
24858 ARM_CPU_OPT ("strongarm1110", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
),
24859 ARM_CPU_OPT ("arm9", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
),
24860 ARM_CPU_OPT ("arm920", ARM_ARCH_V4T
, FPU_ARCH_FPA
, "ARM920T"),
24861 ARM_CPU_OPT ("arm920t", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
),
24862 ARM_CPU_OPT ("arm922t", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
),
24863 ARM_CPU_OPT ("arm940t", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
),
24864 ARM_CPU_OPT ("arm9tdmi", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
),
24865 ARM_CPU_OPT ("fa526", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
),
24866 ARM_CPU_OPT ("fa626", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
),
24867 /* For V5 or later processors we default to using VFP; but the user
24868 should really set the FPU type explicitly. */
24869 ARM_CPU_OPT ("arm9e-r0", ARM_ARCH_V5TExP
, FPU_ARCH_VFP_V2
, NULL
),
24870 ARM_CPU_OPT ("arm9e", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
),
24871 ARM_CPU_OPT ("arm926ej", ARM_ARCH_V5TEJ
, FPU_ARCH_VFP_V2
, "ARM926EJ-S"),
24872 ARM_CPU_OPT ("arm926ejs", ARM_ARCH_V5TEJ
, FPU_ARCH_VFP_V2
, "ARM926EJ-S"),
24873 ARM_CPU_OPT ("arm926ej-s", ARM_ARCH_V5TEJ
, FPU_ARCH_VFP_V2
, NULL
),
24874 ARM_CPU_OPT ("arm946e-r0", ARM_ARCH_V5TExP
, FPU_ARCH_VFP_V2
, NULL
),
24875 ARM_CPU_OPT ("arm946e", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, "ARM946E-S"),
24876 ARM_CPU_OPT ("arm946e-s", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
),
24877 ARM_CPU_OPT ("arm966e-r0", ARM_ARCH_V5TExP
, FPU_ARCH_VFP_V2
, NULL
),
24878 ARM_CPU_OPT ("arm966e", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, "ARM966E-S"),
24879 ARM_CPU_OPT ("arm966e-s", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
),
24880 ARM_CPU_OPT ("arm968e-s", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
),
24881 ARM_CPU_OPT ("arm10t", ARM_ARCH_V5T
, FPU_ARCH_VFP_V1
, NULL
),
24882 ARM_CPU_OPT ("arm10tdmi", ARM_ARCH_V5T
, FPU_ARCH_VFP_V1
, NULL
),
24883 ARM_CPU_OPT ("arm10e", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
),
24884 ARM_CPU_OPT ("arm1020", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, "ARM1020E"),
24885 ARM_CPU_OPT ("arm1020t", ARM_ARCH_V5T
, FPU_ARCH_VFP_V1
, NULL
),
24886 ARM_CPU_OPT ("arm1020e", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
),
24887 ARM_CPU_OPT ("arm1022e", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
),
24888 ARM_CPU_OPT ("arm1026ejs", ARM_ARCH_V5TEJ
, FPU_ARCH_VFP_V2
,
24890 ARM_CPU_OPT ("arm1026ej-s", ARM_ARCH_V5TEJ
, FPU_ARCH_VFP_V2
, NULL
),
24891 ARM_CPU_OPT ("fa606te", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
),
24892 ARM_CPU_OPT ("fa616te", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
),
24893 ARM_CPU_OPT ("fa626te", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
),
24894 ARM_CPU_OPT ("fmp626", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
),
24895 ARM_CPU_OPT ("fa726te", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
),
24896 ARM_CPU_OPT ("arm1136js", ARM_ARCH_V6
, FPU_NONE
, "ARM1136J-S"),
24897 ARM_CPU_OPT ("arm1136j-s", ARM_ARCH_V6
, FPU_NONE
, NULL
),
24898 ARM_CPU_OPT ("arm1136jfs", ARM_ARCH_V6
, FPU_ARCH_VFP_V2
,
24900 ARM_CPU_OPT ("arm1136jf-s", ARM_ARCH_V6
, FPU_ARCH_VFP_V2
, NULL
),
24901 ARM_CPU_OPT ("mpcore", ARM_ARCH_V6K
, FPU_ARCH_VFP_V2
, "MPCore"),
24902 ARM_CPU_OPT ("mpcorenovfp", ARM_ARCH_V6K
, FPU_NONE
, "MPCore"),
24903 ARM_CPU_OPT ("arm1156t2-s", ARM_ARCH_V6T2
, FPU_NONE
, NULL
),
24904 ARM_CPU_OPT ("arm1156t2f-s", ARM_ARCH_V6T2
, FPU_ARCH_VFP_V2
, NULL
),
24905 ARM_CPU_OPT ("arm1176jz-s", ARM_ARCH_V6KZ
, FPU_NONE
, NULL
),
24906 ARM_CPU_OPT ("arm1176jzf-s", ARM_ARCH_V6KZ
, FPU_ARCH_VFP_V2
, NULL
),
24907 ARM_CPU_OPT ("cortex-a5", ARM_ARCH_V7A_MP_SEC
,
24908 FPU_NONE
, "Cortex-A5"),
24909 ARM_CPU_OPT ("cortex-a7", ARM_ARCH_V7VE
, FPU_ARCH_NEON_VFP_V4
,
24911 ARM_CPU_OPT ("cortex-a8", ARM_ARCH_V7A_SEC
,
24912 ARM_FEATURE_COPROC (FPU_VFP_V3
24913 | FPU_NEON_EXT_V1
),
24915 ARM_CPU_OPT ("cortex-a9", ARM_ARCH_V7A_MP_SEC
,
24916 ARM_FEATURE_COPROC (FPU_VFP_V3
24917 | FPU_NEON_EXT_V1
),
24919 ARM_CPU_OPT ("cortex-a12", ARM_ARCH_V7VE
, FPU_ARCH_NEON_VFP_V4
,
24921 ARM_CPU_OPT ("cortex-a15", ARM_ARCH_V7VE
, FPU_ARCH_NEON_VFP_V4
,
24923 ARM_CPU_OPT ("cortex-a17", ARM_ARCH_V7VE
, FPU_ARCH_NEON_VFP_V4
,
24925 ARM_CPU_OPT ("cortex-a32", ARM_ARCH_V8A
, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
,
24927 ARM_CPU_OPT ("cortex-a35", ARM_ARCH_V8A
, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
,
24929 ARM_CPU_OPT ("cortex-a53", ARM_ARCH_V8A
, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
,
24931 ARM_CPU_OPT ("cortex-a57", ARM_ARCH_V8A
, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
,
24933 ARM_CPU_OPT ("cortex-a72", ARM_ARCH_V8A
, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
,
24935 ARM_CPU_OPT ("cortex-r4", ARM_ARCH_V7R
, FPU_NONE
, "Cortex-R4"),
24936 ARM_CPU_OPT ("cortex-r4f", ARM_ARCH_V7R
, FPU_ARCH_VFP_V3D16
,
24938 ARM_CPU_OPT ("cortex-r5", ARM_ARCH_V7R_IDIV
,
24939 FPU_NONE
, "Cortex-R5"),
24940 ARM_CPU_OPT ("cortex-r7", ARM_ARCH_V7R_IDIV
,
24941 FPU_ARCH_VFP_V3D16
,
24943 ARM_CPU_OPT ("cortex-m7", ARM_ARCH_V7EM
, FPU_NONE
, "Cortex-M7"),
24944 ARM_CPU_OPT ("cortex-m4", ARM_ARCH_V7EM
, FPU_NONE
, "Cortex-M4"),
24945 ARM_CPU_OPT ("cortex-m3", ARM_ARCH_V7M
, FPU_NONE
, "Cortex-M3"),
24946 ARM_CPU_OPT ("cortex-m1", ARM_ARCH_V6SM
, FPU_NONE
, "Cortex-M1"),
24947 ARM_CPU_OPT ("cortex-m0", ARM_ARCH_V6SM
, FPU_NONE
, "Cortex-M0"),
24948 ARM_CPU_OPT ("cortex-m0plus", ARM_ARCH_V6SM
, FPU_NONE
, "Cortex-M0+"),
24949 ARM_CPU_OPT ("exynos-m1", ARM_ARCH_V8A
, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
,
24952 ARM_CPU_OPT ("qdf24xx", ARM_ARCH_V8A
, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
,
24956 /* ??? XSCALE is really an architecture. */
24957 ARM_CPU_OPT ("xscale", ARM_ARCH_XSCALE
, FPU_ARCH_VFP_V2
, NULL
),
24958 /* ??? iwmmxt is not a processor. */
24959 ARM_CPU_OPT ("iwmmxt", ARM_ARCH_IWMMXT
, FPU_ARCH_VFP_V2
, NULL
),
24960 ARM_CPU_OPT ("iwmmxt2", ARM_ARCH_IWMMXT2
,FPU_ARCH_VFP_V2
, NULL
),
24961 ARM_CPU_OPT ("i80200", ARM_ARCH_XSCALE
, FPU_ARCH_VFP_V2
, NULL
),
24963 ARM_CPU_OPT ("ep9312", ARM_FEATURE_LOW (ARM_AEXT_V4T
, ARM_CEXT_MAVERICK
),
24964 FPU_ARCH_MAVERICK
, "ARM920T"),
24965 /* Marvell processors. */
24966 ARM_CPU_OPT ("marvell-pj4", ARM_FEATURE_CORE (ARM_AEXT_V7A
| ARM_EXT_MP
24968 ARM_EXT2_V6T2_V8M
),
24969 FPU_ARCH_VFP_V3D16
, NULL
),
24970 ARM_CPU_OPT ("marvell-whitney", ARM_FEATURE_CORE (ARM_AEXT_V7A
| ARM_EXT_MP
24972 ARM_EXT2_V6T2_V8M
),
24973 FPU_ARCH_NEON_VFP_V4
, NULL
),
24974 /* APM X-Gene family. */
24975 ARM_CPU_OPT ("xgene1", ARM_ARCH_V8A
, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
,
24977 ARM_CPU_OPT ("xgene2", ARM_ARCH_V8A
, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
,
24980 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
, NULL
}
24984 struct arm_arch_option_table
24988 const arm_feature_set value
;
24989 const arm_feature_set default_fpu
;
24992 /* This list should, at a minimum, contain all the architecture names
24993 recognized by GCC. */
24994 #define ARM_ARCH_OPT(N, V, DF) { N, sizeof (N) - 1, V, DF }
24995 static const struct arm_arch_option_table arm_archs
[] =
24997 ARM_ARCH_OPT ("all", ARM_ANY
, FPU_ARCH_FPA
),
24998 ARM_ARCH_OPT ("armv1", ARM_ARCH_V1
, FPU_ARCH_FPA
),
24999 ARM_ARCH_OPT ("armv2", ARM_ARCH_V2
, FPU_ARCH_FPA
),
25000 ARM_ARCH_OPT ("armv2a", ARM_ARCH_V2S
, FPU_ARCH_FPA
),
25001 ARM_ARCH_OPT ("armv2s", ARM_ARCH_V2S
, FPU_ARCH_FPA
),
25002 ARM_ARCH_OPT ("armv3", ARM_ARCH_V3
, FPU_ARCH_FPA
),
25003 ARM_ARCH_OPT ("armv3m", ARM_ARCH_V3M
, FPU_ARCH_FPA
),
25004 ARM_ARCH_OPT ("armv4", ARM_ARCH_V4
, FPU_ARCH_FPA
),
25005 ARM_ARCH_OPT ("armv4xm", ARM_ARCH_V4xM
, FPU_ARCH_FPA
),
25006 ARM_ARCH_OPT ("armv4t", ARM_ARCH_V4T
, FPU_ARCH_FPA
),
25007 ARM_ARCH_OPT ("armv4txm", ARM_ARCH_V4TxM
, FPU_ARCH_FPA
),
25008 ARM_ARCH_OPT ("armv5", ARM_ARCH_V5
, FPU_ARCH_VFP
),
25009 ARM_ARCH_OPT ("armv5t", ARM_ARCH_V5T
, FPU_ARCH_VFP
),
25010 ARM_ARCH_OPT ("armv5txm", ARM_ARCH_V5TxM
, FPU_ARCH_VFP
),
25011 ARM_ARCH_OPT ("armv5te", ARM_ARCH_V5TE
, FPU_ARCH_VFP
),
25012 ARM_ARCH_OPT ("armv5texp", ARM_ARCH_V5TExP
, FPU_ARCH_VFP
),
25013 ARM_ARCH_OPT ("armv5tej", ARM_ARCH_V5TEJ
, FPU_ARCH_VFP
),
25014 ARM_ARCH_OPT ("armv6", ARM_ARCH_V6
, FPU_ARCH_VFP
),
25015 ARM_ARCH_OPT ("armv6j", ARM_ARCH_V6
, FPU_ARCH_VFP
),
25016 ARM_ARCH_OPT ("armv6k", ARM_ARCH_V6K
, FPU_ARCH_VFP
),
25017 ARM_ARCH_OPT ("armv6z", ARM_ARCH_V6Z
, FPU_ARCH_VFP
),
25018 /* The official spelling of this variant is ARMv6KZ, the name "armv6zk" is
25019 kept to preserve existing behaviour. */
25020 ARM_ARCH_OPT ("armv6kz", ARM_ARCH_V6KZ
, FPU_ARCH_VFP
),
25021 ARM_ARCH_OPT ("armv6zk", ARM_ARCH_V6KZ
, FPU_ARCH_VFP
),
25022 ARM_ARCH_OPT ("armv6t2", ARM_ARCH_V6T2
, FPU_ARCH_VFP
),
25023 ARM_ARCH_OPT ("armv6kt2", ARM_ARCH_V6KT2
, FPU_ARCH_VFP
),
25024 ARM_ARCH_OPT ("armv6zt2", ARM_ARCH_V6ZT2
, FPU_ARCH_VFP
),
25025 /* The official spelling of this variant is ARMv6KZ, the name "armv6zkt2" is
25026 kept to preserve existing behaviour. */
25027 ARM_ARCH_OPT ("armv6kzt2", ARM_ARCH_V6KZT2
, FPU_ARCH_VFP
),
25028 ARM_ARCH_OPT ("armv6zkt2", ARM_ARCH_V6KZT2
, FPU_ARCH_VFP
),
25029 ARM_ARCH_OPT ("armv6-m", ARM_ARCH_V6M
, FPU_ARCH_VFP
),
25030 ARM_ARCH_OPT ("armv6s-m", ARM_ARCH_V6SM
, FPU_ARCH_VFP
),
25031 ARM_ARCH_OPT ("armv7", ARM_ARCH_V7
, FPU_ARCH_VFP
),
25032 /* The official spelling of the ARMv7 profile variants is the dashed form.
25033 Accept the non-dashed form for compatibility with old toolchains. */
25034 ARM_ARCH_OPT ("armv7a", ARM_ARCH_V7A
, FPU_ARCH_VFP
),
25035 ARM_ARCH_OPT ("armv7ve", ARM_ARCH_V7VE
, FPU_ARCH_VFP
),
25036 ARM_ARCH_OPT ("armv7r", ARM_ARCH_V7R
, FPU_ARCH_VFP
),
25037 ARM_ARCH_OPT ("armv7m", ARM_ARCH_V7M
, FPU_ARCH_VFP
),
25038 ARM_ARCH_OPT ("armv7-a", ARM_ARCH_V7A
, FPU_ARCH_VFP
),
25039 ARM_ARCH_OPT ("armv7-r", ARM_ARCH_V7R
, FPU_ARCH_VFP
),
25040 ARM_ARCH_OPT ("armv7-m", ARM_ARCH_V7M
, FPU_ARCH_VFP
),
25041 ARM_ARCH_OPT ("armv7e-m", ARM_ARCH_V7EM
, FPU_ARCH_VFP
),
25042 ARM_ARCH_OPT ("armv8-m.base", ARM_ARCH_V8M_BASE
, FPU_ARCH_VFP
),
25043 ARM_ARCH_OPT ("armv8-m.main", ARM_ARCH_V8M_MAIN
, FPU_ARCH_VFP
),
25044 ARM_ARCH_OPT ("armv8-a", ARM_ARCH_V8A
, FPU_ARCH_VFP
),
25045 ARM_ARCH_OPT ("armv8.1-a", ARM_ARCH_V8_1A
, FPU_ARCH_VFP
),
25046 ARM_ARCH_OPT ("armv8.2-a", ARM_ARCH_V8_2A
, FPU_ARCH_VFP
),
25047 ARM_ARCH_OPT ("xscale", ARM_ARCH_XSCALE
, FPU_ARCH_VFP
),
25048 ARM_ARCH_OPT ("iwmmxt", ARM_ARCH_IWMMXT
, FPU_ARCH_VFP
),
25049 ARM_ARCH_OPT ("iwmmxt2", ARM_ARCH_IWMMXT2
,FPU_ARCH_VFP
),
25050 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
25052 #undef ARM_ARCH_OPT
25054 /* ISA extensions in the co-processor and main instruction set space. */
25055 struct arm_option_extension_value_table
25059 const arm_feature_set merge_value
;
25060 const arm_feature_set clear_value
;
25061 const arm_feature_set allowed_archs
;
25064 /* The following table must be in alphabetical order with a NULL last entry.
25066 #define ARM_EXT_OPT(N, M, C, AA) { N, sizeof (N) - 1, M, C, AA }
25067 static const struct arm_option_extension_value_table arm_extensions
[] =
25069 ARM_EXT_OPT ("crc", ARCH_CRC_ARMV8
, ARM_FEATURE_COPROC (CRC_EXT_ARMV8
),
25070 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
)),
25071 ARM_EXT_OPT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
,
25072 ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8
),
25073 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
)),
25074 ARM_EXT_OPT ("fp", FPU_ARCH_VFP_ARMV8
, ARM_FEATURE_COPROC (FPU_VFP_ARMV8
),
25075 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
)),
25076 ARM_EXT_OPT ("fp16", ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
),
25077 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
),
25079 ARM_EXT_OPT ("idiv", ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV
| ARM_EXT_DIV
),
25080 ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV
| ARM_EXT_DIV
),
25081 ARM_FEATURE_CORE_LOW (ARM_EXT_V7A
| ARM_EXT_V7R
)),
25082 ARM_EXT_OPT ("iwmmxt",ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT
),
25083 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT
), ARM_ANY
),
25084 ARM_EXT_OPT ("iwmmxt2", ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2
),
25085 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2
), ARM_ANY
),
25086 ARM_EXT_OPT ("maverick", ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK
),
25087 ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK
), ARM_ANY
),
25088 ARM_EXT_OPT ("mp", ARM_FEATURE_CORE_LOW (ARM_EXT_MP
),
25089 ARM_FEATURE_CORE_LOW (ARM_EXT_MP
),
25090 ARM_FEATURE_CORE_LOW (ARM_EXT_V7A
| ARM_EXT_V7R
)),
25091 ARM_EXT_OPT ("simd", FPU_ARCH_NEON_VFP_ARMV8
,
25092 ARM_FEATURE_COPROC (FPU_NEON_ARMV8
),
25093 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
)),
25094 ARM_EXT_OPT ("os", ARM_FEATURE_CORE_LOW (ARM_EXT_OS
),
25095 ARM_FEATURE_CORE_LOW (ARM_EXT_OS
),
25096 ARM_FEATURE_CORE_LOW (ARM_EXT_V6M
)),
25097 ARM_EXT_OPT ("pan", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PAN
),
25098 ARM_FEATURE (ARM_EXT_V8
, ARM_EXT2_PAN
, 0),
25099 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
)),
25100 ARM_EXT_OPT ("sec", ARM_FEATURE_CORE_LOW (ARM_EXT_SEC
),
25101 ARM_FEATURE_CORE_LOW (ARM_EXT_SEC
),
25102 ARM_FEATURE_CORE_LOW (ARM_EXT_V6K
| ARM_EXT_V7A
)),
25103 ARM_EXT_OPT ("virt", ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT
| ARM_EXT_ADIV
25105 ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT
),
25106 ARM_FEATURE_CORE_LOW (ARM_EXT_V7A
)),
25107 ARM_EXT_OPT ("rdma", FPU_ARCH_NEON_VFP_ARMV8
,
25108 ARM_FEATURE_COPROC (FPU_NEON_ARMV8
| FPU_NEON_EXT_RDMA
),
25109 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
)),
25110 ARM_EXT_OPT ("xscale",ARM_FEATURE_COPROC (ARM_CEXT_XSCALE
),
25111 ARM_FEATURE_COPROC (ARM_CEXT_XSCALE
), ARM_ANY
),
25112 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
25116 /* ISA floating-point and Advanced SIMD extensions. */
25117 struct arm_option_fpu_value_table
25120 const arm_feature_set value
;
25123 /* This list should, at a minimum, contain all the fpu names
25124 recognized by GCC. */
25125 static const struct arm_option_fpu_value_table arm_fpus
[] =
25127 {"softfpa", FPU_NONE
},
25128 {"fpe", FPU_ARCH_FPE
},
25129 {"fpe2", FPU_ARCH_FPE
},
25130 {"fpe3", FPU_ARCH_FPA
}, /* Third release supports LFM/SFM. */
25131 {"fpa", FPU_ARCH_FPA
},
25132 {"fpa10", FPU_ARCH_FPA
},
25133 {"fpa11", FPU_ARCH_FPA
},
25134 {"arm7500fe", FPU_ARCH_FPA
},
25135 {"softvfp", FPU_ARCH_VFP
},
25136 {"softvfp+vfp", FPU_ARCH_VFP_V2
},
25137 {"vfp", FPU_ARCH_VFP_V2
},
25138 {"vfp9", FPU_ARCH_VFP_V2
},
25139 {"vfp3", FPU_ARCH_VFP_V3
}, /* For backwards compatbility. */
25140 {"vfp10", FPU_ARCH_VFP_V2
},
25141 {"vfp10-r0", FPU_ARCH_VFP_V1
},
25142 {"vfpxd", FPU_ARCH_VFP_V1xD
},
25143 {"vfpv2", FPU_ARCH_VFP_V2
},
25144 {"vfpv3", FPU_ARCH_VFP_V3
},
25145 {"vfpv3-fp16", FPU_ARCH_VFP_V3_FP16
},
25146 {"vfpv3-d16", FPU_ARCH_VFP_V3D16
},
25147 {"vfpv3-d16-fp16", FPU_ARCH_VFP_V3D16_FP16
},
25148 {"vfpv3xd", FPU_ARCH_VFP_V3xD
},
25149 {"vfpv3xd-fp16", FPU_ARCH_VFP_V3xD_FP16
},
25150 {"arm1020t", FPU_ARCH_VFP_V1
},
25151 {"arm1020e", FPU_ARCH_VFP_V2
},
25152 {"arm1136jfs", FPU_ARCH_VFP_V2
},
25153 {"arm1136jf-s", FPU_ARCH_VFP_V2
},
25154 {"maverick", FPU_ARCH_MAVERICK
},
25155 {"neon", FPU_ARCH_VFP_V3_PLUS_NEON_V1
},
25156 {"neon-fp16", FPU_ARCH_NEON_FP16
},
25157 {"vfpv4", FPU_ARCH_VFP_V4
},
25158 {"vfpv4-d16", FPU_ARCH_VFP_V4D16
},
25159 {"fpv4-sp-d16", FPU_ARCH_VFP_V4_SP_D16
},
25160 {"fpv5-d16", FPU_ARCH_VFP_V5D16
},
25161 {"fpv5-sp-d16", FPU_ARCH_VFP_V5_SP_D16
},
25162 {"neon-vfpv4", FPU_ARCH_NEON_VFP_V4
},
25163 {"fp-armv8", FPU_ARCH_VFP_ARMV8
},
25164 {"neon-fp-armv8", FPU_ARCH_NEON_VFP_ARMV8
},
25165 {"crypto-neon-fp-armv8",
25166 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
},
25167 {"neon-fp-armv8.1", FPU_ARCH_NEON_VFP_ARMV8_1
},
25168 {"crypto-neon-fp-armv8.1",
25169 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_1
},
25170 {NULL
, ARM_ARCH_NONE
}
25173 struct arm_option_value_table
25179 static const struct arm_option_value_table arm_float_abis
[] =
25181 {"hard", ARM_FLOAT_ABI_HARD
},
25182 {"softfp", ARM_FLOAT_ABI_SOFTFP
},
25183 {"soft", ARM_FLOAT_ABI_SOFT
},
25188 /* We only know how to output GNU and ver 4/5 (AAELF) formats. */
25189 static const struct arm_option_value_table arm_eabis
[] =
25191 {"gnu", EF_ARM_EABI_UNKNOWN
},
25192 {"4", EF_ARM_EABI_VER4
},
25193 {"5", EF_ARM_EABI_VER5
},
25198 struct arm_long_option_table
25200 char * option
; /* Substring to match. */
25201 char * help
; /* Help information. */
25202 int (* func
) (char * subopt
); /* Function to decode sub-option. */
25203 char * deprecated
; /* If non-null, print this message. */
25207 arm_parse_extension (char *str
, const arm_feature_set
**opt_p
)
25209 arm_feature_set
*ext_set
= (arm_feature_set
*)
25210 xmalloc (sizeof (arm_feature_set
));
25212 /* We insist on extensions being specified in alphabetical order, and with
25213 extensions being added before being removed. We achieve this by having
25214 the global ARM_EXTENSIONS table in alphabetical order, and using the
25215 ADDING_VALUE variable to indicate whether we are adding an extension (1)
25216 or removing it (0) and only allowing it to change in the order
25218 const struct arm_option_extension_value_table
* opt
= NULL
;
25219 int adding_value
= -1;
25221 /* Copy the feature set, so that we can modify it. */
25222 *ext_set
= **opt_p
;
25225 while (str
!= NULL
&& *str
!= 0)
25232 as_bad (_("invalid architectural extension"));
25237 ext
= strchr (str
, '+');
25242 len
= strlen (str
);
25244 if (len
>= 2 && strncmp (str
, "no", 2) == 0)
25246 if (adding_value
!= 0)
25249 opt
= arm_extensions
;
25257 if (adding_value
== -1)
25260 opt
= arm_extensions
;
25262 else if (adding_value
!= 1)
25264 as_bad (_("must specify extensions to add before specifying "
25265 "those to remove"));
25272 as_bad (_("missing architectural extension"));
25276 gas_assert (adding_value
!= -1);
25277 gas_assert (opt
!= NULL
);
25279 /* Scan over the options table trying to find an exact match. */
25280 for (; opt
->name
!= NULL
; opt
++)
25281 if (opt
->name_len
== len
&& strncmp (opt
->name
, str
, len
) == 0)
25283 /* Check we can apply the extension to this architecture. */
25284 if (!ARM_CPU_HAS_FEATURE (*ext_set
, opt
->allowed_archs
))
25286 as_bad (_("extension does not apply to the base architecture"));
25290 /* Add or remove the extension. */
25292 ARM_MERGE_FEATURE_SETS (*ext_set
, *ext_set
, opt
->merge_value
);
25294 ARM_CLEAR_FEATURE (*ext_set
, *ext_set
, opt
->clear_value
);
25299 if (opt
->name
== NULL
)
25301 /* Did we fail to find an extension because it wasn't specified in
25302 alphabetical order, or because it does not exist? */
25304 for (opt
= arm_extensions
; opt
->name
!= NULL
; opt
++)
25305 if (opt
->name_len
== len
&& strncmp (opt
->name
, str
, len
) == 0)
25308 if (opt
->name
== NULL
)
25309 as_bad (_("unknown architectural extension `%s'"), str
);
25311 as_bad (_("architectural extensions must be specified in "
25312 "alphabetical order"));
25318 /* We should skip the extension we've just matched the next time
25330 arm_parse_cpu (char *str
)
25332 const struct arm_cpu_option_table
*opt
;
25333 char *ext
= strchr (str
, '+');
25339 len
= strlen (str
);
25343 as_bad (_("missing cpu name `%s'"), str
);
25347 for (opt
= arm_cpus
; opt
->name
!= NULL
; opt
++)
25348 if (opt
->name_len
== len
&& strncmp (opt
->name
, str
, len
) == 0)
25350 mcpu_cpu_opt
= &opt
->value
;
25351 mcpu_fpu_opt
= &opt
->default_fpu
;
25352 if (opt
->canonical_name
)
25354 gas_assert (sizeof selected_cpu_name
> strlen (opt
->canonical_name
));
25355 strcpy (selected_cpu_name
, opt
->canonical_name
);
25361 if (len
>= sizeof selected_cpu_name
)
25362 len
= (sizeof selected_cpu_name
) - 1;
25364 for (i
= 0; i
< len
; i
++)
25365 selected_cpu_name
[i
] = TOUPPER (opt
->name
[i
]);
25366 selected_cpu_name
[i
] = 0;
25370 return arm_parse_extension (ext
, &mcpu_cpu_opt
);
25375 as_bad (_("unknown cpu `%s'"), str
);
25380 arm_parse_arch (char *str
)
25382 const struct arm_arch_option_table
*opt
;
25383 char *ext
= strchr (str
, '+');
25389 len
= strlen (str
);
25393 as_bad (_("missing architecture name `%s'"), str
);
25397 for (opt
= arm_archs
; opt
->name
!= NULL
; opt
++)
25398 if (opt
->name_len
== len
&& strncmp (opt
->name
, str
, len
) == 0)
25400 march_cpu_opt
= &opt
->value
;
25401 march_fpu_opt
= &opt
->default_fpu
;
25402 strcpy (selected_cpu_name
, opt
->name
);
25405 return arm_parse_extension (ext
, &march_cpu_opt
);
25410 as_bad (_("unknown architecture `%s'\n"), str
);
25415 arm_parse_fpu (char * str
)
25417 const struct arm_option_fpu_value_table
* opt
;
25419 for (opt
= arm_fpus
; opt
->name
!= NULL
; opt
++)
25420 if (streq (opt
->name
, str
))
25422 mfpu_opt
= &opt
->value
;
25426 as_bad (_("unknown floating point format `%s'\n"), str
);
25431 arm_parse_float_abi (char * str
)
25433 const struct arm_option_value_table
* opt
;
25435 for (opt
= arm_float_abis
; opt
->name
!= NULL
; opt
++)
25436 if (streq (opt
->name
, str
))
25438 mfloat_abi_opt
= opt
->value
;
25442 as_bad (_("unknown floating point abi `%s'\n"), str
);
25448 arm_parse_eabi (char * str
)
25450 const struct arm_option_value_table
*opt
;
25452 for (opt
= arm_eabis
; opt
->name
!= NULL
; opt
++)
25453 if (streq (opt
->name
, str
))
25455 meabi_flags
= opt
->value
;
25458 as_bad (_("unknown EABI `%s'\n"), str
);
25464 arm_parse_it_mode (char * str
)
25466 bfd_boolean ret
= TRUE
;
25468 if (streq ("arm", str
))
25469 implicit_it_mode
= IMPLICIT_IT_MODE_ARM
;
25470 else if (streq ("thumb", str
))
25471 implicit_it_mode
= IMPLICIT_IT_MODE_THUMB
;
25472 else if (streq ("always", str
))
25473 implicit_it_mode
= IMPLICIT_IT_MODE_ALWAYS
;
25474 else if (streq ("never", str
))
25475 implicit_it_mode
= IMPLICIT_IT_MODE_NEVER
;
25478 as_bad (_("unknown implicit IT mode `%s', should be "\
25479 "arm, thumb, always, or never."), str
);
25487 arm_ccs_mode (char * unused ATTRIBUTE_UNUSED
)
25489 codecomposer_syntax
= TRUE
;
25490 arm_comment_chars
[0] = ';';
25491 arm_line_separator_chars
[0] = 0;
25495 struct arm_long_option_table arm_long_opts
[] =
25497 {"mcpu=", N_("<cpu name>\t assemble for CPU <cpu name>"),
25498 arm_parse_cpu
, NULL
},
25499 {"march=", N_("<arch name>\t assemble for architecture <arch name>"),
25500 arm_parse_arch
, NULL
},
25501 {"mfpu=", N_("<fpu name>\t assemble for FPU architecture <fpu name>"),
25502 arm_parse_fpu
, NULL
},
25503 {"mfloat-abi=", N_("<abi>\t assemble for floating point ABI <abi>"),
25504 arm_parse_float_abi
, NULL
},
25506 {"meabi=", N_("<ver>\t\t assemble for eabi version <ver>"),
25507 arm_parse_eabi
, NULL
},
25509 {"mimplicit-it=", N_("<mode>\t controls implicit insertion of IT instructions"),
25510 arm_parse_it_mode
, NULL
},
25511 {"mccs", N_("\t\t\t TI CodeComposer Studio syntax compatibility mode"),
25512 arm_ccs_mode
, NULL
},
25513 {NULL
, NULL
, 0, NULL
}
25517 md_parse_option (int c
, char * arg
)
25519 struct arm_option_table
*opt
;
25520 const struct arm_legacy_option_table
*fopt
;
25521 struct arm_long_option_table
*lopt
;
25527 target_big_endian
= 1;
25533 target_big_endian
= 0;
25537 case OPTION_FIX_V4BX
:
25542 /* Listing option. Just ignore these, we don't support additional
25547 for (opt
= arm_opts
; opt
->option
!= NULL
; opt
++)
25549 if (c
== opt
->option
[0]
25550 && ((arg
== NULL
&& opt
->option
[1] == 0)
25551 || streq (arg
, opt
->option
+ 1)))
25553 /* If the option is deprecated, tell the user. */
25554 if (warn_on_deprecated
&& opt
->deprecated
!= NULL
)
25555 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c
,
25556 arg
? arg
: "", _(opt
->deprecated
));
25558 if (opt
->var
!= NULL
)
25559 *opt
->var
= opt
->value
;
25565 for (fopt
= arm_legacy_opts
; fopt
->option
!= NULL
; fopt
++)
25567 if (c
== fopt
->option
[0]
25568 && ((arg
== NULL
&& fopt
->option
[1] == 0)
25569 || streq (arg
, fopt
->option
+ 1)))
25571 /* If the option is deprecated, tell the user. */
25572 if (warn_on_deprecated
&& fopt
->deprecated
!= NULL
)
25573 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c
,
25574 arg
? arg
: "", _(fopt
->deprecated
));
25576 if (fopt
->var
!= NULL
)
25577 *fopt
->var
= &fopt
->value
;
25583 for (lopt
= arm_long_opts
; lopt
->option
!= NULL
; lopt
++)
25585 /* These options are expected to have an argument. */
25586 if (c
== lopt
->option
[0]
25588 && strncmp (arg
, lopt
->option
+ 1,
25589 strlen (lopt
->option
+ 1)) == 0)
25591 /* If the option is deprecated, tell the user. */
25592 if (warn_on_deprecated
&& lopt
->deprecated
!= NULL
)
25593 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c
, arg
,
25594 _(lopt
->deprecated
));
25596 /* Call the sup-option parser. */
25597 return lopt
->func (arg
+ strlen (lopt
->option
) - 1);
25608 md_show_usage (FILE * fp
)
25610 struct arm_option_table
*opt
;
25611 struct arm_long_option_table
*lopt
;
25613 fprintf (fp
, _(" ARM-specific assembler options:\n"));
25615 for (opt
= arm_opts
; opt
->option
!= NULL
; opt
++)
25616 if (opt
->help
!= NULL
)
25617 fprintf (fp
, " -%-23s%s\n", opt
->option
, _(opt
->help
));
25619 for (lopt
= arm_long_opts
; lopt
->option
!= NULL
; lopt
++)
25620 if (lopt
->help
!= NULL
)
25621 fprintf (fp
, " -%s%s\n", lopt
->option
, _(lopt
->help
));
25625 -EB assemble code for a big-endian cpu\n"));
25630 -EL assemble code for a little-endian cpu\n"));
25634 --fix-v4bx Allow BX in ARMv4 code\n"));
25642 arm_feature_set flags
;
25643 } cpu_arch_ver_table
;
25645 /* Mapping from CPU features to EABI CPU arch values. As a general rule, table
25646 must be sorted least features first but some reordering is needed, eg. for
25647 Thumb-2 instructions to be detected as coming from ARMv6T2. */
25648 static const cpu_arch_ver_table cpu_arch_ver
[] =
25654 {4, ARM_ARCH_V5TE
},
25655 {5, ARM_ARCH_V5TEJ
},
25659 {11, ARM_ARCH_V6M
},
25660 {12, ARM_ARCH_V6SM
},
25661 {8, ARM_ARCH_V6T2
},
25662 {10, ARM_ARCH_V7VE
},
25663 {10, ARM_ARCH_V7R
},
25664 {10, ARM_ARCH_V7M
},
25665 {14, ARM_ARCH_V8A
},
25666 {16, ARM_ARCH_V8M_BASE
},
25667 {17, ARM_ARCH_V8M_MAIN
},
25671 /* Set an attribute if it has not already been set by the user. */
25673 aeabi_set_attribute_int (int tag
, int value
)
25676 || tag
>= NUM_KNOWN_OBJ_ATTRIBUTES
25677 || !attributes_set_explicitly
[tag
])
25678 bfd_elf_add_proc_attr_int (stdoutput
, tag
, value
);
25682 aeabi_set_attribute_string (int tag
, const char *value
)
25685 || tag
>= NUM_KNOWN_OBJ_ATTRIBUTES
25686 || !attributes_set_explicitly
[tag
])
25687 bfd_elf_add_proc_attr_string (stdoutput
, tag
, value
);
25690 /* Set the public EABI object attributes. */
25692 aeabi_set_public_attributes (void)
25697 int fp16_optional
= 0;
25698 arm_feature_set flags
;
25699 arm_feature_set tmp
;
25700 arm_feature_set arm_arch_v8m_base
= ARM_ARCH_V8M_BASE
;
25701 const cpu_arch_ver_table
*p
;
25703 /* Choose the architecture based on the capabilities of the requested cpu
25704 (if any) and/or the instructions actually used. */
25705 ARM_MERGE_FEATURE_SETS (flags
, arm_arch_used
, thumb_arch_used
);
25706 ARM_MERGE_FEATURE_SETS (flags
, flags
, *mfpu_opt
);
25707 ARM_MERGE_FEATURE_SETS (flags
, flags
, selected_cpu
);
25709 if (ARM_CPU_HAS_FEATURE (arm_arch_used
, arm_arch_any
))
25710 ARM_MERGE_FEATURE_SETS (flags
, flags
, arm_ext_v1
);
25712 if (ARM_CPU_HAS_FEATURE (thumb_arch_used
, arm_arch_any
))
25713 ARM_MERGE_FEATURE_SETS (flags
, flags
, arm_ext_v4t
);
25715 selected_cpu
= flags
;
25717 /* Allow the user to override the reported architecture. */
25720 ARM_CLEAR_FEATURE (flags
, flags
, arm_arch_any
);
25721 ARM_MERGE_FEATURE_SETS (flags
, flags
, *object_arch
);
25724 /* We need to make sure that the attributes do not identify us as v6S-M
25725 when the only v6S-M feature in use is the Operating System Extensions. */
25726 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_os
))
25727 if (!ARM_CPU_HAS_FEATURE (flags
, arm_arch_v6m_only
))
25728 ARM_CLEAR_FEATURE (flags
, flags
, arm_ext_os
);
25732 for (p
= cpu_arch_ver
; p
->val
; p
++)
25734 if (ARM_CPU_HAS_FEATURE (tmp
, p
->flags
))
25737 ARM_CLEAR_FEATURE (tmp
, tmp
, p
->flags
);
25741 /* The table lookup above finds the last architecture to contribute
25742 a new feature. Unfortunately, Tag13 is a subset of the union of
25743 v6T2 and v7-M, so it is never seen as contributing a new feature.
25744 We can not search for the last entry which is entirely used,
25745 because if no CPU is specified we build up only those flags
25746 actually used. Perhaps we should separate out the specified
25747 and implicit cases. Avoid taking this path for -march=all by
25748 checking for contradictory v7-A / v7-M features. */
25749 if (arch
== TAG_CPU_ARCH_V7
25750 && !ARM_CPU_HAS_FEATURE (flags
, arm_ext_v7a
)
25751 && ARM_CPU_HAS_FEATURE (flags
, arm_ext_v7m
)
25752 && ARM_CPU_HAS_FEATURE (flags
, arm_ext_v6_dsp
))
25753 arch
= TAG_CPU_ARCH_V7E_M
;
25755 ARM_CLEAR_FEATURE (tmp
, flags
, arm_arch_v8m_base
);
25756 if (arch
== TAG_CPU_ARCH_V8M_BASE
&& ARM_CPU_HAS_FEATURE (tmp
, arm_arch_any
))
25757 arch
= TAG_CPU_ARCH_V8M_MAIN
;
25759 /* In cpu_arch_ver ARMv8-A is before ARMv8-M for atomics to be detected as
25760 coming from ARMv8-A. However, since ARMv8-A has more instructions than
25761 ARMv8-M, -march=all must be detected as ARMv8-A. */
25762 if (arch
== TAG_CPU_ARCH_V8M_MAIN
25763 && ARM_FEATURE_CORE_EQUAL (selected_cpu
, arm_arch_any
))
25764 arch
= TAG_CPU_ARCH_V8
;
25766 /* Tag_CPU_name. */
25767 if (selected_cpu_name
[0])
25771 q
= selected_cpu_name
;
25772 if (strncmp (q
, "armv", 4) == 0)
25777 for (i
= 0; q
[i
]; i
++)
25778 q
[i
] = TOUPPER (q
[i
]);
25780 aeabi_set_attribute_string (Tag_CPU_name
, q
);
25783 /* Tag_CPU_arch. */
25784 aeabi_set_attribute_int (Tag_CPU_arch
, arch
);
25786 /* Tag_CPU_arch_profile. */
25787 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_v7a
)
25788 || ARM_CPU_HAS_FEATURE (flags
, arm_ext_v8
)
25789 || (ARM_CPU_HAS_FEATURE (flags
, arm_ext_atomics
)
25790 && !ARM_CPU_HAS_FEATURE (flags
, arm_ext_v8m
)))
25792 else if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_v7r
))
25794 else if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_m
))
25799 if (profile
!= '\0')
25800 aeabi_set_attribute_int (Tag_CPU_arch_profile
, profile
);
25802 /* Tag_ARM_ISA_use. */
25803 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_v1
)
25805 aeabi_set_attribute_int (Tag_ARM_ISA_use
, 1);
25807 /* Tag_THUMB_ISA_use. */
25808 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_v4t
)
25813 if (!ARM_CPU_HAS_FEATURE (flags
, arm_ext_v8
)
25814 && ARM_CPU_HAS_FEATURE (flags
, arm_ext_v8m
))
25816 else if (ARM_CPU_HAS_FEATURE (flags
, arm_arch_t2
))
25820 aeabi_set_attribute_int (Tag_THUMB_ISA_use
, thumb_isa_use
);
25823 /* Tag_VFP_arch. */
25824 if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_armv8xd
))
25825 aeabi_set_attribute_int (Tag_VFP_arch
,
25826 ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_d32
)
25828 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_fma
))
25829 aeabi_set_attribute_int (Tag_VFP_arch
,
25830 ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_d32
)
25832 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_d32
))
25835 aeabi_set_attribute_int (Tag_VFP_arch
, 3);
25837 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v3xd
))
25839 aeabi_set_attribute_int (Tag_VFP_arch
, 4);
25842 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v2
))
25843 aeabi_set_attribute_int (Tag_VFP_arch
, 2);
25844 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v1
)
25845 || ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v1xd
))
25846 aeabi_set_attribute_int (Tag_VFP_arch
, 1);
25848 /* Tag_ABI_HardFP_use. */
25849 if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v1xd
)
25850 && !ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v1
))
25851 aeabi_set_attribute_int (Tag_ABI_HardFP_use
, 1);
25853 /* Tag_WMMX_arch. */
25854 if (ARM_CPU_HAS_FEATURE (flags
, arm_cext_iwmmxt2
))
25855 aeabi_set_attribute_int (Tag_WMMX_arch
, 2);
25856 else if (ARM_CPU_HAS_FEATURE (flags
, arm_cext_iwmmxt
))
25857 aeabi_set_attribute_int (Tag_WMMX_arch
, 1);
25859 /* Tag_Advanced_SIMD_arch (formerly Tag_NEON_arch). */
25860 if (ARM_CPU_HAS_FEATURE (flags
, fpu_neon_ext_armv8
))
25861 aeabi_set_attribute_int (Tag_Advanced_SIMD_arch
, 3);
25862 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_neon_ext_v1
))
25864 if (ARM_CPU_HAS_FEATURE (flags
, fpu_neon_ext_fma
))
25866 aeabi_set_attribute_int (Tag_Advanced_SIMD_arch
, 2);
25870 aeabi_set_attribute_int (Tag_Advanced_SIMD_arch
, 1);
25875 /* Tag_VFP_HP_extension (formerly Tag_NEON_FP16_arch). */
25876 if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_fp16
) && fp16_optional
)
25877 aeabi_set_attribute_int (Tag_VFP_HP_extension
, 1);
25881 We set Tag_DIV_use to two when integer divide instructions have been used
25882 in ARM state, or when Thumb integer divide instructions have been used,
25883 but we have no architecture profile set, nor have we any ARM instructions.
25885 For ARMv8-A and ARMv8-M we set the tag to 0 as integer divide is implied
25886 by the base architecture.
25888 For new architectures we will have to check these tests. */
25889 gas_assert (arch
<= TAG_CPU_ARCH_V8
25890 || (arch
>= TAG_CPU_ARCH_V8M_BASE
25891 && arch
<= TAG_CPU_ARCH_V8M_MAIN
));
25892 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_v8
)
25893 || ARM_CPU_HAS_FEATURE (flags
, arm_ext_v8m
))
25894 aeabi_set_attribute_int (Tag_DIV_use
, 0);
25895 else if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_adiv
)
25896 || (profile
== '\0'
25897 && ARM_CPU_HAS_FEATURE (flags
, arm_ext_div
)
25898 && !ARM_CPU_HAS_FEATURE (arm_arch_used
, arm_arch_any
)))
25899 aeabi_set_attribute_int (Tag_DIV_use
, 2);
25901 /* Tag_MP_extension_use. */
25902 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_mp
))
25903 aeabi_set_attribute_int (Tag_MPextension_use
, 1);
25905 /* Tag Virtualization_use. */
25906 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_sec
))
25908 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_virt
))
25911 aeabi_set_attribute_int (Tag_Virtualization_use
, virt_sec
);
25914 /* Add the default contents for the .ARM.attributes section. */
25918 if (EF_ARM_EABI_VERSION (meabi_flags
) < EF_ARM_EABI_VER4
)
25921 aeabi_set_public_attributes ();
25923 #endif /* OBJ_ELF */
25926 /* Parse a .cpu directive. */
25929 s_arm_cpu (int ignored ATTRIBUTE_UNUSED
)
25931 const struct arm_cpu_option_table
*opt
;
25935 name
= input_line_pointer
;
25936 while (*input_line_pointer
&& !ISSPACE (*input_line_pointer
))
25937 input_line_pointer
++;
25938 saved_char
= *input_line_pointer
;
25939 *input_line_pointer
= 0;
25941 /* Skip the first "all" entry. */
25942 for (opt
= arm_cpus
+ 1; opt
->name
!= NULL
; opt
++)
25943 if (streq (opt
->name
, name
))
25945 mcpu_cpu_opt
= &opt
->value
;
25946 selected_cpu
= opt
->value
;
25947 if (opt
->canonical_name
)
25948 strcpy (selected_cpu_name
, opt
->canonical_name
);
25952 for (i
= 0; opt
->name
[i
]; i
++)
25953 selected_cpu_name
[i
] = TOUPPER (opt
->name
[i
]);
25955 selected_cpu_name
[i
] = 0;
25957 ARM_MERGE_FEATURE_SETS (cpu_variant
, *mcpu_cpu_opt
, *mfpu_opt
);
25958 *input_line_pointer
= saved_char
;
25959 demand_empty_rest_of_line ();
25962 as_bad (_("unknown cpu `%s'"), name
);
25963 *input_line_pointer
= saved_char
;
25964 ignore_rest_of_line ();
25968 /* Parse a .arch directive. */
25971 s_arm_arch (int ignored ATTRIBUTE_UNUSED
)
25973 const struct arm_arch_option_table
*opt
;
25977 name
= input_line_pointer
;
25978 while (*input_line_pointer
&& !ISSPACE (*input_line_pointer
))
25979 input_line_pointer
++;
25980 saved_char
= *input_line_pointer
;
25981 *input_line_pointer
= 0;
25983 /* Skip the first "all" entry. */
25984 for (opt
= arm_archs
+ 1; opt
->name
!= NULL
; opt
++)
25985 if (streq (opt
->name
, name
))
25987 mcpu_cpu_opt
= &opt
->value
;
25988 selected_cpu
= opt
->value
;
25989 strcpy (selected_cpu_name
, opt
->name
);
25990 ARM_MERGE_FEATURE_SETS (cpu_variant
, *mcpu_cpu_opt
, *mfpu_opt
);
25991 *input_line_pointer
= saved_char
;
25992 demand_empty_rest_of_line ();
25996 as_bad (_("unknown architecture `%s'\n"), name
);
25997 *input_line_pointer
= saved_char
;
25998 ignore_rest_of_line ();
26002 /* Parse a .object_arch directive. */
26005 s_arm_object_arch (int ignored ATTRIBUTE_UNUSED
)
26007 const struct arm_arch_option_table
*opt
;
26011 name
= input_line_pointer
;
26012 while (*input_line_pointer
&& !ISSPACE (*input_line_pointer
))
26013 input_line_pointer
++;
26014 saved_char
= *input_line_pointer
;
26015 *input_line_pointer
= 0;
26017 /* Skip the first "all" entry. */
26018 for (opt
= arm_archs
+ 1; opt
->name
!= NULL
; opt
++)
26019 if (streq (opt
->name
, name
))
26021 object_arch
= &opt
->value
;
26022 *input_line_pointer
= saved_char
;
26023 demand_empty_rest_of_line ();
26027 as_bad (_("unknown architecture `%s'\n"), name
);
26028 *input_line_pointer
= saved_char
;
26029 ignore_rest_of_line ();
26032 /* Parse a .arch_extension directive. */
26035 s_arm_arch_extension (int ignored ATTRIBUTE_UNUSED
)
26037 const struct arm_option_extension_value_table
*opt
;
26040 int adding_value
= 1;
26042 name
= input_line_pointer
;
26043 while (*input_line_pointer
&& !ISSPACE (*input_line_pointer
))
26044 input_line_pointer
++;
26045 saved_char
= *input_line_pointer
;
26046 *input_line_pointer
= 0;
26048 if (strlen (name
) >= 2
26049 && strncmp (name
, "no", 2) == 0)
26055 for (opt
= arm_extensions
; opt
->name
!= NULL
; opt
++)
26056 if (streq (opt
->name
, name
))
26058 if (!ARM_CPU_HAS_FEATURE (*mcpu_cpu_opt
, opt
->allowed_archs
))
26060 as_bad (_("architectural extension `%s' is not allowed for the "
26061 "current base architecture"), name
);
26066 ARM_MERGE_FEATURE_SETS (selected_cpu
, selected_cpu
,
26069 ARM_CLEAR_FEATURE (selected_cpu
, selected_cpu
, opt
->clear_value
);
26071 mcpu_cpu_opt
= &selected_cpu
;
26072 ARM_MERGE_FEATURE_SETS (cpu_variant
, *mcpu_cpu_opt
, *mfpu_opt
);
26073 *input_line_pointer
= saved_char
;
26074 demand_empty_rest_of_line ();
26078 if (opt
->name
== NULL
)
26079 as_bad (_("unknown architecture extension `%s'\n"), name
);
26081 *input_line_pointer
= saved_char
;
26082 ignore_rest_of_line ();
26085 /* Parse a .fpu directive. */
26088 s_arm_fpu (int ignored ATTRIBUTE_UNUSED
)
26090 const struct arm_option_fpu_value_table
*opt
;
26094 name
= input_line_pointer
;
26095 while (*input_line_pointer
&& !ISSPACE (*input_line_pointer
))
26096 input_line_pointer
++;
26097 saved_char
= *input_line_pointer
;
26098 *input_line_pointer
= 0;
26100 for (opt
= arm_fpus
; opt
->name
!= NULL
; opt
++)
26101 if (streq (opt
->name
, name
))
26103 mfpu_opt
= &opt
->value
;
26104 ARM_MERGE_FEATURE_SETS (cpu_variant
, *mcpu_cpu_opt
, *mfpu_opt
);
26105 *input_line_pointer
= saved_char
;
26106 demand_empty_rest_of_line ();
26110 as_bad (_("unknown floating point format `%s'\n"), name
);
26111 *input_line_pointer
= saved_char
;
26112 ignore_rest_of_line ();
26115 /* Copy symbol information. */
26118 arm_copy_symbol_attributes (symbolS
*dest
, symbolS
*src
)
26120 ARM_GET_FLAG (dest
) = ARM_GET_FLAG (src
);
26124 /* Given a symbolic attribute NAME, return the proper integer value.
26125 Returns -1 if the attribute is not known. */
26128 arm_convert_symbolic_attribute (const char *name
)
26130 static const struct
26135 attribute_table
[] =
26137 /* When you modify this table you should
26138 also modify the list in doc/c-arm.texi. */
26139 #define T(tag) {#tag, tag}
26140 T (Tag_CPU_raw_name
),
26143 T (Tag_CPU_arch_profile
),
26144 T (Tag_ARM_ISA_use
),
26145 T (Tag_THUMB_ISA_use
),
26149 T (Tag_Advanced_SIMD_arch
),
26150 T (Tag_PCS_config
),
26151 T (Tag_ABI_PCS_R9_use
),
26152 T (Tag_ABI_PCS_RW_data
),
26153 T (Tag_ABI_PCS_RO_data
),
26154 T (Tag_ABI_PCS_GOT_use
),
26155 T (Tag_ABI_PCS_wchar_t
),
26156 T (Tag_ABI_FP_rounding
),
26157 T (Tag_ABI_FP_denormal
),
26158 T (Tag_ABI_FP_exceptions
),
26159 T (Tag_ABI_FP_user_exceptions
),
26160 T (Tag_ABI_FP_number_model
),
26161 T (Tag_ABI_align_needed
),
26162 T (Tag_ABI_align8_needed
),
26163 T (Tag_ABI_align_preserved
),
26164 T (Tag_ABI_align8_preserved
),
26165 T (Tag_ABI_enum_size
),
26166 T (Tag_ABI_HardFP_use
),
26167 T (Tag_ABI_VFP_args
),
26168 T (Tag_ABI_WMMX_args
),
26169 T (Tag_ABI_optimization_goals
),
26170 T (Tag_ABI_FP_optimization_goals
),
26171 T (Tag_compatibility
),
26172 T (Tag_CPU_unaligned_access
),
26173 T (Tag_FP_HP_extension
),
26174 T (Tag_VFP_HP_extension
),
26175 T (Tag_ABI_FP_16bit_format
),
26176 T (Tag_MPextension_use
),
26178 T (Tag_nodefaults
),
26179 T (Tag_also_compatible_with
),
26180 T (Tag_conformance
),
26182 T (Tag_Virtualization_use
),
26183 /* We deliberately do not include Tag_MPextension_use_legacy. */
26191 for (i
= 0; i
< ARRAY_SIZE (attribute_table
); i
++)
26192 if (streq (name
, attribute_table
[i
].name
))
26193 return attribute_table
[i
].tag
;
26199 /* Apply sym value for relocations only in the case that they are for
26200 local symbols in the same segment as the fixup and you have the
26201 respective architectural feature for blx and simple switches. */
26203 arm_apply_sym_value (struct fix
* fixP
, segT this_seg
)
26206 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
)
26207 /* PR 17444: If the local symbol is in a different section then a reloc
26208 will always be generated for it, so applying the symbol value now
26209 will result in a double offset being stored in the relocation. */
26210 && (S_GET_SEGMENT (fixP
->fx_addsy
) == this_seg
)
26211 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
))
26213 switch (fixP
->fx_r_type
)
26215 case BFD_RELOC_ARM_PCREL_BLX
:
26216 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
26217 if (ARM_IS_FUNC (fixP
->fx_addsy
))
26221 case BFD_RELOC_ARM_PCREL_CALL
:
26222 case BFD_RELOC_THUMB_PCREL_BLX
:
26223 if (THUMB_IS_FUNC (fixP
->fx_addsy
))
26234 #endif /* OBJ_ELF */