1 /* tc-arm.c -- Assemble for the ARM
2 Copyright (C) 1994-2017 Free Software Foundation, Inc.
3 Contributed by Richard Earnshaw (rwe@pegasus.esprit.ec.org)
4 Modified by David Taylor (dtaylor@armltd.co.uk)
5 Cirrus coprocessor mods by Aldy Hernandez (aldyh@redhat.com)
6 Cirrus coprocessor fixes by Petko Manolov (petkan@nucleusys.com)
7 Cirrus coprocessor fixes by Vladimir Ivanov (vladitx@nucleusys.com)
9 This file is part of GAS, the GNU Assembler.
11 GAS is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License as published by
13 the Free Software Foundation; either version 3, or (at your option)
16 GAS is distributed in the hope that it will be useful,
17 but WITHOUT ANY WARRANTY; without even the implied warranty of
18 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 GNU General Public License for more details.
21 You should have received a copy of the GNU General Public License
22 along with GAS; see the file COPYING. If not, write to the Free
23 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
30 #include "safe-ctype.h"
33 #include "libiberty.h"
34 #include "opcode/arm.h"
38 #include "dw2gencfi.h"
41 #include "dwarf2dbg.h"
44 /* Must be at least the size of the largest unwind opcode (currently two). */
45 #define ARM_OPCODE_CHUNK_SIZE 8
47 /* This structure holds the unwinding state. */
52 symbolS
* table_entry
;
53 symbolS
* personality_routine
;
54 int personality_index
;
55 /* The segment containing the function. */
58 /* Opcodes generated from this function. */
59 unsigned char * opcodes
;
62 /* The number of bytes pushed to the stack. */
64 /* We don't add stack adjustment opcodes immediately so that we can merge
65 multiple adjustments. We can also omit the final adjustment
66 when using a frame pointer. */
67 offsetT pending_offset
;
68 /* These two fields are set by both unwind_movsp and unwind_setfp. They
69 hold the reg+offset to use when restoring sp from a frame pointer. */
72 /* Nonzero if an unwind_setfp directive has been seen. */
74 /* Nonzero if the last opcode restores sp from fp_reg. */
75 unsigned sp_restored
:1;
80 /* Results from operand parsing worker functions. */
84 PARSE_OPERAND_SUCCESS
,
86 PARSE_OPERAND_FAIL_NO_BACKTRACK
87 } parse_operand_result
;
96 /* Types of processor to assemble for. */
98 /* The code that was here used to select a default CPU depending on compiler
99 pre-defines which were only present when doing native builds, thus
100 changing gas' default behaviour depending upon the build host.
102 If you have a target that requires a default CPU option then you
103 should define CPU_DEFAULT here. */
108 # define FPU_DEFAULT FPU_ARCH_FPA
109 # elif defined (TE_NetBSD)
111 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, but VFP order. */
113 /* Legacy a.out format. */
114 # define FPU_DEFAULT FPU_ARCH_FPA /* Soft-float, but FPA order. */
116 # elif defined (TE_VXWORKS)
117 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, VFP order. */
119 /* For backwards compatibility, default to FPA. */
120 # define FPU_DEFAULT FPU_ARCH_FPA
122 #endif /* ifndef FPU_DEFAULT */
124 #define streq(a, b) (strcmp (a, b) == 0)
126 static arm_feature_set cpu_variant
;
127 static arm_feature_set arm_arch_used
;
128 static arm_feature_set thumb_arch_used
;
130 /* Flags stored in private area of BFD structure. */
131 static int uses_apcs_26
= FALSE
;
132 static int atpcs
= FALSE
;
133 static int support_interwork
= FALSE
;
134 static int uses_apcs_float
= FALSE
;
135 static int pic_code
= FALSE
;
136 static int fix_v4bx
= FALSE
;
137 /* Warn on using deprecated features. */
138 static int warn_on_deprecated
= TRUE
;
140 /* Understand CodeComposer Studio assembly syntax. */
141 bfd_boolean codecomposer_syntax
= FALSE
;
143 /* Variables that we set while parsing command-line options. Once all
144 options have been read we re-process these values to set the real
146 static const arm_feature_set
*legacy_cpu
= NULL
;
147 static const arm_feature_set
*legacy_fpu
= NULL
;
149 static const arm_feature_set
*mcpu_cpu_opt
= NULL
;
150 static arm_feature_set
*dyn_mcpu_ext_opt
= NULL
;
151 static const arm_feature_set
*mcpu_fpu_opt
= NULL
;
152 static const arm_feature_set
*march_cpu_opt
= NULL
;
153 static arm_feature_set
*dyn_march_ext_opt
= NULL
;
154 static const arm_feature_set
*march_fpu_opt
= NULL
;
155 static const arm_feature_set
*mfpu_opt
= NULL
;
156 static const arm_feature_set
*object_arch
= NULL
;
158 /* Constants for known architecture features. */
159 static const arm_feature_set fpu_default
= FPU_DEFAULT
;
160 static const arm_feature_set fpu_arch_vfp_v1 ATTRIBUTE_UNUSED
= FPU_ARCH_VFP_V1
;
161 static const arm_feature_set fpu_arch_vfp_v2
= FPU_ARCH_VFP_V2
;
162 static const arm_feature_set fpu_arch_vfp_v3 ATTRIBUTE_UNUSED
= FPU_ARCH_VFP_V3
;
163 static const arm_feature_set fpu_arch_neon_v1 ATTRIBUTE_UNUSED
= FPU_ARCH_NEON_V1
;
164 static const arm_feature_set fpu_arch_fpa
= FPU_ARCH_FPA
;
165 static const arm_feature_set fpu_any_hard
= FPU_ANY_HARD
;
167 static const arm_feature_set fpu_arch_maverick
= FPU_ARCH_MAVERICK
;
169 static const arm_feature_set fpu_endian_pure
= FPU_ARCH_ENDIAN_PURE
;
172 static const arm_feature_set cpu_default
= CPU_DEFAULT
;
175 static const arm_feature_set arm_ext_v1
= ARM_FEATURE_CORE_LOW (ARM_EXT_V1
);
176 static const arm_feature_set arm_ext_v2
= ARM_FEATURE_CORE_LOW (ARM_EXT_V1
);
177 static const arm_feature_set arm_ext_v2s
= ARM_FEATURE_CORE_LOW (ARM_EXT_V2S
);
178 static const arm_feature_set arm_ext_v3
= ARM_FEATURE_CORE_LOW (ARM_EXT_V3
);
179 static const arm_feature_set arm_ext_v3m
= ARM_FEATURE_CORE_LOW (ARM_EXT_V3M
);
180 static const arm_feature_set arm_ext_v4
= ARM_FEATURE_CORE_LOW (ARM_EXT_V4
);
181 static const arm_feature_set arm_ext_v4t
= ARM_FEATURE_CORE_LOW (ARM_EXT_V4T
);
182 static const arm_feature_set arm_ext_v5
= ARM_FEATURE_CORE_LOW (ARM_EXT_V5
);
183 static const arm_feature_set arm_ext_v4t_5
=
184 ARM_FEATURE_CORE_LOW (ARM_EXT_V4T
| ARM_EXT_V5
);
185 static const arm_feature_set arm_ext_v5t
= ARM_FEATURE_CORE_LOW (ARM_EXT_V5T
);
186 static const arm_feature_set arm_ext_v5e
= ARM_FEATURE_CORE_LOW (ARM_EXT_V5E
);
187 static const arm_feature_set arm_ext_v5exp
= ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP
);
188 static const arm_feature_set arm_ext_v5j
= ARM_FEATURE_CORE_LOW (ARM_EXT_V5J
);
189 static const arm_feature_set arm_ext_v6
= ARM_FEATURE_CORE_LOW (ARM_EXT_V6
);
190 static const arm_feature_set arm_ext_v6k
= ARM_FEATURE_CORE_LOW (ARM_EXT_V6K
);
191 static const arm_feature_set arm_ext_v6t2
= ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2
);
192 static const arm_feature_set arm_ext_v6m
= ARM_FEATURE_CORE_LOW (ARM_EXT_V6M
);
193 static const arm_feature_set arm_ext_v6_notm
=
194 ARM_FEATURE_CORE_LOW (ARM_EXT_V6_NOTM
);
195 static const arm_feature_set arm_ext_v6_dsp
=
196 ARM_FEATURE_CORE_LOW (ARM_EXT_V6_DSP
);
197 static const arm_feature_set arm_ext_barrier
=
198 ARM_FEATURE_CORE_LOW (ARM_EXT_BARRIER
);
199 static const arm_feature_set arm_ext_msr
=
200 ARM_FEATURE_CORE_LOW (ARM_EXT_THUMB_MSR
);
201 static const arm_feature_set arm_ext_div
= ARM_FEATURE_CORE_LOW (ARM_EXT_DIV
);
202 static const arm_feature_set arm_ext_v7
= ARM_FEATURE_CORE_LOW (ARM_EXT_V7
);
203 static const arm_feature_set arm_ext_v7a
= ARM_FEATURE_CORE_LOW (ARM_EXT_V7A
);
204 static const arm_feature_set arm_ext_v7r
= ARM_FEATURE_CORE_LOW (ARM_EXT_V7R
);
206 static const arm_feature_set arm_ext_v7m
= ARM_FEATURE_CORE_LOW (ARM_EXT_V7M
);
208 static const arm_feature_set arm_ext_v8
= ARM_FEATURE_CORE_LOW (ARM_EXT_V8
);
209 static const arm_feature_set arm_ext_m
=
210 ARM_FEATURE_CORE (ARM_EXT_V6M
| ARM_EXT_OS
| ARM_EXT_V7M
,
211 ARM_EXT2_V8M
| ARM_EXT2_V8M_MAIN
);
212 static const arm_feature_set arm_ext_mp
= ARM_FEATURE_CORE_LOW (ARM_EXT_MP
);
213 static const arm_feature_set arm_ext_sec
= ARM_FEATURE_CORE_LOW (ARM_EXT_SEC
);
214 static const arm_feature_set arm_ext_os
= ARM_FEATURE_CORE_LOW (ARM_EXT_OS
);
215 static const arm_feature_set arm_ext_adiv
= ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV
);
216 static const arm_feature_set arm_ext_virt
= ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT
);
217 static const arm_feature_set arm_ext_pan
= ARM_FEATURE_CORE_HIGH (ARM_EXT2_PAN
);
218 static const arm_feature_set arm_ext_v8m
= ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M
);
219 static const arm_feature_set arm_ext_v8m_main
=
220 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M_MAIN
);
221 /* Instructions in ARMv8-M only found in M profile architectures. */
222 static const arm_feature_set arm_ext_v8m_m_only
=
223 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M
| ARM_EXT2_V8M_MAIN
);
224 static const arm_feature_set arm_ext_v6t2_v8m
=
225 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V6T2_V8M
);
226 /* Instructions shared between ARMv8-A and ARMv8-M. */
227 static const arm_feature_set arm_ext_atomics
=
228 ARM_FEATURE_CORE_HIGH (ARM_EXT2_ATOMICS
);
230 /* DSP instructions Tag_DSP_extension refers to. */
231 static const arm_feature_set arm_ext_dsp
=
232 ARM_FEATURE_CORE_LOW (ARM_EXT_V5E
| ARM_EXT_V5ExP
| ARM_EXT_V6_DSP
);
234 static const arm_feature_set arm_ext_ras
=
235 ARM_FEATURE_CORE_HIGH (ARM_EXT2_RAS
);
236 /* FP16 instructions. */
237 static const arm_feature_set arm_ext_fp16
=
238 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
);
239 static const arm_feature_set arm_ext_v8_3
=
240 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_3A
);
242 static const arm_feature_set arm_arch_any
= ARM_ANY
;
243 static const arm_feature_set arm_arch_full ATTRIBUTE_UNUSED
= ARM_FEATURE (-1, -1, -1);
244 static const arm_feature_set arm_arch_t2
= ARM_ARCH_THUMB2
;
245 static const arm_feature_set arm_arch_none
= ARM_ARCH_NONE
;
247 static const arm_feature_set arm_arch_v6m_only
= ARM_ARCH_V6M_ONLY
;
250 static const arm_feature_set arm_cext_iwmmxt2
=
251 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2
);
252 static const arm_feature_set arm_cext_iwmmxt
=
253 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT
);
254 static const arm_feature_set arm_cext_xscale
=
255 ARM_FEATURE_COPROC (ARM_CEXT_XSCALE
);
256 static const arm_feature_set arm_cext_maverick
=
257 ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK
);
258 static const arm_feature_set fpu_fpa_ext_v1
=
259 ARM_FEATURE_COPROC (FPU_FPA_EXT_V1
);
260 static const arm_feature_set fpu_fpa_ext_v2
=
261 ARM_FEATURE_COPROC (FPU_FPA_EXT_V2
);
262 static const arm_feature_set fpu_vfp_ext_v1xd
=
263 ARM_FEATURE_COPROC (FPU_VFP_EXT_V1xD
);
264 static const arm_feature_set fpu_vfp_ext_v1
=
265 ARM_FEATURE_COPROC (FPU_VFP_EXT_V1
);
266 static const arm_feature_set fpu_vfp_ext_v2
=
267 ARM_FEATURE_COPROC (FPU_VFP_EXT_V2
);
268 static const arm_feature_set fpu_vfp_ext_v3xd
=
269 ARM_FEATURE_COPROC (FPU_VFP_EXT_V3xD
);
270 static const arm_feature_set fpu_vfp_ext_v3
=
271 ARM_FEATURE_COPROC (FPU_VFP_EXT_V3
);
272 static const arm_feature_set fpu_vfp_ext_d32
=
273 ARM_FEATURE_COPROC (FPU_VFP_EXT_D32
);
274 static const arm_feature_set fpu_neon_ext_v1
=
275 ARM_FEATURE_COPROC (FPU_NEON_EXT_V1
);
276 static const arm_feature_set fpu_vfp_v3_or_neon_ext
=
277 ARM_FEATURE_COPROC (FPU_NEON_EXT_V1
| FPU_VFP_EXT_V3
);
279 static const arm_feature_set fpu_vfp_fp16
=
280 ARM_FEATURE_COPROC (FPU_VFP_EXT_FP16
);
281 static const arm_feature_set fpu_neon_ext_fma
=
282 ARM_FEATURE_COPROC (FPU_NEON_EXT_FMA
);
284 static const arm_feature_set fpu_vfp_ext_fma
=
285 ARM_FEATURE_COPROC (FPU_VFP_EXT_FMA
);
286 static const arm_feature_set fpu_vfp_ext_armv8
=
287 ARM_FEATURE_COPROC (FPU_VFP_EXT_ARMV8
);
288 static const arm_feature_set fpu_vfp_ext_armv8xd
=
289 ARM_FEATURE_COPROC (FPU_VFP_EXT_ARMV8xD
);
290 static const arm_feature_set fpu_neon_ext_armv8
=
291 ARM_FEATURE_COPROC (FPU_NEON_EXT_ARMV8
);
292 static const arm_feature_set fpu_crypto_ext_armv8
=
293 ARM_FEATURE_COPROC (FPU_CRYPTO_EXT_ARMV8
);
294 static const arm_feature_set crc_ext_armv8
=
295 ARM_FEATURE_COPROC (CRC_EXT_ARMV8
);
296 static const arm_feature_set fpu_neon_ext_v8_1
=
297 ARM_FEATURE_COPROC (FPU_NEON_EXT_RDMA
);
299 static int mfloat_abi_opt
= -1;
300 /* Record user cpu selection for object attributes. */
301 static arm_feature_set selected_cpu
= ARM_ARCH_NONE
;
302 /* Must be long enough to hold any of the names in arm_cpus. */
303 static char selected_cpu_name
[20];
305 extern FLONUM_TYPE generic_floating_point_number
;
307 /* Return if no cpu was selected on command-line. */
309 no_cpu_selected (void)
311 return ARM_FEATURE_EQUAL (selected_cpu
, arm_arch_none
);
316 static int meabi_flags
= EABI_DEFAULT
;
318 static int meabi_flags
= EF_ARM_EABI_UNKNOWN
;
321 static int attributes_set_explicitly
[NUM_KNOWN_OBJ_ATTRIBUTES
];
326 return (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
);
331 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
332 symbolS
* GOT_symbol
;
335 /* 0: assemble for ARM,
336 1: assemble for Thumb,
337 2: assemble for Thumb even though target CPU does not support thumb
339 static int thumb_mode
= 0;
340 /* A value distinct from the possible values for thumb_mode that we
341 can use to record whether thumb_mode has been copied into the
342 tc_frag_data field of a frag. */
343 #define MODE_RECORDED (1 << 4)
345 /* Specifies the intrinsic IT insn behavior mode. */
346 enum implicit_it_mode
348 IMPLICIT_IT_MODE_NEVER
= 0x00,
349 IMPLICIT_IT_MODE_ARM
= 0x01,
350 IMPLICIT_IT_MODE_THUMB
= 0x02,
351 IMPLICIT_IT_MODE_ALWAYS
= (IMPLICIT_IT_MODE_ARM
| IMPLICIT_IT_MODE_THUMB
)
353 static int implicit_it_mode
= IMPLICIT_IT_MODE_ARM
;
355 /* If unified_syntax is true, we are processing the new unified
356 ARM/Thumb syntax. Important differences from the old ARM mode:
358 - Immediate operands do not require a # prefix.
359 - Conditional affixes always appear at the end of the
360 instruction. (For backward compatibility, those instructions
361 that formerly had them in the middle, continue to accept them
363 - The IT instruction may appear, and if it does is validated
364 against subsequent conditional affixes. It does not generate
367 Important differences from the old Thumb mode:
369 - Immediate operands do not require a # prefix.
370 - Most of the V6T2 instructions are only available in unified mode.
371 - The .N and .W suffixes are recognized and honored (it is an error
372 if they cannot be honored).
373 - All instructions set the flags if and only if they have an 's' affix.
374 - Conditional affixes may be used. They are validated against
375 preceding IT instructions. Unlike ARM mode, you cannot use a
376 conditional affix except in the scope of an IT instruction. */
378 static bfd_boolean unified_syntax
= FALSE
;
380 /* An immediate operand can start with #, and ld*, st*, pld operands
381 can contain [ and ]. We need to tell APP not to elide whitespace
382 before a [, which can appear as the first operand for pld.
383 Likewise, a { can appear as the first operand for push, pop, vld*, etc. */
384 const char arm_symbol_chars
[] = "#[]{}";
399 enum neon_el_type type
;
403 #define NEON_MAX_TYPE_ELS 4
407 struct neon_type_el el
[NEON_MAX_TYPE_ELS
];
411 enum it_instruction_type
416 IF_INSIDE_IT_LAST_INSN
, /* Either outside or inside;
417 if inside, should be the last one. */
418 NEUTRAL_IT_INSN
, /* This could be either inside or outside,
419 i.e. BKPT and NOP. */
420 IT_INSN
/* The IT insn has been parsed. */
423 /* The maximum number of operands we need. */
424 #define ARM_IT_MAX_OPERANDS 6
429 unsigned long instruction
;
433 /* "uncond_value" is set to the value in place of the conditional field in
434 unconditional versions of the instruction, or -1 if nothing is
437 struct neon_type vectype
;
438 /* This does not indicate an actual NEON instruction, only that
439 the mnemonic accepts neon-style type suffixes. */
441 /* Set to the opcode if the instruction needs relaxation.
442 Zero if the instruction is not relaxed. */
446 bfd_reloc_code_real_type type
;
451 enum it_instruction_type it_insn_type
;
457 struct neon_type_el vectype
;
458 unsigned present
: 1; /* Operand present. */
459 unsigned isreg
: 1; /* Operand was a register. */
460 unsigned immisreg
: 1; /* .imm field is a second register. */
461 unsigned isscalar
: 1; /* Operand is a (Neon) scalar. */
462 unsigned immisalign
: 1; /* Immediate is an alignment specifier. */
463 unsigned immisfloat
: 1; /* Immediate was parsed as a float. */
464 /* Note: we abuse "regisimm" to mean "is Neon register" in VMOV
465 instructions. This allows us to disambiguate ARM <-> vector insns. */
466 unsigned regisimm
: 1; /* 64-bit immediate, reg forms high 32 bits. */
467 unsigned isvec
: 1; /* Is a single, double or quad VFP/Neon reg. */
468 unsigned isquad
: 1; /* Operand is Neon quad-precision register. */
469 unsigned issingle
: 1; /* Operand is VFP single-precision register. */
470 unsigned hasreloc
: 1; /* Operand has relocation suffix. */
471 unsigned writeback
: 1; /* Operand has trailing ! */
472 unsigned preind
: 1; /* Preindexed address. */
473 unsigned postind
: 1; /* Postindexed address. */
474 unsigned negative
: 1; /* Index register was negated. */
475 unsigned shifted
: 1; /* Shift applied to operation. */
476 unsigned shift_kind
: 3; /* Shift operation (enum shift_kind). */
477 } operands
[ARM_IT_MAX_OPERANDS
];
480 static struct arm_it inst
;
482 #define NUM_FLOAT_VALS 8
484 const char * fp_const
[] =
486 "0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "0.5", "10.0", 0
489 /* Number of littlenums required to hold an extended precision number. */
490 #define MAX_LITTLENUMS 6
492 LITTLENUM_TYPE fp_values
[NUM_FLOAT_VALS
][MAX_LITTLENUMS
];
502 #define CP_T_X 0x00008000
503 #define CP_T_Y 0x00400000
505 #define CONDS_BIT 0x00100000
506 #define LOAD_BIT 0x00100000
508 #define DOUBLE_LOAD_FLAG 0x00000001
512 const char * template_name
;
516 #define COND_ALWAYS 0xE
520 const char * template_name
;
524 struct asm_barrier_opt
526 const char * template_name
;
528 const arm_feature_set arch
;
531 /* The bit that distinguishes CPSR and SPSR. */
532 #define SPSR_BIT (1 << 22)
534 /* The individual PSR flag bits. */
535 #define PSR_c (1 << 16)
536 #define PSR_x (1 << 17)
537 #define PSR_s (1 << 18)
538 #define PSR_f (1 << 19)
543 bfd_reloc_code_real_type reloc
;
548 VFP_REG_Sd
, VFP_REG_Sm
, VFP_REG_Sn
,
549 VFP_REG_Dd
, VFP_REG_Dm
, VFP_REG_Dn
554 VFP_LDSTMIA
, VFP_LDSTMDB
, VFP_LDSTMIAX
, VFP_LDSTMDBX
557 /* Bits for DEFINED field in neon_typed_alias. */
558 #define NTA_HASTYPE 1
559 #define NTA_HASINDEX 2
561 struct neon_typed_alias
563 unsigned char defined
;
565 struct neon_type_el eltype
;
568 /* ARM register categories. This includes coprocessor numbers and various
569 architecture extensions' registers. */
596 /* Structure for a hash table entry for a register.
597 If TYPE is REG_TYPE_VFD or REG_TYPE_NQ, the NEON field can point to extra
598 information which states whether a vector type or index is specified (for a
599 register alias created with .dn or .qn). Otherwise NEON should be NULL. */
605 unsigned char builtin
;
606 struct neon_typed_alias
* neon
;
609 /* Diagnostics used when we don't get a register of the expected type. */
610 const char * const reg_expected_msgs
[] =
612 N_("ARM register expected"),
613 N_("bad or missing co-processor number"),
614 N_("co-processor register expected"),
615 N_("FPA register expected"),
616 N_("VFP single precision register expected"),
617 N_("VFP/Neon double precision register expected"),
618 N_("Neon quad precision register expected"),
619 N_("VFP single or double precision register expected"),
620 N_("Neon double or quad precision register expected"),
621 N_("VFP single, double or Neon quad precision register expected"),
622 N_("VFP system register expected"),
623 N_("Maverick MVF register expected"),
624 N_("Maverick MVD register expected"),
625 N_("Maverick MVFX register expected"),
626 N_("Maverick MVDX register expected"),
627 N_("Maverick MVAX register expected"),
628 N_("Maverick DSPSC register expected"),
629 N_("iWMMXt data register expected"),
630 N_("iWMMXt control register expected"),
631 N_("iWMMXt scalar register expected"),
632 N_("XScale accumulator register expected"),
635 /* Some well known registers that we refer to directly elsewhere. */
641 /* ARM instructions take 4bytes in the object file, Thumb instructions
647 /* Basic string to match. */
648 const char * template_name
;
650 /* Parameters to instruction. */
651 unsigned int operands
[8];
653 /* Conditional tag - see opcode_lookup. */
654 unsigned int tag
: 4;
656 /* Basic instruction code. */
657 unsigned int avalue
: 28;
659 /* Thumb-format instruction code. */
662 /* Which architecture variant provides this instruction. */
663 const arm_feature_set
* avariant
;
664 const arm_feature_set
* tvariant
;
666 /* Function to call to encode instruction in ARM format. */
667 void (* aencode
) (void);
669 /* Function to call to encode instruction in Thumb format. */
670 void (* tencode
) (void);
673 /* Defines for various bits that we will want to toggle. */
674 #define INST_IMMEDIATE 0x02000000
675 #define OFFSET_REG 0x02000000
676 #define HWOFFSET_IMM 0x00400000
677 #define SHIFT_BY_REG 0x00000010
678 #define PRE_INDEX 0x01000000
679 #define INDEX_UP 0x00800000
680 #define WRITE_BACK 0x00200000
681 #define LDM_TYPE_2_OR_3 0x00400000
682 #define CPSI_MMOD 0x00020000
684 #define LITERAL_MASK 0xf000f000
685 #define OPCODE_MASK 0xfe1fffff
686 #define V4_STR_BIT 0x00000020
687 #define VLDR_VMOV_SAME 0x0040f000
689 #define T2_SUBS_PC_LR 0xf3de8f00
691 #define DATA_OP_SHIFT 21
692 #define SBIT_SHIFT 20
694 #define T2_OPCODE_MASK 0xfe1fffff
695 #define T2_DATA_OP_SHIFT 21
696 #define T2_SBIT_SHIFT 20
698 #define A_COND_MASK 0xf0000000
699 #define A_PUSH_POP_OP_MASK 0x0fff0000
701 /* Opcodes for pushing/popping registers to/from the stack. */
702 #define A1_OPCODE_PUSH 0x092d0000
703 #define A2_OPCODE_PUSH 0x052d0004
704 #define A2_OPCODE_POP 0x049d0004
706 /* Codes to distinguish the arithmetic instructions. */
717 #define OPCODE_CMP 10
718 #define OPCODE_CMN 11
719 #define OPCODE_ORR 12
720 #define OPCODE_MOV 13
721 #define OPCODE_BIC 14
722 #define OPCODE_MVN 15
724 #define T2_OPCODE_AND 0
725 #define T2_OPCODE_BIC 1
726 #define T2_OPCODE_ORR 2
727 #define T2_OPCODE_ORN 3
728 #define T2_OPCODE_EOR 4
729 #define T2_OPCODE_ADD 8
730 #define T2_OPCODE_ADC 10
731 #define T2_OPCODE_SBC 11
732 #define T2_OPCODE_SUB 13
733 #define T2_OPCODE_RSB 14
735 #define T_OPCODE_MUL 0x4340
736 #define T_OPCODE_TST 0x4200
737 #define T_OPCODE_CMN 0x42c0
738 #define T_OPCODE_NEG 0x4240
739 #define T_OPCODE_MVN 0x43c0
741 #define T_OPCODE_ADD_R3 0x1800
742 #define T_OPCODE_SUB_R3 0x1a00
743 #define T_OPCODE_ADD_HI 0x4400
744 #define T_OPCODE_ADD_ST 0xb000
745 #define T_OPCODE_SUB_ST 0xb080
746 #define T_OPCODE_ADD_SP 0xa800
747 #define T_OPCODE_ADD_PC 0xa000
748 #define T_OPCODE_ADD_I8 0x3000
749 #define T_OPCODE_SUB_I8 0x3800
750 #define T_OPCODE_ADD_I3 0x1c00
751 #define T_OPCODE_SUB_I3 0x1e00
753 #define T_OPCODE_ASR_R 0x4100
754 #define T_OPCODE_LSL_R 0x4080
755 #define T_OPCODE_LSR_R 0x40c0
756 #define T_OPCODE_ROR_R 0x41c0
757 #define T_OPCODE_ASR_I 0x1000
758 #define T_OPCODE_LSL_I 0x0000
759 #define T_OPCODE_LSR_I 0x0800
761 #define T_OPCODE_MOV_I8 0x2000
762 #define T_OPCODE_CMP_I8 0x2800
763 #define T_OPCODE_CMP_LR 0x4280
764 #define T_OPCODE_MOV_HR 0x4600
765 #define T_OPCODE_CMP_HR 0x4500
767 #define T_OPCODE_LDR_PC 0x4800
768 #define T_OPCODE_LDR_SP 0x9800
769 #define T_OPCODE_STR_SP 0x9000
770 #define T_OPCODE_LDR_IW 0x6800
771 #define T_OPCODE_STR_IW 0x6000
772 #define T_OPCODE_LDR_IH 0x8800
773 #define T_OPCODE_STR_IH 0x8000
774 #define T_OPCODE_LDR_IB 0x7800
775 #define T_OPCODE_STR_IB 0x7000
776 #define T_OPCODE_LDR_RW 0x5800
777 #define T_OPCODE_STR_RW 0x5000
778 #define T_OPCODE_LDR_RH 0x5a00
779 #define T_OPCODE_STR_RH 0x5200
780 #define T_OPCODE_LDR_RB 0x5c00
781 #define T_OPCODE_STR_RB 0x5400
783 #define T_OPCODE_PUSH 0xb400
784 #define T_OPCODE_POP 0xbc00
786 #define T_OPCODE_BRANCH 0xe000
788 #define THUMB_SIZE 2 /* Size of thumb instruction. */
789 #define THUMB_PP_PC_LR 0x0100
790 #define THUMB_LOAD_BIT 0x0800
791 #define THUMB2_LOAD_BIT 0x00100000
793 #define BAD_ARGS _("bad arguments to instruction")
794 #define BAD_SP _("r13 not allowed here")
795 #define BAD_PC _("r15 not allowed here")
796 #define BAD_COND _("instruction cannot be conditional")
797 #define BAD_OVERLAP _("registers may not be the same")
798 #define BAD_HIREG _("lo register required")
799 #define BAD_THUMB32 _("instruction not supported in Thumb16 mode")
800 #define BAD_ADDR_MODE _("instruction does not accept this addressing mode");
801 #define BAD_BRANCH _("branch must be last instruction in IT block")
802 #define BAD_NOT_IT _("instruction not allowed in IT block")
803 #define BAD_FPU _("selected FPU does not support instruction")
804 #define BAD_OUT_IT _("thumb conditional instruction should be in IT block")
805 #define BAD_IT_COND _("incorrect condition in IT block")
806 #define BAD_IT_IT _("IT falling in the range of a previous IT block")
807 #define MISSING_FNSTART _("missing .fnstart before unwinding directive")
808 #define BAD_PC_ADDRESSING \
809 _("cannot use register index with PC-relative addressing")
810 #define BAD_PC_WRITEBACK \
811 _("cannot use writeback with PC-relative addressing")
812 #define BAD_RANGE _("branch out of range")
813 #define BAD_FP16 _("selected processor does not support fp16 instruction")
814 #define UNPRED_REG(R) _("using " R " results in unpredictable behaviour")
815 #define THUMB1_RELOC_ONLY _("relocation valid in thumb1 code only")
817 static struct hash_control
* arm_ops_hsh
;
818 static struct hash_control
* arm_cond_hsh
;
819 static struct hash_control
* arm_shift_hsh
;
820 static struct hash_control
* arm_psr_hsh
;
821 static struct hash_control
* arm_v7m_psr_hsh
;
822 static struct hash_control
* arm_reg_hsh
;
823 static struct hash_control
* arm_reloc_hsh
;
824 static struct hash_control
* arm_barrier_opt_hsh
;
826 /* Stuff needed to resolve the label ambiguity
835 symbolS
* last_label_seen
;
836 static int label_is_thumb_function_name
= FALSE
;
838 /* Literal pool structure. Held on a per-section
839 and per-sub-section basis. */
841 #define MAX_LITERAL_POOL_SIZE 1024
842 typedef struct literal_pool
844 expressionS literals
[MAX_LITERAL_POOL_SIZE
];
845 unsigned int next_free_entry
;
851 struct dwarf2_line_info locs
[MAX_LITERAL_POOL_SIZE
];
853 struct literal_pool
* next
;
854 unsigned int alignment
;
857 /* Pointer to a linked list of literal pools. */
858 literal_pool
* list_of_pools
= NULL
;
860 typedef enum asmfunc_states
863 WAITING_ASMFUNC_NAME
,
867 static asmfunc_states asmfunc_state
= OUTSIDE_ASMFUNC
;
870 # define now_it seg_info (now_seg)->tc_segment_info_data.current_it
872 static struct current_it now_it
;
876 now_it_compatible (int cond
)
878 return (cond
& ~1) == (now_it
.cc
& ~1);
882 conditional_insn (void)
884 return inst
.cond
!= COND_ALWAYS
;
887 static int in_it_block (void);
889 static int handle_it_state (void);
891 static void force_automatic_it_block_close (void);
893 static void it_fsm_post_encode (void);
895 #define set_it_insn_type(type) \
898 inst.it_insn_type = type; \
899 if (handle_it_state () == FAIL) \
904 #define set_it_insn_type_nonvoid(type, failret) \
907 inst.it_insn_type = type; \
908 if (handle_it_state () == FAIL) \
913 #define set_it_insn_type_last() \
916 if (inst.cond == COND_ALWAYS) \
917 set_it_insn_type (IF_INSIDE_IT_LAST_INSN); \
919 set_it_insn_type (INSIDE_IT_LAST_INSN); \
925 /* This array holds the chars that always start a comment. If the
926 pre-processor is disabled, these aren't very useful. */
927 char arm_comment_chars
[] = "@";
929 /* This array holds the chars that only start a comment at the beginning of
930 a line. If the line seems to have the form '# 123 filename'
931 .line and .file directives will appear in the pre-processed output. */
932 /* Note that input_file.c hand checks for '#' at the beginning of the
933 first line of the input file. This is because the compiler outputs
934 #NO_APP at the beginning of its output. */
935 /* Also note that comments like this one will always work. */
936 const char line_comment_chars
[] = "#";
938 char arm_line_separator_chars
[] = ";";
940 /* Chars that can be used to separate mant
941 from exp in floating point numbers. */
942 const char EXP_CHARS
[] = "eE";
944 /* Chars that mean this number is a floating point constant. */
948 const char FLT_CHARS
[] = "rRsSfFdDxXeEpP";
950 /* Prefix characters that indicate the start of an immediate
952 #define is_immediate_prefix(C) ((C) == '#' || (C) == '$')
954 /* Separator character handling. */
956 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
959 skip_past_char (char ** str
, char c
)
961 /* PR gas/14987: Allow for whitespace before the expected character. */
962 skip_whitespace (*str
);
973 #define skip_past_comma(str) skip_past_char (str, ',')
975 /* Arithmetic expressions (possibly involving symbols). */
977 /* Return TRUE if anything in the expression is a bignum. */
980 walk_no_bignums (symbolS
* sp
)
982 if (symbol_get_value_expression (sp
)->X_op
== O_big
)
985 if (symbol_get_value_expression (sp
)->X_add_symbol
)
987 return (walk_no_bignums (symbol_get_value_expression (sp
)->X_add_symbol
)
988 || (symbol_get_value_expression (sp
)->X_op_symbol
989 && walk_no_bignums (symbol_get_value_expression (sp
)->X_op_symbol
)));
995 static int in_my_get_expression
= 0;
997 /* Third argument to my_get_expression. */
998 #define GE_NO_PREFIX 0
999 #define GE_IMM_PREFIX 1
1000 #define GE_OPT_PREFIX 2
1001 /* This is a bit of a hack. Use an optional prefix, and also allow big (64-bit)
1002 immediates, as can be used in Neon VMVN and VMOV immediate instructions. */
1003 #define GE_OPT_PREFIX_BIG 3
1006 my_get_expression (expressionS
* ep
, char ** str
, int prefix_mode
)
1011 /* In unified syntax, all prefixes are optional. */
1013 prefix_mode
= (prefix_mode
== GE_OPT_PREFIX_BIG
) ? prefix_mode
1016 switch (prefix_mode
)
1018 case GE_NO_PREFIX
: break;
1020 if (!is_immediate_prefix (**str
))
1022 inst
.error
= _("immediate expression requires a # prefix");
1028 case GE_OPT_PREFIX_BIG
:
1029 if (is_immediate_prefix (**str
))
1035 memset (ep
, 0, sizeof (expressionS
));
1037 save_in
= input_line_pointer
;
1038 input_line_pointer
= *str
;
1039 in_my_get_expression
= 1;
1040 seg
= expression (ep
);
1041 in_my_get_expression
= 0;
1043 if (ep
->X_op
== O_illegal
|| ep
->X_op
== O_absent
)
1045 /* We found a bad or missing expression in md_operand(). */
1046 *str
= input_line_pointer
;
1047 input_line_pointer
= save_in
;
1048 if (inst
.error
== NULL
)
1049 inst
.error
= (ep
->X_op
== O_absent
1050 ? _("missing expression") :_("bad expression"));
1055 if (seg
!= absolute_section
1056 && seg
!= text_section
1057 && seg
!= data_section
1058 && seg
!= bss_section
1059 && seg
!= undefined_section
)
1061 inst
.error
= _("bad segment");
1062 *str
= input_line_pointer
;
1063 input_line_pointer
= save_in
;
1070 /* Get rid of any bignums now, so that we don't generate an error for which
1071 we can't establish a line number later on. Big numbers are never valid
1072 in instructions, which is where this routine is always called. */
1073 if (prefix_mode
!= GE_OPT_PREFIX_BIG
1074 && (ep
->X_op
== O_big
1075 || (ep
->X_add_symbol
1076 && (walk_no_bignums (ep
->X_add_symbol
)
1078 && walk_no_bignums (ep
->X_op_symbol
))))))
1080 inst
.error
= _("invalid constant");
1081 *str
= input_line_pointer
;
1082 input_line_pointer
= save_in
;
1086 *str
= input_line_pointer
;
1087 input_line_pointer
= save_in
;
1091 /* Turn a string in input_line_pointer into a floating point constant
1092 of type TYPE, and store the appropriate bytes in *LITP. The number
1093 of LITTLENUMS emitted is stored in *SIZEP. An error message is
1094 returned, or NULL on OK.
1096 Note that fp constants aren't represent in the normal way on the ARM.
1097 In big endian mode, things are as expected. However, in little endian
1098 mode fp constants are big-endian word-wise, and little-endian byte-wise
1099 within the words. For example, (double) 1.1 in big endian mode is
1100 the byte sequence 3f f1 99 99 99 99 99 9a, and in little endian mode is
1101 the byte sequence 99 99 f1 3f 9a 99 99 99.
1103 ??? The format of 12 byte floats is uncertain according to gcc's arm.h. */
1106 md_atof (int type
, char * litP
, int * sizeP
)
1109 LITTLENUM_TYPE words
[MAX_LITTLENUMS
];
1141 return _("Unrecognized or unsupported floating point constant");
1144 t
= atof_ieee (input_line_pointer
, type
, words
);
1146 input_line_pointer
= t
;
1147 *sizeP
= prec
* sizeof (LITTLENUM_TYPE
);
1149 if (target_big_endian
)
1151 for (i
= 0; i
< prec
; i
++)
1153 md_number_to_chars (litP
, (valueT
) words
[i
], sizeof (LITTLENUM_TYPE
));
1154 litP
+= sizeof (LITTLENUM_TYPE
);
1159 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_endian_pure
))
1160 for (i
= prec
- 1; i
>= 0; i
--)
1162 md_number_to_chars (litP
, (valueT
) words
[i
], sizeof (LITTLENUM_TYPE
));
1163 litP
+= sizeof (LITTLENUM_TYPE
);
1166 /* For a 4 byte float the order of elements in `words' is 1 0.
1167 For an 8 byte float the order is 1 0 3 2. */
1168 for (i
= 0; i
< prec
; i
+= 2)
1170 md_number_to_chars (litP
, (valueT
) words
[i
+ 1],
1171 sizeof (LITTLENUM_TYPE
));
1172 md_number_to_chars (litP
+ sizeof (LITTLENUM_TYPE
),
1173 (valueT
) words
[i
], sizeof (LITTLENUM_TYPE
));
1174 litP
+= 2 * sizeof (LITTLENUM_TYPE
);
1181 /* We handle all bad expressions here, so that we can report the faulty
1182 instruction in the error message. */
1184 md_operand (expressionS
* exp
)
1186 if (in_my_get_expression
)
1187 exp
->X_op
= O_illegal
;
1190 /* Immediate values. */
1192 /* Generic immediate-value read function for use in directives.
1193 Accepts anything that 'expression' can fold to a constant.
1194 *val receives the number. */
1197 immediate_for_directive (int *val
)
1200 exp
.X_op
= O_illegal
;
1202 if (is_immediate_prefix (*input_line_pointer
))
1204 input_line_pointer
++;
1208 if (exp
.X_op
!= O_constant
)
1210 as_bad (_("expected #constant"));
1211 ignore_rest_of_line ();
1214 *val
= exp
.X_add_number
;
1219 /* Register parsing. */
1221 /* Generic register parser. CCP points to what should be the
1222 beginning of a register name. If it is indeed a valid register
1223 name, advance CCP over it and return the reg_entry structure;
1224 otherwise return NULL. Does not issue diagnostics. */
1226 static struct reg_entry
*
1227 arm_reg_parse_multi (char **ccp
)
1231 struct reg_entry
*reg
;
1233 skip_whitespace (start
);
1235 #ifdef REGISTER_PREFIX
1236 if (*start
!= REGISTER_PREFIX
)
1240 #ifdef OPTIONAL_REGISTER_PREFIX
1241 if (*start
== OPTIONAL_REGISTER_PREFIX
)
1246 if (!ISALPHA (*p
) || !is_name_beginner (*p
))
1251 while (ISALPHA (*p
) || ISDIGIT (*p
) || *p
== '_');
1253 reg
= (struct reg_entry
*) hash_find_n (arm_reg_hsh
, start
, p
- start
);
1263 arm_reg_alt_syntax (char **ccp
, char *start
, struct reg_entry
*reg
,
1264 enum arm_reg_type type
)
1266 /* Alternative syntaxes are accepted for a few register classes. */
1273 /* Generic coprocessor register names are allowed for these. */
1274 if (reg
&& reg
->type
== REG_TYPE_CN
)
1279 /* For backward compatibility, a bare number is valid here. */
1281 unsigned long processor
= strtoul (start
, ccp
, 10);
1282 if (*ccp
!= start
&& processor
<= 15)
1287 case REG_TYPE_MMXWC
:
1288 /* WC includes WCG. ??? I'm not sure this is true for all
1289 instructions that take WC registers. */
1290 if (reg
&& reg
->type
== REG_TYPE_MMXWCG
)
1301 /* As arm_reg_parse_multi, but the register must be of type TYPE, and the
1302 return value is the register number or FAIL. */
1305 arm_reg_parse (char **ccp
, enum arm_reg_type type
)
1308 struct reg_entry
*reg
= arm_reg_parse_multi (ccp
);
1311 /* Do not allow a scalar (reg+index) to parse as a register. */
1312 if (reg
&& reg
->neon
&& (reg
->neon
->defined
& NTA_HASINDEX
))
1315 if (reg
&& reg
->type
== type
)
1318 if ((ret
= arm_reg_alt_syntax (ccp
, start
, reg
, type
)) != FAIL
)
1325 /* Parse a Neon type specifier. *STR should point at the leading '.'
1326 character. Does no verification at this stage that the type fits the opcode
1333 Can all be legally parsed by this function.
1335 Fills in neon_type struct pointer with parsed information, and updates STR
1336 to point after the parsed type specifier. Returns SUCCESS if this was a legal
1337 type, FAIL if not. */
1340 parse_neon_type (struct neon_type
*type
, char **str
)
1347 while (type
->elems
< NEON_MAX_TYPE_ELS
)
1349 enum neon_el_type thistype
= NT_untyped
;
1350 unsigned thissize
= -1u;
1357 /* Just a size without an explicit type. */
1361 switch (TOLOWER (*ptr
))
1363 case 'i': thistype
= NT_integer
; break;
1364 case 'f': thistype
= NT_float
; break;
1365 case 'p': thistype
= NT_poly
; break;
1366 case 's': thistype
= NT_signed
; break;
1367 case 'u': thistype
= NT_unsigned
; break;
1369 thistype
= NT_float
;
1374 as_bad (_("unexpected character `%c' in type specifier"), *ptr
);
1380 /* .f is an abbreviation for .f32. */
1381 if (thistype
== NT_float
&& !ISDIGIT (*ptr
))
1386 thissize
= strtoul (ptr
, &ptr
, 10);
1388 if (thissize
!= 8 && thissize
!= 16 && thissize
!= 32
1391 as_bad (_("bad size %d in type specifier"), thissize
);
1399 type
->el
[type
->elems
].type
= thistype
;
1400 type
->el
[type
->elems
].size
= thissize
;
1405 /* Empty/missing type is not a successful parse. */
1406 if (type
->elems
== 0)
1414 /* Errors may be set multiple times during parsing or bit encoding
1415 (particularly in the Neon bits), but usually the earliest error which is set
1416 will be the most meaningful. Avoid overwriting it with later (cascading)
1417 errors by calling this function. */
1420 first_error (const char *err
)
1426 /* Parse a single type, e.g. ".s32", leading period included. */
1428 parse_neon_operand_type (struct neon_type_el
*vectype
, char **ccp
)
1431 struct neon_type optype
;
1435 if (parse_neon_type (&optype
, &str
) == SUCCESS
)
1437 if (optype
.elems
== 1)
1438 *vectype
= optype
.el
[0];
1441 first_error (_("only one type should be specified for operand"));
1447 first_error (_("vector type expected"));
/* Special meanings for indices (which have a normal range of 0-7):
   these out-of-range values mark "all lanes" and interleaved-structure
   accesses respectively.  */
#define NEON_ALL_LANES		15
#define NEON_INTERLEAVE_LANES	14
1465 /* Parse either a register or a scalar, with an optional type. Return the
1466 register number, and optionally fill in the actual type of the register
1467 when multiple alternatives were given (NEON_TYPE_NDQ) in *RTYPE, and
1468 type/index information in *TYPEINFO. */
1471 parse_typed_reg_or_scalar (char **ccp
, enum arm_reg_type type
,
1472 enum arm_reg_type
*rtype
,
1473 struct neon_typed_alias
*typeinfo
)
1476 struct reg_entry
*reg
= arm_reg_parse_multi (&str
);
1477 struct neon_typed_alias atype
;
1478 struct neon_type_el parsetype
;
1482 atype
.eltype
.type
= NT_invtype
;
1483 atype
.eltype
.size
= -1;
1485 /* Try alternate syntax for some types of register. Note these are mutually
1486 exclusive with the Neon syntax extensions. */
1489 int altreg
= arm_reg_alt_syntax (&str
, *ccp
, reg
, type
);
1497 /* Undo polymorphism when a set of register types may be accepted. */
1498 if ((type
== REG_TYPE_NDQ
1499 && (reg
->type
== REG_TYPE_NQ
|| reg
->type
== REG_TYPE_VFD
))
1500 || (type
== REG_TYPE_VFSD
1501 && (reg
->type
== REG_TYPE_VFS
|| reg
->type
== REG_TYPE_VFD
))
1502 || (type
== REG_TYPE_NSDQ
1503 && (reg
->type
== REG_TYPE_VFS
|| reg
->type
== REG_TYPE_VFD
1504 || reg
->type
== REG_TYPE_NQ
))
1505 || (type
== REG_TYPE_MMXWC
1506 && (reg
->type
== REG_TYPE_MMXWCG
)))
1507 type
= (enum arm_reg_type
) reg
->type
;
1509 if (type
!= reg
->type
)
1515 if (parse_neon_operand_type (&parsetype
, &str
) == SUCCESS
)
1517 if ((atype
.defined
& NTA_HASTYPE
) != 0)
1519 first_error (_("can't redefine type for operand"));
1522 atype
.defined
|= NTA_HASTYPE
;
1523 atype
.eltype
= parsetype
;
1526 if (skip_past_char (&str
, '[') == SUCCESS
)
1528 if (type
!= REG_TYPE_VFD
)
1530 first_error (_("only D registers may be indexed"));
1534 if ((atype
.defined
& NTA_HASINDEX
) != 0)
1536 first_error (_("can't change index for operand"));
1540 atype
.defined
|= NTA_HASINDEX
;
1542 if (skip_past_char (&str
, ']') == SUCCESS
)
1543 atype
.index
= NEON_ALL_LANES
;
1548 my_get_expression (&exp
, &str
, GE_NO_PREFIX
);
1550 if (exp
.X_op
!= O_constant
)
1552 first_error (_("constant expression required"));
1556 if (skip_past_char (&str
, ']') == FAIL
)
1559 atype
.index
= exp
.X_add_number
;
1574 /* Like arm_reg_parse, but allow allow the following extra features:
1575 - If RTYPE is non-zero, return the (possibly restricted) type of the
1576 register (e.g. Neon double or quad reg when either has been requested).
1577 - If this is a Neon vector type with additional type information, fill
1578 in the struct pointed to by VECTYPE (if non-NULL).
1579 This function will fault on encountering a scalar. */
1582 arm_typed_reg_parse (char **ccp
, enum arm_reg_type type
,
1583 enum arm_reg_type
*rtype
, struct neon_type_el
*vectype
)
1585 struct neon_typed_alias atype
;
1587 int reg
= parse_typed_reg_or_scalar (&str
, type
, rtype
, &atype
);
1592 /* Do not allow regname(... to parse as a register. */
1596 /* Do not allow a scalar (reg+index) to parse as a register. */
1597 if ((atype
.defined
& NTA_HASINDEX
) != 0)
1599 first_error (_("register operand expected, but got scalar"));
1604 *vectype
= atype
.eltype
;
/* Unpack the value produced by parse_scalar, which encodes a scalar as
   register * 16 + lane index.  */
#define NEON_SCALAR_REG(X)	((X) >> 4)
#define NEON_SCALAR_INDEX(X)	((X) & 15)
1614 /* Parse a Neon scalar. Most of the time when we're parsing a scalar, we don't
1615 have enough information to be able to do a good job bounds-checking. So, we
1616 just do easy checks here, and do further checks later. */
1619 parse_scalar (char **ccp
, int elsize
, struct neon_type_el
*type
)
1623 struct neon_typed_alias atype
;
1625 reg
= parse_typed_reg_or_scalar (&str
, REG_TYPE_VFD
, NULL
, &atype
);
1627 if (reg
== FAIL
|| (atype
.defined
& NTA_HASINDEX
) == 0)
1630 if (atype
.index
== NEON_ALL_LANES
)
1632 first_error (_("scalar must have an index"));
1635 else if (atype
.index
>= 64 / elsize
)
1637 first_error (_("scalar index out of range"));
1642 *type
= atype
.eltype
;
1646 return reg
* 16 + atype
.index
;
1649 /* Parse an ARM register list. Returns the bitmask, or FAIL. */
1652 parse_reg_list (char ** strp
)
1654 char * str
= * strp
;
1658 /* We come back here if we get ranges concatenated by '+' or '|'. */
1661 skip_whitespace (str
);
1675 if ((reg
= arm_reg_parse (&str
, REG_TYPE_RN
)) == FAIL
)
1677 first_error (_(reg_expected_msgs
[REG_TYPE_RN
]));
1687 first_error (_("bad range in register list"));
1691 for (i
= cur_reg
+ 1; i
< reg
; i
++)
1693 if (range
& (1 << i
))
1695 (_("Warning: duplicated register (r%d) in register list"),
1703 if (range
& (1 << reg
))
1704 as_tsktsk (_("Warning: duplicated register (r%d) in register list"),
1706 else if (reg
<= cur_reg
)
1707 as_tsktsk (_("Warning: register range not in ascending order"));
1712 while (skip_past_comma (&str
) != FAIL
1713 || (in_range
= 1, *str
++ == '-'));
1716 if (skip_past_char (&str
, '}') == FAIL
)
1718 first_error (_("missing `}'"));
1726 if (my_get_expression (&exp
, &str
, GE_NO_PREFIX
))
1729 if (exp
.X_op
== O_constant
)
1731 if (exp
.X_add_number
1732 != (exp
.X_add_number
& 0x0000ffff))
1734 inst
.error
= _("invalid register mask");
1738 if ((range
& exp
.X_add_number
) != 0)
1740 int regno
= range
& exp
.X_add_number
;
1743 regno
= (1 << regno
) - 1;
1745 (_("Warning: duplicated register (r%d) in register list"),
1749 range
|= exp
.X_add_number
;
1753 if (inst
.reloc
.type
!= 0)
1755 inst
.error
= _("expression too complex");
1759 memcpy (&inst
.reloc
.exp
, &exp
, sizeof (expressionS
));
1760 inst
.reloc
.type
= BFD_RELOC_ARM_MULTI
;
1761 inst
.reloc
.pc_rel
= 0;
1765 if (*str
== '|' || *str
== '+')
1771 while (another_range
);
1777 /* Types of registers in a list. */
1786 /* Parse a VFP register list. If the string is invalid return FAIL.
1787 Otherwise return the number of registers, and set PBASE to the first
1788 register. Parses registers of type ETYPE.
1789 If REGLIST_NEON_D is used, several syntax enhancements are enabled:
1790 - Q registers can be used to specify pairs of D registers
1791 - { } can be omitted from around a singleton register list
1792 FIXME: This is not implemented, as it would require backtracking in
1795 This could be done (the meaning isn't really ambiguous), but doesn't
1796 fit in well with the current parsing framework.
1797 - 32 D registers may be used (also true for VFPv3).
1798 FIXME: Types are ignored in these register lists, which is probably a
1802 parse_vfp_reg_list (char **ccp
, unsigned int *pbase
, enum reg_list_els etype
)
1807 enum arm_reg_type regtype
= (enum arm_reg_type
) 0;
1811 unsigned long mask
= 0;
1814 if (skip_past_char (&str
, '{') == FAIL
)
1816 inst
.error
= _("expecting {");
1823 regtype
= REG_TYPE_VFS
;
1828 regtype
= REG_TYPE_VFD
;
1831 case REGLIST_NEON_D
:
1832 regtype
= REG_TYPE_NDQ
;
1836 if (etype
!= REGLIST_VFP_S
)
1838 /* VFPv3 allows 32 D registers, except for the VFPv3-D16 variant. */
1839 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_d32
))
1843 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
1846 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
,
1853 base_reg
= max_regs
;
1857 int setmask
= 1, addregs
= 1;
1859 new_base
= arm_typed_reg_parse (&str
, regtype
, ®type
, NULL
);
1861 if (new_base
== FAIL
)
1863 first_error (_(reg_expected_msgs
[regtype
]));
1867 if (new_base
>= max_regs
)
1869 first_error (_("register out of range in list"));
1873 /* Note: a value of 2 * n is returned for the register Q<n>. */
1874 if (regtype
== REG_TYPE_NQ
)
1880 if (new_base
< base_reg
)
1881 base_reg
= new_base
;
1883 if (mask
& (setmask
<< new_base
))
1885 first_error (_("invalid register list"));
1889 if ((mask
>> new_base
) != 0 && ! warned
)
1891 as_tsktsk (_("register list not in ascending order"));
1895 mask
|= setmask
<< new_base
;
1898 if (*str
== '-') /* We have the start of a range expression */
1904 if ((high_range
= arm_typed_reg_parse (&str
, regtype
, NULL
, NULL
))
1907 inst
.error
= gettext (reg_expected_msgs
[regtype
]);
1911 if (high_range
>= max_regs
)
1913 first_error (_("register out of range in list"));
1917 if (regtype
== REG_TYPE_NQ
)
1918 high_range
= high_range
+ 1;
1920 if (high_range
<= new_base
)
1922 inst
.error
= _("register range not in ascending order");
1926 for (new_base
+= addregs
; new_base
<= high_range
; new_base
+= addregs
)
1928 if (mask
& (setmask
<< new_base
))
1930 inst
.error
= _("invalid register list");
1934 mask
|= setmask
<< new_base
;
1939 while (skip_past_comma (&str
) != FAIL
);
1943 /* Sanity check -- should have raised a parse error above. */
1944 if (count
== 0 || count
> max_regs
)
1949 /* Final test -- the registers must be consecutive. */
1951 for (i
= 0; i
< count
; i
++)
1953 if ((mask
& (1u << i
)) == 0)
1955 inst
.error
= _("non-contiguous register range");
1965 /* True if two alias types are the same. */
1968 neon_alias_types_same (struct neon_typed_alias
*a
, struct neon_typed_alias
*b
)
1976 if (a
->defined
!= b
->defined
)
1979 if ((a
->defined
& NTA_HASTYPE
) != 0
1980 && (a
->eltype
.type
!= b
->eltype
.type
1981 || a
->eltype
.size
!= b
->eltype
.size
))
1984 if ((a
->defined
& NTA_HASINDEX
) != 0
1985 && (a
->index
!= b
->index
))
1991 /* Parse element/structure lists for Neon VLD<n> and VST<n> instructions.
1992 The base register is put in *PBASE.
1993 The lane (or one of the NEON_*_LANES constants) is placed in bits [3:0] of
1995 The register stride (minus one) is put in bit 4 of the return value.
1996 Bits [6:5] encode the list length (minus one).
1997 The type of the list elements is put in *ELTYPE, if non-NULL. */
/* Decode the value returned by parse_neon_el_struct_list: the lane (or
   one of the NEON_*_LANES constants) is in bits [3:0], the register
   stride minus one is in bit 4, and the list length minus one is in
   bits [6:5].  */
#define NEON_LANE(X)		((X) & 0xf)
#define NEON_REG_STRIDE(X)	((((X) >> 4) & 1) + 1)
#define NEON_REGLIST_LENGTH(X)	((((X) >> 5) & 3) + 1)
2004 parse_neon_el_struct_list (char **str
, unsigned *pbase
,
2005 struct neon_type_el
*eltype
)
2012 int leading_brace
= 0;
2013 enum arm_reg_type rtype
= REG_TYPE_NDQ
;
2014 const char *const incr_error
= _("register stride must be 1 or 2");
2015 const char *const type_error
= _("mismatched element/structure types in list");
2016 struct neon_typed_alias firsttype
;
2017 firsttype
.defined
= 0;
2018 firsttype
.eltype
.type
= NT_invtype
;
2019 firsttype
.eltype
.size
= -1;
2020 firsttype
.index
= -1;
2022 if (skip_past_char (&ptr
, '{') == SUCCESS
)
2027 struct neon_typed_alias atype
;
2028 int getreg
= parse_typed_reg_or_scalar (&ptr
, rtype
, &rtype
, &atype
);
2032 first_error (_(reg_expected_msgs
[rtype
]));
2039 if (rtype
== REG_TYPE_NQ
)
2045 else if (reg_incr
== -1)
2047 reg_incr
= getreg
- base_reg
;
2048 if (reg_incr
< 1 || reg_incr
> 2)
2050 first_error (_(incr_error
));
2054 else if (getreg
!= base_reg
+ reg_incr
* count
)
2056 first_error (_(incr_error
));
2060 if (! neon_alias_types_same (&atype
, &firsttype
))
2062 first_error (_(type_error
));
2066 /* Handle Dn-Dm or Qn-Qm syntax. Can only be used with non-indexed list
2070 struct neon_typed_alias htype
;
2071 int hireg
, dregs
= (rtype
== REG_TYPE_NQ
) ? 2 : 1;
2073 lane
= NEON_INTERLEAVE_LANES
;
2074 else if (lane
!= NEON_INTERLEAVE_LANES
)
2076 first_error (_(type_error
));
2081 else if (reg_incr
!= 1)
2083 first_error (_("don't use Rn-Rm syntax with non-unit stride"));
2087 hireg
= parse_typed_reg_or_scalar (&ptr
, rtype
, NULL
, &htype
);
2090 first_error (_(reg_expected_msgs
[rtype
]));
2093 if (! neon_alias_types_same (&htype
, &firsttype
))
2095 first_error (_(type_error
));
2098 count
+= hireg
+ dregs
- getreg
;
2102 /* If we're using Q registers, we can't use [] or [n] syntax. */
2103 if (rtype
== REG_TYPE_NQ
)
2109 if ((atype
.defined
& NTA_HASINDEX
) != 0)
2113 else if (lane
!= atype
.index
)
2115 first_error (_(type_error
));
2119 else if (lane
== -1)
2120 lane
= NEON_INTERLEAVE_LANES
;
2121 else if (lane
!= NEON_INTERLEAVE_LANES
)
2123 first_error (_(type_error
));
2128 while ((count
!= 1 || leading_brace
) && skip_past_comma (&ptr
) != FAIL
);
2130 /* No lane set by [x]. We must be interleaving structures. */
2132 lane
= NEON_INTERLEAVE_LANES
;
2135 if (lane
== -1 || base_reg
== -1 || count
< 1 || count
> 4
2136 || (count
> 1 && reg_incr
== -1))
2138 first_error (_("error parsing element/structure list"));
2142 if ((count
> 1 || leading_brace
) && skip_past_char (&ptr
, '}') == FAIL
)
2144 first_error (_("expected }"));
2152 *eltype
= firsttype
.eltype
;
2157 return lane
| ((reg_incr
- 1) << 4) | ((count
- 1) << 5);
2160 /* Parse an explicit relocation suffix on an expression. This is
2161 either nothing, or a word in parentheses. Note that if !OBJ_ELF,
2162 arm_reloc_hsh contains no entries, so this function can only
2163 succeed if there is no () after the word. Returns -1 on error,
2164 BFD_RELOC_UNUSED if there wasn't any suffix. */
2167 parse_reloc (char **str
)
2169 struct reloc_entry
*r
;
2173 return BFD_RELOC_UNUSED
;
2178 while (*q
&& *q
!= ')' && *q
!= ',')
2183 if ((r
= (struct reloc_entry
*)
2184 hash_find_n (arm_reloc_hsh
, p
, q
- p
)) == NULL
)
2191 /* Directives: register aliases. */
2193 static struct reg_entry
*
2194 insert_reg_alias (char *str
, unsigned number
, int type
)
2196 struct reg_entry
*new_reg
;
2199 if ((new_reg
= (struct reg_entry
*) hash_find (arm_reg_hsh
, str
)) != 0)
2201 if (new_reg
->builtin
)
2202 as_warn (_("ignoring attempt to redefine built-in register '%s'"), str
);
2204 /* Only warn about a redefinition if it's not defined as the
2206 else if (new_reg
->number
!= number
|| new_reg
->type
!= type
)
2207 as_warn (_("ignoring redefinition of register alias '%s'"), str
);
2212 name
= xstrdup (str
);
2213 new_reg
= XNEW (struct reg_entry
);
2215 new_reg
->name
= name
;
2216 new_reg
->number
= number
;
2217 new_reg
->type
= type
;
2218 new_reg
->builtin
= FALSE
;
2219 new_reg
->neon
= NULL
;
2221 if (hash_insert (arm_reg_hsh
, name
, (void *) new_reg
))
2228 insert_neon_reg_alias (char *str
, int number
, int type
,
2229 struct neon_typed_alias
*atype
)
2231 struct reg_entry
*reg
= insert_reg_alias (str
, number
, type
);
2235 first_error (_("attempt to redefine typed alias"));
2241 reg
->neon
= XNEW (struct neon_typed_alias
);
2242 *reg
->neon
= *atype
;
2246 /* Look for the .req directive. This is of the form:
2248 new_register_name .req existing_register_name
2250 If we find one, or if it looks sufficiently like one that we want to
2251 handle any error here, return TRUE. Otherwise return FALSE. */
2254 create_register_alias (char * newname
, char *p
)
2256 struct reg_entry
*old
;
2257 char *oldname
, *nbuf
;
2260 /* The input scrubber ensures that whitespace after the mnemonic is
2261 collapsed to single spaces. */
2263 if (strncmp (oldname
, " .req ", 6) != 0)
2267 if (*oldname
== '\0')
2270 old
= (struct reg_entry
*) hash_find (arm_reg_hsh
, oldname
);
2273 as_warn (_("unknown register '%s' -- .req ignored"), oldname
);
2277 /* If TC_CASE_SENSITIVE is defined, then newname already points to
2278 the desired alias name, and p points to its end. If not, then
2279 the desired alias name is in the global original_case_string. */
2280 #ifdef TC_CASE_SENSITIVE
2283 newname
= original_case_string
;
2284 nlen
= strlen (newname
);
2287 nbuf
= xmemdup0 (newname
, nlen
);
2289 /* Create aliases under the new name as stated; an all-lowercase
2290 version of the new name; and an all-uppercase version of the new
2292 if (insert_reg_alias (nbuf
, old
->number
, old
->type
) != NULL
)
2294 for (p
= nbuf
; *p
; p
++)
2297 if (strncmp (nbuf
, newname
, nlen
))
2299 /* If this attempt to create an additional alias fails, do not bother
2300 trying to create the all-lower case alias. We will fail and issue
2301 a second, duplicate error message. This situation arises when the
2302 programmer does something like:
2305 The second .req creates the "Foo" alias but then fails to create
2306 the artificial FOO alias because it has already been created by the
2308 if (insert_reg_alias (nbuf
, old
->number
, old
->type
) == NULL
)
2315 for (p
= nbuf
; *p
; p
++)
2318 if (strncmp (nbuf
, newname
, nlen
))
2319 insert_reg_alias (nbuf
, old
->number
, old
->type
);
2326 /* Create a Neon typed/indexed register alias using directives, e.g.:
2331 These typed registers can be used instead of the types specified after the
2332 Neon mnemonic, so long as all operands given have types. Types can also be
2333 specified directly, e.g.:
2334 vadd d0.s32, d1.s32, d2.s32 */
2337 create_neon_reg_alias (char *newname
, char *p
)
2339 enum arm_reg_type basetype
;
2340 struct reg_entry
*basereg
;
2341 struct reg_entry mybasereg
;
2342 struct neon_type ntype
;
2343 struct neon_typed_alias typeinfo
;
2344 char *namebuf
, *nameend ATTRIBUTE_UNUSED
;
2347 typeinfo
.defined
= 0;
2348 typeinfo
.eltype
.type
= NT_invtype
;
2349 typeinfo
.eltype
.size
= -1;
2350 typeinfo
.index
= -1;
2354 if (strncmp (p
, " .dn ", 5) == 0)
2355 basetype
= REG_TYPE_VFD
;
2356 else if (strncmp (p
, " .qn ", 5) == 0)
2357 basetype
= REG_TYPE_NQ
;
2366 basereg
= arm_reg_parse_multi (&p
);
2368 if (basereg
&& basereg
->type
!= basetype
)
2370 as_bad (_("bad type for register"));
2374 if (basereg
== NULL
)
2377 /* Try parsing as an integer. */
2378 my_get_expression (&exp
, &p
, GE_NO_PREFIX
);
2379 if (exp
.X_op
!= O_constant
)
2381 as_bad (_("expression must be constant"));
2384 basereg
= &mybasereg
;
2385 basereg
->number
= (basetype
== REG_TYPE_NQ
) ? exp
.X_add_number
* 2
2391 typeinfo
= *basereg
->neon
;
2393 if (parse_neon_type (&ntype
, &p
) == SUCCESS
)
2395 /* We got a type. */
2396 if (typeinfo
.defined
& NTA_HASTYPE
)
2398 as_bad (_("can't redefine the type of a register alias"));
2402 typeinfo
.defined
|= NTA_HASTYPE
;
2403 if (ntype
.elems
!= 1)
2405 as_bad (_("you must specify a single type only"));
2408 typeinfo
.eltype
= ntype
.el
[0];
2411 if (skip_past_char (&p
, '[') == SUCCESS
)
2414 /* We got a scalar index. */
2416 if (typeinfo
.defined
& NTA_HASINDEX
)
2418 as_bad (_("can't redefine the index of a scalar alias"));
2422 my_get_expression (&exp
, &p
, GE_NO_PREFIX
);
2424 if (exp
.X_op
!= O_constant
)
2426 as_bad (_("scalar index must be constant"));
2430 typeinfo
.defined
|= NTA_HASINDEX
;
2431 typeinfo
.index
= exp
.X_add_number
;
2433 if (skip_past_char (&p
, ']') == FAIL
)
2435 as_bad (_("expecting ]"));
2440 /* If TC_CASE_SENSITIVE is defined, then newname already points to
2441 the desired alias name, and p points to its end. If not, then
2442 the desired alias name is in the global original_case_string. */
2443 #ifdef TC_CASE_SENSITIVE
2444 namelen
= nameend
- newname
;
2446 newname
= original_case_string
;
2447 namelen
= strlen (newname
);
2450 namebuf
= xmemdup0 (newname
, namelen
);
2452 insert_neon_reg_alias (namebuf
, basereg
->number
, basetype
,
2453 typeinfo
.defined
!= 0 ? &typeinfo
: NULL
);
2455 /* Insert name in all uppercase. */
2456 for (p
= namebuf
; *p
; p
++)
2459 if (strncmp (namebuf
, newname
, namelen
))
2460 insert_neon_reg_alias (namebuf
, basereg
->number
, basetype
,
2461 typeinfo
.defined
!= 0 ? &typeinfo
: NULL
);
2463 /* Insert name in all lowercase. */
2464 for (p
= namebuf
; *p
; p
++)
2467 if (strncmp (namebuf
, newname
, namelen
))
2468 insert_neon_reg_alias (namebuf
, basereg
->number
, basetype
,
2469 typeinfo
.defined
!= 0 ? &typeinfo
: NULL
);
2475 /* Should never be called, as .req goes between the alias and the
2476 register name, not at the beginning of the line. */
2479 s_req (int a ATTRIBUTE_UNUSED
)
2481 as_bad (_("invalid syntax for .req directive"));
2485 s_dn (int a ATTRIBUTE_UNUSED
)
2487 as_bad (_("invalid syntax for .dn directive"));
2491 s_qn (int a ATTRIBUTE_UNUSED
)
2493 as_bad (_("invalid syntax for .qn directive"));
2496 /* The .unreq directive deletes an alias which was previously defined
2497 by .req. For example:
2503 s_unreq (int a ATTRIBUTE_UNUSED
)
2508 name
= input_line_pointer
;
2510 while (*input_line_pointer
!= 0
2511 && *input_line_pointer
!= ' '
2512 && *input_line_pointer
!= '\n')
2513 ++input_line_pointer
;
2515 saved_char
= *input_line_pointer
;
2516 *input_line_pointer
= 0;
2519 as_bad (_("invalid syntax for .unreq directive"));
2522 struct reg_entry
*reg
= (struct reg_entry
*) hash_find (arm_reg_hsh
,
2526 as_bad (_("unknown register alias '%s'"), name
);
2527 else if (reg
->builtin
)
2528 as_warn (_("ignoring attempt to use .unreq on fixed register name: '%s'"),
2535 hash_delete (arm_reg_hsh
, name
, FALSE
);
2536 free ((char *) reg
->name
);
2541 /* Also locate the all upper case and all lower case versions.
2542 Do not complain if we cannot find one or the other as it
2543 was probably deleted above. */
2545 nbuf
= strdup (name
);
2546 for (p
= nbuf
; *p
; p
++)
2548 reg
= (struct reg_entry
*) hash_find (arm_reg_hsh
, nbuf
);
2551 hash_delete (arm_reg_hsh
, nbuf
, FALSE
);
2552 free ((char *) reg
->name
);
2558 for (p
= nbuf
; *p
; p
++)
2560 reg
= (struct reg_entry
*) hash_find (arm_reg_hsh
, nbuf
);
2563 hash_delete (arm_reg_hsh
, nbuf
, FALSE
);
2564 free ((char *) reg
->name
);
2574 *input_line_pointer
= saved_char
;
2575 demand_empty_rest_of_line ();
2578 /* Directives: Instruction set selection. */
2581 /* This code is to handle mapping symbols as defined in the ARM ELF spec.
2582 (See "Mapping symbols", section 4.5.5, ARM AAELF version 1.0).
2583 Note that previously, $a and $t has type STT_FUNC (BSF_OBJECT flag),
2584 and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
2586 /* Create a new mapping symbol for the transition to STATE. */
2589 make_mapping_symbol (enum mstate state
, valueT value
, fragS
*frag
)
2592 const char * symname
;
2599 type
= BSF_NO_FLAGS
;
2603 type
= BSF_NO_FLAGS
;
2607 type
= BSF_NO_FLAGS
;
2613 symbolP
= symbol_new (symname
, now_seg
, value
, frag
);
2614 symbol_get_bfdsym (symbolP
)->flags
|= type
| BSF_LOCAL
;
2619 THUMB_SET_FUNC (symbolP
, 0);
2620 ARM_SET_THUMB (symbolP
, 0);
2621 ARM_SET_INTERWORK (symbolP
, support_interwork
);
2625 THUMB_SET_FUNC (symbolP
, 1);
2626 ARM_SET_THUMB (symbolP
, 1);
2627 ARM_SET_INTERWORK (symbolP
, support_interwork
);
2635 /* Save the mapping symbols for future reference. Also check that
2636 we do not place two mapping symbols at the same offset within a
2637 frag. We'll handle overlap between frags in
2638 check_mapping_symbols.
2640 If .fill or other data filling directive generates zero sized data,
2641 the mapping symbol for the following code will have the same value
2642 as the one generated for the data filling directive. In this case,
2643 we replace the old symbol with the new one at the same address. */
2646 if (frag
->tc_frag_data
.first_map
!= NULL
)
2648 know (S_GET_VALUE (frag
->tc_frag_data
.first_map
) == 0);
2649 symbol_remove (frag
->tc_frag_data
.first_map
, &symbol_rootP
, &symbol_lastP
);
2651 frag
->tc_frag_data
.first_map
= symbolP
;
2653 if (frag
->tc_frag_data
.last_map
!= NULL
)
2655 know (S_GET_VALUE (frag
->tc_frag_data
.last_map
) <= S_GET_VALUE (symbolP
));
2656 if (S_GET_VALUE (frag
->tc_frag_data
.last_map
) == S_GET_VALUE (symbolP
))
2657 symbol_remove (frag
->tc_frag_data
.last_map
, &symbol_rootP
, &symbol_lastP
);
2659 frag
->tc_frag_data
.last_map
= symbolP
;
2662 /* We must sometimes convert a region marked as code to data during
2663 code alignment, if an odd number of bytes have to be padded. The
2664 code mapping symbol is pushed to an aligned address. */
2667 insert_data_mapping_symbol (enum mstate state
,
2668 valueT value
, fragS
*frag
, offsetT bytes
)
2670 /* If there was already a mapping symbol, remove it. */
2671 if (frag
->tc_frag_data
.last_map
!= NULL
2672 && S_GET_VALUE (frag
->tc_frag_data
.last_map
) == frag
->fr_address
+ value
)
2674 symbolS
*symp
= frag
->tc_frag_data
.last_map
;
2678 know (frag
->tc_frag_data
.first_map
== symp
);
2679 frag
->tc_frag_data
.first_map
= NULL
;
2681 frag
->tc_frag_data
.last_map
= NULL
;
2682 symbol_remove (symp
, &symbol_rootP
, &symbol_lastP
);
2685 make_mapping_symbol (MAP_DATA
, value
, frag
);
2686 make_mapping_symbol (state
, value
+ bytes
, frag
);
2689 static void mapping_state_2 (enum mstate state
, int max_chars
);
2691 /* Set the mapping state to STATE. Only call this when about to
2692 emit some STATE bytes to the file. */
/* True iff the current mapping state is FROM and the requested state is TO.
   Relies on local variables `mapstate' and `state' at the point of use.  */
#define TRANSITION(from, to) (mapstate == (from) && state == (to))
2696 mapping_state (enum mstate state
)
2698 enum mstate mapstate
= seg_info (now_seg
)->tc_segment_info_data
.mapstate
;
2700 if (mapstate
== state
)
2701 /* The mapping symbol has already been emitted.
2702 There is nothing else to do. */
2705 if (state
== MAP_ARM
|| state
== MAP_THUMB
)
2707 All ARM instructions require 4-byte alignment.
2708 (Almost) all Thumb instructions require 2-byte alignment.
2710 When emitting instructions into any section, mark the section
2713 Some Thumb instructions are alignment-sensitive modulo 4 bytes,
2714 but themselves require 2-byte alignment; this applies to some
2715 PC- relative forms. However, these cases will involve implicit
2716 literal pool generation or an explicit .align >=2, both of
2717 which will cause the section to me marked with sufficient
2718 alignment. Thus, we don't handle those cases here. */
2719 record_alignment (now_seg
, state
== MAP_ARM
? 2 : 1);
2721 if (TRANSITION (MAP_UNDEFINED
, MAP_DATA
))
2722 /* This case will be evaluated later. */
2725 mapping_state_2 (state
, 0);
2728 /* Same as mapping_state, but MAX_CHARS bytes have already been
2729 allocated. Put the mapping symbol that far back. */
2732 mapping_state_2 (enum mstate state
, int max_chars
)
2734 enum mstate mapstate
= seg_info (now_seg
)->tc_segment_info_data
.mapstate
;
2736 if (!SEG_NORMAL (now_seg
))
2739 if (mapstate
== state
)
2740 /* The mapping symbol has already been emitted.
2741 There is nothing else to do. */
2744 if (TRANSITION (MAP_UNDEFINED
, MAP_ARM
)
2745 || TRANSITION (MAP_UNDEFINED
, MAP_THUMB
))
2747 struct frag
* const frag_first
= seg_info (now_seg
)->frchainP
->frch_root
;
2748 const int add_symbol
= (frag_now
!= frag_first
) || (frag_now_fix () > 0);
2751 make_mapping_symbol (MAP_DATA
, (valueT
) 0, frag_first
);
2754 seg_info (now_seg
)->tc_segment_info_data
.mapstate
= state
;
2755 make_mapping_symbol (state
, (valueT
) frag_now_fix () - max_chars
, frag_now
);
2759 #define mapping_state(x) ((void)0)
2760 #define mapping_state_2(x, y) ((void)0)
2763 /* Find the real, Thumb encoded start of a Thumb function. */
2767 find_real_start (symbolS
* symbolP
)
2770 const char * name
= S_GET_NAME (symbolP
);
2771 symbolS
* new_target
;
2773 /* This definition must agree with the one in gcc/config/arm/thumb.c. */
2774 #define STUB_NAME ".real_start_of"
2779 /* The compiler may generate BL instructions to local labels because
2780 it needs to perform a branch to a far away location. These labels
2781 do not have a corresponding ".real_start_of" label. We check
2782 both for S_IS_LOCAL and for a leading dot, to give a way to bypass
2783 the ".real_start_of" convention for nonlocal branches. */
2784 if (S_IS_LOCAL (symbolP
) || name
[0] == '.')
2787 real_start
= concat (STUB_NAME
, name
, NULL
);
2788 new_target
= symbol_find (real_start
);
2791 if (new_target
== NULL
)
2793 as_warn (_("Failed to find real start of function: %s\n"), name
);
2794 new_target
= symbolP
;
2802 opcode_select (int width
)
2809 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v4t
))
2810 as_bad (_("selected processor does not support THUMB opcodes"));
2813 /* No need to force the alignment, since we will have been
2814 coming from ARM mode, which is word-aligned. */
2815 record_alignment (now_seg
, 1);
2822 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
))
2823 as_bad (_("selected processor does not support ARM opcodes"));
2828 frag_align (2, 0, 0);
2830 record_alignment (now_seg
, 1);
2835 as_bad (_("invalid instruction size selected (%d)"), width
);
2840 s_arm (int ignore ATTRIBUTE_UNUSED
)
2843 demand_empty_rest_of_line ();
2847 s_thumb (int ignore ATTRIBUTE_UNUSED
)
2850 demand_empty_rest_of_line ();
2854 s_code (int unused ATTRIBUTE_UNUSED
)
2858 temp
= get_absolute_expression ();
2863 opcode_select (temp
);
2867 as_bad (_("invalid operand to .code directive (%d) (expecting 16 or 32)"), temp
);
2872 s_force_thumb (int ignore ATTRIBUTE_UNUSED
)
2874 /* If we are not already in thumb mode go into it, EVEN if
2875 the target processor does not support thumb instructions.
2876 This is used by gcc/config/arm/lib1funcs.asm for example
2877 to compile interworking support functions even if the
2878 target processor should not support interworking. */
2882 record_alignment (now_seg
, 1);
2885 demand_empty_rest_of_line ();
2889 s_thumb_func (int ignore ATTRIBUTE_UNUSED
)
2893 /* The following label is the name/address of the start of a Thumb function.
2894 We need to know this for the interworking support. */
2895 label_is_thumb_function_name
= TRUE
;
2898 /* Perform a .set directive, but also mark the alias as
2899 being a thumb function. */
2902 s_thumb_set (int equiv
)
2904 /* XXX the following is a duplicate of the code for s_set() in read.c
2905 We cannot just call that code as we need to get at the symbol that
2912 /* Especial apologies for the random logic:
2913 This just grew, and could be parsed much more simply!
2915 delim
= get_symbol_name (& name
);
2916 end_name
= input_line_pointer
;
2917 (void) restore_line_pointer (delim
);
2919 if (*input_line_pointer
!= ',')
2922 as_bad (_("expected comma after name \"%s\""), name
);
2924 ignore_rest_of_line ();
2928 input_line_pointer
++;
2931 if (name
[0] == '.' && name
[1] == '\0')
2933 /* XXX - this should not happen to .thumb_set. */
2937 if ((symbolP
= symbol_find (name
)) == NULL
2938 && (symbolP
= md_undefined_symbol (name
)) == NULL
)
2941 /* When doing symbol listings, play games with dummy fragments living
2942 outside the normal fragment chain to record the file and line info
2944 if (listing
& LISTING_SYMBOLS
)
2946 extern struct list_info_struct
* listing_tail
;
2947 fragS
* dummy_frag
= (fragS
* ) xmalloc (sizeof (fragS
));
2949 memset (dummy_frag
, 0, sizeof (fragS
));
2950 dummy_frag
->fr_type
= rs_fill
;
2951 dummy_frag
->line
= listing_tail
;
2952 symbolP
= symbol_new (name
, undefined_section
, 0, dummy_frag
);
2953 dummy_frag
->fr_symbol
= symbolP
;
2957 symbolP
= symbol_new (name
, undefined_section
, 0, &zero_address_frag
);
2960 /* "set" symbols are local unless otherwise specified. */
2961 SF_SET_LOCAL (symbolP
);
2962 #endif /* OBJ_COFF */
2963 } /* Make a new symbol. */
2965 symbol_table_insert (symbolP
);
2970 && S_IS_DEFINED (symbolP
)
2971 && S_GET_SEGMENT (symbolP
) != reg_section
)
2972 as_bad (_("symbol `%s' already defined"), S_GET_NAME (symbolP
));
2974 pseudo_set (symbolP
);
2976 demand_empty_rest_of_line ();
2978 /* XXX Now we come to the Thumb specific bit of code. */
2980 THUMB_SET_FUNC (symbolP
, 1);
2981 ARM_SET_THUMB (symbolP
, 1);
2982 #if defined OBJ_ELF || defined OBJ_COFF
2983 ARM_SET_INTERWORK (symbolP
, support_interwork
);
2987 /* Directives: Mode selection. */
2989 /* .syntax [unified|divided] - choose the new unified syntax
2990 (same for Arm and Thumb encoding, modulo slight differences in what
2991 can be represented) or the old divergent syntax for each mode. */
2993 s_syntax (int unused ATTRIBUTE_UNUSED
)
2997 delim
= get_symbol_name (& name
);
2999 if (!strcasecmp (name
, "unified"))
3000 unified_syntax
= TRUE
;
3001 else if (!strcasecmp (name
, "divided"))
3002 unified_syntax
= FALSE
;
3005 as_bad (_("unrecognized syntax mode \"%s\""), name
);
3008 (void) restore_line_pointer (delim
);
3009 demand_empty_rest_of_line ();
3012 /* Directives: sectioning and alignment. */
3015 s_bss (int ignore ATTRIBUTE_UNUSED
)
3017 /* We don't support putting frags in the BSS segment, we fake it by
3018 marking in_bss, then looking at s_skip for clues. */
3019 subseg_set (bss_section
, 0);
3020 demand_empty_rest_of_line ();
3022 #ifdef md_elf_section_change_hook
3023 md_elf_section_change_hook ();
3028 s_even (int ignore ATTRIBUTE_UNUSED
)
3030 /* Never make frag if expect extra pass. */
3032 frag_align (1, 0, 0);
3034 record_alignment (now_seg
, 1);
3036 demand_empty_rest_of_line ();
3039 /* Directives: CodeComposer Studio. */
3041 /* .ref (for CodeComposer Studio syntax only). */
3043 s_ccs_ref (int unused ATTRIBUTE_UNUSED
)
3045 if (codecomposer_syntax
)
3046 ignore_rest_of_line ();
3048 as_bad (_(".ref pseudo-op only available with -mccs flag."));
3051 /* If name is not NULL, then it is used for marking the beginning of a
3052 function, whereas if it is NULL then it means the function end. */
3054 asmfunc_debug (const char * name
)
3056 static const char * last_name
= NULL
;
3060 gas_assert (last_name
== NULL
);
3063 if (debug_type
== DEBUG_STABS
)
3064 stabs_generate_asm_func (name
, name
);
3068 gas_assert (last_name
!= NULL
);
3070 if (debug_type
== DEBUG_STABS
)
3071 stabs_generate_asm_endfunc (last_name
, last_name
);
3078 s_ccs_asmfunc (int unused ATTRIBUTE_UNUSED
)
3080 if (codecomposer_syntax
)
3082 switch (asmfunc_state
)
3084 case OUTSIDE_ASMFUNC
:
3085 asmfunc_state
= WAITING_ASMFUNC_NAME
;
3088 case WAITING_ASMFUNC_NAME
:
3089 as_bad (_(".asmfunc repeated."));
3092 case WAITING_ENDASMFUNC
:
3093 as_bad (_(".asmfunc without function."));
3096 demand_empty_rest_of_line ();
3099 as_bad (_(".asmfunc pseudo-op only available with -mccs flag."));
3103 s_ccs_endasmfunc (int unused ATTRIBUTE_UNUSED
)
3105 if (codecomposer_syntax
)
3107 switch (asmfunc_state
)
3109 case OUTSIDE_ASMFUNC
:
3110 as_bad (_(".endasmfunc without a .asmfunc."));
3113 case WAITING_ASMFUNC_NAME
:
3114 as_bad (_(".endasmfunc without function."));
3117 case WAITING_ENDASMFUNC
:
3118 asmfunc_state
= OUTSIDE_ASMFUNC
;
3119 asmfunc_debug (NULL
);
3122 demand_empty_rest_of_line ();
3125 as_bad (_(".endasmfunc pseudo-op only available with -mccs flag."));
3129 s_ccs_def (int name
)
3131 if (codecomposer_syntax
)
3134 as_bad (_(".def pseudo-op only available with -mccs flag."));
3137 /* Directives: Literal pools. */
3139 static literal_pool
*
3140 find_literal_pool (void)
3142 literal_pool
* pool
;
3144 for (pool
= list_of_pools
; pool
!= NULL
; pool
= pool
->next
)
3146 if (pool
->section
== now_seg
3147 && pool
->sub_section
== now_subseg
)
3154 static literal_pool
*
3155 find_or_make_literal_pool (void)
3157 /* Next literal pool ID number. */
3158 static unsigned int latest_pool_num
= 1;
3159 literal_pool
* pool
;
3161 pool
= find_literal_pool ();
3165 /* Create a new pool. */
3166 pool
= XNEW (literal_pool
);
3170 pool
->next_free_entry
= 0;
3171 pool
->section
= now_seg
;
3172 pool
->sub_section
= now_subseg
;
3173 pool
->next
= list_of_pools
;
3174 pool
->symbol
= NULL
;
3175 pool
->alignment
= 2;
3177 /* Add it to the list. */
3178 list_of_pools
= pool
;
3181 /* New pools, and emptied pools, will have a NULL symbol. */
3182 if (pool
->symbol
== NULL
)
3184 pool
->symbol
= symbol_create (FAKE_LABEL_NAME
, undefined_section
,
3185 (valueT
) 0, &zero_address_frag
);
3186 pool
->id
= latest_pool_num
++;
3193 /* Add the literal in the global 'inst'
3194 structure to the relevant literal pool. */
3197 add_to_lit_pool (unsigned int nbytes
)
3199 #define PADDING_SLOT 0x1
3200 #define LIT_ENTRY_SIZE_MASK 0xFF
3201 literal_pool
* pool
;
3202 unsigned int entry
, pool_size
= 0;
3203 bfd_boolean padding_slot_p
= FALSE
;
3209 imm1
= inst
.operands
[1].imm
;
3210 imm2
= (inst
.operands
[1].regisimm
? inst
.operands
[1].reg
3211 : inst
.reloc
.exp
.X_unsigned
? 0
3212 : ((bfd_int64_t
) inst
.operands
[1].imm
) >> 32);
3213 if (target_big_endian
)
3216 imm2
= inst
.operands
[1].imm
;
3220 pool
= find_or_make_literal_pool ();
3222 /* Check if this literal value is already in the pool. */
3223 for (entry
= 0; entry
< pool
->next_free_entry
; entry
++)
3227 if ((pool
->literals
[entry
].X_op
== inst
.reloc
.exp
.X_op
)
3228 && (inst
.reloc
.exp
.X_op
== O_constant
)
3229 && (pool
->literals
[entry
].X_add_number
3230 == inst
.reloc
.exp
.X_add_number
)
3231 && (pool
->literals
[entry
].X_md
== nbytes
)
3232 && (pool
->literals
[entry
].X_unsigned
3233 == inst
.reloc
.exp
.X_unsigned
))
3236 if ((pool
->literals
[entry
].X_op
== inst
.reloc
.exp
.X_op
)
3237 && (inst
.reloc
.exp
.X_op
== O_symbol
)
3238 && (pool
->literals
[entry
].X_add_number
3239 == inst
.reloc
.exp
.X_add_number
)
3240 && (pool
->literals
[entry
].X_add_symbol
3241 == inst
.reloc
.exp
.X_add_symbol
)
3242 && (pool
->literals
[entry
].X_op_symbol
3243 == inst
.reloc
.exp
.X_op_symbol
)
3244 && (pool
->literals
[entry
].X_md
== nbytes
))
3247 else if ((nbytes
== 8)
3248 && !(pool_size
& 0x7)
3249 && ((entry
+ 1) != pool
->next_free_entry
)
3250 && (pool
->literals
[entry
].X_op
== O_constant
)
3251 && (pool
->literals
[entry
].X_add_number
== (offsetT
) imm1
)
3252 && (pool
->literals
[entry
].X_unsigned
3253 == inst
.reloc
.exp
.X_unsigned
)
3254 && (pool
->literals
[entry
+ 1].X_op
== O_constant
)
3255 && (pool
->literals
[entry
+ 1].X_add_number
== (offsetT
) imm2
)
3256 && (pool
->literals
[entry
+ 1].X_unsigned
3257 == inst
.reloc
.exp
.X_unsigned
))
3260 padding_slot_p
= ((pool
->literals
[entry
].X_md
>> 8) == PADDING_SLOT
);
3261 if (padding_slot_p
&& (nbytes
== 4))
3267 /* Do we need to create a new entry? */
3268 if (entry
== pool
->next_free_entry
)
3270 if (entry
>= MAX_LITERAL_POOL_SIZE
)
3272 inst
.error
= _("literal pool overflow");
3278 /* For 8-byte entries, we align to an 8-byte boundary,
3279 and split it into two 4-byte entries, because on 32-bit
3280 host, 8-byte constants are treated as big num, thus
3281 saved in "generic_bignum" which will be overwritten
3282 by later assignments.
3284 We also need to make sure there is enough space for
3287 We also check to make sure the literal operand is a
3289 if (!(inst
.reloc
.exp
.X_op
== O_constant
3290 || inst
.reloc
.exp
.X_op
== O_big
))
3292 inst
.error
= _("invalid type for literal pool");
3295 else if (pool_size
& 0x7)
3297 if ((entry
+ 2) >= MAX_LITERAL_POOL_SIZE
)
3299 inst
.error
= _("literal pool overflow");
3303 pool
->literals
[entry
] = inst
.reloc
.exp
;
3304 pool
->literals
[entry
].X_op
= O_constant
;
3305 pool
->literals
[entry
].X_add_number
= 0;
3306 pool
->literals
[entry
++].X_md
= (PADDING_SLOT
<< 8) | 4;
3307 pool
->next_free_entry
+= 1;
3310 else if ((entry
+ 1) >= MAX_LITERAL_POOL_SIZE
)
3312 inst
.error
= _("literal pool overflow");
3316 pool
->literals
[entry
] = inst
.reloc
.exp
;
3317 pool
->literals
[entry
].X_op
= O_constant
;
3318 pool
->literals
[entry
].X_add_number
= imm1
;
3319 pool
->literals
[entry
].X_unsigned
= inst
.reloc
.exp
.X_unsigned
;
3320 pool
->literals
[entry
++].X_md
= 4;
3321 pool
->literals
[entry
] = inst
.reloc
.exp
;
3322 pool
->literals
[entry
].X_op
= O_constant
;
3323 pool
->literals
[entry
].X_add_number
= imm2
;
3324 pool
->literals
[entry
].X_unsigned
= inst
.reloc
.exp
.X_unsigned
;
3325 pool
->literals
[entry
].X_md
= 4;
3326 pool
->alignment
= 3;
3327 pool
->next_free_entry
+= 1;
3331 pool
->literals
[entry
] = inst
.reloc
.exp
;
3332 pool
->literals
[entry
].X_md
= 4;
3336 /* PR ld/12974: Record the location of the first source line to reference
3337 this entry in the literal pool. If it turns out during linking that the
3338 symbol does not exist we will be able to give an accurate line number for
3339 the (first use of the) missing reference. */
3340 if (debug_type
== DEBUG_DWARF2
)
3341 dwarf2_where (pool
->locs
+ entry
);
3343 pool
->next_free_entry
+= 1;
3345 else if (padding_slot_p
)
3347 pool
->literals
[entry
] = inst
.reloc
.exp
;
3348 pool
->literals
[entry
].X_md
= nbytes
;
3351 inst
.reloc
.exp
.X_op
= O_symbol
;
3352 inst
.reloc
.exp
.X_add_number
= pool_size
;
3353 inst
.reloc
.exp
.X_add_symbol
= pool
->symbol
;
3359 tc_start_label_without_colon (void)
3361 bfd_boolean ret
= TRUE
;
3363 if (codecomposer_syntax
&& asmfunc_state
== WAITING_ASMFUNC_NAME
)
3365 const char *label
= input_line_pointer
;
3367 while (!is_end_of_line
[(int) label
[-1]])
3372 as_bad (_("Invalid label '%s'"), label
);
3376 asmfunc_debug (label
);
3378 asmfunc_state
= WAITING_ENDASMFUNC
;
3384 /* Can't use symbol_new here, so have to create a symbol and then at
3385 a later date assign it a value. That's what these functions do. */
3388 symbol_locate (symbolS
* symbolP
,
3389 const char * name
, /* It is copied, the caller can modify. */
3390 segT segment
, /* Segment identifier (SEG_<something>). */
3391 valueT valu
, /* Symbol value. */
3392 fragS
* frag
) /* Associated fragment. */
3395 char * preserved_copy_of_name
;
3397 name_length
= strlen (name
) + 1; /* +1 for \0. */
3398 obstack_grow (¬es
, name
, name_length
);
3399 preserved_copy_of_name
= (char *) obstack_finish (¬es
);
3401 #ifdef tc_canonicalize_symbol_name
3402 preserved_copy_of_name
=
3403 tc_canonicalize_symbol_name (preserved_copy_of_name
);
3406 S_SET_NAME (symbolP
, preserved_copy_of_name
);
3408 S_SET_SEGMENT (symbolP
, segment
);
3409 S_SET_VALUE (symbolP
, valu
);
3410 symbol_clear_list_pointers (symbolP
);
3412 symbol_set_frag (symbolP
, frag
);
3414 /* Link to end of symbol chain. */
3416 extern int symbol_table_frozen
;
3418 if (symbol_table_frozen
)
3422 symbol_append (symbolP
, symbol_lastP
, & symbol_rootP
, & symbol_lastP
);
3424 obj_symbol_new_hook (symbolP
);
3426 #ifdef tc_symbol_new_hook
3427 tc_symbol_new_hook (symbolP
);
3431 verify_symbol_chain (symbol_rootP
, symbol_lastP
);
3432 #endif /* DEBUG_SYMS */
3436 s_ltorg (int ignored ATTRIBUTE_UNUSED
)
3439 literal_pool
* pool
;
3442 pool
= find_literal_pool ();
3444 || pool
->symbol
== NULL
3445 || pool
->next_free_entry
== 0)
3448 /* Align pool as you have word accesses.
3449 Only make a frag if we have to. */
3451 frag_align (pool
->alignment
, 0, 0);
3453 record_alignment (now_seg
, 2);
3456 seg_info (now_seg
)->tc_segment_info_data
.mapstate
= MAP_DATA
;
3457 make_mapping_symbol (MAP_DATA
, (valueT
) frag_now_fix (), frag_now
);
3459 sprintf (sym_name
, "$$lit_\002%x", pool
->id
);
3461 symbol_locate (pool
->symbol
, sym_name
, now_seg
,
3462 (valueT
) frag_now_fix (), frag_now
);
3463 symbol_table_insert (pool
->symbol
);
3465 ARM_SET_THUMB (pool
->symbol
, thumb_mode
);
3467 #if defined OBJ_COFF || defined OBJ_ELF
3468 ARM_SET_INTERWORK (pool
->symbol
, support_interwork
);
3471 for (entry
= 0; entry
< pool
->next_free_entry
; entry
++)
3474 if (debug_type
== DEBUG_DWARF2
)
3475 dwarf2_gen_line_info (frag_now_fix (), pool
->locs
+ entry
);
3477 /* First output the expression in the instruction to the pool. */
3478 emit_expr (&(pool
->literals
[entry
]),
3479 pool
->literals
[entry
].X_md
& LIT_ENTRY_SIZE_MASK
);
3482 /* Mark the pool as empty. */
3483 pool
->next_free_entry
= 0;
3484 pool
->symbol
= NULL
;
3488 /* Forward declarations for functions below, in the MD interface
3490 static void fix_new_arm (fragS
*, int, short, expressionS
*, int, int);
3491 static valueT
create_unwind_entry (int);
3492 static void start_unwind_section (const segT
, int);
3493 static void add_unwind_opcode (valueT
, int);
3494 static void flush_pending_unwind (void);
3496 /* Directives: Data. */
3499 s_arm_elf_cons (int nbytes
)
3503 #ifdef md_flush_pending_output
3504 md_flush_pending_output ();
3507 if (is_it_end_of_statement ())
3509 demand_empty_rest_of_line ();
3513 #ifdef md_cons_align
3514 md_cons_align (nbytes
);
3517 mapping_state (MAP_DATA
);
3521 char *base
= input_line_pointer
;
3525 if (exp
.X_op
!= O_symbol
)
3526 emit_expr (&exp
, (unsigned int) nbytes
);
3529 char *before_reloc
= input_line_pointer
;
3530 reloc
= parse_reloc (&input_line_pointer
);
3533 as_bad (_("unrecognized relocation suffix"));
3534 ignore_rest_of_line ();
3537 else if (reloc
== BFD_RELOC_UNUSED
)
3538 emit_expr (&exp
, (unsigned int) nbytes
);
3541 reloc_howto_type
*howto
= (reloc_howto_type
*)
3542 bfd_reloc_type_lookup (stdoutput
,
3543 (bfd_reloc_code_real_type
) reloc
);
3544 int size
= bfd_get_reloc_size (howto
);
3546 if (reloc
== BFD_RELOC_ARM_PLT32
)
3548 as_bad (_("(plt) is only valid on branch targets"));
3549 reloc
= BFD_RELOC_UNUSED
;
3554 as_bad (_("%s relocations do not fit in %d bytes"),
3555 howto
->name
, nbytes
);
3558 /* We've parsed an expression stopping at O_symbol.
3559 But there may be more expression left now that we
3560 have parsed the relocation marker. Parse it again.
3561 XXX Surely there is a cleaner way to do this. */
3562 char *p
= input_line_pointer
;
3564 char *save_buf
= XNEWVEC (char, input_line_pointer
- base
);
3566 memcpy (save_buf
, base
, input_line_pointer
- base
);
3567 memmove (base
+ (input_line_pointer
- before_reloc
),
3568 base
, before_reloc
- base
);
3570 input_line_pointer
= base
+ (input_line_pointer
-before_reloc
);
3572 memcpy (base
, save_buf
, p
- base
);
3574 offset
= nbytes
- size
;
3575 p
= frag_more (nbytes
);
3576 memset (p
, 0, nbytes
);
3577 fix_new_exp (frag_now
, p
- frag_now
->fr_literal
+ offset
,
3578 size
, &exp
, 0, (enum bfd_reloc_code_real
) reloc
);
3584 while (*input_line_pointer
++ == ',');
3586 /* Put terminator back into stream. */
3587 input_line_pointer
--;
3588 demand_empty_rest_of_line ();
3591 /* Emit an expression containing a 32-bit thumb instruction.
3592 Implementation based on put_thumb32_insn. */
3595 emit_thumb32_expr (expressionS
* exp
)
3597 expressionS exp_high
= *exp
;
3599 exp_high
.X_add_number
= (unsigned long)exp_high
.X_add_number
>> 16;
3600 emit_expr (& exp_high
, (unsigned int) THUMB_SIZE
);
3601 exp
->X_add_number
&= 0xffff;
3602 emit_expr (exp
, (unsigned int) THUMB_SIZE
);
/* Guess the instruction size based on the opcode.
   Returns 2 for a 16-bit opcode, 4 for a 32-bit one, and 0 when the
   size cannot be determined from the value alone.  */

static int
thumb_insn_size (int opcode)
{
  if ((unsigned int) opcode < 0xe800u)
    return 2;
  else if ((unsigned int) opcode >= 0xe8000000u)
    return 4;
  else
    return 0;
}
3619 emit_insn (expressionS
*exp
, int nbytes
)
3623 if (exp
->X_op
== O_constant
)
3628 size
= thumb_insn_size (exp
->X_add_number
);
3632 if (size
== 2 && (unsigned int)exp
->X_add_number
> 0xffffu
)
3634 as_bad (_(".inst.n operand too big. "\
3635 "Use .inst.w instead"));
3640 if (now_it
.state
== AUTOMATIC_IT_BLOCK
)
3641 set_it_insn_type_nonvoid (OUTSIDE_IT_INSN
, 0);
3643 set_it_insn_type_nonvoid (NEUTRAL_IT_INSN
, 0);
3645 if (thumb_mode
&& (size
> THUMB_SIZE
) && !target_big_endian
)
3646 emit_thumb32_expr (exp
);
3648 emit_expr (exp
, (unsigned int) size
);
3650 it_fsm_post_encode ();
3654 as_bad (_("cannot determine Thumb instruction size. " \
3655 "Use .inst.n/.inst.w instead"));
3658 as_bad (_("constant expression required"));
3663 /* Like s_arm_elf_cons but do not use md_cons_align and
3664 set the mapping state to MAP_ARM/MAP_THUMB. */
3667 s_arm_elf_inst (int nbytes
)
3669 if (is_it_end_of_statement ())
3671 demand_empty_rest_of_line ();
3675 /* Calling mapping_state () here will not change ARM/THUMB,
3676 but will ensure not to be in DATA state. */
3679 mapping_state (MAP_THUMB
);
3684 as_bad (_("width suffixes are invalid in ARM mode"));
3685 ignore_rest_of_line ();
3691 mapping_state (MAP_ARM
);
3700 if (! emit_insn (& exp
, nbytes
))
3702 ignore_rest_of_line ();
3706 while (*input_line_pointer
++ == ',');
3708 /* Put terminator back into stream. */
3709 input_line_pointer
--;
3710 demand_empty_rest_of_line ();
3713 /* Parse a .rel31 directive. */
3716 s_arm_rel31 (int ignored ATTRIBUTE_UNUSED
)
3723 if (*input_line_pointer
== '1')
3724 highbit
= 0x80000000;
3725 else if (*input_line_pointer
!= '0')
3726 as_bad (_("expected 0 or 1"));
3728 input_line_pointer
++;
3729 if (*input_line_pointer
!= ',')
3730 as_bad (_("missing comma"));
3731 input_line_pointer
++;
3733 #ifdef md_flush_pending_output
3734 md_flush_pending_output ();
3737 #ifdef md_cons_align
3741 mapping_state (MAP_DATA
);
3746 md_number_to_chars (p
, highbit
, 4);
3747 fix_new_arm (frag_now
, p
- frag_now
->fr_literal
, 4, &exp
, 1,
3748 BFD_RELOC_ARM_PREL31
);
3750 demand_empty_rest_of_line ();
3753 /* Directives: AEABI stack-unwind tables. */
3755 /* Parse an unwind_fnstart directive. Simply records the current location. */
3758 s_arm_unwind_fnstart (int ignored ATTRIBUTE_UNUSED
)
3760 demand_empty_rest_of_line ();
3761 if (unwind
.proc_start
)
3763 as_bad (_("duplicate .fnstart directive"));
3767 /* Mark the start of the function. */
3768 unwind
.proc_start
= expr_build_dot ();
3770 /* Reset the rest of the unwind info. */
3771 unwind
.opcode_count
= 0;
3772 unwind
.table_entry
= NULL
;
3773 unwind
.personality_routine
= NULL
;
3774 unwind
.personality_index
= -1;
3775 unwind
.frame_size
= 0;
3776 unwind
.fp_offset
= 0;
3777 unwind
.fp_reg
= REG_SP
;
3779 unwind
.sp_restored
= 0;
3783 /* Parse a handlerdata directive. Creates the exception handling table entry
3784 for the function. */
3787 s_arm_unwind_handlerdata (int ignored ATTRIBUTE_UNUSED
)
3789 demand_empty_rest_of_line ();
3790 if (!unwind
.proc_start
)
3791 as_bad (MISSING_FNSTART
);
3793 if (unwind
.table_entry
)
3794 as_bad (_("duplicate .handlerdata directive"));
3796 create_unwind_entry (1);
3799 /* Parse an unwind_fnend directive. Generates the index table entry. */
3802 s_arm_unwind_fnend (int ignored ATTRIBUTE_UNUSED
)
3807 unsigned int marked_pr_dependency
;
3809 demand_empty_rest_of_line ();
3811 if (!unwind
.proc_start
)
3813 as_bad (_(".fnend directive without .fnstart"));
3817 /* Add eh table entry. */
3818 if (unwind
.table_entry
== NULL
)
3819 val
= create_unwind_entry (0);
3823 /* Add index table entry. This is two words. */
3824 start_unwind_section (unwind
.saved_seg
, 1);
3825 frag_align (2, 0, 0);
3826 record_alignment (now_seg
, 2);
3828 ptr
= frag_more (8);
3830 where
= frag_now_fix () - 8;
3832 /* Self relative offset of the function start. */
3833 fix_new (frag_now
, where
, 4, unwind
.proc_start
, 0, 1,
3834 BFD_RELOC_ARM_PREL31
);
3836 /* Indicate dependency on EHABI-defined personality routines to the
3837 linker, if it hasn't been done already. */
3838 marked_pr_dependency
3839 = seg_info (now_seg
)->tc_segment_info_data
.marked_pr_dependency
;
3840 if (unwind
.personality_index
>= 0 && unwind
.personality_index
< 3
3841 && !(marked_pr_dependency
& (1 << unwind
.personality_index
)))
3843 static const char *const name
[] =
3845 "__aeabi_unwind_cpp_pr0",
3846 "__aeabi_unwind_cpp_pr1",
3847 "__aeabi_unwind_cpp_pr2"
3849 symbolS
*pr
= symbol_find_or_make (name
[unwind
.personality_index
]);
3850 fix_new (frag_now
, where
, 0, pr
, 0, 1, BFD_RELOC_NONE
);
3851 seg_info (now_seg
)->tc_segment_info_data
.marked_pr_dependency
3852 |= 1 << unwind
.personality_index
;
3856 /* Inline exception table entry. */
3857 md_number_to_chars (ptr
+ 4, val
, 4);
3859 /* Self relative offset of the table entry. */
3860 fix_new (frag_now
, where
+ 4, 4, unwind
.table_entry
, 0, 1,
3861 BFD_RELOC_ARM_PREL31
);
3863 /* Restore the original section. */
3864 subseg_set (unwind
.saved_seg
, unwind
.saved_subseg
);
3866 unwind
.proc_start
= NULL
;
3870 /* Parse an unwind_cantunwind directive. */
3873 s_arm_unwind_cantunwind (int ignored ATTRIBUTE_UNUSED
)
3875 demand_empty_rest_of_line ();
3876 if (!unwind
.proc_start
)
3877 as_bad (MISSING_FNSTART
);
3879 if (unwind
.personality_routine
|| unwind
.personality_index
!= -1)
3880 as_bad (_("personality routine specified for cantunwind frame"));
3882 unwind
.personality_index
= -2;
3886 /* Parse a personalityindex directive. */
3889 s_arm_unwind_personalityindex (int ignored ATTRIBUTE_UNUSED
)
3893 if (!unwind
.proc_start
)
3894 as_bad (MISSING_FNSTART
);
3896 if (unwind
.personality_routine
|| unwind
.personality_index
!= -1)
3897 as_bad (_("duplicate .personalityindex directive"));
3901 if (exp
.X_op
!= O_constant
3902 || exp
.X_add_number
< 0 || exp
.X_add_number
> 15)
3904 as_bad (_("bad personality routine number"));
3905 ignore_rest_of_line ();
3909 unwind
.personality_index
= exp
.X_add_number
;
3911 demand_empty_rest_of_line ();
3915 /* Parse a personality directive. */
3918 s_arm_unwind_personality (int ignored ATTRIBUTE_UNUSED
)
3922 if (!unwind
.proc_start
)
3923 as_bad (MISSING_FNSTART
);
3925 if (unwind
.personality_routine
|| unwind
.personality_index
!= -1)
3926 as_bad (_("duplicate .personality directive"));
3928 c
= get_symbol_name (& name
);
3929 p
= input_line_pointer
;
3931 ++ input_line_pointer
;
3932 unwind
.personality_routine
= symbol_find_or_make (name
);
3934 demand_empty_rest_of_line ();
3938 /* Parse a directive saving core registers. */
3941 s_arm_unwind_save_core (void)
3947 range
= parse_reg_list (&input_line_pointer
);
3950 as_bad (_("expected register list"));
3951 ignore_rest_of_line ();
3955 demand_empty_rest_of_line ();
3957 /* Turn .unwind_movsp ip followed by .unwind_save {..., ip, ...}
3958 into .unwind_save {..., sp...}. We aren't bothered about the value of
3959 ip because it is clobbered by calls. */
3960 if (unwind
.sp_restored
&& unwind
.fp_reg
== 12
3961 && (range
& 0x3000) == 0x1000)
3963 unwind
.opcode_count
--;
3964 unwind
.sp_restored
= 0;
3965 range
= (range
| 0x2000) & ~0x1000;
3966 unwind
.pending_offset
= 0;
3972 /* See if we can use the short opcodes. These pop a block of up to 8
3973 registers starting with r4, plus maybe r14. */
3974 for (n
= 0; n
< 8; n
++)
3976 /* Break at the first non-saved register. */
3977 if ((range
& (1 << (n
+ 4))) == 0)
3980 /* See if there are any other bits set. */
3981 if (n
== 0 || (range
& (0xfff0 << n
) & 0xbff0) != 0)
3983 /* Use the long form. */
3984 op
= 0x8000 | ((range
>> 4) & 0xfff);
3985 add_unwind_opcode (op
, 2);
3989 /* Use the short form. */
3991 op
= 0xa8; /* Pop r14. */
3993 op
= 0xa0; /* Do not pop r14. */
3995 add_unwind_opcode (op
, 1);
4002 op
= 0xb100 | (range
& 0xf);
4003 add_unwind_opcode (op
, 2);
4006 /* Record the number of bytes pushed. */
4007 for (n
= 0; n
< 16; n
++)
4009 if (range
& (1 << n
))
4010 unwind
.frame_size
+= 4;
4015 /* Parse a directive saving FPA registers. */
4018 s_arm_unwind_save_fpa (int reg
)
4024 /* Get Number of registers to transfer. */
4025 if (skip_past_comma (&input_line_pointer
) != FAIL
)
4028 exp
.X_op
= O_illegal
;
4030 if (exp
.X_op
!= O_constant
)
4032 as_bad (_("expected , <constant>"));
4033 ignore_rest_of_line ();
4037 num_regs
= exp
.X_add_number
;
4039 if (num_regs
< 1 || num_regs
> 4)
4041 as_bad (_("number of registers must be in the range [1:4]"));
4042 ignore_rest_of_line ();
4046 demand_empty_rest_of_line ();
4051 op
= 0xb4 | (num_regs
- 1);
4052 add_unwind_opcode (op
, 1);
4057 op
= 0xc800 | (reg
<< 4) | (num_regs
- 1);
4058 add_unwind_opcode (op
, 2);
4060 unwind
.frame_size
+= num_regs
* 12;
4064 /* Parse a directive saving VFP registers for ARMv6 and above. */
4067 s_arm_unwind_save_vfp_armv6 (void)
4072 int num_vfpv3_regs
= 0;
4073 int num_regs_below_16
;
4075 count
= parse_vfp_reg_list (&input_line_pointer
, &start
, REGLIST_VFP_D
);
4078 as_bad (_("expected register list"));
4079 ignore_rest_of_line ();
4083 demand_empty_rest_of_line ();
4085 /* We always generate FSTMD/FLDMD-style unwinding opcodes (rather
4086 than FSTMX/FLDMX-style ones). */
4088 /* Generate opcode for (VFPv3) registers numbered in the range 16 .. 31. */
4090 num_vfpv3_regs
= count
;
4091 else if (start
+ count
> 16)
4092 num_vfpv3_regs
= start
+ count
- 16;
4094 if (num_vfpv3_regs
> 0)
4096 int start_offset
= start
> 16 ? start
- 16 : 0;
4097 op
= 0xc800 | (start_offset
<< 4) | (num_vfpv3_regs
- 1);
4098 add_unwind_opcode (op
, 2);
4101 /* Generate opcode for registers numbered in the range 0 .. 15. */
4102 num_regs_below_16
= num_vfpv3_regs
> 0 ? 16 - (int) start
: count
;
4103 gas_assert (num_regs_below_16
+ num_vfpv3_regs
== count
);
4104 if (num_regs_below_16
> 0)
4106 op
= 0xc900 | (start
<< 4) | (num_regs_below_16
- 1);
4107 add_unwind_opcode (op
, 2);
4110 unwind
.frame_size
+= count
* 8;
4114 /* Parse a directive saving VFP registers for pre-ARMv6. */
4117 s_arm_unwind_save_vfp (void)
4123 count
= parse_vfp_reg_list (&input_line_pointer
, ®
, REGLIST_VFP_D
);
4126 as_bad (_("expected register list"));
4127 ignore_rest_of_line ();
4131 demand_empty_rest_of_line ();
4136 op
= 0xb8 | (count
- 1);
4137 add_unwind_opcode (op
, 1);
4142 op
= 0xb300 | (reg
<< 4) | (count
- 1);
4143 add_unwind_opcode (op
, 2);
4145 unwind
.frame_size
+= count
* 8 + 4;
4149 /* Parse a directive saving iWMMXt data registers. */
4152 s_arm_unwind_save_mmxwr (void)
4160 if (*input_line_pointer
== '{')
4161 input_line_pointer
++;
4165 reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWR
);
4169 as_bad ("%s", _(reg_expected_msgs
[REG_TYPE_MMXWR
]));
4174 as_tsktsk (_("register list not in ascending order"));
4177 if (*input_line_pointer
== '-')
4179 input_line_pointer
++;
4180 hi_reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWR
);
4183 as_bad ("%s", _(reg_expected_msgs
[REG_TYPE_MMXWR
]));
4186 else if (reg
>= hi_reg
)
4188 as_bad (_("bad register range"));
4191 for (; reg
< hi_reg
; reg
++)
4195 while (skip_past_comma (&input_line_pointer
) != FAIL
);
4197 skip_past_char (&input_line_pointer
, '}');
4199 demand_empty_rest_of_line ();
4201 /* Generate any deferred opcodes because we're going to be looking at
4203 flush_pending_unwind ();
4205 for (i
= 0; i
< 16; i
++)
4207 if (mask
& (1 << i
))
4208 unwind
.frame_size
+= 8;
4211 /* Attempt to combine with a previous opcode. We do this because gcc
4212 likes to output separate unwind directives for a single block of
4214 if (unwind
.opcode_count
> 0)
4216 i
= unwind
.opcodes
[unwind
.opcode_count
- 1];
4217 if ((i
& 0xf8) == 0xc0)
4220 /* Only merge if the blocks are contiguous. */
4223 if ((mask
& 0xfe00) == (1 << 9))
4225 mask
|= ((1 << (i
+ 11)) - 1) & 0xfc00;
4226 unwind
.opcode_count
--;
4229 else if (i
== 6 && unwind
.opcode_count
>= 2)
4231 i
= unwind
.opcodes
[unwind
.opcode_count
- 2];
4235 op
= 0xffff << (reg
- 1);
4237 && ((mask
& op
) == (1u << (reg
- 1))))
4239 op
= (1 << (reg
+ i
+ 1)) - 1;
4240 op
&= ~((1 << reg
) - 1);
4242 unwind
.opcode_count
-= 2;
4249 /* We want to generate opcodes in the order the registers have been
4250 saved, ie. descending order. */
4251 for (reg
= 15; reg
>= -1; reg
--)
4253 /* Save registers in blocks. */
4255 || !(mask
& (1 << reg
)))
4257 /* We found an unsaved reg. Generate opcodes to save the
4264 op
= 0xc0 | (hi_reg
- 10);
4265 add_unwind_opcode (op
, 1);
4270 op
= 0xc600 | ((reg
+ 1) << 4) | ((hi_reg
- reg
) - 1);
4271 add_unwind_opcode (op
, 2);
4280 ignore_rest_of_line ();
4284 s_arm_unwind_save_mmxwcg (void)
4291 if (*input_line_pointer
== '{')
4292 input_line_pointer
++;
4294 skip_whitespace (input_line_pointer
);
4298 reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWCG
);
4302 as_bad ("%s", _(reg_expected_msgs
[REG_TYPE_MMXWCG
]));
4308 as_tsktsk (_("register list not in ascending order"));
4311 if (*input_line_pointer
== '-')
4313 input_line_pointer
++;
4314 hi_reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWCG
);
4317 as_bad ("%s", _(reg_expected_msgs
[REG_TYPE_MMXWCG
]));
4320 else if (reg
>= hi_reg
)
4322 as_bad (_("bad register range"));
4325 for (; reg
< hi_reg
; reg
++)
4329 while (skip_past_comma (&input_line_pointer
) != FAIL
);
4331 skip_past_char (&input_line_pointer
, '}');
4333 demand_empty_rest_of_line ();
4335 /* Generate any deferred opcodes because we're going to be looking at
4337 flush_pending_unwind ();
4339 for (reg
= 0; reg
< 16; reg
++)
4341 if (mask
& (1 << reg
))
4342 unwind
.frame_size
+= 4;
4345 add_unwind_opcode (op
, 2);
4348 ignore_rest_of_line ();
4352 /* Parse an unwind_save directive.
4353 If the argument is non-zero, this is a .vsave directive. */
4356 s_arm_unwind_save (int arch_v6
)
4359 struct reg_entry
*reg
;
4360 bfd_boolean had_brace
= FALSE
;
4362 if (!unwind
.proc_start
)
4363 as_bad (MISSING_FNSTART
);
4365 /* Figure out what sort of save we have. */
4366 peek
= input_line_pointer
;
4374 reg
= arm_reg_parse_multi (&peek
);
4378 as_bad (_("register expected"));
4379 ignore_rest_of_line ();
4388 as_bad (_("FPA .unwind_save does not take a register list"));
4389 ignore_rest_of_line ();
4392 input_line_pointer
= peek
;
4393 s_arm_unwind_save_fpa (reg
->number
);
4397 s_arm_unwind_save_core ();
4402 s_arm_unwind_save_vfp_armv6 ();
4404 s_arm_unwind_save_vfp ();
4407 case REG_TYPE_MMXWR
:
4408 s_arm_unwind_save_mmxwr ();
4411 case REG_TYPE_MMXWCG
:
4412 s_arm_unwind_save_mmxwcg ();
4416 as_bad (_(".unwind_save does not support this kind of register"));
4417 ignore_rest_of_line ();
4422 /* Parse an unwind_movsp directive. */
4425 s_arm_unwind_movsp (int ignored ATTRIBUTE_UNUSED
)
4431 if (!unwind
.proc_start
)
4432 as_bad (MISSING_FNSTART
);
4434 reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_RN
);
4437 as_bad ("%s", _(reg_expected_msgs
[REG_TYPE_RN
]));
4438 ignore_rest_of_line ();
4442 /* Optional constant. */
4443 if (skip_past_comma (&input_line_pointer
) != FAIL
)
4445 if (immediate_for_directive (&offset
) == FAIL
)
4451 demand_empty_rest_of_line ();
4453 if (reg
== REG_SP
|| reg
== REG_PC
)
4455 as_bad (_("SP and PC not permitted in .unwind_movsp directive"));
4459 if (unwind
.fp_reg
!= REG_SP
)
4460 as_bad (_("unexpected .unwind_movsp directive"));
4462 /* Generate opcode to restore the value. */
4464 add_unwind_opcode (op
, 1);
4466 /* Record the information for later. */
4467 unwind
.fp_reg
= reg
;
4468 unwind
.fp_offset
= unwind
.frame_size
- offset
;
4469 unwind
.sp_restored
= 1;
4472 /* Parse an unwind_pad directive. */
4475 s_arm_unwind_pad (int ignored ATTRIBUTE_UNUSED
)
4479 if (!unwind
.proc_start
)
4480 as_bad (MISSING_FNSTART
);
4482 if (immediate_for_directive (&offset
) == FAIL
)
4487 as_bad (_("stack increment must be multiple of 4"));
4488 ignore_rest_of_line ();
4492 /* Don't generate any opcodes, just record the details for later. */
4493 unwind
.frame_size
+= offset
;
4494 unwind
.pending_offset
+= offset
;
4496 demand_empty_rest_of_line ();
4499 /* Parse an unwind_setfp directive. */
4502 s_arm_unwind_setfp (int ignored ATTRIBUTE_UNUSED
)
4508 if (!unwind
.proc_start
)
4509 as_bad (MISSING_FNSTART
);
4511 fp_reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_RN
);
4512 if (skip_past_comma (&input_line_pointer
) == FAIL
)
4515 sp_reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_RN
);
4517 if (fp_reg
== FAIL
|| sp_reg
== FAIL
)
4519 as_bad (_("expected <reg>, <reg>"));
4520 ignore_rest_of_line ();
4524 /* Optional constant. */
4525 if (skip_past_comma (&input_line_pointer
) != FAIL
)
4527 if (immediate_for_directive (&offset
) == FAIL
)
4533 demand_empty_rest_of_line ();
4535 if (sp_reg
!= REG_SP
&& sp_reg
!= unwind
.fp_reg
)
4537 as_bad (_("register must be either sp or set by a previous"
4538 "unwind_movsp directive"));
4542 /* Don't generate any opcodes, just record the information for later. */
4543 unwind
.fp_reg
= fp_reg
;
4545 if (sp_reg
== REG_SP
)
4546 unwind
.fp_offset
= unwind
.frame_size
- offset
;
4548 unwind
.fp_offset
-= offset
;
4551 /* Parse an unwind_raw directive. */
4554 s_arm_unwind_raw (int ignored ATTRIBUTE_UNUSED
)
4557 /* This is an arbitrary limit. */
4558 unsigned char op
[16];
4561 if (!unwind
.proc_start
)
4562 as_bad (MISSING_FNSTART
);
4565 if (exp
.X_op
== O_constant
4566 && skip_past_comma (&input_line_pointer
) != FAIL
)
4568 unwind
.frame_size
+= exp
.X_add_number
;
4572 exp
.X_op
= O_illegal
;
4574 if (exp
.X_op
!= O_constant
)
4576 as_bad (_("expected <offset>, <opcode>"));
4577 ignore_rest_of_line ();
4583 /* Parse the opcode. */
4588 as_bad (_("unwind opcode too long"));
4589 ignore_rest_of_line ();
4591 if (exp
.X_op
!= O_constant
|| exp
.X_add_number
& ~0xff)
4593 as_bad (_("invalid unwind opcode"));
4594 ignore_rest_of_line ();
4597 op
[count
++] = exp
.X_add_number
;
4599 /* Parse the next byte. */
4600 if (skip_past_comma (&input_line_pointer
) == FAIL
)
4606 /* Add the opcode bytes in reverse order. */
4608 add_unwind_opcode (op
[count
], 1);
4610 demand_empty_rest_of_line ();
4614 /* Parse a .eabi_attribute directive. */
4617 s_arm_eabi_attribute (int ignored ATTRIBUTE_UNUSED
)
4619 int tag
= obj_elf_vendor_attribute (OBJ_ATTR_PROC
);
4621 if (tag
< NUM_KNOWN_OBJ_ATTRIBUTES
)
4622 attributes_set_explicitly
[tag
] = 1;
4625 /* Emit a tls fix for the symbol. */
4628 s_arm_tls_descseq (int ignored ATTRIBUTE_UNUSED
)
4632 #ifdef md_flush_pending_output
4633 md_flush_pending_output ();
4636 #ifdef md_cons_align
4640 /* Since we're just labelling the code, there's no need to define a
4643 p
= obstack_next_free (&frchain_now
->frch_obstack
);
4644 fix_new_arm (frag_now
, p
- frag_now
->fr_literal
, 4, &exp
, 0,
4645 thumb_mode
? BFD_RELOC_ARM_THM_TLS_DESCSEQ
4646 : BFD_RELOC_ARM_TLS_DESCSEQ
);
4648 #endif /* OBJ_ELF */
4650 static void s_arm_arch (int);
4651 static void s_arm_object_arch (int);
4652 static void s_arm_cpu (int);
4653 static void s_arm_fpu (int);
4654 static void s_arm_arch_extension (int);
4659 pe_directive_secrel (int dummy ATTRIBUTE_UNUSED
)
4666 if (exp
.X_op
== O_symbol
)
4667 exp
.X_op
= O_secrel
;
4669 emit_expr (&exp
, 4);
4671 while (*input_line_pointer
++ == ',');
4673 input_line_pointer
--;
4674 demand_empty_rest_of_line ();
4678 /* This table describes all the machine specific pseudo-ops the assembler
4679 has to support. The fields are:
4680 pseudo-op name without dot
4681 function to call to execute this pseudo-op
4682 Integer arg to pass to the function. */
4684 const pseudo_typeS md_pseudo_table
[] =
4686 /* Never called because '.req' does not start a line. */
4687 { "req", s_req
, 0 },
4688 /* Following two are likewise never called. */
4691 { "unreq", s_unreq
, 0 },
4692 { "bss", s_bss
, 0 },
4693 { "align", s_align_ptwo
, 2 },
4694 { "arm", s_arm
, 0 },
4695 { "thumb", s_thumb
, 0 },
4696 { "code", s_code
, 0 },
4697 { "force_thumb", s_force_thumb
, 0 },
4698 { "thumb_func", s_thumb_func
, 0 },
4699 { "thumb_set", s_thumb_set
, 0 },
4700 { "even", s_even
, 0 },
4701 { "ltorg", s_ltorg
, 0 },
4702 { "pool", s_ltorg
, 0 },
4703 { "syntax", s_syntax
, 0 },
4704 { "cpu", s_arm_cpu
, 0 },
4705 { "arch", s_arm_arch
, 0 },
4706 { "object_arch", s_arm_object_arch
, 0 },
4707 { "fpu", s_arm_fpu
, 0 },
4708 { "arch_extension", s_arm_arch_extension
, 0 },
4710 { "word", s_arm_elf_cons
, 4 },
4711 { "long", s_arm_elf_cons
, 4 },
4712 { "inst.n", s_arm_elf_inst
, 2 },
4713 { "inst.w", s_arm_elf_inst
, 4 },
4714 { "inst", s_arm_elf_inst
, 0 },
4715 { "rel31", s_arm_rel31
, 0 },
4716 { "fnstart", s_arm_unwind_fnstart
, 0 },
4717 { "fnend", s_arm_unwind_fnend
, 0 },
4718 { "cantunwind", s_arm_unwind_cantunwind
, 0 },
4719 { "personality", s_arm_unwind_personality
, 0 },
4720 { "personalityindex", s_arm_unwind_personalityindex
, 0 },
4721 { "handlerdata", s_arm_unwind_handlerdata
, 0 },
4722 { "save", s_arm_unwind_save
, 0 },
4723 { "vsave", s_arm_unwind_save
, 1 },
4724 { "movsp", s_arm_unwind_movsp
, 0 },
4725 { "pad", s_arm_unwind_pad
, 0 },
4726 { "setfp", s_arm_unwind_setfp
, 0 },
4727 { "unwind_raw", s_arm_unwind_raw
, 0 },
4728 { "eabi_attribute", s_arm_eabi_attribute
, 0 },
4729 { "tlsdescseq", s_arm_tls_descseq
, 0 },
4733 /* These are used for dwarf. */
4737 /* These are used for dwarf2. */
4738 { "file", (void (*) (int)) dwarf2_directive_file
, 0 },
4739 { "loc", dwarf2_directive_loc
, 0 },
4740 { "loc_mark_labels", dwarf2_directive_loc_mark_labels
, 0 },
4742 { "extend", float_cons
, 'x' },
4743 { "ldouble", float_cons
, 'x' },
4744 { "packed", float_cons
, 'p' },
4746 {"secrel32", pe_directive_secrel
, 0},
4749 /* These are for compatibility with CodeComposer Studio. */
4750 {"ref", s_ccs_ref
, 0},
4751 {"def", s_ccs_def
, 0},
4752 {"asmfunc", s_ccs_asmfunc
, 0},
4753 {"endasmfunc", s_ccs_endasmfunc
, 0},
4758 /* Parser functions used exclusively in instruction operands. */
4760 /* Generic immediate-value read function for use in insn parsing.
4761 STR points to the beginning of the immediate (the leading #);
4762 VAL receives the value; if the value is outside [MIN, MAX]
4763 issue an error. PREFIX_OPT is true if the immediate prefix is
4767 parse_immediate (char **str
, int *val
, int min
, int max
,
4768 bfd_boolean prefix_opt
)
4771 my_get_expression (&exp
, str
, prefix_opt
? GE_OPT_PREFIX
: GE_IMM_PREFIX
);
4772 if (exp
.X_op
!= O_constant
)
4774 inst
.error
= _("constant expression required");
4778 if (exp
.X_add_number
< min
|| exp
.X_add_number
> max
)
4780 inst
.error
= _("immediate value out of range");
4784 *val
= exp
.X_add_number
;
4788 /* Less-generic immediate-value read function with the possibility of loading a
4789 big (64-bit) immediate, as required by Neon VMOV, VMVN and logic immediate
4790 instructions. Puts the result directly in inst.operands[i]. */
4793 parse_big_immediate (char **str
, int i
, expressionS
*in_exp
,
4794 bfd_boolean allow_symbol_p
)
4797 expressionS
*exp_p
= in_exp
? in_exp
: &exp
;
4800 my_get_expression (exp_p
, &ptr
, GE_OPT_PREFIX_BIG
);
4802 if (exp_p
->X_op
== O_constant
)
4804 inst
.operands
[i
].imm
= exp_p
->X_add_number
& 0xffffffff;
4805 /* If we're on a 64-bit host, then a 64-bit number can be returned using
4806 O_constant. We have to be careful not to break compilation for
4807 32-bit X_add_number, though. */
4808 if ((exp_p
->X_add_number
& ~(offsetT
)(0xffffffffU
)) != 0)
4810 /* X >> 32 is illegal if sizeof (exp_p->X_add_number) == 4. */
4811 inst
.operands
[i
].reg
= (((exp_p
->X_add_number
>> 16) >> 16)
4813 inst
.operands
[i
].regisimm
= 1;
4816 else if (exp_p
->X_op
== O_big
4817 && LITTLENUM_NUMBER_OF_BITS
* exp_p
->X_add_number
> 32)
4819 unsigned parts
= 32 / LITTLENUM_NUMBER_OF_BITS
, j
, idx
= 0;
4821 /* Bignums have their least significant bits in
4822 generic_bignum[0]. Make sure we put 32 bits in imm and
4823 32 bits in reg, in a (hopefully) portable way. */
4824 gas_assert (parts
!= 0);
4826 /* Make sure that the number is not too big.
4827 PR 11972: Bignums can now be sign-extended to the
4828 size of a .octa so check that the out of range bits
4829 are all zero or all one. */
4830 if (LITTLENUM_NUMBER_OF_BITS
* exp_p
->X_add_number
> 64)
4832 LITTLENUM_TYPE m
= -1;
4834 if (generic_bignum
[parts
* 2] != 0
4835 && generic_bignum
[parts
* 2] != m
)
4838 for (j
= parts
* 2 + 1; j
< (unsigned) exp_p
->X_add_number
; j
++)
4839 if (generic_bignum
[j
] != generic_bignum
[j
-1])
4843 inst
.operands
[i
].imm
= 0;
4844 for (j
= 0; j
< parts
; j
++, idx
++)
4845 inst
.operands
[i
].imm
|= generic_bignum
[idx
]
4846 << (LITTLENUM_NUMBER_OF_BITS
* j
);
4847 inst
.operands
[i
].reg
= 0;
4848 for (j
= 0; j
< parts
; j
++, idx
++)
4849 inst
.operands
[i
].reg
|= generic_bignum
[idx
]
4850 << (LITTLENUM_NUMBER_OF_BITS
* j
);
4851 inst
.operands
[i
].regisimm
= 1;
4853 else if (!(exp_p
->X_op
== O_symbol
&& allow_symbol_p
))
4861 /* Returns the pseudo-register number of an FPA immediate constant,
4862 or FAIL if there isn't a valid constant here. */
4865 parse_fpa_immediate (char ** str
)
4867 LITTLENUM_TYPE words
[MAX_LITTLENUMS
];
4873 /* First try and match exact strings, this is to guarantee
4874 that some formats will work even for cross assembly. */
4876 for (i
= 0; fp_const
[i
]; i
++)
4878 if (strncmp (*str
, fp_const
[i
], strlen (fp_const
[i
])) == 0)
4882 *str
+= strlen (fp_const
[i
]);
4883 if (is_end_of_line
[(unsigned char) **str
])
4889 /* Just because we didn't get a match doesn't mean that the constant
4890 isn't valid, just that it is in a format that we don't
4891 automatically recognize. Try parsing it with the standard
4892 expression routines. */
4894 memset (words
, 0, MAX_LITTLENUMS
* sizeof (LITTLENUM_TYPE
));
4896 /* Look for a raw floating point number. */
4897 if ((save_in
= atof_ieee (*str
, 'x', words
)) != NULL
4898 && is_end_of_line
[(unsigned char) *save_in
])
4900 for (i
= 0; i
< NUM_FLOAT_VALS
; i
++)
4902 for (j
= 0; j
< MAX_LITTLENUMS
; j
++)
4904 if (words
[j
] != fp_values
[i
][j
])
4908 if (j
== MAX_LITTLENUMS
)
4916 /* Try and parse a more complex expression, this will probably fail
4917 unless the code uses a floating point prefix (eg "0f"). */
4918 save_in
= input_line_pointer
;
4919 input_line_pointer
= *str
;
4920 if (expression (&exp
) == absolute_section
4921 && exp
.X_op
== O_big
4922 && exp
.X_add_number
< 0)
4924 /* FIXME: 5 = X_PRECISION, should be #define'd where we can use it.
4926 #define X_PRECISION 5
4927 #define E_PRECISION 15L
4928 if (gen_to_words (words
, X_PRECISION
, E_PRECISION
) == 0)
4930 for (i
= 0; i
< NUM_FLOAT_VALS
; i
++)
4932 for (j
= 0; j
< MAX_LITTLENUMS
; j
++)
4934 if (words
[j
] != fp_values
[i
][j
])
4938 if (j
== MAX_LITTLENUMS
)
4940 *str
= input_line_pointer
;
4941 input_line_pointer
= save_in
;
4948 *str
= input_line_pointer
;
4949 input_line_pointer
= save_in
;
4950 inst
.error
= _("invalid FPA immediate expression");
/* Return 1 if IMM (a 32-bit single-precision bit pattern) has the
   "quarter-precision" float format
       0baBbbbbbc defgh000 00000000 00000000
   i.e. it is encodable as an 8-bit floating-point immediate
   (sign, 3-bit biased exponent, 4-bit mantissa); return 0 otherwise.  */

static int
is_quarter_float (unsigned imm)
{
  /* The six exponent bits below the sign must all equal the complement
     of bit 29: select the pattern they are required to match.  */
  int bs = (imm & 0x20000000) ? 0x3e000000 : 0x40000000;

  /* Low 19 mantissa bits must be zero, and bits 30..25 must match BS.  */
  return (imm & 0x7ffff) == 0 && ((imm & 0x7e000000) ^ bs) == 0;
}
4965 /* Detect the presence of a floating point or integer zero constant,
4969 parse_ifimm_zero (char **in
)
4973 if (!is_immediate_prefix (**in
))
4975 /* In unified syntax, all prefixes are optional. */
4976 if (!unified_syntax
)
4982 /* Accept #0x0 as a synonym for #0. */
4983 if (strncmp (*in
, "0x", 2) == 0)
4986 if (parse_immediate (in
, &val
, 0, 0, TRUE
) == FAIL
)
4991 error_code
= atof_generic (in
, ".", EXP_CHARS
,
4992 &generic_floating_point_number
);
4995 && generic_floating_point_number
.sign
== '+'
4996 && (generic_floating_point_number
.low
4997 > generic_floating_point_number
.leader
))
5003 /* Parse an 8-bit "quarter-precision" floating point number of the form:
5004 0baBbbbbbc defgh000 00000000 00000000.
5005 The zero and minus-zero cases need special handling, since they can't be
5006 encoded in the "quarter-precision" float format, but can nonetheless be
5007 loaded as integer constants. */
5010 parse_qfloat_immediate (char **ccp
, int *immed
)
5014 LITTLENUM_TYPE words
[MAX_LITTLENUMS
];
5015 int found_fpchar
= 0;
5017 skip_past_char (&str
, '#');
5019 /* We must not accidentally parse an integer as a floating-point number. Make
5020 sure that the value we parse is not an integer by checking for special
5021 characters '.' or 'e'.
5022 FIXME: This is a horrible hack, but doing better is tricky because type
5023 information isn't in a very usable state at parse time. */
5025 skip_whitespace (fpnum
);
5027 if (strncmp (fpnum
, "0x", 2) == 0)
5031 for (; *fpnum
!= '\0' && *fpnum
!= ' ' && *fpnum
!= '\n'; fpnum
++)
5032 if (*fpnum
== '.' || *fpnum
== 'e' || *fpnum
== 'E')
5042 if ((str
= atof_ieee (str
, 's', words
)) != NULL
)
5044 unsigned fpword
= 0;
5047 /* Our FP word must be 32 bits (single-precision FP). */
5048 for (i
= 0; i
< 32 / LITTLENUM_NUMBER_OF_BITS
; i
++)
5050 fpword
<<= LITTLENUM_NUMBER_OF_BITS
;
5054 if (is_quarter_float (fpword
) || (fpword
& 0x7fffffff) == 0)
5067 /* Shift operands. */
5070 SHIFT_LSL
, SHIFT_LSR
, SHIFT_ASR
, SHIFT_ROR
, SHIFT_RRX
5073 struct asm_shift_name
5076 enum shift_kind kind
;
5079 /* Third argument to parse_shift. */
5080 enum parse_shift_mode
5082 NO_SHIFT_RESTRICT
, /* Any kind of shift is accepted. */
5083 SHIFT_IMMEDIATE
, /* Shift operand must be an immediate. */
5084 SHIFT_LSL_OR_ASR_IMMEDIATE
, /* Shift must be LSL or ASR immediate. */
5085 SHIFT_ASR_IMMEDIATE
, /* Shift must be ASR immediate. */
5086 SHIFT_LSL_IMMEDIATE
, /* Shift must be LSL immediate. */
5089 /* Parse a <shift> specifier on an ARM data processing instruction.
5090 This has three forms:
5092 (LSL|LSR|ASL|ASR|ROR) Rs
5093 (LSL|LSR|ASL|ASR|ROR) #imm
5096 Note that ASL is assimilated to LSL in the instruction encoding, and
5097 RRX to ROR #0 (which cannot be written as such). */
5100 parse_shift (char **str
, int i
, enum parse_shift_mode mode
)
5102 const struct asm_shift_name
*shift_name
;
5103 enum shift_kind shift
;
5108 for (p
= *str
; ISALPHA (*p
); p
++)
5113 inst
.error
= _("shift expression expected");
5117 shift_name
= (const struct asm_shift_name
*) hash_find_n (arm_shift_hsh
, *str
,
5120 if (shift_name
== NULL
)
5122 inst
.error
= _("shift expression expected");
5126 shift
= shift_name
->kind
;
5130 case NO_SHIFT_RESTRICT
:
5131 case SHIFT_IMMEDIATE
: break;
5133 case SHIFT_LSL_OR_ASR_IMMEDIATE
:
5134 if (shift
!= SHIFT_LSL
&& shift
!= SHIFT_ASR
)
5136 inst
.error
= _("'LSL' or 'ASR' required");
5141 case SHIFT_LSL_IMMEDIATE
:
5142 if (shift
!= SHIFT_LSL
)
5144 inst
.error
= _("'LSL' required");
5149 case SHIFT_ASR_IMMEDIATE
:
5150 if (shift
!= SHIFT_ASR
)
5152 inst
.error
= _("'ASR' required");
5160 if (shift
!= SHIFT_RRX
)
5162 /* Whitespace can appear here if the next thing is a bare digit. */
5163 skip_whitespace (p
);
5165 if (mode
== NO_SHIFT_RESTRICT
5166 && (reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) != FAIL
)
5168 inst
.operands
[i
].imm
= reg
;
5169 inst
.operands
[i
].immisreg
= 1;
5171 else if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_IMM_PREFIX
))
5174 inst
.operands
[i
].shift_kind
= shift
;
5175 inst
.operands
[i
].shifted
= 1;
5180 /* Parse a <shifter_operand> for an ARM data processing instruction:
5183 #<immediate>, <rotate>
5187 where <shift> is defined by parse_shift above, and <rotate> is a
5188 multiple of 2 between 0 and 30. Validation of immediate operands
5189 is deferred to md_apply_fix. */
5192 parse_shifter_operand (char **str
, int i
)
5197 if ((value
= arm_reg_parse (str
, REG_TYPE_RN
)) != FAIL
)
5199 inst
.operands
[i
].reg
= value
;
5200 inst
.operands
[i
].isreg
= 1;
5202 /* parse_shift will override this if appropriate */
5203 inst
.reloc
.exp
.X_op
= O_constant
;
5204 inst
.reloc
.exp
.X_add_number
= 0;
5206 if (skip_past_comma (str
) == FAIL
)
5209 /* Shift operation on register. */
5210 return parse_shift (str
, i
, NO_SHIFT_RESTRICT
);
5213 if (my_get_expression (&inst
.reloc
.exp
, str
, GE_IMM_PREFIX
))
5216 if (skip_past_comma (str
) == SUCCESS
)
5218 /* #x, y -- ie explicit rotation by Y. */
5219 if (my_get_expression (&exp
, str
, GE_NO_PREFIX
))
5222 if (exp
.X_op
!= O_constant
|| inst
.reloc
.exp
.X_op
!= O_constant
)
5224 inst
.error
= _("constant expression expected");
5228 value
= exp
.X_add_number
;
5229 if (value
< 0 || value
> 30 || value
% 2 != 0)
5231 inst
.error
= _("invalid rotation");
5234 if (inst
.reloc
.exp
.X_add_number
< 0 || inst
.reloc
.exp
.X_add_number
> 255)
5236 inst
.error
= _("invalid constant");
5240 /* Encode as specified. */
5241 inst
.operands
[i
].imm
= inst
.reloc
.exp
.X_add_number
| value
<< 7;
5245 inst
.reloc
.type
= BFD_RELOC_ARM_IMMEDIATE
;
5246 inst
.reloc
.pc_rel
= 0;
5250 /* Group relocation information. Each entry in the table contains the
5251 textual name of the relocation as may appear in assembler source
5252 and must end with a colon.
5253 Along with this textual name are the relocation codes to be used if
5254 the corresponding instruction is an ALU instruction (ADD or SUB only),
5255 an LDR, an LDRS, or an LDC. */
5257 struct group_reloc_table_entry
5268 /* Varieties of non-ALU group relocation. */
5275 static struct group_reloc_table_entry group_reloc_table
[] =
5276 { /* Program counter relative: */
5278 BFD_RELOC_ARM_ALU_PC_G0_NC
, /* ALU */
5283 BFD_RELOC_ARM_ALU_PC_G0
, /* ALU */
5284 BFD_RELOC_ARM_LDR_PC_G0
, /* LDR */
5285 BFD_RELOC_ARM_LDRS_PC_G0
, /* LDRS */
5286 BFD_RELOC_ARM_LDC_PC_G0
}, /* LDC */
5288 BFD_RELOC_ARM_ALU_PC_G1_NC
, /* ALU */
5293 BFD_RELOC_ARM_ALU_PC_G1
, /* ALU */
5294 BFD_RELOC_ARM_LDR_PC_G1
, /* LDR */
5295 BFD_RELOC_ARM_LDRS_PC_G1
, /* LDRS */
5296 BFD_RELOC_ARM_LDC_PC_G1
}, /* LDC */
5298 BFD_RELOC_ARM_ALU_PC_G2
, /* ALU */
5299 BFD_RELOC_ARM_LDR_PC_G2
, /* LDR */
5300 BFD_RELOC_ARM_LDRS_PC_G2
, /* LDRS */
5301 BFD_RELOC_ARM_LDC_PC_G2
}, /* LDC */
5302 /* Section base relative */
5304 BFD_RELOC_ARM_ALU_SB_G0_NC
, /* ALU */
5309 BFD_RELOC_ARM_ALU_SB_G0
, /* ALU */
5310 BFD_RELOC_ARM_LDR_SB_G0
, /* LDR */
5311 BFD_RELOC_ARM_LDRS_SB_G0
, /* LDRS */
5312 BFD_RELOC_ARM_LDC_SB_G0
}, /* LDC */
5314 BFD_RELOC_ARM_ALU_SB_G1_NC
, /* ALU */
5319 BFD_RELOC_ARM_ALU_SB_G1
, /* ALU */
5320 BFD_RELOC_ARM_LDR_SB_G1
, /* LDR */
5321 BFD_RELOC_ARM_LDRS_SB_G1
, /* LDRS */
5322 BFD_RELOC_ARM_LDC_SB_G1
}, /* LDC */
5324 BFD_RELOC_ARM_ALU_SB_G2
, /* ALU */
5325 BFD_RELOC_ARM_LDR_SB_G2
, /* LDR */
5326 BFD_RELOC_ARM_LDRS_SB_G2
, /* LDRS */
5327 BFD_RELOC_ARM_LDC_SB_G2
}, /* LDC */
5328 /* Absolute thumb alu relocations. */
5330 BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
,/* ALU. */
5335 BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC
,/* ALU. */
5340 BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC
,/* ALU. */
5345 BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
,/* ALU. */
5350 /* Given the address of a pointer pointing to the textual name of a group
5351 relocation as may appear in assembler source, attempt to find its details
5352 in group_reloc_table. The pointer will be updated to the character after
5353 the trailing colon. On failure, FAIL will be returned; SUCCESS
5354 otherwise. On success, *entry will be updated to point at the relevant
5355 group_reloc_table entry. */
5358 find_group_reloc_table_entry (char **str
, struct group_reloc_table_entry
**out
)
5361 for (i
= 0; i
< ARRAY_SIZE (group_reloc_table
); i
++)
5363 int length
= strlen (group_reloc_table
[i
].name
);
5365 if (strncasecmp (group_reloc_table
[i
].name
, *str
, length
) == 0
5366 && (*str
)[length
] == ':')
5368 *out
= &group_reloc_table
[i
];
5369 *str
+= (length
+ 1);
5377 /* Parse a <shifter_operand> for an ARM data processing instruction
5378 (as for parse_shifter_operand) where group relocations are allowed:
5381 #<immediate>, <rotate>
5382 #:<group_reloc>:<expression>
5386 where <group_reloc> is one of the strings defined in group_reloc_table.
5387 The hashes are optional.
5389 Everything else is as for parse_shifter_operand. */
5391 static parse_operand_result
5392 parse_shifter_operand_group_reloc (char **str
, int i
)
5394 /* Determine if we have the sequence of characters #: or just :
5395 coming next. If we do, then we check for a group relocation.
5396 If we don't, punt the whole lot to parse_shifter_operand. */
5398 if (((*str
)[0] == '#' && (*str
)[1] == ':')
5399 || (*str
)[0] == ':')
5401 struct group_reloc_table_entry
*entry
;
5403 if ((*str
)[0] == '#')
5408 /* Try to parse a group relocation. Anything else is an error. */
5409 if (find_group_reloc_table_entry (str
, &entry
) == FAIL
)
5411 inst
.error
= _("unknown group relocation");
5412 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
5415 /* We now have the group relocation table entry corresponding to
5416 the name in the assembler source. Next, we parse the expression. */
5417 if (my_get_expression (&inst
.reloc
.exp
, str
, GE_NO_PREFIX
))
5418 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
5420 /* Record the relocation type (always the ALU variant here). */
5421 inst
.reloc
.type
= (bfd_reloc_code_real_type
) entry
->alu_code
;
5422 gas_assert (inst
.reloc
.type
!= 0);
5424 return PARSE_OPERAND_SUCCESS
;
5427 return parse_shifter_operand (str
, i
) == SUCCESS
5428 ? PARSE_OPERAND_SUCCESS
: PARSE_OPERAND_FAIL
;
5430 /* Never reached. */
5433 /* Parse a Neon alignment expression. Information is written to
5434 inst.operands[i]. We assume the initial ':' has been skipped.
5436 align .imm = align << 8, .immisalign=1, .preind=0 */
5437 static parse_operand_result
5438 parse_neon_alignment (char **str
, int i
)
5443 my_get_expression (&exp
, &p
, GE_NO_PREFIX
);
5445 if (exp
.X_op
!= O_constant
)
5447 inst
.error
= _("alignment must be constant");
5448 return PARSE_OPERAND_FAIL
;
5451 inst
.operands
[i
].imm
= exp
.X_add_number
<< 8;
5452 inst
.operands
[i
].immisalign
= 1;
5453 /* Alignments are not pre-indexes. */
5454 inst
.operands
[i
].preind
= 0;
5457 return PARSE_OPERAND_SUCCESS
;
5460 /* Parse all forms of an ARM address expression. Information is written
5461 to inst.operands[i] and/or inst.reloc.
5463 Preindexed addressing (.preind=1):
5465 [Rn, #offset] .reg=Rn .reloc.exp=offset
5466 [Rn, +/-Rm] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5467 [Rn, +/-Rm, shift] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5468 .shift_kind=shift .reloc.exp=shift_imm
5470 These three may have a trailing ! which causes .writeback to be set also.
5472 Postindexed addressing (.postind=1, .writeback=1):
5474 [Rn], #offset .reg=Rn .reloc.exp=offset
5475 [Rn], +/-Rm .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5476 [Rn], +/-Rm, shift .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5477 .shift_kind=shift .reloc.exp=shift_imm
5479 Unindexed addressing (.preind=0, .postind=0):
5481 [Rn], {option} .reg=Rn .imm=option .immisreg=0
5485 [Rn]{!} shorthand for [Rn,#0]{!}
5486 =immediate .isreg=0 .reloc.exp=immediate
5487 label .reg=PC .reloc.pc_rel=1 .reloc.exp=label
5489 It is the caller's responsibility to check for addressing modes not
5490 supported by the instruction, and to set inst.reloc.type. */
5492 static parse_operand_result
5493 parse_address_main (char **str
, int i
, int group_relocations
,
5494 group_reloc_type group_type
)
5499 if (skip_past_char (&p
, '[') == FAIL
)
5501 if (skip_past_char (&p
, '=') == FAIL
)
5503 /* Bare address - translate to PC-relative offset. */
5504 inst
.reloc
.pc_rel
= 1;
5505 inst
.operands
[i
].reg
= REG_PC
;
5506 inst
.operands
[i
].isreg
= 1;
5507 inst
.operands
[i
].preind
= 1;
5509 if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_OPT_PREFIX_BIG
))
5510 return PARSE_OPERAND_FAIL
;
5512 else if (parse_big_immediate (&p
, i
, &inst
.reloc
.exp
,
5513 /*allow_symbol_p=*/TRUE
))
5514 return PARSE_OPERAND_FAIL
;
5517 return PARSE_OPERAND_SUCCESS
;
5520 /* PR gas/14887: Allow for whitespace after the opening bracket. */
5521 skip_whitespace (p
);
5523 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) == FAIL
)
5525 inst
.error
= _(reg_expected_msgs
[REG_TYPE_RN
]);
5526 return PARSE_OPERAND_FAIL
;
5528 inst
.operands
[i
].reg
= reg
;
5529 inst
.operands
[i
].isreg
= 1;
5531 if (skip_past_comma (&p
) == SUCCESS
)
5533 inst
.operands
[i
].preind
= 1;
5536 else if (*p
== '-') p
++, inst
.operands
[i
].negative
= 1;
5538 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) != FAIL
)
5540 inst
.operands
[i
].imm
= reg
;
5541 inst
.operands
[i
].immisreg
= 1;
5543 if (skip_past_comma (&p
) == SUCCESS
)
5544 if (parse_shift (&p
, i
, SHIFT_IMMEDIATE
) == FAIL
)
5545 return PARSE_OPERAND_FAIL
;
5547 else if (skip_past_char (&p
, ':') == SUCCESS
)
5549 /* FIXME: '@' should be used here, but it's filtered out by generic
5550 code before we get to see it here. This may be subject to
5552 parse_operand_result result
= parse_neon_alignment (&p
, i
);
5554 if (result
!= PARSE_OPERAND_SUCCESS
)
5559 if (inst
.operands
[i
].negative
)
5561 inst
.operands
[i
].negative
= 0;
5565 if (group_relocations
5566 && ((*p
== '#' && *(p
+ 1) == ':') || *p
== ':'))
5568 struct group_reloc_table_entry
*entry
;
5570 /* Skip over the #: or : sequence. */
5576 /* Try to parse a group relocation. Anything else is an
5578 if (find_group_reloc_table_entry (&p
, &entry
) == FAIL
)
5580 inst
.error
= _("unknown group relocation");
5581 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
5584 /* We now have the group relocation table entry corresponding to
5585 the name in the assembler source. Next, we parse the
5587 if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_NO_PREFIX
))
5588 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
5590 /* Record the relocation type. */
5594 inst
.reloc
.type
= (bfd_reloc_code_real_type
) entry
->ldr_code
;
5598 inst
.reloc
.type
= (bfd_reloc_code_real_type
) entry
->ldrs_code
;
5602 inst
.reloc
.type
= (bfd_reloc_code_real_type
) entry
->ldc_code
;
5609 if (inst
.reloc
.type
== 0)
5611 inst
.error
= _("this group relocation is not allowed on this instruction");
5612 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
5618 if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_IMM_PREFIX
))
5619 return PARSE_OPERAND_FAIL
;
5620 /* If the offset is 0, find out if it's a +0 or -0. */
5621 if (inst
.reloc
.exp
.X_op
== O_constant
5622 && inst
.reloc
.exp
.X_add_number
== 0)
5624 skip_whitespace (q
);
5628 skip_whitespace (q
);
5631 inst
.operands
[i
].negative
= 1;
5636 else if (skip_past_char (&p
, ':') == SUCCESS
)
5638 /* FIXME: '@' should be used here, but it's filtered out by generic code
5639 before we get to see it here. This may be subject to change. */
5640 parse_operand_result result
= parse_neon_alignment (&p
, i
);
5642 if (result
!= PARSE_OPERAND_SUCCESS
)
5646 if (skip_past_char (&p
, ']') == FAIL
)
5648 inst
.error
= _("']' expected");
5649 return PARSE_OPERAND_FAIL
;
5652 if (skip_past_char (&p
, '!') == SUCCESS
)
5653 inst
.operands
[i
].writeback
= 1;
5655 else if (skip_past_comma (&p
) == SUCCESS
)
5657 if (skip_past_char (&p
, '{') == SUCCESS
)
5659 /* [Rn], {expr} - unindexed, with option */
5660 if (parse_immediate (&p
, &inst
.operands
[i
].imm
,
5661 0, 255, TRUE
) == FAIL
)
5662 return PARSE_OPERAND_FAIL
;
5664 if (skip_past_char (&p
, '}') == FAIL
)
5666 inst
.error
= _("'}' expected at end of 'option' field");
5667 return PARSE_OPERAND_FAIL
;
5669 if (inst
.operands
[i
].preind
)
5671 inst
.error
= _("cannot combine index with option");
5672 return PARSE_OPERAND_FAIL
;
5675 return PARSE_OPERAND_SUCCESS
;
5679 inst
.operands
[i
].postind
= 1;
5680 inst
.operands
[i
].writeback
= 1;
5682 if (inst
.operands
[i
].preind
)
5684 inst
.error
= _("cannot combine pre- and post-indexing");
5685 return PARSE_OPERAND_FAIL
;
5689 else if (*p
== '-') p
++, inst
.operands
[i
].negative
= 1;
5691 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) != FAIL
)
5693 /* We might be using the immediate for alignment already. If we
5694 are, OR the register number into the low-order bits. */
5695 if (inst
.operands
[i
].immisalign
)
5696 inst
.operands
[i
].imm
|= reg
;
5698 inst
.operands
[i
].imm
= reg
;
5699 inst
.operands
[i
].immisreg
= 1;
5701 if (skip_past_comma (&p
) == SUCCESS
)
5702 if (parse_shift (&p
, i
, SHIFT_IMMEDIATE
) == FAIL
)
5703 return PARSE_OPERAND_FAIL
;
5708 if (inst
.operands
[i
].negative
)
5710 inst
.operands
[i
].negative
= 0;
5713 if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_IMM_PREFIX
))
5714 return PARSE_OPERAND_FAIL
;
5715 /* If the offset is 0, find out if it's a +0 or -0. */
5716 if (inst
.reloc
.exp
.X_op
== O_constant
5717 && inst
.reloc
.exp
.X_add_number
== 0)
5719 skip_whitespace (q
);
5723 skip_whitespace (q
);
5726 inst
.operands
[i
].negative
= 1;
5732 /* If at this point neither .preind nor .postind is set, we have a
5733 bare [Rn]{!}, which is shorthand for [Rn,#0]{!}. */
5734 if (inst
.operands
[i
].preind
== 0 && inst
.operands
[i
].postind
== 0)
5736 inst
.operands
[i
].preind
= 1;
5737 inst
.reloc
.exp
.X_op
= O_constant
;
5738 inst
.reloc
.exp
.X_add_number
= 0;
5741 return PARSE_OPERAND_SUCCESS
;
5745 parse_address (char **str
, int i
)
5747 return parse_address_main (str
, i
, 0, GROUP_LDR
) == PARSE_OPERAND_SUCCESS
5751 static parse_operand_result
5752 parse_address_group_reloc (char **str
, int i
, group_reloc_type type
)
5754 return parse_address_main (str
, i
, 1, type
);
5757 /* Parse an operand for a MOVW or MOVT instruction. */
5759 parse_half (char **str
)
5764 skip_past_char (&p
, '#');
5765 if (strncasecmp (p
, ":lower16:", 9) == 0)
5766 inst
.reloc
.type
= BFD_RELOC_ARM_MOVW
;
5767 else if (strncasecmp (p
, ":upper16:", 9) == 0)
5768 inst
.reloc
.type
= BFD_RELOC_ARM_MOVT
;
5770 if (inst
.reloc
.type
!= BFD_RELOC_UNUSED
)
5773 skip_whitespace (p
);
5776 if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_NO_PREFIX
))
5779 if (inst
.reloc
.type
== BFD_RELOC_UNUSED
)
5781 if (inst
.reloc
.exp
.X_op
!= O_constant
)
5783 inst
.error
= _("constant expression expected");
5786 if (inst
.reloc
.exp
.X_add_number
< 0
5787 || inst
.reloc
.exp
.X_add_number
> 0xffff)
5789 inst
.error
= _("immediate value out of range");
5797 /* Miscellaneous. */
5799 /* Parse a PSR flag operand. The value returned is FAIL on syntax error,
5800 or a bitmask suitable to be or-ed into the ARM msr instruction. */
5802 parse_psr (char **str
, bfd_boolean lhs
)
5805 unsigned long psr_field
;
5806 const struct asm_psr
*psr
;
5808 bfd_boolean is_apsr
= FALSE
;
5809 bfd_boolean m_profile
= ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_m
);
5811 /* PR gas/12698: If the user has specified -march=all then m_profile will
5812 be TRUE, but we want to ignore it in this case as we are building for any
5813 CPU type, including non-m variants. */
5814 if (ARM_FEATURE_CORE_EQUAL (selected_cpu
, arm_arch_any
))
5817 /* CPSR's and SPSR's can now be lowercase. This is just a convenience
5818 feature for ease of use and backwards compatibility. */
5820 if (strncasecmp (p
, "SPSR", 4) == 0)
5823 goto unsupported_psr
;
5825 psr_field
= SPSR_BIT
;
5827 else if (strncasecmp (p
, "CPSR", 4) == 0)
5830 goto unsupported_psr
;
5834 else if (strncasecmp (p
, "APSR", 4) == 0)
5836 /* APSR[_<bits>] can be used as a synonym for CPSR[_<flags>] on ARMv7-A
5837 and ARMv7-R architecture CPUs. */
5846 while (ISALNUM (*p
) || *p
== '_');
5848 if (strncasecmp (start
, "iapsr", 5) == 0
5849 || strncasecmp (start
, "eapsr", 5) == 0
5850 || strncasecmp (start
, "xpsr", 4) == 0
5851 || strncasecmp (start
, "psr", 3) == 0)
5852 p
= start
+ strcspn (start
, "rR") + 1;
5854 psr
= (const struct asm_psr
*) hash_find_n (arm_v7m_psr_hsh
, start
,
5860 /* If APSR is being written, a bitfield may be specified. Note that
5861 APSR itself is handled above. */
5862 if (psr
->field
<= 3)
5864 psr_field
= psr
->field
;
5870 /* M-profile MSR instructions have the mask field set to "10", except
5871 *PSR variants which modify APSR, which may use a different mask (and
5872 have been handled already). Do that by setting the PSR_f field
5874 return psr
->field
| (lhs
? PSR_f
: 0);
5877 goto unsupported_psr
;
5883 /* A suffix follows. */
5889 while (ISALNUM (*p
) || *p
== '_');
5893 /* APSR uses a notation for bits, rather than fields. */
5894 unsigned int nzcvq_bits
= 0;
5895 unsigned int g_bit
= 0;
5898 for (bit
= start
; bit
!= p
; bit
++)
5900 switch (TOLOWER (*bit
))
5903 nzcvq_bits
|= (nzcvq_bits
& 0x01) ? 0x20 : 0x01;
5907 nzcvq_bits
|= (nzcvq_bits
& 0x02) ? 0x20 : 0x02;
5911 nzcvq_bits
|= (nzcvq_bits
& 0x04) ? 0x20 : 0x04;
5915 nzcvq_bits
|= (nzcvq_bits
& 0x08) ? 0x20 : 0x08;
5919 nzcvq_bits
|= (nzcvq_bits
& 0x10) ? 0x20 : 0x10;
5923 g_bit
|= (g_bit
& 0x1) ? 0x2 : 0x1;
5927 inst
.error
= _("unexpected bit specified after APSR");
5932 if (nzcvq_bits
== 0x1f)
5937 if (!ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6_dsp
))
5939 inst
.error
= _("selected processor does not "
5940 "support DSP extension");
5947 if ((nzcvq_bits
& 0x20) != 0
5948 || (nzcvq_bits
!= 0x1f && nzcvq_bits
!= 0)
5949 || (g_bit
& 0x2) != 0)
5951 inst
.error
= _("bad bitmask specified after APSR");
5957 psr
= (const struct asm_psr
*) hash_find_n (arm_psr_hsh
, start
,
5962 psr_field
|= psr
->field
;
5968 goto error
; /* Garbage after "[CS]PSR". */
5970 /* Unadorned APSR is equivalent to APSR_nzcvq/CPSR_f (for writes). This
5971 is deprecated, but allow it anyway. */
5975 as_tsktsk (_("writing to APSR without specifying a bitmask is "
5978 else if (!m_profile
)
5979 /* These bits are never right for M-profile devices: don't set them
5980 (only code paths which read/write APSR reach here). */
5981 psr_field
|= (PSR_c
| PSR_f
);
5987 inst
.error
= _("selected processor does not support requested special "
5988 "purpose register");
5992 inst
.error
= _("flag for {c}psr instruction expected");
5996 /* Parse the flags argument to CPSI[ED]. Returns FAIL on error, or a
5997 value suitable for splatting into the AIF field of the instruction. */
6000 parse_cps_flags (char **str
)
6009 case '\0': case ',':
6012 case 'a': case 'A': saw_a_flag
= 1; val
|= 0x4; break;
6013 case 'i': case 'I': saw_a_flag
= 1; val
|= 0x2; break;
6014 case 'f': case 'F': saw_a_flag
= 1; val
|= 0x1; break;
6017 inst
.error
= _("unrecognized CPS flag");
6022 if (saw_a_flag
== 0)
6024 inst
.error
= _("missing CPS flags");
6032 /* Parse an endian specifier ("BE" or "LE", case insensitive);
6033 returns 0 for big-endian, 1 for little-endian, FAIL for an error. */
6036 parse_endian_specifier (char **str
)
6041 if (strncasecmp (s
, "BE", 2))
6043 else if (strncasecmp (s
, "LE", 2))
6047 inst
.error
= _("valid endian specifiers are be or le");
6051 if (ISALNUM (s
[2]) || s
[2] == '_')
6053 inst
.error
= _("valid endian specifiers are be or le");
6058 return little_endian
;
6061 /* Parse a rotation specifier: ROR #0, #8, #16, #24. *val receives a
6062 value suitable for poking into the rotate field of an sxt or sxta
6063 instruction, or FAIL on error. */
6066 parse_ror (char **str
)
6071 if (strncasecmp (s
, "ROR", 3) == 0)
6075 inst
.error
= _("missing rotation field after comma");
6079 if (parse_immediate (&s
, &rot
, 0, 24, FALSE
) == FAIL
)
6084 case 0: *str
= s
; return 0x0;
6085 case 8: *str
= s
; return 0x1;
6086 case 16: *str
= s
; return 0x2;
6087 case 24: *str
= s
; return 0x3;
6090 inst
.error
= _("rotation can only be 0, 8, 16, or 24");
6095 /* Parse a conditional code (from conds[] below). The value returned is in the
6096 range 0 .. 14, or FAIL. */
6098 parse_cond (char **str
)
6101 const struct asm_cond
*c
;
6103 /* Condition codes are always 2 characters, so matching up to
6104 3 characters is sufficient. */
6109 while (ISALPHA (*q
) && n
< 3)
6111 cond
[n
] = TOLOWER (*q
);
6116 c
= (const struct asm_cond
*) hash_find_n (arm_cond_hsh
, cond
, n
);
6119 inst
.error
= _("condition required");
6127 /* Record a use of the given feature. */
6129 record_feature_use (const arm_feature_set
*feature
)
6132 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
, *feature
);
6134 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
, *feature
);
6137 /* If the given feature available in the selected CPU, mark it as used.
6138 Returns TRUE iff feature is available. */
6140 mark_feature_used (const arm_feature_set
*feature
)
6142 /* Ensure the option is valid on the current architecture. */
6143 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, *feature
))
6146 /* Add the appropriate architecture feature for the barrier option used.
6148 record_feature_use (feature
);
6153 /* Parse an option for a barrier instruction. Returns the encoding for the
6156 parse_barrier (char **str
)
6159 const struct asm_barrier_opt
*o
;
6162 while (ISALPHA (*q
))
6165 o
= (const struct asm_barrier_opt
*) hash_find_n (arm_barrier_opt_hsh
, p
,
6170 if (!mark_feature_used (&o
->arch
))
6177 /* Parse the operands of a table branch instruction. Similar to a memory
6180 parse_tb (char **str
)
6185 if (skip_past_char (&p
, '[') == FAIL
)
6187 inst
.error
= _("'[' expected");
6191 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) == FAIL
)
6193 inst
.error
= _(reg_expected_msgs
[REG_TYPE_RN
]);
6196 inst
.operands
[0].reg
= reg
;
6198 if (skip_past_comma (&p
) == FAIL
)
6200 inst
.error
= _("',' expected");
6204 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) == FAIL
)
6206 inst
.error
= _(reg_expected_msgs
[REG_TYPE_RN
]);
6209 inst
.operands
[0].imm
= reg
;
6211 if (skip_past_comma (&p
) == SUCCESS
)
6213 if (parse_shift (&p
, 0, SHIFT_LSL_IMMEDIATE
) == FAIL
)
6215 if (inst
.reloc
.exp
.X_add_number
!= 1)
6217 inst
.error
= _("invalid shift");
6220 inst
.operands
[0].shifted
= 1;
6223 if (skip_past_char (&p
, ']') == FAIL
)
6225 inst
.error
= _("']' expected");
6232 /* Parse the operands of a Neon VMOV instruction. See do_neon_mov for more
6233 information on the types the operands can take and how they are encoded.
6234 Up to four operands may be read; this function handles setting the
6235 ".present" field for each read operand itself.
6236 Updates STR and WHICH_OPERAND if parsing is successful and returns SUCCESS,
6237 else returns FAIL. */
6240 parse_neon_mov (char **str
, int *which_operand
)
6242 int i
= *which_operand
, val
;
6243 enum arm_reg_type rtype
;
6245 struct neon_type_el optype
;
6247 if ((val
= parse_scalar (&ptr
, 8, &optype
)) != FAIL
)
6249 /* Case 4: VMOV<c><q>.<size> <Dn[x]>, <Rd>. */
6250 inst
.operands
[i
].reg
= val
;
6251 inst
.operands
[i
].isscalar
= 1;
6252 inst
.operands
[i
].vectype
= optype
;
6253 inst
.operands
[i
++].present
= 1;
6255 if (skip_past_comma (&ptr
) == FAIL
)
6258 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) == FAIL
)
6261 inst
.operands
[i
].reg
= val
;
6262 inst
.operands
[i
].isreg
= 1;
6263 inst
.operands
[i
].present
= 1;
6265 else if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_NSDQ
, &rtype
, &optype
))
6268 /* Cases 0, 1, 2, 3, 5 (D only). */
6269 if (skip_past_comma (&ptr
) == FAIL
)
6272 inst
.operands
[i
].reg
= val
;
6273 inst
.operands
[i
].isreg
= 1;
6274 inst
.operands
[i
].isquad
= (rtype
== REG_TYPE_NQ
);
6275 inst
.operands
[i
].issingle
= (rtype
== REG_TYPE_VFS
);
6276 inst
.operands
[i
].isvec
= 1;
6277 inst
.operands
[i
].vectype
= optype
;
6278 inst
.operands
[i
++].present
= 1;
6280 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) != FAIL
)
6282 /* Case 5: VMOV<c><q> <Dm>, <Rd>, <Rn>.
6283 Case 13: VMOV <Sd>, <Rm> */
6284 inst
.operands
[i
].reg
= val
;
6285 inst
.operands
[i
].isreg
= 1;
6286 inst
.operands
[i
].present
= 1;
6288 if (rtype
== REG_TYPE_NQ
)
6290 first_error (_("can't use Neon quad register here"));
6293 else if (rtype
!= REG_TYPE_VFS
)
6296 if (skip_past_comma (&ptr
) == FAIL
)
6298 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) == FAIL
)
6300 inst
.operands
[i
].reg
= val
;
6301 inst
.operands
[i
].isreg
= 1;
6302 inst
.operands
[i
].present
= 1;
6305 else if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_NSDQ
, &rtype
,
6308 /* Case 0: VMOV<c><q> <Qd>, <Qm>
6309 Case 1: VMOV<c><q> <Dd>, <Dm>
6310 Case 8: VMOV.F32 <Sd>, <Sm>
6311 Case 15: VMOV <Sd>, <Se>, <Rn>, <Rm> */
6313 inst
.operands
[i
].reg
= val
;
6314 inst
.operands
[i
].isreg
= 1;
6315 inst
.operands
[i
].isquad
= (rtype
== REG_TYPE_NQ
);
6316 inst
.operands
[i
].issingle
= (rtype
== REG_TYPE_VFS
);
6317 inst
.operands
[i
].isvec
= 1;
6318 inst
.operands
[i
].vectype
= optype
;
6319 inst
.operands
[i
].present
= 1;
6321 if (skip_past_comma (&ptr
) == SUCCESS
)
6326 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) == FAIL
)
6329 inst
.operands
[i
].reg
= val
;
6330 inst
.operands
[i
].isreg
= 1;
6331 inst
.operands
[i
++].present
= 1;
6333 if (skip_past_comma (&ptr
) == FAIL
)
6336 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) == FAIL
)
6339 inst
.operands
[i
].reg
= val
;
6340 inst
.operands
[i
].isreg
= 1;
6341 inst
.operands
[i
].present
= 1;
6344 else if (parse_qfloat_immediate (&ptr
, &inst
.operands
[i
].imm
) == SUCCESS
)
6345 /* Case 2: VMOV<c><q>.<dt> <Qd>, #<float-imm>
6346 Case 3: VMOV<c><q>.<dt> <Dd>, #<float-imm>
6347 Case 10: VMOV.F32 <Sd>, #<imm>
6348 Case 11: VMOV.F64 <Dd>, #<imm> */
6349 inst
.operands
[i
].immisfloat
= 1;
6350 else if (parse_big_immediate (&ptr
, i
, NULL
, /*allow_symbol_p=*/FALSE
)
6352 /* Case 2: VMOV<c><q>.<dt> <Qd>, #<imm>
6353 Case 3: VMOV<c><q>.<dt> <Dd>, #<imm> */
6357 first_error (_("expected <Rm> or <Dm> or <Qm> operand"));
6361 else if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) != FAIL
)
6364 inst
.operands
[i
].reg
= val
;
6365 inst
.operands
[i
].isreg
= 1;
6366 inst
.operands
[i
++].present
= 1;
6368 if (skip_past_comma (&ptr
) == FAIL
)
6371 if ((val
= parse_scalar (&ptr
, 8, &optype
)) != FAIL
)
6373 /* Case 6: VMOV<c><q>.<dt> <Rd>, <Dn[x]> */
6374 inst
.operands
[i
].reg
= val
;
6375 inst
.operands
[i
].isscalar
= 1;
6376 inst
.operands
[i
].present
= 1;
6377 inst
.operands
[i
].vectype
= optype
;
6379 else if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) != FAIL
)
6381 /* Case 7: VMOV<c><q> <Rd>, <Rn>, <Dm> */
6382 inst
.operands
[i
].reg
= val
;
6383 inst
.operands
[i
].isreg
= 1;
6384 inst
.operands
[i
++].present
= 1;
6386 if (skip_past_comma (&ptr
) == FAIL
)
6389 if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_VFSD
, &rtype
, &optype
))
6392 first_error (_(reg_expected_msgs
[REG_TYPE_VFSD
]));
6396 inst
.operands
[i
].reg
= val
;
6397 inst
.operands
[i
].isreg
= 1;
6398 inst
.operands
[i
].isvec
= 1;
6399 inst
.operands
[i
].issingle
= (rtype
== REG_TYPE_VFS
);
6400 inst
.operands
[i
].vectype
= optype
;
6401 inst
.operands
[i
].present
= 1;
6403 if (rtype
== REG_TYPE_VFS
)
6407 if (skip_past_comma (&ptr
) == FAIL
)
6409 if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_VFS
, NULL
,
6412 first_error (_(reg_expected_msgs
[REG_TYPE_VFS
]));
6415 inst
.operands
[i
].reg
= val
;
6416 inst
.operands
[i
].isreg
= 1;
6417 inst
.operands
[i
].isvec
= 1;
6418 inst
.operands
[i
].issingle
= 1;
6419 inst
.operands
[i
].vectype
= optype
;
6420 inst
.operands
[i
].present
= 1;
6423 else if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_VFS
, NULL
, &optype
))
6427 inst
.operands
[i
].reg
= val
;
6428 inst
.operands
[i
].isreg
= 1;
6429 inst
.operands
[i
].isvec
= 1;
6430 inst
.operands
[i
].issingle
= 1;
6431 inst
.operands
[i
].vectype
= optype
;
6432 inst
.operands
[i
].present
= 1;
6437 first_error (_("parse error"));
6441 /* Successfully parsed the operands. Update args. */
6447 first_error (_("expected comma"));
6451 first_error (_(reg_expected_msgs
[REG_TYPE_RN
]));
6455 /* Use this macro when the operand constraints are different
6456 for ARM and THUMB (e.g. ldrd).  The ARM operand-parse code is
   packed into the low 16 bits and the Thumb one into the high 16
   bits of a single value; parse_operands splits them apart again
   (Thumb: code >> 16, ARM: code & 0xffff).  */
6457 #define MIX_ARM_THUMB_OPERANDS(arm_operand, thumb_operand) \
6458 ((arm_operand) | ((thumb_operand) << 16))
6460 /* Matcher codes for parse_operands. */
6461 enum operand_parse_code
6463 OP_stop
, /* end of line */
6465 OP_RR
, /* ARM register */
6466 OP_RRnpc
, /* ARM register, not r15 */
6467 OP_RRnpcsp
, /* ARM register, neither r15 nor r13 (a.k.a. 'BadReg') */
6468 OP_RRnpcb
, /* ARM register, not r15, in square brackets */
6469 OP_RRnpctw
, /* ARM register, not r15 in Thumb-state or with writeback,
6470 optional trailing ! */
6471 OP_RRw
, /* ARM register, not r15, optional trailing ! */
6472 OP_RCP
, /* Coprocessor number */
6473 OP_RCN
, /* Coprocessor register */
6474 OP_RF
, /* FPA register */
6475 OP_RVS
, /* VFP single precision register */
6476 OP_RVD
, /* VFP double precision register (0..15) */
6477 OP_RND
, /* Neon double precision register (0..31) */
6478 OP_RNQ
, /* Neon quad precision register */
6479 OP_RVSD
, /* VFP single or double precision register */
6480 OP_RNDQ
, /* Neon double or quad precision register */
6481 OP_RNSDQ
, /* Neon single, double or quad precision register */
6482 OP_RNSC
, /* Neon scalar D[X] */
6483 OP_RVC
, /* VFP control register */
6484 OP_RMF
, /* Maverick F register */
6485 OP_RMD
, /* Maverick D register */
6486 OP_RMFX
, /* Maverick FX register */
6487 OP_RMDX
, /* Maverick DX register */
6488 OP_RMAX
, /* Maverick AX register */
6489 OP_RMDS
, /* Maverick DSPSC register */
6490 OP_RIWR
, /* iWMMXt wR register */
6491 OP_RIWC
, /* iWMMXt wC register */
6492 OP_RIWG
, /* iWMMXt wCG register */
6493 OP_RXA
, /* XScale accumulator register */
6495 OP_REGLST
, /* ARM register list */
6496 OP_VRSLST
, /* VFP single-precision register list */
6497 OP_VRDLST
, /* VFP double-precision register list */
6498 OP_VRSDLST
, /* VFP single or double-precision register list (& quad) */
6499 OP_NRDLST
, /* Neon double-precision register list (d0-d31, qN aliases) */
6500 OP_NSTRLST
, /* Neon element/structure list */
6502 OP_RNDQ_I0
, /* Neon D or Q reg, or immediate zero. */
6503 OP_RVSD_I0
, /* VFP S or D reg, or immediate zero. */
6504 OP_RSVD_FI0
, /* VFP S or D reg, or floating point immediate zero. */
6505 OP_RR_RNSC
, /* ARM reg or Neon scalar. */
6506 OP_RNSDQ_RNSC
, /* Vector S, D or Q reg, or Neon scalar. */
6507 OP_RNDQ_RNSC
, /* Neon D or Q reg, or Neon scalar. */
6508 OP_RND_RNSC
, /* Neon D reg, or Neon scalar. */
6509 OP_VMOV
, /* Neon VMOV operands. */
6510 OP_RNDQ_Ibig
, /* Neon D or Q reg, or big immediate for logic and VMVN. */
6511 OP_RNDQ_I63b
, /* Neon D or Q reg, or immediate for shift. */
6512 OP_RIWR_I32z
, /* iWMMXt wR register, or immediate 0 .. 32 for iWMMXt2. */
6514 OP_I0
, /* immediate zero */
6515 OP_I7
, /* immediate value 0 .. 7 */
6516 OP_I15
, /* 0 .. 15 */
6517 OP_I16
, /* 1 .. 16 */
6518 OP_I16z
, /* 0 .. 16 */
6519 OP_I31
, /* 0 .. 31 */
6520 OP_I31w
, /* 0 .. 31, optional trailing ! */
6521 OP_I32
, /* 1 .. 32 */
6522 OP_I32z
, /* 0 .. 32 */
6523 OP_I63
, /* 0 .. 63 */
6524 OP_I63s
, /* -64 .. 63 */
6525 OP_I64
, /* 1 .. 64 */
6526 OP_I64z
, /* 0 .. 64 */
6527 OP_I255
, /* 0 .. 255 */
6529 OP_I4b
, /* immediate, prefix optional, 1 .. 4 */
6530 OP_I7b
, /* 0 .. 7 */
6531 OP_I15b
, /* 0 .. 15 */
6532 OP_I31b
, /* 0 .. 31 */
6534 OP_SH
, /* shifter operand */
6535 OP_SHG
, /* shifter operand with possible group relocation */
6536 OP_ADDR
, /* Memory address expression (any mode) */
6537 OP_ADDRGLDR
, /* Mem addr expr (any mode) with possible LDR group reloc */
6538 OP_ADDRGLDRS
, /* Mem addr expr (any mode) with possible LDRS group reloc */
6539 OP_ADDRGLDC
, /* Mem addr expr (any mode) with possible LDC group reloc */
6540 OP_EXP
, /* arbitrary expression */
6541 OP_EXPi
, /* same, with optional immediate prefix */
6542 OP_EXPr
, /* same, with optional relocation suffix */
6543 OP_HALF
, /* 0 .. 65535 or low/high reloc. */
6544 OP_IROT1
, /* VCADD rotate immediate: 90, 270. */
6545 OP_IROT2
, /* VCMLA rotate immediate: 0, 90, 180, 270. */
6547 OP_CPSF
, /* CPS flags */
6548 OP_ENDI
, /* Endianness specifier */
6549 OP_wPSR
, /* CPSR/SPSR/APSR mask for msr (writing). */
6550 OP_rPSR
, /* CPSR/SPSR/APSR mask for msr (reading). */
6551 OP_COND
, /* conditional code */
6552 OP_TB
, /* Table branch. */
6554 OP_APSR_RR
, /* ARM register or "APSR_nzcv". */
6556 OP_RRnpc_I0
, /* ARM register or literal 0 */
6557 OP_RR_EXr
, /* ARM register or expression with opt. reloc stuff. */
6558 OP_RR_EXi
, /* ARM register or expression with imm prefix */
6559 OP_RF_IF
, /* FPA register or immediate */
6560 OP_RIWR_RIWC
, /* iWMMXt R or C reg */
6561 OP_RIWC_RIWG
, /* iWMMXt wC or wCG reg */
6563 /* Optional operands. */
6564 OP_oI7b
, /* immediate, prefix optional, 0 .. 7 */
6565 OP_oI31b
, /* 0 .. 31 */
6566 OP_oI32b
, /* 1 .. 32 */
6567 OP_oI32z
, /* 0 .. 32 */
6568 OP_oIffffb
, /* 0 .. 65535 */
6569 OP_oI255c
, /* curly-brace enclosed, 0 .. 255 */
6571 OP_oRR
, /* ARM register */
6572 OP_oRRnpc
, /* ARM register, not the PC */
6573 OP_oRRnpcsp
, /* ARM register, neither the PC nor the SP (a.k.a. BadReg) */
6574 OP_oRRw
, /* ARM register, not r15, optional trailing ! */
6575 OP_oRND
, /* Optional Neon double precision register */
6576 OP_oRNQ
, /* Optional Neon quad precision register */
6577 OP_oRNDQ
, /* Optional Neon double or quad precision register */
6578 OP_oRNSDQ
, /* Optional single, double or quad precision vector register */
6579 OP_oSHll
, /* LSL immediate */
6580 OP_oSHar
, /* ASR immediate */
6581 OP_oSHllar
, /* LSL or ASR immediate */
6582 OP_oROR
, /* ROR 0/8/16/24 */
6583 OP_oBARRIER_I15
, /* Option argument for a barrier instruction. */
6585 /* Some pre-defined mixed (ARM/THUMB) operands. */
6586 OP_RR_npcsp
= MIX_ARM_THUMB_OPERANDS (OP_RR
, OP_RRnpcsp
),
6587 OP_RRnpc_npcsp
= MIX_ARM_THUMB_OPERANDS (OP_RRnpc
, OP_RRnpcsp
),
6588 OP_oRRnpc_npcsp
= MIX_ARM_THUMB_OPERANDS (OP_oRRnpc
, OP_oRRnpcsp
),
6590 OP_FIRST_OPTIONAL
= OP_oI7b
6593 /* Generic instruction operand parser. This does no encoding and no
6594 semantic validation; it merely squirrels values away in the inst
6595 structure. Returns SUCCESS or FAIL depending on whether the
6596 specified grammar matched. */
6598 parse_operands (char *str
, const unsigned int *pattern
, bfd_boolean thumb
)
6600 unsigned const int *upat
= pattern
;
6601 char *backtrack_pos
= 0;
6602 const char *backtrack_error
= 0;
6603 int i
, val
= 0, backtrack_index
= 0;
6604 enum arm_reg_type rtype
;
6605 parse_operand_result result
;
6606 unsigned int op_parse_code
;
6608 #define po_char_or_fail(chr) \
6611 if (skip_past_char (&str, chr) == FAIL) \
6616 #define po_reg_or_fail(regtype) \
6619 val = arm_typed_reg_parse (& str, regtype, & rtype, \
6620 & inst.operands[i].vectype); \
6623 first_error (_(reg_expected_msgs[regtype])); \
6626 inst.operands[i].reg = val; \
6627 inst.operands[i].isreg = 1; \
6628 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
6629 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
6630 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
6631 || rtype == REG_TYPE_VFD \
6632 || rtype == REG_TYPE_NQ); \
6636 #define po_reg_or_goto(regtype, label) \
6639 val = arm_typed_reg_parse (& str, regtype, & rtype, \
6640 & inst.operands[i].vectype); \
6644 inst.operands[i].reg = val; \
6645 inst.operands[i].isreg = 1; \
6646 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
6647 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
6648 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
6649 || rtype == REG_TYPE_VFD \
6650 || rtype == REG_TYPE_NQ); \
6654 #define po_imm_or_fail(min, max, popt) \
6657 if (parse_immediate (&str, &val, min, max, popt) == FAIL) \
6659 inst.operands[i].imm = val; \
6663 #define po_scalar_or_goto(elsz, label) \
6666 val = parse_scalar (& str, elsz, & inst.operands[i].vectype); \
6669 inst.operands[i].reg = val; \
6670 inst.operands[i].isscalar = 1; \
6674 #define po_misc_or_fail(expr) \
6682 #define po_misc_or_fail_no_backtrack(expr) \
6686 if (result == PARSE_OPERAND_FAIL_NO_BACKTRACK) \
6687 backtrack_pos = 0; \
6688 if (result != PARSE_OPERAND_SUCCESS) \
6693 #define po_barrier_or_imm(str) \
6696 val = parse_barrier (&str); \
6697 if (val == FAIL && ! ISALPHA (*str)) \
6700 /* ISB can only take SY as an option. */ \
6701 || ((inst.instruction & 0xf0) == 0x60 \
6704 inst.error = _("invalid barrier type"); \
6705 backtrack_pos = 0; \
6711 skip_whitespace (str
);
6713 for (i
= 0; upat
[i
] != OP_stop
; i
++)
6715 op_parse_code
= upat
[i
];
6716 if (op_parse_code
>= 1<<16)
6717 op_parse_code
= thumb
? (op_parse_code
>> 16)
6718 : (op_parse_code
& ((1<<16)-1));
6720 if (op_parse_code
>= OP_FIRST_OPTIONAL
)
6722 /* Remember where we are in case we need to backtrack. */
6723 gas_assert (!backtrack_pos
);
6724 backtrack_pos
= str
;
6725 backtrack_error
= inst
.error
;
6726 backtrack_index
= i
;
6729 if (i
> 0 && (i
> 1 || inst
.operands
[0].present
))
6730 po_char_or_fail (',');
6732 switch (op_parse_code
)
6740 case OP_RR
: po_reg_or_fail (REG_TYPE_RN
); break;
6741 case OP_RCP
: po_reg_or_fail (REG_TYPE_CP
); break;
6742 case OP_RCN
: po_reg_or_fail (REG_TYPE_CN
); break;
6743 case OP_RF
: po_reg_or_fail (REG_TYPE_FN
); break;
6744 case OP_RVS
: po_reg_or_fail (REG_TYPE_VFS
); break;
6745 case OP_RVD
: po_reg_or_fail (REG_TYPE_VFD
); break;
6747 case OP_RND
: po_reg_or_fail (REG_TYPE_VFD
); break;
6749 po_reg_or_goto (REG_TYPE_VFC
, coproc_reg
);
6751 /* Also accept generic coprocessor regs for unknown registers. */
6753 po_reg_or_fail (REG_TYPE_CN
);
6755 case OP_RMF
: po_reg_or_fail (REG_TYPE_MVF
); break;
6756 case OP_RMD
: po_reg_or_fail (REG_TYPE_MVD
); break;
6757 case OP_RMFX
: po_reg_or_fail (REG_TYPE_MVFX
); break;
6758 case OP_RMDX
: po_reg_or_fail (REG_TYPE_MVDX
); break;
6759 case OP_RMAX
: po_reg_or_fail (REG_TYPE_MVAX
); break;
6760 case OP_RMDS
: po_reg_or_fail (REG_TYPE_DSPSC
); break;
6761 case OP_RIWR
: po_reg_or_fail (REG_TYPE_MMXWR
); break;
6762 case OP_RIWC
: po_reg_or_fail (REG_TYPE_MMXWC
); break;
6763 case OP_RIWG
: po_reg_or_fail (REG_TYPE_MMXWCG
); break;
6764 case OP_RXA
: po_reg_or_fail (REG_TYPE_XSCALE
); break;
6766 case OP_RNQ
: po_reg_or_fail (REG_TYPE_NQ
); break;
6768 case OP_RNDQ
: po_reg_or_fail (REG_TYPE_NDQ
); break;
6769 case OP_RVSD
: po_reg_or_fail (REG_TYPE_VFSD
); break;
6771 case OP_RNSDQ
: po_reg_or_fail (REG_TYPE_NSDQ
); break;
6773 /* Neon scalar. Using an element size of 8 means that some invalid
6774 scalars are accepted here, so deal with those in later code. */
6775 case OP_RNSC
: po_scalar_or_goto (8, failure
); break;
6779 po_reg_or_goto (REG_TYPE_NDQ
, try_imm0
);
6782 po_imm_or_fail (0, 0, TRUE
);
6787 po_reg_or_goto (REG_TYPE_VFSD
, try_imm0
);
6792 po_reg_or_goto (REG_TYPE_VFSD
, try_ifimm0
);
6795 if (parse_ifimm_zero (&str
))
6796 inst
.operands
[i
].imm
= 0;
6800 = _("only floating point zero is allowed as immediate value");
6808 po_scalar_or_goto (8, try_rr
);
6811 po_reg_or_fail (REG_TYPE_RN
);
6817 po_scalar_or_goto (8, try_nsdq
);
6820 po_reg_or_fail (REG_TYPE_NSDQ
);
6826 po_scalar_or_goto (8, try_ndq
);
6829 po_reg_or_fail (REG_TYPE_NDQ
);
6835 po_scalar_or_goto (8, try_vfd
);
6838 po_reg_or_fail (REG_TYPE_VFD
);
6843 /* WARNING: parse_neon_mov can move the operand counter, i. If we're
6844 not careful then bad things might happen. */
6845 po_misc_or_fail (parse_neon_mov (&str
, &i
) == FAIL
);
6850 po_reg_or_goto (REG_TYPE_NDQ
, try_immbig
);
6853 /* There's a possibility of getting a 64-bit immediate here, so
6854 we need special handling. */
6855 if (parse_big_immediate (&str
, i
, NULL
, /*allow_symbol_p=*/FALSE
)
6858 inst
.error
= _("immediate value is out of range");
6866 po_reg_or_goto (REG_TYPE_NDQ
, try_shimm
);
6869 po_imm_or_fail (0, 63, TRUE
);
6874 po_char_or_fail ('[');
6875 po_reg_or_fail (REG_TYPE_RN
);
6876 po_char_or_fail (']');
6882 po_reg_or_fail (REG_TYPE_RN
);
6883 if (skip_past_char (&str
, '!') == SUCCESS
)
6884 inst
.operands
[i
].writeback
= 1;
6888 case OP_I7
: po_imm_or_fail ( 0, 7, FALSE
); break;
6889 case OP_I15
: po_imm_or_fail ( 0, 15, FALSE
); break;
6890 case OP_I16
: po_imm_or_fail ( 1, 16, FALSE
); break;
6891 case OP_I16z
: po_imm_or_fail ( 0, 16, FALSE
); break;
6892 case OP_I31
: po_imm_or_fail ( 0, 31, FALSE
); break;
6893 case OP_I32
: po_imm_or_fail ( 1, 32, FALSE
); break;
6894 case OP_I32z
: po_imm_or_fail ( 0, 32, FALSE
); break;
6895 case OP_I63s
: po_imm_or_fail (-64, 63, FALSE
); break;
6896 case OP_I63
: po_imm_or_fail ( 0, 63, FALSE
); break;
6897 case OP_I64
: po_imm_or_fail ( 1, 64, FALSE
); break;
6898 case OP_I64z
: po_imm_or_fail ( 0, 64, FALSE
); break;
6899 case OP_I255
: po_imm_or_fail ( 0, 255, FALSE
); break;
6901 case OP_I4b
: po_imm_or_fail ( 1, 4, TRUE
); break;
6903 case OP_I7b
: po_imm_or_fail ( 0, 7, TRUE
); break;
6904 case OP_I15b
: po_imm_or_fail ( 0, 15, TRUE
); break;
6906 case OP_I31b
: po_imm_or_fail ( 0, 31, TRUE
); break;
6907 case OP_oI32b
: po_imm_or_fail ( 1, 32, TRUE
); break;
6908 case OP_oI32z
: po_imm_or_fail ( 0, 32, TRUE
); break;
6909 case OP_oIffffb
: po_imm_or_fail ( 0, 0xffff, TRUE
); break;
6911 /* Immediate variants */
6913 po_char_or_fail ('{');
6914 po_imm_or_fail (0, 255, TRUE
);
6915 po_char_or_fail ('}');
6919 /* The expression parser chokes on a trailing !, so we have
6920 to find it first and zap it. */
6923 while (*s
&& *s
!= ',')
6928 inst
.operands
[i
].writeback
= 1;
6930 po_imm_or_fail (0, 31, TRUE
);
6938 po_misc_or_fail (my_get_expression (&inst
.reloc
.exp
, &str
,
6943 po_misc_or_fail (my_get_expression (&inst
.reloc
.exp
, &str
,
6948 po_misc_or_fail (my_get_expression (&inst
.reloc
.exp
, &str
,
6950 if (inst
.reloc
.exp
.X_op
== O_symbol
)
6952 val
= parse_reloc (&str
);
6955 inst
.error
= _("unrecognized relocation suffix");
6958 else if (val
!= BFD_RELOC_UNUSED
)
6960 inst
.operands
[i
].imm
= val
;
6961 inst
.operands
[i
].hasreloc
= 1;
6966 /* Operand for MOVW or MOVT. */
6968 po_misc_or_fail (parse_half (&str
));
6971 /* Register or expression. */
6972 case OP_RR_EXr
: po_reg_or_goto (REG_TYPE_RN
, EXPr
); break;
6973 case OP_RR_EXi
: po_reg_or_goto (REG_TYPE_RN
, EXPi
); break;
6975 /* Register or immediate. */
6976 case OP_RRnpc_I0
: po_reg_or_goto (REG_TYPE_RN
, I0
); break;
6977 I0
: po_imm_or_fail (0, 0, FALSE
); break;
6979 case OP_RF_IF
: po_reg_or_goto (REG_TYPE_FN
, IF
); break;
6981 if (!is_immediate_prefix (*str
))
6984 val
= parse_fpa_immediate (&str
);
6987 /* FPA immediates are encoded as registers 8-15.
6988 parse_fpa_immediate has already applied the offset. */
6989 inst
.operands
[i
].reg
= val
;
6990 inst
.operands
[i
].isreg
= 1;
6993 case OP_RIWR_I32z
: po_reg_or_goto (REG_TYPE_MMXWR
, I32z
); break;
6994 I32z
: po_imm_or_fail (0, 32, FALSE
); break;
6996 /* Two kinds of register. */
6999 struct reg_entry
*rege
= arm_reg_parse_multi (&str
);
7001 || (rege
->type
!= REG_TYPE_MMXWR
7002 && rege
->type
!= REG_TYPE_MMXWC
7003 && rege
->type
!= REG_TYPE_MMXWCG
))
7005 inst
.error
= _("iWMMXt data or control register expected");
7008 inst
.operands
[i
].reg
= rege
->number
;
7009 inst
.operands
[i
].isreg
= (rege
->type
== REG_TYPE_MMXWR
);
7015 struct reg_entry
*rege
= arm_reg_parse_multi (&str
);
7017 || (rege
->type
!= REG_TYPE_MMXWC
7018 && rege
->type
!= REG_TYPE_MMXWCG
))
7020 inst
.error
= _("iWMMXt control register expected");
7023 inst
.operands
[i
].reg
= rege
->number
;
7024 inst
.operands
[i
].isreg
= 1;
7029 case OP_CPSF
: val
= parse_cps_flags (&str
); break;
7030 case OP_ENDI
: val
= parse_endian_specifier (&str
); break;
7031 case OP_oROR
: val
= parse_ror (&str
); break;
7032 case OP_COND
: val
= parse_cond (&str
); break;
7033 case OP_oBARRIER_I15
:
7034 po_barrier_or_imm (str
); break;
7036 if (parse_immediate (&str
, &val
, 0, 15, TRUE
) == FAIL
)
7042 po_reg_or_goto (REG_TYPE_RNB
, try_psr
);
7043 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_virt
))
7045 inst
.error
= _("Banked registers are not available with this "
7051 val
= parse_psr (&str
, op_parse_code
== OP_wPSR
);
7055 po_reg_or_goto (REG_TYPE_RN
, try_apsr
);
7058 /* Parse "APSR_nvzc" operand (for FMSTAT-equivalent MRS
7060 if (strncasecmp (str
, "APSR_", 5) == 0)
7067 case 'c': found
= (found
& 1) ? 16 : found
| 1; break;
7068 case 'n': found
= (found
& 2) ? 16 : found
| 2; break;
7069 case 'z': found
= (found
& 4) ? 16 : found
| 4; break;
7070 case 'v': found
= (found
& 8) ? 16 : found
| 8; break;
7071 default: found
= 16;
7075 inst
.operands
[i
].isvec
= 1;
7076 /* APSR_nzcv is encoded in instructions as if it were the REG_PC. */
7077 inst
.operands
[i
].reg
= REG_PC
;
7084 po_misc_or_fail (parse_tb (&str
));
7087 /* Register lists. */
7089 val
= parse_reg_list (&str
);
7092 inst
.operands
[i
].writeback
= 1;
7098 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
, REGLIST_VFP_S
);
7102 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
, REGLIST_VFP_D
);
7106 /* Allow Q registers too. */
7107 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
,
7112 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
,
7114 inst
.operands
[i
].issingle
= 1;
7119 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
,
7124 val
= parse_neon_el_struct_list (&str
, &inst
.operands
[i
].reg
,
7125 &inst
.operands
[i
].vectype
);
7128 /* Addressing modes */
7130 po_misc_or_fail (parse_address (&str
, i
));
7134 po_misc_or_fail_no_backtrack (
7135 parse_address_group_reloc (&str
, i
, GROUP_LDR
));
7139 po_misc_or_fail_no_backtrack (
7140 parse_address_group_reloc (&str
, i
, GROUP_LDRS
));
7144 po_misc_or_fail_no_backtrack (
7145 parse_address_group_reloc (&str
, i
, GROUP_LDC
));
7149 po_misc_or_fail (parse_shifter_operand (&str
, i
));
7153 po_misc_or_fail_no_backtrack (
7154 parse_shifter_operand_group_reloc (&str
, i
));
7158 po_misc_or_fail (parse_shift (&str
, i
, SHIFT_LSL_IMMEDIATE
));
7162 po_misc_or_fail (parse_shift (&str
, i
, SHIFT_ASR_IMMEDIATE
));
7166 po_misc_or_fail (parse_shift (&str
, i
, SHIFT_LSL_OR_ASR_IMMEDIATE
));
7170 as_fatal (_("unhandled operand code %d"), op_parse_code
);
7173 /* Various value-based sanity checks and shared operations. We
7174 do not signal immediate failures for the register constraints;
7175 this allows a syntax error to take precedence. */
7176 switch (op_parse_code
)
7184 if (inst
.operands
[i
].isreg
&& inst
.operands
[i
].reg
== REG_PC
)
7185 inst
.error
= BAD_PC
;
7190 if (inst
.operands
[i
].isreg
)
7192 if (inst
.operands
[i
].reg
== REG_PC
)
7193 inst
.error
= BAD_PC
;
7194 else if (inst
.operands
[i
].reg
== REG_SP
7195 /* The restriction on Rd/Rt/Rt2 on Thumb mode has been
7196 relaxed since ARMv8-A. */
7197 && !ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8
))
7200 inst
.error
= BAD_SP
;
7206 if (inst
.operands
[i
].isreg
7207 && inst
.operands
[i
].reg
== REG_PC
7208 && (inst
.operands
[i
].writeback
|| thumb
))
7209 inst
.error
= BAD_PC
;
7218 case OP_oBARRIER_I15
:
7227 inst
.operands
[i
].imm
= val
;
7234 /* If we get here, this operand was successfully parsed. */
7235 inst
.operands
[i
].present
= 1;
7239 inst
.error
= BAD_ARGS
;
7244 /* The parse routine should already have set inst.error, but set a
7245 default here just in case. */
7247 inst
.error
= _("syntax error");
7251 /* Do not backtrack over a trailing optional argument that
7252 absorbed some text. We will only fail again, with the
7253 'garbage following instruction' error message, which is
7254 probably less helpful than the current one. */
7255 if (backtrack_index
== i
&& backtrack_pos
!= str
7256 && upat
[i
+1] == OP_stop
)
7259 inst
.error
= _("syntax error");
7263 /* Try again, skipping the optional argument at backtrack_pos. */
7264 str
= backtrack_pos
;
7265 inst
.error
= backtrack_error
;
7266 inst
.operands
[backtrack_index
].present
= 0;
7267 i
= backtrack_index
;
7271 /* Check that we have parsed all the arguments. */
7272 if (*str
!= '\0' && !inst
.error
)
7273 inst
.error
= _("garbage following instruction");
7275 return inst
.error
? FAIL
: SUCCESS
;
7278 #undef po_char_or_fail
7279 #undef po_reg_or_fail
7280 #undef po_reg_or_goto
7281 #undef po_imm_or_fail
7282 #undef po_scalar_or_fail
7283 #undef po_barrier_or_imm
7285 /* Shorthand macro for instruction encoding functions issuing errors. */
7286 #define constraint(expr, err) \
7297 /* Reject "bad registers" for Thumb-2 instructions. Many Thumb-2
7298 instructions are unpredictable if these registers are used. This
7299 is the BadReg predicate in ARM's Thumb-2 documentation.
7301 Before ARMv8-A, REG_PC and REG_SP were not allowed in quite a few
7302 places, while the restriction on REG_SP was relaxed since ARMv8-A. */
7303 #define reject_bad_reg(reg) \
7305 if (reg == REG_PC) \
7307 inst.error = BAD_PC; \
7310 else if (reg == REG_SP \
7311 && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8)) \
7313 inst.error = BAD_SP; \
7318 /* If REG is R13 (the stack pointer), warn that its use is
7320 #define warn_deprecated_sp(reg) \
7322 if (warn_on_deprecated && reg == REG_SP) \
7323 as_tsktsk (_("use of r13 is deprecated")); \
7326 /* Functions for operand encoding. ARM, then Thumb. */
7328 #define rotate_left(v, n) (v << (n & 31) | v >> ((32 - n) & 31))
7330 /* If the current inst is scalar ARMv8.2 fp16 instruction, do special encoding.
7332 The only binary encoding difference is the Coprocessor number. Coprocessor
7333 9 is used for half-precision calculations or conversions. The format of the
7334 instruction is the same as the equivalent Coprocessor 10 instruction that
7335 exists for Single-Precision operation. */
7338 do_scalar_fp16_v82_encode (void)
7340 if (inst
.cond
!= COND_ALWAYS
)
7341 as_warn (_("ARMv8.2 scalar fp16 instruction cannot be conditional,"
7342 " the behaviour is UNPREDICTABLE"));
7343 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_fp16
),
7346 inst
.instruction
= (inst
.instruction
& 0xfffff0ff) | 0x900;
7347 mark_feature_used (&arm_ext_fp16
);
7350 /* If VAL can be encoded in the immediate field of an ARM instruction,
7351 return the encoded form. Otherwise, return FAIL. */
7354 encode_arm_immediate (unsigned int val
)
7361 for (i
= 2; i
< 32; i
+= 2)
7362 if ((a
= rotate_left (val
, i
)) <= 0xff)
7363 return a
| (i
<< 7); /* 12-bit pack: [shift-cnt,const]. */
7368 /* If VAL can be encoded in the immediate field of a Thumb32 instruction,
7369 return the encoded form. Otherwise, return FAIL. */
7371 encode_thumb32_immediate (unsigned int val
)
7378 for (i
= 1; i
<= 24; i
++)
7381 if ((val
& ~(0xff << i
)) == 0)
7382 return ((val
>> i
) & 0x7f) | ((32 - i
) << 7);
7386 if (val
== ((a
<< 16) | a
))
7388 if (val
== ((a
<< 24) | (a
<< 16) | (a
<< 8) | a
))
7392 if (val
== ((a
<< 16) | a
))
7393 return 0x200 | (a
>> 8);
7397 /* Encode a VFP SP or DP register number into inst.instruction. */
7400 encode_arm_vfp_reg (int reg
, enum vfp_reg_pos pos
)
7402 if ((pos
== VFP_REG_Dd
|| pos
== VFP_REG_Dn
|| pos
== VFP_REG_Dm
)
7405 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_d32
))
7408 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
7411 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
,
7416 first_error (_("D register out of range for selected VFP version"));
7424 inst
.instruction
|= ((reg
>> 1) << 12) | ((reg
& 1) << 22);
7428 inst
.instruction
|= ((reg
>> 1) << 16) | ((reg
& 1) << 7);
7432 inst
.instruction
|= ((reg
>> 1) << 0) | ((reg
& 1) << 5);
7436 inst
.instruction
|= ((reg
& 15) << 12) | ((reg
>> 4) << 22);
7440 inst
.instruction
|= ((reg
& 15) << 16) | ((reg
>> 4) << 7);
7444 inst
.instruction
|= (reg
& 15) | ((reg
>> 4) << 5);
7452 /* Encode a <shift> in an ARM-format instruction. The immediate,
7453 if any, is handled by md_apply_fix. */
7455 encode_arm_shift (int i
)
7457 /* register-shifted register. */
7458 if (inst
.operands
[i
].immisreg
)
7461 for (op_index
= 0; op_index
<= i
; ++op_index
)
7463 /* Check the operand only when it's presented. In pre-UAL syntax,
7464 if the destination register is the same as the first operand, two
7465 register form of the instruction can be used. */
7466 if (inst
.operands
[op_index
].present
&& inst
.operands
[op_index
].isreg
7467 && inst
.operands
[op_index
].reg
== REG_PC
)
7468 as_warn (UNPRED_REG ("r15"));
7471 if (inst
.operands
[i
].imm
== REG_PC
)
7472 as_warn (UNPRED_REG ("r15"));
7475 if (inst
.operands
[i
].shift_kind
== SHIFT_RRX
)
7476 inst
.instruction
|= SHIFT_ROR
<< 5;
7479 inst
.instruction
|= inst
.operands
[i
].shift_kind
<< 5;
7480 if (inst
.operands
[i
].immisreg
)
7482 inst
.instruction
|= SHIFT_BY_REG
;
7483 inst
.instruction
|= inst
.operands
[i
].imm
<< 8;
7486 inst
.reloc
.type
= BFD_RELOC_ARM_SHIFT_IMM
;
7491 encode_arm_shifter_operand (int i
)
7493 if (inst
.operands
[i
].isreg
)
7495 inst
.instruction
|= inst
.operands
[i
].reg
;
7496 encode_arm_shift (i
);
7500 inst
.instruction
|= INST_IMMEDIATE
;
7501 if (inst
.reloc
.type
!= BFD_RELOC_ARM_IMMEDIATE
)
7502 inst
.instruction
|= inst
.operands
[i
].imm
;
7506 /* Subroutine of encode_arm_addr_mode_2 and encode_arm_addr_mode_3. */
7508 encode_arm_addr_mode_common (int i
, bfd_boolean is_t
)
7511 Generate an error if the operand is not a register. */
7512 constraint (!inst
.operands
[i
].isreg
,
7513 _("Instruction does not support =N addresses"));
7515 inst
.instruction
|= inst
.operands
[i
].reg
<< 16;
7517 if (inst
.operands
[i
].preind
)
7521 inst
.error
= _("instruction does not accept preindexed addressing");
7524 inst
.instruction
|= PRE_INDEX
;
7525 if (inst
.operands
[i
].writeback
)
7526 inst
.instruction
|= WRITE_BACK
;
7529 else if (inst
.operands
[i
].postind
)
7531 gas_assert (inst
.operands
[i
].writeback
);
7533 inst
.instruction
|= WRITE_BACK
;
7535 else /* unindexed - only for coprocessor */
7537 inst
.error
= _("instruction does not accept unindexed addressing");
7541 if (((inst
.instruction
& WRITE_BACK
) || !(inst
.instruction
& PRE_INDEX
))
7542 && (((inst
.instruction
& 0x000f0000) >> 16)
7543 == ((inst
.instruction
& 0x0000f000) >> 12)))
7544 as_warn ((inst
.instruction
& LOAD_BIT
)
7545 ? _("destination register same as write-back base")
7546 : _("source register same as write-back base"));
7549 /* inst.operands[i] was set up by parse_address. Encode it into an
7550 ARM-format mode 2 load or store instruction. If is_t is true,
7551 reject forms that cannot be used with a T instruction (i.e. not
7554 encode_arm_addr_mode_2 (int i
, bfd_boolean is_t
)
7556 const bfd_boolean is_pc
= (inst
.operands
[i
].reg
== REG_PC
);
7558 encode_arm_addr_mode_common (i
, is_t
);
7560 if (inst
.operands
[i
].immisreg
)
7562 constraint ((inst
.operands
[i
].imm
== REG_PC
7563 || (is_pc
&& inst
.operands
[i
].writeback
)),
7565 inst
.instruction
|= INST_IMMEDIATE
; /* yes, this is backwards */
7566 inst
.instruction
|= inst
.operands
[i
].imm
;
7567 if (!inst
.operands
[i
].negative
)
7568 inst
.instruction
|= INDEX_UP
;
7569 if (inst
.operands
[i
].shifted
)
7571 if (inst
.operands
[i
].shift_kind
== SHIFT_RRX
)
7572 inst
.instruction
|= SHIFT_ROR
<< 5;
7575 inst
.instruction
|= inst
.operands
[i
].shift_kind
<< 5;
7576 inst
.reloc
.type
= BFD_RELOC_ARM_SHIFT_IMM
;
7580 else /* immediate offset in inst.reloc */
7582 if (is_pc
&& !inst
.reloc
.pc_rel
)
7584 const bfd_boolean is_load
= ((inst
.instruction
& LOAD_BIT
) != 0);
7586 /* If is_t is TRUE, it's called from do_ldstt. ldrt/strt
7587 cannot use PC in addressing.
7588 PC cannot be used in writeback addressing, either. */
7589 constraint ((is_t
|| inst
.operands
[i
].writeback
),
7592 /* Use of PC in str is deprecated for ARMv7. */
7593 if (warn_on_deprecated
7595 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v7
))
7596 as_tsktsk (_("use of PC in this instruction is deprecated"));
7599 if (inst
.reloc
.type
== BFD_RELOC_UNUSED
)
7601 /* Prefer + for zero encoded value. */
7602 if (!inst
.operands
[i
].negative
)
7603 inst
.instruction
|= INDEX_UP
;
7604 inst
.reloc
.type
= BFD_RELOC_ARM_OFFSET_IMM
;
7609 /* inst.operands[i] was set up by parse_address. Encode it into an
7610 ARM-format mode 3 load or store instruction. Reject forms that
7611 cannot be used with such instructions. If is_t is true, reject
7612 forms that cannot be used with a T instruction (i.e. not
7615 encode_arm_addr_mode_3 (int i
, bfd_boolean is_t
)
7617 if (inst
.operands
[i
].immisreg
&& inst
.operands
[i
].shifted
)
7619 inst
.error
= _("instruction does not accept scaled register index");
7623 encode_arm_addr_mode_common (i
, is_t
);
7625 if (inst
.operands
[i
].immisreg
)
7627 constraint ((inst
.operands
[i
].imm
== REG_PC
7628 || (is_t
&& inst
.operands
[i
].reg
== REG_PC
)),
7630 constraint (inst
.operands
[i
].reg
== REG_PC
&& inst
.operands
[i
].writeback
,
7632 inst
.instruction
|= inst
.operands
[i
].imm
;
7633 if (!inst
.operands
[i
].negative
)
7634 inst
.instruction
|= INDEX_UP
;
7636 else /* immediate offset in inst.reloc */
7638 constraint ((inst
.operands
[i
].reg
== REG_PC
&& !inst
.reloc
.pc_rel
7639 && inst
.operands
[i
].writeback
),
7641 inst
.instruction
|= HWOFFSET_IMM
;
7642 if (inst
.reloc
.type
== BFD_RELOC_UNUSED
)
7644 /* Prefer + for zero encoded value. */
7645 if (!inst
.operands
[i
].negative
)
7646 inst
.instruction
|= INDEX_UP
;
7648 inst
.reloc
.type
= BFD_RELOC_ARM_OFFSET_IMM8
;
7653 /* Write immediate bits [7:0] to the following locations:
7655 |28/24|23 19|18 16|15 4|3 0|
7656 | a |x x x x x|b c d|x x x x x x x x x x x x|e f g h|
7658 This function is used by VMOV/VMVN/VORR/VBIC. */
7661 neon_write_immbits (unsigned immbits
)
7663 inst
.instruction
|= immbits
& 0xf;
7664 inst
.instruction
|= ((immbits
>> 4) & 0x7) << 16;
7665 inst
.instruction
|= ((immbits
>> 7) & 0x1) << (thumb_mode
? 28 : 24);
/* Invert low-order SIZE bits of XHI:XLO.  Either pointer may be NULL,
   in which case that half is treated as zero and not written back.  */

static void
neon_invert_size (unsigned *xlo, unsigned *xhi, int size)
{
  unsigned immlo = xlo ? *xlo : 0;
  unsigned immhi = xhi ? *xhi : 0;

  switch (size)
    {
    case 8:
      immlo = (~immlo) & 0xff;
      break;

    case 16:
      immlo = (~immlo) & 0xffff;
      break;

    case 64:
      immhi = (~immhi) & 0xffffffff;
      /* fall through.  */

    case 32:
      immlo = (~immlo) & 0xffffffff;
      break;

    default:
      break;
    }

  if (xlo)
    *xlo = immlo;

  if (xhi)
    *xhi = immhi;
}
/* True if IMM has form 0bAAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD for bits
   A, B, C, D — i.e. every byte of IMM is either 0x00 or 0xff.  */

static int
neon_bits_same_in_bytes (unsigned imm)
{
  return ((imm & 0x000000ff) == 0 || (imm & 0x000000ff) == 0x000000ff)
	 && ((imm & 0x0000ff00) == 0 || (imm & 0x0000ff00) == 0x0000ff00)
	 && ((imm & 0x00ff0000) == 0 || (imm & 0x00ff0000) == 0x00ff0000)
	 && ((imm & 0xff000000) == 0 || (imm & 0xff000000) == 0xff000000);
}
/* For immediate of above form, return 0bABCD — collapse each
   all-zeros/all-ones byte of IMM to a single bit.  */

static unsigned
neon_squash_bits (unsigned imm)
{
  return (imm & 0x01) | ((imm & 0x0100) >> 7) | ((imm & 0x010000) >> 14)
	 | ((imm & 0x01000000) >> 21);
}
/* Compress quarter-float representation to 0b...000 abcdefgh:
   bit a is the IEEE-754 single sign bit (bit 31), bcdefgh are taken
   from bits 25:19 (low exponent bit plus top mantissa bits).  */

static unsigned
neon_qfloat_bits (unsigned imm)
{
  return ((imm >> 19) & 0x7f) | ((imm >> 24) & 0x80);
}
7734 /* Returns CMODE. IMMBITS [7:0] is set to bits suitable for inserting into
7735 the instruction. *OP is passed as the initial value of the op field, and
7736 may be set to a different value depending on the constant (i.e.
7737 "MOV I64, 0bAAAAAAAABBBB..." which uses OP = 1 despite being MOV not
7738 MVN). If the immediate looks like a repeated pattern then also
7739 try smaller element sizes. */
7742 neon_cmode_for_move_imm (unsigned immlo
, unsigned immhi
, int float_p
,
7743 unsigned *immbits
, int *op
, int size
,
7744 enum neon_el_type type
)
7746 /* Only permit float immediates (including 0.0/-0.0) if the operand type is
7748 if (type
== NT_float
&& !float_p
)
7751 if (type
== NT_float
&& is_quarter_float (immlo
) && immhi
== 0)
7753 if (size
!= 32 || *op
== 1)
7755 *immbits
= neon_qfloat_bits (immlo
);
7761 if (neon_bits_same_in_bytes (immhi
)
7762 && neon_bits_same_in_bytes (immlo
))
7766 *immbits
= (neon_squash_bits (immhi
) << 4)
7767 | neon_squash_bits (immlo
);
7778 if (immlo
== (immlo
& 0x000000ff))
7783 else if (immlo
== (immlo
& 0x0000ff00))
7785 *immbits
= immlo
>> 8;
7788 else if (immlo
== (immlo
& 0x00ff0000))
7790 *immbits
= immlo
>> 16;
7793 else if (immlo
== (immlo
& 0xff000000))
7795 *immbits
= immlo
>> 24;
7798 else if (immlo
== ((immlo
& 0x0000ff00) | 0x000000ff))
7800 *immbits
= (immlo
>> 8) & 0xff;
7803 else if (immlo
== ((immlo
& 0x00ff0000) | 0x0000ffff))
7805 *immbits
= (immlo
>> 16) & 0xff;
7809 if ((immlo
& 0xffff) != (immlo
>> 16))
7816 if (immlo
== (immlo
& 0x000000ff))
7821 else if (immlo
== (immlo
& 0x0000ff00))
7823 *immbits
= immlo
>> 8;
7827 if ((immlo
& 0xff) != (immlo
>> 8))
7832 if (immlo
== (immlo
& 0x000000ff))
7834 /* Don't allow MVN with 8-bit immediate. */
7844 #if defined BFD_HOST_64_BIT
7845 /* Returns TRUE if double precision value V may be cast
7846 to single precision without loss of accuracy. */
7849 is_double_a_single (bfd_int64_t v
)
7851 int exp
= (int)((v
>> 52) & 0x7FF);
7852 bfd_int64_t mantissa
= (v
& (bfd_int64_t
)0xFFFFFFFFFFFFFULL
);
7854 return (exp
== 0 || exp
== 0x7FF
7855 || (exp
>= 1023 - 126 && exp
<= 1023 + 127))
7856 && (mantissa
& 0x1FFFFFFFl
) == 0;
7859 /* Returns a double precision value casted to single precision
7860 (ignoring the least significant bits in exponent and mantissa). */
7863 double_to_single (bfd_int64_t v
)
7865 int sign
= (int) ((v
>> 63) & 1l);
7866 int exp
= (int) ((v
>> 52) & 0x7FF);
7867 bfd_int64_t mantissa
= (v
& (bfd_int64_t
)0xFFFFFFFFFFFFFULL
);
7873 exp
= exp
- 1023 + 127;
7882 /* No denormalized numbers. */
7888 return (sign
<< 31) | (exp
<< 23) | mantissa
;
7890 #endif /* BFD_HOST_64_BIT */
7899 static void do_vfp_nsyn_opcode (const char *);
7901 /* inst.reloc.exp describes an "=expr" load pseudo-operation.
7902 Determine whether it can be performed with a move instruction; if
7903 it can, convert inst.instruction to that move instruction and
7904 return TRUE; if it can't, convert inst.instruction to a literal-pool
7905 load and return FALSE. If this is not a valid thing to do in the
7906 current context, set inst.error and return TRUE.
7908 inst.operands[i] describes the destination register. */
7911 move_or_literal_pool (int i
, enum lit_type t
, bfd_boolean mode_3
)
7914 bfd_boolean thumb_p
= (t
== CONST_THUMB
);
7915 bfd_boolean arm_p
= (t
== CONST_ARM
);
7918 tbit
= (inst
.instruction
> 0xffff) ? THUMB2_LOAD_BIT
: THUMB_LOAD_BIT
;
7922 if ((inst
.instruction
& tbit
) == 0)
7924 inst
.error
= _("invalid pseudo operation");
7928 if (inst
.reloc
.exp
.X_op
!= O_constant
7929 && inst
.reloc
.exp
.X_op
!= O_symbol
7930 && inst
.reloc
.exp
.X_op
!= O_big
)
7932 inst
.error
= _("constant expression expected");
7936 if (inst
.reloc
.exp
.X_op
== O_constant
7937 || inst
.reloc
.exp
.X_op
== O_big
)
7939 #if defined BFD_HOST_64_BIT
7944 if (inst
.reloc
.exp
.X_op
== O_big
)
7946 LITTLENUM_TYPE w
[X_PRECISION
];
7949 if (inst
.reloc
.exp
.X_add_number
== -1)
7951 gen_to_words (w
, X_PRECISION
, E_PRECISION
);
7953 /* FIXME: Should we check words w[2..5] ? */
7958 #if defined BFD_HOST_64_BIT
7960 ((((((((bfd_int64_t
) l
[3] & LITTLENUM_MASK
)
7961 << LITTLENUM_NUMBER_OF_BITS
)
7962 | ((bfd_int64_t
) l
[2] & LITTLENUM_MASK
))
7963 << LITTLENUM_NUMBER_OF_BITS
)
7964 | ((bfd_int64_t
) l
[1] & LITTLENUM_MASK
))
7965 << LITTLENUM_NUMBER_OF_BITS
)
7966 | ((bfd_int64_t
) l
[0] & LITTLENUM_MASK
));
7968 v
= ((l
[1] & LITTLENUM_MASK
) << LITTLENUM_NUMBER_OF_BITS
)
7969 | (l
[0] & LITTLENUM_MASK
);
7973 v
= inst
.reloc
.exp
.X_add_number
;
7975 if (!inst
.operands
[i
].issingle
)
7979 /* LDR should not use lead in a flag-setting instruction being
7980 chosen so we do not check whether movs can be used. */
7982 if ((ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2
)
7983 || ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2_v8m
))
7984 && inst
.operands
[i
].reg
!= 13
7985 && inst
.operands
[i
].reg
!= 15)
7987 /* Check if on thumb2 it can be done with a mov.w, mvn or
7988 movw instruction. */
7989 unsigned int newimm
;
7990 bfd_boolean isNegated
;
7992 newimm
= encode_thumb32_immediate (v
);
7993 if (newimm
!= (unsigned int) FAIL
)
7997 newimm
= encode_thumb32_immediate (~v
);
7998 if (newimm
!= (unsigned int) FAIL
)
8002 /* The number can be loaded with a mov.w or mvn
8004 if (newimm
!= (unsigned int) FAIL
8005 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2
))
8007 inst
.instruction
= (0xf04f0000 /* MOV.W. */
8008 | (inst
.operands
[i
].reg
<< 8));
8009 /* Change to MOVN. */
8010 inst
.instruction
|= (isNegated
? 0x200000 : 0);
8011 inst
.instruction
|= (newimm
& 0x800) << 15;
8012 inst
.instruction
|= (newimm
& 0x700) << 4;
8013 inst
.instruction
|= (newimm
& 0x0ff);
8016 /* The number can be loaded with a movw instruction. */
8017 else if ((v
& ~0xFFFF) == 0
8018 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2_v8m
))
8020 int imm
= v
& 0xFFFF;
8022 inst
.instruction
= 0xf2400000; /* MOVW. */
8023 inst
.instruction
|= (inst
.operands
[i
].reg
<< 8);
8024 inst
.instruction
|= (imm
& 0xf000) << 4;
8025 inst
.instruction
|= (imm
& 0x0800) << 15;
8026 inst
.instruction
|= (imm
& 0x0700) << 4;
8027 inst
.instruction
|= (imm
& 0x00ff);
8034 int value
= encode_arm_immediate (v
);
8038 /* This can be done with a mov instruction. */
8039 inst
.instruction
&= LITERAL_MASK
;
8040 inst
.instruction
|= INST_IMMEDIATE
| (OPCODE_MOV
<< DATA_OP_SHIFT
);
8041 inst
.instruction
|= value
& 0xfff;
8045 value
= encode_arm_immediate (~ v
);
8048 /* This can be done with a mvn instruction. */
8049 inst
.instruction
&= LITERAL_MASK
;
8050 inst
.instruction
|= INST_IMMEDIATE
| (OPCODE_MVN
<< DATA_OP_SHIFT
);
8051 inst
.instruction
|= value
& 0xfff;
8055 else if (t
== CONST_VEC
&& ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_v1
))
8058 unsigned immbits
= 0;
8059 unsigned immlo
= inst
.operands
[1].imm
;
8060 unsigned immhi
= inst
.operands
[1].regisimm
8061 ? inst
.operands
[1].reg
8062 : inst
.reloc
.exp
.X_unsigned
8064 : ((bfd_int64_t
)((int) immlo
)) >> 32;
8065 int cmode
= neon_cmode_for_move_imm (immlo
, immhi
, FALSE
, &immbits
,
8066 &op
, 64, NT_invtype
);
8070 neon_invert_size (&immlo
, &immhi
, 64);
8072 cmode
= neon_cmode_for_move_imm (immlo
, immhi
, FALSE
, &immbits
,
8073 &op
, 64, NT_invtype
);
8078 inst
.instruction
= (inst
.instruction
& VLDR_VMOV_SAME
)
8084 /* Fill other bits in vmov encoding for both thumb and arm. */
8086 inst
.instruction
|= (0x7U
<< 29) | (0xF << 24);
8088 inst
.instruction
|= (0xFU
<< 28) | (0x1 << 25);
8089 neon_write_immbits (immbits
);
8097 /* Check if vldr Rx, =constant could be optimized to vmov Rx, #constant. */
8098 if (inst
.operands
[i
].issingle
8099 && is_quarter_float (inst
.operands
[1].imm
)
8100 && ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v3xd
))
8102 inst
.operands
[1].imm
=
8103 neon_qfloat_bits (v
);
8104 do_vfp_nsyn_opcode ("fconsts");
8108 /* If our host does not support a 64-bit type then we cannot perform
8109 the following optimization. This mean that there will be a
8110 discrepancy between the output produced by an assembler built for
8111 a 32-bit-only host and the output produced from a 64-bit host, but
8112 this cannot be helped. */
8113 #if defined BFD_HOST_64_BIT
8114 else if (!inst
.operands
[1].issingle
8115 && ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v3
))
8117 if (is_double_a_single (v
)
8118 && is_quarter_float (double_to_single (v
)))
8120 inst
.operands
[1].imm
=
8121 neon_qfloat_bits (double_to_single (v
));
8122 do_vfp_nsyn_opcode ("fconstd");
8130 if (add_to_lit_pool ((!inst
.operands
[i
].isvec
8131 || inst
.operands
[i
].issingle
) ? 4 : 8) == FAIL
)
8134 inst
.operands
[1].reg
= REG_PC
;
8135 inst
.operands
[1].isreg
= 1;
8136 inst
.operands
[1].preind
= 1;
8137 inst
.reloc
.pc_rel
= 1;
8138 inst
.reloc
.type
= (thumb_p
8139 ? BFD_RELOC_ARM_THUMB_OFFSET
8141 ? BFD_RELOC_ARM_HWLITERAL
8142 : BFD_RELOC_ARM_LITERAL
));
8146 /* inst.operands[i] was set up by parse_address. Encode it into an
8147 ARM-format instruction. Reject all forms which cannot be encoded
8148 into a coprocessor load/store instruction. If wb_ok is false,
8149 reject use of writeback; if unind_ok is false, reject use of
8150 unindexed addressing. If reloc_override is not 0, use it instead
8151 of BFD_ARM_CP_OFF_IMM, unless the initial relocation is a group one
8152 (in which case it is preserved). */
8155 encode_arm_cp_address (int i
, int wb_ok
, int unind_ok
, int reloc_override
)
8157 if (!inst
.operands
[i
].isreg
)
8160 if (! inst
.operands
[0].isvec
)
8162 inst
.error
= _("invalid co-processor operand");
8165 if (move_or_literal_pool (0, CONST_VEC
, /*mode_3=*/FALSE
))
8169 inst
.instruction
|= inst
.operands
[i
].reg
<< 16;
8171 gas_assert (!(inst
.operands
[i
].preind
&& inst
.operands
[i
].postind
));
8173 if (!inst
.operands
[i
].preind
&& !inst
.operands
[i
].postind
) /* unindexed */
8175 gas_assert (!inst
.operands
[i
].writeback
);
8178 inst
.error
= _("instruction does not support unindexed addressing");
8181 inst
.instruction
|= inst
.operands
[i
].imm
;
8182 inst
.instruction
|= INDEX_UP
;
8186 if (inst
.operands
[i
].preind
)
8187 inst
.instruction
|= PRE_INDEX
;
8189 if (inst
.operands
[i
].writeback
)
8191 if (inst
.operands
[i
].reg
== REG_PC
)
8193 inst
.error
= _("pc may not be used with write-back");
8198 inst
.error
= _("instruction does not support writeback");
8201 inst
.instruction
|= WRITE_BACK
;
8205 inst
.reloc
.type
= (bfd_reloc_code_real_type
) reloc_override
;
8206 else if ((inst
.reloc
.type
< BFD_RELOC_ARM_ALU_PC_G0_NC
8207 || inst
.reloc
.type
> BFD_RELOC_ARM_LDC_SB_G2
)
8208 && inst
.reloc
.type
!= BFD_RELOC_ARM_LDR_PC_G0
)
8211 inst
.reloc
.type
= BFD_RELOC_ARM_T32_CP_OFF_IMM
;
8213 inst
.reloc
.type
= BFD_RELOC_ARM_CP_OFF_IMM
;
8216 /* Prefer + for zero encoded value. */
8217 if (!inst
.operands
[i
].negative
)
8218 inst
.instruction
|= INDEX_UP
;
8223 /* Functions for instruction encoding, sorted by sub-architecture.
8224 First some generics; their names are taken from the conventional
8225 bit positions for register arguments in ARM format instructions. */
8235 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8241 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
8247 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8248 inst
.instruction
|= inst
.operands
[1].reg
;
8254 inst
.instruction
|= inst
.operands
[0].reg
;
8255 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8261 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8262 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8268 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
8269 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
8275 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8276 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8280 check_obsolete (const arm_feature_set
*feature
, const char *msg
)
8282 if (ARM_CPU_IS_ANY (cpu_variant
))
8284 as_tsktsk ("%s", msg
);
8287 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, *feature
))
8299 unsigned Rn
= inst
.operands
[2].reg
;
8300 /* Enforce restrictions on SWP instruction. */
8301 if ((inst
.instruction
& 0x0fbfffff) == 0x01000090)
8303 constraint (Rn
== inst
.operands
[0].reg
|| Rn
== inst
.operands
[1].reg
,
8304 _("Rn must not overlap other operands"));
8306 /* SWP{b} is obsolete for ARMv8-A, and deprecated for ARMv6* and ARMv7.
8308 if (!check_obsolete (&arm_ext_v8
,
8309 _("swp{b} use is obsoleted for ARMv8 and later"))
8310 && warn_on_deprecated
8311 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6
))
8312 as_tsktsk (_("swp{b} use is deprecated for ARMv6 and ARMv7"));
8315 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8316 inst
.instruction
|= inst
.operands
[1].reg
;
8317 inst
.instruction
|= Rn
<< 16;
8323 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8324 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8325 inst
.instruction
|= inst
.operands
[2].reg
;
8331 constraint ((inst
.operands
[2].reg
== REG_PC
), BAD_PC
);
8332 constraint (((inst
.reloc
.exp
.X_op
!= O_constant
8333 && inst
.reloc
.exp
.X_op
!= O_illegal
)
8334 || inst
.reloc
.exp
.X_add_number
!= 0),
8336 inst
.instruction
|= inst
.operands
[0].reg
;
8337 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
8338 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
8344 inst
.instruction
|= inst
.operands
[0].imm
;
8350 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8351 encode_arm_cp_address (1, TRUE
, TRUE
, 0);
8354 /* ARM instructions, in alphabetical order by function name (except
8355 that wrapper functions appear immediately after the function they
8358 /* This is a pseudo-op of the form "adr rd, label" to be converted
8359 into a relative address of the form "add rd, pc, #label-.-8". */
8364 inst
.instruction
|= (inst
.operands
[0].reg
<< 12); /* Rd */
8366 /* Frag hacking will turn this into a sub instruction if the offset turns
8367 out to be negative. */
8368 inst
.reloc
.type
= BFD_RELOC_ARM_IMMEDIATE
;
8369 inst
.reloc
.pc_rel
= 1;
8370 inst
.reloc
.exp
.X_add_number
-= 8;
8372 if (inst
.reloc
.exp
.X_op
== O_symbol
8373 && inst
.reloc
.exp
.X_add_symbol
!= NULL
8374 && S_IS_DEFINED (inst
.reloc
.exp
.X_add_symbol
)
8375 && THUMB_IS_FUNC (inst
.reloc
.exp
.X_add_symbol
))
8376 inst
.reloc
.exp
.X_add_number
+= 1;
8379 /* This is a pseudo-op of the form "adrl rd, label" to be converted
8380 into a relative address of the form:
8381 add rd, pc, #low(label-.-8)"
8382 add rd, rd, #high(label-.-8)" */
8387 inst
.instruction
|= (inst
.operands
[0].reg
<< 12); /* Rd */
8389 /* Frag hacking will turn this into a sub instruction if the offset turns
8390 out to be negative. */
8391 inst
.reloc
.type
= BFD_RELOC_ARM_ADRL_IMMEDIATE
;
8392 inst
.reloc
.pc_rel
= 1;
8393 inst
.size
= INSN_SIZE
* 2;
8394 inst
.reloc
.exp
.X_add_number
-= 8;
8396 if (inst
.reloc
.exp
.X_op
== O_symbol
8397 && inst
.reloc
.exp
.X_add_symbol
!= NULL
8398 && S_IS_DEFINED (inst
.reloc
.exp
.X_add_symbol
)
8399 && THUMB_IS_FUNC (inst
.reloc
.exp
.X_add_symbol
))
8400 inst
.reloc
.exp
.X_add_number
+= 1;
8406 constraint (inst
.reloc
.type
>= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
8407 && inst
.reloc
.type
<= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
,
8409 if (!inst
.operands
[1].present
)
8410 inst
.operands
[1].reg
= inst
.operands
[0].reg
;
8411 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8412 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8413 encode_arm_shifter_operand (2);
8419 if (inst
.operands
[0].present
)
8420 inst
.instruction
|= inst
.operands
[0].imm
;
8422 inst
.instruction
|= 0xf;
8428 unsigned int msb
= inst
.operands
[1].imm
+ inst
.operands
[2].imm
;
8429 constraint (msb
> 32, _("bit-field extends past end of register"));
8430 /* The instruction encoding stores the LSB and MSB,
8431 not the LSB and width. */
8432 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8433 inst
.instruction
|= inst
.operands
[1].imm
<< 7;
8434 inst
.instruction
|= (msb
- 1) << 16;
8442 /* #0 in second position is alternative syntax for bfc, which is
8443 the same instruction but with REG_PC in the Rm field. */
8444 if (!inst
.operands
[1].isreg
)
8445 inst
.operands
[1].reg
= REG_PC
;
8447 msb
= inst
.operands
[2].imm
+ inst
.operands
[3].imm
;
8448 constraint (msb
> 32, _("bit-field extends past end of register"));
8449 /* The instruction encoding stores the LSB and MSB,
8450 not the LSB and width. */
8451 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8452 inst
.instruction
|= inst
.operands
[1].reg
;
8453 inst
.instruction
|= inst
.operands
[2].imm
<< 7;
8454 inst
.instruction
|= (msb
- 1) << 16;
8460 constraint (inst
.operands
[2].imm
+ inst
.operands
[3].imm
> 32,
8461 _("bit-field extends past end of register"));
8462 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8463 inst
.instruction
|= inst
.operands
[1].reg
;
8464 inst
.instruction
|= inst
.operands
[2].imm
<< 7;
8465 inst
.instruction
|= (inst
.operands
[3].imm
- 1) << 16;
8468 /* ARM V5 breakpoint instruction (argument parse)
8469 BKPT <16 bit unsigned immediate>
8470 Instruction is not conditional.
8471 The bit pattern given in insns[] has the COND_ALWAYS condition,
8472 and it is an error if the caller tried to override that. */
8477 /* Top 12 of 16 bits to bits 19:8. */
8478 inst
.instruction
|= (inst
.operands
[0].imm
& 0xfff0) << 4;
8480 /* Bottom 4 of 16 bits to bits 3:0. */
8481 inst
.instruction
|= inst
.operands
[0].imm
& 0xf;
8485 encode_branch (int default_reloc
)
8487 if (inst
.operands
[0].hasreloc
)
8489 constraint (inst
.operands
[0].imm
!= BFD_RELOC_ARM_PLT32
8490 && inst
.operands
[0].imm
!= BFD_RELOC_ARM_TLS_CALL
,
8491 _("the only valid suffixes here are '(plt)' and '(tlscall)'"));
8492 inst
.reloc
.type
= inst
.operands
[0].imm
== BFD_RELOC_ARM_PLT32
8493 ? BFD_RELOC_ARM_PLT32
8494 : thumb_mode
? BFD_RELOC_ARM_THM_TLS_CALL
: BFD_RELOC_ARM_TLS_CALL
;
8497 inst
.reloc
.type
= (bfd_reloc_code_real_type
) default_reloc
;
8498 inst
.reloc
.pc_rel
= 1;
8505 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
)
8506 encode_branch (BFD_RELOC_ARM_PCREL_JUMP
);
8509 encode_branch (BFD_RELOC_ARM_PCREL_BRANCH
);
8516 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
)
8518 if (inst
.cond
== COND_ALWAYS
)
8519 encode_branch (BFD_RELOC_ARM_PCREL_CALL
);
8521 encode_branch (BFD_RELOC_ARM_PCREL_JUMP
);
8525 encode_branch (BFD_RELOC_ARM_PCREL_BRANCH
);
8528 /* ARM V5 branch-link-exchange instruction (argument parse)
8529 BLX <target_addr> ie BLX(1)
8530 BLX{<condition>} <Rm> ie BLX(2)
8531 Unfortunately, there are two different opcodes for this mnemonic.
8532 So, the insns[].value is not used, and the code here zaps values
8533 into inst.instruction.
8534 Also, the <target_addr> can be 25 bits, hence has its own reloc. */
8539 if (inst
.operands
[0].isreg
)
8541 /* Arg is a register; the opcode provided by insns[] is correct.
8542 It is not illegal to do "blx pc", just useless. */
8543 if (inst
.operands
[0].reg
== REG_PC
)
8544 as_tsktsk (_("use of r15 in blx in ARM mode is not really useful"));
8546 inst
.instruction
|= inst
.operands
[0].reg
;
8550 /* Arg is an address; this instruction cannot be executed
8551 conditionally, and the opcode must be adjusted.
8552 We retain the BFD_RELOC_ARM_PCREL_BLX till the very end
8553 where we generate out a BFD_RELOC_ARM_PCREL_CALL instead. */
8554 constraint (inst
.cond
!= COND_ALWAYS
, BAD_COND
);
8555 inst
.instruction
= 0xfa000000;
8556 encode_branch (BFD_RELOC_ARM_PCREL_BLX
);
8563 bfd_boolean want_reloc
;
8565 if (inst
.operands
[0].reg
== REG_PC
)
8566 as_tsktsk (_("use of r15 in bx in ARM mode is not really useful"));
8568 inst
.instruction
|= inst
.operands
[0].reg
;
8569 /* Output R_ARM_V4BX relocations if is an EABI object that looks like
8570 it is for ARMv4t or earlier. */
8571 want_reloc
= !ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5
);
8572 if (object_arch
&& !ARM_CPU_HAS_FEATURE (*object_arch
, arm_ext_v5
))
8576 if (EF_ARM_EABI_VERSION (meabi_flags
) < EF_ARM_EABI_VER4
)
8581 inst
.reloc
.type
= BFD_RELOC_ARM_V4BX
;
8585 /* ARM v5TEJ. Jump to Jazelle code. */
8590 if (inst
.operands
[0].reg
== REG_PC
)
8591 as_tsktsk (_("use of r15 in bxj is not really useful"));
8593 inst
.instruction
|= inst
.operands
[0].reg
;
8596 /* Co-processor data operation:
8597 CDP{cond} <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}
8598 CDP2 <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>} */
8602 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8603 inst
.instruction
|= inst
.operands
[1].imm
<< 20;
8604 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
8605 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
8606 inst
.instruction
|= inst
.operands
[4].reg
;
8607 inst
.instruction
|= inst
.operands
[5].imm
<< 5;
8613 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
8614 encode_arm_shifter_operand (1);
8617 /* Transfer between coprocessor and ARM registers.
8618 MRC{cond} <coproc>, <opcode_1>, <Rd>, <CRn>, <CRm>{, <opcode_2>}
8623 No special properties. */
8625 struct deprecated_coproc_regs_s
8632 arm_feature_set deprecated
;
8633 arm_feature_set obsoleted
;
8634 const char *dep_msg
;
8635 const char *obs_msg
;
8638 #define DEPR_ACCESS_V8 \
8639 N_("This coprocessor register access is deprecated in ARMv8")
8641 /* Table of all deprecated coprocessor registers. */
8642 static struct deprecated_coproc_regs_s deprecated_coproc_regs
[] =
8644 {15, 0, 7, 10, 5, /* CP15DMB. */
8645 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
), ARM_ARCH_NONE
,
8646 DEPR_ACCESS_V8
, NULL
},
8647 {15, 0, 7, 10, 4, /* CP15DSB. */
8648 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
), ARM_ARCH_NONE
,
8649 DEPR_ACCESS_V8
, NULL
},
8650 {15, 0, 7, 5, 4, /* CP15ISB. */
8651 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
), ARM_ARCH_NONE
,
8652 DEPR_ACCESS_V8
, NULL
},
8653 {14, 6, 1, 0, 0, /* TEEHBR. */
8654 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
), ARM_ARCH_NONE
,
8655 DEPR_ACCESS_V8
, NULL
},
8656 {14, 6, 0, 0, 0, /* TEECR. */
8657 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
), ARM_ARCH_NONE
,
8658 DEPR_ACCESS_V8
, NULL
},
8661 #undef DEPR_ACCESS_V8
8663 static const size_t deprecated_coproc_reg_count
=
8664 sizeof (deprecated_coproc_regs
) / sizeof (deprecated_coproc_regs
[0]);
8672 Rd
= inst
.operands
[2].reg
;
8675 if (inst
.instruction
== 0xee000010
8676 || inst
.instruction
== 0xfe000010)
8678 reject_bad_reg (Rd
);
8679 else if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8
))
8681 constraint (Rd
== REG_SP
, BAD_SP
);
8686 if (inst
.instruction
== 0xe000010)
8687 constraint (Rd
== REG_PC
, BAD_PC
);
8690 for (i
= 0; i
< deprecated_coproc_reg_count
; ++i
)
8692 const struct deprecated_coproc_regs_s
*r
=
8693 deprecated_coproc_regs
+ i
;
8695 if (inst
.operands
[0].reg
== r
->cp
8696 && inst
.operands
[1].imm
== r
->opc1
8697 && inst
.operands
[3].reg
== r
->crn
8698 && inst
.operands
[4].reg
== r
->crm
8699 && inst
.operands
[5].imm
== r
->opc2
)
8701 if (! ARM_CPU_IS_ANY (cpu_variant
)
8702 && warn_on_deprecated
8703 && ARM_CPU_HAS_FEATURE (cpu_variant
, r
->deprecated
))
8704 as_tsktsk ("%s", r
->dep_msg
);
8708 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8709 inst
.instruction
|= inst
.operands
[1].imm
<< 21;
8710 inst
.instruction
|= Rd
<< 12;
8711 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
8712 inst
.instruction
|= inst
.operands
[4].reg
;
8713 inst
.instruction
|= inst
.operands
[5].imm
<< 5;
8716 /* Transfer between coprocessor register and pair of ARM registers.
8717 MCRR{cond} <coproc>, <opcode>, <Rd>, <Rn>, <CRm>.
8722 Two XScale instructions are special cases of these:
8724 MAR{cond} acc0, <RdLo>, <RdHi> == MCRR{cond} p0, #0, <RdLo>, <RdHi>, c0
8725 MRA{cond} acc0, <RdLo>, <RdHi> == MRRC{cond} p0, #0, <RdLo>, <RdHi>, c0
8727 Result unpredictable if Rd or Rn is R15. */
8734 Rd
= inst
.operands
[2].reg
;
8735 Rn
= inst
.operands
[3].reg
;
8739 reject_bad_reg (Rd
);
8740 reject_bad_reg (Rn
);
8744 constraint (Rd
== REG_PC
, BAD_PC
);
8745 constraint (Rn
== REG_PC
, BAD_PC
);
8748 /* Only check the MRRC{2} variants. */
8749 if ((inst
.instruction
& 0x0FF00000) == 0x0C500000)
8751 /* If Rd == Rn, error that the operation is
8752 unpredictable (example MRRC p3,#1,r1,r1,c4). */
8753 constraint (Rd
== Rn
, BAD_OVERLAP
);
8756 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8757 inst
.instruction
|= inst
.operands
[1].imm
<< 4;
8758 inst
.instruction
|= Rd
<< 12;
8759 inst
.instruction
|= Rn
<< 16;
8760 inst
.instruction
|= inst
.operands
[4].reg
;
8766 inst
.instruction
|= inst
.operands
[0].imm
<< 6;
8767 if (inst
.operands
[1].present
)
8769 inst
.instruction
|= CPSI_MMOD
;
8770 inst
.instruction
|= inst
.operands
[1].imm
;
8777 inst
.instruction
|= inst
.operands
[0].imm
;
8783 unsigned Rd
, Rn
, Rm
;
8785 Rd
= inst
.operands
[0].reg
;
8786 Rn
= (inst
.operands
[1].present
8787 ? inst
.operands
[1].reg
: Rd
);
8788 Rm
= inst
.operands
[2].reg
;
8790 constraint ((Rd
== REG_PC
), BAD_PC
);
8791 constraint ((Rn
== REG_PC
), BAD_PC
);
8792 constraint ((Rm
== REG_PC
), BAD_PC
);
8794 inst
.instruction
|= Rd
<< 16;
8795 inst
.instruction
|= Rn
<< 0;
8796 inst
.instruction
|= Rm
<< 8;
8802 /* There is no IT instruction in ARM mode. We
8803 process it to do the validation as if in
8804 thumb mode, just in case the code gets
8805 assembled for thumb using the unified syntax. */
8810 set_it_insn_type (IT_INSN
);
8811 now_it
.mask
= (inst
.instruction
& 0xf) | 0x10;
8812 now_it
.cc
= inst
.operands
[0].imm
;
/* If there is only one register in the register list RANGE (a 16-bit
   register mask), then return its register number.  Otherwise return -1.  */
static int
only_one_reg_in_list (int range)
{
  int i = ffs (range) - 1;

  /* ffs returns 0 for an empty mask, making I equal to -1; shifting by
     a negative count below would be undefined behaviour, so reject an
     empty (or out-of-range) mask explicitly first.  */
  if (i < 0 || i > 15)
    return -1;
  return range == (1 << i) ? i : -1;
}
8826 encode_ldmstm(int from_push_pop_mnem
)
8828 int base_reg
= inst
.operands
[0].reg
;
8829 int range
= inst
.operands
[1].imm
;
8832 inst
.instruction
|= base_reg
<< 16;
8833 inst
.instruction
|= range
;
8835 if (inst
.operands
[1].writeback
)
8836 inst
.instruction
|= LDM_TYPE_2_OR_3
;
8838 if (inst
.operands
[0].writeback
)
8840 inst
.instruction
|= WRITE_BACK
;
8841 /* Check for unpredictable uses of writeback. */
8842 if (inst
.instruction
& LOAD_BIT
)
8844 /* Not allowed in LDM type 2. */
8845 if ((inst
.instruction
& LDM_TYPE_2_OR_3
)
8846 && ((range
& (1 << REG_PC
)) == 0))
8847 as_warn (_("writeback of base register is UNPREDICTABLE"));
8848 /* Only allowed if base reg not in list for other types. */
8849 else if (range
& (1 << base_reg
))
8850 as_warn (_("writeback of base register when in register list is UNPREDICTABLE"));
8854 /* Not allowed for type 2. */
8855 if (inst
.instruction
& LDM_TYPE_2_OR_3
)
8856 as_warn (_("writeback of base register is UNPREDICTABLE"));
8857 /* Only allowed if base reg not in list, or first in list. */
8858 else if ((range
& (1 << base_reg
))
8859 && (range
& ((1 << base_reg
) - 1)))
8860 as_warn (_("if writeback register is in list, it must be the lowest reg in the list"));
8864 /* If PUSH/POP has only one register, then use the A2 encoding. */
8865 one_reg
= only_one_reg_in_list (range
);
8866 if (from_push_pop_mnem
&& one_reg
>= 0)
8868 int is_push
= (inst
.instruction
& A_PUSH_POP_OP_MASK
) == A1_OPCODE_PUSH
;
8870 inst
.instruction
&= A_COND_MASK
;
8871 inst
.instruction
|= is_push
? A2_OPCODE_PUSH
: A2_OPCODE_POP
;
8872 inst
.instruction
|= one_reg
<< 12;
8879 encode_ldmstm (/*from_push_pop_mnem=*/FALSE
);
8882 /* ARMv5TE load-consecutive (argument parse)
8891 constraint (inst
.operands
[0].reg
% 2 != 0,
8892 _("first transfer register must be even"));
8893 constraint (inst
.operands
[1].present
8894 && inst
.operands
[1].reg
!= inst
.operands
[0].reg
+ 1,
8895 _("can only transfer two consecutive registers"));
8896 constraint (inst
.operands
[0].reg
== REG_LR
, _("r14 not allowed here"));
8897 constraint (!inst
.operands
[2].isreg
, _("'[' expected"));
8899 if (!inst
.operands
[1].present
)
8900 inst
.operands
[1].reg
= inst
.operands
[0].reg
+ 1;
8902 /* encode_arm_addr_mode_3 will diagnose overlap between the base
8903 register and the first register written; we have to diagnose
8904 overlap between the base and the second register written here. */
8906 if (inst
.operands
[2].reg
== inst
.operands
[1].reg
8907 && (inst
.operands
[2].writeback
|| inst
.operands
[2].postind
))
8908 as_warn (_("base register written back, and overlaps "
8909 "second transfer register"));
8911 if (!(inst
.instruction
& V4_STR_BIT
))
8913 /* For an index-register load, the index register must not overlap the
8914 destination (even if not write-back). */
8915 if (inst
.operands
[2].immisreg
8916 && ((unsigned) inst
.operands
[2].imm
== inst
.operands
[0].reg
8917 || (unsigned) inst
.operands
[2].imm
== inst
.operands
[1].reg
))
8918 as_warn (_("index register overlaps transfer register"));
8920 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8921 encode_arm_addr_mode_3 (2, /*is_t=*/FALSE
);
8927 constraint (!inst
.operands
[1].isreg
|| !inst
.operands
[1].preind
8928 || inst
.operands
[1].postind
|| inst
.operands
[1].writeback
8929 || inst
.operands
[1].immisreg
|| inst
.operands
[1].shifted
8930 || inst
.operands
[1].negative
8931 /* This can arise if the programmer has written
8933 or if they have mistakenly used a register name as the last
8936 It is very difficult to distinguish between these two cases
8937 because "rX" might actually be a label. ie the register
8938 name has been occluded by a symbol of the same name. So we
8939 just generate a general 'bad addressing mode' type error
8940 message and leave it up to the programmer to discover the
8941 true cause and fix their mistake. */
8942 || (inst
.operands
[1].reg
== REG_PC
),
8945 constraint (inst
.reloc
.exp
.X_op
!= O_constant
8946 || inst
.reloc
.exp
.X_add_number
!= 0,
8947 _("offset must be zero in ARM encoding"));
8949 constraint ((inst
.operands
[1].reg
== REG_PC
), BAD_PC
);
8951 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8952 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8953 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
8959 constraint (inst
.operands
[0].reg
% 2 != 0,
8960 _("even register required"));
8961 constraint (inst
.operands
[1].present
8962 && inst
.operands
[1].reg
!= inst
.operands
[0].reg
+ 1,
8963 _("can only load two consecutive registers"));
8964 /* If op 1 were present and equal to PC, this function wouldn't
8965 have been called in the first place. */
8966 constraint (inst
.operands
[0].reg
== REG_LR
, _("r14 not allowed here"));
8968 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8969 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
8972 /* In both ARM and thumb state 'ldr pc, #imm' with an immediate
8973 which is not a multiple of four is UNPREDICTABLE. */
8975 check_ldr_r15_aligned (void)
8977 constraint (!(inst
.operands
[1].immisreg
)
8978 && (inst
.operands
[0].reg
== REG_PC
8979 && inst
.operands
[1].reg
== REG_PC
8980 && (inst
.reloc
.exp
.X_add_number
& 0x3)),
8981 _("ldr to register 15 must be 4-byte alligned"));
8987 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8988 if (!inst
.operands
[1].isreg
)
8989 if (move_or_literal_pool (0, CONST_ARM
, /*mode_3=*/FALSE
))
8991 encode_arm_addr_mode_2 (1, /*is_t=*/FALSE
);
8992 check_ldr_r15_aligned ();
8998 /* ldrt/strt always use post-indexed addressing. Turn [Rn] into [Rn]! and
9000 if (inst
.operands
[1].preind
)
9002 constraint (inst
.reloc
.exp
.X_op
!= O_constant
9003 || inst
.reloc
.exp
.X_add_number
!= 0,
9004 _("this instruction requires a post-indexed address"));
9006 inst
.operands
[1].preind
= 0;
9007 inst
.operands
[1].postind
= 1;
9008 inst
.operands
[1].writeback
= 1;
9010 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9011 encode_arm_addr_mode_2 (1, /*is_t=*/TRUE
);
9014 /* Halfword and signed-byte load/store operations. */
9019 constraint (inst
.operands
[0].reg
== REG_PC
, BAD_PC
);
9020 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9021 if (!inst
.operands
[1].isreg
)
9022 if (move_or_literal_pool (0, CONST_ARM
, /*mode_3=*/TRUE
))
9024 encode_arm_addr_mode_3 (1, /*is_t=*/FALSE
);
9030 /* ldrt/strt always use post-indexed addressing. Turn [Rn] into [Rn]! and
9032 if (inst
.operands
[1].preind
)
9034 constraint (inst
.reloc
.exp
.X_op
!= O_constant
9035 || inst
.reloc
.exp
.X_add_number
!= 0,
9036 _("this instruction requires a post-indexed address"));
9038 inst
.operands
[1].preind
= 0;
9039 inst
.operands
[1].postind
= 1;
9040 inst
.operands
[1].writeback
= 1;
9042 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9043 encode_arm_addr_mode_3 (1, /*is_t=*/TRUE
);
9046 /* Co-processor register load/store.
9047 Format: <LDC|STC>{cond}[L] CP#,CRd,<address> */
9051 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9052 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
9053 encode_arm_cp_address (2, TRUE
, TRUE
, 0);
9059 /* This restriction does not apply to mls (nor to mla in v6 or later). */
9060 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
9061 && !ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6
)
9062 && !(inst
.instruction
& 0x00400000))
9063 as_tsktsk (_("Rd and Rm should be different in mla"));
9065 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9066 inst
.instruction
|= inst
.operands
[1].reg
;
9067 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
9068 inst
.instruction
|= inst
.operands
[3].reg
<< 12;
9074 constraint (inst
.reloc
.type
>= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
9075 && inst
.reloc
.type
<= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
,
9077 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9078 encode_arm_shifter_operand (1);
9081 /* ARM V6T2 16-bit immediate register load: MOV[WT]{cond} Rd, #<imm16>. */
9088 top
= (inst
.instruction
& 0x00400000) != 0;
9089 constraint (top
&& inst
.reloc
.type
== BFD_RELOC_ARM_MOVW
,
9090 _(":lower16: not allowed in this instruction"));
9091 constraint (!top
&& inst
.reloc
.type
== BFD_RELOC_ARM_MOVT
,
9092 _(":upper16: not allowed in this instruction"));
9093 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9094 if (inst
.reloc
.type
== BFD_RELOC_UNUSED
)
9096 imm
= inst
.reloc
.exp
.X_add_number
;
9097 /* The value is in two pieces: 0:11, 16:19. */
9098 inst
.instruction
|= (imm
& 0x00000fff);
9099 inst
.instruction
|= (imm
& 0x0000f000) << 4;
9104 do_vfp_nsyn_mrs (void)
9106 if (inst
.operands
[0].isvec
)
9108 if (inst
.operands
[1].reg
!= 1)
9109 first_error (_("operand 1 must be FPSCR"));
9110 memset (&inst
.operands
[0], '\0', sizeof (inst
.operands
[0]));
9111 memset (&inst
.operands
[1], '\0', sizeof (inst
.operands
[1]));
9112 do_vfp_nsyn_opcode ("fmstat");
9114 else if (inst
.operands
[1].isvec
)
9115 do_vfp_nsyn_opcode ("fmrx");
9123 do_vfp_nsyn_msr (void)
9125 if (inst
.operands
[0].isvec
)
9126 do_vfp_nsyn_opcode ("fmxr");
9136 unsigned Rt
= inst
.operands
[0].reg
;
9138 if (thumb_mode
&& Rt
== REG_SP
)
9140 inst
.error
= BAD_SP
;
9144 /* APSR_ sets isvec. All other refs to PC are illegal. */
9145 if (!inst
.operands
[0].isvec
&& Rt
== REG_PC
)
9147 inst
.error
= BAD_PC
;
9151 /* If we get through parsing the register name, we just insert the number
9152 generated into the instruction without further validation. */
9153 inst
.instruction
|= (inst
.operands
[1].reg
<< 16);
9154 inst
.instruction
|= (Rt
<< 12);
9160 unsigned Rt
= inst
.operands
[1].reg
;
9163 reject_bad_reg (Rt
);
9164 else if (Rt
== REG_PC
)
9166 inst
.error
= BAD_PC
;
9170 /* If we get through parsing the register name, we just insert the number
9171 generated into the instruction without further validation. */
9172 inst
.instruction
|= (inst
.operands
[0].reg
<< 16);
9173 inst
.instruction
|= (Rt
<< 12);
9181 if (do_vfp_nsyn_mrs () == SUCCESS
)
9184 constraint (inst
.operands
[0].reg
== REG_PC
, BAD_PC
);
9185 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9187 if (inst
.operands
[1].isreg
)
9189 br
= inst
.operands
[1].reg
;
9190 if (((br
& 0x200) == 0) && ((br
& 0xf0000) != 0xf000))
9191 as_bad (_("bad register for mrs"));
9195 /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all. */
9196 constraint ((inst
.operands
[1].imm
& (PSR_c
|PSR_x
|PSR_s
|PSR_f
))
9198 _("'APSR', 'CPSR' or 'SPSR' expected"));
9199 br
= (15<<16) | (inst
.operands
[1].imm
& SPSR_BIT
);
9202 inst
.instruction
|= br
;
9205 /* Two possible forms:
9206 "{C|S}PSR_<field>, Rm",
9207 "{C|S}PSR_f, #expression". */
9212 if (do_vfp_nsyn_msr () == SUCCESS
)
9215 inst
.instruction
|= inst
.operands
[0].imm
;
9216 if (inst
.operands
[1].isreg
)
9217 inst
.instruction
|= inst
.operands
[1].reg
;
9220 inst
.instruction
|= INST_IMMEDIATE
;
9221 inst
.reloc
.type
= BFD_RELOC_ARM_IMMEDIATE
;
9222 inst
.reloc
.pc_rel
= 0;
9229 constraint (inst
.operands
[2].reg
== REG_PC
, BAD_PC
);
9231 if (!inst
.operands
[2].present
)
9232 inst
.operands
[2].reg
= inst
.operands
[0].reg
;
9233 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9234 inst
.instruction
|= inst
.operands
[1].reg
;
9235 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
9237 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
9238 && !ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6
))
9239 as_tsktsk (_("Rd and Rm should be different in mul"));
9242 /* Long Multiply Parser
9243 UMULL RdLo, RdHi, Rm, Rs
9244 SMULL RdLo, RdHi, Rm, Rs
9245 UMLAL RdLo, RdHi, Rm, Rs
9246 SMLAL RdLo, RdHi, Rm, Rs. */
9251 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9252 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9253 inst
.instruction
|= inst
.operands
[2].reg
;
9254 inst
.instruction
|= inst
.operands
[3].reg
<< 8;
9256 /* rdhi and rdlo must be different. */
9257 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
)
9258 as_tsktsk (_("rdhi and rdlo must be different"));
9260 /* rdhi, rdlo and rm must all be different before armv6. */
9261 if ((inst
.operands
[0].reg
== inst
.operands
[2].reg
9262 || inst
.operands
[1].reg
== inst
.operands
[2].reg
)
9263 && !ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6
))
9264 as_tsktsk (_("rdhi, rdlo and rm must all be different"));
9270 if (inst
.operands
[0].present
9271 || ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6k
))
9273 /* Architectural NOP hints are CPSR sets with no bits selected. */
9274 inst
.instruction
&= 0xf0000000;
9275 inst
.instruction
|= 0x0320f000;
9276 if (inst
.operands
[0].present
)
9277 inst
.instruction
|= inst
.operands
[0].imm
;
9281 /* ARM V6 Pack Halfword Bottom Top instruction (argument parse).
9282 PKHBT {<cond>} <Rd>, <Rn>, <Rm> {, LSL #<shift_imm>}
9283 Condition defaults to COND_ALWAYS.
9284 Error if Rd, Rn or Rm are R15. */
9289 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9290 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9291 inst
.instruction
|= inst
.operands
[2].reg
;
9292 if (inst
.operands
[3].present
)
9293 encode_arm_shift (3);
9296 /* ARM V6 PKHTB (Argument Parse). */
9301 if (!inst
.operands
[3].present
)
9303 /* If the shift specifier is omitted, turn the instruction
9304 into pkhbt rd, rm, rn. */
9305 inst
.instruction
&= 0xfff00010;
9306 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9307 inst
.instruction
|= inst
.operands
[1].reg
;
9308 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
9312 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9313 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9314 inst
.instruction
|= inst
.operands
[2].reg
;
9315 encode_arm_shift (3);
9319 /* ARMv5TE: Preload-Cache
9320 MP Extensions: Preload for write
9324 Syntactically, like LDR with B=1, W=0, L=1. */
9329 constraint (!inst
.operands
[0].isreg
,
9330 _("'[' expected after PLD mnemonic"));
9331 constraint (inst
.operands
[0].postind
,
9332 _("post-indexed expression used in preload instruction"));
9333 constraint (inst
.operands
[0].writeback
,
9334 _("writeback used in preload instruction"));
9335 constraint (!inst
.operands
[0].preind
,
9336 _("unindexed addressing used in preload instruction"));
9337 encode_arm_addr_mode_2 (0, /*is_t=*/FALSE
);
9340 /* ARMv7: PLI <addr_mode> */
9344 constraint (!inst
.operands
[0].isreg
,
9345 _("'[' expected after PLI mnemonic"));
9346 constraint (inst
.operands
[0].postind
,
9347 _("post-indexed expression used in preload instruction"));
9348 constraint (inst
.operands
[0].writeback
,
9349 _("writeback used in preload instruction"));
9350 constraint (!inst
.operands
[0].preind
,
9351 _("unindexed addressing used in preload instruction"));
9352 encode_arm_addr_mode_2 (0, /*is_t=*/FALSE
);
9353 inst
.instruction
&= ~PRE_INDEX
;
9359 constraint (inst
.operands
[0].writeback
,
9360 _("push/pop do not support {reglist}^"));
9361 inst
.operands
[1] = inst
.operands
[0];
9362 memset (&inst
.operands
[0], 0, sizeof inst
.operands
[0]);
9363 inst
.operands
[0].isreg
= 1;
9364 inst
.operands
[0].writeback
= 1;
9365 inst
.operands
[0].reg
= REG_SP
;
9366 encode_ldmstm (/*from_push_pop_mnem=*/TRUE
);
9369 /* ARM V6 RFE (Return from Exception) loads the PC and CPSR from the
9370 word at the specified address and the following word
9372 Unconditionally executed.
9373 Error if Rn is R15. */
9378 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9379 if (inst
.operands
[0].writeback
)
9380 inst
.instruction
|= WRITE_BACK
;
9383 /* ARM V6 ssat (argument parse). */
9388 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9389 inst
.instruction
|= (inst
.operands
[1].imm
- 1) << 16;
9390 inst
.instruction
|= inst
.operands
[2].reg
;
9392 if (inst
.operands
[3].present
)
9393 encode_arm_shift (3);
9396 /* ARM V6 usat (argument parse). */
9401 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9402 inst
.instruction
|= inst
.operands
[1].imm
<< 16;
9403 inst
.instruction
|= inst
.operands
[2].reg
;
9405 if (inst
.operands
[3].present
)
9406 encode_arm_shift (3);
9409 /* ARM V6 ssat16 (argument parse). */
9414 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9415 inst
.instruction
|= ((inst
.operands
[1].imm
- 1) << 16);
9416 inst
.instruction
|= inst
.operands
[2].reg
;
9422 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9423 inst
.instruction
|= inst
.operands
[1].imm
<< 16;
9424 inst
.instruction
|= inst
.operands
[2].reg
;
9427 /* ARM V6 SETEND (argument parse). Sets the E bit in the CPSR while
9428 preserving the other bits.
9430 setend <endian_specifier>, where <endian_specifier> is either
9436 if (warn_on_deprecated
9437 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8
))
9438 as_tsktsk (_("setend use is deprecated for ARMv8"));
9440 if (inst
.operands
[0].imm
)
9441 inst
.instruction
|= 0x200;
9447 unsigned int Rm
= (inst
.operands
[1].present
9448 ? inst
.operands
[1].reg
9449 : inst
.operands
[0].reg
);
9451 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9452 inst
.instruction
|= Rm
;
9453 if (inst
.operands
[2].isreg
) /* Rd, {Rm,} Rs */
9455 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
9456 inst
.instruction
|= SHIFT_BY_REG
;
9457 /* PR 12854: Error on extraneous shifts. */
9458 constraint (inst
.operands
[2].shifted
,
9459 _("extraneous shift as part of operand to shift insn"));
9462 inst
.reloc
.type
= BFD_RELOC_ARM_SHIFT_IMM
;
9468 inst
.reloc
.type
= BFD_RELOC_ARM_SMC
;
9469 inst
.reloc
.pc_rel
= 0;
9475 inst
.reloc
.type
= BFD_RELOC_ARM_HVC
;
9476 inst
.reloc
.pc_rel
= 0;
9482 inst
.reloc
.type
= BFD_RELOC_ARM_SWI
;
9483 inst
.reloc
.pc_rel
= 0;
9489 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_pan
),
9490 _("selected processor does not support SETPAN instruction"));
9492 inst
.instruction
|= ((inst
.operands
[0].imm
& 1) << 9);
9498 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_pan
),
9499 _("selected processor does not support SETPAN instruction"));
9501 inst
.instruction
|= (inst
.operands
[0].imm
<< 3);
9504 /* ARM V5E (El Segundo) signed-multiply-accumulate (argument parse)
9505 SMLAxy{cond} Rd,Rm,Rs,Rn
9506 SMLAWy{cond} Rd,Rm,Rs,Rn
9507 Error if any register is R15. */
9512 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9513 inst
.instruction
|= inst
.operands
[1].reg
;
9514 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
9515 inst
.instruction
|= inst
.operands
[3].reg
<< 12;
9518 /* ARM V5E (El Segundo) signed-multiply-accumulate-long (argument parse)
9519 SMLALxy{cond} Rdlo,Rdhi,Rm,Rs
9520 Error if any register is R15.
9521 Warning if Rdlo == Rdhi. */
9526 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9527 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9528 inst
.instruction
|= inst
.operands
[2].reg
;
9529 inst
.instruction
|= inst
.operands
[3].reg
<< 8;
9531 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
)
9532 as_tsktsk (_("rdhi and rdlo must be different"));
9535 /* ARM V5E (El Segundo) signed-multiply (argument parse)
9536 SMULxy{cond} Rd,Rm,Rs
9537 Error if any register is R15. */
9542 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9543 inst
.instruction
|= inst
.operands
[1].reg
;
9544 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
9547 /* ARM V6 srs (argument parse). The variable fields in the encoding are
9548 the same for both ARM and Thumb-2. */
9555 if (inst
.operands
[0].present
)
9557 reg
= inst
.operands
[0].reg
;
9558 constraint (reg
!= REG_SP
, _("SRS base register must be r13"));
9563 inst
.instruction
|= reg
<< 16;
9564 inst
.instruction
|= inst
.operands
[1].imm
;
9565 if (inst
.operands
[0].writeback
|| inst
.operands
[1].writeback
)
9566 inst
.instruction
|= WRITE_BACK
;
9569 /* ARM V6 strex (argument parse). */
9574 constraint (!inst
.operands
[2].isreg
|| !inst
.operands
[2].preind
9575 || inst
.operands
[2].postind
|| inst
.operands
[2].writeback
9576 || inst
.operands
[2].immisreg
|| inst
.operands
[2].shifted
9577 || inst
.operands
[2].negative
9578 /* See comment in do_ldrex(). */
9579 || (inst
.operands
[2].reg
== REG_PC
),
9582 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
9583 || inst
.operands
[0].reg
== inst
.operands
[2].reg
, BAD_OVERLAP
);
9585 constraint (inst
.reloc
.exp
.X_op
!= O_constant
9586 || inst
.reloc
.exp
.X_add_number
!= 0,
9587 _("offset must be zero in ARM encoding"));
9589 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9590 inst
.instruction
|= inst
.operands
[1].reg
;
9591 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
9592 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
9598 constraint (!inst
.operands
[2].isreg
|| !inst
.operands
[2].preind
9599 || inst
.operands
[2].postind
|| inst
.operands
[2].writeback
9600 || inst
.operands
[2].immisreg
|| inst
.operands
[2].shifted
9601 || inst
.operands
[2].negative
,
9604 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
9605 || inst
.operands
[0].reg
== inst
.operands
[2].reg
, BAD_OVERLAP
);
9613 constraint (inst
.operands
[1].reg
% 2 != 0,
9614 _("even register required"));
9615 constraint (inst
.operands
[2].present
9616 && inst
.operands
[2].reg
!= inst
.operands
[1].reg
+ 1,
9617 _("can only store two consecutive registers"));
9618 /* If op 2 were present and equal to PC, this function wouldn't
9619 have been called in the first place. */
9620 constraint (inst
.operands
[1].reg
== REG_LR
, _("r14 not allowed here"));
9622 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
9623 || inst
.operands
[0].reg
== inst
.operands
[1].reg
+ 1
9624 || inst
.operands
[0].reg
== inst
.operands
[3].reg
,
9627 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9628 inst
.instruction
|= inst
.operands
[1].reg
;
9629 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
9636 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
9637 || inst
.operands
[0].reg
== inst
.operands
[2].reg
, BAD_OVERLAP
);
9645 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
9646 || inst
.operands
[0].reg
== inst
.operands
[2].reg
, BAD_OVERLAP
);
9651 /* ARM V6 SXTAH extracts a 16-bit value from a register, sign
9652 extends it to 32-bits, and adds the result to a value in another
9653 register. You can specify a rotation by 0, 8, 16, or 24 bits
9654 before extracting the 16-bit value.
9655 SXTAH{<cond>} <Rd>, <Rn>, <Rm>{, <rotation>}
9656 Condition defaults to COND_ALWAYS.
9657 Error if any register uses R15. */
9662 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9663 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9664 inst
.instruction
|= inst
.operands
[2].reg
;
9665 inst
.instruction
|= inst
.operands
[3].imm
<< 10;
9670 SXTH {<cond>} <Rd>, <Rm>{, <rotation>}
9671 Condition defaults to COND_ALWAYS.
9672 Error if any register uses R15. */
9677 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9678 inst
.instruction
|= inst
.operands
[1].reg
;
9679 inst
.instruction
|= inst
.operands
[2].imm
<< 10;
9682 /* VFP instructions. In a logical order: SP variant first, monad
9683 before dyad, arithmetic then move then load/store. */
9686 do_vfp_sp_monadic (void)
9688 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
9689 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sm
);
9693 do_vfp_sp_dyadic (void)
9695 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
9696 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sn
);
9697 encode_arm_vfp_reg (inst
.operands
[2].reg
, VFP_REG_Sm
);
9701 do_vfp_sp_compare_z (void)
9703 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
9707 do_vfp_dp_sp_cvt (void)
9709 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
9710 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sm
);
9714 do_vfp_sp_dp_cvt (void)
9716 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
9717 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dm
);
9721 do_vfp_reg_from_sp (void)
9723 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9724 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sn
);
9728 do_vfp_reg2_from_sp2 (void)
9730 constraint (inst
.operands
[2].imm
!= 2,
9731 _("only two consecutive VFP SP registers allowed here"));
9732 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9733 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9734 encode_arm_vfp_reg (inst
.operands
[2].reg
, VFP_REG_Sm
);
9738 do_vfp_sp_from_reg (void)
9740 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sn
);
9741 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
9745 do_vfp_sp2_from_reg2 (void)
9747 constraint (inst
.operands
[0].imm
!= 2,
9748 _("only two consecutive VFP SP registers allowed here"));
9749 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sm
);
9750 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
9751 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
9755 do_vfp_sp_ldst (void)
9757 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
9758 encode_arm_cp_address (1, FALSE
, TRUE
, 0);
9762 do_vfp_dp_ldst (void)
9764 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
9765 encode_arm_cp_address (1, FALSE
, TRUE
, 0);
/* VFP single-precision load/store multiple.  LDSTM_TYPE selects the
   addressing variant; only IA without writeback is valid when the
   base register has no writeback flag.  */

static void
vfp_sp_ldstm (enum vfp_ldstm_type ldstm_type)
{
  if (inst.operands[0].writeback)
    inst.instruction |= WRITE_BACK;
  else
    constraint (ldstm_type != VFP_LDSTMIA,
		_("this addressing mode requires base-register writeback"));

  inst.instruction |= inst.operands[0].reg << 16;	/* Base register Rn.  */
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sd);
  inst.instruction |= inst.operands[1].imm;		/* Register count.  */
}

/* VFP double-precision (and FLDMX/FSTMX) load/store multiple.  */

static void
vfp_dp_ldstm (enum vfp_ldstm_type ldstm_type)
{
  int count;

  if (inst.operands[0].writeback)
    inst.instruction |= WRITE_BACK;
  else
    constraint (ldstm_type != VFP_LDSTMIA
		&& ldstm_type != VFP_LDSTMIAX,
		_("this addressing mode requires base-register writeback"));

  inst.instruction |= inst.operands[0].reg << 16;
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);

  /* Each D register is two words; the X forms carry one extra word.  */
  count = inst.operands[1].imm << 1;
  if (ldstm_type == VFP_LDSTMIAX || ldstm_type == VFP_LDSTMDBX)
    count += 1;

  inst.instruction |= count;
}

static void
do_vfp_sp_ldstmia (void)
{
  vfp_sp_ldstm (VFP_LDSTMIA);
}

static void
do_vfp_sp_ldstmdb (void)
{
  vfp_sp_ldstm (VFP_LDSTMDB);
}

static void
do_vfp_dp_ldstmia (void)
{
  vfp_dp_ldstm (VFP_LDSTMIA);
}

static void
do_vfp_dp_ldstmdb (void)
{
  vfp_dp_ldstm (VFP_LDSTMDB);
}

static void
do_vfp_xp_ldstmia (void)
{
  vfp_dp_ldstm (VFP_LDSTMIAX);
}

static void
do_vfp_xp_ldstmdb (void)
{
  vfp_dp_ldstm (VFP_LDSTMDBX);
}
/* Two-operand double-precision insn: Dd, Dm.  */

static void
do_vfp_dp_rd_rm (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
}

/* Two-operand double-precision insn, Dn/Dd field order.  */

static void
do_vfp_dp_rn_rd (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dn);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
}

/* Two-operand double-precision insn, Dd/Dn field order.  */

static void
do_vfp_dp_rd_rn (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
}

/* Three-operand double-precision insn: Dd, Dn, Dm.  */

static void
do_vfp_dp_rd_rn_rm (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dm);
}

/* One-operand double-precision insn: Dd only.  */

static void
do_vfp_dp_rd (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
}

/* Three-operand double-precision insn, Dm/Dd/Dn field order.  */

static void
do_vfp_dp_rm_rd_rn (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dm);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dn);
}
/* VFPv3 instructions.	*/

/* fconsts Sd, #imm: the 8-bit immediate is split, high nibble into
   bits 16-19 and low nibble into bits 0-3.  */

static void
do_vfp_sp_const (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
  inst.instruction |= (inst.operands[1].imm & 0x0f);
}

/* fconstd Dd, #imm: same immediate split as the SP form.  */

static void
do_vfp_dp_const (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
  inst.instruction |= (inst.operands[1].imm & 0x0f);
}

/* Encode the fraction-bits immediate of a fixed-point conversion.
   SRCSIZE is 16 or 32; the encoded field is srcsize - #fbits, split
   into bit 5 (low bit) and bits 0-3 (remaining bits).  */

static void
vfp_conv (int srcsize)
{
  int immbits = srcsize - inst.operands[1].imm;

  if (srcsize == 16 && !(immbits >= 0 && immbits <= srcsize))
    {
      /* If srcsize is 16, inst.operands[1].imm must be in the range 0-16.
	 i.e. immbits must be in range 0 - 16.  */
      inst.error = _("immediate value out of range, expected range [0, 16]");
      return;
    }
  else if (srcsize == 32 && !(immbits >= 0 && immbits < srcsize))
    {
      /* If srcsize is 32, inst.operands[1].imm must be in the range 1-32.
	 i.e. immbits must be in range 0 - 31.  */
      inst.error = _("immediate value out of range, expected range [1, 32]");
      return;
    }

  inst.instruction |= (immbits & 1) << 5;
  inst.instruction |= (immbits >> 1);
}

static void
do_vfp_sp_conv_16 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  vfp_conv (16);
}

static void
do_vfp_dp_conv_16 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  vfp_conv (16);
}

static void
do_vfp_sp_conv_32 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  vfp_conv (32);
}

static void
do_vfp_dp_conv_32 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  vfp_conv (32);
}
/* FPA instructions.  Also in a logical order.	*/

/* FPA compare: first operand in bits 16-19, second in bits 0-3.  */

static void
do_fpa_cmp (void)
{
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;
}

/* FPA load/store multiple.  Operand 1's imm is the register count
   (1-3 encoded in the CP_T_X/CP_T_Y bits); operand 2 is the address.  */

static void
do_fpa_ldmstm (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  switch (inst.operands[1].imm)
    {
    case 1: inst.instruction |= CP_T_X;		 break;
    case 2: inst.instruction |= CP_T_Y;		 break;
    case 3: inst.instruction |= CP_T_Y | CP_T_X; break;
    default: abort ();
    }

  if (inst.instruction & (PRE_INDEX | INDEX_UP))
    {
      /* The instruction specified "ea" or "fd", so we can only accept
	 [Rn]{!}.  The instruction does not really support stacking or
	 unstacking, so we have to emulate these by setting appropriate
	 bits and offsets.  */
      constraint (inst.reloc.exp.X_op != O_constant
		  || inst.reloc.exp.X_add_number != 0,
		  _("this instruction does not support indexing"));

      /* Synthesise the stack offset: 12 bytes per FPA register.  */
      if ((inst.instruction & PRE_INDEX) || inst.operands[2].writeback)
	inst.reloc.exp.X_add_number = 12 * inst.operands[1].imm;

      if (!(inst.instruction & INDEX_UP))
	inst.reloc.exp.X_add_number = -inst.reloc.exp.X_add_number;

      if (!(inst.instruction & PRE_INDEX) && inst.operands[2].writeback)
	{
	  inst.operands[2].preind = 0;
	  inst.operands[2].postind = 1;
	}
    }

  encode_arm_cp_address (2, TRUE, TRUE, 0);
}
/* iWMMXt instructions: strictly in alphabetical order.	 */

/* TANDC/TORC: the only legal destination is r15.  */

static void
do_iwmmxt_tandorc (void)
{
  constraint (inst.operands[0].reg != REG_PC, _("only r15 allowed here"));
}

static void
do_iwmmxt_textrc (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].imm;
}

static void
do_iwmmxt_textrm (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].imm;
}

static void
do_iwmmxt_tinsr (void)
{
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].imm;
}

static void
do_iwmmxt_tmia (void)
{
  inst.instruction |= inst.operands[0].reg << 5;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 12;
}

static void
do_iwmmxt_waligni (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  inst.instruction |= inst.operands[3].imm << 20;
}

static void
do_iwmmxt_wmerge (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  inst.instruction |= inst.operands[3].imm << 21;
}

static void
do_iwmmxt_wmov (void)
{
  /* WMOV rD, rN is an alias for WOR rD, rN, rN.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[1].reg;
}
/* WLDRB/WLDRH/WSTRB/WSTRH: byte/halfword load/store; the offset
   reloc differs between ARM and Thumb encodings.  */

static void
do_iwmmxt_wldstbh (void)
{
  int reloc;
  inst.instruction |= inst.operands[0].reg << 12;
  if (thumb_mode)
    reloc = BFD_RELOC_ARM_T32_CP_OFF_IMM_S2;
  else
    reloc = BFD_RELOC_ARM_CP_OFF_IMM_S2;
  encode_arm_cp_address (1, TRUE, FALSE, reloc);
}

static void
do_iwmmxt_wldstw (void)
{
  /* RIWR_RIWC clears .isreg for a control register.  */
  if (!inst.operands[0].isreg)
    {
      constraint (inst.cond != COND_ALWAYS, BAD_COND);
      inst.instruction |= 0xf0000000;	/* Force the unconditional form.  */
    }

  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_cp_address (1, TRUE, TRUE, 0);
}

static void
do_iwmmxt_wldstd (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  /* iWMMXt2 adds a register-offset form; re-encode the addressing
     bits by hand for that case.  */
  if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2)
      && inst.operands[1].immisreg)
    {
      inst.instruction &= ~0x1a000ff;
      inst.instruction |= (0xfU << 28);
      if (inst.operands[1].preind)
	inst.instruction |= PRE_INDEX;
      if (!inst.operands[1].negative)
	inst.instruction |= INDEX_UP;
      if (inst.operands[1].writeback)
	inst.instruction |= WRITE_BACK;
      inst.instruction |= inst.operands[1].reg << 16;
      inst.instruction |= inst.reloc.exp.X_add_number << 4;
      inst.instruction |= inst.operands[1].imm;
    }
  else
    encode_arm_cp_address (1, TRUE, FALSE, 0);
}
/* WSHUFH: 8-bit shuffle immediate split across two 4-bit fields.  */

static void
do_iwmmxt_wshufh (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= ((inst.operands[2].imm & 0xf0) << 16);
  inst.instruction |= (inst.operands[2].imm & 0x0f);
}

static void
do_iwmmxt_wzero (void)
{
  /* WZERO reg is an alias for WANDN reg, reg, reg.  */
  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[0].reg << 16;
}

/* Shift/rotate insns that take either a register or (iWMMXt2 only) a
   5-bit immediate third operand; #0 forms are rewritten to equivalent
   non-zero encodings.  */

static void
do_iwmmxt_wrwrwr_or_imm5 (void)
{
  if (inst.operands[2].isreg)
    do_rd_rn_rm ();
  else
    {
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2),
		  _("immediate operand requires iWMMXt2"));
      do_rd_rn ();
      if (inst.operands[2].imm == 0)
	{
	  switch ((inst.instruction >> 20) & 0xf)
	    {
	    case 4:
	    case 5:
	    case 6:
	    case 7:
	      /* w...h wrd, wrn, #0 -> wrorh wrd, wrn, #16.  */
	      inst.operands[2].imm = 16;
	      inst.instruction = (inst.instruction & 0xff0fffff) | (0x7 << 20);
	      break;
	    case 8:
	    case 9:
	    case 10:
	    case 11:
	      /* w...w wrd, wrn, #0 -> wrorw wrd, wrn, #32.  */
	      inst.operands[2].imm = 32;
	      inst.instruction = (inst.instruction & 0xff0fffff) | (0xb << 20);
	      break;
	    case 12:
	    case 13:
	    case 14:
	    case 15:
	      {
		/* w...d wrd, wrn, #0 -> wor wrd, wrn, wrn.  */
		unsigned long wrn;
		wrn = (inst.instruction >> 16) & 0xf;
		inst.instruction &= 0xff0fff0f;
		inst.instruction |= wrn;
		/* Bail out here; the instruction is now assembled.  */
		return;
	      }
	    }
	}
      /* Map 32 -> 0, etc.  */
      inst.operands[2].imm &= 0x1f;
      inst.instruction |= (0xfU << 28) | ((inst.operands[2].imm & 0x10) << 4)
			  | (inst.operands[2].imm & 0xf);
    }
}
/* Cirrus Maverick instructions.  Simple 2-, 3-, and 4-register
   operations first, then control, shift, and load/store.  */

/* Insns like "foo X,Y,Z".  */

static void
do_mav_triple (void)
{
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 12;
}

/* Insns like "foo W,X,Y,Z".
    where W=MVAX[0:3] and X,Y,Z=MVFX[0:15].  */

static void
do_mav_quad (void)
{
  inst.instruction |= inst.operands[0].reg << 5;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
  inst.instruction |= inst.operands[3].reg;
}

/* cfmvsc32<cond> DSPSC,MVDX[15:0].  */

static void
do_mav_dspsc (void)
{
  inst.instruction |= inst.operands[1].reg << 12;
}

/* Maverick shift immediate instructions.
   cfsh32<cond> MVFX[15:0],MVFX[15:0],Shift[6:0].
   cfsh64<cond> MVDX[15:0],MVDX[15:0],Shift[6:0].  */

static void
do_mav_shift (void)
{
  int imm = inst.operands[2].imm;

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;

  /* Bits 0-3 of the insn should have bits 0-3 of the immediate.
     Bits 5-7 of the insn should have bits 4-6 of the immediate.
     Bit 4 should be 0.	 */
  imm = (imm & 0xf) | ((imm & 0x70) << 1);

  inst.instruction |= imm;
}
/* XScale instructions.	 Also sorted arithmetic before move.  */

/* Xscale multiply-accumulate (argument parse)
     MIAxycc acc0,Rm,Rs.  */

static void
do_xsc_mia (void)
{
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 12;
}

/* Xscale move-accumulator-register (argument parse)
     MARcc   acc0,RdLo,RdHi.  */

static void
do_xsc_mar (void)
{
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
}

/* Xscale move-register-accumulator (argument parse)
     MRAcc   RdLo,RdHi,acc0.  */

static void
do_xsc_mra (void)
{
  /* RdLo and RdHi must be distinct.  */
  constraint (inst.operands[0].reg == inst.operands[1].reg, BAD_OVERLAP);
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
}
/* Encoding functions relevant only to Thumb.  */

/* inst.operands[i] is a shifted-register operand; encode
   it into inst.instruction in the format used by Thumb32.  */

static void
encode_thumb32_shifted_operand (int i)
{
  unsigned int value = inst.reloc.exp.X_add_number;
  unsigned int shift = inst.operands[i].shift_kind;

  constraint (inst.operands[i].immisreg,
	      _("shift by register not allowed in thumb mode"));
  inst.instruction |= inst.operands[i].reg;
  if (shift == SHIFT_RRX)
    /* RRX is encoded as ROR #0.  */
    inst.instruction |= SHIFT_ROR << 4;
  else
    {
      constraint (inst.reloc.exp.X_op != O_constant,
		  _("expression too complex"));

      constraint (value > 32
		  || (value == 32 && (shift == SHIFT_LSL
				      || shift == SHIFT_ROR)),
		  _("shift expression is too large"));

      if (value == 0)
	shift = SHIFT_LSL;
      else if (value == 32)
	value = 0;

      inst.instruction |= shift << 4;
      /* Shift amount is split: bits 4-2 into imm3, bits 1-0 into imm2.  */
      inst.instruction |= (value & 0x1c) << 10;
      inst.instruction |= (value & 0x03) << 6;
    }
}
/* inst.operands[i] was set up by parse_address.  Encode it into a
   Thumb32 format load or store instruction.  Reject forms that cannot
   be used with such instructions.  If is_t is true, reject forms that
   cannot be used with a T instruction; if is_d is true, reject forms
   that cannot be used with a D instruction.  If it is a store insn,
   reject PC in Rn.  */

static void
encode_thumb32_addr_mode (int i, bfd_boolean is_t, bfd_boolean is_d)
{
  const bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);

  constraint (!inst.operands[i].isreg,
	      _("Instruction does not support =N addresses"));

  inst.instruction |= inst.operands[i].reg << 16;
  if (inst.operands[i].immisreg)
    {
      /* Register-offset form: [Rn, Rm {, LSL #imm}].  */
      constraint (is_pc, BAD_PC_ADDRESSING);
      constraint (is_t || is_d, _("cannot use register index with this instruction"));
      constraint (inst.operands[i].negative,
		  _("Thumb does not support negative register indexing"));
      constraint (inst.operands[i].postind,
		  _("Thumb does not support register post-indexing"));
      constraint (inst.operands[i].writeback,
		  _("Thumb does not support register indexing with writeback"));
      constraint (inst.operands[i].shifted && inst.operands[i].shift_kind != SHIFT_LSL,
		  _("Thumb supports only LSL in shifted register indexing"));

      inst.instruction |= inst.operands[i].imm;
      if (inst.operands[i].shifted)
	{
	  constraint (inst.reloc.exp.X_op != O_constant,
		      _("expression too complex"));
	  constraint (inst.reloc.exp.X_add_number < 0
		      || inst.reloc.exp.X_add_number > 3,
		      _("shift out of range"));
	  inst.instruction |= inst.reloc.exp.X_add_number << 4;
	}
      inst.reloc.type = BFD_RELOC_UNUSED;
    }
  else if (inst.operands[i].preind)
    {
      constraint (is_pc && inst.operands[i].writeback, BAD_PC_WRITEBACK);
      constraint (is_t && inst.operands[i].writeback,
		  _("cannot use writeback with this instruction"));
      constraint (is_pc && ((inst.instruction & THUMB2_LOAD_BIT) == 0),
		  BAD_PC_ADDRESSING);

      if (is_d)
	{
	  inst.instruction |= 0x01000000;
	  if (inst.operands[i].writeback)
	    inst.instruction |= 0x00200000;
	}
      else
	{
	  inst.instruction |= 0x00000c00;
	  if (inst.operands[i].writeback)
	    inst.instruction |= 0x00000100;
	}
      inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
    }
  else if (inst.operands[i].postind)
    {
      gas_assert (inst.operands[i].writeback);
      constraint (is_pc, _("cannot use post-indexing with PC-relative addressing"));
      constraint (is_t, _("cannot use post-indexing with this instruction"));

      if (is_d)
	inst.instruction |= 0x00200000;
      else
	inst.instruction |= 0x00000900;
      inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
    }
  else /* unindexed - only for coprocessor */
    inst.error = _("instruction does not accept unindexed addressing");
}
/* Table of Thumb instructions which exist in both 16- and 32-bit
   encodings (the latter only in post-V6T2 cores).  The index is the
   value used in the insns table below.  When there is more than one
   possible 16-bit encoding for the instruction, this table always
   holds variant (1).
   Also contains several pseudo-instructions used during relaxation.  */
#define T16_32_TAB				\
  X(_adc,   4140, eb400000),			\
  X(_adcs,  4140, eb500000),			\
  X(_add,   1c00, eb000000),			\
  X(_adds,  1c00, eb100000),			\
  X(_addi,  0000, f1000000),			\
  X(_addis, 0000, f1100000),			\
  X(_add_pc,000f, f20f0000),			\
  X(_add_sp,000d, f10d0000),			\
  X(_adr,   000f, f20f0000),			\
  X(_and,   4000, ea000000),			\
  X(_ands,  4000, ea100000),			\
  X(_asr,   1000, fa40f000),			\
  X(_asrs,  1000, fa50f000),			\
  X(_b,     e000, f000b000),			\
  X(_bcond, d000, f0008000),			\
  X(_bic,   4380, ea200000),			\
  X(_bics,  4380, ea300000),			\
  X(_cmn,   42c0, eb100f00),			\
  X(_cmp,   2800, ebb00f00),			\
  X(_cpsie, b660, f3af8400),			\
  X(_cpsid, b670, f3af8600),			\
  X(_cpy,   4600, ea4f0000),			\
  X(_dec_sp,80dd, f1ad0d00),			\
  X(_eor,   4040, ea800000),			\
  X(_eors,  4040, ea900000),			\
  X(_inc_sp,00dd, f10d0d00),			\
  X(_ldmia, c800, e8900000),			\
  X(_ldr,   6800, f8500000),			\
  X(_ldrb,  7800, f8100000),			\
  X(_ldrh,  8800, f8300000),			\
  X(_ldrsb, 5600, f9100000),			\
  X(_ldrsh, 5e00, f9300000),			\
  X(_ldr_pc,4800, f85f0000),			\
  X(_ldr_pc2,4800, f85f0000),			\
  X(_ldr_sp,9800, f85d0000),			\
  X(_lsl,   0000, fa00f000),			\
  X(_lsls,  0000, fa10f000),			\
  X(_lsr,   0800, fa20f000),			\
  X(_lsrs,  0800, fa30f000),			\
  X(_mov,   2000, ea4f0000),			\
  X(_movs,  2000, ea5f0000),			\
  X(_mul,   4340, fb00f000),			\
  X(_muls,  4340, ffffffff), /* no 32b muls */	\
  X(_mvn,   43c0, ea6f0000),			\
  X(_mvns,  43c0, ea7f0000),			\
  X(_neg,   4240, f1c00000), /* rsb #0 */	\
  X(_negs,  4240, f1d00000), /* rsbs #0 */	\
  X(_orr,   4300, ea400000),			\
  X(_orrs,  4300, ea500000),			\
  X(_pop,   bc00, e8bd0000), /* ldmia sp!,... */ \
  X(_push,  b400, e92d0000), /* stmdb sp!,... */ \
  X(_rev,   ba00, fa90f080),			\
  X(_rev16, ba40, fa90f090),			\
  X(_revsh, bac0, fa90f0b0),			\
  X(_ror,   41c0, fa60f000),			\
  X(_rors,  41c0, fa70f000),			\
  X(_sbc,   4180, eb600000),			\
  X(_sbcs,  4180, eb700000),			\
  X(_stmia, c000, e8800000),			\
  X(_str,   6000, f8400000),			\
  X(_strb,  7000, f8000000),			\
  X(_strh,  8000, f8200000),			\
  X(_str_sp,9000, f84d0000),			\
  X(_sub,   1e00, eba00000),			\
  X(_subs,  1e00, ebb00000),			\
  X(_subi,  8000, f1a00000),			\
  X(_subis, 8000, f1b00000),			\
  X(_sxtb,  b240, fa4ff080),			\
  X(_sxth,  b200, fa0ff080),			\
  X(_tst,   4200, ea100f00),			\
  X(_uxtb,  b2c0, fa5ff080),			\
  X(_uxth,  b280, fa1ff080),			\
  X(_nop,   bf00, f3af8000),			\
  X(_yield, bf10, f3af8001),			\
  X(_wfe,   bf20, f3af8002),			\
  X(_wfi,   bf30, f3af8003),			\
  X(_sev,   bf40, f3af8004),			\
  X(_sevl,  bf50, f3af8005),			\
  X(_udf,   de00, f7f0a000)

/* To catch errors in encoding functions, the codes are all offset by
   0xF800, putting them in one of the 32-bit prefix ranges, ergo undefined
   as 16-bit instructions.  */
#define X(a,b,c) T_MNEM##a
enum t16_32_codes { T16_32_OFFSET = 0xF7FF, T16_32_TAB };
#undef X

#define X(a,b,c) 0x##b
static const unsigned short thumb_op16[] = { T16_32_TAB };
#define THUMB_OP16(n) (thumb_op16[(n) - (T16_32_OFFSET + 1)])
#undef X

#define X(a,b,c) 0x##c
static const unsigned int thumb_op32[] = { T16_32_TAB };
#define THUMB_OP32(n)	     (thumb_op32[(n) - (T16_32_OFFSET + 1)])
#define THUMB_SETS_FLAGS(n) (THUMB_OP32 (n) & 0x00100000)
#undef X
/* Thumb instruction encoders, in alphabetical order.  */

/* ADDW or SUBW.  */

static void
do_t_add_sub_w (void)
{
  int Rd, Rn;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[1].reg;

  /* If Rn is REG_PC, this is ADR; if Rn is REG_SP, then this
     is the SP-{plus,minus}-immediate form of the instruction.  */
  if (Rn == REG_SP)
    constraint (Rd == REG_PC, BAD_PC);
  else
    reject_bad_reg (Rd);

  inst.instruction |= (Rn << 16) | (Rd << 8);
  inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
}
/* Parse an add or subtract instruction.  We get here with inst.instruction
   equaling any of THUMB_OPCODE_add, adds, sub, or subs.  */

static void
do_t_add_sub (void)
{
  int Rd, Rs, Rn;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg    /* Rd, Rs, foo */
	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */

  if (Rd == REG_PC)
    set_it_insn_type_last ();

  if (unified_syntax)
    {
      bfd_boolean flags;
      bfd_boolean narrow;
      int opcode;

      flags = (inst.instruction == T_MNEM_adds
	       || inst.instruction == T_MNEM_subs);
      if (flags)
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();
      if (!inst.operands[2].isreg)
	{
	  /* Immediate third operand.  */
	  int add;

	  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
	    constraint (Rd == REG_SP && Rs != REG_SP, BAD_SP);

	  add = (inst.instruction == T_MNEM_add
		 || inst.instruction == T_MNEM_adds);
	  opcode = 0;
	  if (inst.size_req != 4)
	    {
	      /* Attempt to use a narrow opcode, with relaxation if
		 appropriate.  */
	      if (Rd == REG_SP && Rs == REG_SP && !flags)
		opcode = add ? T_MNEM_inc_sp : T_MNEM_dec_sp;
	      else if (Rd <= 7 && Rs == REG_SP && add && !flags)
		opcode = T_MNEM_add_sp;
	      else if (Rd <= 7 && Rs == REG_PC && add && !flags)
		opcode = T_MNEM_add_pc;
	      else if (Rd <= 7 && Rs <= 7 && narrow)
		{
		  if (flags)
		    opcode = add ? T_MNEM_addis : T_MNEM_subis;
		  else
		    opcode = add ? T_MNEM_addi : T_MNEM_subi;
		}
	      if (opcode)
		{
		  inst.instruction = THUMB_OP16(opcode);
		  inst.instruction |= (Rd << 4) | Rs;
		  if (inst.reloc.type < BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
		      || inst.reloc.type > BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC)
		  {
		    if (inst.size_req == 2)
		      inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
		    else
		      inst.relax = opcode;
		  }
		}
	      else
		constraint (inst.size_req == 2, BAD_HIREG);
	    }
	  if (inst.size_req == 4
	      || (inst.size_req != 2 && !opcode))
	    {
	      constraint (inst.reloc.type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
			  && inst.reloc.type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC,
			  THUMB1_RELOC_ONLY);
	      if (Rd == REG_PC)
		{
		  /* The only 32-bit form with PC destination is
		     SUBS PC, LR, #const (exception return).  */
		  constraint (add, BAD_PC);
		  constraint (Rs != REG_LR || inst.instruction != T_MNEM_subs,
			      _("only SUBS PC, LR, #const allowed"));
		  constraint (inst.reloc.exp.X_op != O_constant,
			      _("expression too complex"));
		  constraint (inst.reloc.exp.X_add_number < 0
			      || inst.reloc.exp.X_add_number > 0xff,
			      _("immediate value out of range"));
		  inst.instruction = T2_SUBS_PC_LR
				     | inst.reloc.exp.X_add_number;
		  inst.reloc.type = BFD_RELOC_UNUSED;
		}
	      else if (Rs == REG_PC)
		{
		  /* Always use addw/subw.  */
		  inst.instruction = add ? 0xf20f0000 : 0xf2af0000;
		  inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
		}
	      else
		{
		  inst.instruction = THUMB_OP32 (inst.instruction);
		  inst.instruction = (inst.instruction & 0xe1ffffff)
				     | 0x10000000;
		  if (flags)
		    inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
		  else
		    inst.reloc.type = BFD_RELOC_ARM_T32_ADD_IMM;
		}
	      inst.instruction |= Rd << 8;
	      inst.instruction |= Rs << 16;
	    }
	}
      else
	{
	  /* Register third operand, possibly shifted.  */
	  unsigned int value = inst.reloc.exp.X_add_number;
	  unsigned int shift = inst.operands[2].shift_kind;

	  Rn = inst.operands[2].reg;
	  /* See if we can do this with a 16-bit instruction.  */
	  if (!inst.operands[2].shifted && inst.size_req != 4)
	    {
	      if (Rd > 7 || Rs > 7 || Rn > 7)
		narrow = FALSE;

	      if (narrow)
		{
		  inst.instruction = ((inst.instruction == T_MNEM_adds
				       || inst.instruction == T_MNEM_add)
				      ? T_OPCODE_ADD_R3
				      : T_OPCODE_SUB_R3);
		  inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
		  return;
		}

	      if (inst.instruction == T_MNEM_add && (Rd == Rs || Rd == Rn))
		{
		  /* Thumb-1 cores (except v6-M) require at least one high
		     register in a narrow non flag setting add.  */
		  if (Rd > 7 || Rn > 7
		      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2)
		      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_msr))
		    {
		      if (Rd == Rn)
			{
			  Rn = Rs;
			  Rs = Rd;
			}

		      inst.instruction = T_OPCODE_ADD_HI;
		      inst.instruction |= (Rd & 8) << 4;
		      inst.instruction |= (Rd & 7);
		      inst.instruction |= Rn << 3;
		      return;
		    }
		}
	    }

	  constraint (Rd == REG_PC, BAD_PC);
	  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
	    constraint (Rd == REG_SP && Rs != REG_SP, BAD_SP);
	  constraint (Rs == REG_PC, BAD_PC);
	  reject_bad_reg (Rn);

	  /* If we get here, it can't be done in 16 bits.  */
	  constraint (inst.operands[2].shifted && inst.operands[2].immisreg,
		      _("shift must be constant"));
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  constraint (Rd == REG_SP && Rs == REG_SP && value > 3,
		      _("shift value over 3 not allowed in thumb mode"));
	  constraint (Rd == REG_SP && Rs == REG_SP && shift != SHIFT_LSL,
		      _("only LSL shift allowed in thumb mode"));
	  encode_thumb32_shifted_operand (2);
	}
    }
  else
    {
      /* Non-unified (pre-UAL) syntax: only the 16-bit encodings.  */
      constraint (inst.instruction == T_MNEM_adds
		  || inst.instruction == T_MNEM_subs,
		  BAD_THUMB32);

      if (!inst.operands[2].isreg) /* Rd, Rs, #imm */
	{
	  constraint ((Rd > 7 && (Rd != REG_SP || Rs != REG_SP))
		      || (Rs > 7 && Rs != REG_SP && Rs != REG_PC),
		      BAD_HIREG);

	  inst.instruction = (inst.instruction == T_MNEM_add
			      ? 0x0000 : 0x8000);
	  inst.instruction |= (Rd << 4) | Rs;
	  inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
	  return;
	}

      Rn = inst.operands[2].reg;
      constraint (inst.operands[2].shifted, _("unshifted register required"));

      /* We now have Rd, Rs, and Rn set to registers.  */
      if (Rd > 7 || Rs > 7 || Rn > 7)
	{
	  /* Can't do this for SUB.  */
	  constraint (inst.instruction == T_MNEM_sub, BAD_HIREG);
	  inst.instruction = T_OPCODE_ADD_HI;
	  inst.instruction |= (Rd & 8) << 4;
	  inst.instruction |= (Rd & 7);
	  if (Rs == Rd)
	    inst.instruction |= Rn << 3;
	  else if (Rn == Rd)
	    inst.instruction |= Rs << 3;
	  else
	    constraint (1, _("dest must overlap one source register"));
	}
      else
	{
	  inst.instruction = (inst.instruction == T_MNEM_add
			      ? T_OPCODE_ADD_R3 : T_OPCODE_SUB_R3);
	  inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
	}
    }
}
/* Thumb ADR: load a PC-relative address into Rd, choosing between the
   16-bit form (with relaxation), the 32-bit form, and a forced
   16-bit form depending on size_req and the destination register.  */

static void
do_t_adr (void)
{
  unsigned Rd;

  Rd = inst.operands[0].reg;
  reject_bad_reg (Rd);

  if (unified_syntax && inst.size_req == 0 && Rd <= 7)
    {
      /* Defer to section relaxation.  */
      inst.relax = inst.instruction;
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd << 4;
    }
  else if (unified_syntax && inst.size_req != 2)
    {
      /* Generate a 32-bit opcode.  */
      inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= Rd << 8;
      inst.reloc.type = BFD_RELOC_ARM_T32_ADD_PC12;
      inst.reloc.pc_rel = 1;
    }
  else
    {
      /* Generate a 16-bit opcode.  */
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
      inst.reloc.exp.X_add_number -= 4; /* PC relative adjust.  */
      inst.reloc.pc_rel = 1;
      inst.instruction |= Rd << 4;
    }

  /* For a defined Thumb function symbol, bump the addend by one so
     the resulting address has the low (interworking) bit set.  */
  if (inst.reloc.exp.X_op == O_symbol
      && inst.reloc.exp.X_add_symbol != NULL
      && S_IS_DEFINED (inst.reloc.exp.X_add_symbol)
      && THUMB_IS_FUNC (inst.reloc.exp.X_add_symbol))
    inst.reloc.exp.X_add_number += 1;
}
/* Arithmetic instructions for which there is just one 16-bit
   instruction encoding, and it allows only two low registers.
   For maximal compatibility with ARM syntax, we allow three register
   operands even when Thumb-32 instructions are not available, as long
   as the first two are identical.  For instance, both "sbc r0,r1" and
   "sbc r0,r0,r1" are allowed.  */

static void
do_t_arit3 (void)
{
  int Rd, Rs, Rn;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg    /* Rd, Rs, foo */
	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */
  Rn = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rs);
  if (inst.operands[2].isreg)
    reject_bad_reg (Rn);

  if (unified_syntax)
    {
      if (!inst.operands[2].isreg)
	{
	  /* For an immediate, we always generate a 32-bit opcode;
	     section relaxation will shrink it later if possible.  */
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  bfd_boolean narrow;

	  /* See if we can do this with a 16-bit instruction.  */
	  if (THUMB_SETS_FLAGS (inst.instruction))
	    narrow = !in_it_block ();
	  else
	    narrow = in_it_block ();

	  if (Rd > 7 || Rn > 7 || Rs > 7)
	    narrow = FALSE;
	  if (inst.operands[2].shifted)
	    narrow = FALSE;
	  if (inst.size_req == 4)
	    narrow = FALSE;

	  if (narrow
	      && Rd == Rs)
	    {
	      inst.instruction = THUMB_OP16 (inst.instruction);
	      inst.instruction |= Rd;
	      inst.instruction |= Rn << 3;
	      return;
	    }

	  /* If we get here, it can't be done in 16 bits.  */
	  constraint (inst.operands[2].shifted
		      && inst.operands[2].immisreg,
		      _("shift must be constant"));
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  encode_thumb32_shifted_operand (2);
	}
    }
  else
    {
      /* On its face this is a lie - the instruction does set the
	 flags.  However, the only supported mnemonic in this mode
	 says it doesn't.  */
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
		  _("unshifted register required"));
      constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);
      constraint (Rd != Rs,
		  _("dest and source1 must be the same register"));

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;
      inst.instruction |= Rn << 3;
    }
}
/* Similarly, but for instructions where the arithmetic operation is
   commutative, so we can allow either of them to be different from
   the destination operand in a 16-bit instruction.  For instance, all
   three of "adc r0,r1", "adc r0,r0,r1", and "adc r0,r1,r0" are
   accepted.  */

static void
do_t_arit3c (void)
{
  int Rd, Rs, Rn;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg    /* Rd, Rs, foo */
	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */
  Rn = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rs);
  if (inst.operands[2].isreg)
    reject_bad_reg (Rn);

  if (unified_syntax)
    {
      if (!inst.operands[2].isreg)
	{
	  /* For an immediate, we always generate a 32-bit opcode;
	     section relaxation will shrink it later if possible.  */
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  bfd_boolean narrow;

	  /* See if we can do this with a 16-bit instruction.  */
	  if (THUMB_SETS_FLAGS (inst.instruction))
	    narrow = !in_it_block ();
	  else
	    narrow = in_it_block ();

	  if (Rd > 7 || Rn > 7 || Rs > 7)
	    narrow = FALSE;
	  if (inst.operands[2].shifted)
	    narrow = FALSE;
	  if (inst.size_req == 4)
	    narrow = FALSE;

	  if (narrow)
	    {
	      /* Commutative: either source may coincide with Rd.  */
	      if (Rd == Rs)
		{
		  inst.instruction = THUMB_OP16 (inst.instruction);
		  inst.instruction |= Rd;
		  inst.instruction |= Rn << 3;
		  return;
		}
	      if (Rd == Rn)
		{
		  inst.instruction = THUMB_OP16 (inst.instruction);
		  inst.instruction |= Rd;
		  inst.instruction |= Rs << 3;
		  return;
		}
	    }

	  /* If we get here, it can't be done in 16 bits.  */
	  constraint (inst.operands[2].shifted
		      && inst.operands[2].immisreg,
		      _("shift must be constant"));
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  encode_thumb32_shifted_operand (2);
	}
    }
  else
    {
      /* On its face this is a lie - the instruction does set the
	 flags.  However, the only supported mnemonic in this mode
	 says it doesn't.  */
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
		  _("unshifted register required"));
      constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;

      if (Rd == Rs)
	inst.instruction |= Rn << 3;
      else if (Rd == Rn)
	inst.instruction |= Rs << 3;
      else
	constraint (1, _("dest must overlap one source register"));
    }
}
/* Thumb-2 BFC (bit-field clear).  Operand 1 is the LSB, operand 2
   the width.  */

static void
do_t_bfc (void)
{
  unsigned Rd;
  unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
  constraint (msb > 32, _("bit-field extends past end of register"));
  /* The instruction encoding stores the LSB and MSB,
     not the LSB and width.  */
  Rd = inst.operands[0].reg;
  reject_bad_reg (Rd);
  inst.instruction |= Rd << 8;
  /* LSB is split: bits 4-2 into imm3, bits 1-0 into imm2.  */
  inst.instruction |= (inst.operands[1].imm & 0x1c) << 10;
  inst.instruction |= (inst.operands[1].imm & 0x03) << 6;
  inst.instruction |= msb - 1;
}

/* Thumb-2 BFI (bit-field insert).  */

static void
do_t_bfi (void)
{
  int Rd, Rn;
  unsigned int msb;

  Rd = inst.operands[0].reg;
  reject_bad_reg (Rd);

  /* #0 in second position is alternative syntax for bfc, which is
     the same instruction but with REG_PC in the Rm field.  */
  if (!inst.operands[1].isreg)
    Rn = REG_PC;
  else
    {
      Rn = inst.operands[1].reg;
      reject_bad_reg (Rn);
    }

  msb = inst.operands[2].imm + inst.operands[3].imm;
  constraint (msb > 32, _("bit-field extends past end of register"));
  /* The instruction encoding stores the LSB and MSB,
     not the LSB and width.  */
  inst.instruction |= Rd << 8;
  inst.instruction |= Rn << 16;
  inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
  inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
  inst.instruction |= msb - 1;
}

/* Thumb-2 SBFX/UBFX (bit-field extract).  Here the width is encoded
   directly (width - 1), not as an MSB.  */

static void
do_t_bfx (void)
{
  unsigned Rd, Rn;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[1].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rn);

  constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
	      _("bit-field extends past end of register"));
  inst.instruction |= Rd << 8;
  inst.instruction |= Rn << 16;
  inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
  inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
  inst.instruction |= inst.operands[3].imm - 1;
}
11034 /* ARM V5 Thumb BLX (argument parse)
11035 BLX <target_addr> which is BLX(1)
11036 BLX <Rm> which is BLX(2)
11037 Unfortunately, there are two different opcodes for this mnemonic.
11038 So, the insns[].value is not used, and the code here zaps values
11039 into inst.instruction.
11041 ??? How to take advantage of the additional two bits of displacement
11042 available in Thumb32 mode? Need new relocation? */
11047 set_it_insn_type_last ();
11049 if (inst
.operands
[0].isreg
)
11051 constraint (inst
.operands
[0].reg
== REG_PC
, BAD_PC
);
11052 /* We have a register, so this is BLX(2). */
11053 inst
.instruction
|= inst
.operands
[0].reg
<< 3;
11057 /* No register. This must be BLX(1). */
11058 inst
.instruction
= 0xf000e800;
11059 encode_branch (BFD_RELOC_THUMB_PCREL_BLX
);
11068 bfd_reloc_code_real_type reloc
;
11071 set_it_insn_type (IF_INSIDE_IT_LAST_INSN
);
11073 if (in_it_block ())
11075 /* Conditional branches inside IT blocks are encoded as unconditional
11077 cond
= COND_ALWAYS
;
11082 if (cond
!= COND_ALWAYS
)
11083 opcode
= T_MNEM_bcond
;
11085 opcode
= inst
.instruction
;
11088 && (inst
.size_req
== 4
11089 || (inst
.size_req
!= 2
11090 && (inst
.operands
[0].hasreloc
11091 || inst
.reloc
.exp
.X_op
== O_constant
))))
11093 inst
.instruction
= THUMB_OP32(opcode
);
11094 if (cond
== COND_ALWAYS
)
11095 reloc
= BFD_RELOC_THUMB_PCREL_BRANCH25
;
11098 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2
),
11099 _("selected architecture does not support "
11100 "wide conditional branch instruction"));
11102 gas_assert (cond
!= 0xF);
11103 inst
.instruction
|= cond
<< 22;
11104 reloc
= BFD_RELOC_THUMB_PCREL_BRANCH20
;
11109 inst
.instruction
= THUMB_OP16(opcode
);
11110 if (cond
== COND_ALWAYS
)
11111 reloc
= BFD_RELOC_THUMB_PCREL_BRANCH12
;
11114 inst
.instruction
|= cond
<< 8;
11115 reloc
= BFD_RELOC_THUMB_PCREL_BRANCH9
;
11117 /* Allow section relaxation. */
11118 if (unified_syntax
&& inst
.size_req
!= 2)
11119 inst
.relax
= opcode
;
11121 inst
.reloc
.type
= reloc
;
11122 inst
.reloc
.pc_rel
= 1;
11125 /* Actually do the work for Thumb state bkpt and hlt. The only difference
11126 between the two is the maximum immediate allowed - which is passed in
11129 do_t_bkpt_hlt1 (int range
)
11131 constraint (inst
.cond
!= COND_ALWAYS
,
11132 _("instruction is always unconditional"));
11133 if (inst
.operands
[0].present
)
11135 constraint (inst
.operands
[0].imm
> range
,
11136 _("immediate value out of range"));
11137 inst
.instruction
|= inst
.operands
[0].imm
;
11140 set_it_insn_type (NEUTRAL_IT_INSN
);
11146 do_t_bkpt_hlt1 (63);
11152 do_t_bkpt_hlt1 (255);
11156 do_t_branch23 (void)
11158 set_it_insn_type_last ();
11159 encode_branch (BFD_RELOC_THUMB_PCREL_BRANCH23
);
11161 /* md_apply_fix blows up with 'bl foo(PLT)' where foo is defined in
11162 this file. We used to simply ignore the PLT reloc type here --
11163 the branch encoding is now needed to deal with TLSCALL relocs.
11164 So if we see a PLT reloc now, put it back to how it used to be to
11165 keep the preexisting behaviour. */
11166 if (inst
.reloc
.type
== BFD_RELOC_ARM_PLT32
)
11167 inst
.reloc
.type
= BFD_RELOC_THUMB_PCREL_BRANCH23
;
11169 #if defined(OBJ_COFF)
11170 /* If the destination of the branch is a defined symbol which does not have
11171 the THUMB_FUNC attribute, then we must be calling a function which has
11172 the (interfacearm) attribute. We look for the Thumb entry point to that
11173 function and change the branch to refer to that function instead. */
11174 if ( inst
.reloc
.exp
.X_op
== O_symbol
11175 && inst
.reloc
.exp
.X_add_symbol
!= NULL
11176 && S_IS_DEFINED (inst
.reloc
.exp
.X_add_symbol
)
11177 && ! THUMB_IS_FUNC (inst
.reloc
.exp
.X_add_symbol
))
11178 inst
.reloc
.exp
.X_add_symbol
=
11179 find_real_start (inst
.reloc
.exp
.X_add_symbol
);
11186 set_it_insn_type_last ();
11187 inst
.instruction
|= inst
.operands
[0].reg
<< 3;
11188 /* ??? FIXME: Should add a hacky reloc here if reg is REG_PC. The reloc
11189 should cause the alignment to be checked once it is known. This is
11190 because BX PC only works if the instruction is word aligned. */
11198 set_it_insn_type_last ();
11199 Rm
= inst
.operands
[0].reg
;
11200 reject_bad_reg (Rm
);
11201 inst
.instruction
|= Rm
<< 16;
11210 Rd
= inst
.operands
[0].reg
;
11211 Rm
= inst
.operands
[1].reg
;
11213 reject_bad_reg (Rd
);
11214 reject_bad_reg (Rm
);
11216 inst
.instruction
|= Rd
<< 8;
11217 inst
.instruction
|= Rm
<< 16;
11218 inst
.instruction
|= Rm
;
11224 set_it_insn_type (OUTSIDE_IT_INSN
);
11225 inst
.instruction
|= inst
.operands
[0].imm
;
11231 set_it_insn_type (OUTSIDE_IT_INSN
);
11233 && (inst
.operands
[1].present
|| inst
.size_req
== 4)
11234 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6_notm
))
11236 unsigned int imod
= (inst
.instruction
& 0x0030) >> 4;
11237 inst
.instruction
= 0xf3af8000;
11238 inst
.instruction
|= imod
<< 9;
11239 inst
.instruction
|= inst
.operands
[0].imm
<< 5;
11240 if (inst
.operands
[1].present
)
11241 inst
.instruction
|= 0x100 | inst
.operands
[1].imm
;
11245 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
)
11246 && (inst
.operands
[0].imm
& 4),
11247 _("selected processor does not support 'A' form "
11248 "of this instruction"));
11249 constraint (inst
.operands
[1].present
|| inst
.size_req
== 4,
11250 _("Thumb does not support the 2-argument "
11251 "form of this instruction"));
11252 inst
.instruction
|= inst
.operands
[0].imm
;
11256 /* THUMB CPY instruction (argument parse). */
11261 if (inst
.size_req
== 4)
11263 inst
.instruction
= THUMB_OP32 (T_MNEM_mov
);
11264 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
11265 inst
.instruction
|= inst
.operands
[1].reg
;
11269 inst
.instruction
|= (inst
.operands
[0].reg
& 0x8) << 4;
11270 inst
.instruction
|= (inst
.operands
[0].reg
& 0x7);
11271 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
11278 set_it_insn_type (OUTSIDE_IT_INSN
);
11279 constraint (inst
.operands
[0].reg
> 7, BAD_HIREG
);
11280 inst
.instruction
|= inst
.operands
[0].reg
;
11281 inst
.reloc
.pc_rel
= 1;
11282 inst
.reloc
.type
= BFD_RELOC_THUMB_PCREL_BRANCH7
;
11288 inst
.instruction
|= inst
.operands
[0].imm
;
11294 unsigned Rd
, Rn
, Rm
;
11296 Rd
= inst
.operands
[0].reg
;
11297 Rn
= (inst
.operands
[1].present
11298 ? inst
.operands
[1].reg
: Rd
);
11299 Rm
= inst
.operands
[2].reg
;
11301 reject_bad_reg (Rd
);
11302 reject_bad_reg (Rn
);
11303 reject_bad_reg (Rm
);
11305 inst
.instruction
|= Rd
<< 8;
11306 inst
.instruction
|= Rn
<< 16;
11307 inst
.instruction
|= Rm
;
11313 if (unified_syntax
&& inst
.size_req
== 4)
11314 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
11316 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11322 unsigned int cond
= inst
.operands
[0].imm
;
11324 set_it_insn_type (IT_INSN
);
11325 now_it
.mask
= (inst
.instruction
& 0xf) | 0x10;
11327 now_it
.warn_deprecated
= FALSE
;
11329 /* If the condition is a negative condition, invert the mask. */
11330 if ((cond
& 0x1) == 0x0)
11332 unsigned int mask
= inst
.instruction
& 0x000f;
11334 if ((mask
& 0x7) == 0)
11336 /* No conversion needed. */
11337 now_it
.block_length
= 1;
11339 else if ((mask
& 0x3) == 0)
11342 now_it
.block_length
= 2;
11344 else if ((mask
& 0x1) == 0)
11347 now_it
.block_length
= 3;
11352 now_it
.block_length
= 4;
11355 inst
.instruction
&= 0xfff0;
11356 inst
.instruction
|= mask
;
11359 inst
.instruction
|= cond
<< 4;
11362 /* Helper function used for both push/pop and ldm/stm. */
11364 encode_thumb2_ldmstm (int base
, unsigned mask
, bfd_boolean writeback
)
11368 load
= (inst
.instruction
& (1 << 20)) != 0;
11370 if (mask
& (1 << 13))
11371 inst
.error
= _("SP not allowed in register list");
11373 if ((mask
& (1 << base
)) != 0
11375 inst
.error
= _("having the base register in the register list when "
11376 "using write back is UNPREDICTABLE");
11380 if (mask
& (1 << 15))
11382 if (mask
& (1 << 14))
11383 inst
.error
= _("LR and PC should not both be in register list");
11385 set_it_insn_type_last ();
11390 if (mask
& (1 << 15))
11391 inst
.error
= _("PC not allowed in register list");
11394 if ((mask
& (mask
- 1)) == 0)
11396 /* Single register transfers implemented as str/ldr. */
11399 if (inst
.instruction
& (1 << 23))
11400 inst
.instruction
= 0x00000b04; /* ia! -> [base], #4 */
11402 inst
.instruction
= 0x00000d04; /* db! -> [base, #-4]! */
11406 if (inst
.instruction
& (1 << 23))
11407 inst
.instruction
= 0x00800000; /* ia -> [base] */
11409 inst
.instruction
= 0x00000c04; /* db -> [base, #-4] */
11412 inst
.instruction
|= 0xf8400000;
11414 inst
.instruction
|= 0x00100000;
11416 mask
= ffs (mask
) - 1;
11419 else if (writeback
)
11420 inst
.instruction
|= WRITE_BACK
;
11422 inst
.instruction
|= mask
;
11423 inst
.instruction
|= base
<< 16;
11429 /* This really doesn't seem worth it. */
11430 constraint (inst
.reloc
.type
!= BFD_RELOC_UNUSED
,
11431 _("expression too complex"));
11432 constraint (inst
.operands
[1].writeback
,
11433 _("Thumb load/store multiple does not support {reglist}^"));
11435 if (unified_syntax
)
11437 bfd_boolean narrow
;
11441 /* See if we can use a 16-bit instruction. */
11442 if (inst
.instruction
< 0xffff /* not ldmdb/stmdb */
11443 && inst
.size_req
!= 4
11444 && !(inst
.operands
[1].imm
& ~0xff))
11446 mask
= 1 << inst
.operands
[0].reg
;
11448 if (inst
.operands
[0].reg
<= 7)
11450 if (inst
.instruction
== T_MNEM_stmia
11451 ? inst
.operands
[0].writeback
11452 : (inst
.operands
[0].writeback
11453 == !(inst
.operands
[1].imm
& mask
)))
11455 if (inst
.instruction
== T_MNEM_stmia
11456 && (inst
.operands
[1].imm
& mask
)
11457 && (inst
.operands
[1].imm
& (mask
- 1)))
11458 as_warn (_("value stored for r%d is UNKNOWN"),
11459 inst
.operands
[0].reg
);
11461 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11462 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
11463 inst
.instruction
|= inst
.operands
[1].imm
;
11466 else if ((inst
.operands
[1].imm
& (inst
.operands
[1].imm
-1)) == 0)
11468 /* This means 1 register in reg list one of 3 situations:
11469 1. Instruction is stmia, but without writeback.
11470 2. lmdia without writeback, but with Rn not in
11472 3. ldmia with writeback, but with Rn in reglist.
11473 Case 3 is UNPREDICTABLE behaviour, so we handle
11474 case 1 and 2 which can be converted into a 16-bit
11475 str or ldr. The SP cases are handled below. */
11476 unsigned long opcode
;
11477 /* First, record an error for Case 3. */
11478 if (inst
.operands
[1].imm
& mask
11479 && inst
.operands
[0].writeback
)
11481 _("having the base register in the register list when "
11482 "using write back is UNPREDICTABLE");
11484 opcode
= (inst
.instruction
== T_MNEM_stmia
? T_MNEM_str
11486 inst
.instruction
= THUMB_OP16 (opcode
);
11487 inst
.instruction
|= inst
.operands
[0].reg
<< 3;
11488 inst
.instruction
|= (ffs (inst
.operands
[1].imm
)-1);
11492 else if (inst
.operands
[0] .reg
== REG_SP
)
11494 if (inst
.operands
[0].writeback
)
11497 THUMB_OP16 (inst
.instruction
== T_MNEM_stmia
11498 ? T_MNEM_push
: T_MNEM_pop
);
11499 inst
.instruction
|= inst
.operands
[1].imm
;
11502 else if ((inst
.operands
[1].imm
& (inst
.operands
[1].imm
-1)) == 0)
11505 THUMB_OP16 (inst
.instruction
== T_MNEM_stmia
11506 ? T_MNEM_str_sp
: T_MNEM_ldr_sp
);
11507 inst
.instruction
|= ((ffs (inst
.operands
[1].imm
)-1) << 8);
11515 if (inst
.instruction
< 0xffff)
11516 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
11518 encode_thumb2_ldmstm (inst
.operands
[0].reg
, inst
.operands
[1].imm
,
11519 inst
.operands
[0].writeback
);
11524 constraint (inst
.operands
[0].reg
> 7
11525 || (inst
.operands
[1].imm
& ~0xff), BAD_HIREG
);
11526 constraint (inst
.instruction
!= T_MNEM_ldmia
11527 && inst
.instruction
!= T_MNEM_stmia
,
11528 _("Thumb-2 instruction only valid in unified syntax"));
11529 if (inst
.instruction
== T_MNEM_stmia
)
11531 if (!inst
.operands
[0].writeback
)
11532 as_warn (_("this instruction will write back the base register"));
11533 if ((inst
.operands
[1].imm
& (1 << inst
.operands
[0].reg
))
11534 && (inst
.operands
[1].imm
& ((1 << inst
.operands
[0].reg
) - 1)))
11535 as_warn (_("value stored for r%d is UNKNOWN"),
11536 inst
.operands
[0].reg
);
11540 if (!inst
.operands
[0].writeback
11541 && !(inst
.operands
[1].imm
& (1 << inst
.operands
[0].reg
)))
11542 as_warn (_("this instruction will write back the base register"));
11543 else if (inst
.operands
[0].writeback
11544 && (inst
.operands
[1].imm
& (1 << inst
.operands
[0].reg
)))
11545 as_warn (_("this instruction will not write back the base register"));
11548 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11549 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
11550 inst
.instruction
|= inst
.operands
[1].imm
;
11557 constraint (!inst
.operands
[1].isreg
|| !inst
.operands
[1].preind
11558 || inst
.operands
[1].postind
|| inst
.operands
[1].writeback
11559 || inst
.operands
[1].immisreg
|| inst
.operands
[1].shifted
11560 || inst
.operands
[1].negative
,
11563 constraint ((inst
.operands
[1].reg
== REG_PC
), BAD_PC
);
11565 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
11566 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
11567 inst
.reloc
.type
= BFD_RELOC_ARM_T32_OFFSET_U8
;
11573 if (!inst
.operands
[1].present
)
11575 constraint (inst
.operands
[0].reg
== REG_LR
,
11576 _("r14 not allowed as first register "
11577 "when second register is omitted"));
11578 inst
.operands
[1].reg
= inst
.operands
[0].reg
+ 1;
11580 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
,
11583 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
11584 inst
.instruction
|= inst
.operands
[1].reg
<< 8;
11585 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
11591 unsigned long opcode
;
11594 if (inst
.operands
[0].isreg
11595 && !inst
.operands
[0].preind
11596 && inst
.operands
[0].reg
== REG_PC
)
11597 set_it_insn_type_last ();
11599 opcode
= inst
.instruction
;
11600 if (unified_syntax
)
11602 if (!inst
.operands
[1].isreg
)
11604 if (opcode
<= 0xffff)
11605 inst
.instruction
= THUMB_OP32 (opcode
);
11606 if (move_or_literal_pool (0, CONST_THUMB
, /*mode_3=*/FALSE
))
11609 if (inst
.operands
[1].isreg
11610 && !inst
.operands
[1].writeback
11611 && !inst
.operands
[1].shifted
&& !inst
.operands
[1].postind
11612 && !inst
.operands
[1].negative
&& inst
.operands
[0].reg
<= 7
11613 && opcode
<= 0xffff
11614 && inst
.size_req
!= 4)
11616 /* Insn may have a 16-bit form. */
11617 Rn
= inst
.operands
[1].reg
;
11618 if (inst
.operands
[1].immisreg
)
11620 inst
.instruction
= THUMB_OP16 (opcode
);
11622 if (Rn
<= 7 && inst
.operands
[1].imm
<= 7)
11624 else if (opcode
!= T_MNEM_ldr
&& opcode
!= T_MNEM_str
)
11625 reject_bad_reg (inst
.operands
[1].imm
);
11627 else if ((Rn
<= 7 && opcode
!= T_MNEM_ldrsh
11628 && opcode
!= T_MNEM_ldrsb
)
11629 || ((Rn
== REG_PC
|| Rn
== REG_SP
) && opcode
== T_MNEM_ldr
)
11630 || (Rn
== REG_SP
&& opcode
== T_MNEM_str
))
11637 if (inst
.reloc
.pc_rel
)
11638 opcode
= T_MNEM_ldr_pc2
;
11640 opcode
= T_MNEM_ldr_pc
;
11644 if (opcode
== T_MNEM_ldr
)
11645 opcode
= T_MNEM_ldr_sp
;
11647 opcode
= T_MNEM_str_sp
;
11649 inst
.instruction
= inst
.operands
[0].reg
<< 8;
11653 inst
.instruction
= inst
.operands
[0].reg
;
11654 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
11656 inst
.instruction
|= THUMB_OP16 (opcode
);
11657 if (inst
.size_req
== 2)
11658 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_OFFSET
;
11660 inst
.relax
= opcode
;
11664 /* Definitely a 32-bit variant. */
11666 /* Warning for Erratum 752419. */
11667 if (opcode
== T_MNEM_ldr
11668 && inst
.operands
[0].reg
== REG_SP
11669 && inst
.operands
[1].writeback
== 1
11670 && !inst
.operands
[1].immisreg
)
11672 if (no_cpu_selected ()
11673 || (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v7
)
11674 && !ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v7a
)
11675 && !ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v7r
)))
11676 as_warn (_("This instruction may be unpredictable "
11677 "if executed on M-profile cores "
11678 "with interrupts enabled."));
11681 /* Do some validations regarding addressing modes. */
11682 if (inst
.operands
[1].immisreg
)
11683 reject_bad_reg (inst
.operands
[1].imm
);
11685 constraint (inst
.operands
[1].writeback
== 1
11686 && inst
.operands
[0].reg
== inst
.operands
[1].reg
,
11689 inst
.instruction
= THUMB_OP32 (opcode
);
11690 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
11691 encode_thumb32_addr_mode (1, /*is_t=*/FALSE
, /*is_d=*/FALSE
);
11692 check_ldr_r15_aligned ();
11696 constraint (inst
.operands
[0].reg
> 7, BAD_HIREG
);
11698 if (inst
.instruction
== T_MNEM_ldrsh
|| inst
.instruction
== T_MNEM_ldrsb
)
11700 /* Only [Rn,Rm] is acceptable. */
11701 constraint (inst
.operands
[1].reg
> 7 || inst
.operands
[1].imm
> 7, BAD_HIREG
);
11702 constraint (!inst
.operands
[1].isreg
|| !inst
.operands
[1].immisreg
11703 || inst
.operands
[1].postind
|| inst
.operands
[1].shifted
11704 || inst
.operands
[1].negative
,
11705 _("Thumb does not support this addressing mode"));
11706 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11710 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11711 if (!inst
.operands
[1].isreg
)
11712 if (move_or_literal_pool (0, CONST_THUMB
, /*mode_3=*/FALSE
))
11715 constraint (!inst
.operands
[1].preind
11716 || inst
.operands
[1].shifted
11717 || inst
.operands
[1].writeback
,
11718 _("Thumb does not support this addressing mode"));
11719 if (inst
.operands
[1].reg
== REG_PC
|| inst
.operands
[1].reg
== REG_SP
)
11721 constraint (inst
.instruction
& 0x0600,
11722 _("byte or halfword not valid for base register"));
11723 constraint (inst
.operands
[1].reg
== REG_PC
11724 && !(inst
.instruction
& THUMB_LOAD_BIT
),
11725 _("r15 based store not allowed"));
11726 constraint (inst
.operands
[1].immisreg
,
11727 _("invalid base register for register offset"));
11729 if (inst
.operands
[1].reg
== REG_PC
)
11730 inst
.instruction
= T_OPCODE_LDR_PC
;
11731 else if (inst
.instruction
& THUMB_LOAD_BIT
)
11732 inst
.instruction
= T_OPCODE_LDR_SP
;
11734 inst
.instruction
= T_OPCODE_STR_SP
;
11736 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
11737 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_OFFSET
;
11741 constraint (inst
.operands
[1].reg
> 7, BAD_HIREG
);
11742 if (!inst
.operands
[1].immisreg
)
11744 /* Immediate offset. */
11745 inst
.instruction
|= inst
.operands
[0].reg
;
11746 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
11747 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_OFFSET
;
11751 /* Register offset. */
11752 constraint (inst
.operands
[1].imm
> 7, BAD_HIREG
);
11753 constraint (inst
.operands
[1].negative
,
11754 _("Thumb does not support this addressing mode"));
11757 switch (inst
.instruction
)
11759 case T_OPCODE_STR_IW
: inst
.instruction
= T_OPCODE_STR_RW
; break;
11760 case T_OPCODE_STR_IH
: inst
.instruction
= T_OPCODE_STR_RH
; break;
11761 case T_OPCODE_STR_IB
: inst
.instruction
= T_OPCODE_STR_RB
; break;
11762 case T_OPCODE_LDR_IW
: inst
.instruction
= T_OPCODE_LDR_RW
; break;
11763 case T_OPCODE_LDR_IH
: inst
.instruction
= T_OPCODE_LDR_RH
; break;
11764 case T_OPCODE_LDR_IB
: inst
.instruction
= T_OPCODE_LDR_RB
; break;
11765 case 0x5600 /* ldrsb */:
11766 case 0x5e00 /* ldrsh */: break;
11770 inst
.instruction
|= inst
.operands
[0].reg
;
11771 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
11772 inst
.instruction
|= inst
.operands
[1].imm
<< 6;
11778 if (!inst
.operands
[1].present
)
11780 inst
.operands
[1].reg
= inst
.operands
[0].reg
+ 1;
11781 constraint (inst
.operands
[0].reg
== REG_LR
,
11782 _("r14 not allowed here"));
11783 constraint (inst
.operands
[0].reg
== REG_R12
,
11784 _("r12 not allowed here"));
11787 if (inst
.operands
[2].writeback
11788 && (inst
.operands
[0].reg
== inst
.operands
[2].reg
11789 || inst
.operands
[1].reg
== inst
.operands
[2].reg
))
11790 as_warn (_("base register written back, and overlaps "
11791 "one of transfer registers"));
11793 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
11794 inst
.instruction
|= inst
.operands
[1].reg
<< 8;
11795 encode_thumb32_addr_mode (2, /*is_t=*/FALSE
, /*is_d=*/TRUE
);
11801 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
11802 encode_thumb32_addr_mode (1, /*is_t=*/TRUE
, /*is_d=*/FALSE
);
11808 unsigned Rd
, Rn
, Rm
, Ra
;
11810 Rd
= inst
.operands
[0].reg
;
11811 Rn
= inst
.operands
[1].reg
;
11812 Rm
= inst
.operands
[2].reg
;
11813 Ra
= inst
.operands
[3].reg
;
11815 reject_bad_reg (Rd
);
11816 reject_bad_reg (Rn
);
11817 reject_bad_reg (Rm
);
11818 reject_bad_reg (Ra
);
11820 inst
.instruction
|= Rd
<< 8;
11821 inst
.instruction
|= Rn
<< 16;
11822 inst
.instruction
|= Rm
;
11823 inst
.instruction
|= Ra
<< 12;
11829 unsigned RdLo
, RdHi
, Rn
, Rm
;
11831 RdLo
= inst
.operands
[0].reg
;
11832 RdHi
= inst
.operands
[1].reg
;
11833 Rn
= inst
.operands
[2].reg
;
11834 Rm
= inst
.operands
[3].reg
;
11836 reject_bad_reg (RdLo
);
11837 reject_bad_reg (RdHi
);
11838 reject_bad_reg (Rn
);
11839 reject_bad_reg (Rm
);
11841 inst
.instruction
|= RdLo
<< 12;
11842 inst
.instruction
|= RdHi
<< 8;
11843 inst
.instruction
|= Rn
<< 16;
11844 inst
.instruction
|= Rm
;
11848 do_t_mov_cmp (void)
11852 Rn
= inst
.operands
[0].reg
;
11853 Rm
= inst
.operands
[1].reg
;
11856 set_it_insn_type_last ();
11858 if (unified_syntax
)
11860 int r0off
= (inst
.instruction
== T_MNEM_mov
11861 || inst
.instruction
== T_MNEM_movs
) ? 8 : 16;
11862 unsigned long opcode
;
11863 bfd_boolean narrow
;
11864 bfd_boolean low_regs
;
11866 low_regs
= (Rn
<= 7 && Rm
<= 7);
11867 opcode
= inst
.instruction
;
11868 if (in_it_block ())
11869 narrow
= opcode
!= T_MNEM_movs
;
11871 narrow
= opcode
!= T_MNEM_movs
|| low_regs
;
11872 if (inst
.size_req
== 4
11873 || inst
.operands
[1].shifted
)
11876 /* MOVS PC, LR is encoded as SUBS PC, LR, #0. */
11877 if (opcode
== T_MNEM_movs
&& inst
.operands
[1].isreg
11878 && !inst
.operands
[1].shifted
11882 inst
.instruction
= T2_SUBS_PC_LR
;
11886 if (opcode
== T_MNEM_cmp
)
11888 constraint (Rn
== REG_PC
, BAD_PC
);
11891 /* In the Thumb-2 ISA, use of R13 as Rm is deprecated,
11893 warn_deprecated_sp (Rm
);
11894 /* R15 was documented as a valid choice for Rm in ARMv6,
11895 but as UNPREDICTABLE in ARMv7. ARM's proprietary
11896 tools reject R15, so we do too. */
11897 constraint (Rm
== REG_PC
, BAD_PC
);
11900 reject_bad_reg (Rm
);
11902 else if (opcode
== T_MNEM_mov
11903 || opcode
== T_MNEM_movs
)
11905 if (inst
.operands
[1].isreg
)
11907 if (opcode
== T_MNEM_movs
)
11909 reject_bad_reg (Rn
);
11910 reject_bad_reg (Rm
);
11914 /* This is mov.n. */
11915 if ((Rn
== REG_SP
|| Rn
== REG_PC
)
11916 && (Rm
== REG_SP
|| Rm
== REG_PC
))
11918 as_tsktsk (_("Use of r%u as a source register is "
11919 "deprecated when r%u is the destination "
11920 "register."), Rm
, Rn
);
11925 /* This is mov.w. */
11926 constraint (Rn
== REG_PC
, BAD_PC
);
11927 constraint (Rm
== REG_PC
, BAD_PC
);
11928 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8
))
11929 constraint (Rn
== REG_SP
&& Rm
== REG_SP
, BAD_SP
);
11933 reject_bad_reg (Rn
);
11936 if (!inst
.operands
[1].isreg
)
11938 /* Immediate operand. */
11939 if (!in_it_block () && opcode
== T_MNEM_mov
)
11941 if (low_regs
&& narrow
)
11943 inst
.instruction
= THUMB_OP16 (opcode
);
11944 inst
.instruction
|= Rn
<< 8;
11945 if (inst
.reloc
.type
< BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
11946 || inst
.reloc
.type
> BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
)
11948 if (inst
.size_req
== 2)
11949 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_IMM
;
11951 inst
.relax
= opcode
;
11956 constraint (inst
.reloc
.type
>= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
11957 && inst
.reloc
.type
<= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
,
11958 THUMB1_RELOC_ONLY
);
11960 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
11961 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
11962 inst
.instruction
|= Rn
<< r0off
;
11963 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
11966 else if (inst
.operands
[1].shifted
&& inst
.operands
[1].immisreg
11967 && (inst
.instruction
== T_MNEM_mov
11968 || inst
.instruction
== T_MNEM_movs
))
11970 /* Register shifts are encoded as separate shift instructions. */
11971 bfd_boolean flags
= (inst
.instruction
== T_MNEM_movs
);
11973 if (in_it_block ())
11978 if (inst
.size_req
== 4)
11981 if (!low_regs
|| inst
.operands
[1].imm
> 7)
11987 switch (inst
.operands
[1].shift_kind
)
11990 opcode
= narrow
? T_OPCODE_LSL_R
: THUMB_OP32 (T_MNEM_lsl
);
11993 opcode
= narrow
? T_OPCODE_ASR_R
: THUMB_OP32 (T_MNEM_asr
);
11996 opcode
= narrow
? T_OPCODE_LSR_R
: THUMB_OP32 (T_MNEM_lsr
);
11999 opcode
= narrow
? T_OPCODE_ROR_R
: THUMB_OP32 (T_MNEM_ror
);
12005 inst
.instruction
= opcode
;
12008 inst
.instruction
|= Rn
;
12009 inst
.instruction
|= inst
.operands
[1].imm
<< 3;
12014 inst
.instruction
|= CONDS_BIT
;
12016 inst
.instruction
|= Rn
<< 8;
12017 inst
.instruction
|= Rm
<< 16;
12018 inst
.instruction
|= inst
.operands
[1].imm
;
12023 /* Some mov with immediate shift have narrow variants.
12024 Register shifts are handled above. */
12025 if (low_regs
&& inst
.operands
[1].shifted
12026 && (inst
.instruction
== T_MNEM_mov
12027 || inst
.instruction
== T_MNEM_movs
))
12029 if (in_it_block ())
12030 narrow
= (inst
.instruction
== T_MNEM_mov
);
12032 narrow
= (inst
.instruction
== T_MNEM_movs
);
12037 switch (inst
.operands
[1].shift_kind
)
12039 case SHIFT_LSL
: inst
.instruction
= T_OPCODE_LSL_I
; break;
12040 case SHIFT_LSR
: inst
.instruction
= T_OPCODE_LSR_I
; break;
12041 case SHIFT_ASR
: inst
.instruction
= T_OPCODE_ASR_I
; break;
12042 default: narrow
= FALSE
; break;
12048 inst
.instruction
|= Rn
;
12049 inst
.instruction
|= Rm
<< 3;
12050 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_SHIFT
;
12054 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12055 inst
.instruction
|= Rn
<< r0off
;
12056 encode_thumb32_shifted_operand (1);
12060 switch (inst
.instruction
)
12063 /* In v4t or v5t a move of two lowregs produces unpredictable
12064 results. Don't allow this. */
12067 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6
),
12068 "MOV Rd, Rs with two low registers is not "
12069 "permitted on this architecture");
12070 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
12074 inst
.instruction
= T_OPCODE_MOV_HR
;
12075 inst
.instruction
|= (Rn
& 0x8) << 4;
12076 inst
.instruction
|= (Rn
& 0x7);
12077 inst
.instruction
|= Rm
<< 3;
12081 /* We know we have low registers at this point.
12082 Generate LSLS Rd, Rs, #0. */
12083 inst
.instruction
= T_OPCODE_LSL_I
;
12084 inst
.instruction
|= Rn
;
12085 inst
.instruction
|= Rm
<< 3;
12091 inst
.instruction
= T_OPCODE_CMP_LR
;
12092 inst
.instruction
|= Rn
;
12093 inst
.instruction
|= Rm
<< 3;
12097 inst
.instruction
= T_OPCODE_CMP_HR
;
12098 inst
.instruction
|= (Rn
& 0x8) << 4;
12099 inst
.instruction
|= (Rn
& 0x7);
12100 inst
.instruction
|= Rm
<< 3;
12107 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12109 /* PR 10443: Do not silently ignore shifted operands. */
12110 constraint (inst
.operands
[1].shifted
,
12111 _("shifts in CMP/MOV instructions are only supported in unified syntax"));
12113 if (inst
.operands
[1].isreg
)
12115 if (Rn
< 8 && Rm
< 8)
12117 /* A move of two lowregs is encoded as ADD Rd, Rs, #0
12118 since a MOV instruction produces unpredictable results. */
12119 if (inst
.instruction
== T_OPCODE_MOV_I8
)
12120 inst
.instruction
= T_OPCODE_ADD_I3
;
12122 inst
.instruction
= T_OPCODE_CMP_LR
;
12124 inst
.instruction
|= Rn
;
12125 inst
.instruction
|= Rm
<< 3;
12129 if (inst
.instruction
== T_OPCODE_MOV_I8
)
12130 inst
.instruction
= T_OPCODE_MOV_HR
;
12132 inst
.instruction
= T_OPCODE_CMP_HR
;
12138 constraint (Rn
> 7,
12139 _("only lo regs allowed with immediate"));
12140 inst
.instruction
|= Rn
<< 8;
12141 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_IMM
;
12152 top
= (inst
.instruction
& 0x00800000) != 0;
12153 if (inst
.reloc
.type
== BFD_RELOC_ARM_MOVW
)
12155 constraint (top
, _(":lower16: not allowed in this instruction"));
12156 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_MOVW
;
12158 else if (inst
.reloc
.type
== BFD_RELOC_ARM_MOVT
)
12160 constraint (!top
, _(":upper16: not allowed in this instruction"));
12161 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_MOVT
;
12164 Rd
= inst
.operands
[0].reg
;
12165 reject_bad_reg (Rd
);
12167 inst
.instruction
|= Rd
<< 8;
12168 if (inst
.reloc
.type
== BFD_RELOC_UNUSED
)
12170 imm
= inst
.reloc
.exp
.X_add_number
;
12171 inst
.instruction
|= (imm
& 0xf000) << 4;
12172 inst
.instruction
|= (imm
& 0x0800) << 15;
12173 inst
.instruction
|= (imm
& 0x0700) << 4;
12174 inst
.instruction
|= (imm
& 0x00ff);
12179 do_t_mvn_tst (void)
12183 Rn
= inst
.operands
[0].reg
;
12184 Rm
= inst
.operands
[1].reg
;
12186 if (inst
.instruction
== T_MNEM_cmp
12187 || inst
.instruction
== T_MNEM_cmn
)
12188 constraint (Rn
== REG_PC
, BAD_PC
);
12190 reject_bad_reg (Rn
);
12191 reject_bad_reg (Rm
);
12193 if (unified_syntax
)
12195 int r0off
= (inst
.instruction
== T_MNEM_mvn
12196 || inst
.instruction
== T_MNEM_mvns
) ? 8 : 16;
12197 bfd_boolean narrow
;
12199 if (inst
.size_req
== 4
12200 || inst
.instruction
> 0xffff
12201 || inst
.operands
[1].shifted
12202 || Rn
> 7 || Rm
> 7)
12204 else if (inst
.instruction
== T_MNEM_cmn
12205 || inst
.instruction
== T_MNEM_tst
)
12207 else if (THUMB_SETS_FLAGS (inst
.instruction
))
12208 narrow
= !in_it_block ();
12210 narrow
= in_it_block ();
12212 if (!inst
.operands
[1].isreg
)
12214 /* For an immediate, we always generate a 32-bit opcode;
12215 section relaxation will shrink it later if possible. */
12216 if (inst
.instruction
< 0xffff)
12217 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12218 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
12219 inst
.instruction
|= Rn
<< r0off
;
12220 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
12224 /* See if we can do this with a 16-bit instruction. */
12227 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12228 inst
.instruction
|= Rn
;
12229 inst
.instruction
|= Rm
<< 3;
12233 constraint (inst
.operands
[1].shifted
12234 && inst
.operands
[1].immisreg
,
12235 _("shift must be constant"));
12236 if (inst
.instruction
< 0xffff)
12237 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12238 inst
.instruction
|= Rn
<< r0off
;
12239 encode_thumb32_shifted_operand (1);
12245 constraint (inst
.instruction
> 0xffff
12246 || inst
.instruction
== T_MNEM_mvns
, BAD_THUMB32
);
12247 constraint (!inst
.operands
[1].isreg
|| inst
.operands
[1].shifted
,
12248 _("unshifted register required"));
12249 constraint (Rn
> 7 || Rm
> 7,
12252 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12253 inst
.instruction
|= Rn
;
12254 inst
.instruction
|= Rm
<< 3;
12263 if (do_vfp_nsyn_mrs () == SUCCESS
)
12266 Rd
= inst
.operands
[0].reg
;
12267 reject_bad_reg (Rd
);
12268 inst
.instruction
|= Rd
<< 8;
12270 if (inst
.operands
[1].isreg
)
12272 unsigned br
= inst
.operands
[1].reg
;
12273 if (((br
& 0x200) == 0) && ((br
& 0xf000) != 0xf000))
12274 as_bad (_("bad register for mrs"));
12276 inst
.instruction
|= br
& (0xf << 16);
12277 inst
.instruction
|= (br
& 0x300) >> 4;
12278 inst
.instruction
|= (br
& SPSR_BIT
) >> 2;
12282 int flags
= inst
.operands
[1].imm
& (PSR_c
|PSR_x
|PSR_s
|PSR_f
|SPSR_BIT
);
12284 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_m
))
12286 /* PR gas/12698: The constraint is only applied for m_profile.
12287 If the user has specified -march=all, we want to ignore it as
12288 we are building for any CPU type, including non-m variants. */
12289 bfd_boolean m_profile
=
12290 !ARM_FEATURE_CORE_EQUAL (selected_cpu
, arm_arch_any
);
12291 constraint ((flags
!= 0) && m_profile
, _("selected processor does "
12292 "not support requested special purpose register"));
12295 /* mrs only accepts APSR/CPSR/SPSR/CPSR_all/SPSR_all (for non-M profile
12297 constraint ((flags
& ~SPSR_BIT
) != (PSR_c
|PSR_f
),
12298 _("'APSR', 'CPSR' or 'SPSR' expected"));
12300 inst
.instruction
|= (flags
& SPSR_BIT
) >> 2;
12301 inst
.instruction
|= inst
.operands
[1].imm
& 0xff;
12302 inst
.instruction
|= 0xf0000;
12312 if (do_vfp_nsyn_msr () == SUCCESS
)
12315 constraint (!inst
.operands
[1].isreg
,
12316 _("Thumb encoding does not support an immediate here"));
12318 if (inst
.operands
[0].isreg
)
12319 flags
= (int)(inst
.operands
[0].reg
);
12321 flags
= inst
.operands
[0].imm
;
12323 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_m
))
12325 int bits
= inst
.operands
[0].imm
& (PSR_c
|PSR_x
|PSR_s
|PSR_f
|SPSR_BIT
);
12327 /* PR gas/12698: The constraint is only applied for m_profile.
12328 If the user has specified -march=all, we want to ignore it as
12329 we are building for any CPU type, including non-m variants. */
12330 bfd_boolean m_profile
=
12331 !ARM_FEATURE_CORE_EQUAL (selected_cpu
, arm_arch_any
);
12332 constraint (((ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6_dsp
)
12333 && (bits
& ~(PSR_s
| PSR_f
)) != 0)
12334 || (!ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6_dsp
)
12335 && bits
!= PSR_f
)) && m_profile
,
12336 _("selected processor does not support requested special "
12337 "purpose register"));
12340 constraint ((flags
& 0xff) != 0, _("selected processor does not support "
12341 "requested special purpose register"));
12343 Rn
= inst
.operands
[1].reg
;
12344 reject_bad_reg (Rn
);
12346 inst
.instruction
|= (flags
& SPSR_BIT
) >> 2;
12347 inst
.instruction
|= (flags
& 0xf0000) >> 8;
12348 inst
.instruction
|= (flags
& 0x300) >> 4;
12349 inst
.instruction
|= (flags
& 0xff);
12350 inst
.instruction
|= Rn
<< 16;
12356 bfd_boolean narrow
;
12357 unsigned Rd
, Rn
, Rm
;
12359 if (!inst
.operands
[2].present
)
12360 inst
.operands
[2].reg
= inst
.operands
[0].reg
;
12362 Rd
= inst
.operands
[0].reg
;
12363 Rn
= inst
.operands
[1].reg
;
12364 Rm
= inst
.operands
[2].reg
;
12366 if (unified_syntax
)
12368 if (inst
.size_req
== 4
12374 else if (inst
.instruction
== T_MNEM_muls
)
12375 narrow
= !in_it_block ();
12377 narrow
= in_it_block ();
12381 constraint (inst
.instruction
== T_MNEM_muls
, BAD_THUMB32
);
12382 constraint (Rn
> 7 || Rm
> 7,
12389 /* 16-bit MULS/Conditional MUL. */
12390 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12391 inst
.instruction
|= Rd
;
12394 inst
.instruction
|= Rm
<< 3;
12396 inst
.instruction
|= Rn
<< 3;
12398 constraint (1, _("dest must overlap one source register"));
12402 constraint (inst
.instruction
!= T_MNEM_mul
,
12403 _("Thumb-2 MUL must not set flags"));
12405 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12406 inst
.instruction
|= Rd
<< 8;
12407 inst
.instruction
|= Rn
<< 16;
12408 inst
.instruction
|= Rm
<< 0;
12410 reject_bad_reg (Rd
);
12411 reject_bad_reg (Rn
);
12412 reject_bad_reg (Rm
);
12419 unsigned RdLo
, RdHi
, Rn
, Rm
;
12421 RdLo
= inst
.operands
[0].reg
;
12422 RdHi
= inst
.operands
[1].reg
;
12423 Rn
= inst
.operands
[2].reg
;
12424 Rm
= inst
.operands
[3].reg
;
12426 reject_bad_reg (RdLo
);
12427 reject_bad_reg (RdHi
);
12428 reject_bad_reg (Rn
);
12429 reject_bad_reg (Rm
);
12431 inst
.instruction
|= RdLo
<< 12;
12432 inst
.instruction
|= RdHi
<< 8;
12433 inst
.instruction
|= Rn
<< 16;
12434 inst
.instruction
|= Rm
;
12437 as_tsktsk (_("rdhi and rdlo must be different"));
12443 set_it_insn_type (NEUTRAL_IT_INSN
);
12445 if (unified_syntax
)
12447 if (inst
.size_req
== 4 || inst
.operands
[0].imm
> 15)
12449 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12450 inst
.instruction
|= inst
.operands
[0].imm
;
12454 /* PR9722: Check for Thumb2 availability before
12455 generating a thumb2 nop instruction. */
12456 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6t2
))
12458 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12459 inst
.instruction
|= inst
.operands
[0].imm
<< 4;
12462 inst
.instruction
= 0x46c0;
12467 constraint (inst
.operands
[0].present
,
12468 _("Thumb does not support NOP with hints"));
12469 inst
.instruction
= 0x46c0;
12476 if (unified_syntax
)
12478 bfd_boolean narrow
;
12480 if (THUMB_SETS_FLAGS (inst
.instruction
))
12481 narrow
= !in_it_block ();
12483 narrow
= in_it_block ();
12484 if (inst
.operands
[0].reg
> 7 || inst
.operands
[1].reg
> 7)
12486 if (inst
.size_req
== 4)
12491 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12492 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
12493 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
12497 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12498 inst
.instruction
|= inst
.operands
[0].reg
;
12499 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
12504 constraint (inst
.operands
[0].reg
> 7 || inst
.operands
[1].reg
> 7,
12506 constraint (THUMB_SETS_FLAGS (inst
.instruction
), BAD_THUMB32
);
12508 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12509 inst
.instruction
|= inst
.operands
[0].reg
;
12510 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
12519 Rd
= inst
.operands
[0].reg
;
12520 Rn
= inst
.operands
[1].present
? inst
.operands
[1].reg
: Rd
;
12522 reject_bad_reg (Rd
);
12523 /* Rn == REG_SP is unpredictable; Rn == REG_PC is MVN. */
12524 reject_bad_reg (Rn
);
12526 inst
.instruction
|= Rd
<< 8;
12527 inst
.instruction
|= Rn
<< 16;
12529 if (!inst
.operands
[2].isreg
)
12531 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
12532 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
12538 Rm
= inst
.operands
[2].reg
;
12539 reject_bad_reg (Rm
);
12541 constraint (inst
.operands
[2].shifted
12542 && inst
.operands
[2].immisreg
,
12543 _("shift must be constant"));
12544 encode_thumb32_shifted_operand (2);
12551 unsigned Rd
, Rn
, Rm
;
12553 Rd
= inst
.operands
[0].reg
;
12554 Rn
= inst
.operands
[1].reg
;
12555 Rm
= inst
.operands
[2].reg
;
12557 reject_bad_reg (Rd
);
12558 reject_bad_reg (Rn
);
12559 reject_bad_reg (Rm
);
12561 inst
.instruction
|= Rd
<< 8;
12562 inst
.instruction
|= Rn
<< 16;
12563 inst
.instruction
|= Rm
;
12564 if (inst
.operands
[3].present
)
12566 unsigned int val
= inst
.reloc
.exp
.X_add_number
;
12567 constraint (inst
.reloc
.exp
.X_op
!= O_constant
,
12568 _("expression too complex"));
12569 inst
.instruction
|= (val
& 0x1c) << 10;
12570 inst
.instruction
|= (val
& 0x03) << 6;
12577 if (!inst
.operands
[3].present
)
12581 inst
.instruction
&= ~0x00000020;
12583 /* PR 10168. Swap the Rm and Rn registers. */
12584 Rtmp
= inst
.operands
[1].reg
;
12585 inst
.operands
[1].reg
= inst
.operands
[2].reg
;
12586 inst
.operands
[2].reg
= Rtmp
;
12594 if (inst
.operands
[0].immisreg
)
12595 reject_bad_reg (inst
.operands
[0].imm
);
12597 encode_thumb32_addr_mode (0, /*is_t=*/FALSE
, /*is_d=*/FALSE
);
12601 do_t_push_pop (void)
12605 constraint (inst
.operands
[0].writeback
,
12606 _("push/pop do not support {reglist}^"));
12607 constraint (inst
.reloc
.type
!= BFD_RELOC_UNUSED
,
12608 _("expression too complex"));
12610 mask
= inst
.operands
[0].imm
;
12611 if (inst
.size_req
!= 4 && (mask
& ~0xff) == 0)
12612 inst
.instruction
= THUMB_OP16 (inst
.instruction
) | mask
;
12613 else if (inst
.size_req
!= 4
12614 && (mask
& ~0xff) == (1U << (inst
.instruction
== T_MNEM_push
12615 ? REG_LR
: REG_PC
)))
12617 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12618 inst
.instruction
|= THUMB_PP_PC_LR
;
12619 inst
.instruction
|= mask
& 0xff;
12621 else if (unified_syntax
)
12623 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12624 encode_thumb2_ldmstm (13, mask
, TRUE
);
12628 inst
.error
= _("invalid register list to push/pop instruction");
12638 Rd
= inst
.operands
[0].reg
;
12639 Rm
= inst
.operands
[1].reg
;
12641 reject_bad_reg (Rd
);
12642 reject_bad_reg (Rm
);
12644 inst
.instruction
|= Rd
<< 8;
12645 inst
.instruction
|= Rm
<< 16;
12646 inst
.instruction
|= Rm
;
12654 Rd
= inst
.operands
[0].reg
;
12655 Rm
= inst
.operands
[1].reg
;
12657 reject_bad_reg (Rd
);
12658 reject_bad_reg (Rm
);
12660 if (Rd
<= 7 && Rm
<= 7
12661 && inst
.size_req
!= 4)
12663 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12664 inst
.instruction
|= Rd
;
12665 inst
.instruction
|= Rm
<< 3;
12667 else if (unified_syntax
)
12669 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12670 inst
.instruction
|= Rd
<< 8;
12671 inst
.instruction
|= Rm
<< 16;
12672 inst
.instruction
|= Rm
;
12675 inst
.error
= BAD_HIREG
;
12683 Rd
= inst
.operands
[0].reg
;
12684 Rm
= inst
.operands
[1].reg
;
12686 reject_bad_reg (Rd
);
12687 reject_bad_reg (Rm
);
12689 inst
.instruction
|= Rd
<< 8;
12690 inst
.instruction
|= Rm
;
12698 Rd
= inst
.operands
[0].reg
;
12699 Rs
= (inst
.operands
[1].present
12700 ? inst
.operands
[1].reg
/* Rd, Rs, foo */
12701 : inst
.operands
[0].reg
); /* Rd, foo -> Rd, Rd, foo */
12703 reject_bad_reg (Rd
);
12704 reject_bad_reg (Rs
);
12705 if (inst
.operands
[2].isreg
)
12706 reject_bad_reg (inst
.operands
[2].reg
);
12708 inst
.instruction
|= Rd
<< 8;
12709 inst
.instruction
|= Rs
<< 16;
12710 if (!inst
.operands
[2].isreg
)
12712 bfd_boolean narrow
;
12714 if ((inst
.instruction
& 0x00100000) != 0)
12715 narrow
= !in_it_block ();
12717 narrow
= in_it_block ();
12719 if (Rd
> 7 || Rs
> 7)
12722 if (inst
.size_req
== 4 || !unified_syntax
)
12725 if (inst
.reloc
.exp
.X_op
!= O_constant
12726 || inst
.reloc
.exp
.X_add_number
!= 0)
12729 /* Turn rsb #0 into 16-bit neg. We should probably do this via
12730 relaxation, but it doesn't seem worth the hassle. */
12733 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
12734 inst
.instruction
= THUMB_OP16 (T_MNEM_negs
);
12735 inst
.instruction
|= Rs
<< 3;
12736 inst
.instruction
|= Rd
;
12740 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
12741 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
12745 encode_thumb32_shifted_operand (2);
12751 if (warn_on_deprecated
12752 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8
))
12753 as_tsktsk (_("setend use is deprecated for ARMv8"));
12755 set_it_insn_type (OUTSIDE_IT_INSN
);
12756 if (inst
.operands
[0].imm
)
12757 inst
.instruction
|= 0x8;
12763 if (!inst
.operands
[1].present
)
12764 inst
.operands
[1].reg
= inst
.operands
[0].reg
;
12766 if (unified_syntax
)
12768 bfd_boolean narrow
;
12771 switch (inst
.instruction
)
12774 case T_MNEM_asrs
: shift_kind
= SHIFT_ASR
; break;
12776 case T_MNEM_lsls
: shift_kind
= SHIFT_LSL
; break;
12778 case T_MNEM_lsrs
: shift_kind
= SHIFT_LSR
; break;
12780 case T_MNEM_rors
: shift_kind
= SHIFT_ROR
; break;
12784 if (THUMB_SETS_FLAGS (inst
.instruction
))
12785 narrow
= !in_it_block ();
12787 narrow
= in_it_block ();
12788 if (inst
.operands
[0].reg
> 7 || inst
.operands
[1].reg
> 7)
12790 if (!inst
.operands
[2].isreg
&& shift_kind
== SHIFT_ROR
)
12792 if (inst
.operands
[2].isreg
12793 && (inst
.operands
[1].reg
!= inst
.operands
[0].reg
12794 || inst
.operands
[2].reg
> 7))
12796 if (inst
.size_req
== 4)
12799 reject_bad_reg (inst
.operands
[0].reg
);
12800 reject_bad_reg (inst
.operands
[1].reg
);
12804 if (inst
.operands
[2].isreg
)
12806 reject_bad_reg (inst
.operands
[2].reg
);
12807 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12808 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
12809 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
12810 inst
.instruction
|= inst
.operands
[2].reg
;
12812 /* PR 12854: Error on extraneous shifts. */
12813 constraint (inst
.operands
[2].shifted
,
12814 _("extraneous shift as part of operand to shift insn"));
12818 inst
.operands
[1].shifted
= 1;
12819 inst
.operands
[1].shift_kind
= shift_kind
;
12820 inst
.instruction
= THUMB_OP32 (THUMB_SETS_FLAGS (inst
.instruction
)
12821 ? T_MNEM_movs
: T_MNEM_mov
);
12822 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
12823 encode_thumb32_shifted_operand (1);
12824 /* Prevent the incorrect generation of an ARM_IMMEDIATE fixup. */
12825 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
12830 if (inst
.operands
[2].isreg
)
12832 switch (shift_kind
)
12834 case SHIFT_ASR
: inst
.instruction
= T_OPCODE_ASR_R
; break;
12835 case SHIFT_LSL
: inst
.instruction
= T_OPCODE_LSL_R
; break;
12836 case SHIFT_LSR
: inst
.instruction
= T_OPCODE_LSR_R
; break;
12837 case SHIFT_ROR
: inst
.instruction
= T_OPCODE_ROR_R
; break;
12841 inst
.instruction
|= inst
.operands
[0].reg
;
12842 inst
.instruction
|= inst
.operands
[2].reg
<< 3;
12844 /* PR 12854: Error on extraneous shifts. */
12845 constraint (inst
.operands
[2].shifted
,
12846 _("extraneous shift as part of operand to shift insn"));
12850 switch (shift_kind
)
12852 case SHIFT_ASR
: inst
.instruction
= T_OPCODE_ASR_I
; break;
12853 case SHIFT_LSL
: inst
.instruction
= T_OPCODE_LSL_I
; break;
12854 case SHIFT_LSR
: inst
.instruction
= T_OPCODE_LSR_I
; break;
12857 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_SHIFT
;
12858 inst
.instruction
|= inst
.operands
[0].reg
;
12859 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
12865 constraint (inst
.operands
[0].reg
> 7
12866 || inst
.operands
[1].reg
> 7, BAD_HIREG
);
12867 constraint (THUMB_SETS_FLAGS (inst
.instruction
), BAD_THUMB32
);
12869 if (inst
.operands
[2].isreg
) /* Rd, {Rs,} Rn */
12871 constraint (inst
.operands
[2].reg
> 7, BAD_HIREG
);
12872 constraint (inst
.operands
[0].reg
!= inst
.operands
[1].reg
,
12873 _("source1 and dest must be same register"));
12875 switch (inst
.instruction
)
12877 case T_MNEM_asr
: inst
.instruction
= T_OPCODE_ASR_R
; break;
12878 case T_MNEM_lsl
: inst
.instruction
= T_OPCODE_LSL_R
; break;
12879 case T_MNEM_lsr
: inst
.instruction
= T_OPCODE_LSR_R
; break;
12880 case T_MNEM_ror
: inst
.instruction
= T_OPCODE_ROR_R
; break;
12884 inst
.instruction
|= inst
.operands
[0].reg
;
12885 inst
.instruction
|= inst
.operands
[2].reg
<< 3;
12887 /* PR 12854: Error on extraneous shifts. */
12888 constraint (inst
.operands
[2].shifted
,
12889 _("extraneous shift as part of operand to shift insn"));
12893 switch (inst
.instruction
)
12895 case T_MNEM_asr
: inst
.instruction
= T_OPCODE_ASR_I
; break;
12896 case T_MNEM_lsl
: inst
.instruction
= T_OPCODE_LSL_I
; break;
12897 case T_MNEM_lsr
: inst
.instruction
= T_OPCODE_LSR_I
; break;
12898 case T_MNEM_ror
: inst
.error
= _("ror #imm not supported"); return;
12901 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_SHIFT
;
12902 inst
.instruction
|= inst
.operands
[0].reg
;
12903 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
12911 unsigned Rd
, Rn
, Rm
;
12913 Rd
= inst
.operands
[0].reg
;
12914 Rn
= inst
.operands
[1].reg
;
12915 Rm
= inst
.operands
[2].reg
;
12917 reject_bad_reg (Rd
);
12918 reject_bad_reg (Rn
);
12919 reject_bad_reg (Rm
);
12921 inst
.instruction
|= Rd
<< 8;
12922 inst
.instruction
|= Rn
<< 16;
12923 inst
.instruction
|= Rm
;
12929 unsigned Rd
, Rn
, Rm
;
12931 Rd
= inst
.operands
[0].reg
;
12932 Rm
= inst
.operands
[1].reg
;
12933 Rn
= inst
.operands
[2].reg
;
12935 reject_bad_reg (Rd
);
12936 reject_bad_reg (Rn
);
12937 reject_bad_reg (Rm
);
12939 inst
.instruction
|= Rd
<< 8;
12940 inst
.instruction
|= Rn
<< 16;
12941 inst
.instruction
|= Rm
;
12947 unsigned int value
= inst
.reloc
.exp
.X_add_number
;
12948 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v7a
),
12949 _("SMC is not permitted on this architecture"));
12950 constraint (inst
.reloc
.exp
.X_op
!= O_constant
,
12951 _("expression too complex"));
12952 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
12953 inst
.instruction
|= (value
& 0xf000) >> 12;
12954 inst
.instruction
|= (value
& 0x0ff0);
12955 inst
.instruction
|= (value
& 0x000f) << 16;
12956 /* PR gas/15623: SMC instructions must be last in an IT block. */
12957 set_it_insn_type_last ();
12963 unsigned int value
= inst
.reloc
.exp
.X_add_number
;
12965 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
12966 inst
.instruction
|= (value
& 0x0fff);
12967 inst
.instruction
|= (value
& 0xf000) << 4;
12971 do_t_ssat_usat (int bias
)
12975 Rd
= inst
.operands
[0].reg
;
12976 Rn
= inst
.operands
[2].reg
;
12978 reject_bad_reg (Rd
);
12979 reject_bad_reg (Rn
);
12981 inst
.instruction
|= Rd
<< 8;
12982 inst
.instruction
|= inst
.operands
[1].imm
- bias
;
12983 inst
.instruction
|= Rn
<< 16;
12985 if (inst
.operands
[3].present
)
12987 offsetT shift_amount
= inst
.reloc
.exp
.X_add_number
;
12989 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
12991 constraint (inst
.reloc
.exp
.X_op
!= O_constant
,
12992 _("expression too complex"));
12994 if (shift_amount
!= 0)
12996 constraint (shift_amount
> 31,
12997 _("shift expression is too large"));
12999 if (inst
.operands
[3].shift_kind
== SHIFT_ASR
)
13000 inst
.instruction
|= 0x00200000; /* sh bit. */
13002 inst
.instruction
|= (shift_amount
& 0x1c) << 10;
13003 inst
.instruction
|= (shift_amount
& 0x03) << 6;
13011 do_t_ssat_usat (1);
13019 Rd
= inst
.operands
[0].reg
;
13020 Rn
= inst
.operands
[2].reg
;
13022 reject_bad_reg (Rd
);
13023 reject_bad_reg (Rn
);
13025 inst
.instruction
|= Rd
<< 8;
13026 inst
.instruction
|= inst
.operands
[1].imm
- 1;
13027 inst
.instruction
|= Rn
<< 16;
13033 constraint (!inst
.operands
[2].isreg
|| !inst
.operands
[2].preind
13034 || inst
.operands
[2].postind
|| inst
.operands
[2].writeback
13035 || inst
.operands
[2].immisreg
|| inst
.operands
[2].shifted
13036 || inst
.operands
[2].negative
,
13039 constraint (inst
.operands
[2].reg
== REG_PC
, BAD_PC
);
13041 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
13042 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
13043 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
13044 inst
.reloc
.type
= BFD_RELOC_ARM_T32_OFFSET_U8
;
13050 if (!inst
.operands
[2].present
)
13051 inst
.operands
[2].reg
= inst
.operands
[1].reg
+ 1;
13053 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
13054 || inst
.operands
[0].reg
== inst
.operands
[2].reg
13055 || inst
.operands
[0].reg
== inst
.operands
[3].reg
,
13058 inst
.instruction
|= inst
.operands
[0].reg
;
13059 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
13060 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
13061 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
13067 unsigned Rd
, Rn
, Rm
;
13069 Rd
= inst
.operands
[0].reg
;
13070 Rn
= inst
.operands
[1].reg
;
13071 Rm
= inst
.operands
[2].reg
;
13073 reject_bad_reg (Rd
);
13074 reject_bad_reg (Rn
);
13075 reject_bad_reg (Rm
);
13077 inst
.instruction
|= Rd
<< 8;
13078 inst
.instruction
|= Rn
<< 16;
13079 inst
.instruction
|= Rm
;
13080 inst
.instruction
|= inst
.operands
[3].imm
<< 4;
13088 Rd
= inst
.operands
[0].reg
;
13089 Rm
= inst
.operands
[1].reg
;
13091 reject_bad_reg (Rd
);
13092 reject_bad_reg (Rm
);
13094 if (inst
.instruction
<= 0xffff
13095 && inst
.size_req
!= 4
13096 && Rd
<= 7 && Rm
<= 7
13097 && (!inst
.operands
[2].present
|| inst
.operands
[2].imm
== 0))
13099 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
13100 inst
.instruction
|= Rd
;
13101 inst
.instruction
|= Rm
<< 3;
13103 else if (unified_syntax
)
13105 if (inst
.instruction
<= 0xffff)
13106 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
13107 inst
.instruction
|= Rd
<< 8;
13108 inst
.instruction
|= Rm
;
13109 inst
.instruction
|= inst
.operands
[2].imm
<< 4;
13113 constraint (inst
.operands
[2].present
&& inst
.operands
[2].imm
!= 0,
13114 _("Thumb encoding does not support rotation"));
13115 constraint (1, BAD_HIREG
);
13122 /* We have to do the following check manually as ARM_EXT_OS only applies
13124 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6m
))
13126 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_os
)
13127 /* This only applies to the v6m however, not later architectures. */
13128 && ! ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v7
))
13129 as_bad (_("SVC is not permitted on this architecture"));
13130 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
, arm_ext_os
);
13133 inst
.reloc
.type
= BFD_RELOC_ARM_SWI
;
13142 half
= (inst
.instruction
& 0x10) != 0;
13143 set_it_insn_type_last ();
13144 constraint (inst
.operands
[0].immisreg
,
13145 _("instruction requires register index"));
13147 Rn
= inst
.operands
[0].reg
;
13148 Rm
= inst
.operands
[0].imm
;
13150 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8
))
13151 constraint (Rn
== REG_SP
, BAD_SP
);
13152 reject_bad_reg (Rm
);
13154 constraint (!half
&& inst
.operands
[0].shifted
,
13155 _("instruction does not allow shifted index"));
13156 inst
.instruction
|= (Rn
<< 16) | Rm
;
13162 if (!inst
.operands
[0].present
)
13163 inst
.operands
[0].imm
= 0;
13165 if ((unsigned int) inst
.operands
[0].imm
> 255 || inst
.size_req
== 4)
13167 constraint (inst
.size_req
== 2,
13168 _("immediate value out of range"));
13169 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
13170 inst
.instruction
|= (inst
.operands
[0].imm
& 0xf000u
) << 4;
13171 inst
.instruction
|= (inst
.operands
[0].imm
& 0x0fffu
) << 0;
13175 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
13176 inst
.instruction
|= inst
.operands
[0].imm
;
13179 set_it_insn_type (NEUTRAL_IT_INSN
);
13186 do_t_ssat_usat (0);
13194 Rd
= inst
.operands
[0].reg
;
13195 Rn
= inst
.operands
[2].reg
;
13197 reject_bad_reg (Rd
);
13198 reject_bad_reg (Rn
);
13200 inst
.instruction
|= Rd
<< 8;
13201 inst
.instruction
|= inst
.operands
[1].imm
;
13202 inst
.instruction
|= Rn
<< 16;
13205 /* Neon instruction encoder helpers. */
13207 /* Encodings for the different types for various Neon opcodes. */
13209 /* An "invalid" code for the following tables. */
13212 struct neon_tab_entry
13215 unsigned float_or_poly
;
13216 unsigned scalar_or_imm
;
13219 /* Map overloaded Neon opcodes to their respective encodings. */
13220 #define NEON_ENC_TAB \
13221 X(vabd, 0x0000700, 0x1200d00, N_INV), \
13222 X(vmax, 0x0000600, 0x0000f00, N_INV), \
13223 X(vmin, 0x0000610, 0x0200f00, N_INV), \
13224 X(vpadd, 0x0000b10, 0x1000d00, N_INV), \
13225 X(vpmax, 0x0000a00, 0x1000f00, N_INV), \
13226 X(vpmin, 0x0000a10, 0x1200f00, N_INV), \
13227 X(vadd, 0x0000800, 0x0000d00, N_INV), \
13228 X(vsub, 0x1000800, 0x0200d00, N_INV), \
13229 X(vceq, 0x1000810, 0x0000e00, 0x1b10100), \
13230 X(vcge, 0x0000310, 0x1000e00, 0x1b10080), \
13231 X(vcgt, 0x0000300, 0x1200e00, 0x1b10000), \
13232 /* Register variants of the following two instructions are encoded as
13233 vcge / vcgt with the operands reversed. */ \
13234 X(vclt, 0x0000300, 0x1200e00, 0x1b10200), \
13235 X(vcle, 0x0000310, 0x1000e00, 0x1b10180), \
13236 X(vfma, N_INV, 0x0000c10, N_INV), \
13237 X(vfms, N_INV, 0x0200c10, N_INV), \
13238 X(vmla, 0x0000900, 0x0000d10, 0x0800040), \
13239 X(vmls, 0x1000900, 0x0200d10, 0x0800440), \
13240 X(vmul, 0x0000910, 0x1000d10, 0x0800840), \
13241 X(vmull, 0x0800c00, 0x0800e00, 0x0800a40), /* polynomial not float. */ \
13242 X(vmlal, 0x0800800, N_INV, 0x0800240), \
13243 X(vmlsl, 0x0800a00, N_INV, 0x0800640), \
13244 X(vqdmlal, 0x0800900, N_INV, 0x0800340), \
13245 X(vqdmlsl, 0x0800b00, N_INV, 0x0800740), \
13246 X(vqdmull, 0x0800d00, N_INV, 0x0800b40), \
13247 X(vqdmulh, 0x0000b00, N_INV, 0x0800c40), \
13248 X(vqrdmulh, 0x1000b00, N_INV, 0x0800d40), \
13249 X(vqrdmlah, 0x3000b10, N_INV, 0x0800e40), \
13250 X(vqrdmlsh, 0x3000c10, N_INV, 0x0800f40), \
13251 X(vshl, 0x0000400, N_INV, 0x0800510), \
13252 X(vqshl, 0x0000410, N_INV, 0x0800710), \
13253 X(vand, 0x0000110, N_INV, 0x0800030), \
13254 X(vbic, 0x0100110, N_INV, 0x0800030), \
13255 X(veor, 0x1000110, N_INV, N_INV), \
13256 X(vorn, 0x0300110, N_INV, 0x0800010), \
13257 X(vorr, 0x0200110, N_INV, 0x0800010), \
13258 X(vmvn, 0x1b00580, N_INV, 0x0800030), \
13259 X(vshll, 0x1b20300, N_INV, 0x0800a10), /* max shift, immediate. */ \
13260 X(vcvt, 0x1b30600, N_INV, 0x0800e10), /* integer, fixed-point. */ \
13261 X(vdup, 0xe800b10, N_INV, 0x1b00c00), /* arm, scalar. */ \
13262 X(vld1, 0x0200000, 0x0a00000, 0x0a00c00), /* interlv, lane, dup. */ \
13263 X(vst1, 0x0000000, 0x0800000, N_INV), \
13264 X(vld2, 0x0200100, 0x0a00100, 0x0a00d00), \
13265 X(vst2, 0x0000100, 0x0800100, N_INV), \
13266 X(vld3, 0x0200200, 0x0a00200, 0x0a00e00), \
13267 X(vst3, 0x0000200, 0x0800200, N_INV), \
13268 X(vld4, 0x0200300, 0x0a00300, 0x0a00f00), \
13269 X(vst4, 0x0000300, 0x0800300, N_INV), \
13270 X(vmovn, 0x1b20200, N_INV, N_INV), \
13271 X(vtrn, 0x1b20080, N_INV, N_INV), \
13272 X(vqmovn, 0x1b20200, N_INV, N_INV), \
13273 X(vqmovun, 0x1b20240, N_INV, N_INV), \
13274 X(vnmul, 0xe200a40, 0xe200b40, N_INV), \
13275 X(vnmla, 0xe100a40, 0xe100b40, N_INV), \
13276 X(vnmls, 0xe100a00, 0xe100b00, N_INV), \
13277 X(vfnma, 0xe900a40, 0xe900b40, N_INV), \
13278 X(vfnms, 0xe900a00, 0xe900b00, N_INV), \
13279 X(vcmp, 0xeb40a40, 0xeb40b40, N_INV), \
13280 X(vcmpz, 0xeb50a40, 0xeb50b40, N_INV), \
13281 X(vcmpe, 0xeb40ac0, 0xeb40bc0, N_INV), \
13282 X(vcmpez, 0xeb50ac0, 0xeb50bc0, N_INV), \
13283 X(vseleq, 0xe000a00, N_INV, N_INV), \
13284 X(vselvs, 0xe100a00, N_INV, N_INV), \
13285 X(vselge, 0xe200a00, N_INV, N_INV), \
13286 X(vselgt, 0xe300a00, N_INV, N_INV), \
13287 X(vmaxnm, 0xe800a00, 0x3000f10, N_INV), \
13288 X(vminnm, 0xe800a40, 0x3200f10, N_INV), \
13289 X(vcvta, 0xebc0a40, 0x3bb0000, N_INV), \
13290 X(vrintr, 0xeb60a40, 0x3ba0400, N_INV), \
13291 X(vrinta, 0xeb80a40, 0x3ba0400, N_INV), \
13292 X(aes, 0x3b00300, N_INV, N_INV), \
13293 X(sha3op, 0x2000c00, N_INV, N_INV), \
13294 X(sha1h, 0x3b902c0, N_INV, N_INV), \
13295 X(sha2op, 0x3ba0380, N_INV, N_INV)
13299 #define X(OPC,I,F,S) N_MNEM_##OPC
13304 static const struct neon_tab_entry neon_enc_tab
[] =
13306 #define X(OPC,I,F,S) { (I), (F), (S) }
13311 /* Do not use these macros; instead, use NEON_ENCODE defined below. */
13312 #define NEON_ENC_INTEGER_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
13313 #define NEON_ENC_ARMREG_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
13314 #define NEON_ENC_POLY_(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
13315 #define NEON_ENC_FLOAT_(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
13316 #define NEON_ENC_SCALAR_(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
13317 #define NEON_ENC_IMMED_(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
13318 #define NEON_ENC_INTERLV_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
13319 #define NEON_ENC_LANE_(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
13320 #define NEON_ENC_DUP_(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
13321 #define NEON_ENC_SINGLE_(X) \
13322 ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf0000000))
13323 #define NEON_ENC_DOUBLE_(X) \
13324 ((neon_enc_tab[(X) & 0x0fffffff].float_or_poly) | ((X) & 0xf0000000))
13325 #define NEON_ENC_FPV8_(X) \
13326 ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf000000))
13328 #define NEON_ENCODE(type, inst) \
13331 inst.instruction = NEON_ENC_##type##_ (inst.instruction); \
13332 inst.is_neon = 1; \
13336 #define check_neon_suffixes \
13339 if (!inst.error && inst.vectype.elems > 0 && !inst.is_neon) \
13341 as_bad (_("invalid neon suffix for non neon instruction")); \
13347 /* Define shapes for instruction operands. The following mnemonic characters
13348 are used in this table:
13350 F - VFP S<n> register
13351 D - Neon D<n> register
13352 Q - Neon Q<n> register
13356 L - D<n> register list
13358 This table is used to generate various data:
13359 - enumerations of the form NS_DDR to be used as arguments to
13361 - a table classifying shapes into single, double, quad, mixed.
13362 - a table used to drive neon_select_shape. */
13364 #define NEON_SHAPE_DEF \
13365 X(3, (D, D, D), DOUBLE), \
13366 X(3, (Q, Q, Q), QUAD), \
13367 X(3, (D, D, I), DOUBLE), \
13368 X(3, (Q, Q, I), QUAD), \
13369 X(3, (D, D, S), DOUBLE), \
13370 X(3, (Q, Q, S), QUAD), \
13371 X(2, (D, D), DOUBLE), \
13372 X(2, (Q, Q), QUAD), \
13373 X(2, (D, S), DOUBLE), \
13374 X(2, (Q, S), QUAD), \
13375 X(2, (D, R), DOUBLE), \
13376 X(2, (Q, R), QUAD), \
13377 X(2, (D, I), DOUBLE), \
13378 X(2, (Q, I), QUAD), \
13379 X(3, (D, L, D), DOUBLE), \
13380 X(2, (D, Q), MIXED), \
13381 X(2, (Q, D), MIXED), \
13382 X(3, (D, Q, I), MIXED), \
13383 X(3, (Q, D, I), MIXED), \
13384 X(3, (Q, D, D), MIXED), \
13385 X(3, (D, Q, Q), MIXED), \
13386 X(3, (Q, Q, D), MIXED), \
13387 X(3, (Q, D, S), MIXED), \
13388 X(3, (D, Q, S), MIXED), \
13389 X(4, (D, D, D, I), DOUBLE), \
13390 X(4, (Q, Q, Q, I), QUAD), \
13391 X(4, (D, D, S, I), DOUBLE), \
13392 X(4, (Q, Q, S, I), QUAD), \
13393 X(2, (F, F), SINGLE), \
13394 X(3, (F, F, F), SINGLE), \
13395 X(2, (F, I), SINGLE), \
13396 X(2, (F, D), MIXED), \
13397 X(2, (D, F), MIXED), \
13398 X(3, (F, F, I), MIXED), \
13399 X(4, (R, R, F, F), SINGLE), \
13400 X(4, (F, F, R, R), SINGLE), \
13401 X(3, (D, R, R), DOUBLE), \
13402 X(3, (R, R, D), DOUBLE), \
13403 X(2, (S, R), SINGLE), \
13404 X(2, (R, S), SINGLE), \
13405 X(2, (F, R), SINGLE), \
13406 X(2, (R, F), SINGLE), \
13407 /* Half float shape supported so far. */\
13408 X (2, (H, D), MIXED), \
13409 X (2, (D, H), MIXED), \
13410 X (2, (H, F), MIXED), \
13411 X (2, (F, H), MIXED), \
13412 X (2, (H, H), HALF), \
13413 X (2, (H, R), HALF), \
13414 X (2, (R, H), HALF), \
13415 X (2, (H, I), HALF), \
13416 X (3, (H, H, H), HALF), \
13417 X (3, (H, F, I), MIXED), \
13418 X (3, (F, H, I), MIXED)
13420 #define S2(A,B) NS_##A##B
13421 #define S3(A,B,C) NS_##A##B##C
13422 #define S4(A,B,C,D) NS_##A##B##C##D
13424 #define X(N, L, C) S##N L
13437 enum neon_shape_class
13446 #define X(N, L, C) SC_##C
13448 static enum neon_shape_class neon_shape_class
[] =
13467 /* Register widths of above. */
13468 static unsigned neon_shape_el_size
[] =
13480 struct neon_shape_info
13483 enum neon_shape_el el
[NEON_MAX_TYPE_ELS
];
13486 #define S2(A,B) { SE_##A, SE_##B }
13487 #define S3(A,B,C) { SE_##A, SE_##B, SE_##C }
13488 #define S4(A,B,C,D) { SE_##A, SE_##B, SE_##C, SE_##D }
13490 #define X(N, L, C) { N, S##N L }
13492 static struct neon_shape_info neon_shape_tab
[] =
/* Bit masks used in type checking given instructions.
  'N_EQK' means the type must be the same as (or based on in some way) the key
   type, which itself is marked with the 'N_KEY' bit. If the 'N_EQK' bit is
   set, various other bits can be set as well in order to modify the meaning of
   the type constraint.  */

enum neon_type_mask
{
  N_S8   = 0x0000001,
  N_S16  = 0x0000002,
  N_S32  = 0x0000004,
  N_S64  = 0x0000008,
  N_U8   = 0x0000010,
  N_U16  = 0x0000020,
  N_U32  = 0x0000040,
  N_U64  = 0x0000080,
  N_I8   = 0x0000100,
  N_I16  = 0x0000200,
  N_I32  = 0x0000400,
  N_I64  = 0x0000800,
  N_8    = 0x0001000,
  N_16   = 0x0002000,
  N_32   = 0x0004000,
  N_64   = 0x0008000,
  N_P8   = 0x0010000,
  N_P16  = 0x0020000,
  N_F16  = 0x0040000,
  N_F32  = 0x0080000,
  N_F64  = 0x0100000,
  N_P64  = 0x0200000,
  N_KEY  = 0x1000000, /* Key element (main type specifier).  */
  N_EQK  = 0x2000000, /* Given operand has the same type & size as the key.  */
  N_VFP  = 0x4000000, /* VFP mode: operand size must match register width.  */
  N_UNT  = 0x8000000, /* Must be explicitly untyped.  */
  /* The following modifier bits deliberately alias the low type bits; they
     are only meaningful when combined with N_EQK.  */
  N_DBL  = 0x0000001, /* If N_EQK, this operand is twice the size.  */
  N_HLF  = 0x0000002, /* If N_EQK, this operand is half the size.  */
  N_SGN  = 0x0000004, /* If N_EQK, this operand is forced to be signed.  */
  N_UNS  = 0x0000008, /* If N_EQK, this operand is forced to be unsigned.  */
  N_INT  = 0x0000010, /* If N_EQK, this operand is forced to be integer.  */
  N_FLT  = 0x0000020, /* If N_EQK, this operand is forced to be float.  */
  N_SIZ  = 0x0000040, /* If N_EQK, this operand is forced to be size-only.  */
  N_MAX_NONSPECIAL = N_P64
};

#define N_ALLMODS  (N_DBL | N_HLF | N_SGN | N_UNS | N_INT | N_FLT | N_SIZ)

#define N_SU_ALL   (N_S8 | N_S16 | N_S32 | N_S64 | N_U8 | N_U16 | N_U32 | N_U64)
#define N_SU_32    (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)
#define N_SU_16_64 (N_S16 | N_S32 | N_S64 | N_U16 | N_U32 | N_U64)
#define N_S_32     (N_S8 | N_S16 | N_S32)
#define N_F_16_32  (N_F16 | N_F32)
#define N_SUF_32   (N_SU_32 | N_F_16_32)
#define N_I_ALL    (N_I8 | N_I16 | N_I32 | N_I64)
#define N_IF_32    (N_I8 | N_I16 | N_I32 | N_F16 | N_F32)
#define N_F_ALL    (N_F16 | N_F32 | N_F64)

/* Pass this as the first type argument to neon_check_type to ignore types
   altogether.  */
#define N_IGNORE_TYPE (N_KEY | N_EQK)
13563 /* Select a "shape" for the current instruction (describing register types or
13564 sizes) from a list of alternatives. Return NS_NULL if the current instruction
13565 doesn't fit. For non-polymorphic shapes, checking is usually done as a
13566 function of operand parsing, so this function doesn't need to be called.
13567 Shapes should be listed in order of decreasing length. */
13569 static enum neon_shape
13570 neon_select_shape (enum neon_shape shape
, ...)
13573 enum neon_shape first_shape
= shape
;
13575 /* Fix missing optional operands. FIXME: we don't know at this point how
13576 many arguments we should have, so this makes the assumption that we have
13577 > 1. This is true of all current Neon opcodes, I think, but may not be
13578 true in the future. */
13579 if (!inst
.operands
[1].present
)
13580 inst
.operands
[1] = inst
.operands
[0];
13582 va_start (ap
, shape
);
13584 for (; shape
!= NS_NULL
; shape
= (enum neon_shape
) va_arg (ap
, int))
13589 for (j
= 0; j
< neon_shape_tab
[shape
].els
; j
++)
13591 if (!inst
.operands
[j
].present
)
13597 switch (neon_shape_tab
[shape
].el
[j
])
13599 /* If a .f16, .16, .u16, .s16 type specifier is given over
13600 a VFP single precision register operand, it's essentially
13601 means only half of the register is used.
13603 If the type specifier is given after the mnemonics, the
13604 information is stored in inst.vectype. If the type specifier
13605 is given after register operand, the information is stored
13606 in inst.operands[].vectype.
13608 When there is only one type specifier, and all the register
13609 operands are the same type of hardware register, the type
13610 specifier applies to all register operands.
13612 If no type specifier is given, the shape is inferred from
13613 operand information.
13616 vadd.f16 s0, s1, s2: NS_HHH
13617 vabs.f16 s0, s1: NS_HH
13618 vmov.f16 s0, r1: NS_HR
13619 vmov.f16 r0, s1: NS_RH
13620 vcvt.f16 r0, s1: NS_RH
13621 vcvt.f16.s32 s2, s2, #29: NS_HFI
13622 vcvt.f16.s32 s2, s2: NS_HF
13625 if (!(inst
.operands
[j
].isreg
13626 && inst
.operands
[j
].isvec
13627 && inst
.operands
[j
].issingle
13628 && !inst
.operands
[j
].isquad
13629 && ((inst
.vectype
.elems
== 1
13630 && inst
.vectype
.el
[0].size
== 16)
13631 || (inst
.vectype
.elems
> 1
13632 && inst
.vectype
.el
[j
].size
== 16)
13633 || (inst
.vectype
.elems
== 0
13634 && inst
.operands
[j
].vectype
.type
!= NT_invtype
13635 && inst
.operands
[j
].vectype
.size
== 16))))
13640 if (!(inst
.operands
[j
].isreg
13641 && inst
.operands
[j
].isvec
13642 && inst
.operands
[j
].issingle
13643 && !inst
.operands
[j
].isquad
13644 && ((inst
.vectype
.elems
== 1 && inst
.vectype
.el
[0].size
== 32)
13645 || (inst
.vectype
.elems
> 1 && inst
.vectype
.el
[j
].size
== 32)
13646 || (inst
.vectype
.elems
== 0
13647 && (inst
.operands
[j
].vectype
.size
== 32
13648 || inst
.operands
[j
].vectype
.type
== NT_invtype
)))))
13653 if (!(inst
.operands
[j
].isreg
13654 && inst
.operands
[j
].isvec
13655 && !inst
.operands
[j
].isquad
13656 && !inst
.operands
[j
].issingle
))
13661 if (!(inst
.operands
[j
].isreg
13662 && !inst
.operands
[j
].isvec
))
13667 if (!(inst
.operands
[j
].isreg
13668 && inst
.operands
[j
].isvec
13669 && inst
.operands
[j
].isquad
13670 && !inst
.operands
[j
].issingle
))
13675 if (!(!inst
.operands
[j
].isreg
13676 && !inst
.operands
[j
].isscalar
))
13681 if (!(!inst
.operands
[j
].isreg
13682 && inst
.operands
[j
].isscalar
))
13692 if (matches
&& (j
>= ARM_IT_MAX_OPERANDS
|| !inst
.operands
[j
].present
))
13693 /* We've matched all the entries in the shape table, and we don't
13694 have any left over operands which have not been matched. */
13700 if (shape
== NS_NULL
&& first_shape
!= NS_NULL
)
13701 first_error (_("invalid instruction shape"));
13706 /* True if SHAPE is predominantly a quadword operation (most of the time, this
13707 means the Q bit should be set). */
13710 neon_quad (enum neon_shape shape
)
13712 return neon_shape_class
[shape
] == SC_QUAD
;
13716 neon_modify_type_size (unsigned typebits
, enum neon_el_type
*g_type
,
13719 /* Allow modification to be made to types which are constrained to be
13720 based on the key element, based on bits set alongside N_EQK. */
13721 if ((typebits
& N_EQK
) != 0)
13723 if ((typebits
& N_HLF
) != 0)
13725 else if ((typebits
& N_DBL
) != 0)
13727 if ((typebits
& N_SGN
) != 0)
13728 *g_type
= NT_signed
;
13729 else if ((typebits
& N_UNS
) != 0)
13730 *g_type
= NT_unsigned
;
13731 else if ((typebits
& N_INT
) != 0)
13732 *g_type
= NT_integer
;
13733 else if ((typebits
& N_FLT
) != 0)
13734 *g_type
= NT_float
;
13735 else if ((typebits
& N_SIZ
) != 0)
13736 *g_type
= NT_untyped
;
13740 /* Return operand OPNO promoted by bits set in THISARG. KEY should be the "key"
13741 operand type, i.e. the single type specified in a Neon instruction when it
13742 is the only one given. */
13744 static struct neon_type_el
13745 neon_type_promote (struct neon_type_el
*key
, unsigned thisarg
)
13747 struct neon_type_el dest
= *key
;
13749 gas_assert ((thisarg
& N_EQK
) != 0);
13751 neon_modify_type_size (thisarg
, &dest
.type
, &dest
.size
);
13756 /* Convert Neon type and size into compact bitmask representation. */
13758 static enum neon_type_mask
13759 type_chk_of_el_type (enum neon_el_type type
, unsigned size
)
13766 case 8: return N_8
;
13767 case 16: return N_16
;
13768 case 32: return N_32
;
13769 case 64: return N_64
;
13777 case 8: return N_I8
;
13778 case 16: return N_I16
;
13779 case 32: return N_I32
;
13780 case 64: return N_I64
;
13788 case 16: return N_F16
;
13789 case 32: return N_F32
;
13790 case 64: return N_F64
;
13798 case 8: return N_P8
;
13799 case 16: return N_P16
;
13800 case 64: return N_P64
;
13808 case 8: return N_S8
;
13809 case 16: return N_S16
;
13810 case 32: return N_S32
;
13811 case 64: return N_S64
;
13819 case 8: return N_U8
;
13820 case 16: return N_U16
;
13821 case 32: return N_U32
;
13822 case 64: return N_U64
;
13833 /* Convert compact Neon bitmask type representation to a type and size. Only
13834 handles the case where a single bit is set in the mask. */
13837 el_type_of_type_chk (enum neon_el_type
*type
, unsigned *size
,
13838 enum neon_type_mask mask
)
13840 if ((mask
& N_EQK
) != 0)
13843 if ((mask
& (N_S8
| N_U8
| N_I8
| N_8
| N_P8
)) != 0)
13845 else if ((mask
& (N_S16
| N_U16
| N_I16
| N_16
| N_F16
| N_P16
)) != 0)
13847 else if ((mask
& (N_S32
| N_U32
| N_I32
| N_32
| N_F32
)) != 0)
13849 else if ((mask
& (N_S64
| N_U64
| N_I64
| N_64
| N_F64
| N_P64
)) != 0)
13854 if ((mask
& (N_S8
| N_S16
| N_S32
| N_S64
)) != 0)
13856 else if ((mask
& (N_U8
| N_U16
| N_U32
| N_U64
)) != 0)
13857 *type
= NT_unsigned
;
13858 else if ((mask
& (N_I8
| N_I16
| N_I32
| N_I64
)) != 0)
13859 *type
= NT_integer
;
13860 else if ((mask
& (N_8
| N_16
| N_32
| N_64
)) != 0)
13861 *type
= NT_untyped
;
13862 else if ((mask
& (N_P8
| N_P16
| N_P64
)) != 0)
13864 else if ((mask
& (N_F_ALL
)) != 0)
13872 /* Modify a bitmask of allowed types. This is only needed for type
13876 modify_types_allowed (unsigned allowed
, unsigned mods
)
13879 enum neon_el_type type
;
13885 for (i
= 1; i
<= N_MAX_NONSPECIAL
; i
<<= 1)
13887 if (el_type_of_type_chk (&type
, &size
,
13888 (enum neon_type_mask
) (allowed
& i
)) == SUCCESS
)
13890 neon_modify_type_size (mods
, &type
, &size
);
13891 destmask
|= type_chk_of_el_type (type
, size
);
13898 /* Check type and return type classification.
13899 The manual states (paraphrase): If one datatype is given, it indicates the
13901 - the second operand, if there is one
13902 - the operand, if there is no second operand
13903 - the result, if there are no operands.
13904 This isn't quite good enough though, so we use a concept of a "key" datatype
13905 which is set on a per-instruction basis, which is the one which matters when
13906 only one data type is written.
13907 Note: this function has side-effects (e.g. filling in missing operands). All
13908 Neon instructions should call it before performing bit encoding. */
13910 static struct neon_type_el
13911 neon_check_type (unsigned els
, enum neon_shape ns
, ...)
13914 unsigned i
, pass
, key_el
= 0;
13915 unsigned types
[NEON_MAX_TYPE_ELS
];
13916 enum neon_el_type k_type
= NT_invtype
;
13917 unsigned k_size
= -1u;
13918 struct neon_type_el badtype
= {NT_invtype
, -1};
13919 unsigned key_allowed
= 0;
13921 /* Optional registers in Neon instructions are always (not) in operand 1.
13922 Fill in the missing operand here, if it was omitted. */
13923 if (els
> 1 && !inst
.operands
[1].present
)
13924 inst
.operands
[1] = inst
.operands
[0];
13926 /* Suck up all the varargs. */
13928 for (i
= 0; i
< els
; i
++)
13930 unsigned thisarg
= va_arg (ap
, unsigned);
13931 if (thisarg
== N_IGNORE_TYPE
)
13936 types
[i
] = thisarg
;
13937 if ((thisarg
& N_KEY
) != 0)
13942 if (inst
.vectype
.elems
> 0)
13943 for (i
= 0; i
< els
; i
++)
13944 if (inst
.operands
[i
].vectype
.type
!= NT_invtype
)
13946 first_error (_("types specified in both the mnemonic and operands"));
13950 /* Duplicate inst.vectype elements here as necessary.
13951 FIXME: No idea if this is exactly the same as the ARM assembler,
13952 particularly when an insn takes one register and one non-register
13954 if (inst
.vectype
.elems
== 1 && els
> 1)
13957 inst
.vectype
.elems
= els
;
13958 inst
.vectype
.el
[key_el
] = inst
.vectype
.el
[0];
13959 for (j
= 0; j
< els
; j
++)
13961 inst
.vectype
.el
[j
] = neon_type_promote (&inst
.vectype
.el
[key_el
],
13964 else if (inst
.vectype
.elems
== 0 && els
> 0)
13967 /* No types were given after the mnemonic, so look for types specified
13968 after each operand. We allow some flexibility here; as long as the
13969 "key" operand has a type, we can infer the others. */
13970 for (j
= 0; j
< els
; j
++)
13971 if (inst
.operands
[j
].vectype
.type
!= NT_invtype
)
13972 inst
.vectype
.el
[j
] = inst
.operands
[j
].vectype
;
13974 if (inst
.operands
[key_el
].vectype
.type
!= NT_invtype
)
13976 for (j
= 0; j
< els
; j
++)
13977 if (inst
.operands
[j
].vectype
.type
== NT_invtype
)
13978 inst
.vectype
.el
[j
] = neon_type_promote (&inst
.vectype
.el
[key_el
],
13983 first_error (_("operand types can't be inferred"));
13987 else if (inst
.vectype
.elems
!= els
)
13989 first_error (_("type specifier has the wrong number of parts"));
13993 for (pass
= 0; pass
< 2; pass
++)
13995 for (i
= 0; i
< els
; i
++)
13997 unsigned thisarg
= types
[i
];
13998 unsigned types_allowed
= ((thisarg
& N_EQK
) != 0 && pass
!= 0)
13999 ? modify_types_allowed (key_allowed
, thisarg
) : thisarg
;
14000 enum neon_el_type g_type
= inst
.vectype
.el
[i
].type
;
14001 unsigned g_size
= inst
.vectype
.el
[i
].size
;
14003 /* Decay more-specific signed & unsigned types to sign-insensitive
14004 integer types if sign-specific variants are unavailable. */
14005 if ((g_type
== NT_signed
|| g_type
== NT_unsigned
)
14006 && (types_allowed
& N_SU_ALL
) == 0)
14007 g_type
= NT_integer
;
14009 /* If only untyped args are allowed, decay any more specific types to
14010 them. Some instructions only care about signs for some element
14011 sizes, so handle that properly. */
14012 if (((types_allowed
& N_UNT
) == 0)
14013 && ((g_size
== 8 && (types_allowed
& N_8
) != 0)
14014 || (g_size
== 16 && (types_allowed
& N_16
) != 0)
14015 || (g_size
== 32 && (types_allowed
& N_32
) != 0)
14016 || (g_size
== 64 && (types_allowed
& N_64
) != 0)))
14017 g_type
= NT_untyped
;
14021 if ((thisarg
& N_KEY
) != 0)
14025 key_allowed
= thisarg
& ~N_KEY
;
14027 /* Check architecture constraint on FP16 extension. */
14029 && k_type
== NT_float
14030 && ! ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_fp16
))
14032 inst
.error
= _(BAD_FP16
);
14039 if ((thisarg
& N_VFP
) != 0)
14041 enum neon_shape_el regshape
;
14042 unsigned regwidth
, match
;
14044 /* PR 11136: Catch the case where we are passed a shape of NS_NULL. */
14047 first_error (_("invalid instruction shape"));
14050 regshape
= neon_shape_tab
[ns
].el
[i
];
14051 regwidth
= neon_shape_el_size
[regshape
];
14053 /* In VFP mode, operands must match register widths. If we
14054 have a key operand, use its width, else use the width of
14055 the current operand. */
14061 /* FP16 will use a single precision register. */
14062 if (regwidth
== 32 && match
== 16)
14064 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_fp16
))
14068 inst
.error
= _(BAD_FP16
);
14073 if (regwidth
!= match
)
14075 first_error (_("operand size must match register width"));
14080 if ((thisarg
& N_EQK
) == 0)
14082 unsigned given_type
= type_chk_of_el_type (g_type
, g_size
);
14084 if ((given_type
& types_allowed
) == 0)
14086 first_error (_("bad type in Neon instruction"));
14092 enum neon_el_type mod_k_type
= k_type
;
14093 unsigned mod_k_size
= k_size
;
14094 neon_modify_type_size (thisarg
, &mod_k_type
, &mod_k_size
);
14095 if (g_type
!= mod_k_type
|| g_size
!= mod_k_size
)
14097 first_error (_("inconsistent types in Neon instruction"));
14105 return inst
.vectype
.el
[key_el
];
14108 /* Neon-style VFP instruction forwarding. */
14110 /* Thumb VFP instructions have 0xE in the condition field. */
14113 do_vfp_cond_or_thumb (void)
14118 inst
.instruction
|= 0xe0000000;
14120 inst
.instruction
|= inst
.cond
<< 28;
14123 /* Look up and encode a simple mnemonic, for use as a helper function for the
14124 Neon-style VFP syntax. This avoids duplication of bits of the insns table,
14125 etc. It is assumed that operand parsing has already been done, and that the
14126 operands are in the form expected by the given opcode (this isn't necessarily
14127 the same as the form in which they were parsed, hence some massaging must
14128 take place before this function is called).
14129 Checks current arch version against that in the looked-up opcode. */
14132 do_vfp_nsyn_opcode (const char *opname
)
14134 const struct asm_opcode
*opcode
;
14136 opcode
= (const struct asm_opcode
*) hash_find (arm_ops_hsh
, opname
);
14141 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
,
14142 thumb_mode
? *opcode
->tvariant
: *opcode
->avariant
),
14149 inst
.instruction
= opcode
->tvalue
;
14150 opcode
->tencode ();
14154 inst
.instruction
= (inst
.cond
<< 28) | opcode
->avalue
;
14155 opcode
->aencode ();
14160 do_vfp_nsyn_add_sub (enum neon_shape rs
)
14162 int is_add
= (inst
.instruction
& 0x0fffffff) == N_MNEM_vadd
;
14164 if (rs
== NS_FFF
|| rs
== NS_HHH
)
14167 do_vfp_nsyn_opcode ("fadds");
14169 do_vfp_nsyn_opcode ("fsubs");
14171 /* ARMv8.2 fp16 instruction. */
14173 do_scalar_fp16_v82_encode ();
14178 do_vfp_nsyn_opcode ("faddd");
14180 do_vfp_nsyn_opcode ("fsubd");
14184 /* Check operand types to see if this is a VFP instruction, and if so call
14188 try_vfp_nsyn (int args
, void (*pfn
) (enum neon_shape
))
14190 enum neon_shape rs
;
14191 struct neon_type_el et
;
14196 rs
= neon_select_shape (NS_HH
, NS_FF
, NS_DD
, NS_NULL
);
14197 et
= neon_check_type (2, rs
, N_EQK
| N_VFP
, N_F_ALL
| N_KEY
| N_VFP
);
14201 rs
= neon_select_shape (NS_HHH
, NS_FFF
, NS_DDD
, NS_NULL
);
14202 et
= neon_check_type (3, rs
, N_EQK
| N_VFP
, N_EQK
| N_VFP
,
14203 N_F_ALL
| N_KEY
| N_VFP
);
14210 if (et
.type
!= NT_invtype
)
14221 do_vfp_nsyn_mla_mls (enum neon_shape rs
)
14223 int is_mla
= (inst
.instruction
& 0x0fffffff) == N_MNEM_vmla
;
14225 if (rs
== NS_FFF
|| rs
== NS_HHH
)
14228 do_vfp_nsyn_opcode ("fmacs");
14230 do_vfp_nsyn_opcode ("fnmacs");
14232 /* ARMv8.2 fp16 instruction. */
14234 do_scalar_fp16_v82_encode ();
14239 do_vfp_nsyn_opcode ("fmacd");
14241 do_vfp_nsyn_opcode ("fnmacd");
14246 do_vfp_nsyn_fma_fms (enum neon_shape rs
)
14248 int is_fma
= (inst
.instruction
& 0x0fffffff) == N_MNEM_vfma
;
14250 if (rs
== NS_FFF
|| rs
== NS_HHH
)
14253 do_vfp_nsyn_opcode ("ffmas");
14255 do_vfp_nsyn_opcode ("ffnmas");
14257 /* ARMv8.2 fp16 instruction. */
14259 do_scalar_fp16_v82_encode ();
14264 do_vfp_nsyn_opcode ("ffmad");
14266 do_vfp_nsyn_opcode ("ffnmad");
14271 do_vfp_nsyn_mul (enum neon_shape rs
)
14273 if (rs
== NS_FFF
|| rs
== NS_HHH
)
14275 do_vfp_nsyn_opcode ("fmuls");
14277 /* ARMv8.2 fp16 instruction. */
14279 do_scalar_fp16_v82_encode ();
14282 do_vfp_nsyn_opcode ("fmuld");
14286 do_vfp_nsyn_abs_neg (enum neon_shape rs
)
14288 int is_neg
= (inst
.instruction
& 0x80) != 0;
14289 neon_check_type (2, rs
, N_EQK
| N_VFP
, N_F_ALL
| N_VFP
| N_KEY
);
14291 if (rs
== NS_FF
|| rs
== NS_HH
)
14294 do_vfp_nsyn_opcode ("fnegs");
14296 do_vfp_nsyn_opcode ("fabss");
14298 /* ARMv8.2 fp16 instruction. */
14300 do_scalar_fp16_v82_encode ();
14305 do_vfp_nsyn_opcode ("fnegd");
14307 do_vfp_nsyn_opcode ("fabsd");
14311 /* Encode single-precision (only!) VFP fldm/fstm instructions. Double precision
14312 insns belong to Neon, and are handled elsewhere. */
14315 do_vfp_nsyn_ldm_stm (int is_dbmode
)
14317 int is_ldm
= (inst
.instruction
& (1 << 20)) != 0;
14321 do_vfp_nsyn_opcode ("fldmdbs");
14323 do_vfp_nsyn_opcode ("fldmias");
14328 do_vfp_nsyn_opcode ("fstmdbs");
14330 do_vfp_nsyn_opcode ("fstmias");
14335 do_vfp_nsyn_sqrt (void)
14337 enum neon_shape rs
= neon_select_shape (NS_HH
, NS_FF
, NS_DD
, NS_NULL
);
14338 neon_check_type (2, rs
, N_EQK
| N_VFP
, N_F_ALL
| N_KEY
| N_VFP
);
14340 if (rs
== NS_FF
|| rs
== NS_HH
)
14342 do_vfp_nsyn_opcode ("fsqrts");
14344 /* ARMv8.2 fp16 instruction. */
14346 do_scalar_fp16_v82_encode ();
14349 do_vfp_nsyn_opcode ("fsqrtd");
14353 do_vfp_nsyn_div (void)
14355 enum neon_shape rs
= neon_select_shape (NS_HHH
, NS_FFF
, NS_DDD
, NS_NULL
);
14356 neon_check_type (3, rs
, N_EQK
| N_VFP
, N_EQK
| N_VFP
,
14357 N_F_ALL
| N_KEY
| N_VFP
);
14359 if (rs
== NS_FFF
|| rs
== NS_HHH
)
14361 do_vfp_nsyn_opcode ("fdivs");
14363 /* ARMv8.2 fp16 instruction. */
14365 do_scalar_fp16_v82_encode ();
14368 do_vfp_nsyn_opcode ("fdivd");
14372 do_vfp_nsyn_nmul (void)
14374 enum neon_shape rs
= neon_select_shape (NS_HHH
, NS_FFF
, NS_DDD
, NS_NULL
);
14375 neon_check_type (3, rs
, N_EQK
| N_VFP
, N_EQK
| N_VFP
,
14376 N_F_ALL
| N_KEY
| N_VFP
);
14378 if (rs
== NS_FFF
|| rs
== NS_HHH
)
14380 NEON_ENCODE (SINGLE
, inst
);
14381 do_vfp_sp_dyadic ();
14383 /* ARMv8.2 fp16 instruction. */
14385 do_scalar_fp16_v82_encode ();
14389 NEON_ENCODE (DOUBLE
, inst
);
14390 do_vfp_dp_rd_rn_rm ();
14392 do_vfp_cond_or_thumb ();
14397 do_vfp_nsyn_cmp (void)
14399 enum neon_shape rs
;
14400 if (inst
.operands
[1].isreg
)
14402 rs
= neon_select_shape (NS_HH
, NS_FF
, NS_DD
, NS_NULL
);
14403 neon_check_type (2, rs
, N_EQK
| N_VFP
, N_F_ALL
| N_KEY
| N_VFP
);
14405 if (rs
== NS_FF
|| rs
== NS_HH
)
14407 NEON_ENCODE (SINGLE
, inst
);
14408 do_vfp_sp_monadic ();
14412 NEON_ENCODE (DOUBLE
, inst
);
14413 do_vfp_dp_rd_rm ();
14418 rs
= neon_select_shape (NS_HI
, NS_FI
, NS_DI
, NS_NULL
);
14419 neon_check_type (2, rs
, N_F_ALL
| N_KEY
| N_VFP
, N_EQK
);
14421 switch (inst
.instruction
& 0x0fffffff)
14424 inst
.instruction
+= N_MNEM_vcmpz
- N_MNEM_vcmp
;
14427 inst
.instruction
+= N_MNEM_vcmpez
- N_MNEM_vcmpe
;
14433 if (rs
== NS_FI
|| rs
== NS_HI
)
14435 NEON_ENCODE (SINGLE
, inst
);
14436 do_vfp_sp_compare_z ();
14440 NEON_ENCODE (DOUBLE
, inst
);
14444 do_vfp_cond_or_thumb ();
14446 /* ARMv8.2 fp16 instruction. */
14447 if (rs
== NS_HI
|| rs
== NS_HH
)
14448 do_scalar_fp16_v82_encode ();
14452 nsyn_insert_sp (void)
14454 inst
.operands
[1] = inst
.operands
[0];
14455 memset (&inst
.operands
[0], '\0', sizeof (inst
.operands
[0]));
14456 inst
.operands
[0].reg
= REG_SP
;
14457 inst
.operands
[0].isreg
= 1;
14458 inst
.operands
[0].writeback
= 1;
14459 inst
.operands
[0].present
= 1;
14463 do_vfp_nsyn_push (void)
14467 constraint (inst
.operands
[1].imm
< 1 || inst
.operands
[1].imm
> 16,
14468 _("register list must contain at least 1 and at most 16 "
14471 if (inst
.operands
[1].issingle
)
14472 do_vfp_nsyn_opcode ("fstmdbs");
14474 do_vfp_nsyn_opcode ("fstmdbd");
14478 do_vfp_nsyn_pop (void)
14482 constraint (inst
.operands
[1].imm
< 1 || inst
.operands
[1].imm
> 16,
14483 _("register list must contain at least 1 and at most 16 "
14486 if (inst
.operands
[1].issingle
)
14487 do_vfp_nsyn_opcode ("fldmias");
14489 do_vfp_nsyn_opcode ("fldmiad");
14492 /* Fix up Neon data-processing instructions, ORing in the correct bits for
14493 ARM mode or Thumb mode and moving the encoded bit 24 to bit 28. */
14496 neon_dp_fixup (struct arm_it
* insn
)
14498 unsigned int i
= insn
->instruction
;
14503 /* The U bit is at bit 24 by default. Move to bit 28 in Thumb mode. */
14514 insn
->instruction
= i
;
/* Turn a size (8, 16, 32, 64) into the respective bit number minus 3
   (0, 1, 2, 3 respectively).  */

static unsigned
neon_logbits (unsigned x)
{
  unsigned bit = ffs (x);
  return bit - 4;
}

#define LOW4(R) ((R) & 0xf)
#define HI1(R)  (((R) >> 4) & 1)
14529 /* Encode insns with bit pattern:
14531 |28/24|23|22 |21 20|19 16|15 12|11 8|7|6|5|4|3 0|
14532 | U |x |D |size | Rn | Rd |x x x x|N|Q|M|x| Rm |
14534 SIZE is passed in bits. -1 means size field isn't changed, in case it has a
14535 different meaning for some instruction. */
14538 neon_three_same (int isquad
, int ubit
, int size
)
14540 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
14541 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
14542 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
14543 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
14544 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
14545 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
14546 inst
.instruction
|= (isquad
!= 0) << 6;
14547 inst
.instruction
|= (ubit
!= 0) << 24;
14549 inst
.instruction
|= neon_logbits (size
) << 20;
14551 neon_dp_fixup (&inst
);
14554 /* Encode instructions of the form:
14556 |28/24|23|22|21 20|19 18|17 16|15 12|11 7|6|5|4|3 0|
14557 | U |x |D |x x |size |x x | Rd |x x x x x|Q|M|x| Rm |
14559 Don't write size if SIZE == -1. */
14562 neon_two_same (int qbit
, int ubit
, int size
)
14564 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
14565 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
14566 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
14567 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
14568 inst
.instruction
|= (qbit
!= 0) << 6;
14569 inst
.instruction
|= (ubit
!= 0) << 24;
14572 inst
.instruction
|= neon_logbits (size
) << 18;
14574 neon_dp_fixup (&inst
);
14577 /* Neon instruction encoders, in approximate order of appearance. */
14580 do_neon_dyadic_i_su (void)
14582 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
14583 struct neon_type_el et
= neon_check_type (3, rs
,
14584 N_EQK
, N_EQK
, N_SU_32
| N_KEY
);
14585 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
14589 do_neon_dyadic_i64_su (void)
14591 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
14592 struct neon_type_el et
= neon_check_type (3, rs
,
14593 N_EQK
, N_EQK
, N_SU_ALL
| N_KEY
);
14594 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
14598 neon_imm_shift (int write_ubit
, int uval
, int isquad
, struct neon_type_el et
,
14601 unsigned size
= et
.size
>> 3;
14602 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
14603 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
14604 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
14605 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
14606 inst
.instruction
|= (isquad
!= 0) << 6;
14607 inst
.instruction
|= immbits
<< 16;
14608 inst
.instruction
|= (size
>> 3) << 7;
14609 inst
.instruction
|= (size
& 0x7) << 19;
14611 inst
.instruction
|= (uval
!= 0) << 24;
14613 neon_dp_fixup (&inst
);
14617 do_neon_shl_imm (void)
14619 if (!inst
.operands
[2].isreg
)
14621 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
14622 struct neon_type_el et
= neon_check_type (2, rs
, N_EQK
, N_KEY
| N_I_ALL
);
14623 int imm
= inst
.operands
[2].imm
;
14625 constraint (imm
< 0 || (unsigned)imm
>= et
.size
,
14626 _("immediate out of range for shift"));
14627 NEON_ENCODE (IMMED
, inst
);
14628 neon_imm_shift (FALSE
, 0, neon_quad (rs
), et
, imm
);
14632 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
14633 struct neon_type_el et
= neon_check_type (3, rs
,
14634 N_EQK
, N_SU_ALL
| N_KEY
, N_EQK
| N_SGN
);
14637 /* VSHL/VQSHL 3-register variants have syntax such as:
14639 whereas other 3-register operations encoded by neon_three_same have
14642 (i.e. with Dn & Dm reversed). Swap operands[1].reg and operands[2].reg
14644 tmp
= inst
.operands
[2].reg
;
14645 inst
.operands
[2].reg
= inst
.operands
[1].reg
;
14646 inst
.operands
[1].reg
= tmp
;
14647 NEON_ENCODE (INTEGER
, inst
);
14648 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
14653 do_neon_qshl_imm (void)
14655 if (!inst
.operands
[2].isreg
)
14657 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
14658 struct neon_type_el et
= neon_check_type (2, rs
, N_EQK
, N_SU_ALL
| N_KEY
);
14659 int imm
= inst
.operands
[2].imm
;
14661 constraint (imm
< 0 || (unsigned)imm
>= et
.size
,
14662 _("immediate out of range for shift"));
14663 NEON_ENCODE (IMMED
, inst
);
14664 neon_imm_shift (TRUE
, et
.type
== NT_unsigned
, neon_quad (rs
), et
, imm
);
14668 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
14669 struct neon_type_el et
= neon_check_type (3, rs
,
14670 N_EQK
, N_SU_ALL
| N_KEY
, N_EQK
| N_SGN
);
14673 /* See note in do_neon_shl_imm. */
14674 tmp
= inst
.operands
[2].reg
;
14675 inst
.operands
[2].reg
= inst
.operands
[1].reg
;
14676 inst
.operands
[1].reg
= tmp
;
14677 NEON_ENCODE (INTEGER
, inst
);
14678 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
14683 do_neon_rshl (void)
14685 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
14686 struct neon_type_el et
= neon_check_type (3, rs
,
14687 N_EQK
, N_EQK
, N_SU_ALL
| N_KEY
);
14690 tmp
= inst
.operands
[2].reg
;
14691 inst
.operands
[2].reg
= inst
.operands
[1].reg
;
14692 inst
.operands
[1].reg
= tmp
;
14693 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
14697 neon_cmode_for_logic_imm (unsigned immediate
, unsigned *immbits
, int size
)
14699 /* Handle .I8 pseudo-instructions. */
14702 /* Unfortunately, this will make everything apart from zero out-of-range.
14703 FIXME is this the intended semantics? There doesn't seem much point in
14704 accepting .I8 if so. */
14705 immediate
|= immediate
<< 8;
14711 if (immediate
== (immediate
& 0x000000ff))
14713 *immbits
= immediate
;
14716 else if (immediate
== (immediate
& 0x0000ff00))
14718 *immbits
= immediate
>> 8;
14721 else if (immediate
== (immediate
& 0x00ff0000))
14723 *immbits
= immediate
>> 16;
14726 else if (immediate
== (immediate
& 0xff000000))
14728 *immbits
= immediate
>> 24;
14731 if ((immediate
& 0xffff) != (immediate
>> 16))
14732 goto bad_immediate
;
14733 immediate
&= 0xffff;
14736 if (immediate
== (immediate
& 0x000000ff))
14738 *immbits
= immediate
;
14741 else if (immediate
== (immediate
& 0x0000ff00))
14743 *immbits
= immediate
>> 8;
14748 first_error (_("immediate value out of range"));
14753 do_neon_logic (void)
14755 if (inst
.operands
[2].present
&& inst
.operands
[2].isreg
)
14757 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
14758 neon_check_type (3, rs
, N_IGNORE_TYPE
);
14759 /* U bit and size field were set as part of the bitmask. */
14760 NEON_ENCODE (INTEGER
, inst
);
14761 neon_three_same (neon_quad (rs
), 0, -1);
14765 const int three_ops_form
= (inst
.operands
[2].present
14766 && !inst
.operands
[2].isreg
);
14767 const int immoperand
= (three_ops_form
? 2 : 1);
14768 enum neon_shape rs
= (three_ops_form
14769 ? neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
)
14770 : neon_select_shape (NS_DI
, NS_QI
, NS_NULL
));
14771 struct neon_type_el et
= neon_check_type (2, rs
,
14772 N_I8
| N_I16
| N_I32
| N_I64
| N_F32
| N_KEY
, N_EQK
);
14773 enum neon_opc opcode
= (enum neon_opc
) inst
.instruction
& 0x0fffffff;
14777 if (et
.type
== NT_invtype
)
14780 if (three_ops_form
)
14781 constraint (inst
.operands
[0].reg
!= inst
.operands
[1].reg
,
14782 _("first and second operands shall be the same register"));
14784 NEON_ENCODE (IMMED
, inst
);
14786 immbits
= inst
.operands
[immoperand
].imm
;
14789 /* .i64 is a pseudo-op, so the immediate must be a repeating
14791 if (immbits
!= (inst
.operands
[immoperand
].regisimm
?
14792 inst
.operands
[immoperand
].reg
: 0))
14794 /* Set immbits to an invalid constant. */
14795 immbits
= 0xdeadbeef;
14802 cmode
= neon_cmode_for_logic_imm (immbits
, &immbits
, et
.size
);
14806 cmode
= neon_cmode_for_logic_imm (immbits
, &immbits
, et
.size
);
14810 /* Pseudo-instruction for VBIC. */
14811 neon_invert_size (&immbits
, 0, et
.size
);
14812 cmode
= neon_cmode_for_logic_imm (immbits
, &immbits
, et
.size
);
14816 /* Pseudo-instruction for VORR. */
14817 neon_invert_size (&immbits
, 0, et
.size
);
14818 cmode
= neon_cmode_for_logic_imm (immbits
, &immbits
, et
.size
);
14828 inst
.instruction
|= neon_quad (rs
) << 6;
14829 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
14830 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
14831 inst
.instruction
|= cmode
<< 8;
14832 neon_write_immbits (immbits
);
14834 neon_dp_fixup (&inst
);
14839 do_neon_bitfield (void)
14841 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
14842 neon_check_type (3, rs
, N_IGNORE_TYPE
);
14843 neon_three_same (neon_quad (rs
), 0, -1);
14847 neon_dyadic_misc (enum neon_el_type ubit_meaning
, unsigned types
,
14850 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
14851 struct neon_type_el et
= neon_check_type (3, rs
, N_EQK
| destbits
, N_EQK
,
14853 if (et
.type
== NT_float
)
14855 NEON_ENCODE (FLOAT
, inst
);
14856 neon_three_same (neon_quad (rs
), 0, et
.size
== 16 ? (int) et
.size
: -1);
14860 NEON_ENCODE (INTEGER
, inst
);
14861 neon_three_same (neon_quad (rs
), et
.type
== ubit_meaning
, et
.size
);
14866 do_neon_dyadic_if_su (void)
14868 neon_dyadic_misc (NT_unsigned
, N_SUF_32
, 0);
14872 do_neon_dyadic_if_su_d (void)
14874 /* This version only allow D registers, but that constraint is enforced during
14875 operand parsing so we don't need to do anything extra here. */
14876 neon_dyadic_misc (NT_unsigned
, N_SUF_32
, 0);
14880 do_neon_dyadic_if_i_d (void)
14882 /* The "untyped" case can't happen. Do this to stop the "U" bit being
14883 affected if we specify unsigned args. */
14884 neon_dyadic_misc (NT_untyped
, N_IF_32
, 0);
/* Flags for vfp_or_neon_is_neon (), combinable as a bitmask.  */
enum vfp_or_neon_is_neon_bits
{
  NEON_CHECK_CC = 1,
  NEON_CHECK_ARCH = 2,
  NEON_CHECK_ARCH8 = 4
};
/* Call this function if an instruction which may have belonged to the VFP or
   Neon instruction sets, but turned out to be a Neon instruction (due to the
   operand types involved, etc.).  We have to check and/or fix-up a couple of
   things:

     - Make sure the user hasn't attempted to make a Neon instruction
       conditional.
     - Alter the value in the condition code field if necessary.
     - Make sure that the arch supports Neon instructions.

   Which of these operations take place depends on bits from enum
   vfp_or_neon_is_neon_bits.

   WARNING: This function has side effects!  If NEON_CHECK_CC is used and the
   current instruction's condition is COND_ALWAYS, the condition field is
   changed to inst.uncond_value.  This is necessary because instructions shared
   between VFP and Neon may be conditional for the VFP variants only, and the
   unconditional Neon version must have, e.g., 0xF in the condition field.  */

static int
vfp_or_neon_is_neon (unsigned check)
{
  /* Conditions are always legal in Thumb mode (IT blocks).  */
  if (!thumb_mode && (check & NEON_CHECK_CC))
    {
      if (inst.cond != COND_ALWAYS)
	{
	  first_error (_(BAD_COND));
	  return FAIL;
	}
      if (inst.uncond_value != -1)
	inst.instruction |= inst.uncond_value << 28;
    }

  if ((check & NEON_CHECK_ARCH)
      && !mark_feature_used (&fpu_neon_ext_v1))
    {
      first_error (_(BAD_FPU));
      return FAIL;
    }

  if ((check & NEON_CHECK_ARCH8)
      && !mark_feature_used (&fpu_neon_ext_armv8))
    {
      first_error (_(BAD_FPU));
      return FAIL;
    }

  return SUCCESS;
}
/* Encode VADD/VSUB: try the VFP (Neon-syntax) form first, else fall back to
   the Neon integer/float encoding.  */

static void
do_neon_addsub_if_i (void)
{
  if (try_vfp_nsyn (3, do_vfp_nsyn_add_sub) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
    return;

  /* The "untyped" case can't happen.  Do this to stop the "U" bit being
     affected if we specify unsigned args.  */
  neon_dyadic_misc (NT_untyped, N_IF_32 | N_I64, 0);
}
/* Swaps operands 1 and 2.  If operand 1 (optional arg) was omitted, we want the
   result to end up as:

     V<op> A,B     (A is operand 0, B is operand 2)

   to mean:

     V<op> A,B,A

   not:

     V<op> A,B,B

   so handle that case specially.  */

static void
neon_exchange_operands (void)
{
  if (inst.operands[1].present)
    {
      /* Three-operand form: swap via a heap temporary.  */
      void *scratch = xmalloc (sizeof (inst.operands[0]));

      /* Swap operands[1] and operands[2].  */
      memcpy (scratch, &inst.operands[1], sizeof (inst.operands[0]));
      inst.operands[1] = inst.operands[2];
      memcpy (&inst.operands[2], scratch, sizeof (inst.operands[0]));
      free (scratch);
    }
  else
    {
      /* Two-operand form: duplicate operand 0 into the vacated slot.  */
      inst.operands[1] = inst.operands[2];
      inst.operands[2] = inst.operands[0];
    }
}
/* Encode a Neon compare instruction.  REGTYPES are the types allowed for the
   register-register form, IMMTYPES for the compare-with-#0 form; INVERT
   requests operand exchange (for e.g. VCLT encoded as VCGT).  */

static void
neon_compare (unsigned regtypes, unsigned immtypes, int invert)
{
  if (inst.operands[2].isreg)
    {
      if (invert)
	/* e.g. VLT -> VGT with swapped operands.  */
	neon_exchange_operands ();
      neon_dyadic_misc (NT_unsigned, regtypes, N_SIZ);
    }
  else
    {
      /* Comparison against immediate #0.  */
      enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
	N_EQK | N_SIZ, immtypes | N_KEY);

      NEON_ENCODE (IMMED, inst);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      inst.instruction |= (et.type == NT_float) << 10;
      inst.instruction |= neon_logbits (et.size) << 18;

      neon_dp_fixup (&inst);
    }
}
/* VCGT/VCGE etc.: non-inverted compare.  */

static void
do_neon_cmp (void)
{
  neon_compare (N_SUF_32, N_S_32 | N_F_16_32, FALSE);
}
/* VCLT/VCLE etc.: compare with operands exchanged.  */

static void
do_neon_cmp_inv (void)
{
  neon_compare (N_SUF_32, N_S_32 | N_F_16_32, TRUE);
}
/* VCEQ: equality compare, integer or float types.  */

static void
do_neon_ceq (void)
{
  neon_compare (N_IF_32, N_IF_32, FALSE);
}
/* For multiply instructions, we have the possibility of 16-bit or 32-bit
   scalars, which are encoded in 5 bits, M : Rm.
   For 16-bit scalars, the register is encoded in Rm[2:0] and the index in
   M:Rm[3], and for 32-bit scalars, the register is encoded in Rm[3:0] and the
   index in M.  */

static unsigned
neon_scalar_for_mul (unsigned scalar, unsigned elsize)
{
  unsigned regno = NEON_SCALAR_REG (scalar);
  unsigned elno = NEON_SCALAR_INDEX (scalar);

  switch (elsize)
    {
    case 16:
      if (regno > 7 || elno > 3)
	goto bad_scalar;
      return regno | (elno << 3);

    case 32:
      if (regno > 15 || elno > 1)
	goto bad_scalar;
      return regno | (elno << 4);

    default:
    bad_scalar:
      first_error (_("scalar out of range for multiply instruction"));
    }

  return 0;
}
/* Encode multiply / multiply-accumulate scalar instructions.  ET is the
   checked operand type; UBIT nonzero sets the U bit (bit 24).  */

static void
neon_mul_mac (struct neon_type_el et, int ubit)
{
  unsigned scalar;

  /* Give a more helpful error message if we have an invalid type.  */
  if (et.type == NT_invtype)
    return;

  scalar = neon_scalar_for_mul (inst.operands[2].reg, et.size);
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (scalar);
  inst.instruction |= HI1 (scalar) << 5;
  inst.instruction |= (et.type == NT_float) << 8;
  inst.instruction |= neon_logbits (et.size) << 20;
  inst.instruction |= (ubit != 0) << 24;

  neon_dp_fixup (&inst);
}
/* Encode VMLA/VMLS, which may use either a register or a scalar as the
   third operand; VFP nsyn forms are tried first.  */

static void
do_neon_mac_maybe_scalar (void)
{
  if (try_vfp_nsyn (3, do_vfp_nsyn_mla_mls) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
    return;

  if (inst.operands[2].isscalar)
    {
      enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_EQK, N_I16 | N_I32 | N_F_16_32 | N_KEY);
      NEON_ENCODE (SCALAR, inst);
      neon_mul_mac (et, neon_quad (rs));
    }
  else
    {
      /* The "untyped" case can't happen.  Do this to stop the "U" bit being
	 affected if we specify unsigned args.  */
      neon_dyadic_misc (NT_untyped, N_IF_32, 0);
    }
}
/* Encode VFMA/VFMS: VFP nsyn forms first, then the Neon encoding.  */

static void
do_neon_fmac (void)
{
  if (try_vfp_nsyn (3, do_vfp_nsyn_fma_fms) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
    return;

  neon_dyadic_misc (NT_untyped, N_IF_32, 0);
}
/* VTST: three-same-shape bitwise test, 8/16/32-bit elements.  */

static void
do_neon_tst (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs,
    N_EQK, N_EQK, N_8 | N_16 | N_32 | N_KEY);
  neon_three_same (neon_quad (rs), 0, et.size);
}
/* VMUL with 3 registers allows the P8 type.  The scalar version supports the
   same types as the MAC equivalents.  The polynomial type for this instruction
   is encoded the same as the integer type.  */

static void
do_neon_mul (void)
{
  if (try_vfp_nsyn (3, do_vfp_nsyn_mul) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
    return;

  if (inst.operands[2].isscalar)
    do_neon_mac_maybe_scalar ();
  else
    neon_dyadic_misc (NT_poly, N_I8 | N_I16 | N_I32 | N_F16 | N_F32 | N_P8, 0);
}
/* VQDMULH/VQRDMULH: saturating doubling multiply high, register or
   scalar third operand, signed 16/32-bit elements only.  */

static void
do_neon_qdmulh (void)
{
  if (inst.operands[2].isscalar)
    {
      enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
      NEON_ENCODE (SCALAR, inst);
      neon_mul_mac (et, neon_quad (rs));
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
      NEON_ENCODE (INTEGER, inst);
      /* The U bit (rounding) comes from bit mask.  */
      neon_three_same (neon_quad (rs), 0, et.size);
    }
}
/* VQRDMLAH/VQRDMLSH (ARMv8.1 AdvSIMD): like VQDMULH but accumulating;
   diagnoses missing architecture support before encoding.  */

static void
do_neon_qrdmlah (void)
{
  /* Check we're on the correct architecture.  */
  if (!mark_feature_used (&fpu_neon_ext_armv8))
    inst.error =
      _("instruction form not available on this architecture.");
  else if (!mark_feature_used (&fpu_neon_ext_v8_1))
    {
      as_warn (_("this instruction implies use of ARMv8.1 AdvSIMD."));
      record_feature_use (&fpu_neon_ext_v8_1);
    }

  if (inst.operands[2].isscalar)
    {
      enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
      NEON_ENCODE (SCALAR, inst);
      neon_mul_mac (et, neon_quad (rs));
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
      NEON_ENCODE (INTEGER, inst);
      /* The U bit (rounding) comes from bit mask.  */
      neon_three_same (neon_quad (rs), 0, et.size);
    }
}
/* VACGE/VACGT: absolute compare, float types only.  */

static void
do_neon_fcmp_absolute (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs, N_EQK, N_EQK,
					    N_F_16_32 | N_KEY);
  /* Size field comes from bit mask.  */
  neon_three_same (neon_quad (rs), 1, et.size == 16 ? (int) et.size : -1);
}
/* VACLE/VACLT: as do_neon_fcmp_absolute, with operands exchanged.  */

static void
do_neon_fcmp_absolute_inv (void)
{
  neon_exchange_operands ();
  do_neon_fcmp_absolute ();
}
/* VRECPS/VRSQRTS: Newton-Raphson step instructions, float types only.  */

static void
do_neon_step (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs, N_EQK, N_EQK,
					    N_F_16_32 | N_KEY);
  neon_three_same (neon_quad (rs), 0, et.size == 16 ? (int) et.size : -1);
}
/* VABS/VNEG: VFP nsyn form first, else two-register Neon encoding for
   signed 32-bit integer or 16/32-bit float elements.  */

static void
do_neon_abs_neg (void)
{
  enum neon_shape rs;
  struct neon_type_el et;

  if (try_vfp_nsyn (2, do_vfp_nsyn_abs_neg) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
    return;

  rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  et = neon_check_type (2, rs, N_EQK, N_S_32 | N_F_16_32 | N_KEY);

  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  inst.instruction |= neon_quad (rs) << 6;
  inst.instruction |= (et.type == NT_float) << 10;
  inst.instruction |= neon_logbits (et.size) << 18;

  neon_dp_fixup (&inst);
}
/* VSLI: shift left and insert; the immediate must be < element size.  */

static void
do_neon_sli (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
  int imm = inst.operands[2].imm;
  constraint (imm < 0 || (unsigned)imm >= et.size,
	      _("immediate out of range for insert"));
  neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
}
/* VSRI: shift right and insert; the immediate is 1..element size, and is
   encoded as (size - imm) like other right shifts.  */

static void
do_neon_sri (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
  int imm = inst.operands[2].imm;
  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range for insert"));
  neon_imm_shift (FALSE, 0, neon_quad (rs), et, et.size - imm);
}
/* VQSHLU: saturating shift left (unsigned result of signed input) by
   immediate.  */

static void
do_neon_qshlu_imm (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK | N_UNS, N_S8 | N_S16 | N_S32 | N_S64 | N_KEY);
  int imm = inst.operands[2].imm;
  constraint (imm < 0 || (unsigned)imm >= et.size,
	      _("immediate out of range for shift"));
  /* Only encodes the 'U present' variant of the instruction.
     In this case, signed types have OP (bit 8) set to 0.
     Unsigned types have OP set to 1.  */
  inst.instruction |= (et.type == NT_unsigned) << 8;
  /* The rest of the bits are the same as other immediate shifts.  */
  neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
}
/* VQMOVN: saturating narrowing move, Q source to D destination.  */

static void
do_neon_qmovn (void)
{
  struct neon_type_el et = neon_check_type (2, NS_DQ,
    N_EQK | N_HLF, N_SU_16_64 | N_KEY);
  /* Saturating move where operands can be signed or unsigned, and the
     destination has the same signedness.  */
  NEON_ENCODE (INTEGER, inst);
  if (et.type == NT_unsigned)
    inst.instruction |= 0xc0;
  else
    inst.instruction |= 0x80;
  neon_two_same (0, 1, et.size / 2);
}
/* VQMOVUN: saturating narrowing move with unsigned result.  */

static void
do_neon_qmovun (void)
{
  struct neon_type_el et = neon_check_type (2, NS_DQ,
    N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
  /* Saturating move with unsigned results.  Operands must be signed.  */
  NEON_ENCODE (INTEGER, inst);
  neon_two_same (0, 1, et.size / 2);
}
/* VQSHRN/VQRSHRN: saturating shift right and narrow; #0 is rewritten as
   VQMOVN.  */

static void
do_neon_rshift_sat_narrow (void)
{
  /* FIXME: Types for narrowing.  If operands are signed, results can be signed
     or unsigned.  If operands are unsigned, results must also be unsigned.  */
  struct neon_type_el et = neon_check_type (2, NS_DQI,
    N_EQK | N_HLF, N_SU_16_64 | N_KEY);
  int imm = inst.operands[2].imm;
  /* This gets the bounds check, size encoding and immediate bits calculation
     right.  */
  et.size /= 2;

  /* VQ{R}SHRN.I<size> <Dd>, <Qm>, #0 is a synonym for
     VQMOVN.I<size> <Dd>, <Qm>.  */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      inst.instruction = N_MNEM_vqmovn;
      do_neon_qmovn ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range"));
  neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, et.size - imm);
}
/* VQSHRUN/VQRSHRUN: saturating shift right and narrow, unsigned result;
   #0 is rewritten as VQMOVUN.  */

static void
do_neon_rshift_sat_narrow_u (void)
{
  /* FIXME: Types for narrowing.  If operands are signed, results can be signed
     or unsigned.  If operands are unsigned, results must also be unsigned.  */
  struct neon_type_el et = neon_check_type (2, NS_DQI,
    N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
  int imm = inst.operands[2].imm;
  /* This gets the bounds check, size encoding and immediate bits calculation
     right.  */
  et.size /= 2;

  /* VQSHRUN.I<size> <Dd>, <Qm>, #0 is a synonym for
     VQMOVUN.I<size> <Dd>, <Qm>.  */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      inst.instruction = N_MNEM_vqmovun;
      do_neon_qmovun ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range"));
  /* FIXME: The manual is kind of unclear about what value U should have in
     VQ{R}SHRUN instructions, but U=0, op=0 definitely encodes VRSHR, so it
     must be 1.  */
  neon_imm_shift (TRUE, 1, 0, et, et.size - imm);
}
/* VMOVN: narrowing move, Q source to D destination.  */

static void
do_neon_movn (void)
{
  struct neon_type_el et = neon_check_type (2, NS_DQ,
    N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
  NEON_ENCODE (INTEGER, inst);
  neon_two_same (0, 1, et.size / 2);
}
/* VSHRN/VRSHRN: shift right and narrow; #0 is rewritten as VMOVN.  */

static void
do_neon_rshift_narrow (void)
{
  struct neon_type_el et = neon_check_type (2, NS_DQI,
    N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
  int imm = inst.operands[2].imm;
  /* This gets the bounds check, size encoding and immediate bits calculation
     right.  */
  et.size /= 2;

  /* If immediate is zero then we are a pseudo-instruction for
     VMOVN.I<size> <Dd>, <Qm>  */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      inst.instruction = N_MNEM_vmovn;
      do_neon_movn ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range for narrowing operation"));
  neon_imm_shift (FALSE, 0, 0, et, et.size - imm);
}
/* VSHLL: shift left long.  The maximum-shift form (imm == element size) has
   a distinct integer encoding; other shifts use the immediate encoding.  */

static void
do_neon_shll (void)
{
  /* FIXME: Type checking when lengthening.  */
  struct neon_type_el et = neon_check_type (2, NS_QDI,
    N_EQK | N_DBL, N_I8 | N_I16 | N_I32 | N_KEY);
  unsigned imm = inst.operands[2].imm;

  if (imm == et.size)
    {
      /* Maximum shift variant.  */
      NEON_ENCODE (INTEGER, inst);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_logbits (et.size) << 18;

      neon_dp_fixup (&inst);
    }
  else
    {
      /* A more-specific type check for non-max versions.  */
      et = neon_check_type (2, NS_QDI,
	N_EQK | N_DBL, N_SU_32 | N_KEY);
      NEON_ENCODE (IMMED, inst);
      neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, imm);
    }
}
/* Check the various types for the VCVT instruction, and return which version
   the current instruction is.  Each CVT_VAR entry lists: flavour name,
   destination type, source type, extra shape bits, and the VFP nsyn opcode
   names for the bitshift, plain, and round-to-zero variants (or NULL).  */

#define CVT_FLAVOUR_VAR							      \
  CVT_VAR (s32_f32, N_S32, N_F32, whole_reg, "ftosls", "ftosis", "ftosizs")   \
  CVT_VAR (u32_f32, N_U32, N_F32, whole_reg, "ftouls", "ftouis", "ftouizs")   \
  CVT_VAR (f32_s32, N_F32, N_S32, whole_reg, "fsltos", "fsitos", NULL)	      \
  CVT_VAR (f32_u32, N_F32, N_U32, whole_reg, "fultos", "fuitos", NULL)	      \
  /* Half-precision conversions.  */					      \
  CVT_VAR (s16_f16, N_S16, N_F16 | N_KEY, whole_reg, NULL, NULL, NULL)	      \
  CVT_VAR (u16_f16, N_U16, N_F16 | N_KEY, whole_reg, NULL, NULL, NULL)	      \
  CVT_VAR (f16_s16, N_F16 | N_KEY, N_S16, whole_reg, NULL, NULL, NULL)	      \
  CVT_VAR (f16_u16, N_F16 | N_KEY, N_U16, whole_reg, NULL, NULL, NULL)	      \
  CVT_VAR (f32_f16, N_F32, N_F16, whole_reg, NULL, NULL, NULL)		      \
  CVT_VAR (f16_f32, N_F16, N_F32, whole_reg, NULL, NULL, NULL)		      \
  /* New VCVT instructions introduced by ARMv8.2 fp16 extension.	      \
     Compared with single/double precision variants, only the co-processor    \
     field is different, so the encoding flow is reused here.  */	      \
  CVT_VAR (f16_s32, N_F16 | N_KEY, N_S32, N_VFP, "fsltos", "fsitos", NULL)    \
  CVT_VAR (f16_u32, N_F16 | N_KEY, N_U32, N_VFP, "fultos", "fuitos", NULL)    \
  CVT_VAR (u32_f16, N_U32, N_F16 | N_KEY, N_VFP, "ftouls", "ftouis", "ftouizs")\
  CVT_VAR (s32_f16, N_S32, N_F16 | N_KEY, N_VFP, "ftosls", "ftosis", "ftosizs")\
  /* VFP instructions.  */						      \
  CVT_VAR (f32_f64, N_F32, N_F64, N_VFP, NULL, "fcvtsd", NULL)		      \
  CVT_VAR (f64_f32, N_F64, N_F32, N_VFP, NULL, "fcvtds", NULL)		      \
  CVT_VAR (s32_f64, N_S32, N_F64 | key, N_VFP, "ftosld", "ftosid", "ftosizd") \
  CVT_VAR (u32_f64, N_U32, N_F64 | key, N_VFP, "ftould", "ftouid", "ftouizd") \
  CVT_VAR (f64_s32, N_F64 | key, N_S32, N_VFP, "fsltod", "fsitod", NULL)      \
  CVT_VAR (f64_u32, N_F64 | key, N_U32, N_VFP, "fultod", "fuitod", NULL)      \
  /* VFP instructions with bitshift.  */				      \
  CVT_VAR (f32_s16, N_F32 | key, N_S16, N_VFP, "fshtos", NULL, NULL)	      \
  CVT_VAR (f32_u16, N_F32 | key, N_U16, N_VFP, "fuhtos", NULL, NULL)	      \
  CVT_VAR (f64_s16, N_F64 | key, N_S16, N_VFP, "fshtod", NULL, NULL)	      \
  CVT_VAR (f64_u16, N_F64 | key, N_U16, N_VFP, "fuhtod", NULL, NULL)	      \
  CVT_VAR (s16_f32, N_S16, N_F32 | key, N_VFP, "ftoshs", NULL, NULL)	      \
  CVT_VAR (u16_f32, N_U16, N_F32 | key, N_VFP, "ftouhs", NULL, NULL)	      \
  CVT_VAR (s16_f64, N_S16, N_F64 | key, N_VFP, "ftoshd", NULL, NULL)	      \
  CVT_VAR (u16_f64, N_U16, N_F64 | key, N_VFP, "ftouhd", NULL, NULL)
/* Expand CVT_FLAVOUR_VAR into one enumerator per conversion flavour.  */
#define CVT_VAR(C, X, Y, R, BSN, CN, ZN) \
  neon_cvt_flavour_##C,

/* The different types of conversions we can do.  */
enum neon_cvt_flavour
{
  CVT_FLAVOUR_VAR
  neon_cvt_flavour_invalid,
  neon_cvt_flavour_first_fp = neon_cvt_flavour_f32_f64
};

#undef CVT_VAR
/* Determine the conversion flavour matching the current instruction's
   operand types for shape RS, or neon_cvt_flavour_invalid if none match.
   Clears inst.error on a successful match (neon_check_type sets it on each
   failed probe).  */

static enum neon_cvt_flavour
get_neon_cvt_flavour (enum neon_shape rs)
{
#define CVT_VAR(C,X,Y,R,BSN,CN,ZN)			\
  et = neon_check_type (2, rs, (R) | (X), (R) | (Y));	\
  if (et.type != NT_invtype)				\
    {							\
      inst.error = NULL;				\
      return (neon_cvt_flavour_##C);			\
    }

  struct neon_type_el et;
  unsigned whole_reg = (rs == NS_FFI || rs == NS_FD || rs == NS_DF
			|| rs == NS_FF) ? N_VFP : 0;
  /* The instruction versions which take an immediate take one register
     argument, which is extended to the width of the full register.  Thus the
     "source" and "destination" registers must have the same width.  Hack that
     here by making the size equal to the key (wider, in this case) operand.  */
  unsigned key = (rs == NS_QQI || rs == NS_DDI || rs == NS_FFI) ? N_KEY : 0;

  CVT_FLAVOUR_VAR;

  return neon_cvt_flavour_invalid;
#undef CVT_VAR
}
/* Neon-syntax VFP conversions.  Picks the VFP nsyn opcode name for FLAVOUR
   (bitshift variant for immediate shapes, plain variant otherwise) and emits
   it via do_vfp_nsyn_opcode.  */

static void
do_vfp_nsyn_cvt (enum neon_shape rs, enum neon_cvt_flavour flavour)
{
  const char *opname = 0;

  if (rs == NS_DDI || rs == NS_QQI || rs == NS_FFI
      || rs == NS_FHI || rs == NS_HFI)
    {
      /* Conversions with immediate bitshift.  */
      const char *enc[] =
	{
#define CVT_VAR(C,A,B,R,BSN,CN,ZN) BSN,
	  CVT_FLAVOUR_VAR
	  NULL
#undef CVT_VAR
	};

      if (flavour < (int) ARRAY_SIZE (enc))
	{
	  opname = enc[flavour];
	  constraint (inst.operands[0].reg != inst.operands[1].reg,
		      _("operands 0 and 1 must be the same register"));
	  /* Drop the duplicated destination operand; the shift immediate
	     becomes operand 1.  */
	  inst.operands[1] = inst.operands[2];
	  memset (&inst.operands[2], '\0', sizeof (inst.operands[2]));
	}
    }
  else
    {
      /* Conversions without bitshift.  */
      const char *enc[] =
	{
#define CVT_VAR(C,A,B,R,BSN,CN,ZN) CN,
	  CVT_FLAVOUR_VAR
	  NULL
#undef CVT_VAR
	};

      if (flavour < (int) ARRAY_SIZE (enc))
	opname = enc[flavour];
    }

  if (opname)
    do_vfp_nsyn_opcode (opname);

  /* ARMv8.2 fp16 VCVT instruction.  */
  if (flavour == neon_cvt_flavour_s32_f16
      || flavour == neon_cvt_flavour_u32_f16
      || flavour == neon_cvt_flavour_f16_u32
      || flavour == neon_cvt_flavour_f16_s32)
    do_scalar_fp16_v82_encode ();
}
/* Emit the round-to-zero variant of a VFP nsyn conversion, when one exists
   for the matched flavour.  */

static void
do_vfp_nsyn_cvtz (void)
{
  enum neon_shape rs = neon_select_shape (NS_FH, NS_FF, NS_FD, NS_NULL);
  enum neon_cvt_flavour flavour = get_neon_cvt_flavour (rs);
  const char *enc[] =
    {
#define CVT_VAR(C,A,B,R,BSN,CN,ZN) ZN,
      CVT_FLAVOUR_VAR
      NULL
#undef CVT_VAR
    };

  if (flavour < (int) ARRAY_SIZE (enc) && enc[flavour])
    do_vfp_nsyn_opcode (enc[flavour]);
}
/* Encode an FP v8 VCVT{A,N,P,M} conversion: sz selects single/double source,
   op selects signed/unsigned result, rm encodes the rounding mode.  */

static void
do_vfp_nsyn_cvt_fpv8 (enum neon_cvt_flavour flavour,
		      enum neon_cvt_mode mode)
{
  int sz, op;
  int rm;

  /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
     D register operands.  */
  if (flavour == neon_cvt_flavour_s32_f64
      || flavour == neon_cvt_flavour_u32_f64)
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		_(BAD_FPU));

  if (flavour == neon_cvt_flavour_s32_f16
      || flavour == neon_cvt_flavour_u32_f16)
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16),
		_(BAD_FP16));

  set_it_insn_type (OUTSIDE_IT_INSN);

  switch (flavour)
    {
    case neon_cvt_flavour_s32_f64:
      sz = 1;
      op = 1;
      break;
    case neon_cvt_flavour_s32_f32:
      sz = 0;
      op = 1;
      break;
    case neon_cvt_flavour_s32_f16:
      sz = 0;
      op = 1;
      break;
    case neon_cvt_flavour_u32_f64:
      sz = 1;
      op = 0;
      break;
    case neon_cvt_flavour_u32_f32:
      sz = 0;
      op = 0;
      break;
    case neon_cvt_flavour_u32_f16:
      sz = 0;
      op = 0;
      break;
    default:
      first_error (_("invalid instruction shape"));
      return;
    }

  switch (mode)
    {
    case neon_cvt_mode_a: rm = 0; break;
    case neon_cvt_mode_n: rm = 1; break;
    case neon_cvt_mode_p: rm = 2; break;
    case neon_cvt_mode_m: rm = 3; break;
    default: first_error (_("invalid rounding mode")); return;
    }

  NEON_ENCODE (FPV8, inst);
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, sz == 1 ? VFP_REG_Dm : VFP_REG_Sm);
  inst.instruction |= sz << 8;

  /* ARMv8.2 fp16 VCVT instruction.  */
  if (flavour == neon_cvt_flavour_s32_f16
      ||flavour == neon_cvt_flavour_u32_f16)
    do_scalar_fp16_v82_encode ();
  inst.instruction |= op << 7;
  inst.instruction |= rm << 16;
  inst.instruction |= 0xf0000000;
  inst.is_neon = TRUE;
}
/* Common worker for all VCVT variants.  MODE selects the rounding behaviour;
   the operand shape and checked types decide between VFP encodings,
   fixed-point Neon encodings, rounding-mode Neon encodings, integer Neon
   encodings and the half-precision Advanced SIMD forms.  */

static void
do_neon_cvt_1 (enum neon_cvt_mode mode)
{
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_FFI, NS_DD, NS_QQ,
					  NS_FD, NS_DF, NS_FF, NS_QD, NS_DQ,
					  NS_FH, NS_HF, NS_FHI, NS_HFI,
					  NS_NULL);
  enum neon_cvt_flavour flavour = get_neon_cvt_flavour (rs);

  if (flavour == neon_cvt_flavour_invalid)
    return;

  /* PR11109: Handle round-to-zero for VCVT conversions.  */
  if (mode == neon_cvt_mode_z
      && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_vfp_v2)
      && (flavour == neon_cvt_flavour_s16_f16
	  || flavour == neon_cvt_flavour_u16_f16
	  || flavour == neon_cvt_flavour_s32_f32
	  || flavour == neon_cvt_flavour_u32_f32
	  || flavour == neon_cvt_flavour_s32_f64
	  || flavour == neon_cvt_flavour_u32_f64)
      && (rs == NS_FD || rs == NS_FF))
    {
      do_vfp_nsyn_cvtz ();
      return;
    }

  /* ARMv8.2 fp16 VCVT conversions.  */
  if (mode == neon_cvt_mode_z
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16)
      && (flavour == neon_cvt_flavour_s32_f16
	  || flavour == neon_cvt_flavour_u32_f16)
      && (rs == NS_FH))
    {
      do_vfp_nsyn_cvtz ();
      do_scalar_fp16_v82_encode ();
      return;
    }

  /* VFP rather than Neon conversions.  */
  if (flavour >= neon_cvt_flavour_first_fp)
    {
      if (mode == neon_cvt_mode_x || mode == neon_cvt_mode_z)
	do_vfp_nsyn_cvt (rs, flavour);
      else
	do_vfp_nsyn_cvt_fpv8 (flavour, mode);

      return;
    }

  switch (rs)
    {
    case NS_DDI:
    case NS_QQI:
      {
	/* Fixed-point conversion (immediate shift) encodings.  */
	unsigned immbits;
	unsigned enctab[] = {0x0000100, 0x1000100, 0x0, 0x1000000,
			     0x0000100, 0x1000100, 0x0, 0x1000000};

	if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	  return;

	/* Fixed-point conversion with #0 immediate is encoded as an
	   integer conversion.  */
	if (inst.operands[2].present && inst.operands[2].imm == 0)
	  goto int_encode;
	NEON_ENCODE (IMMED, inst);
	if (flavour != neon_cvt_flavour_invalid)
	  inst.instruction |= enctab[flavour];
	inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	inst.instruction |= LOW4 (inst.operands[1].reg);
	inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	inst.instruction |= neon_quad (rs) << 6;
	inst.instruction |= 1 << 21;
	if (flavour < neon_cvt_flavour_s16_f16)
	  {
	    inst.instruction |= 1 << 21;
	    immbits = 32 - inst.operands[2].imm;
	    inst.instruction |= immbits << 16;
	  }
	else
	  {
	    inst.instruction |= 3 << 20;
	    immbits = 16 - inst.operands[2].imm;
	    inst.instruction |= immbits << 16;
	    inst.instruction &= ~(1 << 9);
	  }

	neon_dp_fixup (&inst);
      }
      break;

    case NS_DD:
    case NS_QQ:
      if (mode != neon_cvt_mode_x && mode != neon_cvt_mode_z)
	{
	  /* ARMv8 rounding-mode conversions (VCVTA/N/P/M).  */
	  NEON_ENCODE (FLOAT, inst);
	  set_it_insn_type (OUTSIDE_IT_INSN);

	  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH8) == FAIL)
	    return;

	  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	  inst.instruction |= LOW4 (inst.operands[1].reg);
	  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	  inst.instruction |= neon_quad (rs) << 6;
	  inst.instruction |= (flavour == neon_cvt_flavour_u16_f16
			       || flavour == neon_cvt_flavour_u32_f32) << 7;
	  inst.instruction |= mode << 8;
	  if (flavour == neon_cvt_flavour_u16_f16
	      || flavour == neon_cvt_flavour_s16_f16)
	    /* Mask off the original size bits and reencode them.  */
	    inst.instruction = ((inst.instruction & 0xfff3ffff) | (1 << 18));

	  if (thumb_mode)
	    inst.instruction |= 0xfc000000;
	  else
	    inst.instruction |= 0xf0000000;
	}
      else
	{
    int_encode:
	  {
	    /* Plain integer <-> float conversion encodings.  */
	    unsigned enctab[] = { 0x100, 0x180, 0x0, 0x080,
				  0x100, 0x180, 0x0, 0x080};

	    NEON_ENCODE (INTEGER, inst);

	    if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	      return;

	    if (flavour != neon_cvt_flavour_invalid)
	      inst.instruction |= enctab[flavour];

	    inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	    inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	    inst.instruction |= LOW4 (inst.operands[1].reg);
	    inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	    inst.instruction |= neon_quad (rs) << 6;
	    if (flavour >= neon_cvt_flavour_s16_f16
		&& flavour <= neon_cvt_flavour_f16_u16)
	      /* Half precision.  */
	      inst.instruction |= 1 << 18;
	    else
	      inst.instruction |= 2 << 18;

	    neon_dp_fixup (&inst);
	  }
	}
      break;

    /* Half-precision conversions for Advanced SIMD -- neon.  */
    case NS_QD:
    case NS_DQ:

      if ((rs == NS_DQ)
	  && (inst.vectype.el[0].size != 16 || inst.vectype.el[1].size != 32))
	  {
	    as_bad (_("operand size must match register width"));
	    break;
	  }

      if ((rs == NS_QD)
	  && ((inst.vectype.el[0].size != 32 || inst.vectype.el[1].size != 16)))
	  {
	    as_bad (_("operand size must match register width"));
	    break;
	  }

      if (rs == NS_DQ)
	inst.instruction = 0x3b60600;
      else
	inst.instruction = 0x3b60700;

      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      neon_dp_fixup (&inst);
      break;

    default:
      /* Some VFP conversions go here (s32 <-> f32, u32 <-> f32).  */
      if (mode == neon_cvt_mode_x || mode == neon_cvt_mode_z)
	do_vfp_nsyn_cvt (rs, flavour);
      else
	do_vfp_nsyn_cvt_fpv8 (flavour, mode);
    }
}
/* VCVTR: use the FPSCR rounding mode ('x').  */

static void
do_neon_cvtr (void)
{
  do_neon_cvt_1 (neon_cvt_mode_x);
}
/* VCVT: round towards zero ('z').  */

static void
do_neon_cvt (void)
{
  do_neon_cvt_1 (neon_cvt_mode_z);
}
/* VCVTA: round to nearest, ties away from zero.  */

static void
do_neon_cvta (void)
{
  do_neon_cvt_1 (neon_cvt_mode_a);
}
/* VCVTN: round to nearest, ties to even.  */

static void
do_neon_cvtn (void)
{
  do_neon_cvt_1 (neon_cvt_mode_n);
}
/* VCVTP: round towards plus infinity.  */

static void
do_neon_cvtp (void)
{
  do_neon_cvt_1 (neon_cvt_mode_p);
}
/* VCVTM: round towards minus infinity.  */

static void
do_neon_cvtm (void)
{
  do_neon_cvt_1 (neon_cvt_mode_m);
}
/* Encode VCVTB/VCVTT.  T selects the top (vs. bottom) half, TO selects the
   to-half-precision direction, IS_DOUBLE selects the f64 form (which needs
   the ARMv8 VFP extension).  */

static void
do_neon_cvttb_2 (bfd_boolean t, bfd_boolean to, bfd_boolean is_double)
{
  if (is_double)
    mark_feature_used (&fpu_vfp_ext_armv8);

  encode_arm_vfp_reg (inst.operands[0].reg,
		      (is_double && !to) ? VFP_REG_Dd : VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg,
		      (is_double && to) ? VFP_REG_Dm : VFP_REG_Sm);
  inst.instruction |= to ? 0x10000 : 0;
  inst.instruction |= t ? 0x80 : 0;
  inst.instruction |= is_double ? 0x100 : 0;
  do_vfp_cond_or_thumb ();
}
/* Dispatch VCVTB/VCVTT based on the matched operand types: f16<->f32 or
   f16<->f64 (the latter requiring the ARMv8 VFP extension).  */

static void
do_neon_cvttb_1 (bfd_boolean t)
{
  enum neon_shape rs = neon_select_shape (NS_HF, NS_HD, NS_FH, NS_FF, NS_FD,
					  NS_DF, NS_DH, NS_NULL);

  if (rs == NS_NULL)
    return;
  else if (neon_check_type (2, rs, N_F16, N_F32 | N_VFP).type != NT_invtype)
    {
      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/TRUE, /*is_double=*/FALSE);
    }
  else if (neon_check_type (2, rs, N_F32 | N_VFP, N_F16).type != NT_invtype)
    {
      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/FALSE, /*is_double=*/FALSE);
    }
  else if (neon_check_type (2, rs, N_F16, N_F64 | N_VFP).type != NT_invtype)
    {
      /* The VCVTB and VCVTT instructions with D-register operands
	 don't work for SP only targets.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		  _(BAD_FPU));

      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/TRUE, /*is_double=*/TRUE);
    }
  else if (neon_check_type (2, rs, N_F64 | N_VFP, N_F16).type != NT_invtype)
    {
      /* The VCVTB and VCVTT instructions with D-register operands
	 don't work for SP only targets.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		  _(BAD_FPU));

      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/FALSE, /*is_double=*/TRUE);
    }
  else
    return;
}
/* VCVTB: convert using the bottom half of the half-precision register.  */

static void
do_neon_cvtb (void)
{
  do_neon_cvttb_1 (FALSE);
}
/* VCVTT: convert using the top half of the half-precision register.  */

static void
do_neon_cvtt (void)
{
  do_neon_cvttb_1 (TRUE);
}
/* Encode the immediate form of VMOV/VMVN.  Tries to find a cmode encoding
   for the immediate; if none exists, inverts the immediate and flips between
   VMOV and VMVN before retrying.  */

static void
neon_move_immediate (void)
{
  enum neon_shape rs = neon_select_shape (NS_DI, NS_QI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
  unsigned immlo, immhi = 0, immbits;
  int op, cmode, float_p;

  constraint (et.type == NT_invtype,
	      _("operand size must be specified for immediate VMOV"));

  /* We start out as an MVN instruction if OP = 1, MOV otherwise.  */
  op = (inst.instruction & (1 << 5)) != 0;

  immlo = inst.operands[1].imm;
  if (inst.operands[1].regisimm)
    immhi = inst.operands[1].reg;

  constraint (et.size < 32 && (immlo & ~((1 << et.size) - 1)) != 0,
	      _("immediate has bits set outside the operand size"));

  float_p = inst.operands[1].immisfloat;

  if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits, &op,
					et.size, et.type)) == FAIL)
    {
      /* Invert relevant bits only.  */
      neon_invert_size (&immlo, &immhi, et.size);
      /* Flip from VMOV/VMVN to VMVN/VMOV.  Some immediate types are unavailable
	 with one or the other; those cases are caught by
	 neon_cmode_for_move_imm.  */
      op = !op;
      if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits,
					    &op, et.size, et.type)) == FAIL)
	{
	  first_error (_("immediate out of range"));
	  return;
	}
    }

  inst.instruction &= ~(1 << 5);
  inst.instruction |= op << 5;

  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= neon_quad (rs) << 6;
  inst.instruction |= cmode << 8;

  neon_write_immbits (immbits);
}
/* VMVN: register form is a plain two-register encoding; immediate form
   is handled by neon_move_immediate.  */

static void
do_neon_mvn (void)
{
  if (inst.operands[1].isreg)
    {
      enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);

      NEON_ENCODE (INTEGER, inst);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_quad (rs) << 6;
    }
  else
    {
      NEON_ENCODE (IMMED, inst);
      neon_move_immediate ();
    }

  neon_dp_fixup (&inst);
}
/* Encode instructions of form:

  |28/24|23|22|21 20|19 16|15 12|11    8|7|6|5|4|3  0|
  |  U  |x |D |size |  Rn |  Rd |x x x x|N|x|M|x| Rm |  */

static void
neon_mixed_length (struct neon_type_el et, unsigned size)
{
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  inst.instruction |= (et.type == NT_unsigned) << 24;
  inst.instruction |= neon_logbits (size) << 20;

  neon_dp_fixup (&inst);
}
/* Lengthening dyadic instructions (e.g. VADDL): Q = D op D.  */

static void
do_neon_dyadic_long (void)
{
  /* FIXME: Type checking for lengthening op.  */
  struct neon_type_el et = neon_check_type (3, NS_QDD,
    N_EQK | N_DBL, N_EQK, N_SU_32 | N_KEY);
  neon_mixed_length (et, et.size);
}
16089 do_neon_abal (void)
16091 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
16092 N_EQK
| N_INT
| N_DBL
, N_EQK
, N_SU_32
| N_KEY
);
16093 neon_mixed_length (et
, et
.size
);
16097 neon_mac_reg_scalar_long (unsigned regtypes
, unsigned scalartypes
)
16099 if (inst
.operands
[2].isscalar
)
16101 struct neon_type_el et
= neon_check_type (3, NS_QDS
,
16102 N_EQK
| N_DBL
, N_EQK
, regtypes
| N_KEY
);
16103 NEON_ENCODE (SCALAR
, inst
);
16104 neon_mul_mac (et
, et
.type
== NT_unsigned
);
16108 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
16109 N_EQK
| N_DBL
, N_EQK
, scalartypes
| N_KEY
);
16110 NEON_ENCODE (INTEGER
, inst
);
16111 neon_mixed_length (et
, et
.size
);
16116 do_neon_mac_maybe_scalar_long (void)
16118 neon_mac_reg_scalar_long (N_S16
| N_S32
| N_U16
| N_U32
, N_SU_32
);
16122 do_neon_dyadic_wide (void)
16124 struct neon_type_el et
= neon_check_type (3, NS_QQD
,
16125 N_EQK
| N_DBL
, N_EQK
| N_DBL
, N_SU_32
| N_KEY
);
16126 neon_mixed_length (et
, et
.size
);
16130 do_neon_dyadic_narrow (void)
16132 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
16133 N_EQK
| N_DBL
, N_EQK
, N_I16
| N_I32
| N_I64
| N_KEY
);
16134 /* Operand sign is unimportant, and the U bit is part of the opcode,
16135 so force the operand type to integer. */
16136 et
.type
= NT_integer
;
16137 neon_mixed_length (et
, et
.size
/ 2);
16141 do_neon_mul_sat_scalar_long (void)
16143 neon_mac_reg_scalar_long (N_S16
| N_S32
, N_S16
| N_S32
);
16147 do_neon_vmull (void)
16149 if (inst
.operands
[2].isscalar
)
16150 do_neon_mac_maybe_scalar_long ();
16153 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
16154 N_EQK
| N_DBL
, N_EQK
, N_SU_32
| N_P8
| N_P64
| N_KEY
);
16156 if (et
.type
== NT_poly
)
16157 NEON_ENCODE (POLY
, inst
);
16159 NEON_ENCODE (INTEGER
, inst
);
16161 /* For polynomial encoding the U bit must be zero, and the size must
16162 be 8 (encoded as 0b00) or, on ARMv8 or later 64 (encoded, non
16163 obviously, as 0b10). */
16166 /* Check we're on the correct architecture. */
16167 if (!mark_feature_used (&fpu_crypto_ext_armv8
))
16169 _("Instruction form not available on this architecture.");
16174 neon_mixed_length (et
, et
.size
);
16181 enum neon_shape rs
= neon_select_shape (NS_DDDI
, NS_QQQI
, NS_NULL
);
16182 struct neon_type_el et
= neon_check_type (3, rs
,
16183 N_EQK
, N_EQK
, N_8
| N_16
| N_32
| N_64
| N_KEY
);
16184 unsigned imm
= (inst
.operands
[3].imm
* et
.size
) / 8;
16186 constraint (imm
>= (unsigned) (neon_quad (rs
) ? 16 : 8),
16187 _("shift out of range"));
16188 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16189 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16190 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
16191 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
16192 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
16193 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
16194 inst
.instruction
|= neon_quad (rs
) << 6;
16195 inst
.instruction
|= imm
<< 8;
16197 neon_dp_fixup (&inst
);
16203 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
16204 struct neon_type_el et
= neon_check_type (2, rs
,
16205 N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
16206 unsigned op
= (inst
.instruction
>> 7) & 3;
16207 /* N (width of reversed regions) is encoded as part of the bitmask. We
16208 extract it here to check the elements to be reversed are smaller.
16209 Otherwise we'd get a reserved instruction. */
16210 unsigned elsize
= (op
== 2) ? 16 : (op
== 1) ? 32 : (op
== 0) ? 64 : 0;
16211 gas_assert (elsize
!= 0);
16212 constraint (et
.size
>= elsize
,
16213 _("elements must be smaller than reversal region"));
16214 neon_two_same (neon_quad (rs
), 1, et
.size
);
16220 if (inst
.operands
[1].isscalar
)
16222 enum neon_shape rs
= neon_select_shape (NS_DS
, NS_QS
, NS_NULL
);
16223 struct neon_type_el et
= neon_check_type (2, rs
,
16224 N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
16225 unsigned sizebits
= et
.size
>> 3;
16226 unsigned dm
= NEON_SCALAR_REG (inst
.operands
[1].reg
);
16227 int logsize
= neon_logbits (et
.size
);
16228 unsigned x
= NEON_SCALAR_INDEX (inst
.operands
[1].reg
) << logsize
;
16230 if (vfp_or_neon_is_neon (NEON_CHECK_CC
) == FAIL
)
16233 NEON_ENCODE (SCALAR
, inst
);
16234 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16235 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16236 inst
.instruction
|= LOW4 (dm
);
16237 inst
.instruction
|= HI1 (dm
) << 5;
16238 inst
.instruction
|= neon_quad (rs
) << 6;
16239 inst
.instruction
|= x
<< 17;
16240 inst
.instruction
|= sizebits
<< 16;
16242 neon_dp_fixup (&inst
);
16246 enum neon_shape rs
= neon_select_shape (NS_DR
, NS_QR
, NS_NULL
);
16247 struct neon_type_el et
= neon_check_type (2, rs
,
16248 N_8
| N_16
| N_32
| N_KEY
, N_EQK
);
16249 /* Duplicate ARM register to lanes of vector. */
16250 NEON_ENCODE (ARMREG
, inst
);
16253 case 8: inst
.instruction
|= 0x400000; break;
16254 case 16: inst
.instruction
|= 0x000020; break;
16255 case 32: inst
.instruction
|= 0x000000; break;
16258 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 12;
16259 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 16;
16260 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 7;
16261 inst
.instruction
|= neon_quad (rs
) << 21;
16262 /* The encoding for this instruction is identical for the ARM and Thumb
16263 variants, except for the condition field. */
16264 do_vfp_cond_or_thumb ();
16268 /* VMOV has particularly many variations. It can be one of:
16269 0. VMOV<c><q> <Qd>, <Qm>
16270 1. VMOV<c><q> <Dd>, <Dm>
16271 (Register operations, which are VORR with Rm = Rn.)
16272 2. VMOV<c><q>.<dt> <Qd>, #<imm>
16273 3. VMOV<c><q>.<dt> <Dd>, #<imm>
16275 4. VMOV<c><q>.<size> <Dn[x]>, <Rd>
16276 (ARM register to scalar.)
16277 5. VMOV<c><q> <Dm>, <Rd>, <Rn>
16278 (Two ARM registers to vector.)
16279 6. VMOV<c><q>.<dt> <Rd>, <Dn[x]>
16280 (Scalar to ARM register.)
16281 7. VMOV<c><q> <Rd>, <Rn>, <Dm>
16282 (Vector to two ARM registers.)
16283 8. VMOV.F32 <Sd>, <Sm>
16284 9. VMOV.F64 <Dd>, <Dm>
16285 (VFP register moves.)
16286 10. VMOV.F32 <Sd>, #imm
16287 11. VMOV.F64 <Dd>, #imm
16288 (VFP float immediate load.)
16289 12. VMOV <Rd>, <Sm>
16290 (VFP single to ARM reg.)
16291 13. VMOV <Sd>, <Rm>
16292 (ARM reg to VFP single.)
16293 14. VMOV <Rd>, <Re>, <Sn>, <Sm>
16294 (Two ARM regs to two VFP singles.)
16295 15. VMOV <Sd>, <Se>, <Rn>, <Rm>
16296 (Two VFP singles to two ARM regs.)
16298 These cases can be disambiguated using neon_select_shape, except cases 1/9
16299 and 3/11 which depend on the operand type too.
16301 All the encoded bits are hardcoded by this function.
16303 Cases 4, 6 may be used with VFPv1 and above (only 32-bit transfers!).
16304 Cases 5, 7 may be used with VFPv2 and above.
16306 FIXME: Some of the checking may be a bit sloppy (in a couple of cases you
16307 can specify a type where it doesn't make sense to, and is ignored). */
16312 enum neon_shape rs
= neon_select_shape (NS_RRFF
, NS_FFRR
, NS_DRR
, NS_RRD
,
16313 NS_QQ
, NS_DD
, NS_QI
, NS_DI
, NS_SR
,
16314 NS_RS
, NS_FF
, NS_FI
, NS_RF
, NS_FR
,
16315 NS_HR
, NS_RH
, NS_HI
, NS_NULL
);
16316 struct neon_type_el et
;
16317 const char *ldconst
= 0;
16321 case NS_DD
: /* case 1/9. */
16322 et
= neon_check_type (2, rs
, N_EQK
, N_F64
| N_KEY
);
16323 /* It is not an error here if no type is given. */
16325 if (et
.type
== NT_float
&& et
.size
== 64)
16327 do_vfp_nsyn_opcode ("fcpyd");
16330 /* fall through. */
16332 case NS_QQ
: /* case 0/1. */
16334 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
16336 /* The architecture manual I have doesn't explicitly state which
16337 value the U bit should have for register->register moves, but
16338 the equivalent VORR instruction has U = 0, so do that. */
16339 inst
.instruction
= 0x0200110;
16340 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16341 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16342 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
16343 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
16344 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
16345 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
16346 inst
.instruction
|= neon_quad (rs
) << 6;
16348 neon_dp_fixup (&inst
);
16352 case NS_DI
: /* case 3/11. */
16353 et
= neon_check_type (2, rs
, N_EQK
, N_F64
| N_KEY
);
16355 if (et
.type
== NT_float
&& et
.size
== 64)
16357 /* case 11 (fconstd). */
16358 ldconst
= "fconstd";
16359 goto encode_fconstd
;
16361 /* fall through. */
16363 case NS_QI
: /* case 2/3. */
16364 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
16366 inst
.instruction
= 0x0800010;
16367 neon_move_immediate ();
16368 neon_dp_fixup (&inst
);
16371 case NS_SR
: /* case 4. */
16373 unsigned bcdebits
= 0;
16375 unsigned dn
= NEON_SCALAR_REG (inst
.operands
[0].reg
);
16376 unsigned x
= NEON_SCALAR_INDEX (inst
.operands
[0].reg
);
16378 /* .<size> is optional here, defaulting to .32. */
16379 if (inst
.vectype
.elems
== 0
16380 && inst
.operands
[0].vectype
.type
== NT_invtype
16381 && inst
.operands
[1].vectype
.type
== NT_invtype
)
16383 inst
.vectype
.el
[0].type
= NT_untyped
;
16384 inst
.vectype
.el
[0].size
= 32;
16385 inst
.vectype
.elems
= 1;
16388 et
= neon_check_type (2, NS_NULL
, N_8
| N_16
| N_32
| N_KEY
, N_EQK
);
16389 logsize
= neon_logbits (et
.size
);
16391 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v1
),
16393 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_v1
)
16394 && et
.size
!= 32, _(BAD_FPU
));
16395 constraint (et
.type
== NT_invtype
, _("bad type for scalar"));
16396 constraint (x
>= 64 / et
.size
, _("scalar index out of range"));
16400 case 8: bcdebits
= 0x8; break;
16401 case 16: bcdebits
= 0x1; break;
16402 case 32: bcdebits
= 0x0; break;
16406 bcdebits
|= x
<< logsize
;
16408 inst
.instruction
= 0xe000b10;
16409 do_vfp_cond_or_thumb ();
16410 inst
.instruction
|= LOW4 (dn
) << 16;
16411 inst
.instruction
|= HI1 (dn
) << 7;
16412 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
16413 inst
.instruction
|= (bcdebits
& 3) << 5;
16414 inst
.instruction
|= (bcdebits
>> 2) << 21;
16418 case NS_DRR
: /* case 5 (fmdrr). */
16419 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v2
),
16422 inst
.instruction
= 0xc400b10;
16423 do_vfp_cond_or_thumb ();
16424 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
);
16425 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 5;
16426 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
16427 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
16430 case NS_RS
: /* case 6. */
16433 unsigned dn
= NEON_SCALAR_REG (inst
.operands
[1].reg
);
16434 unsigned x
= NEON_SCALAR_INDEX (inst
.operands
[1].reg
);
16435 unsigned abcdebits
= 0;
16437 /* .<dt> is optional here, defaulting to .32. */
16438 if (inst
.vectype
.elems
== 0
16439 && inst
.operands
[0].vectype
.type
== NT_invtype
16440 && inst
.operands
[1].vectype
.type
== NT_invtype
)
16442 inst
.vectype
.el
[0].type
= NT_untyped
;
16443 inst
.vectype
.el
[0].size
= 32;
16444 inst
.vectype
.elems
= 1;
16447 et
= neon_check_type (2, NS_NULL
,
16448 N_EQK
, N_S8
| N_S16
| N_U8
| N_U16
| N_32
| N_KEY
);
16449 logsize
= neon_logbits (et
.size
);
16451 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v1
),
16453 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_v1
)
16454 && et
.size
!= 32, _(BAD_FPU
));
16455 constraint (et
.type
== NT_invtype
, _("bad type for scalar"));
16456 constraint (x
>= 64 / et
.size
, _("scalar index out of range"));
16460 case 8: abcdebits
= (et
.type
== NT_signed
) ? 0x08 : 0x18; break;
16461 case 16: abcdebits
= (et
.type
== NT_signed
) ? 0x01 : 0x11; break;
16462 case 32: abcdebits
= 0x00; break;
16466 abcdebits
|= x
<< logsize
;
16467 inst
.instruction
= 0xe100b10;
16468 do_vfp_cond_or_thumb ();
16469 inst
.instruction
|= LOW4 (dn
) << 16;
16470 inst
.instruction
|= HI1 (dn
) << 7;
16471 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
16472 inst
.instruction
|= (abcdebits
& 3) << 5;
16473 inst
.instruction
|= (abcdebits
>> 2) << 21;
16477 case NS_RRD
: /* case 7 (fmrrd). */
16478 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v2
),
16481 inst
.instruction
= 0xc500b10;
16482 do_vfp_cond_or_thumb ();
16483 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
16484 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
16485 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
16486 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
16489 case NS_FF
: /* case 8 (fcpys). */
16490 do_vfp_nsyn_opcode ("fcpys");
16494 case NS_FI
: /* case 10 (fconsts). */
16495 ldconst
= "fconsts";
16497 if (is_quarter_float (inst
.operands
[1].imm
))
16499 inst
.operands
[1].imm
= neon_qfloat_bits (inst
.operands
[1].imm
);
16500 do_vfp_nsyn_opcode (ldconst
);
16502 /* ARMv8.2 fp16 vmov.f16 instruction. */
16504 do_scalar_fp16_v82_encode ();
16507 first_error (_("immediate out of range"));
16511 case NS_RF
: /* case 12 (fmrs). */
16512 do_vfp_nsyn_opcode ("fmrs");
16513 /* ARMv8.2 fp16 vmov.f16 instruction. */
16515 do_scalar_fp16_v82_encode ();
16519 case NS_FR
: /* case 13 (fmsr). */
16520 do_vfp_nsyn_opcode ("fmsr");
16521 /* ARMv8.2 fp16 vmov.f16 instruction. */
16523 do_scalar_fp16_v82_encode ();
16526 /* The encoders for the fmrrs and fmsrr instructions expect three operands
16527 (one of which is a list), but we have parsed four. Do some fiddling to
16528 make the operands what do_vfp_reg2_from_sp2 and do_vfp_sp2_from_reg2
16530 case NS_RRFF
: /* case 14 (fmrrs). */
16531 constraint (inst
.operands
[3].reg
!= inst
.operands
[2].reg
+ 1,
16532 _("VFP registers must be adjacent"));
16533 inst
.operands
[2].imm
= 2;
16534 memset (&inst
.operands
[3], '\0', sizeof (inst
.operands
[3]));
16535 do_vfp_nsyn_opcode ("fmrrs");
16538 case NS_FFRR
: /* case 15 (fmsrr). */
16539 constraint (inst
.operands
[1].reg
!= inst
.operands
[0].reg
+ 1,
16540 _("VFP registers must be adjacent"));
16541 inst
.operands
[1] = inst
.operands
[2];
16542 inst
.operands
[2] = inst
.operands
[3];
16543 inst
.operands
[0].imm
= 2;
16544 memset (&inst
.operands
[3], '\0', sizeof (inst
.operands
[3]));
16545 do_vfp_nsyn_opcode ("fmsrr");
16549 /* neon_select_shape has determined that the instruction
16550 shape is wrong and has already set the error message. */
16559 do_neon_rshift_round_imm (void)
16561 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
16562 struct neon_type_el et
= neon_check_type (2, rs
, N_EQK
, N_SU_ALL
| N_KEY
);
16563 int imm
= inst
.operands
[2].imm
;
16565 /* imm == 0 case is encoded as VMOV for V{R}SHR. */
16568 inst
.operands
[2].present
= 0;
16573 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
16574 _("immediate out of range for shift"));
16575 neon_imm_shift (TRUE
, et
.type
== NT_unsigned
, neon_quad (rs
), et
,
16580 do_neon_movhf (void)
16582 enum neon_shape rs
= neon_select_shape (NS_HH
, NS_NULL
);
16583 constraint (rs
!= NS_HH
, _("invalid suffix"));
16585 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
16588 do_vfp_sp_monadic ();
16591 inst
.instruction
|= 0xf0000000;
16595 do_neon_movl (void)
16597 struct neon_type_el et
= neon_check_type (2, NS_QD
,
16598 N_EQK
| N_DBL
, N_SU_32
| N_KEY
);
16599 unsigned sizebits
= et
.size
>> 3;
16600 inst
.instruction
|= sizebits
<< 19;
16601 neon_two_same (0, et
.type
== NT_unsigned
, -1);
16607 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
16608 struct neon_type_el et
= neon_check_type (2, rs
,
16609 N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
16610 NEON_ENCODE (INTEGER
, inst
);
16611 neon_two_same (neon_quad (rs
), 1, et
.size
);
16615 do_neon_zip_uzp (void)
16617 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
16618 struct neon_type_el et
= neon_check_type (2, rs
,
16619 N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
16620 if (rs
== NS_DD
&& et
.size
== 32)
16622 /* Special case: encode as VTRN.32 <Dd>, <Dm>. */
16623 inst
.instruction
= N_MNEM_vtrn
;
16627 neon_two_same (neon_quad (rs
), 1, et
.size
);
16631 do_neon_sat_abs_neg (void)
16633 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
16634 struct neon_type_el et
= neon_check_type (2, rs
,
16635 N_EQK
, N_S8
| N_S16
| N_S32
| N_KEY
);
16636 neon_two_same (neon_quad (rs
), 1, et
.size
);
16640 do_neon_pair_long (void)
16642 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
16643 struct neon_type_el et
= neon_check_type (2, rs
, N_EQK
, N_SU_32
| N_KEY
);
16644 /* Unsigned is encoded in OP field (bit 7) for these instruction. */
16645 inst
.instruction
|= (et
.type
== NT_unsigned
) << 7;
16646 neon_two_same (neon_quad (rs
), 1, et
.size
);
16650 do_neon_recip_est (void)
16652 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
16653 struct neon_type_el et
= neon_check_type (2, rs
,
16654 N_EQK
| N_FLT
, N_F_16_32
| N_U32
| N_KEY
);
16655 inst
.instruction
|= (et
.type
== NT_float
) << 8;
16656 neon_two_same (neon_quad (rs
), 1, et
.size
);
16662 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
16663 struct neon_type_el et
= neon_check_type (2, rs
,
16664 N_EQK
, N_S8
| N_S16
| N_S32
| N_KEY
);
16665 neon_two_same (neon_quad (rs
), 1, et
.size
);
16671 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
16672 struct neon_type_el et
= neon_check_type (2, rs
,
16673 N_EQK
, N_I8
| N_I16
| N_I32
| N_KEY
);
16674 neon_two_same (neon_quad (rs
), 1, et
.size
);
16680 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
16681 struct neon_type_el et
= neon_check_type (2, rs
,
16682 N_EQK
| N_INT
, N_8
| N_KEY
);
16683 neon_two_same (neon_quad (rs
), 1, et
.size
);
16689 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
16690 neon_two_same (neon_quad (rs
), 1, -1);
16694 do_neon_tbl_tbx (void)
16696 unsigned listlenbits
;
16697 neon_check_type (3, NS_DLD
, N_EQK
, N_EQK
, N_8
| N_KEY
);
16699 if (inst
.operands
[1].imm
< 1 || inst
.operands
[1].imm
> 4)
16701 first_error (_("bad list length for table lookup"));
16705 listlenbits
= inst
.operands
[1].imm
- 1;
16706 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16707 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16708 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
16709 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
16710 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
16711 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
16712 inst
.instruction
|= listlenbits
<< 8;
16714 neon_dp_fixup (&inst
);
16718 do_neon_ldm_stm (void)
16720 /* P, U and L bits are part of bitmask. */
16721 int is_dbmode
= (inst
.instruction
& (1 << 24)) != 0;
16722 unsigned offsetbits
= inst
.operands
[1].imm
* 2;
16724 if (inst
.operands
[1].issingle
)
16726 do_vfp_nsyn_ldm_stm (is_dbmode
);
16730 constraint (is_dbmode
&& !inst
.operands
[0].writeback
,
16731 _("writeback (!) must be used for VLDMDB and VSTMDB"));
16733 constraint (inst
.operands
[1].imm
< 1 || inst
.operands
[1].imm
> 16,
16734 _("register list must contain at least 1 and at most 16 "
16737 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
16738 inst
.instruction
|= inst
.operands
[0].writeback
<< 21;
16739 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 12;
16740 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 22;
16742 inst
.instruction
|= offsetbits
;
16744 do_vfp_cond_or_thumb ();
16748 do_neon_ldr_str (void)
16750 int is_ldr
= (inst
.instruction
& (1 << 20)) != 0;
16752 /* Use of PC in vstr in ARM mode is deprecated in ARMv7.
16753 And is UNPREDICTABLE in thumb mode. */
16755 && inst
.operands
[1].reg
== REG_PC
16756 && (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v7
) || thumb_mode
))
16759 inst
.error
= _("Use of PC here is UNPREDICTABLE");
16760 else if (warn_on_deprecated
)
16761 as_tsktsk (_("Use of PC here is deprecated"));
16764 if (inst
.operands
[0].issingle
)
16767 do_vfp_nsyn_opcode ("flds");
16769 do_vfp_nsyn_opcode ("fsts");
16771 /* ARMv8.2 vldr.16/vstr.16 instruction. */
16772 if (inst
.vectype
.el
[0].size
== 16)
16773 do_scalar_fp16_v82_encode ();
16778 do_vfp_nsyn_opcode ("fldd");
16780 do_vfp_nsyn_opcode ("fstd");
16784 /* "interleave" version also handles non-interleaving register VLD1/VST1
16788 do_neon_ld_st_interleave (void)
16790 struct neon_type_el et
= neon_check_type (1, NS_NULL
,
16791 N_8
| N_16
| N_32
| N_64
);
16792 unsigned alignbits
= 0;
16794 /* The bits in this table go:
16795 0: register stride of one (0) or two (1)
16796 1,2: register list length, minus one (1, 2, 3, 4).
16797 3,4: <n> in instruction type, minus one (VLD<n> / VST<n>).
16798 We use -1 for invalid entries. */
16799 const int typetable
[] =
16801 0x7, -1, 0xa, -1, 0x6, -1, 0x2, -1, /* VLD1 / VST1. */
16802 -1, -1, 0x8, 0x9, -1, -1, 0x3, -1, /* VLD2 / VST2. */
16803 -1, -1, -1, -1, 0x4, 0x5, -1, -1, /* VLD3 / VST3. */
16804 -1, -1, -1, -1, -1, -1, 0x0, 0x1 /* VLD4 / VST4. */
16808 if (et
.type
== NT_invtype
)
16811 if (inst
.operands
[1].immisalign
)
16812 switch (inst
.operands
[1].imm
>> 8)
16814 case 64: alignbits
= 1; break;
16816 if (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 2
16817 && NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 4)
16818 goto bad_alignment
;
16822 if (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 4)
16823 goto bad_alignment
;
16828 first_error (_("bad alignment"));
16832 inst
.instruction
|= alignbits
<< 4;
16833 inst
.instruction
|= neon_logbits (et
.size
) << 6;
16835 /* Bits [4:6] of the immediate in a list specifier encode register stride
16836 (minus 1) in bit 4, and list length in bits [5:6]. We put the <n> of
16837 VLD<n>/VST<n> in bits [9:8] of the initial bitmask. Suck it out here, look
16838 up the right value for "type" in a table based on this value and the given
16839 list style, then stick it back. */
16840 idx
= ((inst
.operands
[0].imm
>> 4) & 7)
16841 | (((inst
.instruction
>> 8) & 3) << 3);
16843 typebits
= typetable
[idx
];
16845 constraint (typebits
== -1, _("bad list type for instruction"));
16846 constraint (((inst
.instruction
>> 8) & 3) && et
.size
== 64,
16847 _("bad element type for instruction"));
16849 inst
.instruction
&= ~0xf00;
16850 inst
.instruction
|= typebits
<< 8;
16853 /* Check alignment is valid for do_neon_ld_st_lane and do_neon_ld_dup.
16854 *DO_ALIGN is set to 1 if the relevant alignment bit should be set, 0
16855 otherwise. The variable arguments are a list of pairs of legal (size, align)
16856 values, terminated with -1. */
16859 neon_alignment_bit (int size
, int align
, int *do_alignment
, ...)
16862 int result
= FAIL
, thissize
, thisalign
;
16864 if (!inst
.operands
[1].immisalign
)
16870 va_start (ap
, do_alignment
);
16874 thissize
= va_arg (ap
, int);
16875 if (thissize
== -1)
16877 thisalign
= va_arg (ap
, int);
16879 if (size
== thissize
&& align
== thisalign
)
16882 while (result
!= SUCCESS
);
16886 if (result
== SUCCESS
)
16889 first_error (_("unsupported alignment for instruction"));
16895 do_neon_ld_st_lane (void)
16897 struct neon_type_el et
= neon_check_type (1, NS_NULL
, N_8
| N_16
| N_32
);
16898 int align_good
, do_alignment
= 0;
16899 int logsize
= neon_logbits (et
.size
);
16900 int align
= inst
.operands
[1].imm
>> 8;
16901 int n
= (inst
.instruction
>> 8) & 3;
16902 int max_el
= 64 / et
.size
;
16904 if (et
.type
== NT_invtype
)
16907 constraint (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != n
+ 1,
16908 _("bad list length"));
16909 constraint (NEON_LANE (inst
.operands
[0].imm
) >= max_el
,
16910 _("scalar index out of range"));
16911 constraint (n
!= 0 && NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2
16913 _("stride of 2 unavailable when element size is 8"));
16917 case 0: /* VLD1 / VST1. */
16918 align_good
= neon_alignment_bit (et
.size
, align
, &do_alignment
, 16, 16,
16920 if (align_good
== FAIL
)
16924 unsigned alignbits
= 0;
16927 case 16: alignbits
= 0x1; break;
16928 case 32: alignbits
= 0x3; break;
16931 inst
.instruction
|= alignbits
<< 4;
16935 case 1: /* VLD2 / VST2. */
16936 align_good
= neon_alignment_bit (et
.size
, align
, &do_alignment
, 8, 16,
16937 16, 32, 32, 64, -1);
16938 if (align_good
== FAIL
)
16941 inst
.instruction
|= 1 << 4;
16944 case 2: /* VLD3 / VST3. */
16945 constraint (inst
.operands
[1].immisalign
,
16946 _("can't use alignment with this instruction"));
16949 case 3: /* VLD4 / VST4. */
16950 align_good
= neon_alignment_bit (et
.size
, align
, &do_alignment
, 8, 32,
16951 16, 64, 32, 64, 32, 128, -1);
16952 if (align_good
== FAIL
)
16956 unsigned alignbits
= 0;
16959 case 8: alignbits
= 0x1; break;
16960 case 16: alignbits
= 0x1; break;
16961 case 32: alignbits
= (align
== 64) ? 0x1 : 0x2; break;
16964 inst
.instruction
|= alignbits
<< 4;
16971 /* Reg stride of 2 is encoded in bit 5 when size==16, bit 6 when size==32. */
16972 if (n
!= 0 && NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2)
16973 inst
.instruction
|= 1 << (4 + logsize
);
16975 inst
.instruction
|= NEON_LANE (inst
.operands
[0].imm
) << (logsize
+ 5);
16976 inst
.instruction
|= logsize
<< 10;
16979 /* Encode single n-element structure to all lanes VLD<n> instructions. */
16982 do_neon_ld_dup (void)
16984 struct neon_type_el et
= neon_check_type (1, NS_NULL
, N_8
| N_16
| N_32
);
16985 int align_good
, do_alignment
= 0;
16987 if (et
.type
== NT_invtype
)
16990 switch ((inst
.instruction
>> 8) & 3)
16992 case 0: /* VLD1. */
16993 gas_assert (NEON_REG_STRIDE (inst
.operands
[0].imm
) != 2);
16994 align_good
= neon_alignment_bit (et
.size
, inst
.operands
[1].imm
>> 8,
16995 &do_alignment
, 16, 16, 32, 32, -1);
16996 if (align_good
== FAIL
)
16998 switch (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
))
17001 case 2: inst
.instruction
|= 1 << 5; break;
17002 default: first_error (_("bad list length")); return;
17004 inst
.instruction
|= neon_logbits (et
.size
) << 6;
17007 case 1: /* VLD2. */
17008 align_good
= neon_alignment_bit (et
.size
, inst
.operands
[1].imm
>> 8,
17009 &do_alignment
, 8, 16, 16, 32, 32, 64,
17011 if (align_good
== FAIL
)
17013 constraint (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 2,
17014 _("bad list length"));
17015 if (NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2)
17016 inst
.instruction
|= 1 << 5;
17017 inst
.instruction
|= neon_logbits (et
.size
) << 6;
17020 case 2: /* VLD3. */
17021 constraint (inst
.operands
[1].immisalign
,
17022 _("can't use alignment with this instruction"));
17023 constraint (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 3,
17024 _("bad list length"));
17025 if (NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2)
17026 inst
.instruction
|= 1 << 5;
17027 inst
.instruction
|= neon_logbits (et
.size
) << 6;
17030 case 3: /* VLD4. */
17032 int align
= inst
.operands
[1].imm
>> 8;
17033 align_good
= neon_alignment_bit (et
.size
, align
, &do_alignment
, 8, 32,
17034 16, 64, 32, 64, 32, 128, -1);
17035 if (align_good
== FAIL
)
17037 constraint (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 4,
17038 _("bad list length"));
17039 if (NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2)
17040 inst
.instruction
|= 1 << 5;
17041 if (et
.size
== 32 && align
== 128)
17042 inst
.instruction
|= 0x3 << 6;
17044 inst
.instruction
|= neon_logbits (et
.size
) << 6;
17051 inst
.instruction
|= do_alignment
<< 4;
17054 /* Disambiguate VLD<n> and VST<n> instructions, and fill in common bits (those
17055 apart from bits [11:4]. */
17058 do_neon_ldx_stx (void)
17060 if (inst
.operands
[1].isreg
)
17061 constraint (inst
.operands
[1].reg
== REG_PC
, BAD_PC
);
17063 switch (NEON_LANE (inst
.operands
[0].imm
))
17065 case NEON_INTERLEAVE_LANES
:
17066 NEON_ENCODE (INTERLV
, inst
);
17067 do_neon_ld_st_interleave ();
17070 case NEON_ALL_LANES
:
17071 NEON_ENCODE (DUP
, inst
);
17072 if (inst
.instruction
== N_INV
)
17074 first_error ("only loads support such operands");
17081 NEON_ENCODE (LANE
, inst
);
17082 do_neon_ld_st_lane ();
17085 /* L bit comes from bit mask. */
17086 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
17087 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
17088 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
17090 if (inst
.operands
[1].postind
)
17092 int postreg
= inst
.operands
[1].imm
& 0xf;
17093 constraint (!inst
.operands
[1].immisreg
,
17094 _("post-index must be a register"));
17095 constraint (postreg
== 0xd || postreg
== 0xf,
17096 _("bad register for post-index"));
17097 inst
.instruction
|= postreg
;
17101 constraint (inst
.operands
[1].immisreg
, BAD_ADDR_MODE
);
17102 constraint (inst
.reloc
.exp
.X_op
!= O_constant
17103 || inst
.reloc
.exp
.X_add_number
!= 0,
17106 if (inst
.operands
[1].writeback
)
17108 inst
.instruction
|= 0xd;
17111 inst
.instruction
|= 0xf;
17115 inst
.instruction
|= 0xf9000000;
17117 inst
.instruction
|= 0xf4000000;
17122 do_vfp_nsyn_fpv8 (enum neon_shape rs
)
17124 /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
17125 D register operands. */
17126 if (neon_shape_class
[rs
] == SC_DOUBLE
)
17127 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
17130 NEON_ENCODE (FPV8
, inst
);
17132 if (rs
== NS_FFF
|| rs
== NS_HHH
)
17134 do_vfp_sp_dyadic ();
17136 /* ARMv8.2 fp16 instruction. */
17138 do_scalar_fp16_v82_encode ();
17141 do_vfp_dp_rd_rn_rm ();
17144 inst
.instruction
|= 0x100;
17146 inst
.instruction
|= 0xf0000000;
17152 set_it_insn_type (OUTSIDE_IT_INSN
);
17154 if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8
) != SUCCESS
)
17155 first_error (_("invalid instruction shape"));
17161 set_it_insn_type (OUTSIDE_IT_INSN
);
17163 if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8
) == SUCCESS
)
17166 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH8
) == FAIL
)
17169 neon_dyadic_misc (NT_untyped
, N_F_16_32
, 0);
17173 do_vrint_1 (enum neon_cvt_mode mode
)
17175 enum neon_shape rs
= neon_select_shape (NS_HH
, NS_FF
, NS_DD
, NS_QQ
, NS_NULL
);
17176 struct neon_type_el et
;
17181 /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
17182 D register operands. */
17183 if (neon_shape_class
[rs
] == SC_DOUBLE
)
17184 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
17187 et
= neon_check_type (2, rs
, N_EQK
| N_VFP
, N_F_ALL
| N_KEY
17189 if (et
.type
!= NT_invtype
)
17191 /* VFP encodings. */
17192 if (mode
== neon_cvt_mode_a
|| mode
== neon_cvt_mode_n
17193 || mode
== neon_cvt_mode_p
|| mode
== neon_cvt_mode_m
)
17194 set_it_insn_type (OUTSIDE_IT_INSN
);
17196 NEON_ENCODE (FPV8
, inst
);
17197 if (rs
== NS_FF
|| rs
== NS_HH
)
17198 do_vfp_sp_monadic ();
17200 do_vfp_dp_rd_rm ();
17204 case neon_cvt_mode_r
: inst
.instruction
|= 0x00000000; break;
17205 case neon_cvt_mode_z
: inst
.instruction
|= 0x00000080; break;
17206 case neon_cvt_mode_x
: inst
.instruction
|= 0x00010000; break;
17207 case neon_cvt_mode_a
: inst
.instruction
|= 0xf0000000; break;
17208 case neon_cvt_mode_n
: inst
.instruction
|= 0xf0010000; break;
17209 case neon_cvt_mode_p
: inst
.instruction
|= 0xf0020000; break;
17210 case neon_cvt_mode_m
: inst
.instruction
|= 0xf0030000; break;
17214 inst
.instruction
|= (rs
== NS_DD
) << 8;
17215 do_vfp_cond_or_thumb ();
17217 /* ARMv8.2 fp16 vrint instruction. */
17219 do_scalar_fp16_v82_encode ();
17223 /* Neon encodings (or something broken...). */
17225 et
= neon_check_type (2, rs
, N_EQK
, N_F_16_32
| N_KEY
);
17227 if (et
.type
== NT_invtype
)
17230 set_it_insn_type (OUTSIDE_IT_INSN
);
17231 NEON_ENCODE (FLOAT
, inst
);
17233 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH8
) == FAIL
)
17236 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
17237 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
17238 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
17239 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
17240 inst
.instruction
|= neon_quad (rs
) << 6;
17241 /* Mask off the original size bits and reencode them. */
17242 inst
.instruction
= ((inst
.instruction
& 0xfff3ffff)
17243 | neon_logbits (et
.size
) << 18);
17247 case neon_cvt_mode_z
: inst
.instruction
|= 3 << 7; break;
17248 case neon_cvt_mode_x
: inst
.instruction
|= 1 << 7; break;
17249 case neon_cvt_mode_a
: inst
.instruction
|= 2 << 7; break;
17250 case neon_cvt_mode_n
: inst
.instruction
|= 0 << 7; break;
17251 case neon_cvt_mode_p
: inst
.instruction
|= 7 << 7; break;
17252 case neon_cvt_mode_m
: inst
.instruction
|= 5 << 7; break;
17253 case neon_cvt_mode_r
: inst
.error
= _("invalid rounding mode"); break;
17258 inst
.instruction
|= 0xfc000000;
17260 inst
.instruction
|= 0xf0000000;
17267 do_vrint_1 (neon_cvt_mode_x
);
17273 do_vrint_1 (neon_cvt_mode_z
);
17279 do_vrint_1 (neon_cvt_mode_r
);
17285 do_vrint_1 (neon_cvt_mode_a
);
17291 do_vrint_1 (neon_cvt_mode_n
);
17297 do_vrint_1 (neon_cvt_mode_p
);
17303 do_vrint_1 (neon_cvt_mode_m
);
17307 neon_scalar_for_vcmla (unsigned opnd
, unsigned elsize
)
17309 unsigned regno
= NEON_SCALAR_REG (opnd
);
17310 unsigned elno
= NEON_SCALAR_INDEX (opnd
);
17312 if (elsize
== 16 && elno
< 2 && regno
< 16)
17313 return regno
| (elno
<< 4);
17314 else if (elsize
== 32 && elno
== 0)
17317 first_error (_("scalar out of range"));
17324 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_armv8
),
17326 constraint (inst
.reloc
.exp
.X_op
!= O_constant
, _("expression too complex"));
17327 unsigned rot
= inst
.reloc
.exp
.X_add_number
;
17328 constraint (rot
!= 0 && rot
!= 90 && rot
!= 180 && rot
!= 270,
17329 _("immediate out of range"));
17331 if (inst
.operands
[2].isscalar
)
17333 enum neon_shape rs
= neon_select_shape (NS_DDSI
, NS_QQSI
, NS_NULL
);
17334 unsigned size
= neon_check_type (3, rs
, N_EQK
, N_EQK
,
17335 N_KEY
| N_F16
| N_F32
).size
;
17336 unsigned m
= neon_scalar_for_vcmla (inst
.operands
[2].reg
, size
);
17338 inst
.instruction
= 0xfe000800;
17339 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
17340 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
17341 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
17342 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
17343 inst
.instruction
|= LOW4 (m
);
17344 inst
.instruction
|= HI1 (m
) << 5;
17345 inst
.instruction
|= neon_quad (rs
) << 6;
17346 inst
.instruction
|= rot
<< 20;
17347 inst
.instruction
|= (size
== 32) << 23;
17351 enum neon_shape rs
= neon_select_shape (NS_DDDI
, NS_QQQI
, NS_NULL
);
17352 unsigned size
= neon_check_type (3, rs
, N_EQK
, N_EQK
,
17353 N_KEY
| N_F16
| N_F32
).size
;
17354 neon_three_same (neon_quad (rs
), 0, -1);
17355 inst
.instruction
&= 0x00ffffff; /* Undo neon_dp_fixup. */
17356 inst
.instruction
|= 0xfc200800;
17357 inst
.instruction
|= rot
<< 23;
17358 inst
.instruction
|= (size
== 32) << 20;
17365 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_armv8
),
17367 constraint (inst
.reloc
.exp
.X_op
!= O_constant
, _("expression too complex"));
17368 unsigned rot
= inst
.reloc
.exp
.X_add_number
;
17369 constraint (rot
!= 90 && rot
!= 270, _("immediate out of range"));
17370 enum neon_shape rs
= neon_select_shape (NS_DDDI
, NS_QQQI
, NS_NULL
);
17371 unsigned size
= neon_check_type (3, rs
, N_EQK
, N_EQK
,
17372 N_KEY
| N_F16
| N_F32
).size
;
17373 neon_three_same (neon_quad (rs
), 0, -1);
17374 inst
.instruction
&= 0x00ffffff; /* Undo neon_dp_fixup. */
17375 inst
.instruction
|= 0xfc800800;
17376 inst
.instruction
|= (rot
== 270) << 24;
17377 inst
.instruction
|= (size
== 32) << 20;
17380 /* Crypto v1 instructions. */
17382 do_crypto_2op_1 (unsigned elttype
, int op
)
17384 set_it_insn_type (OUTSIDE_IT_INSN
);
17386 if (neon_check_type (2, NS_QQ
, N_EQK
| N_UNT
, elttype
| N_UNT
| N_KEY
).type
17392 NEON_ENCODE (INTEGER
, inst
);
17393 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
17394 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
17395 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
17396 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
17398 inst
.instruction
|= op
<< 6;
17401 inst
.instruction
|= 0xfc000000;
17403 inst
.instruction
|= 0xf0000000;
17407 do_crypto_3op_1 (int u
, int op
)
17409 set_it_insn_type (OUTSIDE_IT_INSN
);
17411 if (neon_check_type (3, NS_QQQ
, N_EQK
| N_UNT
, N_EQK
| N_UNT
,
17412 N_32
| N_UNT
| N_KEY
).type
== NT_invtype
)
17417 NEON_ENCODE (INTEGER
, inst
);
17418 neon_three_same (1, u
, 8 << op
);
17424 do_crypto_2op_1 (N_8
, 0);
17430 do_crypto_2op_1 (N_8
, 1);
17436 do_crypto_2op_1 (N_8
, 2);
17442 do_crypto_2op_1 (N_8
, 3);
17448 do_crypto_3op_1 (0, 0);
17454 do_crypto_3op_1 (0, 1);
17460 do_crypto_3op_1 (0, 2);
17466 do_crypto_3op_1 (0, 3);
17472 do_crypto_3op_1 (1, 0);
17478 do_crypto_3op_1 (1, 1);
17482 do_sha256su1 (void)
17484 do_crypto_3op_1 (1, 2);
17490 do_crypto_2op_1 (N_32
, -1);
17496 do_crypto_2op_1 (N_32
, 0);
17500 do_sha256su0 (void)
17502 do_crypto_2op_1 (N_32
, 1);
17506 do_crc32_1 (unsigned int poly
, unsigned int sz
)
17508 unsigned int Rd
= inst
.operands
[0].reg
;
17509 unsigned int Rn
= inst
.operands
[1].reg
;
17510 unsigned int Rm
= inst
.operands
[2].reg
;
17512 set_it_insn_type (OUTSIDE_IT_INSN
);
17513 inst
.instruction
|= LOW4 (Rd
) << (thumb_mode
? 8 : 12);
17514 inst
.instruction
|= LOW4 (Rn
) << 16;
17515 inst
.instruction
|= LOW4 (Rm
);
17516 inst
.instruction
|= sz
<< (thumb_mode
? 4 : 21);
17517 inst
.instruction
|= poly
<< (thumb_mode
? 20 : 9);
17519 if (Rd
== REG_PC
|| Rn
== REG_PC
|| Rm
== REG_PC
)
17520 as_warn (UNPRED_REG ("r15"));
17521 if (thumb_mode
&& (Rd
== REG_SP
|| Rn
== REG_SP
|| Rm
== REG_SP
))
17522 as_warn (UNPRED_REG ("r13"));
17564 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
17566 neon_check_type (2, NS_FD
, N_S32
, N_F64
);
17567 do_vfp_sp_dp_cvt ();
17568 do_vfp_cond_or_thumb ();
17572 /* Overall per-instruction processing. */
17574 /* We need to be able to fix up arbitrary expressions in some statements.
17575 This is so that we can handle symbols that are an arbitrary distance from
17576 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
17577 which returns part of an address in a form which will be valid for
17578 a data instruction. We do this by pushing the expression into a symbol
17579 in the expr_section, and creating a fix for that. */
17582 fix_new_arm (fragS
* frag
,
17596 /* Create an absolute valued symbol, so we have something to
17597 refer to in the object file. Unfortunately for us, gas's
17598 generic expression parsing will already have folded out
17599 any use of .set foo/.type foo %function that may have
17600 been used to set type information of the target location,
17601 that's being specified symbolically. We have to presume
17602 the user knows what they are doing. */
17606 sprintf (name
, "*ABS*0x%lx", (unsigned long)exp
->X_add_number
);
17608 symbol
= symbol_find_or_make (name
);
17609 S_SET_SEGMENT (symbol
, absolute_section
);
17610 symbol_set_frag (symbol
, &zero_address_frag
);
17611 S_SET_VALUE (symbol
, exp
->X_add_number
);
17612 exp
->X_op
= O_symbol
;
17613 exp
->X_add_symbol
= symbol
;
17614 exp
->X_add_number
= 0;
17620 new_fix
= fix_new_exp (frag
, where
, size
, exp
, pc_rel
,
17621 (enum bfd_reloc_code_real
) reloc
);
17625 new_fix
= (fixS
*) fix_new (frag
, where
, size
, make_expr_symbol (exp
), 0,
17626 pc_rel
, (enum bfd_reloc_code_real
) reloc
);
17630 /* Mark whether the fix is to a THUMB instruction, or an ARM
17632 new_fix
->tc_fix_data
= thumb_mode
;
17635 /* Create a frg for an instruction requiring relaxation. */
17637 output_relax_insn (void)
17643 /* The size of the instruction is unknown, so tie the debug info to the
17644 start of the instruction. */
17645 dwarf2_emit_insn (0);
17647 switch (inst
.reloc
.exp
.X_op
)
17650 sym
= inst
.reloc
.exp
.X_add_symbol
;
17651 offset
= inst
.reloc
.exp
.X_add_number
;
17655 offset
= inst
.reloc
.exp
.X_add_number
;
17658 sym
= make_expr_symbol (&inst
.reloc
.exp
);
17662 to
= frag_var (rs_machine_dependent
, INSN_SIZE
, THUMB_SIZE
,
17663 inst
.relax
, sym
, offset
, NULL
/*offset, opcode*/);
17664 md_number_to_chars (to
, inst
.instruction
, THUMB_SIZE
);
17667 /* Write a 32-bit thumb instruction to buf. */
17669 put_thumb32_insn (char * buf
, unsigned long insn
)
17671 md_number_to_chars (buf
, insn
>> 16, THUMB_SIZE
);
17672 md_number_to_chars (buf
+ THUMB_SIZE
, insn
, THUMB_SIZE
);
17676 output_inst (const char * str
)
17682 as_bad ("%s -- `%s'", inst
.error
, str
);
17687 output_relax_insn ();
17690 if (inst
.size
== 0)
17693 to
= frag_more (inst
.size
);
17694 /* PR 9814: Record the thumb mode into the current frag so that we know
17695 what type of NOP padding to use, if necessary. We override any previous
17696 setting so that if the mode has changed then the NOPS that we use will
17697 match the encoding of the last instruction in the frag. */
17698 frag_now
->tc_frag_data
.thumb_mode
= thumb_mode
| MODE_RECORDED
;
17700 if (thumb_mode
&& (inst
.size
> THUMB_SIZE
))
17702 gas_assert (inst
.size
== (2 * THUMB_SIZE
));
17703 put_thumb32_insn (to
, inst
.instruction
);
17705 else if (inst
.size
> INSN_SIZE
)
17707 gas_assert (inst
.size
== (2 * INSN_SIZE
));
17708 md_number_to_chars (to
, inst
.instruction
, INSN_SIZE
);
17709 md_number_to_chars (to
+ INSN_SIZE
, inst
.instruction
, INSN_SIZE
);
17712 md_number_to_chars (to
, inst
.instruction
, inst
.size
);
17714 if (inst
.reloc
.type
!= BFD_RELOC_UNUSED
)
17715 fix_new_arm (frag_now
, to
- frag_now
->fr_literal
,
17716 inst
.size
, & inst
.reloc
.exp
, inst
.reloc
.pc_rel
,
17719 dwarf2_emit_insn (inst
.size
);
17723 output_it_inst (int cond
, int mask
, char * to
)
17725 unsigned long instruction
= 0xbf00;
17728 instruction
|= mask
;
17729 instruction
|= cond
<< 4;
17733 to
= frag_more (2);
17735 dwarf2_emit_insn (2);
17739 md_number_to_chars (to
, instruction
, 2);
17744 /* Tag values used in struct asm_opcode's tag field. */
17747 OT_unconditional
, /* Instruction cannot be conditionalized.
17748 The ARM condition field is still 0xE. */
17749 OT_unconditionalF
, /* Instruction cannot be conditionalized
17750 and carries 0xF in its ARM condition field. */
17751 OT_csuffix
, /* Instruction takes a conditional suffix. */
17752 OT_csuffixF
, /* Some forms of the instruction take a conditional
17753 suffix, others place 0xF where the condition field
17755 OT_cinfix3
, /* Instruction takes a conditional infix,
17756 beginning at character index 3. (In
17757 unified mode, it becomes a suffix.) */
17758 OT_cinfix3_deprecated
, /* The same as OT_cinfix3. This is used for
17759 tsts, cmps, cmns, and teqs. */
17760 OT_cinfix3_legacy
, /* Legacy instruction takes a conditional infix at
17761 character index 3, even in unified mode. Used for
17762 legacy instructions where suffix and infix forms
17763 may be ambiguous. */
17764 OT_csuf_or_in3
, /* Instruction takes either a conditional
17765 suffix or an infix at character index 3. */
17766 OT_odd_infix_unc
, /* This is the unconditional variant of an
17767 instruction that takes a conditional infix
17768 at an unusual position. In unified mode,
17769 this variant will accept a suffix. */
17770 OT_odd_infix_0
/* Values greater than or equal to OT_odd_infix_0
17771 are the conditional variants of instructions that
17772 take conditional infixes in unusual positions.
17773 The infix appears at character index
17774 (tag - OT_odd_infix_0). These are not accepted
17775 in unified mode. */
17778 /* Subroutine of md_assemble, responsible for looking up the primary
17779 opcode from the mnemonic the user wrote. STR points to the
17780 beginning of the mnemonic.
17782 This is not simply a hash table lookup, because of conditional
17783 variants. Most instructions have conditional variants, which are
17784 expressed with a _conditional affix_ to the mnemonic. If we were
17785 to encode each conditional variant as a literal string in the opcode
17786 table, it would have approximately 20,000 entries.
17788 Most mnemonics take this affix as a suffix, and in unified syntax,
17789 'most' is upgraded to 'all'. However, in the divided syntax, some
17790 instructions take the affix as an infix, notably the s-variants of
17791 the arithmetic instructions. Of those instructions, all but six
17792 have the infix appear after the third character of the mnemonic.
17794 Accordingly, the algorithm for looking up primary opcodes given
17797 1. Look up the identifier in the opcode table.
17798 If we find a match, go to step U.
17800 2. Look up the last two characters of the identifier in the
17801 conditions table. If we find a match, look up the first N-2
17802 characters of the identifier in the opcode table. If we
17803 find a match, go to step CE.
17805 3. Look up the fourth and fifth characters of the identifier in
17806 the conditions table. If we find a match, extract those
17807 characters from the identifier, and look up the remaining
17808 characters in the opcode table. If we find a match, go
17813 U. Examine the tag field of the opcode structure, in case this is
17814 one of the six instructions with its conditional infix in an
17815 unusual place. If it is, the tag tells us where to find the
17816 infix; look it up in the conditions table and set inst.cond
17817 accordingly. Otherwise, this is an unconditional instruction.
17818 Again set inst.cond accordingly. Return the opcode structure.
17820 CE. Examine the tag field to make sure this is an instruction that
17821 should receive a conditional suffix. If it is not, fail.
17822 Otherwise, set inst.cond from the suffix we already looked up,
17823 and return the opcode structure.
17825 CM. Examine the tag field to make sure this is an instruction that
17826 should receive a conditional infix after the third character.
17827 If it is not, fail. Otherwise, undo the edits to the current
17828 line of input and proceed as for case CE. */
17830 static const struct asm_opcode
*
17831 opcode_lookup (char **str
)
17835 const struct asm_opcode
*opcode
;
17836 const struct asm_cond
*cond
;
17839 /* Scan up to the end of the mnemonic, which must end in white space,
17840 '.' (in unified mode, or for Neon/VFP instructions), or end of string. */
17841 for (base
= end
= *str
; *end
!= '\0'; end
++)
17842 if (*end
== ' ' || *end
== '.')
17848 /* Handle a possible width suffix and/or Neon type suffix. */
17853 /* The .w and .n suffixes are only valid if the unified syntax is in
17855 if (unified_syntax
&& end
[1] == 'w')
17857 else if (unified_syntax
&& end
[1] == 'n')
17862 inst
.vectype
.elems
= 0;
17864 *str
= end
+ offset
;
17866 if (end
[offset
] == '.')
17868 /* See if we have a Neon type suffix (possible in either unified or
17869 non-unified ARM syntax mode). */
17870 if (parse_neon_type (&inst
.vectype
, str
) == FAIL
)
17873 else if (end
[offset
] != '\0' && end
[offset
] != ' ')
17879 /* Look for unaffixed or special-case affixed mnemonic. */
17880 opcode
= (const struct asm_opcode
*) hash_find_n (arm_ops_hsh
, base
,
17885 if (opcode
->tag
< OT_odd_infix_0
)
17887 inst
.cond
= COND_ALWAYS
;
17891 if (warn_on_deprecated
&& unified_syntax
)
17892 as_tsktsk (_("conditional infixes are deprecated in unified syntax"));
17893 affix
= base
+ (opcode
->tag
- OT_odd_infix_0
);
17894 cond
= (const struct asm_cond
*) hash_find_n (arm_cond_hsh
, affix
, 2);
17897 inst
.cond
= cond
->value
;
17901 /* Cannot have a conditional suffix on a mnemonic of less than two
17903 if (end
- base
< 3)
17906 /* Look for suffixed mnemonic. */
17908 cond
= (const struct asm_cond
*) hash_find_n (arm_cond_hsh
, affix
, 2);
17909 opcode
= (const struct asm_opcode
*) hash_find_n (arm_ops_hsh
, base
,
17911 if (opcode
&& cond
)
17914 switch (opcode
->tag
)
17916 case OT_cinfix3_legacy
:
17917 /* Ignore conditional suffixes matched on infix only mnemonics. */
17921 case OT_cinfix3_deprecated
:
17922 case OT_odd_infix_unc
:
17923 if (!unified_syntax
)
17925 /* Fall through. */
17929 case OT_csuf_or_in3
:
17930 inst
.cond
= cond
->value
;
17933 case OT_unconditional
:
17934 case OT_unconditionalF
:
17936 inst
.cond
= cond
->value
;
17939 /* Delayed diagnostic. */
17940 inst
.error
= BAD_COND
;
17941 inst
.cond
= COND_ALWAYS
;
17950 /* Cannot have a usual-position infix on a mnemonic of less than
17951 six characters (five would be a suffix). */
17952 if (end
- base
< 6)
17955 /* Look for infixed mnemonic in the usual position. */
17957 cond
= (const struct asm_cond
*) hash_find_n (arm_cond_hsh
, affix
, 2);
17961 memcpy (save
, affix
, 2);
17962 memmove (affix
, affix
+ 2, (end
- affix
) - 2);
17963 opcode
= (const struct asm_opcode
*) hash_find_n (arm_ops_hsh
, base
,
17965 memmove (affix
+ 2, affix
, (end
- affix
) - 2);
17966 memcpy (affix
, save
, 2);
17969 && (opcode
->tag
== OT_cinfix3
17970 || opcode
->tag
== OT_cinfix3_deprecated
17971 || opcode
->tag
== OT_csuf_or_in3
17972 || opcode
->tag
== OT_cinfix3_legacy
))
17975 if (warn_on_deprecated
&& unified_syntax
17976 && (opcode
->tag
== OT_cinfix3
17977 || opcode
->tag
== OT_cinfix3_deprecated
))
17978 as_tsktsk (_("conditional infixes are deprecated in unified syntax"));
17980 inst
.cond
= cond
->value
;
17987 /* This function generates an initial IT instruction, leaving its block
17988 virtually open for the new instructions. Eventually,
17989 the mask will be updated by now_it_add_mask () each time
17990 a new instruction needs to be included in the IT block.
17991 Finally, the block is closed with close_automatic_it_block ().
17992 The block closure can be requested either from md_assemble (),
17993 a tencode (), or due to a label hook. */
17996 new_automatic_it_block (int cond
)
17998 now_it
.state
= AUTOMATIC_IT_BLOCK
;
17999 now_it
.mask
= 0x18;
18001 now_it
.block_length
= 1;
18002 mapping_state (MAP_THUMB
);
18003 now_it
.insn
= output_it_inst (cond
, now_it
.mask
, NULL
);
18004 now_it
.warn_deprecated
= FALSE
;
18005 now_it
.insn_cond
= TRUE
;
18008 /* Close an automatic IT block.
18009 See comments in new_automatic_it_block (). */
18012 close_automatic_it_block (void)
18014 now_it
.mask
= 0x10;
18015 now_it
.block_length
= 0;
18018 /* Update the mask of the current automatically-generated IT
18019 instruction. See comments in new_automatic_it_block (). */
18022 now_it_add_mask (int cond
)
18024 #define CLEAR_BIT(value, nbit) ((value) & ~(1 << (nbit)))
18025 #define SET_BIT_VALUE(value, bitvalue, nbit) (CLEAR_BIT (value, nbit) \
18026 | ((bitvalue) << (nbit)))
18027 const int resulting_bit
= (cond
& 1);
18029 now_it
.mask
&= 0xf;
18030 now_it
.mask
= SET_BIT_VALUE (now_it
.mask
,
18032 (5 - now_it
.block_length
));
18033 now_it
.mask
= SET_BIT_VALUE (now_it
.mask
,
18035 ((5 - now_it
.block_length
) - 1) );
18036 output_it_inst (now_it
.cc
, now_it
.mask
, now_it
.insn
);
18039 #undef SET_BIT_VALUE
18042 /* The IT blocks handling machinery is accessed through the these functions:
18043 it_fsm_pre_encode () from md_assemble ()
18044 set_it_insn_type () optional, from the tencode functions
18045 set_it_insn_type_last () ditto
18046 in_it_block () ditto
18047 it_fsm_post_encode () from md_assemble ()
18048 force_automatic_it_block_close () from label handling functions
18051 1) md_assemble () calls it_fsm_pre_encode () before calling tencode (),
18052 initializing the IT insn type with a generic initial value depending
18053 on the inst.condition.
18054 2) During the tencode function, two things may happen:
18055 a) The tencode function overrides the IT insn type by
18056 calling either set_it_insn_type (type) or set_it_insn_type_last ().
18057 b) The tencode function queries the IT block state by
18058 calling in_it_block () (i.e. to determine narrow/not narrow mode).
18060 Both set_it_insn_type and in_it_block run the internal FSM state
18061 handling function (handle_it_state), because: a) setting the IT insn
18062 type may incur in an invalid state (exiting the function),
18063 and b) querying the state requires the FSM to be updated.
18064 Specifically we want to avoid creating an IT block for conditional
18065 branches, so it_fsm_pre_encode is actually a guess and we can't
18066 determine whether an IT block is required until the tencode () routine
18067 has decided what type of instruction this actually it.
18068 Because of this, if set_it_insn_type and in_it_block have to be used,
18069 set_it_insn_type has to be called first.
18071 set_it_insn_type_last () is a wrapper of set_it_insn_type (type), that
18072 determines the insn IT type depending on the inst.cond code.
18073 When a tencode () routine encodes an instruction that can be
18074 either outside an IT block, or, in the case of being inside, has to be
18075 the last one, set_it_insn_type_last () will determine the proper
18076 IT instruction type based on the inst.cond code. Otherwise,
18077 set_it_insn_type can be called for overriding that logic or
18078 for covering other cases.
18080 Calling handle_it_state () may not transition the IT block state to
18081 OUTSIDE_IT_BLOCK immediately, since the (current) state could be
18082 still queried. Instead, if the FSM determines that the state should
18083 be transitioned to OUTSIDE_IT_BLOCK, a flag is marked to be closed
18084 after the tencode () function: that's what it_fsm_post_encode () does.
18086 Since in_it_block () calls the state handling function to get an
18087 updated state, an error may occur (due to invalid insns combination).
18088 In that case, inst.error is set.
18089 Therefore, inst.error has to be checked after the execution of
18090 the tencode () routine.
18092 3) Back in md_assemble(), it_fsm_post_encode () is called to commit
18093 any pending state change (if any) that didn't take place in
18094 handle_it_state () as explained above. */
18097 it_fsm_pre_encode (void)
18099 if (inst
.cond
!= COND_ALWAYS
)
18100 inst
.it_insn_type
= INSIDE_IT_INSN
;
18102 inst
.it_insn_type
= OUTSIDE_IT_INSN
;
18104 now_it
.state_handled
= 0;
18107 /* IT state FSM handling function. */
18110 handle_it_state (void)
18112 now_it
.state_handled
= 1;
18113 now_it
.insn_cond
= FALSE
;
18115 switch (now_it
.state
)
18117 case OUTSIDE_IT_BLOCK
:
18118 switch (inst
.it_insn_type
)
18120 case OUTSIDE_IT_INSN
:
18123 case INSIDE_IT_INSN
:
18124 case INSIDE_IT_LAST_INSN
:
18125 if (thumb_mode
== 0)
18128 && !(implicit_it_mode
& IMPLICIT_IT_MODE_ARM
))
18129 as_tsktsk (_("Warning: conditional outside an IT block"\
18134 if ((implicit_it_mode
& IMPLICIT_IT_MODE_THUMB
)
18135 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2
))
18137 /* Automatically generate the IT instruction. */
18138 new_automatic_it_block (inst
.cond
);
18139 if (inst
.it_insn_type
== INSIDE_IT_LAST_INSN
)
18140 close_automatic_it_block ();
18144 inst
.error
= BAD_OUT_IT
;
18150 case IF_INSIDE_IT_LAST_INSN
:
18151 case NEUTRAL_IT_INSN
:
18155 now_it
.state
= MANUAL_IT_BLOCK
;
18156 now_it
.block_length
= 0;
18161 case AUTOMATIC_IT_BLOCK
:
18162 /* Three things may happen now:
18163 a) We should increment current it block size;
18164 b) We should close current it block (closing insn or 4 insns);
18165 c) We should close current it block and start a new one (due
18166 to incompatible conditions or
18167 4 insns-length block reached). */
18169 switch (inst
.it_insn_type
)
18171 case OUTSIDE_IT_INSN
:
18172 /* The closure of the block shall happen immediately,
18173 so any in_it_block () call reports the block as closed. */
18174 force_automatic_it_block_close ();
18177 case INSIDE_IT_INSN
:
18178 case INSIDE_IT_LAST_INSN
:
18179 case IF_INSIDE_IT_LAST_INSN
:
18180 now_it
.block_length
++;
18182 if (now_it
.block_length
> 4
18183 || !now_it_compatible (inst
.cond
))
18185 force_automatic_it_block_close ();
18186 if (inst
.it_insn_type
!= IF_INSIDE_IT_LAST_INSN
)
18187 new_automatic_it_block (inst
.cond
);
18191 now_it
.insn_cond
= TRUE
;
18192 now_it_add_mask (inst
.cond
);
18195 if (now_it
.state
== AUTOMATIC_IT_BLOCK
18196 && (inst
.it_insn_type
== INSIDE_IT_LAST_INSN
18197 || inst
.it_insn_type
== IF_INSIDE_IT_LAST_INSN
))
18198 close_automatic_it_block ();
18201 case NEUTRAL_IT_INSN
:
18202 now_it
.block_length
++;
18203 now_it
.insn_cond
= TRUE
;
18205 if (now_it
.block_length
> 4)
18206 force_automatic_it_block_close ();
18208 now_it_add_mask (now_it
.cc
& 1);
18212 close_automatic_it_block ();
18213 now_it
.state
= MANUAL_IT_BLOCK
;
18218 case MANUAL_IT_BLOCK
:
18220 /* Check conditional suffixes. */
18221 const int cond
= now_it
.cc
^ ((now_it
.mask
>> 4) & 1) ^ 1;
18224 now_it
.mask
&= 0x1f;
18225 is_last
= (now_it
.mask
== 0x10);
18226 now_it
.insn_cond
= TRUE
;
18228 switch (inst
.it_insn_type
)
18230 case OUTSIDE_IT_INSN
:
18231 inst
.error
= BAD_NOT_IT
;
18234 case INSIDE_IT_INSN
:
18235 if (cond
!= inst
.cond
)
18237 inst
.error
= BAD_IT_COND
;
18242 case INSIDE_IT_LAST_INSN
:
18243 case IF_INSIDE_IT_LAST_INSN
:
18244 if (cond
!= inst
.cond
)
18246 inst
.error
= BAD_IT_COND
;
18251 inst
.error
= BAD_BRANCH
;
18256 case NEUTRAL_IT_INSN
:
18257 /* The BKPT instruction is unconditional even in an IT block. */
18261 inst
.error
= BAD_IT_IT
;
18271 struct depr_insn_mask
18273 unsigned long pattern
;
18274 unsigned long mask
;
18275 const char* description
;
18278 /* List of 16-bit instruction patterns deprecated in an IT block in
18280 static const struct depr_insn_mask depr_it_insns
[] = {
18281 { 0xc000, 0xc000, N_("Short branches, Undefined, SVC, LDM/STM") },
18282 { 0xb000, 0xb000, N_("Miscellaneous 16-bit instructions") },
18283 { 0xa000, 0xb800, N_("ADR") },
18284 { 0x4800, 0xf800, N_("Literal loads") },
18285 { 0x4478, 0xf478, N_("Hi-register ADD, MOV, CMP, BX, BLX using pc") },
18286 { 0x4487, 0xfc87, N_("Hi-register ADD, MOV, CMP using pc") },
18287 /* NOTE: 0x00dd is not the real encoding, instead, it is the 'tvalue'
18288 field in asm_opcode. 'tvalue' is used at the stage this check happen. */
18289 { 0x00dd, 0x7fff, N_("ADD/SUB sp, sp #imm") },
18294 it_fsm_post_encode (void)
18298 if (!now_it
.state_handled
)
18299 handle_it_state ();
18301 if (now_it
.insn_cond
18302 && !now_it
.warn_deprecated
18303 && warn_on_deprecated
18304 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8
))
18306 if (inst
.instruction
>= 0x10000)
18308 as_tsktsk (_("IT blocks containing 32-bit Thumb instructions are "
18309 "deprecated in ARMv8"));
18310 now_it
.warn_deprecated
= TRUE
;
18314 const struct depr_insn_mask
*p
= depr_it_insns
;
18316 while (p
->mask
!= 0)
18318 if ((inst
.instruction
& p
->mask
) == p
->pattern
)
18320 as_tsktsk (_("IT blocks containing 16-bit Thumb instructions "
18321 "of the following class are deprecated in ARMv8: "
18322 "%s"), p
->description
);
18323 now_it
.warn_deprecated
= TRUE
;
18331 if (now_it
.block_length
> 1)
18333 as_tsktsk (_("IT blocks containing more than one conditional "
18334 "instruction are deprecated in ARMv8"));
18335 now_it
.warn_deprecated
= TRUE
;
18339 is_last
= (now_it
.mask
== 0x10);
18342 now_it
.state
= OUTSIDE_IT_BLOCK
;
18348 force_automatic_it_block_close (void)
18350 if (now_it
.state
== AUTOMATIC_IT_BLOCK
)
18352 close_automatic_it_block ();
18353 now_it
.state
= OUTSIDE_IT_BLOCK
;
18361 if (!now_it
.state_handled
)
18362 handle_it_state ();
18364 return now_it
.state
!= OUTSIDE_IT_BLOCK
;
18367 /* Whether OPCODE only has T32 encoding. Since this function is only used by
18368 t32_insn_ok, OPCODE enabled by v6t2 extension bit do not need to be listed
18369 here, hence the "known" in the function name. */
18372 known_t32_only_insn (const struct asm_opcode
*opcode
)
18374 /* Original Thumb-1 wide instruction. */
18375 if (opcode
->tencode
== do_t_blx
18376 || opcode
->tencode
== do_t_branch23
18377 || ARM_CPU_HAS_FEATURE (*opcode
->tvariant
, arm_ext_msr
)
18378 || ARM_CPU_HAS_FEATURE (*opcode
->tvariant
, arm_ext_barrier
))
18381 /* Wide-only instruction added to ARMv8-M Baseline. */
18382 if (ARM_CPU_HAS_FEATURE (*opcode
->tvariant
, arm_ext_v8m_m_only
)
18383 || ARM_CPU_HAS_FEATURE (*opcode
->tvariant
, arm_ext_atomics
)
18384 || ARM_CPU_HAS_FEATURE (*opcode
->tvariant
, arm_ext_v6t2_v8m
)
18385 || ARM_CPU_HAS_FEATURE (*opcode
->tvariant
, arm_ext_div
))
18391 /* Whether wide instruction variant can be used if available for a valid OPCODE
18395 t32_insn_ok (arm_feature_set arch
, const struct asm_opcode
*opcode
)
18397 if (known_t32_only_insn (opcode
))
18400 /* Instruction with narrow and wide encoding added to ARMv8-M. Availability
18401 of variant T3 of B.W is checked in do_t_branch. */
18402 if (ARM_CPU_HAS_FEATURE (arch
, arm_ext_v8m
)
18403 && opcode
->tencode
== do_t_branch
)
18406 /* MOV accepts T1/T3 encodings under Baseline, T3 encoding is 32bit. */
18407 if (ARM_CPU_HAS_FEATURE (arch
, arm_ext_v8m
)
18408 && opcode
->tencode
== do_t_mov_cmp
18409 /* Make sure CMP instruction is not affected. */
18410 && opcode
->aencode
== do_mov
)
18413 /* Wide instruction variants of all instructions with narrow *and* wide
18414 variants become available with ARMv6t2. Other opcodes are either
18415 narrow-only or wide-only and are thus available if OPCODE is valid. */
18416 if (ARM_CPU_HAS_FEATURE (arch
, arm_ext_v6t2
))
18419 /* OPCODE with narrow only instruction variant or wide variant not
18425 md_assemble (char *str
)
18428 const struct asm_opcode
* opcode
;
18430 /* Align the previous label if needed. */
18431 if (last_label_seen
!= NULL
)
18433 symbol_set_frag (last_label_seen
, frag_now
);
18434 S_SET_VALUE (last_label_seen
, (valueT
) frag_now_fix ());
18435 S_SET_SEGMENT (last_label_seen
, now_seg
);
18438 memset (&inst
, '\0', sizeof (inst
));
18439 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
18441 opcode
= opcode_lookup (&p
);
18444 /* It wasn't an instruction, but it might be a register alias of
18445 the form alias .req reg, or a Neon .dn/.qn directive. */
18446 if (! create_register_alias (str
, p
)
18447 && ! create_neon_reg_alias (str
, p
))
18448 as_bad (_("bad instruction `%s'"), str
);
18453 if (warn_on_deprecated
&& opcode
->tag
== OT_cinfix3_deprecated
)
18454 as_tsktsk (_("s suffix on comparison instruction is deprecated"));
18456 /* The value which unconditional instructions should have in place of the
18457 condition field. */
18458 inst
.uncond_value
= (opcode
->tag
== OT_csuffixF
) ? 0xf : -1;
18462 arm_feature_set variant
;
18464 variant
= cpu_variant
;
18465 /* Only allow coprocessor instructions on Thumb-2 capable devices. */
18466 if (!ARM_CPU_HAS_FEATURE (variant
, arm_arch_t2
))
18467 ARM_CLEAR_FEATURE (variant
, variant
, fpu_any_hard
);
18468 /* Check that this instruction is supported for this CPU. */
18469 if (!opcode
->tvariant
18470 || (thumb_mode
== 1
18471 && !ARM_CPU_HAS_FEATURE (variant
, *opcode
->tvariant
)))
18473 as_bad (_("selected processor does not support `%s' in Thumb mode"), str
);
18476 if (inst
.cond
!= COND_ALWAYS
&& !unified_syntax
18477 && opcode
->tencode
!= do_t_branch
)
18479 as_bad (_("Thumb does not support conditional execution"));
18483 /* Two things are addressed here:
18484 1) Implicit require narrow instructions on Thumb-1.
18485 This avoids relaxation accidentally introducing Thumb-2
18487 2) Reject wide instructions in non Thumb-2 cores.
18489 Only instructions with narrow and wide variants need to be handled
18490 but selecting all non wide-only instructions is easier. */
18491 if (!ARM_CPU_HAS_FEATURE (variant
, arm_ext_v6t2
)
18492 && !t32_insn_ok (variant
, opcode
))
18494 if (inst
.size_req
== 0)
18496 else if (inst
.size_req
== 4)
18498 if (ARM_CPU_HAS_FEATURE (variant
, arm_ext_v8m
))
18499 as_bad (_("selected processor does not support 32bit wide "
18500 "variant of instruction `%s'"), str
);
18502 as_bad (_("selected processor does not support `%s' in "
18503 "Thumb-2 mode"), str
);
18508 inst
.instruction
= opcode
->tvalue
;
18510 if (!parse_operands (p
, opcode
->operands
, /*thumb=*/TRUE
))
18512 /* Prepare the it_insn_type for those encodings that don't set
18514 it_fsm_pre_encode ();
18516 opcode
->tencode ();
18518 it_fsm_post_encode ();
18521 if (!(inst
.error
|| inst
.relax
))
18523 gas_assert (inst
.instruction
< 0xe800 || inst
.instruction
> 0xffff);
18524 inst
.size
= (inst
.instruction
> 0xffff ? 4 : 2);
18525 if (inst
.size_req
&& inst
.size_req
!= inst
.size
)
18527 as_bad (_("cannot honor width suffix -- `%s'"), str
);
18532 /* Something has gone badly wrong if we try to relax a fixed size
18534 gas_assert (inst
.size_req
== 0 || !inst
.relax
);
18536 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
18537 *opcode
->tvariant
);
18538 /* Many Thumb-2 instructions also have Thumb-1 variants, so explicitly
18539 set those bits when Thumb-2 32-bit instructions are seen. The impact
18540 of relaxable instructions will be considered later after we finish all
18542 if (ARM_FEATURE_CORE_EQUAL (cpu_variant
, arm_arch_any
))
18543 variant
= arm_arch_none
;
18545 variant
= cpu_variant
;
18546 if (inst
.size
== 4 && !t32_insn_ok (variant
, opcode
))
18547 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
18550 check_neon_suffixes
;
18554 mapping_state (MAP_THUMB
);
18557 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
))
18561 /* bx is allowed on v5 cores, and sometimes on v4 cores. */
18562 is_bx
= (opcode
->aencode
== do_bx
);
18564 /* Check that this instruction is supported for this CPU. */
18565 if (!(is_bx
&& fix_v4bx
)
18566 && !(opcode
->avariant
&&
18567 ARM_CPU_HAS_FEATURE (cpu_variant
, *opcode
->avariant
)))
18569 as_bad (_("selected processor does not support `%s' in ARM mode"), str
);
18574 as_bad (_("width suffixes are invalid in ARM mode -- `%s'"), str
);
18578 inst
.instruction
= opcode
->avalue
;
18579 if (opcode
->tag
== OT_unconditionalF
)
18580 inst
.instruction
|= 0xFU
<< 28;
18582 inst
.instruction
|= inst
.cond
<< 28;
18583 inst
.size
= INSN_SIZE
;
18584 if (!parse_operands (p
, opcode
->operands
, /*thumb=*/FALSE
))
18586 it_fsm_pre_encode ();
18587 opcode
->aencode ();
18588 it_fsm_post_encode ();
18590 /* Arm mode bx is marked as both v4T and v5 because it's still required
18591 on a hypothetical non-thumb v5 core. */
18593 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
, arm_ext_v4t
);
18595 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
,
18596 *opcode
->avariant
);
18598 check_neon_suffixes
;
18602 mapping_state (MAP_ARM
);
18607 as_bad (_("attempt to use an ARM instruction on a Thumb-only processor "
18615 check_it_blocks_finished (void)
18620 for (sect
= stdoutput
->sections
; sect
!= NULL
; sect
= sect
->next
)
18621 if (seg_info (sect
)->tc_segment_info_data
.current_it
.state
18622 == MANUAL_IT_BLOCK
)
18624 as_warn (_("section '%s' finished with an open IT block."),
18628 if (now_it
.state
== MANUAL_IT_BLOCK
)
18629 as_warn (_("file finished with an open IT block."));
18633 /* Various frobbings of labels and their addresses. */
18636 arm_start_line_hook (void)
18638 last_label_seen
= NULL
;
18642 arm_frob_label (symbolS
* sym
)
18644 last_label_seen
= sym
;
18646 ARM_SET_THUMB (sym
, thumb_mode
);
18648 #if defined OBJ_COFF || defined OBJ_ELF
18649 ARM_SET_INTERWORK (sym
, support_interwork
);
18652 force_automatic_it_block_close ();
18654 /* Note - do not allow local symbols (.Lxxx) to be labelled
18655 as Thumb functions. This is because these labels, whilst
18656 they exist inside Thumb code, are not the entry points for
18657 possible ARM->Thumb calls. Also, these labels can be used
18658 as part of a computed goto or switch statement. eg gcc
18659 can generate code that looks like this:
18661 ldr r2, [pc, .Laaa]
18671 The first instruction loads the address of the jump table.
18672 The second instruction converts a table index into a byte offset.
18673 The third instruction gets the jump address out of the table.
18674 The fourth instruction performs the jump.
18676 If the address stored at .Laaa is that of a symbol which has the
18677 Thumb_Func bit set, then the linker will arrange for this address
18678 to have the bottom bit set, which in turn would mean that the
18679 address computation performed by the third instruction would end
18680 up with the bottom bit set. Since the ARM is capable of unaligned
18681 word loads, the instruction would then load the incorrect address
18682 out of the jump table, and chaos would ensue. */
18683 if (label_is_thumb_function_name
18684 && (S_GET_NAME (sym
)[0] != '.' || S_GET_NAME (sym
)[1] != 'L')
18685 && (bfd_get_section_flags (stdoutput
, now_seg
) & SEC_CODE
) != 0)
18687 /* When the address of a Thumb function is taken the bottom
18688 bit of that address should be set. This will allow
18689 interworking between Arm and Thumb functions to work
18692 THUMB_SET_FUNC (sym
, 1);
18694 label_is_thumb_function_name
= FALSE
;
18697 dwarf2_emit_label (sym
);
18701 arm_data_in_code (void)
18703 if (thumb_mode
&& ! strncmp (input_line_pointer
+ 1, "data:", 5))
18705 *input_line_pointer
= '/';
18706 input_line_pointer
+= 5;
18707 *input_line_pointer
= 0;
18715 arm_canonicalize_symbol_name (char * name
)
18719 if (thumb_mode
&& (len
= strlen (name
)) > 5
18720 && streq (name
+ len
- 5, "/data"))
18721 *(name
+ len
- 5) = 0;
18726 /* Table of all register names defined by default. The user can
18727 define additional names with .req. Note that all register names
18728 should appear in both upper and lowercase variants. Some registers
18729 also have mixed-case names. */
18731 #define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE, 0 }
18732 #define REGNUM(p,n,t) REGDEF(p##n, n, t)
18733 #define REGNUM2(p,n,t) REGDEF(p##n, 2 * n, t)
18734 #define REGSET(p,t) \
18735 REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
18736 REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
18737 REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
18738 REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
18739 #define REGSETH(p,t) \
18740 REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
18741 REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
18742 REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
18743 REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t), REGNUM(p,31,t)
18744 #define REGSET2(p,t) \
18745 REGNUM2(p, 0,t), REGNUM2(p, 1,t), REGNUM2(p, 2,t), REGNUM2(p, 3,t), \
18746 REGNUM2(p, 4,t), REGNUM2(p, 5,t), REGNUM2(p, 6,t), REGNUM2(p, 7,t), \
18747 REGNUM2(p, 8,t), REGNUM2(p, 9,t), REGNUM2(p,10,t), REGNUM2(p,11,t), \
18748 REGNUM2(p,12,t), REGNUM2(p,13,t), REGNUM2(p,14,t), REGNUM2(p,15,t)
18749 #define SPLRBANK(base,bank,t) \
18750 REGDEF(lr_##bank, 768|((base+0)<<16), t), \
18751 REGDEF(sp_##bank, 768|((base+1)<<16), t), \
18752 REGDEF(spsr_##bank, 768|(base<<16)|SPSR_BIT, t), \
18753 REGDEF(LR_##bank, 768|((base+0)<<16), t), \
18754 REGDEF(SP_##bank, 768|((base+1)<<16), t), \
18755 REGDEF(SPSR_##bank, 768|(base<<16)|SPSR_BIT, t)
18757 static const struct reg_entry reg_names
[] =
18759 /* ARM integer registers. */
18760 REGSET(r
, RN
), REGSET(R
, RN
),
18762 /* ATPCS synonyms. */
18763 REGDEF(a1
,0,RN
), REGDEF(a2
,1,RN
), REGDEF(a3
, 2,RN
), REGDEF(a4
, 3,RN
),
18764 REGDEF(v1
,4,RN
), REGDEF(v2
,5,RN
), REGDEF(v3
, 6,RN
), REGDEF(v4
, 7,RN
),
18765 REGDEF(v5
,8,RN
), REGDEF(v6
,9,RN
), REGDEF(v7
,10,RN
), REGDEF(v8
,11,RN
),
18767 REGDEF(A1
,0,RN
), REGDEF(A2
,1,RN
), REGDEF(A3
, 2,RN
), REGDEF(A4
, 3,RN
),
18768 REGDEF(V1
,4,RN
), REGDEF(V2
,5,RN
), REGDEF(V3
, 6,RN
), REGDEF(V4
, 7,RN
),
18769 REGDEF(V5
,8,RN
), REGDEF(V6
,9,RN
), REGDEF(V7
,10,RN
), REGDEF(V8
,11,RN
),
18771 /* Well-known aliases. */
18772 REGDEF(wr
, 7,RN
), REGDEF(sb
, 9,RN
), REGDEF(sl
,10,RN
), REGDEF(fp
,11,RN
),
18773 REGDEF(ip
,12,RN
), REGDEF(sp
,13,RN
), REGDEF(lr
,14,RN
), REGDEF(pc
,15,RN
),
18775 REGDEF(WR
, 7,RN
), REGDEF(SB
, 9,RN
), REGDEF(SL
,10,RN
), REGDEF(FP
,11,RN
),
18776 REGDEF(IP
,12,RN
), REGDEF(SP
,13,RN
), REGDEF(LR
,14,RN
), REGDEF(PC
,15,RN
),
18778 /* Coprocessor numbers. */
18779 REGSET(p
, CP
), REGSET(P
, CP
),
18781 /* Coprocessor register numbers. The "cr" variants are for backward
18783 REGSET(c
, CN
), REGSET(C
, CN
),
18784 REGSET(cr
, CN
), REGSET(CR
, CN
),
18786 /* ARM banked registers. */
18787 REGDEF(R8_usr
,512|(0<<16),RNB
), REGDEF(r8_usr
,512|(0<<16),RNB
),
18788 REGDEF(R9_usr
,512|(1<<16),RNB
), REGDEF(r9_usr
,512|(1<<16),RNB
),
18789 REGDEF(R10_usr
,512|(2<<16),RNB
), REGDEF(r10_usr
,512|(2<<16),RNB
),
18790 REGDEF(R11_usr
,512|(3<<16),RNB
), REGDEF(r11_usr
,512|(3<<16),RNB
),
18791 REGDEF(R12_usr
,512|(4<<16),RNB
), REGDEF(r12_usr
,512|(4<<16),RNB
),
18792 REGDEF(SP_usr
,512|(5<<16),RNB
), REGDEF(sp_usr
,512|(5<<16),RNB
),
18793 REGDEF(LR_usr
,512|(6<<16),RNB
), REGDEF(lr_usr
,512|(6<<16),RNB
),
18795 REGDEF(R8_fiq
,512|(8<<16),RNB
), REGDEF(r8_fiq
,512|(8<<16),RNB
),
18796 REGDEF(R9_fiq
,512|(9<<16),RNB
), REGDEF(r9_fiq
,512|(9<<16),RNB
),
18797 REGDEF(R10_fiq
,512|(10<<16),RNB
), REGDEF(r10_fiq
,512|(10<<16),RNB
),
18798 REGDEF(R11_fiq
,512|(11<<16),RNB
), REGDEF(r11_fiq
,512|(11<<16),RNB
),
18799 REGDEF(R12_fiq
,512|(12<<16),RNB
), REGDEF(r12_fiq
,512|(12<<16),RNB
),
18800 REGDEF(SP_fiq
,512|(13<<16),RNB
), REGDEF(sp_fiq
,512|(13<<16),RNB
),
18801 REGDEF(LR_fiq
,512|(14<<16),RNB
), REGDEF(lr_fiq
,512|(14<<16),RNB
),
18802 REGDEF(SPSR_fiq
,512|(14<<16)|SPSR_BIT
,RNB
), REGDEF(spsr_fiq
,512|(14<<16)|SPSR_BIT
,RNB
),
18804 SPLRBANK(0,IRQ
,RNB
), SPLRBANK(0,irq
,RNB
),
18805 SPLRBANK(2,SVC
,RNB
), SPLRBANK(2,svc
,RNB
),
18806 SPLRBANK(4,ABT
,RNB
), SPLRBANK(4,abt
,RNB
),
18807 SPLRBANK(6,UND
,RNB
), SPLRBANK(6,und
,RNB
),
18808 SPLRBANK(12,MON
,RNB
), SPLRBANK(12,mon
,RNB
),
18809 REGDEF(elr_hyp
,768|(14<<16),RNB
), REGDEF(ELR_hyp
,768|(14<<16),RNB
),
18810 REGDEF(sp_hyp
,768|(15<<16),RNB
), REGDEF(SP_hyp
,768|(15<<16),RNB
),
18811 REGDEF(spsr_hyp
,768|(14<<16)|SPSR_BIT
,RNB
),
18812 REGDEF(SPSR_hyp
,768|(14<<16)|SPSR_BIT
,RNB
),
18814 /* FPA registers. */
18815 REGNUM(f
,0,FN
), REGNUM(f
,1,FN
), REGNUM(f
,2,FN
), REGNUM(f
,3,FN
),
18816 REGNUM(f
,4,FN
), REGNUM(f
,5,FN
), REGNUM(f
,6,FN
), REGNUM(f
,7, FN
),
18818 REGNUM(F
,0,FN
), REGNUM(F
,1,FN
), REGNUM(F
,2,FN
), REGNUM(F
,3,FN
),
18819 REGNUM(F
,4,FN
), REGNUM(F
,5,FN
), REGNUM(F
,6,FN
), REGNUM(F
,7, FN
),
18821 /* VFP SP registers. */
18822 REGSET(s
,VFS
), REGSET(S
,VFS
),
18823 REGSETH(s
,VFS
), REGSETH(S
,VFS
),
18825 /* VFP DP Registers. */
18826 REGSET(d
,VFD
), REGSET(D
,VFD
),
18827 /* Extra Neon DP registers. */
18828 REGSETH(d
,VFD
), REGSETH(D
,VFD
),
18830 /* Neon QP registers. */
18831 REGSET2(q
,NQ
), REGSET2(Q
,NQ
),
18833 /* VFP control registers. */
18834 REGDEF(fpsid
,0,VFC
), REGDEF(fpscr
,1,VFC
), REGDEF(fpexc
,8,VFC
),
18835 REGDEF(FPSID
,0,VFC
), REGDEF(FPSCR
,1,VFC
), REGDEF(FPEXC
,8,VFC
),
18836 REGDEF(fpinst
,9,VFC
), REGDEF(fpinst2
,10,VFC
),
18837 REGDEF(FPINST
,9,VFC
), REGDEF(FPINST2
,10,VFC
),
18838 REGDEF(mvfr0
,7,VFC
), REGDEF(mvfr1
,6,VFC
),
18839 REGDEF(MVFR0
,7,VFC
), REGDEF(MVFR1
,6,VFC
),
18841 /* Maverick DSP coprocessor registers. */
18842 REGSET(mvf
,MVF
), REGSET(mvd
,MVD
), REGSET(mvfx
,MVFX
), REGSET(mvdx
,MVDX
),
18843 REGSET(MVF
,MVF
), REGSET(MVD
,MVD
), REGSET(MVFX
,MVFX
), REGSET(MVDX
,MVDX
),
18845 REGNUM(mvax
,0,MVAX
), REGNUM(mvax
,1,MVAX
),
18846 REGNUM(mvax
,2,MVAX
), REGNUM(mvax
,3,MVAX
),
18847 REGDEF(dspsc
,0,DSPSC
),
18849 REGNUM(MVAX
,0,MVAX
), REGNUM(MVAX
,1,MVAX
),
18850 REGNUM(MVAX
,2,MVAX
), REGNUM(MVAX
,3,MVAX
),
18851 REGDEF(DSPSC
,0,DSPSC
),
18853 /* iWMMXt data registers - p0, c0-15. */
18854 REGSET(wr
,MMXWR
), REGSET(wR
,MMXWR
), REGSET(WR
, MMXWR
),
18856 /* iWMMXt control registers - p1, c0-3. */
18857 REGDEF(wcid
, 0,MMXWC
), REGDEF(wCID
, 0,MMXWC
), REGDEF(WCID
, 0,MMXWC
),
18858 REGDEF(wcon
, 1,MMXWC
), REGDEF(wCon
, 1,MMXWC
), REGDEF(WCON
, 1,MMXWC
),
18859 REGDEF(wcssf
, 2,MMXWC
), REGDEF(wCSSF
, 2,MMXWC
), REGDEF(WCSSF
, 2,MMXWC
),
18860 REGDEF(wcasf
, 3,MMXWC
), REGDEF(wCASF
, 3,MMXWC
), REGDEF(WCASF
, 3,MMXWC
),
18862 /* iWMMXt scalar (constant/offset) registers - p1, c8-11. */
18863 REGDEF(wcgr0
, 8,MMXWCG
), REGDEF(wCGR0
, 8,MMXWCG
), REGDEF(WCGR0
, 8,MMXWCG
),
18864 REGDEF(wcgr1
, 9,MMXWCG
), REGDEF(wCGR1
, 9,MMXWCG
), REGDEF(WCGR1
, 9,MMXWCG
),
18865 REGDEF(wcgr2
,10,MMXWCG
), REGDEF(wCGR2
,10,MMXWCG
), REGDEF(WCGR2
,10,MMXWCG
),
18866 REGDEF(wcgr3
,11,MMXWCG
), REGDEF(wCGR3
,11,MMXWCG
), REGDEF(WCGR3
,11,MMXWCG
),
18868 /* XScale accumulator registers. */
18869 REGNUM(acc
,0,XSCALE
), REGNUM(ACC
,0,XSCALE
),
18875 /* Table of all PSR suffixes. Bare "CPSR" and "SPSR" are handled
18876 within psr_required_here. */
18877 static const struct asm_psr psrs
[] =
18879 /* Backward compatibility notation. Note that "all" is no longer
18880 truly all possible PSR bits. */
18881 {"all", PSR_c
| PSR_f
},
18885 /* Individual flags. */
18891 /* Combinations of flags. */
18892 {"fs", PSR_f
| PSR_s
},
18893 {"fx", PSR_f
| PSR_x
},
18894 {"fc", PSR_f
| PSR_c
},
18895 {"sf", PSR_s
| PSR_f
},
18896 {"sx", PSR_s
| PSR_x
},
18897 {"sc", PSR_s
| PSR_c
},
18898 {"xf", PSR_x
| PSR_f
},
18899 {"xs", PSR_x
| PSR_s
},
18900 {"xc", PSR_x
| PSR_c
},
18901 {"cf", PSR_c
| PSR_f
},
18902 {"cs", PSR_c
| PSR_s
},
18903 {"cx", PSR_c
| PSR_x
},
18904 {"fsx", PSR_f
| PSR_s
| PSR_x
},
18905 {"fsc", PSR_f
| PSR_s
| PSR_c
},
18906 {"fxs", PSR_f
| PSR_x
| PSR_s
},
18907 {"fxc", PSR_f
| PSR_x
| PSR_c
},
18908 {"fcs", PSR_f
| PSR_c
| PSR_s
},
18909 {"fcx", PSR_f
| PSR_c
| PSR_x
},
18910 {"sfx", PSR_s
| PSR_f
| PSR_x
},
18911 {"sfc", PSR_s
| PSR_f
| PSR_c
},
18912 {"sxf", PSR_s
| PSR_x
| PSR_f
},
18913 {"sxc", PSR_s
| PSR_x
| PSR_c
},
18914 {"scf", PSR_s
| PSR_c
| PSR_f
},
18915 {"scx", PSR_s
| PSR_c
| PSR_x
},
18916 {"xfs", PSR_x
| PSR_f
| PSR_s
},
18917 {"xfc", PSR_x
| PSR_f
| PSR_c
},
18918 {"xsf", PSR_x
| PSR_s
| PSR_f
},
18919 {"xsc", PSR_x
| PSR_s
| PSR_c
},
18920 {"xcf", PSR_x
| PSR_c
| PSR_f
},
18921 {"xcs", PSR_x
| PSR_c
| PSR_s
},
18922 {"cfs", PSR_c
| PSR_f
| PSR_s
},
18923 {"cfx", PSR_c
| PSR_f
| PSR_x
},
18924 {"csf", PSR_c
| PSR_s
| PSR_f
},
18925 {"csx", PSR_c
| PSR_s
| PSR_x
},
18926 {"cxf", PSR_c
| PSR_x
| PSR_f
},
18927 {"cxs", PSR_c
| PSR_x
| PSR_s
},
18928 {"fsxc", PSR_f
| PSR_s
| PSR_x
| PSR_c
},
18929 {"fscx", PSR_f
| PSR_s
| PSR_c
| PSR_x
},
18930 {"fxsc", PSR_f
| PSR_x
| PSR_s
| PSR_c
},
18931 {"fxcs", PSR_f
| PSR_x
| PSR_c
| PSR_s
},
18932 {"fcsx", PSR_f
| PSR_c
| PSR_s
| PSR_x
},
18933 {"fcxs", PSR_f
| PSR_c
| PSR_x
| PSR_s
},
18934 {"sfxc", PSR_s
| PSR_f
| PSR_x
| PSR_c
},
18935 {"sfcx", PSR_s
| PSR_f
| PSR_c
| PSR_x
},
18936 {"sxfc", PSR_s
| PSR_x
| PSR_f
| PSR_c
},
18937 {"sxcf", PSR_s
| PSR_x
| PSR_c
| PSR_f
},
18938 {"scfx", PSR_s
| PSR_c
| PSR_f
| PSR_x
},
18939 {"scxf", PSR_s
| PSR_c
| PSR_x
| PSR_f
},
18940 {"xfsc", PSR_x
| PSR_f
| PSR_s
| PSR_c
},
18941 {"xfcs", PSR_x
| PSR_f
| PSR_c
| PSR_s
},
18942 {"xsfc", PSR_x
| PSR_s
| PSR_f
| PSR_c
},
18943 {"xscf", PSR_x
| PSR_s
| PSR_c
| PSR_f
},
18944 {"xcfs", PSR_x
| PSR_c
| PSR_f
| PSR_s
},
18945 {"xcsf", PSR_x
| PSR_c
| PSR_s
| PSR_f
},
18946 {"cfsx", PSR_c
| PSR_f
| PSR_s
| PSR_x
},
18947 {"cfxs", PSR_c
| PSR_f
| PSR_x
| PSR_s
},
18948 {"csfx", PSR_c
| PSR_s
| PSR_f
| PSR_x
},
18949 {"csxf", PSR_c
| PSR_s
| PSR_x
| PSR_f
},
18950 {"cxfs", PSR_c
| PSR_x
| PSR_f
| PSR_s
},
18951 {"cxsf", PSR_c
| PSR_x
| PSR_s
| PSR_f
},
18954 /* Table of V7M psr names. */
18955 static const struct asm_psr v7m_psrs
[] =
18957 {"apsr", 0x0 }, {"APSR", 0x0 },
18958 {"iapsr", 0x1 }, {"IAPSR", 0x1 },
18959 {"eapsr", 0x2 }, {"EAPSR", 0x2 },
18960 {"psr", 0x3 }, {"PSR", 0x3 },
18961 {"xpsr", 0x3 }, {"XPSR", 0x3 }, {"xPSR", 3 },
18962 {"ipsr", 0x5 }, {"IPSR", 0x5 },
18963 {"epsr", 0x6 }, {"EPSR", 0x6 },
18964 {"iepsr", 0x7 }, {"IEPSR", 0x7 },
18965 {"msp", 0x8 }, {"MSP", 0x8 },
18966 {"psp", 0x9 }, {"PSP", 0x9 },
18967 {"msplim", 0xa }, {"MSPLIM", 0xa },
18968 {"psplim", 0xb }, {"PSPLIM", 0xb },
18969 {"primask", 0x10}, {"PRIMASK", 0x10},
18970 {"basepri", 0x11}, {"BASEPRI", 0x11},
18971 {"basepri_max", 0x12}, {"BASEPRI_MAX", 0x12},
18972 {"faultmask", 0x13}, {"FAULTMASK", 0x13},
18973 {"control", 0x14}, {"CONTROL", 0x14},
18974 {"msp_ns", 0x88}, {"MSP_NS", 0x88},
18975 {"psp_ns", 0x89}, {"PSP_NS", 0x89},
18976 {"msplim_ns", 0x8a}, {"MSPLIM_NS", 0x8a},
18977 {"psplim_ns", 0x8b}, {"PSPLIM_NS", 0x8b},
18978 {"primask_ns", 0x90}, {"PRIMASK_NS", 0x90},
18979 {"basepri_ns", 0x91}, {"BASEPRI_NS", 0x91},
18980 {"faultmask_ns", 0x93}, {"FAULTMASK_NS", 0x93},
18981 {"control_ns", 0x94}, {"CONTROL_NS", 0x94},
18982 {"sp_ns", 0x98}, {"SP_NS", 0x98 }
18985 /* Table of all shift-in-operand names. */
18986 static const struct asm_shift_name shift_names
[] =
18988 { "asl", SHIFT_LSL
}, { "ASL", SHIFT_LSL
},
18989 { "lsl", SHIFT_LSL
}, { "LSL", SHIFT_LSL
},
18990 { "lsr", SHIFT_LSR
}, { "LSR", SHIFT_LSR
},
18991 { "asr", SHIFT_ASR
}, { "ASR", SHIFT_ASR
},
18992 { "ror", SHIFT_ROR
}, { "ROR", SHIFT_ROR
},
18993 { "rrx", SHIFT_RRX
}, { "RRX", SHIFT_RRX
}
18996 /* Table of all explicit relocation names. */
18998 static struct reloc_entry reloc_names
[] =
19000 { "got", BFD_RELOC_ARM_GOT32
}, { "GOT", BFD_RELOC_ARM_GOT32
},
19001 { "gotoff", BFD_RELOC_ARM_GOTOFF
}, { "GOTOFF", BFD_RELOC_ARM_GOTOFF
},
19002 { "plt", BFD_RELOC_ARM_PLT32
}, { "PLT", BFD_RELOC_ARM_PLT32
},
19003 { "target1", BFD_RELOC_ARM_TARGET1
}, { "TARGET1", BFD_RELOC_ARM_TARGET1
},
19004 { "target2", BFD_RELOC_ARM_TARGET2
}, { "TARGET2", BFD_RELOC_ARM_TARGET2
},
19005 { "sbrel", BFD_RELOC_ARM_SBREL32
}, { "SBREL", BFD_RELOC_ARM_SBREL32
},
19006 { "tlsgd", BFD_RELOC_ARM_TLS_GD32
}, { "TLSGD", BFD_RELOC_ARM_TLS_GD32
},
19007 { "tlsldm", BFD_RELOC_ARM_TLS_LDM32
}, { "TLSLDM", BFD_RELOC_ARM_TLS_LDM32
},
19008 { "tlsldo", BFD_RELOC_ARM_TLS_LDO32
}, { "TLSLDO", BFD_RELOC_ARM_TLS_LDO32
},
19009 { "gottpoff",BFD_RELOC_ARM_TLS_IE32
}, { "GOTTPOFF",BFD_RELOC_ARM_TLS_IE32
},
19010 { "tpoff", BFD_RELOC_ARM_TLS_LE32
}, { "TPOFF", BFD_RELOC_ARM_TLS_LE32
},
19011 { "got_prel", BFD_RELOC_ARM_GOT_PREL
}, { "GOT_PREL", BFD_RELOC_ARM_GOT_PREL
},
19012 { "tlsdesc", BFD_RELOC_ARM_TLS_GOTDESC
},
19013 { "TLSDESC", BFD_RELOC_ARM_TLS_GOTDESC
},
19014 { "tlscall", BFD_RELOC_ARM_TLS_CALL
},
19015 { "TLSCALL", BFD_RELOC_ARM_TLS_CALL
},
19016 { "tlsdescseq", BFD_RELOC_ARM_TLS_DESCSEQ
},
19017 { "TLSDESCSEQ", BFD_RELOC_ARM_TLS_DESCSEQ
}
19021 /* Table of all conditional affixes. 0xF is not defined as a condition code. */
19022 static const struct asm_cond conds
[] =
19026 {"cs", 0x2}, {"hs", 0x2},
19027 {"cc", 0x3}, {"ul", 0x3}, {"lo", 0x3},
19041 #define UL_BARRIER(L,U,CODE,FEAT) \
19042 { L, CODE, ARM_FEATURE_CORE_LOW (FEAT) }, \
19043 { U, CODE, ARM_FEATURE_CORE_LOW (FEAT) }
19045 static struct asm_barrier_opt barrier_opt_names
[] =
19047 UL_BARRIER ("sy", "SY", 0xf, ARM_EXT_BARRIER
),
19048 UL_BARRIER ("st", "ST", 0xe, ARM_EXT_BARRIER
),
19049 UL_BARRIER ("ld", "LD", 0xd, ARM_EXT_V8
),
19050 UL_BARRIER ("ish", "ISH", 0xb, ARM_EXT_BARRIER
),
19051 UL_BARRIER ("sh", "SH", 0xb, ARM_EXT_BARRIER
),
19052 UL_BARRIER ("ishst", "ISHST", 0xa, ARM_EXT_BARRIER
),
19053 UL_BARRIER ("shst", "SHST", 0xa, ARM_EXT_BARRIER
),
19054 UL_BARRIER ("ishld", "ISHLD", 0x9, ARM_EXT_V8
),
19055 UL_BARRIER ("un", "UN", 0x7, ARM_EXT_BARRIER
),
19056 UL_BARRIER ("nsh", "NSH", 0x7, ARM_EXT_BARRIER
),
19057 UL_BARRIER ("unst", "UNST", 0x6, ARM_EXT_BARRIER
),
19058 UL_BARRIER ("nshst", "NSHST", 0x6, ARM_EXT_BARRIER
),
19059 UL_BARRIER ("nshld", "NSHLD", 0x5, ARM_EXT_V8
),
19060 UL_BARRIER ("osh", "OSH", 0x3, ARM_EXT_BARRIER
),
19061 UL_BARRIER ("oshst", "OSHST", 0x2, ARM_EXT_BARRIER
),
19062 UL_BARRIER ("oshld", "OSHLD", 0x1, ARM_EXT_V8
)
/* Table of ARM-format instructions.  */

/* Macros for gluing together operand strings.  N.B. In all cases
   other than OPS0, the trailing OP_stop comes from default
   zero-initialization of the unspecified elements of the array.  */
#define OPS0()		  { OP_stop, }
#define OPS1(a)		  { OP_##a, }
#define OPS2(a,b)	  { OP_##a,OP_##b, }
#define OPS3(a,b,c)	  { OP_##a,OP_##b,OP_##c, }
#define OPS4(a,b,c,d)	  { OP_##a,OP_##b,OP_##c,OP_##d, }
#define OPS5(a,b,c,d,e)	  { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e, }
#define OPS6(a,b,c,d,e,f) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e,OP_##f, }

/* These macros are similar to the OPSn, but do not prepend the OP_ prefix.
   This is useful when mixing operands for ARM and THUMB, i.e. using the
   MIX_ARM_THUMB_OPERANDS macro.
   In order to use these macros, prefix the number of operands with _
   e.g. _3.  */
#define OPS_1(a)	   { a, }
#define OPS_2(a,b)	   { a,b, }
#define OPS_3(a,b,c)	   { a,b,c, }
#define OPS_4(a,b,c,d)	   { a,b,c,d, }
#define OPS_5(a,b,c,d,e)   { a,b,c,d,e, }
#define OPS_6(a,b,c,d,e,f) { a,b,c,d,e,f, }

/* These macros abstract out the exact format of the mnemonic table and
   save some repeated characters.  */

/* The normal sort of mnemonic; has a Thumb variant; takes a conditional suffix.  */
#define TxCE(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }

/* Two variants of the above - TCE for a numeric Thumb opcode, tCE for
   a T_MNEM_xyz enumerator.  */
#define TCE(mnem, aop, top, nops, ops, ae, te) \
  TxCE (mnem, aop, 0x##top, nops, ops, ae, te)
#define tCE(mnem, aop, top, nops, ops, ae, te) \
  TxCE (mnem, aop, T_MNEM##top, nops, ops, ae, te)

/* Second most common sort of mnemonic: has a Thumb variant, takes a conditional
   infix after the third character.  */
#define TxC3(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_cinfix3, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }
#define TxC3w(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_cinfix3_deprecated, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }
#define TC3(mnem, aop, top, nops, ops, ae, te) \
  TxC3 (mnem, aop, 0x##top, nops, ops, ae, te)
#define TC3w(mnem, aop, top, nops, ops, ae, te) \
  TxC3w (mnem, aop, 0x##top, nops, ops, ae, te)
#define tC3(mnem, aop, top, nops, ops, ae, te) \
  TxC3 (mnem, aop, T_MNEM##top, nops, ops, ae, te)
#define tC3w(mnem, aop, top, nops, ops, ae, te) \
  TxC3w (mnem, aop, T_MNEM##top, nops, ops, ae, te)

/* Mnemonic that cannot be conditionalized.  The ARM condition-code
   field is still 0xE.  Many of the Thumb variants can be executed
   conditionally, so this is checked separately.  */
#define TUE(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }

/* Same as TUE but the encoding function for ARM and Thumb modes is the same.
   Used by mnemonics that have very minimal differences in the encoding for
   ARM and Thumb variants and can be handled in a common function.  */
#define TUEc(mnem, op, top, nops, ops, en) \
  { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##en, do_##en }

/* Mnemonic that cannot be conditionalized, and bears 0xF in its ARM
   condition code field.  */
#define TUF(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }

/* ARM-only variants of all the above.  */
#define CE(mnem,  op, nops, ops, ae)	\
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

#define C3(mnem, op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

/* Legacy mnemonics that always have conditional infix after the third
   character.  */
#define CL(mnem, op, nops, ops, ae)	\
  { mnem, OPS##nops ops, OT_cinfix3_legacy, \
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

/* Coprocessor instructions.  Isomorphic between Arm and Thumb-2.  */
#define cCE(mnem,  op, nops, ops, ae)	\
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

/* Legacy coprocessor instructions where conditional infix and conditional
   suffix are ambiguous.  For consistency this includes all FPA instructions,
   not just the potentially ambiguous ones.  */
#define cCL(mnem, op, nops, ops, ae)	\
  { mnem, OPS##nops ops, OT_cinfix3_legacy, \
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

/* Coprocessor, takes either a suffix or a position-3 infix
   (for an FPA corner case).  */
#define C3E(mnem, op, nops, ops, ae) \
  { mnem, OPS##nops ops, OT_csuf_or_in3, \
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

#define xCM_(m1, m2, m3, op, nops, ops, ae)	\
  { m1 #m2 m3, OPS##nops ops, \
    sizeof (#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof (m1) - 1, \
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

#define CM(m1, m2, op, nops, ops, ae)	\
  xCM_ (m1,   , m2, op, nops, ops, ae),	\
  xCM_ (m1, eq, m2, op, nops, ops, ae),	\
  xCM_ (m1, ne, m2, op, nops, ops, ae),	\
  xCM_ (m1, cs, m2, op, nops, ops, ae),	\
  xCM_ (m1, hs, m2, op, nops, ops, ae),	\
  xCM_ (m1, cc, m2, op, nops, ops, ae),	\
  xCM_ (m1, ul, m2, op, nops, ops, ae),	\
  xCM_ (m1, lo, m2, op, nops, ops, ae),	\
  xCM_ (m1, mi, m2, op, nops, ops, ae),	\
  xCM_ (m1, pl, m2, op, nops, ops, ae),	\
  xCM_ (m1, vs, m2, op, nops, ops, ae),	\
  xCM_ (m1, vc, m2, op, nops, ops, ae),	\
  xCM_ (m1, hi, m2, op, nops, ops, ae),	\
  xCM_ (m1, ls, m2, op, nops, ops, ae),	\
  xCM_ (m1, ge, m2, op, nops, ops, ae),	\
  xCM_ (m1, lt, m2, op, nops, ops, ae),	\
  xCM_ (m1, gt, m2, op, nops, ops, ae),	\
  xCM_ (m1, le, m2, op, nops, ops, ae),	\
  xCM_ (m1, al, m2, op, nops, ops, ae)

#define UE(mnem, op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }

#define UF(mnem, op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }

/* Neon data-processing.  ARM versions are unconditional with cond=0xf.
   The Thumb and ARM variants are mostly the same (bits 0-23 and 24/28), so we
   use the same encoding function for each.  */
#define NUF(mnem, op, nops, ops, enc)					\
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op,		\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

/* Neon data processing, version which indirects through neon_enc_tab for
   the various overloaded versions of opcodes.  */
#define nUF(mnem, op, nops, ops, enc)					\
  { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM##op, N_MNEM##op,	\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

/* Neon insn with conditional suffix for the ARM version, non-overloaded
   types.  */
#define NCE_tag(mnem, op, nops, ops, enc, tag)				\
  { #mnem, OPS##nops ops, tag, 0x##op, 0x##op, ARM_VARIANT,		\
    THUMB_VARIANT, do_##enc, do_##enc }

#define NCE(mnem, op, nops, ops, enc)					\
   NCE_tag (mnem, op, nops, ops, enc, OT_csuffix)

#define NCEF(mnem, op, nops, ops, enc)					\
    NCE_tag (mnem, op, nops, ops, enc, OT_csuffixF)

/* Neon insn with conditional suffix for the ARM version, overloaded types.  */
#define nCE_tag(mnem, op, nops, ops, enc, tag)				\
  { #mnem, OPS##nops ops, tag, N_MNEM##op, N_MNEM##op,		\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

#define nCE(mnem, op, nops, ops, enc)					\
   nCE_tag (mnem, op, nops, ops, enc, OT_csuffix)

#define nCEF(mnem, op, nops, ops, enc)					\
    nCE_tag (mnem, op, nops, ops, enc, OT_csuffixF)
19244 static const struct asm_opcode insns
[] =
19246 #define ARM_VARIANT & arm_ext_v1 /* Core ARM Instructions. */
19247 #define THUMB_VARIANT & arm_ext_v4t
19248 tCE("and", 0000000, _and
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
19249 tC3("ands", 0100000, _ands
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
19250 tCE("eor", 0200000, _eor
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
19251 tC3("eors", 0300000, _eors
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
19252 tCE("sub", 0400000, _sub
, 3, (RR
, oRR
, SH
), arit
, t_add_sub
),
19253 tC3("subs", 0500000, _subs
, 3, (RR
, oRR
, SH
), arit
, t_add_sub
),
19254 tCE("add", 0800000, _add
, 3, (RR
, oRR
, SHG
), arit
, t_add_sub
),
19255 tC3("adds", 0900000, _adds
, 3, (RR
, oRR
, SHG
), arit
, t_add_sub
),
19256 tCE("adc", 0a00000
, _adc
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
19257 tC3("adcs", 0b00000, _adcs
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
19258 tCE("sbc", 0c00000
, _sbc
, 3, (RR
, oRR
, SH
), arit
, t_arit3
),
19259 tC3("sbcs", 0d00000
, _sbcs
, 3, (RR
, oRR
, SH
), arit
, t_arit3
),
19260 tCE("orr", 1800000, _orr
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
19261 tC3("orrs", 1900000, _orrs
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
19262 tCE("bic", 1c00000
, _bic
, 3, (RR
, oRR
, SH
), arit
, t_arit3
),
19263 tC3("bics", 1d00000
, _bics
, 3, (RR
, oRR
, SH
), arit
, t_arit3
),
19265 /* The p-variants of tst/cmp/cmn/teq (below) are the pre-V6 mechanism
19266 for setting PSR flag bits. They are obsolete in V6 and do not
19267 have Thumb equivalents. */
19268 tCE("tst", 1100000, _tst
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
19269 tC3w("tsts", 1100000, _tst
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
19270 CL("tstp", 110f000
, 2, (RR
, SH
), cmp
),
19271 tCE("cmp", 1500000, _cmp
, 2, (RR
, SH
), cmp
, t_mov_cmp
),
19272 tC3w("cmps", 1500000, _cmp
, 2, (RR
, SH
), cmp
, t_mov_cmp
),
19273 CL("cmpp", 150f000
, 2, (RR
, SH
), cmp
),
19274 tCE("cmn", 1700000, _cmn
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
19275 tC3w("cmns", 1700000, _cmn
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
19276 CL("cmnp", 170f000
, 2, (RR
, SH
), cmp
),
19278 tCE("mov", 1a00000
, _mov
, 2, (RR
, SH
), mov
, t_mov_cmp
),
19279 tC3("movs", 1b00000
, _movs
, 2, (RR
, SHG
), mov
, t_mov_cmp
),
19280 tCE("mvn", 1e00000
, _mvn
, 2, (RR
, SH
), mov
, t_mvn_tst
),
19281 tC3("mvns", 1f00000
, _mvns
, 2, (RR
, SH
), mov
, t_mvn_tst
),
19283 tCE("ldr", 4100000, _ldr
, 2, (RR
, ADDRGLDR
),ldst
, t_ldst
),
19284 tC3("ldrb", 4500000, _ldrb
, 2, (RRnpc_npcsp
, ADDRGLDR
),ldst
, t_ldst
),
19285 tCE("str", 4000000, _str
, _2
, (MIX_ARM_THUMB_OPERANDS (OP_RR
,
19287 OP_ADDRGLDR
),ldst
, t_ldst
),
19288 tC3("strb", 4400000, _strb
, 2, (RRnpc_npcsp
, ADDRGLDR
),ldst
, t_ldst
),
19290 tCE("stm", 8800000, _stmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
19291 tC3("stmia", 8800000, _stmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
19292 tC3("stmea", 8800000, _stmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
19293 tCE("ldm", 8900000, _ldmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
19294 tC3("ldmia", 8900000, _ldmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
19295 tC3("ldmfd", 8900000, _ldmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
19297 TCE("swi", f000000
, df00
, 1, (EXPi
), swi
, t_swi
),
19298 TCE("svc", f000000
, df00
, 1, (EXPi
), swi
, t_swi
),
19299 tCE("b", a000000
, _b
, 1, (EXPr
), branch
, t_branch
),
19300 TCE("bl", b000000
, f000f800
, 1, (EXPr
), bl
, t_branch23
),
19303 tCE("adr", 28f0000
, _adr
, 2, (RR
, EXP
), adr
, t_adr
),
19304 C3(adrl
, 28f0000
, 2, (RR
, EXP
), adrl
),
19305 tCE("nop", 1a00000
, _nop
, 1, (oI255c
), nop
, t_nop
),
19306 tCE("udf", 7f000f0
, _udf
, 1, (oIffffb
), bkpt
, t_udf
),
19308 /* Thumb-compatibility pseudo ops. */
19309 tCE("lsl", 1a00000
, _lsl
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
19310 tC3("lsls", 1b00000
, _lsls
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
19311 tCE("lsr", 1a00020
, _lsr
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
19312 tC3("lsrs", 1b00020
, _lsrs
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
19313 tCE("asr", 1a00040
, _asr
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
19314 tC3("asrs", 1b00040
, _asrs
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
19315 tCE("ror", 1a00060
, _ror
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
19316 tC3("rors", 1b00060
, _rors
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
19317 tCE("neg", 2600000, _neg
, 2, (RR
, RR
), rd_rn
, t_neg
),
19318 tC3("negs", 2700000, _negs
, 2, (RR
, RR
), rd_rn
, t_neg
),
19319 tCE("push", 92d0000
, _push
, 1, (REGLST
), push_pop
, t_push_pop
),
19320 tCE("pop", 8bd0000
, _pop
, 1, (REGLST
), push_pop
, t_push_pop
),
19322 /* These may simplify to neg. */
19323 TCE("rsb", 0600000, ebc00000
, 3, (RR
, oRR
, SH
), arit
, t_rsb
),
19324 TC3("rsbs", 0700000, ebd00000
, 3, (RR
, oRR
, SH
), arit
, t_rsb
),
19326 #undef THUMB_VARIANT
19327 #define THUMB_VARIANT & arm_ext_v6
19329 TCE("cpy", 1a00000
, 4600, 2, (RR
, RR
), rd_rm
, t_cpy
),
19331 /* V1 instructions with no Thumb analogue prior to V6T2. */
19332 #undef THUMB_VARIANT
19333 #define THUMB_VARIANT & arm_ext_v6t2
19335 TCE("teq", 1300000, ea900f00
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
19336 TC3w("teqs", 1300000, ea900f00
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
19337 CL("teqp", 130f000
, 2, (RR
, SH
), cmp
),
19339 TC3("ldrt", 4300000, f8500e00
, 2, (RRnpc_npcsp
, ADDR
),ldstt
, t_ldstt
),
19340 TC3("ldrbt", 4700000, f8100e00
, 2, (RRnpc_npcsp
, ADDR
),ldstt
, t_ldstt
),
19341 TC3("strt", 4200000, f8400e00
, 2, (RR_npcsp
, ADDR
), ldstt
, t_ldstt
),
19342 TC3("strbt", 4600000, f8000e00
, 2, (RRnpc_npcsp
, ADDR
),ldstt
, t_ldstt
),
19344 TC3("stmdb", 9000000, e9000000
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
19345 TC3("stmfd", 9000000, e9000000
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
19347 TC3("ldmdb", 9100000, e9100000
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
19348 TC3("ldmea", 9100000, e9100000
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
19350 /* V1 instructions with no Thumb analogue at all. */
19351 CE("rsc", 0e00000
, 3, (RR
, oRR
, SH
), arit
),
19352 C3(rscs
, 0f00000
, 3, (RR
, oRR
, SH
), arit
),
19354 C3(stmib
, 9800000, 2, (RRw
, REGLST
), ldmstm
),
19355 C3(stmfa
, 9800000, 2, (RRw
, REGLST
), ldmstm
),
19356 C3(stmda
, 8000000, 2, (RRw
, REGLST
), ldmstm
),
19357 C3(stmed
, 8000000, 2, (RRw
, REGLST
), ldmstm
),
19358 C3(ldmib
, 9900000, 2, (RRw
, REGLST
), ldmstm
),
19359 C3(ldmed
, 9900000, 2, (RRw
, REGLST
), ldmstm
),
19360 C3(ldmda
, 8100000, 2, (RRw
, REGLST
), ldmstm
),
19361 C3(ldmfa
, 8100000, 2, (RRw
, REGLST
), ldmstm
),
19364 #define ARM_VARIANT & arm_ext_v2 /* ARM 2 - multiplies. */
19365 #undef THUMB_VARIANT
19366 #define THUMB_VARIANT & arm_ext_v4t
19368 tCE("mul", 0000090, _mul
, 3, (RRnpc
, RRnpc
, oRR
), mul
, t_mul
),
19369 tC3("muls", 0100090, _muls
, 3, (RRnpc
, RRnpc
, oRR
), mul
, t_mul
),
19371 #undef THUMB_VARIANT
19372 #define THUMB_VARIANT & arm_ext_v6t2
19374 TCE("mla", 0200090, fb000000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mlas
, t_mla
),
19375 C3(mlas
, 0300090, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mlas
),
19377 /* Generic coprocessor instructions. */
19378 TCE("cdp", e000000
, ee000000
, 6, (RCP
, I15b
, RCN
, RCN
, RCN
, oI7b
), cdp
, cdp
),
19379 TCE("ldc", c100000
, ec100000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
19380 TC3("ldcl", c500000
, ec500000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
19381 TCE("stc", c000000
, ec000000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
19382 TC3("stcl", c400000
, ec400000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
19383 TCE("mcr", e000010
, ee000010
, 6, (RCP
, I7b
, RR
, RCN
, RCN
, oI7b
), co_reg
, co_reg
),
19384 TCE("mrc", e100010
, ee100010
, 6, (RCP
, I7b
, APSR_RR
, RCN
, RCN
, oI7b
), co_reg
, co_reg
),
19387 #define ARM_VARIANT & arm_ext_v2s /* ARM 3 - swp instructions. */
19389 CE("swp", 1000090, 3, (RRnpc
, RRnpc
, RRnpcb
), rd_rm_rn
),
19390 C3(swpb
, 1400090, 3, (RRnpc
, RRnpc
, RRnpcb
), rd_rm_rn
),
19393 #define ARM_VARIANT & arm_ext_v3 /* ARM 6 Status register instructions. */
19394 #undef THUMB_VARIANT
19395 #define THUMB_VARIANT & arm_ext_msr
19397 TCE("mrs", 1000000, f3e08000
, 2, (RRnpc
, rPSR
), mrs
, t_mrs
),
19398 TCE("msr", 120f000
, f3808000
, 2, (wPSR
, RR_EXi
), msr
, t_msr
),
19401 #define ARM_VARIANT & arm_ext_v3m /* ARM 7M long multiplies. */
19402 #undef THUMB_VARIANT
19403 #define THUMB_VARIANT & arm_ext_v6t2
19405 TCE("smull", 0c00090
, fb800000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
, t_mull
),
19406 CM("smull","s", 0d00090
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
),
19407 TCE("umull", 0800090, fba00000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
, t_mull
),
19408 CM("umull","s", 0900090, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
),
19409 TCE("smlal", 0e00090
, fbc00000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
, t_mull
),
19410 CM("smlal","s", 0f00090
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
),
19411 TCE("umlal", 0a00090
, fbe00000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
, t_mull
),
19412 CM("umlal","s", 0b00090, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
),
19415 #define ARM_VARIANT & arm_ext_v4 /* ARM Architecture 4. */
19416 #undef THUMB_VARIANT
19417 #define THUMB_VARIANT & arm_ext_v4t
19419 tC3("ldrh", 01000b0
, _ldrh
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
19420 tC3("strh", 00000b0
, _strh
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
19421 tC3("ldrsh", 01000f0
, _ldrsh
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
19422 tC3("ldrsb", 01000d0
, _ldrsb
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
19423 tC3("ldsh", 01000f0
, _ldrsh
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
19424 tC3("ldsb", 01000d0
, _ldrsb
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
19427 #define ARM_VARIANT & arm_ext_v4t_5
19429 /* ARM Architecture 4T. */
19430 /* Note: bx (and blx) are required on V5, even if the processor does
19431 not support Thumb. */
19432 TCE("bx", 12fff10
, 4700, 1, (RR
), bx
, t_bx
),
19435 #define ARM_VARIANT & arm_ext_v5 /* ARM Architecture 5T. */
19436 #undef THUMB_VARIANT
19437 #define THUMB_VARIANT & arm_ext_v5t
19439 /* Note: blx has 2 variants; the .value coded here is for
19440 BLX(2). Only this variant has conditional execution. */
19441 TCE("blx", 12fff30
, 4780, 1, (RR_EXr
), blx
, t_blx
),
19442 TUE("bkpt", 1200070, be00
, 1, (oIffffb
), bkpt
, t_bkpt
),
19444 #undef THUMB_VARIANT
19445 #define THUMB_VARIANT & arm_ext_v6t2
19447 TCE("clz", 16f0f10
, fab0f080
, 2, (RRnpc
, RRnpc
), rd_rm
, t_clz
),
19448 TUF("ldc2", c100000
, fc100000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
19449 TUF("ldc2l", c500000
, fc500000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
19450 TUF("stc2", c000000
, fc000000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
19451 TUF("stc2l", c400000
, fc400000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
19452 TUF("cdp2", e000000
, fe000000
, 6, (RCP
, I15b
, RCN
, RCN
, RCN
, oI7b
), cdp
, cdp
),
19453 TUF("mcr2", e000010
, fe000010
, 6, (RCP
, I7b
, RR
, RCN
, RCN
, oI7b
), co_reg
, co_reg
),
19454 TUF("mrc2", e100010
, fe100010
, 6, (RCP
, I7b
, RR
, RCN
, RCN
, oI7b
), co_reg
, co_reg
),
19457 #define ARM_VARIANT & arm_ext_v5exp /* ARM Architecture 5TExP. */
19458 #undef THUMB_VARIANT
19459 #define THUMB_VARIANT & arm_ext_v5exp
19461 TCE("smlabb", 1000080, fb100000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
19462 TCE("smlatb", 10000a0
, fb100020
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
19463 TCE("smlabt", 10000c0
, fb100010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
19464 TCE("smlatt", 10000e0
, fb100030
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
19466 TCE("smlawb", 1200080, fb300000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
19467 TCE("smlawt", 12000c0
, fb300010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
19469 TCE("smlalbb", 1400080, fbc00080
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smlal
, t_mlal
),
19470 TCE("smlaltb", 14000a0
, fbc000a0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smlal
, t_mlal
),
19471 TCE("smlalbt", 14000c0
, fbc00090
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smlal
, t_mlal
),
19472 TCE("smlaltt", 14000e0
, fbc000b0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smlal
, t_mlal
),
19474 TCE("smulbb", 1600080, fb10f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
19475 TCE("smultb", 16000a0
, fb10f020
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
19476 TCE("smulbt", 16000c0
, fb10f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
19477 TCE("smultt", 16000e0
, fb10f030
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
19479 TCE("smulwb", 12000a0
, fb30f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
19480 TCE("smulwt", 12000e0
, fb30f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
19482 TCE("qadd", 1000050, fa80f080
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rm_rn
, t_simd2
),
19483 TCE("qdadd", 1400050, fa80f090
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rm_rn
, t_simd2
),
19484 TCE("qsub", 1200050, fa80f0a0
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rm_rn
, t_simd2
),
19485 TCE("qdsub", 1600050, fa80f0b0
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rm_rn
, t_simd2
),
19488 #define ARM_VARIANT & arm_ext_v5e /* ARM Architecture 5TE. */
19489 #undef THUMB_VARIANT
19490 #define THUMB_VARIANT & arm_ext_v6t2
19492 TUF("pld", 450f000
, f810f000
, 1, (ADDR
), pld
, t_pld
),
19493 TC3("ldrd", 00000d0
, e8500000
, 3, (RRnpc_npcsp
, oRRnpc_npcsp
, ADDRGLDRS
),
19495 TC3("strd", 00000f0
, e8400000
, 3, (RRnpc_npcsp
, oRRnpc_npcsp
,
19496 ADDRGLDRS
), ldrd
, t_ldstd
),
19498 TCE("mcrr", c400000
, ec400000
, 5, (RCP
, I15b
, RRnpc
, RRnpc
, RCN
), co_reg2c
, co_reg2c
),
19499 TCE("mrrc", c500000
, ec500000
, 5, (RCP
, I15b
, RRnpc
, RRnpc
, RCN
), co_reg2c
, co_reg2c
),
19502 #define ARM_VARIANT & arm_ext_v5j /* ARM Architecture 5TEJ. */
19504 TCE("bxj", 12fff20
, f3c08f00
, 1, (RR
), bxj
, t_bxj
),
19507 #define ARM_VARIANT & arm_ext_v6 /* ARM V6. */
19508 #undef THUMB_VARIANT
19509 #define THUMB_VARIANT & arm_ext_v6
19511 TUF("cpsie", 1080000, b660
, 2, (CPSF
, oI31b
), cpsi
, t_cpsi
),
19512 TUF("cpsid", 10c0000
, b670
, 2, (CPSF
, oI31b
), cpsi
, t_cpsi
),
19513 tCE("rev", 6bf0f30
, _rev
, 2, (RRnpc
, RRnpc
), rd_rm
, t_rev
),
19514 tCE("rev16", 6bf0fb0
, _rev16
, 2, (RRnpc
, RRnpc
), rd_rm
, t_rev
),
19515 tCE("revsh", 6ff0fb0
, _revsh
, 2, (RRnpc
, RRnpc
), rd_rm
, t_rev
),
19516 tCE("sxth", 6bf0070
, _sxth
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
19517 tCE("uxth", 6ff0070
, _uxth
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
19518 tCE("sxtb", 6af0070
, _sxtb
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
19519 tCE("uxtb", 6ef0070
, _uxtb
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
19520 TUF("setend", 1010000, b650
, 1, (ENDI
), setend
, t_setend
),
19522 #undef THUMB_VARIANT
19523 #define THUMB_VARIANT & arm_ext_v6t2_v8m
19525 TCE("ldrex", 1900f9f
, e8500f00
, 2, (RRnpc_npcsp
, ADDR
), ldrex
, t_ldrex
),
19526 TCE("strex", 1800f90
, e8400000
, 3, (RRnpc_npcsp
, RRnpc_npcsp
, ADDR
),
19528 #undef THUMB_VARIANT
19529 #define THUMB_VARIANT & arm_ext_v6t2
19531 TUF("mcrr2", c400000
, fc400000
, 5, (RCP
, I15b
, RRnpc
, RRnpc
, RCN
), co_reg2c
, co_reg2c
),
19532 TUF("mrrc2", c500000
, fc500000
, 5, (RCP
, I15b
, RRnpc
, RRnpc
, RCN
), co_reg2c
, co_reg2c
),
19534 TCE("ssat", 6a00010
, f3000000
, 4, (RRnpc
, I32
, RRnpc
, oSHllar
),ssat
, t_ssat
),
19535 TCE("usat", 6e00010
, f3800000
, 4, (RRnpc
, I31
, RRnpc
, oSHllar
),usat
, t_usat
),
19537 /* ARM V6 not included in V7M. */
19538 #undef THUMB_VARIANT
19539 #define THUMB_VARIANT & arm_ext_v6_notm
19540 TUF("rfeia", 8900a00
, e990c000
, 1, (RRw
), rfe
, rfe
),
19541 TUF("rfe", 8900a00
, e990c000
, 1, (RRw
), rfe
, rfe
),
19542 UF(rfeib
, 9900a00
, 1, (RRw
), rfe
),
19543 UF(rfeda
, 8100a00
, 1, (RRw
), rfe
),
19544 TUF("rfedb", 9100a00
, e810c000
, 1, (RRw
), rfe
, rfe
),
19545 TUF("rfefd", 8900a00
, e990c000
, 1, (RRw
), rfe
, rfe
),
19546 UF(rfefa
, 8100a00
, 1, (RRw
), rfe
),
19547 TUF("rfeea", 9100a00
, e810c000
, 1, (RRw
), rfe
, rfe
),
19548 UF(rfeed
, 9900a00
, 1, (RRw
), rfe
),
19549 TUF("srsia", 8c00500
, e980c000
, 2, (oRRw
, I31w
), srs
, srs
),
19550 TUF("srs", 8c00500
, e980c000
, 2, (oRRw
, I31w
), srs
, srs
),
19551 TUF("srsea", 8c00500
, e980c000
, 2, (oRRw
, I31w
), srs
, srs
),
19552 UF(srsib
, 9c00500
, 2, (oRRw
, I31w
), srs
),
19553 UF(srsfa
, 9c00500
, 2, (oRRw
, I31w
), srs
),
19554 UF(srsda
, 8400500, 2, (oRRw
, I31w
), srs
),
19555 UF(srsed
, 8400500, 2, (oRRw
, I31w
), srs
),
19556 TUF("srsdb", 9400500, e800c000
, 2, (oRRw
, I31w
), srs
, srs
),
19557 TUF("srsfd", 9400500, e800c000
, 2, (oRRw
, I31w
), srs
, srs
),
19558 TUF("cps", 1020000, f3af8100
, 1, (I31b
), imm0
, t_cps
),
19560 /* ARM V6 not included in V7M (eg. integer SIMD). */
19561 #undef THUMB_VARIANT
19562 #define THUMB_VARIANT & arm_ext_v6_dsp
19563 TCE("pkhbt", 6800010, eac00000
, 4, (RRnpc
, RRnpc
, RRnpc
, oSHll
), pkhbt
, t_pkhbt
),
19564 TCE("pkhtb", 6800050, eac00020
, 4, (RRnpc
, RRnpc
, RRnpc
, oSHar
), pkhtb
, t_pkhtb
),
19565 TCE("qadd16", 6200f10
, fa90f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19566 TCE("qadd8", 6200f90
, fa80f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19567 TCE("qasx", 6200f30
, faa0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19568 /* Old name for QASX. */
19569 TCE("qaddsubx",6200f30
, faa0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19570 TCE("qsax", 6200f50
, fae0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19571 /* Old name for QSAX. */
19572 TCE("qsubaddx",6200f50
, fae0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19573 TCE("qsub16", 6200f70
, fad0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19574 TCE("qsub8", 6200ff0
, fac0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19575 TCE("sadd16", 6100f10
, fa90f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19576 TCE("sadd8", 6100f90
, fa80f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19577 TCE("sasx", 6100f30
, faa0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19578 /* Old name for SASX. */
19579 TCE("saddsubx",6100f30
, faa0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19580 TCE("shadd16", 6300f10
, fa90f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19581 TCE("shadd8", 6300f90
, fa80f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19582 TCE("shasx", 6300f30
, faa0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19583 /* Old name for SHASX. */
19584 TCE("shaddsubx", 6300f30
, faa0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19585 TCE("shsax", 6300f50
, fae0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19586 /* Old name for SHSAX. */
19587 TCE("shsubaddx", 6300f50
, fae0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19588 TCE("shsub16", 6300f70
, fad0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19589 TCE("shsub8", 6300ff0
, fac0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19590 TCE("ssax", 6100f50
, fae0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19591 /* Old name for SSAX. */
19592 TCE("ssubaddx",6100f50
, fae0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19593 TCE("ssub16", 6100f70
, fad0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19594 TCE("ssub8", 6100ff0
, fac0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19595 TCE("uadd16", 6500f10
, fa90f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19596 TCE("uadd8", 6500f90
, fa80f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19597 TCE("uasx", 6500f30
, faa0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19598 /* Old name for UASX. */
19599 TCE("uaddsubx",6500f30
, faa0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19600 TCE("uhadd16", 6700f10
, fa90f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19601 TCE("uhadd8", 6700f90
, fa80f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19602 TCE("uhasx", 6700f30
, faa0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19603 /* Old name for UHASX. */
19604 TCE("uhaddsubx", 6700f30
, faa0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19605 TCE("uhsax", 6700f50
, fae0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19606 /* Old name for UHSAX. */
19607 TCE("uhsubaddx", 6700f50
, fae0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19608 TCE("uhsub16", 6700f70
, fad0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19609 TCE("uhsub8", 6700ff0
, fac0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19610 TCE("uqadd16", 6600f10
, fa90f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19611 TCE("uqadd8", 6600f90
, fa80f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19612 TCE("uqasx", 6600f30
, faa0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19613 /* Old name for UQASX. */
19614 TCE("uqaddsubx", 6600f30
, faa0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19615 TCE("uqsax", 6600f50
, fae0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19616 /* Old name for UQSAX. */
19617 TCE("uqsubaddx", 6600f50
, fae0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19618 TCE("uqsub16", 6600f70
, fad0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19619 TCE("uqsub8", 6600ff0
, fac0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19620 TCE("usub16", 6500f70
, fad0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19621 TCE("usax", 6500f50
, fae0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19622 /* Old name for USAX. */
19623 TCE("usubaddx",6500f50
, fae0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19624 TCE("usub8", 6500ff0
, fac0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19625 TCE("sxtah", 6b00070
, fa00f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
19626 TCE("sxtab16", 6800070, fa20f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
19627 TCE("sxtab", 6a00070
, fa40f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
19628 TCE("sxtb16", 68f0070
, fa2ff080
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
19629 TCE("uxtah", 6f00070
, fa10f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
19630 TCE("uxtab16", 6c00070
, fa30f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
19631 TCE("uxtab", 6e00070
, fa50f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
19632 TCE("uxtb16", 6cf0070
, fa3ff080
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
19633 TCE("sel", 6800fb0
, faa0f080
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19634 TCE("smlad", 7000010, fb200000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
19635 TCE("smladx", 7000030, fb200010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
19636 TCE("smlald", 7400010, fbc000c0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
,t_mlal
),
19637 TCE("smlaldx", 7400030, fbc000d0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
,t_mlal
),
19638 TCE("smlsd", 7000050, fb400000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
19639 TCE("smlsdx", 7000070, fb400010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
19640 TCE("smlsld", 7400050, fbd000c0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
,t_mlal
),
19641 TCE("smlsldx", 7400070, fbd000d0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
,t_mlal
),
19642 TCE("smmla", 7500010, fb500000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
19643 TCE("smmlar", 7500030, fb500010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
19644 TCE("smmls", 75000d0
, fb600000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
19645 TCE("smmlsr", 75000f0
, fb600010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
19646 TCE("smmul", 750f010
, fb50f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
19647 TCE("smmulr", 750f030
, fb50f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
19648 TCE("smuad", 700f010
, fb20f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
19649 TCE("smuadx", 700f030
, fb20f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
19650 TCE("smusd", 700f050
, fb40f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
19651 TCE("smusdx", 700f070
, fb40f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
19652 TCE("ssat16", 6a00f30
, f3200000
, 3, (RRnpc
, I16
, RRnpc
), ssat16
, t_ssat16
),
19653 TCE("umaal", 0400090, fbe00060
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
, t_mlal
),
19654 TCE("usad8", 780f010
, fb70f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
19655 TCE("usada8", 7800010, fb700000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
19656 TCE("usat16", 6e00f30
, f3a00000
, 3, (RRnpc
, I15
, RRnpc
), usat16
, t_usat16
),
19659 #define ARM_VARIANT & arm_ext_v6k
19660 #undef THUMB_VARIANT
19661 #define THUMB_VARIANT & arm_ext_v6k
19663 tCE("yield", 320f001
, _yield
, 0, (), noargs
, t_hint
),
19664 tCE("wfe", 320f002
, _wfe
, 0, (), noargs
, t_hint
),
19665 tCE("wfi", 320f003
, _wfi
, 0, (), noargs
, t_hint
),
19666 tCE("sev", 320f004
, _sev
, 0, (), noargs
, t_hint
),
19668 #undef THUMB_VARIANT
19669 #define THUMB_VARIANT & arm_ext_v6_notm
19670 TCE("ldrexd", 1b00f9f
, e8d0007f
, 3, (RRnpc_npcsp
, oRRnpc_npcsp
, RRnpcb
),
19672 TCE("strexd", 1a00f90
, e8c00070
, 4, (RRnpc_npcsp
, RRnpc_npcsp
, oRRnpc_npcsp
,
19673 RRnpcb
), strexd
, t_strexd
),
19675 #undef THUMB_VARIANT
19676 #define THUMB_VARIANT & arm_ext_v6t2_v8m
19677 TCE("ldrexb", 1d00f9f
, e8d00f4f
, 2, (RRnpc_npcsp
,RRnpcb
),
19679 TCE("ldrexh", 1f00f9f
, e8d00f5f
, 2, (RRnpc_npcsp
, RRnpcb
),
19681 TCE("strexb", 1c00f90
, e8c00f40
, 3, (RRnpc_npcsp
, RRnpc_npcsp
, ADDR
),
19683 TCE("strexh", 1e00f90
, e8c00f50
, 3, (RRnpc_npcsp
, RRnpc_npcsp
, ADDR
),
19685 TUF("clrex", 57ff01f
, f3bf8f2f
, 0, (), noargs
, noargs
),
19688 #define ARM_VARIANT & arm_ext_sec
19689 #undef THUMB_VARIANT
19690 #define THUMB_VARIANT & arm_ext_sec
19692 TCE("smc", 1600070, f7f08000
, 1, (EXPi
), smc
, t_smc
),
19695 #define ARM_VARIANT & arm_ext_virt
19696 #undef THUMB_VARIANT
19697 #define THUMB_VARIANT & arm_ext_virt
19699 TCE("hvc", 1400070, f7e08000
, 1, (EXPi
), hvc
, t_hvc
),
19700 TCE("eret", 160006e
, f3de8f00
, 0, (), noargs
, noargs
),
19703 #define ARM_VARIANT & arm_ext_pan
19704 #undef THUMB_VARIANT
19705 #define THUMB_VARIANT & arm_ext_pan
19707 TUF("setpan", 1100000, b610
, 1, (I7
), setpan
, t_setpan
),
19710 #define ARM_VARIANT & arm_ext_v6t2
19711 #undef THUMB_VARIANT
19712 #define THUMB_VARIANT & arm_ext_v6t2
19714 TCE("bfc", 7c0001f
, f36f0000
, 3, (RRnpc
, I31
, I32
), bfc
, t_bfc
),
19715 TCE("bfi", 7c00010
, f3600000
, 4, (RRnpc
, RRnpc_I0
, I31
, I32
), bfi
, t_bfi
),
19716 TCE("sbfx", 7a00050
, f3400000
, 4, (RR
, RR
, I31
, I32
), bfx
, t_bfx
),
19717 TCE("ubfx", 7e00050
, f3c00000
, 4, (RR
, RR
, I31
, I32
), bfx
, t_bfx
),
19719 TCE("mls", 0600090, fb000010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mlas
, t_mla
),
19720 TCE("rbit", 6ff0f30
, fa90f0a0
, 2, (RR
, RR
), rd_rm
, t_rbit
),
19722 TC3("ldrht", 03000b0
, f8300e00
, 2, (RRnpc_npcsp
, ADDR
), ldsttv4
, t_ldstt
),
19723 TC3("ldrsht", 03000f0
, f9300e00
, 2, (RRnpc_npcsp
, ADDR
), ldsttv4
, t_ldstt
),
19724 TC3("ldrsbt", 03000d0
, f9100e00
, 2, (RRnpc_npcsp
, ADDR
), ldsttv4
, t_ldstt
),
19725 TC3("strht", 02000b0
, f8200e00
, 2, (RRnpc_npcsp
, ADDR
), ldsttv4
, t_ldstt
),
19727 #undef THUMB_VARIANT
19728 #define THUMB_VARIANT & arm_ext_v6t2_v8m
19729 TCE("movw", 3000000, f2400000
, 2, (RRnpc
, HALF
), mov16
, t_mov16
),
19730 TCE("movt", 3400000, f2c00000
, 2, (RRnpc
, HALF
), mov16
, t_mov16
),
19732 /* Thumb-only instructions. */
19734 #define ARM_VARIANT NULL
19735 TUE("cbnz", 0, b900
, 2, (RR
, EXP
), 0, t_cbz
),
19736 TUE("cbz", 0, b100
, 2, (RR
, EXP
), 0, t_cbz
),
19738 /* ARM does not really have an IT instruction, so always allow it.
19739 The opcode is copied from Thumb in order to allow warnings in
19740 -mimplicit-it=[never | arm] modes. */
19742 #define ARM_VARIANT & arm_ext_v1
19743 #undef THUMB_VARIANT
19744 #define THUMB_VARIANT & arm_ext_v6t2
19746 TUE("it", bf08
, bf08
, 1, (COND
), it
, t_it
),
19747 TUE("itt", bf0c
, bf0c
, 1, (COND
), it
, t_it
),
19748 TUE("ite", bf04
, bf04
, 1, (COND
), it
, t_it
),
19749 TUE("ittt", bf0e
, bf0e
, 1, (COND
), it
, t_it
),
19750 TUE("itet", bf06
, bf06
, 1, (COND
), it
, t_it
),
19751 TUE("itte", bf0a
, bf0a
, 1, (COND
), it
, t_it
),
19752 TUE("itee", bf02
, bf02
, 1, (COND
), it
, t_it
),
19753 TUE("itttt", bf0f
, bf0f
, 1, (COND
), it
, t_it
),
19754 TUE("itett", bf07
, bf07
, 1, (COND
), it
, t_it
),
19755 TUE("ittet", bf0b
, bf0b
, 1, (COND
), it
, t_it
),
19756 TUE("iteet", bf03
, bf03
, 1, (COND
), it
, t_it
),
19757 TUE("ittte", bf0d
, bf0d
, 1, (COND
), it
, t_it
),
19758 TUE("itete", bf05
, bf05
, 1, (COND
), it
, t_it
),
19759 TUE("ittee", bf09
, bf09
, 1, (COND
), it
, t_it
),
19760 TUE("iteee", bf01
, bf01
, 1, (COND
), it
, t_it
),
19761 /* ARM/Thumb-2 instructions with no Thumb-1 equivalent. */
19762 TC3("rrx", 01a00060
, ea4f0030
, 2, (RR
, RR
), rd_rm
, t_rrx
),
19763 TC3("rrxs", 01b00060
, ea5f0030
, 2, (RR
, RR
), rd_rm
, t_rrx
),
19765 /* Thumb2 only instructions. */
19767 #define ARM_VARIANT NULL
19769 TCE("addw", 0, f2000000
, 3, (RR
, RR
, EXPi
), 0, t_add_sub_w
),
19770 TCE("subw", 0, f2a00000
, 3, (RR
, RR
, EXPi
), 0, t_add_sub_w
),
19771 TCE("orn", 0, ea600000
, 3, (RR
, oRR
, SH
), 0, t_orn
),
19772 TCE("orns", 0, ea700000
, 3, (RR
, oRR
, SH
), 0, t_orn
),
19773 TCE("tbb", 0, e8d0f000
, 1, (TB
), 0, t_tb
),
19774 TCE("tbh", 0, e8d0f010
, 1, (TB
), 0, t_tb
),
19776 /* Hardware division instructions. */
19778 #define ARM_VARIANT & arm_ext_adiv
19779 #undef THUMB_VARIANT
19780 #define THUMB_VARIANT & arm_ext_div
19782 TCE("sdiv", 710f010
, fb90f0f0
, 3, (RR
, oRR
, RR
), div
, t_div
),
19783 TCE("udiv", 730f010
, fbb0f0f0
, 3, (RR
, oRR
, RR
), div
, t_div
),
19785 /* ARM V6M/V7 instructions. */
19787 #define ARM_VARIANT & arm_ext_barrier
19788 #undef THUMB_VARIANT
19789 #define THUMB_VARIANT & arm_ext_barrier
19791 TUF("dmb", 57ff050
, f3bf8f50
, 1, (oBARRIER_I15
), barrier
, barrier
),
19792 TUF("dsb", 57ff040
, f3bf8f40
, 1, (oBARRIER_I15
), barrier
, barrier
),
19793 TUF("isb", 57ff060
, f3bf8f60
, 1, (oBARRIER_I15
), barrier
, barrier
),
19795 /* ARM V7 instructions. */
19797 #define ARM_VARIANT & arm_ext_v7
19798 #undef THUMB_VARIANT
19799 #define THUMB_VARIANT & arm_ext_v7
19801 TUF("pli", 450f000
, f910f000
, 1, (ADDR
), pli
, t_pld
),
19802 TCE("dbg", 320f0f0
, f3af80f0
, 1, (I15
), dbg
, t_dbg
),
19805 #define ARM_VARIANT & arm_ext_mp
19806 #undef THUMB_VARIANT
19807 #define THUMB_VARIANT & arm_ext_mp
19809 TUF("pldw", 410f000
, f830f000
, 1, (ADDR
), pld
, t_pld
),
19811 /* AArchv8 instructions. */
19813 #define ARM_VARIANT & arm_ext_v8
19815 /* Instructions shared between armv8-a and armv8-m. */
19816 #undef THUMB_VARIANT
19817 #define THUMB_VARIANT & arm_ext_atomics
19819 TCE("lda", 1900c9f
, e8d00faf
, 2, (RRnpc
, RRnpcb
), rd_rn
, rd_rn
),
19820 TCE("ldab", 1d00c9f
, e8d00f8f
, 2, (RRnpc
, RRnpcb
), rd_rn
, rd_rn
),
19821 TCE("ldah", 1f00c9f
, e8d00f9f
, 2, (RRnpc
, RRnpcb
), rd_rn
, rd_rn
),
19822 TCE("stl", 180fc90
, e8c00faf
, 2, (RRnpc
, RRnpcb
), rm_rn
, rd_rn
),
19823 TCE("stlb", 1c0fc90
, e8c00f8f
, 2, (RRnpc
, RRnpcb
), rm_rn
, rd_rn
),
19824 TCE("stlh", 1e0fc90
, e8c00f9f
, 2, (RRnpc
, RRnpcb
), rm_rn
, rd_rn
),
19825 TCE("ldaex", 1900e9f
, e8d00fef
, 2, (RRnpc
, RRnpcb
), rd_rn
, rd_rn
),
19826 TCE("ldaexb", 1d00e9f
, e8d00fcf
, 2, (RRnpc
,RRnpcb
), rd_rn
, rd_rn
),
19827 TCE("ldaexh", 1f00e9f
, e8d00fdf
, 2, (RRnpc
, RRnpcb
), rd_rn
, rd_rn
),
19828 TCE("stlex", 1800e90
, e8c00fe0
, 3, (RRnpc
, RRnpc
, RRnpcb
),
19830 TCE("stlexb", 1c00e90
, e8c00fc0
, 3, (RRnpc
, RRnpc
, RRnpcb
),
19832 TCE("stlexh", 1e00e90
, e8c00fd0
, 3, (RRnpc
, RRnpc
, RRnpcb
),
19834 #undef THUMB_VARIANT
19835 #define THUMB_VARIANT & arm_ext_v8
19837 tCE("sevl", 320f005
, _sevl
, 0, (), noargs
, t_hint
),
19838 TUE("hlt", 1000070, ba80
, 1, (oIffffb
), bkpt
, t_hlt
),
19839 TCE("ldaexd", 1b00e9f
, e8d000ff
, 3, (RRnpc
, oRRnpc
, RRnpcb
),
19841 TCE("stlexd", 1a00e90
, e8c000f0
, 4, (RRnpc
, RRnpc
, oRRnpc
, RRnpcb
),
19843 /* ARMv8 T32 only. */
19845 #define ARM_VARIANT NULL
19846 TUF("dcps1", 0, f78f8001
, 0, (), noargs
, noargs
),
19847 TUF("dcps2", 0, f78f8002
, 0, (), noargs
, noargs
),
19848 TUF("dcps3", 0, f78f8003
, 0, (), noargs
, noargs
),
19850 /* FP for ARMv8. */
19852 #define ARM_VARIANT & fpu_vfp_ext_armv8xd
19853 #undef THUMB_VARIANT
19854 #define THUMB_VARIANT & fpu_vfp_ext_armv8xd
19856 nUF(vseleq
, _vseleq
, 3, (RVSD
, RVSD
, RVSD
), vsel
),
19857 nUF(vselvs
, _vselvs
, 3, (RVSD
, RVSD
, RVSD
), vsel
),
19858 nUF(vselge
, _vselge
, 3, (RVSD
, RVSD
, RVSD
), vsel
),
19859 nUF(vselgt
, _vselgt
, 3, (RVSD
, RVSD
, RVSD
), vsel
),
19860 nUF(vmaxnm
, _vmaxnm
, 3, (RNSDQ
, oRNSDQ
, RNSDQ
), vmaxnm
),
19861 nUF(vminnm
, _vminnm
, 3, (RNSDQ
, oRNSDQ
, RNSDQ
), vmaxnm
),
19862 nUF(vcvta
, _vcvta
, 2, (RNSDQ
, oRNSDQ
), neon_cvta
),
19863 nUF(vcvtn
, _vcvta
, 2, (RNSDQ
, oRNSDQ
), neon_cvtn
),
19864 nUF(vcvtp
, _vcvta
, 2, (RNSDQ
, oRNSDQ
), neon_cvtp
),
19865 nUF(vcvtm
, _vcvta
, 2, (RNSDQ
, oRNSDQ
), neon_cvtm
),
19866 nCE(vrintr
, _vrintr
, 2, (RNSDQ
, oRNSDQ
), vrintr
),
19867 nCE(vrintz
, _vrintr
, 2, (RNSDQ
, oRNSDQ
), vrintz
),
19868 nCE(vrintx
, _vrintr
, 2, (RNSDQ
, oRNSDQ
), vrintx
),
19869 nUF(vrinta
, _vrinta
, 2, (RNSDQ
, oRNSDQ
), vrinta
),
19870 nUF(vrintn
, _vrinta
, 2, (RNSDQ
, oRNSDQ
), vrintn
),
19871 nUF(vrintp
, _vrinta
, 2, (RNSDQ
, oRNSDQ
), vrintp
),
19872 nUF(vrintm
, _vrinta
, 2, (RNSDQ
, oRNSDQ
), vrintm
),
19874 /* Crypto v1 extensions. */
19876 #define ARM_VARIANT & fpu_crypto_ext_armv8
19877 #undef THUMB_VARIANT
19878 #define THUMB_VARIANT & fpu_crypto_ext_armv8
19880 nUF(aese
, _aes
, 2, (RNQ
, RNQ
), aese
),
19881 nUF(aesd
, _aes
, 2, (RNQ
, RNQ
), aesd
),
19882 nUF(aesmc
, _aes
, 2, (RNQ
, RNQ
), aesmc
),
19883 nUF(aesimc
, _aes
, 2, (RNQ
, RNQ
), aesimc
),
19884 nUF(sha1c
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha1c
),
19885 nUF(sha1p
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha1p
),
19886 nUF(sha1m
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha1m
),
19887 nUF(sha1su0
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha1su0
),
19888 nUF(sha256h
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha256h
),
19889 nUF(sha256h2
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha256h2
),
19890 nUF(sha256su1
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha256su1
),
19891 nUF(sha1h
, _sha1h
, 2, (RNQ
, RNQ
), sha1h
),
19892 nUF(sha1su1
, _sha2op
, 2, (RNQ
, RNQ
), sha1su1
),
19893 nUF(sha256su0
, _sha2op
, 2, (RNQ
, RNQ
), sha256su0
),
19896 #define ARM_VARIANT & crc_ext_armv8
19897 #undef THUMB_VARIANT
19898 #define THUMB_VARIANT & crc_ext_armv8
19899 TUEc("crc32b", 1000040, fac0f080
, 3, (RR
, oRR
, RR
), crc32b
),
19900 TUEc("crc32h", 1200040, fac0f090
, 3, (RR
, oRR
, RR
), crc32h
),
19901 TUEc("crc32w", 1400040, fac0f0a0
, 3, (RR
, oRR
, RR
), crc32w
),
19902 TUEc("crc32cb",1000240, fad0f080
, 3, (RR
, oRR
, RR
), crc32cb
),
19903 TUEc("crc32ch",1200240, fad0f090
, 3, (RR
, oRR
, RR
), crc32ch
),
19904 TUEc("crc32cw",1400240, fad0f0a0
, 3, (RR
, oRR
, RR
), crc32cw
),
19906 /* ARMv8.2 RAS extension. */
19908 #define ARM_VARIANT & arm_ext_ras
19909 #undef THUMB_VARIANT
19910 #define THUMB_VARIANT & arm_ext_ras
19911 TUE ("esb", 320f010
, f3af8010
, 0, (), noargs
, noargs
),
19914 #define ARM_VARIANT & arm_ext_v8_3
19915 #undef THUMB_VARIANT
19916 #define THUMB_VARIANT & arm_ext_v8_3
19917 NCE (vjcvt
, eb90bc0
, 2, (RVS
, RVD
), vjcvt
),
19918 NUF (vcmla
, 0, 4, (RNDQ
, RNDQ
, RNDQ_RNSC
, EXPi
), vcmla
),
19919 NUF (vcadd
, 0, 4, (RNDQ
, RNDQ
, RNDQ
, EXPi
), vcadd
),
19922 #define ARM_VARIANT & fpu_fpa_ext_v1 /* Core FPA instruction set (V1). */
19923 #undef THUMB_VARIANT
19924 #define THUMB_VARIANT NULL
19926 cCE("wfs", e200110
, 1, (RR
), rd
),
19927 cCE("rfs", e300110
, 1, (RR
), rd
),
19928 cCE("wfc", e400110
, 1, (RR
), rd
),
19929 cCE("rfc", e500110
, 1, (RR
), rd
),
19931 cCL("ldfs", c100100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
19932 cCL("ldfd", c108100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
19933 cCL("ldfe", c500100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
19934 cCL("ldfp", c508100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
19936 cCL("stfs", c000100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
19937 cCL("stfd", c008100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
19938 cCL("stfe", c400100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
19939 cCL("stfp", c408100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
19941 cCL("mvfs", e008100
, 2, (RF
, RF_IF
), rd_rm
),
19942 cCL("mvfsp", e008120
, 2, (RF
, RF_IF
), rd_rm
),
19943 cCL("mvfsm", e008140
, 2, (RF
, RF_IF
), rd_rm
),
19944 cCL("mvfsz", e008160
, 2, (RF
, RF_IF
), rd_rm
),
19945 cCL("mvfd", e008180
, 2, (RF
, RF_IF
), rd_rm
),
19946 cCL("mvfdp", e0081a0
, 2, (RF
, RF_IF
), rd_rm
),
19947 cCL("mvfdm", e0081c0
, 2, (RF
, RF_IF
), rd_rm
),
19948 cCL("mvfdz", e0081e0
, 2, (RF
, RF_IF
), rd_rm
),
19949 cCL("mvfe", e088100
, 2, (RF
, RF_IF
), rd_rm
),
19950 cCL("mvfep", e088120
, 2, (RF
, RF_IF
), rd_rm
),
19951 cCL("mvfem", e088140
, 2, (RF
, RF_IF
), rd_rm
),
19952 cCL("mvfez", e088160
, 2, (RF
, RF_IF
), rd_rm
),
19954 cCL("mnfs", e108100
, 2, (RF
, RF_IF
), rd_rm
),
19955 cCL("mnfsp", e108120
, 2, (RF
, RF_IF
), rd_rm
),
19956 cCL("mnfsm", e108140
, 2, (RF
, RF_IF
), rd_rm
),
19957 cCL("mnfsz", e108160
, 2, (RF
, RF_IF
), rd_rm
),
19958 cCL("mnfd", e108180
, 2, (RF
, RF_IF
), rd_rm
),
19959 cCL("mnfdp", e1081a0
, 2, (RF
, RF_IF
), rd_rm
),
19960 cCL("mnfdm", e1081c0
, 2, (RF
, RF_IF
), rd_rm
),
19961 cCL("mnfdz", e1081e0
, 2, (RF
, RF_IF
), rd_rm
),
19962 cCL("mnfe", e188100
, 2, (RF
, RF_IF
), rd_rm
),
19963 cCL("mnfep", e188120
, 2, (RF
, RF_IF
), rd_rm
),
19964 cCL("mnfem", e188140
, 2, (RF
, RF_IF
), rd_rm
),
19965 cCL("mnfez", e188160
, 2, (RF
, RF_IF
), rd_rm
),
19967 cCL("abss", e208100
, 2, (RF
, RF_IF
), rd_rm
),
19968 cCL("abssp", e208120
, 2, (RF
, RF_IF
), rd_rm
),
19969 cCL("abssm", e208140
, 2, (RF
, RF_IF
), rd_rm
),
19970 cCL("abssz", e208160
, 2, (RF
, RF_IF
), rd_rm
),
19971 cCL("absd", e208180
, 2, (RF
, RF_IF
), rd_rm
),
19972 cCL("absdp", e2081a0
, 2, (RF
, RF_IF
), rd_rm
),
19973 cCL("absdm", e2081c0
, 2, (RF
, RF_IF
), rd_rm
),
19974 cCL("absdz", e2081e0
, 2, (RF
, RF_IF
), rd_rm
),
19975 cCL("abse", e288100
, 2, (RF
, RF_IF
), rd_rm
),
19976 cCL("absep", e288120
, 2, (RF
, RF_IF
), rd_rm
),
19977 cCL("absem", e288140
, 2, (RF
, RF_IF
), rd_rm
),
19978 cCL("absez", e288160
, 2, (RF
, RF_IF
), rd_rm
),
19980 cCL("rnds", e308100
, 2, (RF
, RF_IF
), rd_rm
),
19981 cCL("rndsp", e308120
, 2, (RF
, RF_IF
), rd_rm
),
19982 cCL("rndsm", e308140
, 2, (RF
, RF_IF
), rd_rm
),
19983 cCL("rndsz", e308160
, 2, (RF
, RF_IF
), rd_rm
),
19984 cCL("rndd", e308180
, 2, (RF
, RF_IF
), rd_rm
),
19985 cCL("rnddp", e3081a0
, 2, (RF
, RF_IF
), rd_rm
),
19986 cCL("rnddm", e3081c0
, 2, (RF
, RF_IF
), rd_rm
),
19987 cCL("rnddz", e3081e0
, 2, (RF
, RF_IF
), rd_rm
),
19988 cCL("rnde", e388100
, 2, (RF
, RF_IF
), rd_rm
),
19989 cCL("rndep", e388120
, 2, (RF
, RF_IF
), rd_rm
),
19990 cCL("rndem", e388140
, 2, (RF
, RF_IF
), rd_rm
),
19991 cCL("rndez", e388160
, 2, (RF
, RF_IF
), rd_rm
),
19993 cCL("sqts", e408100
, 2, (RF
, RF_IF
), rd_rm
),
19994 cCL("sqtsp", e408120
, 2, (RF
, RF_IF
), rd_rm
),
19995 cCL("sqtsm", e408140
, 2, (RF
, RF_IF
), rd_rm
),
19996 cCL("sqtsz", e408160
, 2, (RF
, RF_IF
), rd_rm
),
19997 cCL("sqtd", e408180
, 2, (RF
, RF_IF
), rd_rm
),
19998 cCL("sqtdp", e4081a0
, 2, (RF
, RF_IF
), rd_rm
),
19999 cCL("sqtdm", e4081c0
, 2, (RF
, RF_IF
), rd_rm
),
20000 cCL("sqtdz", e4081e0
, 2, (RF
, RF_IF
), rd_rm
),
20001 cCL("sqte", e488100
, 2, (RF
, RF_IF
), rd_rm
),
20002 cCL("sqtep", e488120
, 2, (RF
, RF_IF
), rd_rm
),
20003 cCL("sqtem", e488140
, 2, (RF
, RF_IF
), rd_rm
),
20004 cCL("sqtez", e488160
, 2, (RF
, RF_IF
), rd_rm
),
20006 cCL("logs", e508100
, 2, (RF
, RF_IF
), rd_rm
),
20007 cCL("logsp", e508120
, 2, (RF
, RF_IF
), rd_rm
),
20008 cCL("logsm", e508140
, 2, (RF
, RF_IF
), rd_rm
),
20009 cCL("logsz", e508160
, 2, (RF
, RF_IF
), rd_rm
),
20010 cCL("logd", e508180
, 2, (RF
, RF_IF
), rd_rm
),
20011 cCL("logdp", e5081a0
, 2, (RF
, RF_IF
), rd_rm
),
20012 cCL("logdm", e5081c0
, 2, (RF
, RF_IF
), rd_rm
),
20013 cCL("logdz", e5081e0
, 2, (RF
, RF_IF
), rd_rm
),
20014 cCL("loge", e588100
, 2, (RF
, RF_IF
), rd_rm
),
20015 cCL("logep", e588120
, 2, (RF
, RF_IF
), rd_rm
),
20016 cCL("logem", e588140
, 2, (RF
, RF_IF
), rd_rm
),
20017 cCL("logez", e588160
, 2, (RF
, RF_IF
), rd_rm
),
20019 cCL("lgns", e608100
, 2, (RF
, RF_IF
), rd_rm
),
20020 cCL("lgnsp", e608120
, 2, (RF
, RF_IF
), rd_rm
),
20021 cCL("lgnsm", e608140
, 2, (RF
, RF_IF
), rd_rm
),
20022 cCL("lgnsz", e608160
, 2, (RF
, RF_IF
), rd_rm
),
20023 cCL("lgnd", e608180
, 2, (RF
, RF_IF
), rd_rm
),
20024 cCL("lgndp", e6081a0
, 2, (RF
, RF_IF
), rd_rm
),
20025 cCL("lgndm", e6081c0
, 2, (RF
, RF_IF
), rd_rm
),
20026 cCL("lgndz", e6081e0
, 2, (RF
, RF_IF
), rd_rm
),
20027 cCL("lgne", e688100
, 2, (RF
, RF_IF
), rd_rm
),
20028 cCL("lgnep", e688120
, 2, (RF
, RF_IF
), rd_rm
),
20029 cCL("lgnem", e688140
, 2, (RF
, RF_IF
), rd_rm
),
20030 cCL("lgnez", e688160
, 2, (RF
, RF_IF
), rd_rm
),
20032 cCL("exps", e708100
, 2, (RF
, RF_IF
), rd_rm
),
20033 cCL("expsp", e708120
, 2, (RF
, RF_IF
), rd_rm
),
20034 cCL("expsm", e708140
, 2, (RF
, RF_IF
), rd_rm
),
20035 cCL("expsz", e708160
, 2, (RF
, RF_IF
), rd_rm
),
20036 cCL("expd", e708180
, 2, (RF
, RF_IF
), rd_rm
),
20037 cCL("expdp", e7081a0
, 2, (RF
, RF_IF
), rd_rm
),
20038 cCL("expdm", e7081c0
, 2, (RF
, RF_IF
), rd_rm
),
20039 cCL("expdz", e7081e0
, 2, (RF
, RF_IF
), rd_rm
),
20040 cCL("expe", e788100
, 2, (RF
, RF_IF
), rd_rm
),
20041 cCL("expep", e788120
, 2, (RF
, RF_IF
), rd_rm
),
20042 cCL("expem", e788140
, 2, (RF
, RF_IF
), rd_rm
),
20043 cCL("expdz", e788160
, 2, (RF
, RF_IF
), rd_rm
),
20045 cCL("sins", e808100
, 2, (RF
, RF_IF
), rd_rm
),
20046 cCL("sinsp", e808120
, 2, (RF
, RF_IF
), rd_rm
),
20047 cCL("sinsm", e808140
, 2, (RF
, RF_IF
), rd_rm
),
20048 cCL("sinsz", e808160
, 2, (RF
, RF_IF
), rd_rm
),
20049 cCL("sind", e808180
, 2, (RF
, RF_IF
), rd_rm
),
20050 cCL("sindp", e8081a0
, 2, (RF
, RF_IF
), rd_rm
),
20051 cCL("sindm", e8081c0
, 2, (RF
, RF_IF
), rd_rm
),
20052 cCL("sindz", e8081e0
, 2, (RF
, RF_IF
), rd_rm
),
20053 cCL("sine", e888100
, 2, (RF
, RF_IF
), rd_rm
),
20054 cCL("sinep", e888120
, 2, (RF
, RF_IF
), rd_rm
),
20055 cCL("sinem", e888140
, 2, (RF
, RF_IF
), rd_rm
),
20056 cCL("sinez", e888160
, 2, (RF
, RF_IF
), rd_rm
),
20058 cCL("coss", e908100
, 2, (RF
, RF_IF
), rd_rm
),
20059 cCL("cossp", e908120
, 2, (RF
, RF_IF
), rd_rm
),
20060 cCL("cossm", e908140
, 2, (RF
, RF_IF
), rd_rm
),
20061 cCL("cossz", e908160
, 2, (RF
, RF_IF
), rd_rm
),
20062 cCL("cosd", e908180
, 2, (RF
, RF_IF
), rd_rm
),
20063 cCL("cosdp", e9081a0
, 2, (RF
, RF_IF
), rd_rm
),
20064 cCL("cosdm", e9081c0
, 2, (RF
, RF_IF
), rd_rm
),
20065 cCL("cosdz", e9081e0
, 2, (RF
, RF_IF
), rd_rm
),
20066 cCL("cose", e988100
, 2, (RF
, RF_IF
), rd_rm
),
20067 cCL("cosep", e988120
, 2, (RF
, RF_IF
), rd_rm
),
20068 cCL("cosem", e988140
, 2, (RF
, RF_IF
), rd_rm
),
20069 cCL("cosez", e988160
, 2, (RF
, RF_IF
), rd_rm
),
20071 cCL("tans", ea08100
, 2, (RF
, RF_IF
), rd_rm
),
20072 cCL("tansp", ea08120
, 2, (RF
, RF_IF
), rd_rm
),
20073 cCL("tansm", ea08140
, 2, (RF
, RF_IF
), rd_rm
),
20074 cCL("tansz", ea08160
, 2, (RF
, RF_IF
), rd_rm
),
20075 cCL("tand", ea08180
, 2, (RF
, RF_IF
), rd_rm
),
20076 cCL("tandp", ea081a0
, 2, (RF
, RF_IF
), rd_rm
),
20077 cCL("tandm", ea081c0
, 2, (RF
, RF_IF
), rd_rm
),
20078 cCL("tandz", ea081e0
, 2, (RF
, RF_IF
), rd_rm
),
20079 cCL("tane", ea88100
, 2, (RF
, RF_IF
), rd_rm
),
20080 cCL("tanep", ea88120
, 2, (RF
, RF_IF
), rd_rm
),
20081 cCL("tanem", ea88140
, 2, (RF
, RF_IF
), rd_rm
),
20082 cCL("tanez", ea88160
, 2, (RF
, RF_IF
), rd_rm
),
20084 cCL("asns", eb08100
, 2, (RF
, RF_IF
), rd_rm
),
20085 cCL("asnsp", eb08120
, 2, (RF
, RF_IF
), rd_rm
),
20086 cCL("asnsm", eb08140
, 2, (RF
, RF_IF
), rd_rm
),
20087 cCL("asnsz", eb08160
, 2, (RF
, RF_IF
), rd_rm
),
20088 cCL("asnd", eb08180
, 2, (RF
, RF_IF
), rd_rm
),
20089 cCL("asndp", eb081a0
, 2, (RF
, RF_IF
), rd_rm
),
20090 cCL("asndm", eb081c0
, 2, (RF
, RF_IF
), rd_rm
),
20091 cCL("asndz", eb081e0
, 2, (RF
, RF_IF
), rd_rm
),
20092 cCL("asne", eb88100
, 2, (RF
, RF_IF
), rd_rm
),
20093 cCL("asnep", eb88120
, 2, (RF
, RF_IF
), rd_rm
),
20094 cCL("asnem", eb88140
, 2, (RF
, RF_IF
), rd_rm
),
20095 cCL("asnez", eb88160
, 2, (RF
, RF_IF
), rd_rm
),
20097 cCL("acss", ec08100
, 2, (RF
, RF_IF
), rd_rm
),
20098 cCL("acssp", ec08120
, 2, (RF
, RF_IF
), rd_rm
),
20099 cCL("acssm", ec08140
, 2, (RF
, RF_IF
), rd_rm
),
20100 cCL("acssz", ec08160
, 2, (RF
, RF_IF
), rd_rm
),
20101 cCL("acsd", ec08180
, 2, (RF
, RF_IF
), rd_rm
),
20102 cCL("acsdp", ec081a0
, 2, (RF
, RF_IF
), rd_rm
),
20103 cCL("acsdm", ec081c0
, 2, (RF
, RF_IF
), rd_rm
),
20104 cCL("acsdz", ec081e0
, 2, (RF
, RF_IF
), rd_rm
),
20105 cCL("acse", ec88100
, 2, (RF
, RF_IF
), rd_rm
),
20106 cCL("acsep", ec88120
, 2, (RF
, RF_IF
), rd_rm
),
20107 cCL("acsem", ec88140
, 2, (RF
, RF_IF
), rd_rm
),
20108 cCL("acsez", ec88160
, 2, (RF
, RF_IF
), rd_rm
),
20110 cCL("atns", ed08100
, 2, (RF
, RF_IF
), rd_rm
),
20111 cCL("atnsp", ed08120
, 2, (RF
, RF_IF
), rd_rm
),
20112 cCL("atnsm", ed08140
, 2, (RF
, RF_IF
), rd_rm
),
20113 cCL("atnsz", ed08160
, 2, (RF
, RF_IF
), rd_rm
),
20114 cCL("atnd", ed08180
, 2, (RF
, RF_IF
), rd_rm
),
20115 cCL("atndp", ed081a0
, 2, (RF
, RF_IF
), rd_rm
),
20116 cCL("atndm", ed081c0
, 2, (RF
, RF_IF
), rd_rm
),
20117 cCL("atndz", ed081e0
, 2, (RF
, RF_IF
), rd_rm
),
20118 cCL("atne", ed88100
, 2, (RF
, RF_IF
), rd_rm
),
20119 cCL("atnep", ed88120
, 2, (RF
, RF_IF
), rd_rm
),
20120 cCL("atnem", ed88140
, 2, (RF
, RF_IF
), rd_rm
),
20121 cCL("atnez", ed88160
, 2, (RF
, RF_IF
), rd_rm
),
20123 cCL("urds", ee08100
, 2, (RF
, RF_IF
), rd_rm
),
20124 cCL("urdsp", ee08120
, 2, (RF
, RF_IF
), rd_rm
),
20125 cCL("urdsm", ee08140
, 2, (RF
, RF_IF
), rd_rm
),
20126 cCL("urdsz", ee08160
, 2, (RF
, RF_IF
), rd_rm
),
20127 cCL("urdd", ee08180
, 2, (RF
, RF_IF
), rd_rm
),
20128 cCL("urddp", ee081a0
, 2, (RF
, RF_IF
), rd_rm
),
20129 cCL("urddm", ee081c0
, 2, (RF
, RF_IF
), rd_rm
),
20130 cCL("urddz", ee081e0
, 2, (RF
, RF_IF
), rd_rm
),
20131 cCL("urde", ee88100
, 2, (RF
, RF_IF
), rd_rm
),
20132 cCL("urdep", ee88120
, 2, (RF
, RF_IF
), rd_rm
),
20133 cCL("urdem", ee88140
, 2, (RF
, RF_IF
), rd_rm
),
20134 cCL("urdez", ee88160
, 2, (RF
, RF_IF
), rd_rm
),
20136 cCL("nrms", ef08100
, 2, (RF
, RF_IF
), rd_rm
),
20137 cCL("nrmsp", ef08120
, 2, (RF
, RF_IF
), rd_rm
),
20138 cCL("nrmsm", ef08140
, 2, (RF
, RF_IF
), rd_rm
),
20139 cCL("nrmsz", ef08160
, 2, (RF
, RF_IF
), rd_rm
),
20140 cCL("nrmd", ef08180
, 2, (RF
, RF_IF
), rd_rm
),
20141 cCL("nrmdp", ef081a0
, 2, (RF
, RF_IF
), rd_rm
),
20142 cCL("nrmdm", ef081c0
, 2, (RF
, RF_IF
), rd_rm
),
20143 cCL("nrmdz", ef081e0
, 2, (RF
, RF_IF
), rd_rm
),
20144 cCL("nrme", ef88100
, 2, (RF
, RF_IF
), rd_rm
),
20145 cCL("nrmep", ef88120
, 2, (RF
, RF_IF
), rd_rm
),
20146 cCL("nrmem", ef88140
, 2, (RF
, RF_IF
), rd_rm
),
20147 cCL("nrmez", ef88160
, 2, (RF
, RF_IF
), rd_rm
),
20149 cCL("adfs", e000100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20150 cCL("adfsp", e000120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20151 cCL("adfsm", e000140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20152 cCL("adfsz", e000160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20153 cCL("adfd", e000180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20154 cCL("adfdp", e0001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20155 cCL("adfdm", e0001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20156 cCL("adfdz", e0001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20157 cCL("adfe", e080100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20158 cCL("adfep", e080120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20159 cCL("adfem", e080140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20160 cCL("adfez", e080160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20162 cCL("sufs", e200100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20163 cCL("sufsp", e200120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20164 cCL("sufsm", e200140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20165 cCL("sufsz", e200160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20166 cCL("sufd", e200180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20167 cCL("sufdp", e2001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20168 cCL("sufdm", e2001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20169 cCL("sufdz", e2001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20170 cCL("sufe", e280100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20171 cCL("sufep", e280120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20172 cCL("sufem", e280140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20173 cCL("sufez", e280160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20175 cCL("rsfs", e300100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20176 cCL("rsfsp", e300120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20177 cCL("rsfsm", e300140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20178 cCL("rsfsz", e300160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20179 cCL("rsfd", e300180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20180 cCL("rsfdp", e3001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20181 cCL("rsfdm", e3001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20182 cCL("rsfdz", e3001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20183 cCL("rsfe", e380100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20184 cCL("rsfep", e380120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20185 cCL("rsfem", e380140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20186 cCL("rsfez", e380160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20188 cCL("mufs", e100100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20189 cCL("mufsp", e100120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20190 cCL("mufsm", e100140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20191 cCL("mufsz", e100160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20192 cCL("mufd", e100180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20193 cCL("mufdp", e1001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20194 cCL("mufdm", e1001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20195 cCL("mufdz", e1001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20196 cCL("mufe", e180100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20197 cCL("mufep", e180120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20198 cCL("mufem", e180140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20199 cCL("mufez", e180160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20201 cCL("dvfs", e400100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20202 cCL("dvfsp", e400120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20203 cCL("dvfsm", e400140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20204 cCL("dvfsz", e400160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20205 cCL("dvfd", e400180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20206 cCL("dvfdp", e4001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20207 cCL("dvfdm", e4001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20208 cCL("dvfdz", e4001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20209 cCL("dvfe", e480100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20210 cCL("dvfep", e480120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20211 cCL("dvfem", e480140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20212 cCL("dvfez", e480160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20214 cCL("rdfs", e500100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20215 cCL("rdfsp", e500120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20216 cCL("rdfsm", e500140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20217 cCL("rdfsz", e500160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20218 cCL("rdfd", e500180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20219 cCL("rdfdp", e5001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20220 cCL("rdfdm", e5001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20221 cCL("rdfdz", e5001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20222 cCL("rdfe", e580100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20223 cCL("rdfep", e580120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20224 cCL("rdfem", e580140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20225 cCL("rdfez", e580160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20227 cCL("pows", e600100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20228 cCL("powsp", e600120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20229 cCL("powsm", e600140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20230 cCL("powsz", e600160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20231 cCL("powd", e600180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20232 cCL("powdp", e6001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20233 cCL("powdm", e6001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20234 cCL("powdz", e6001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20235 cCL("powe", e680100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20236 cCL("powep", e680120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20237 cCL("powem", e680140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20238 cCL("powez", e680160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20240 cCL("rpws", e700100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20241 cCL("rpwsp", e700120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20242 cCL("rpwsm", e700140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20243 cCL("rpwsz", e700160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20244 cCL("rpwd", e700180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20245 cCL("rpwdp", e7001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20246 cCL("rpwdm", e7001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20247 cCL("rpwdz", e7001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20248 cCL("rpwe", e780100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20249 cCL("rpwep", e780120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20250 cCL("rpwem", e780140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20251 cCL("rpwez", e780160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20253 cCL("rmfs", e800100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20254 cCL("rmfsp", e800120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20255 cCL("rmfsm", e800140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20256 cCL("rmfsz", e800160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20257 cCL("rmfd", e800180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20258 cCL("rmfdp", e8001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20259 cCL("rmfdm", e8001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20260 cCL("rmfdz", e8001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20261 cCL("rmfe", e880100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20262 cCL("rmfep", e880120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20263 cCL("rmfem", e880140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20264 cCL("rmfez", e880160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20266 cCL("fmls", e900100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20267 cCL("fmlsp", e900120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20268 cCL("fmlsm", e900140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20269 cCL("fmlsz", e900160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20270 cCL("fmld", e900180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20271 cCL("fmldp", e9001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20272 cCL("fmldm", e9001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20273 cCL("fmldz", e9001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20274 cCL("fmle", e980100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20275 cCL("fmlep", e980120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20276 cCL("fmlem", e980140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20277 cCL("fmlez", e980160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20279 cCL("fdvs", ea00100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20280 cCL("fdvsp", ea00120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20281 cCL("fdvsm", ea00140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20282 cCL("fdvsz", ea00160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20283 cCL("fdvd", ea00180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20284 cCL("fdvdp", ea001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20285 cCL("fdvdm", ea001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20286 cCL("fdvdz", ea001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20287 cCL("fdve", ea80100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20288 cCL("fdvep", ea80120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20289 cCL("fdvem", ea80140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20290 cCL("fdvez", ea80160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20292 cCL("frds", eb00100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20293 cCL("frdsp", eb00120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20294 cCL("frdsm", eb00140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20295 cCL("frdsz", eb00160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20296 cCL("frdd", eb00180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20297 cCL("frddp", eb001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20298 cCL("frddm", eb001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20299 cCL("frddz", eb001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20300 cCL("frde", eb80100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20301 cCL("frdep", eb80120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20302 cCL("frdem", eb80140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20303 cCL("frdez", eb80160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20305 cCL("pols", ec00100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20306 cCL("polsp", ec00120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20307 cCL("polsm", ec00140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20308 cCL("polsz", ec00160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20309 cCL("pold", ec00180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20310 cCL("poldp", ec001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20311 cCL("poldm", ec001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20312 cCL("poldz", ec001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20313 cCL("pole", ec80100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20314 cCL("polep", ec80120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20315 cCL("polem", ec80140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20316 cCL("polez", ec80160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20318 cCE("cmf", e90f110
, 2, (RF
, RF_IF
), fpa_cmp
),
20319 C3E("cmfe", ed0f110
, 2, (RF
, RF_IF
), fpa_cmp
),
20320 cCE("cnf", eb0f110
, 2, (RF
, RF_IF
), fpa_cmp
),
20321 C3E("cnfe", ef0f110
, 2, (RF
, RF_IF
), fpa_cmp
),
20323 cCL("flts", e000110
, 2, (RF
, RR
), rn_rd
),
20324 cCL("fltsp", e000130
, 2, (RF
, RR
), rn_rd
),
20325 cCL("fltsm", e000150
, 2, (RF
, RR
), rn_rd
),
20326 cCL("fltsz", e000170
, 2, (RF
, RR
), rn_rd
),
20327 cCL("fltd", e000190
, 2, (RF
, RR
), rn_rd
),
20328 cCL("fltdp", e0001b0
, 2, (RF
, RR
), rn_rd
),
20329 cCL("fltdm", e0001d0
, 2, (RF
, RR
), rn_rd
),
20330 cCL("fltdz", e0001f0
, 2, (RF
, RR
), rn_rd
),
20331 cCL("flte", e080110
, 2, (RF
, RR
), rn_rd
),
20332 cCL("fltep", e080130
, 2, (RF
, RR
), rn_rd
),
20333 cCL("fltem", e080150
, 2, (RF
, RR
), rn_rd
),
20334 cCL("fltez", e080170
, 2, (RF
, RR
), rn_rd
),
20336 /* The implementation of the FIX instruction is broken on some
20337 assemblers, in that it accepts a precision specifier as well as a
20338 rounding specifier, despite the fact that this is meaningless.
20339 To be more compatible, we accept it as well, though of course it
20340 does not set any bits. */
20341 cCE("fix", e100110
, 2, (RR
, RF
), rd_rm
),
20342 cCL("fixp", e100130
, 2, (RR
, RF
), rd_rm
),
20343 cCL("fixm", e100150
, 2, (RR
, RF
), rd_rm
),
20344 cCL("fixz", e100170
, 2, (RR
, RF
), rd_rm
),
20345 cCL("fixsp", e100130
, 2, (RR
, RF
), rd_rm
),
20346 cCL("fixsm", e100150
, 2, (RR
, RF
), rd_rm
),
20347 cCL("fixsz", e100170
, 2, (RR
, RF
), rd_rm
),
20348 cCL("fixdp", e100130
, 2, (RR
, RF
), rd_rm
),
20349 cCL("fixdm", e100150
, 2, (RR
, RF
), rd_rm
),
20350 cCL("fixdz", e100170
, 2, (RR
, RF
), rd_rm
),
20351 cCL("fixep", e100130
, 2, (RR
, RF
), rd_rm
),
20352 cCL("fixem", e100150
, 2, (RR
, RF
), rd_rm
),
20353 cCL("fixez", e100170
, 2, (RR
, RF
), rd_rm
),
20355 /* Instructions that were new with the real FPA, call them V2. */
20357 #define ARM_VARIANT & fpu_fpa_ext_v2
20359 cCE("lfm", c100200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
20360 cCL("lfmfd", c900200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
20361 cCL("lfmea", d100200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
20362 cCE("sfm", c000200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
20363 cCL("sfmfd", d000200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
20364 cCL("sfmea", c800200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
20367 #define ARM_VARIANT & fpu_vfp_ext_v1xd /* VFP V1xD (single precision). */
20369 /* Moves and type conversions. */
20370 cCE("fcpys", eb00a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
20371 cCE("fmrs", e100a10
, 2, (RR
, RVS
), vfp_reg_from_sp
),
20372 cCE("fmsr", e000a10
, 2, (RVS
, RR
), vfp_sp_from_reg
),
20373 cCE("fmstat", ef1fa10
, 0, (), noargs
),
20374 cCE("vmrs", ef00a10
, 2, (APSR_RR
, RVC
), vmrs
),
20375 cCE("vmsr", ee00a10
, 2, (RVC
, RR
), vmsr
),
20376 cCE("fsitos", eb80ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
20377 cCE("fuitos", eb80a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
20378 cCE("ftosis", ebd0a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
20379 cCE("ftosizs", ebd0ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
20380 cCE("ftouis", ebc0a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
20381 cCE("ftouizs", ebc0ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
20382 cCE("fmrx", ef00a10
, 2, (RR
, RVC
), rd_rn
),
20383 cCE("fmxr", ee00a10
, 2, (RVC
, RR
), rn_rd
),
20385 /* Memory operations. */
20386 cCE("flds", d100a00
, 2, (RVS
, ADDRGLDC
), vfp_sp_ldst
),
20387 cCE("fsts", d000a00
, 2, (RVS
, ADDRGLDC
), vfp_sp_ldst
),
20388 cCE("fldmias", c900a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmia
),
20389 cCE("fldmfds", c900a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmia
),
20390 cCE("fldmdbs", d300a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmdb
),
20391 cCE("fldmeas", d300a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmdb
),
20392 cCE("fldmiax", c900b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmia
),
20393 cCE("fldmfdx", c900b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmia
),
20394 cCE("fldmdbx", d300b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmdb
),
20395 cCE("fldmeax", d300b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmdb
),
20396 cCE("fstmias", c800a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmia
),
20397 cCE("fstmeas", c800a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmia
),
20398 cCE("fstmdbs", d200a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmdb
),
20399 cCE("fstmfds", d200a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmdb
),
20400 cCE("fstmiax", c800b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmia
),
20401 cCE("fstmeax", c800b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmia
),
20402 cCE("fstmdbx", d200b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmdb
),
20403 cCE("fstmfdx", d200b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmdb
),
20405 /* Monadic operations. */
20406 cCE("fabss", eb00ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
20407 cCE("fnegs", eb10a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
20408 cCE("fsqrts", eb10ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
20410 /* Dyadic operations. */
20411 cCE("fadds", e300a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
20412 cCE("fsubs", e300a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
20413 cCE("fmuls", e200a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
20414 cCE("fdivs", e800a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
20415 cCE("fmacs", e000a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
20416 cCE("fmscs", e100a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
20417 cCE("fnmuls", e200a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
20418 cCE("fnmacs", e000a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
20419 cCE("fnmscs", e100a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
20422 cCE("fcmps", eb40a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
20423 cCE("fcmpzs", eb50a40
, 1, (RVS
), vfp_sp_compare_z
),
20424 cCE("fcmpes", eb40ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
20425 cCE("fcmpezs", eb50ac0
, 1, (RVS
), vfp_sp_compare_z
),
20427 /* Double precision load/store are still present on single precision
20428 implementations. */
20429 cCE("fldd", d100b00
, 2, (RVD
, ADDRGLDC
), vfp_dp_ldst
),
20430 cCE("fstd", d000b00
, 2, (RVD
, ADDRGLDC
), vfp_dp_ldst
),
20431 cCE("fldmiad", c900b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmia
),
20432 cCE("fldmfdd", c900b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmia
),
20433 cCE("fldmdbd", d300b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmdb
),
20434 cCE("fldmead", d300b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmdb
),
20435 cCE("fstmiad", c800b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmia
),
20436 cCE("fstmead", c800b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmia
),
20437 cCE("fstmdbd", d200b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmdb
),
20438 cCE("fstmfdd", d200b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmdb
),
20441 #define ARM_VARIANT & fpu_vfp_ext_v1 /* VFP V1 (Double precision). */
20443 /* Moves and type conversions. */
20444 cCE("fcpyd", eb00b40
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
20445 cCE("fcvtds", eb70ac0
, 2, (RVD
, RVS
), vfp_dp_sp_cvt
),
20446 cCE("fcvtsd", eb70bc0
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
20447 cCE("fmdhr", e200b10
, 2, (RVD
, RR
), vfp_dp_rn_rd
),
20448 cCE("fmdlr", e000b10
, 2, (RVD
, RR
), vfp_dp_rn_rd
),
20449 cCE("fmrdh", e300b10
, 2, (RR
, RVD
), vfp_dp_rd_rn
),
20450 cCE("fmrdl", e100b10
, 2, (RR
, RVD
), vfp_dp_rd_rn
),
20451 cCE("fsitod", eb80bc0
, 2, (RVD
, RVS
), vfp_dp_sp_cvt
),
20452 cCE("fuitod", eb80b40
, 2, (RVD
, RVS
), vfp_dp_sp_cvt
),
20453 cCE("ftosid", ebd0b40
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
20454 cCE("ftosizd", ebd0bc0
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
20455 cCE("ftouid", ebc0b40
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
20456 cCE("ftouizd", ebc0bc0
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
20458 /* Monadic operations. */
20459 cCE("fabsd", eb00bc0
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
20460 cCE("fnegd", eb10b40
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
20461 cCE("fsqrtd", eb10bc0
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
20463 /* Dyadic operations. */
20464 cCE("faddd", e300b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
20465 cCE("fsubd", e300b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
20466 cCE("fmuld", e200b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
20467 cCE("fdivd", e800b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
20468 cCE("fmacd", e000b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
20469 cCE("fmscd", e100b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
20470 cCE("fnmuld", e200b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
20471 cCE("fnmacd", e000b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
20472 cCE("fnmscd", e100b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
20475 cCE("fcmpd", eb40b40
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
20476 cCE("fcmpzd", eb50b40
, 1, (RVD
), vfp_dp_rd
),
20477 cCE("fcmped", eb40bc0
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
20478 cCE("fcmpezd", eb50bc0
, 1, (RVD
), vfp_dp_rd
),
20481 #define ARM_VARIANT & fpu_vfp_ext_v2
20483 cCE("fmsrr", c400a10
, 3, (VRSLST
, RR
, RR
), vfp_sp2_from_reg2
),
20484 cCE("fmrrs", c500a10
, 3, (RR
, RR
, VRSLST
), vfp_reg2_from_sp2
),
20485 cCE("fmdrr", c400b10
, 3, (RVD
, RR
, RR
), vfp_dp_rm_rd_rn
),
20486 cCE("fmrrd", c500b10
, 3, (RR
, RR
, RVD
), vfp_dp_rd_rn_rm
),
20488 /* Instructions which may belong to either the Neon or VFP instruction sets.
20489 Individual encoder functions perform additional architecture checks. */
20491 #define ARM_VARIANT & fpu_vfp_ext_v1xd
20492 #undef THUMB_VARIANT
20493 #define THUMB_VARIANT & fpu_vfp_ext_v1xd
20495 /* These mnemonics are unique to VFP. */
20496 NCE(vsqrt
, 0, 2, (RVSD
, RVSD
), vfp_nsyn_sqrt
),
20497 NCE(vdiv
, 0, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_div
),
20498 nCE(vnmul
, _vnmul
, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_nmul
),
20499 nCE(vnmla
, _vnmla
, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_nmul
),
20500 nCE(vnmls
, _vnmls
, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_nmul
),
20501 nCE(vcmp
, _vcmp
, 2, (RVSD
, RSVD_FI0
), vfp_nsyn_cmp
),
20502 nCE(vcmpe
, _vcmpe
, 2, (RVSD
, RSVD_FI0
), vfp_nsyn_cmp
),
20503 NCE(vpush
, 0, 1, (VRSDLST
), vfp_nsyn_push
),
20504 NCE(vpop
, 0, 1, (VRSDLST
), vfp_nsyn_pop
),
20505 NCE(vcvtz
, 0, 2, (RVSD
, RVSD
), vfp_nsyn_cvtz
),
20507 /* Mnemonics shared by Neon and VFP. */
20508 nCEF(vmul
, _vmul
, 3, (RNSDQ
, oRNSDQ
, RNSDQ_RNSC
), neon_mul
),
20509 nCEF(vmla
, _vmla
, 3, (RNSDQ
, oRNSDQ
, RNSDQ_RNSC
), neon_mac_maybe_scalar
),
20510 nCEF(vmls
, _vmls
, 3, (RNSDQ
, oRNSDQ
, RNSDQ_RNSC
), neon_mac_maybe_scalar
),
20512 nCEF(vadd
, _vadd
, 3, (RNSDQ
, oRNSDQ
, RNSDQ
), neon_addsub_if_i
),
20513 nCEF(vsub
, _vsub
, 3, (RNSDQ
, oRNSDQ
, RNSDQ
), neon_addsub_if_i
),
20515 NCEF(vabs
, 1b10300
, 2, (RNSDQ
, RNSDQ
), neon_abs_neg
),
20516 NCEF(vneg
, 1b10380
, 2, (RNSDQ
, RNSDQ
), neon_abs_neg
),
20518 NCE(vldm
, c900b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
20519 NCE(vldmia
, c900b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
20520 NCE(vldmdb
, d100b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
20521 NCE(vstm
, c800b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
20522 NCE(vstmia
, c800b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
20523 NCE(vstmdb
, d000b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
20524 NCE(vldr
, d100b00
, 2, (RVSD
, ADDRGLDC
), neon_ldr_str
),
20525 NCE(vstr
, d000b00
, 2, (RVSD
, ADDRGLDC
), neon_ldr_str
),
20527 nCEF(vcvt
, _vcvt
, 3, (RNSDQ
, RNSDQ
, oI32z
), neon_cvt
),
20528 nCEF(vcvtr
, _vcvt
, 2, (RNSDQ
, RNSDQ
), neon_cvtr
),
20529 NCEF(vcvtb
, eb20a40
, 2, (RVSD
, RVSD
), neon_cvtb
),
20530 NCEF(vcvtt
, eb20a40
, 2, (RVSD
, RVSD
), neon_cvtt
),
20533 /* NOTE: All VMOV encoding is special-cased! */
20534 NCE(vmov
, 0, 1, (VMOV
), neon_mov
),
20535 NCE(vmovq
, 0, 1, (VMOV
), neon_mov
),
20538 #define ARM_VARIANT & arm_ext_fp16
20539 #undef THUMB_VARIANT
20540 #define THUMB_VARIANT & arm_ext_fp16
20541 /* New instructions added from v8.2, allowing the extraction and insertion of
20542 the upper 16 bits of a 32-bit vector register. */
20543 NCE (vmovx
, eb00a40
, 2, (RVS
, RVS
), neon_movhf
),
20544 NCE (vins
, eb00ac0
, 2, (RVS
, RVS
), neon_movhf
),
20546 #undef THUMB_VARIANT
20547 #define THUMB_VARIANT & fpu_neon_ext_v1
20549 #define ARM_VARIANT & fpu_neon_ext_v1
20551 /* Data processing with three registers of the same length. */
20552 /* integer ops, valid types S8 S16 S32 U8 U16 U32. */
20553 NUF(vaba
, 0000710, 3, (RNDQ
, RNDQ
, RNDQ
), neon_dyadic_i_su
),
20554 NUF(vabaq
, 0000710, 3, (RNQ
, RNQ
, RNQ
), neon_dyadic_i_su
),
20555 NUF(vhadd
, 0000000, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i_su
),
20556 NUF(vhaddq
, 0000000, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i_su
),
20557 NUF(vrhadd
, 0000100, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i_su
),
20558 NUF(vrhaddq
, 0000100, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i_su
),
20559 NUF(vhsub
, 0000200, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i_su
),
20560 NUF(vhsubq
, 0000200, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i_su
),
20561 /* integer ops, valid types S8 S16 S32 S64 U8 U16 U32 U64. */
20562 NUF(vqadd
, 0000010, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i64_su
),
20563 NUF(vqaddq
, 0000010, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i64_su
),
20564 NUF(vqsub
, 0000210, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i64_su
),
20565 NUF(vqsubq
, 0000210, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i64_su
),
20566 NUF(vrshl
, 0000500, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_rshl
),
20567 NUF(vrshlq
, 0000500, 3, (RNQ
, oRNQ
, RNQ
), neon_rshl
),
20568 NUF(vqrshl
, 0000510, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_rshl
),
20569 NUF(vqrshlq
, 0000510, 3, (RNQ
, oRNQ
, RNQ
), neon_rshl
),
20570 /* If not immediate, fall back to neon_dyadic_i64_su.
20571 shl_imm should accept I8 I16 I32 I64,
20572 qshl_imm should accept S8 S16 S32 S64 U8 U16 U32 U64. */
20573 nUF(vshl
, _vshl
, 3, (RNDQ
, oRNDQ
, RNDQ_I63b
), neon_shl_imm
),
20574 nUF(vshlq
, _vshl
, 3, (RNQ
, oRNQ
, RNDQ_I63b
), neon_shl_imm
),
20575 nUF(vqshl
, _vqshl
, 3, (RNDQ
, oRNDQ
, RNDQ_I63b
), neon_qshl_imm
),
20576 nUF(vqshlq
, _vqshl
, 3, (RNQ
, oRNQ
, RNDQ_I63b
), neon_qshl_imm
),
20577 /* Logic ops, types optional & ignored. */
20578 nUF(vand
, _vand
, 3, (RNDQ
, oRNDQ
, RNDQ_Ibig
), neon_logic
),
20579 nUF(vandq
, _vand
, 3, (RNQ
, oRNQ
, RNDQ_Ibig
), neon_logic
),
20580 nUF(vbic
, _vbic
, 3, (RNDQ
, oRNDQ
, RNDQ_Ibig
), neon_logic
),
20581 nUF(vbicq
, _vbic
, 3, (RNQ
, oRNQ
, RNDQ_Ibig
), neon_logic
),
20582 nUF(vorr
, _vorr
, 3, (RNDQ
, oRNDQ
, RNDQ_Ibig
), neon_logic
),
20583 nUF(vorrq
, _vorr
, 3, (RNQ
, oRNQ
, RNDQ_Ibig
), neon_logic
),
20584 nUF(vorn
, _vorn
, 3, (RNDQ
, oRNDQ
, RNDQ_Ibig
), neon_logic
),
20585 nUF(vornq
, _vorn
, 3, (RNQ
, oRNQ
, RNDQ_Ibig
), neon_logic
),
20586 nUF(veor
, _veor
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_logic
),
20587 nUF(veorq
, _veor
, 3, (RNQ
, oRNQ
, RNQ
), neon_logic
),
20588 /* Bitfield ops, untyped. */
20589 NUF(vbsl
, 1100110, 3, (RNDQ
, RNDQ
, RNDQ
), neon_bitfield
),
20590 NUF(vbslq
, 1100110, 3, (RNQ
, RNQ
, RNQ
), neon_bitfield
),
20591 NUF(vbit
, 1200110, 3, (RNDQ
, RNDQ
, RNDQ
), neon_bitfield
),
20592 NUF(vbitq
, 1200110, 3, (RNQ
, RNQ
, RNQ
), neon_bitfield
),
20593 NUF(vbif
, 1300110, 3, (RNDQ
, RNDQ
, RNDQ
), neon_bitfield
),
20594 NUF(vbifq
, 1300110, 3, (RNQ
, RNQ
, RNQ
), neon_bitfield
),
20595 /* Int and float variants, types S8 S16 S32 U8 U16 U32 F16 F32. */
20596 nUF(vabd
, _vabd
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_if_su
),
20597 nUF(vabdq
, _vabd
, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_if_su
),
20598 nUF(vmax
, _vmax
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_if_su
),
20599 nUF(vmaxq
, _vmax
, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_if_su
),
20600 nUF(vmin
, _vmin
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_if_su
),
20601 nUF(vminq
, _vmin
, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_if_su
),
20602 /* Comparisons. Types S8 S16 S32 U8 U16 U32 F32. Non-immediate versions fall
20603 back to neon_dyadic_if_su. */
20604 nUF(vcge
, _vcge
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_cmp
),
20605 nUF(vcgeq
, _vcge
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_cmp
),
20606 nUF(vcgt
, _vcgt
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_cmp
),
20607 nUF(vcgtq
, _vcgt
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_cmp
),
20608 nUF(vclt
, _vclt
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_cmp_inv
),
20609 nUF(vcltq
, _vclt
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_cmp_inv
),
20610 nUF(vcle
, _vcle
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_cmp_inv
),
20611 nUF(vcleq
, _vcle
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_cmp_inv
),
20612 /* Comparison. Type I8 I16 I32 F32. */
20613 nUF(vceq
, _vceq
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_ceq
),
20614 nUF(vceqq
, _vceq
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_ceq
),
20615 /* As above, D registers only. */
20616 nUF(vpmax
, _vpmax
, 3, (RND
, oRND
, RND
), neon_dyadic_if_su_d
),
20617 nUF(vpmin
, _vpmin
, 3, (RND
, oRND
, RND
), neon_dyadic_if_su_d
),
20618 /* Int and float variants, signedness unimportant. */
20619 nUF(vmlaq
, _vmla
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_mac_maybe_scalar
),
20620 nUF(vmlsq
, _vmls
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_mac_maybe_scalar
),
20621 nUF(vpadd
, _vpadd
, 3, (RND
, oRND
, RND
), neon_dyadic_if_i_d
),
20622 /* Add/sub take types I8 I16 I32 I64 F32. */
20623 nUF(vaddq
, _vadd
, 3, (RNQ
, oRNQ
, RNQ
), neon_addsub_if_i
),
20624 nUF(vsubq
, _vsub
, 3, (RNQ
, oRNQ
, RNQ
), neon_addsub_if_i
),
20625 /* vtst takes sizes 8, 16, 32. */
20626 NUF(vtst
, 0000810, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_tst
),
20627 NUF(vtstq
, 0000810, 3, (RNQ
, oRNQ
, RNQ
), neon_tst
),
20628 /* VMUL takes I8 I16 I32 F32 P8. */
20629 nUF(vmulq
, _vmul
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_mul
),
20630 /* VQD{R}MULH takes S16 S32. */
20631 nUF(vqdmulh
, _vqdmulh
, 3, (RNDQ
, oRNDQ
, RNDQ_RNSC
), neon_qdmulh
),
20632 nUF(vqdmulhq
, _vqdmulh
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_qdmulh
),
20633 nUF(vqrdmulh
, _vqrdmulh
, 3, (RNDQ
, oRNDQ
, RNDQ_RNSC
), neon_qdmulh
),
20634 nUF(vqrdmulhq
, _vqrdmulh
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_qdmulh
),
20635 NUF(vacge
, 0000e10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_fcmp_absolute
),
20636 NUF(vacgeq
, 0000e10
, 3, (RNQ
, oRNQ
, RNQ
), neon_fcmp_absolute
),
20637 NUF(vacgt
, 0200e10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_fcmp_absolute
),
20638 NUF(vacgtq
, 0200e10
, 3, (RNQ
, oRNQ
, RNQ
), neon_fcmp_absolute
),
20639 NUF(vaclt
, 0200e10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_fcmp_absolute_inv
),
20640 NUF(vacltq
, 0200e10
, 3, (RNQ
, oRNQ
, RNQ
), neon_fcmp_absolute_inv
),
20641 NUF(vacle
, 0000e10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_fcmp_absolute_inv
),
20642 NUF(vacleq
, 0000e10
, 3, (RNQ
, oRNQ
, RNQ
), neon_fcmp_absolute_inv
),
20643 NUF(vrecps
, 0000f10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_step
),
20644 NUF(vrecpsq
, 0000f10
, 3, (RNQ
, oRNQ
, RNQ
), neon_step
),
20645 NUF(vrsqrts
, 0200f10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_step
),
20646 NUF(vrsqrtsq
, 0200f10
, 3, (RNQ
, oRNQ
, RNQ
), neon_step
),
20647 /* ARM v8.1 extension. */
20648 nUF (vqrdmlah
, _vqrdmlah
, 3, (RNDQ
, oRNDQ
, RNDQ_RNSC
), neon_qrdmlah
),
20649 nUF (vqrdmlahq
, _vqrdmlah
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_qrdmlah
),
20650 nUF (vqrdmlsh
, _vqrdmlsh
, 3, (RNDQ
, oRNDQ
, RNDQ_RNSC
), neon_qrdmlah
),
20651 nUF (vqrdmlshq
, _vqrdmlsh
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_qrdmlah
),
20653 /* Two address, int/float. Types S8 S16 S32 F32. */
20654 NUF(vabsq
, 1b10300
, 2, (RNQ
, RNQ
), neon_abs_neg
),
20655 NUF(vnegq
, 1b10380
, 2, (RNQ
, RNQ
), neon_abs_neg
),
20657 /* Data processing with two registers and a shift amount. */
20658 /* Right shifts, and variants with rounding.
20659 Types accepted S8 S16 S32 S64 U8 U16 U32 U64. */
20660 NUF(vshr
, 0800010, 3, (RNDQ
, oRNDQ
, I64z
), neon_rshift_round_imm
),
20661 NUF(vshrq
, 0800010, 3, (RNQ
, oRNQ
, I64z
), neon_rshift_round_imm
),
20662 NUF(vrshr
, 0800210, 3, (RNDQ
, oRNDQ
, I64z
), neon_rshift_round_imm
),
20663 NUF(vrshrq
, 0800210, 3, (RNQ
, oRNQ
, I64z
), neon_rshift_round_imm
),
20664 NUF(vsra
, 0800110, 3, (RNDQ
, oRNDQ
, I64
), neon_rshift_round_imm
),
20665 NUF(vsraq
, 0800110, 3, (RNQ
, oRNQ
, I64
), neon_rshift_round_imm
),
20666 NUF(vrsra
, 0800310, 3, (RNDQ
, oRNDQ
, I64
), neon_rshift_round_imm
),
20667 NUF(vrsraq
, 0800310, 3, (RNQ
, oRNQ
, I64
), neon_rshift_round_imm
),
20668 /* Shift and insert. Sizes accepted 8 16 32 64. */
20669 NUF(vsli
, 1800510, 3, (RNDQ
, oRNDQ
, I63
), neon_sli
),
20670 NUF(vsliq
, 1800510, 3, (RNQ
, oRNQ
, I63
), neon_sli
),
20671 NUF(vsri
, 1800410, 3, (RNDQ
, oRNDQ
, I64
), neon_sri
),
20672 NUF(vsriq
, 1800410, 3, (RNQ
, oRNQ
, I64
), neon_sri
),
20673 /* QSHL{U} immediate accepts S8 S16 S32 S64 U8 U16 U32 U64. */
20674 NUF(vqshlu
, 1800610, 3, (RNDQ
, oRNDQ
, I63
), neon_qshlu_imm
),
20675 NUF(vqshluq
, 1800610, 3, (RNQ
, oRNQ
, I63
), neon_qshlu_imm
),
20676 /* Right shift immediate, saturating & narrowing, with rounding variants.
20677 Types accepted S16 S32 S64 U16 U32 U64. */
20678 NUF(vqshrn
, 0800910, 3, (RND
, RNQ
, I32z
), neon_rshift_sat_narrow
),
20679 NUF(vqrshrn
, 0800950, 3, (RND
, RNQ
, I32z
), neon_rshift_sat_narrow
),
20680 /* As above, unsigned. Types accepted S16 S32 S64. */
20681 NUF(vqshrun
, 0800810, 3, (RND
, RNQ
, I32z
), neon_rshift_sat_narrow_u
),
20682 NUF(vqrshrun
, 0800850, 3, (RND
, RNQ
, I32z
), neon_rshift_sat_narrow_u
),
20683 /* Right shift narrowing. Types accepted I16 I32 I64. */
20684 NUF(vshrn
, 0800810, 3, (RND
, RNQ
, I32z
), neon_rshift_narrow
),
20685 NUF(vrshrn
, 0800850, 3, (RND
, RNQ
, I32z
), neon_rshift_narrow
),
20686 /* Special case. Types S8 S16 S32 U8 U16 U32. Handles max shift variant. */
20687 nUF(vshll
, _vshll
, 3, (RNQ
, RND
, I32
), neon_shll
),
20688 /* CVT with optional immediate for fixed-point variant. */
20689 nUF(vcvtq
, _vcvt
, 3, (RNQ
, RNQ
, oI32b
), neon_cvt
),
20691 nUF(vmvn
, _vmvn
, 2, (RNDQ
, RNDQ_Ibig
), neon_mvn
),
20692 nUF(vmvnq
, _vmvn
, 2, (RNQ
, RNDQ_Ibig
), neon_mvn
),
20694 /* Data processing, three registers of different lengths. */
20695 /* Dyadic, long insns. Types S8 S16 S32 U8 U16 U32. */
20696 NUF(vabal
, 0800500, 3, (RNQ
, RND
, RND
), neon_abal
),
20697 NUF(vabdl
, 0800700, 3, (RNQ
, RND
, RND
), neon_dyadic_long
),
20698 NUF(vaddl
, 0800000, 3, (RNQ
, RND
, RND
), neon_dyadic_long
),
20699 NUF(vsubl
, 0800200, 3, (RNQ
, RND
, RND
), neon_dyadic_long
),
20700 /* If not scalar, fall back to neon_dyadic_long.
20701 Vector types as above, scalar types S16 S32 U16 U32. */
20702 nUF(vmlal
, _vmlal
, 3, (RNQ
, RND
, RND_RNSC
), neon_mac_maybe_scalar_long
),
20703 nUF(vmlsl
, _vmlsl
, 3, (RNQ
, RND
, RND_RNSC
), neon_mac_maybe_scalar_long
),
20704 /* Dyadic, widening insns. Types S8 S16 S32 U8 U16 U32. */
20705 NUF(vaddw
, 0800100, 3, (RNQ
, oRNQ
, RND
), neon_dyadic_wide
),
20706 NUF(vsubw
, 0800300, 3, (RNQ
, oRNQ
, RND
), neon_dyadic_wide
),
20707 /* Dyadic, narrowing insns. Types I16 I32 I64. */
20708 NUF(vaddhn
, 0800400, 3, (RND
, RNQ
, RNQ
), neon_dyadic_narrow
),
20709 NUF(vraddhn
, 1800400, 3, (RND
, RNQ
, RNQ
), neon_dyadic_narrow
),
20710 NUF(vsubhn
, 0800600, 3, (RND
, RNQ
, RNQ
), neon_dyadic_narrow
),
20711 NUF(vrsubhn
, 1800600, 3, (RND
, RNQ
, RNQ
), neon_dyadic_narrow
),
20712 /* Saturating doubling multiplies. Types S16 S32. */
20713 nUF(vqdmlal
, _vqdmlal
, 3, (RNQ
, RND
, RND_RNSC
), neon_mul_sat_scalar_long
),
20714 nUF(vqdmlsl
, _vqdmlsl
, 3, (RNQ
, RND
, RND_RNSC
), neon_mul_sat_scalar_long
),
20715 nUF(vqdmull
, _vqdmull
, 3, (RNQ
, RND
, RND_RNSC
), neon_mul_sat_scalar_long
),
20716 /* VMULL. Vector types S8 S16 S32 U8 U16 U32 P8, scalar types
20717 S16 S32 U16 U32. */
20718 nUF(vmull
, _vmull
, 3, (RNQ
, RND
, RND_RNSC
), neon_vmull
),
20720 /* Extract. Size 8. */
20721 NUF(vext
, 0b00000, 4, (RNDQ
, oRNDQ
, RNDQ
, I15
), neon_ext
),
20722 NUF(vextq
, 0b00000, 4, (RNQ
, oRNQ
, RNQ
, I15
), neon_ext
),
20724 /* Two registers, miscellaneous. */
20725 /* Reverse. Sizes 8 16 32 (must be < size in opcode). */
20726 NUF(vrev64
, 1b00000
, 2, (RNDQ
, RNDQ
), neon_rev
),
20727 NUF(vrev64q
, 1b00000
, 2, (RNQ
, RNQ
), neon_rev
),
20728 NUF(vrev32
, 1b00080
, 2, (RNDQ
, RNDQ
), neon_rev
),
20729 NUF(vrev32q
, 1b00080
, 2, (RNQ
, RNQ
), neon_rev
),
20730 NUF(vrev16
, 1b00100
, 2, (RNDQ
, RNDQ
), neon_rev
),
20731 NUF(vrev16q
, 1b00100
, 2, (RNQ
, RNQ
), neon_rev
),
20732 /* Vector replicate. Sizes 8 16 32. */
20733 nCE(vdup
, _vdup
, 2, (RNDQ
, RR_RNSC
), neon_dup
),
20734 nCE(vdupq
, _vdup
, 2, (RNQ
, RR_RNSC
), neon_dup
),
20735 /* VMOVL. Types S8 S16 S32 U8 U16 U32. */
20736 NUF(vmovl
, 0800a10
, 2, (RNQ
, RND
), neon_movl
),
20737 /* VMOVN. Types I16 I32 I64. */
20738 nUF(vmovn
, _vmovn
, 2, (RND
, RNQ
), neon_movn
),
20739 /* VQMOVN. Types S16 S32 S64 U16 U32 U64. */
20740 nUF(vqmovn
, _vqmovn
, 2, (RND
, RNQ
), neon_qmovn
),
20741 /* VQMOVUN. Types S16 S32 S64. */
20742 nUF(vqmovun
, _vqmovun
, 2, (RND
, RNQ
), neon_qmovun
),
20743 /* VZIP / VUZP. Sizes 8 16 32. */
20744 NUF(vzip
, 1b20180
, 2, (RNDQ
, RNDQ
), neon_zip_uzp
),
20745 NUF(vzipq
, 1b20180
, 2, (RNQ
, RNQ
), neon_zip_uzp
),
20746 NUF(vuzp
, 1b20100
, 2, (RNDQ
, RNDQ
), neon_zip_uzp
),
20747 NUF(vuzpq
, 1b20100
, 2, (RNQ
, RNQ
), neon_zip_uzp
),
20748 /* VQABS / VQNEG. Types S8 S16 S32. */
20749 NUF(vqabs
, 1b00700
, 2, (RNDQ
, RNDQ
), neon_sat_abs_neg
),
20750 NUF(vqabsq
, 1b00700
, 2, (RNQ
, RNQ
), neon_sat_abs_neg
),
20751 NUF(vqneg
, 1b00780
, 2, (RNDQ
, RNDQ
), neon_sat_abs_neg
),
20752 NUF(vqnegq
, 1b00780
, 2, (RNQ
, RNQ
), neon_sat_abs_neg
),
20753 /* Pairwise, lengthening. Types S8 S16 S32 U8 U16 U32. */
20754 NUF(vpadal
, 1b00600
, 2, (RNDQ
, RNDQ
), neon_pair_long
),
20755 NUF(vpadalq
, 1b00600
, 2, (RNQ
, RNQ
), neon_pair_long
),
20756 NUF(vpaddl
, 1b00200
, 2, (RNDQ
, RNDQ
), neon_pair_long
),
20757 NUF(vpaddlq
, 1b00200
, 2, (RNQ
, RNQ
), neon_pair_long
),
20758 /* Reciprocal estimates. Types U32 F16 F32. */
20759 NUF(vrecpe
, 1b30400
, 2, (RNDQ
, RNDQ
), neon_recip_est
),
20760 NUF(vrecpeq
, 1b30400
, 2, (RNQ
, RNQ
), neon_recip_est
),
20761 NUF(vrsqrte
, 1b30480
, 2, (RNDQ
, RNDQ
), neon_recip_est
),
20762 NUF(vrsqrteq
, 1b30480
, 2, (RNQ
, RNQ
), neon_recip_est
),
20763 /* VCLS. Types S8 S16 S32. */
20764 NUF(vcls
, 1b00400
, 2, (RNDQ
, RNDQ
), neon_cls
),
20765 NUF(vclsq
, 1b00400
, 2, (RNQ
, RNQ
), neon_cls
),
20766 /* VCLZ. Types I8 I16 I32. */
20767 NUF(vclz
, 1b00480
, 2, (RNDQ
, RNDQ
), neon_clz
),
20768 NUF(vclzq
, 1b00480
, 2, (RNQ
, RNQ
), neon_clz
),
20769 /* VCNT. Size 8. */
20770 NUF(vcnt
, 1b00500
, 2, (RNDQ
, RNDQ
), neon_cnt
),
20771 NUF(vcntq
, 1b00500
, 2, (RNQ
, RNQ
), neon_cnt
),
20772 /* Two address, untyped. */
20773 NUF(vswp
, 1b20000
, 2, (RNDQ
, RNDQ
), neon_swp
),
20774 NUF(vswpq
, 1b20000
, 2, (RNQ
, RNQ
), neon_swp
),
20775 /* VTRN. Sizes 8 16 32. */
20776 nUF(vtrn
, _vtrn
, 2, (RNDQ
, RNDQ
), neon_trn
),
20777 nUF(vtrnq
, _vtrn
, 2, (RNQ
, RNQ
), neon_trn
),
20779 /* Table lookup. Size 8. */
20780 NUF(vtbl
, 1b00800
, 3, (RND
, NRDLST
, RND
), neon_tbl_tbx
),
20781 NUF(vtbx
, 1b00840
, 3, (RND
, NRDLST
, RND
), neon_tbl_tbx
),
20783 #undef THUMB_VARIANT
20784 #define THUMB_VARIANT & fpu_vfp_v3_or_neon_ext
20786 #define ARM_VARIANT & fpu_vfp_v3_or_neon_ext
20788 /* Neon element/structure load/store. */
20789 nUF(vld1
, _vld1
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
20790 nUF(vst1
, _vst1
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
20791 nUF(vld2
, _vld2
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
20792 nUF(vst2
, _vst2
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
20793 nUF(vld3
, _vld3
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
20794 nUF(vst3
, _vst3
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
20795 nUF(vld4
, _vld4
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
20796 nUF(vst4
, _vst4
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
20798 #undef THUMB_VARIANT
20799 #define THUMB_VARIANT & fpu_vfp_ext_v3xd
20801 #define ARM_VARIANT & fpu_vfp_ext_v3xd
20802 cCE("fconsts", eb00a00
, 2, (RVS
, I255
), vfp_sp_const
),
20803 cCE("fshtos", eba0a40
, 2, (RVS
, I16z
), vfp_sp_conv_16
),
20804 cCE("fsltos", eba0ac0
, 2, (RVS
, I32
), vfp_sp_conv_32
),
20805 cCE("fuhtos", ebb0a40
, 2, (RVS
, I16z
), vfp_sp_conv_16
),
20806 cCE("fultos", ebb0ac0
, 2, (RVS
, I32
), vfp_sp_conv_32
),
20807 cCE("ftoshs", ebe0a40
, 2, (RVS
, I16z
), vfp_sp_conv_16
),
20808 cCE("ftosls", ebe0ac0
, 2, (RVS
, I32
), vfp_sp_conv_32
),
20809 cCE("ftouhs", ebf0a40
, 2, (RVS
, I16z
), vfp_sp_conv_16
),
20810 cCE("ftouls", ebf0ac0
, 2, (RVS
, I32
), vfp_sp_conv_32
),
20812 #undef THUMB_VARIANT
20813 #define THUMB_VARIANT & fpu_vfp_ext_v3
20815 #define ARM_VARIANT & fpu_vfp_ext_v3
20817 cCE("fconstd", eb00b00
, 2, (RVD
, I255
), vfp_dp_const
),
20818 cCE("fshtod", eba0b40
, 2, (RVD
, I16z
), vfp_dp_conv_16
),
20819 cCE("fsltod", eba0bc0
, 2, (RVD
, I32
), vfp_dp_conv_32
),
20820 cCE("fuhtod", ebb0b40
, 2, (RVD
, I16z
), vfp_dp_conv_16
),
20821 cCE("fultod", ebb0bc0
, 2, (RVD
, I32
), vfp_dp_conv_32
),
20822 cCE("ftoshd", ebe0b40
, 2, (RVD
, I16z
), vfp_dp_conv_16
),
20823 cCE("ftosld", ebe0bc0
, 2, (RVD
, I32
), vfp_dp_conv_32
),
20824 cCE("ftouhd", ebf0b40
, 2, (RVD
, I16z
), vfp_dp_conv_16
),
20825 cCE("ftould", ebf0bc0
, 2, (RVD
, I32
), vfp_dp_conv_32
),
20828 #define ARM_VARIANT & fpu_vfp_ext_fma
20829 #undef THUMB_VARIANT
20830 #define THUMB_VARIANT & fpu_vfp_ext_fma
20831 /* Mnemonics shared by Neon and VFP. These are included in the
20832 VFP FMA variant; NEON and VFP FMA always includes the NEON
20833 FMA instructions. */
20834 nCEF(vfma
, _vfma
, 3, (RNSDQ
, oRNSDQ
, RNSDQ
), neon_fmac
),
20835 nCEF(vfms
, _vfms
, 3, (RNSDQ
, oRNSDQ
, RNSDQ
), neon_fmac
),
20836 /* ffmas/ffmad/ffmss/ffmsd are dummy mnemonics to satisfy gas;
20837 the v form should always be used. */
20838 cCE("ffmas", ea00a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
20839 cCE("ffnmas", ea00a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
20840 cCE("ffmad", ea00b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
20841 cCE("ffnmad", ea00b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
20842 nCE(vfnma
, _vfnma
, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_nmul
),
20843 nCE(vfnms
, _vfnms
, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_nmul
),
20845 #undef THUMB_VARIANT
20847 #define ARM_VARIANT & arm_cext_xscale /* Intel XScale extensions. */
20849 cCE("mia", e200010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
20850 cCE("miaph", e280010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
20851 cCE("miabb", e2c0010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
20852 cCE("miabt", e2d0010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
20853 cCE("miatb", e2e0010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
20854 cCE("miatt", e2f0010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
20855 cCE("mar", c400000
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mar
),
20856 cCE("mra", c500000
, 3, (RRnpc
, RRnpc
, RXA
), xsc_mra
),
20859 #define ARM_VARIANT & arm_cext_iwmmxt /* Intel Wireless MMX technology. */
20861 cCE("tandcb", e13f130
, 1, (RR
), iwmmxt_tandorc
),
20862 cCE("tandch", e53f130
, 1, (RR
), iwmmxt_tandorc
),
20863 cCE("tandcw", e93f130
, 1, (RR
), iwmmxt_tandorc
),
20864 cCE("tbcstb", e400010
, 2, (RIWR
, RR
), rn_rd
),
20865 cCE("tbcsth", e400050
, 2, (RIWR
, RR
), rn_rd
),
20866 cCE("tbcstw", e400090
, 2, (RIWR
, RR
), rn_rd
),
20867 cCE("textrcb", e130170
, 2, (RR
, I7
), iwmmxt_textrc
),
20868 cCE("textrch", e530170
, 2, (RR
, I7
), iwmmxt_textrc
),
20869 cCE("textrcw", e930170
, 2, (RR
, I7
), iwmmxt_textrc
),
20870 cCE("textrmub",e100070
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
20871 cCE("textrmuh",e500070
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
20872 cCE("textrmuw",e900070
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
20873 cCE("textrmsb",e100078
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
20874 cCE("textrmsh",e500078
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
20875 cCE("textrmsw",e900078
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
20876 cCE("tinsrb", e600010
, 3, (RIWR
, RR
, I7
), iwmmxt_tinsr
),
20877 cCE("tinsrh", e600050
, 3, (RIWR
, RR
, I7
), iwmmxt_tinsr
),
20878 cCE("tinsrw", e600090
, 3, (RIWR
, RR
, I7
), iwmmxt_tinsr
),
20879 cCE("tmcr", e000110
, 2, (RIWC_RIWG
, RR
), rn_rd
),
20880 cCE("tmcrr", c400000
, 3, (RIWR
, RR
, RR
), rm_rd_rn
),
20881 cCE("tmia", e200010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
20882 cCE("tmiaph", e280010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
20883 cCE("tmiabb", e2c0010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
20884 cCE("tmiabt", e2d0010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
20885 cCE("tmiatb", e2e0010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
20886 cCE("tmiatt", e2f0010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
20887 cCE("tmovmskb",e100030
, 2, (RR
, RIWR
), rd_rn
),
20888 cCE("tmovmskh",e500030
, 2, (RR
, RIWR
), rd_rn
),
20889 cCE("tmovmskw",e900030
, 2, (RR
, RIWR
), rd_rn
),
20890 cCE("tmrc", e100110
, 2, (RR
, RIWC_RIWG
), rd_rn
),
20891 cCE("tmrrc", c500000
, 3, (RR
, RR
, RIWR
), rd_rn_rm
),
20892 cCE("torcb", e13f150
, 1, (RR
), iwmmxt_tandorc
),
20893 cCE("torch", e53f150
, 1, (RR
), iwmmxt_tandorc
),
20894 cCE("torcw", e93f150
, 1, (RR
), iwmmxt_tandorc
),
20895 cCE("waccb", e0001c0
, 2, (RIWR
, RIWR
), rd_rn
),
20896 cCE("wacch", e4001c0
, 2, (RIWR
, RIWR
), rd_rn
),
20897 cCE("waccw", e8001c0
, 2, (RIWR
, RIWR
), rd_rn
),
20898 cCE("waddbss", e300180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20899 cCE("waddb", e000180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20900 cCE("waddbus", e100180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20901 cCE("waddhss", e700180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20902 cCE("waddh", e400180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20903 cCE("waddhus", e500180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20904 cCE("waddwss", eb00180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20905 cCE("waddw", e800180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20906 cCE("waddwus", e900180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20907 cCE("waligni", e000020
, 4, (RIWR
, RIWR
, RIWR
, I7
), iwmmxt_waligni
),
20908 cCE("walignr0",e800020
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20909 cCE("walignr1",e900020
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20910 cCE("walignr2",ea00020
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20911 cCE("walignr3",eb00020
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20912 cCE("wand", e200000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20913 cCE("wandn", e300000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20914 cCE("wavg2b", e800000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20915 cCE("wavg2br", e900000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20916 cCE("wavg2h", ec00000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20917 cCE("wavg2hr", ed00000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20918 cCE("wcmpeqb", e000060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20919 cCE("wcmpeqh", e400060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20920 cCE("wcmpeqw", e800060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20921 cCE("wcmpgtub",e100060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20922 cCE("wcmpgtuh",e500060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20923 cCE("wcmpgtuw",e900060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20924 cCE("wcmpgtsb",e300060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20925 cCE("wcmpgtsh",e700060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20926 cCE("wcmpgtsw",eb00060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20927 cCE("wldrb", c100000
, 2, (RIWR
, ADDR
), iwmmxt_wldstbh
),
20928 cCE("wldrh", c500000
, 2, (RIWR
, ADDR
), iwmmxt_wldstbh
),
20929 cCE("wldrw", c100100
, 2, (RIWR_RIWC
, ADDR
), iwmmxt_wldstw
),
20930 cCE("wldrd", c500100
, 2, (RIWR
, ADDR
), iwmmxt_wldstd
),
20931 cCE("wmacs", e600100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20932 cCE("wmacsz", e700100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20933 cCE("wmacu", e400100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20934 cCE("wmacuz", e500100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20935 cCE("wmadds", ea00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20936 cCE("wmaddu", e800100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20937 cCE("wmaxsb", e200160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20938 cCE("wmaxsh", e600160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20939 cCE("wmaxsw", ea00160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20940 cCE("wmaxub", e000160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20941 cCE("wmaxuh", e400160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20942 cCE("wmaxuw", e800160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20943 cCE("wminsb", e300160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20944 cCE("wminsh", e700160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20945 cCE("wminsw", eb00160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20946 cCE("wminub", e100160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20947 cCE("wminuh", e500160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20948 cCE("wminuw", e900160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20949 cCE("wmov", e000000
, 2, (RIWR
, RIWR
), iwmmxt_wmov
),
20950 cCE("wmulsm", e300100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20951 cCE("wmulsl", e200100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20952 cCE("wmulum", e100100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20953 cCE("wmulul", e000100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20954 cCE("wor", e000000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20955 cCE("wpackhss",e700080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20956 cCE("wpackhus",e500080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20957 cCE("wpackwss",eb00080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20958 cCE("wpackwus",e900080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20959 cCE("wpackdss",ef00080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20960 cCE("wpackdus",ed00080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20961 cCE("wrorh", e700040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
20962 cCE("wrorhg", e700148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
20963 cCE("wrorw", eb00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
20964 cCE("wrorwg", eb00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
20965 cCE("wrord", ef00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
20966 cCE("wrordg", ef00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
20967 cCE("wsadb", e000120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20968 cCE("wsadbz", e100120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20969 cCE("wsadh", e400120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20970 cCE("wsadhz", e500120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20971 cCE("wshufh", e0001e0
, 3, (RIWR
, RIWR
, I255
), iwmmxt_wshufh
),
20972 cCE("wsllh", e500040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
20973 cCE("wsllhg", e500148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
20974 cCE("wsllw", e900040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
20975 cCE("wsllwg", e900148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
20976 cCE("wslld", ed00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
20977 cCE("wslldg", ed00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
20978 cCE("wsrah", e400040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
20979 cCE("wsrahg", e400148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
20980 cCE("wsraw", e800040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
20981 cCE("wsrawg", e800148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
20982 cCE("wsrad", ec00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
20983 cCE("wsradg", ec00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
20984 cCE("wsrlh", e600040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
20985 cCE("wsrlhg", e600148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
20986 cCE("wsrlw", ea00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
20987 cCE("wsrlwg", ea00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
20988 cCE("wsrld", ee00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
20989 cCE("wsrldg", ee00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
20990 cCE("wstrb", c000000
, 2, (RIWR
, ADDR
), iwmmxt_wldstbh
),
20991 cCE("wstrh", c400000
, 2, (RIWR
, ADDR
), iwmmxt_wldstbh
),
20992 cCE("wstrw", c000100
, 2, (RIWR_RIWC
, ADDR
), iwmmxt_wldstw
),
20993 cCE("wstrd", c400100
, 2, (RIWR
, ADDR
), iwmmxt_wldstd
),
20994 cCE("wsubbss", e3001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20995 cCE("wsubb", e0001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20996 cCE("wsubbus", e1001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20997 cCE("wsubhss", e7001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20998 cCE("wsubh", e4001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20999 cCE("wsubhus", e5001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21000 cCE("wsubwss", eb001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21001 cCE("wsubw", e8001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21002 cCE("wsubwus", e9001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21003 cCE("wunpckehub",e0000c0
, 2, (RIWR
, RIWR
), rd_rn
),
21004 cCE("wunpckehuh",e4000c0
, 2, (RIWR
, RIWR
), rd_rn
),
21005 cCE("wunpckehuw",e8000c0
, 2, (RIWR
, RIWR
), rd_rn
),
21006 cCE("wunpckehsb",e2000c0
, 2, (RIWR
, RIWR
), rd_rn
),
21007 cCE("wunpckehsh",e6000c0
, 2, (RIWR
, RIWR
), rd_rn
),
21008 cCE("wunpckehsw",ea000c0
, 2, (RIWR
, RIWR
), rd_rn
),
21009 cCE("wunpckihb", e1000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21010 cCE("wunpckihh", e5000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21011 cCE("wunpckihw", e9000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21012 cCE("wunpckelub",e0000e0
, 2, (RIWR
, RIWR
), rd_rn
),
21013 cCE("wunpckeluh",e4000e0
, 2, (RIWR
, RIWR
), rd_rn
),
21014 cCE("wunpckeluw",e8000e0
, 2, (RIWR
, RIWR
), rd_rn
),
21015 cCE("wunpckelsb",e2000e0
, 2, (RIWR
, RIWR
), rd_rn
),
21016 cCE("wunpckelsh",e6000e0
, 2, (RIWR
, RIWR
), rd_rn
),
21017 cCE("wunpckelsw",ea000e0
, 2, (RIWR
, RIWR
), rd_rn
),
21018 cCE("wunpckilb", e1000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21019 cCE("wunpckilh", e5000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21020 cCE("wunpckilw", e9000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21021 cCE("wxor", e100000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21022 cCE("wzero", e300000
, 1, (RIWR
), iwmmxt_wzero
),
21025 #define ARM_VARIANT & arm_cext_iwmmxt2 /* Intel Wireless MMX technology, version 2. */
21027 cCE("torvscb", e12f190
, 1, (RR
), iwmmxt_tandorc
),
21028 cCE("torvsch", e52f190
, 1, (RR
), iwmmxt_tandorc
),
21029 cCE("torvscw", e92f190
, 1, (RR
), iwmmxt_tandorc
),
21030 cCE("wabsb", e2001c0
, 2, (RIWR
, RIWR
), rd_rn
),
21031 cCE("wabsh", e6001c0
, 2, (RIWR
, RIWR
), rd_rn
),
21032 cCE("wabsw", ea001c0
, 2, (RIWR
, RIWR
), rd_rn
),
21033 cCE("wabsdiffb", e1001c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21034 cCE("wabsdiffh", e5001c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21035 cCE("wabsdiffw", e9001c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21036 cCE("waddbhusl", e2001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21037 cCE("waddbhusm", e6001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21038 cCE("waddhc", e600180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21039 cCE("waddwc", ea00180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21040 cCE("waddsubhx", ea001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21041 cCE("wavg4", e400000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21042 cCE("wavg4r", e500000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21043 cCE("wmaddsn", ee00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21044 cCE("wmaddsx", eb00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21045 cCE("wmaddun", ec00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21046 cCE("wmaddux", e900100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21047 cCE("wmerge", e000080
, 4, (RIWR
, RIWR
, RIWR
, I7
), iwmmxt_wmerge
),
21048 cCE("wmiabb", e0000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21049 cCE("wmiabt", e1000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21050 cCE("wmiatb", e2000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21051 cCE("wmiatt", e3000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21052 cCE("wmiabbn", e4000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21053 cCE("wmiabtn", e5000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21054 cCE("wmiatbn", e6000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21055 cCE("wmiattn", e7000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21056 cCE("wmiawbb", e800120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21057 cCE("wmiawbt", e900120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21058 cCE("wmiawtb", ea00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21059 cCE("wmiawtt", eb00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21060 cCE("wmiawbbn", ec00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21061 cCE("wmiawbtn", ed00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21062 cCE("wmiawtbn", ee00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21063 cCE("wmiawttn", ef00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21064 cCE("wmulsmr", ef00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21065 cCE("wmulumr", ed00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21066 cCE("wmulwumr", ec000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21067 cCE("wmulwsmr", ee000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21068 cCE("wmulwum", ed000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21069 cCE("wmulwsm", ef000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21070 cCE("wmulwl", eb000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21071 cCE("wqmiabb", e8000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21072 cCE("wqmiabt", e9000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21073 cCE("wqmiatb", ea000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21074 cCE("wqmiatt", eb000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21075 cCE("wqmiabbn", ec000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21076 cCE("wqmiabtn", ed000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21077 cCE("wqmiatbn", ee000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21078 cCE("wqmiattn", ef000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21079 cCE("wqmulm", e100080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21080 cCE("wqmulmr", e300080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21081 cCE("wqmulwm", ec000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21082 cCE("wqmulwmr", ee000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21083 cCE("wsubaddhx", ed001c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21086 #define ARM_VARIANT & arm_cext_maverick /* Cirrus Maverick instructions. */
21088 cCE("cfldrs", c100400
, 2, (RMF
, ADDRGLDC
), rd_cpaddr
),
21089 cCE("cfldrd", c500400
, 2, (RMD
, ADDRGLDC
), rd_cpaddr
),
21090 cCE("cfldr32", c100500
, 2, (RMFX
, ADDRGLDC
), rd_cpaddr
),
21091 cCE("cfldr64", c500500
, 2, (RMDX
, ADDRGLDC
), rd_cpaddr
),
21092 cCE("cfstrs", c000400
, 2, (RMF
, ADDRGLDC
), rd_cpaddr
),
21093 cCE("cfstrd", c400400
, 2, (RMD
, ADDRGLDC
), rd_cpaddr
),
21094 cCE("cfstr32", c000500
, 2, (RMFX
, ADDRGLDC
), rd_cpaddr
),
21095 cCE("cfstr64", c400500
, 2, (RMDX
, ADDRGLDC
), rd_cpaddr
),
21096 cCE("cfmvsr", e000450
, 2, (RMF
, RR
), rn_rd
),
21097 cCE("cfmvrs", e100450
, 2, (RR
, RMF
), rd_rn
),
21098 cCE("cfmvdlr", e000410
, 2, (RMD
, RR
), rn_rd
),
21099 cCE("cfmvrdl", e100410
, 2, (RR
, RMD
), rd_rn
),
21100 cCE("cfmvdhr", e000430
, 2, (RMD
, RR
), rn_rd
),
21101 cCE("cfmvrdh", e100430
, 2, (RR
, RMD
), rd_rn
),
21102 cCE("cfmv64lr",e000510
, 2, (RMDX
, RR
), rn_rd
),
21103 cCE("cfmvr64l",e100510
, 2, (RR
, RMDX
), rd_rn
),
21104 cCE("cfmv64hr",e000530
, 2, (RMDX
, RR
), rn_rd
),
21105 cCE("cfmvr64h",e100530
, 2, (RR
, RMDX
), rd_rn
),
21106 cCE("cfmval32",e200440
, 2, (RMAX
, RMFX
), rd_rn
),
21107 cCE("cfmv32al",e100440
, 2, (RMFX
, RMAX
), rd_rn
),
21108 cCE("cfmvam32",e200460
, 2, (RMAX
, RMFX
), rd_rn
),
21109 cCE("cfmv32am",e100460
, 2, (RMFX
, RMAX
), rd_rn
),
21110 cCE("cfmvah32",e200480
, 2, (RMAX
, RMFX
), rd_rn
),
21111 cCE("cfmv32ah",e100480
, 2, (RMFX
, RMAX
), rd_rn
),
21112 cCE("cfmva32", e2004a0
, 2, (RMAX
, RMFX
), rd_rn
),
21113 cCE("cfmv32a", e1004a0
, 2, (RMFX
, RMAX
), rd_rn
),
21114 cCE("cfmva64", e2004c0
, 2, (RMAX
, RMDX
), rd_rn
),
21115 cCE("cfmv64a", e1004c0
, 2, (RMDX
, RMAX
), rd_rn
),
21116 cCE("cfmvsc32",e2004e0
, 2, (RMDS
, RMDX
), mav_dspsc
),
21117 cCE("cfmv32sc",e1004e0
, 2, (RMDX
, RMDS
), rd
),
21118 cCE("cfcpys", e000400
, 2, (RMF
, RMF
), rd_rn
),
21119 cCE("cfcpyd", e000420
, 2, (RMD
, RMD
), rd_rn
),
21120 cCE("cfcvtsd", e000460
, 2, (RMD
, RMF
), rd_rn
),
21121 cCE("cfcvtds", e000440
, 2, (RMF
, RMD
), rd_rn
),
21122 cCE("cfcvt32s",e000480
, 2, (RMF
, RMFX
), rd_rn
),
21123 cCE("cfcvt32d",e0004a0
, 2, (RMD
, RMFX
), rd_rn
),
21124 cCE("cfcvt64s",e0004c0
, 2, (RMF
, RMDX
), rd_rn
),
21125 cCE("cfcvt64d",e0004e0
, 2, (RMD
, RMDX
), rd_rn
),
21126 cCE("cfcvts32",e100580
, 2, (RMFX
, RMF
), rd_rn
),
21127 cCE("cfcvtd32",e1005a0
, 2, (RMFX
, RMD
), rd_rn
),
21128 cCE("cftruncs32",e1005c0
, 2, (RMFX
, RMF
), rd_rn
),
21129 cCE("cftruncd32",e1005e0
, 2, (RMFX
, RMD
), rd_rn
),
21130 cCE("cfrshl32",e000550
, 3, (RMFX
, RMFX
, RR
), mav_triple
),
21131 cCE("cfrshl64",e000570
, 3, (RMDX
, RMDX
, RR
), mav_triple
),
21132 cCE("cfsh32", e000500
, 3, (RMFX
, RMFX
, I63s
), mav_shift
),
21133 cCE("cfsh64", e200500
, 3, (RMDX
, RMDX
, I63s
), mav_shift
),
21134 cCE("cfcmps", e100490
, 3, (RR
, RMF
, RMF
), rd_rn_rm
),
21135 cCE("cfcmpd", e1004b0
, 3, (RR
, RMD
, RMD
), rd_rn_rm
),
21136 cCE("cfcmp32", e100590
, 3, (RR
, RMFX
, RMFX
), rd_rn_rm
),
21137 cCE("cfcmp64", e1005b0
, 3, (RR
, RMDX
, RMDX
), rd_rn_rm
),
21138 cCE("cfabss", e300400
, 2, (RMF
, RMF
), rd_rn
),
21139 cCE("cfabsd", e300420
, 2, (RMD
, RMD
), rd_rn
),
21140 cCE("cfnegs", e300440
, 2, (RMF
, RMF
), rd_rn
),
21141 cCE("cfnegd", e300460
, 2, (RMD
, RMD
), rd_rn
),
21142 cCE("cfadds", e300480
, 3, (RMF
, RMF
, RMF
), rd_rn_rm
),
21143 cCE("cfaddd", e3004a0
, 3, (RMD
, RMD
, RMD
), rd_rn_rm
),
21144 cCE("cfsubs", e3004c0
, 3, (RMF
, RMF
, RMF
), rd_rn_rm
),
21145 cCE("cfsubd", e3004e0
, 3, (RMD
, RMD
, RMD
), rd_rn_rm
),
21146 cCE("cfmuls", e100400
, 3, (RMF
, RMF
, RMF
), rd_rn_rm
),
21147 cCE("cfmuld", e100420
, 3, (RMD
, RMD
, RMD
), rd_rn_rm
),
21148 cCE("cfabs32", e300500
, 2, (RMFX
, RMFX
), rd_rn
),
21149 cCE("cfabs64", e300520
, 2, (RMDX
, RMDX
), rd_rn
),
21150 cCE("cfneg32", e300540
, 2, (RMFX
, RMFX
), rd_rn
),
21151 cCE("cfneg64", e300560
, 2, (RMDX
, RMDX
), rd_rn
),
21152 cCE("cfadd32", e300580
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
21153 cCE("cfadd64", e3005a0
, 3, (RMDX
, RMDX
, RMDX
), rd_rn_rm
),
21154 cCE("cfsub32", e3005c0
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
21155 cCE("cfsub64", e3005e0
, 3, (RMDX
, RMDX
, RMDX
), rd_rn_rm
),
21156 cCE("cfmul32", e100500
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
21157 cCE("cfmul64", e100520
, 3, (RMDX
, RMDX
, RMDX
), rd_rn_rm
),
21158 cCE("cfmac32", e100540
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
21159 cCE("cfmsc32", e100560
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
21160 cCE("cfmadd32",e000600
, 4, (RMAX
, RMFX
, RMFX
, RMFX
), mav_quad
),
21161 cCE("cfmsub32",e100600
, 4, (RMAX
, RMFX
, RMFX
, RMFX
), mav_quad
),
21162 cCE("cfmadda32", e200600
, 4, (RMAX
, RMAX
, RMFX
, RMFX
), mav_quad
),
21163 cCE("cfmsuba32", e300600
, 4, (RMAX
, RMAX
, RMFX
, RMFX
), mav_quad
),
21165 /* ARMv8-M instructions. */
21167 #define ARM_VARIANT NULL
21168 #undef THUMB_VARIANT
21169 #define THUMB_VARIANT & arm_ext_v8m
21170 TUE("sg", 0, e97fe97f
, 0, (), 0, noargs
),
21171 TUE("blxns", 0, 4784, 1, (RRnpc
), 0, t_blx
),
21172 TUE("bxns", 0, 4704, 1, (RRnpc
), 0, t_bx
),
21173 TUE("tt", 0, e840f000
, 2, (RRnpc
, RRnpc
), 0, tt
),
21174 TUE("ttt", 0, e840f040
, 2, (RRnpc
, RRnpc
), 0, tt
),
21175 TUE("tta", 0, e840f080
, 2, (RRnpc
, RRnpc
), 0, tt
),
21176 TUE("ttat", 0, e840f0c0
, 2, (RRnpc
, RRnpc
), 0, tt
),
21178 /* FP for ARMv8-M Mainline. Enabled for ARMv8-M Mainline because the
21179 instructions behave as nop if no VFP is present. */
21180 #undef THUMB_VARIANT
21181 #define THUMB_VARIANT & arm_ext_v8m_main
21182 TUEc("vlldm", 0, ec300a00
, 1, (RRnpc
), rn
),
21183 TUEc("vlstm", 0, ec200a00
, 1, (RRnpc
), rn
),
21186 #undef THUMB_VARIANT
21212 /* MD interface: bits in the object file. */
21214 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
21215 for use in the a.out file, and stores them in the array pointed to by buf.
21216 This knows about the endian-ness of the target machine and does
21217 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
21218 2 (short) and 4 (long) Floating numbers are put out as a series of
21219 LITTLENUMS (shorts, here at least). */
21222 md_number_to_chars (char * buf
, valueT val
, int n
)
21224 if (target_big_endian
)
21225 number_to_chars_bigendian (buf
, val
, n
);
21227 number_to_chars_littleendian (buf
, val
, n
);
21231 md_chars_to_number (char * buf
, int n
)
21234 unsigned char * where
= (unsigned char *) buf
;
21236 if (target_big_endian
)
21241 result
|= (*where
++ & 255);
21249 result
|= (where
[n
] & 255);
21256 /* MD interface: Sections. */
21258 /* Calculate the maximum variable size (i.e., excluding fr_fix)
21259 that an rs_machine_dependent frag may reach. */
21262 arm_frag_max_var (fragS
*fragp
)
21264 /* We only use rs_machine_dependent for variable-size Thumb instructions,
21265 which are either THUMB_SIZE (2) or INSN_SIZE (4).
21267 Note that we generate relaxable instructions even for cases that don't
21268 really need it, like an immediate that's a trivial constant. So we're
21269 overestimating the instruction size for some of those cases. Rather
21270 than putting more intelligence here, it would probably be better to
21271 avoid generating a relaxation frag in the first place when it can be
21272 determined up front that a short instruction will suffice. */
21274 gas_assert (fragp
->fr_type
== rs_machine_dependent
);
21278 /* Estimate the size of a frag before relaxing. Assume everything fits in
21282 md_estimate_size_before_relax (fragS
* fragp
,
21283 segT segtype ATTRIBUTE_UNUSED
)
21289 /* Convert a machine dependent frag. */
21292 md_convert_frag (bfd
*abfd
, segT asec ATTRIBUTE_UNUSED
, fragS
*fragp
)
21294 unsigned long insn
;
21295 unsigned long old_op
;
21303 buf
= fragp
->fr_literal
+ fragp
->fr_fix
;
21305 old_op
= bfd_get_16(abfd
, buf
);
21306 if (fragp
->fr_symbol
)
21308 exp
.X_op
= O_symbol
;
21309 exp
.X_add_symbol
= fragp
->fr_symbol
;
21313 exp
.X_op
= O_constant
;
21315 exp
.X_add_number
= fragp
->fr_offset
;
21316 opcode
= fragp
->fr_subtype
;
21319 case T_MNEM_ldr_pc
:
21320 case T_MNEM_ldr_pc2
:
21321 case T_MNEM_ldr_sp
:
21322 case T_MNEM_str_sp
:
21329 if (fragp
->fr_var
== 4)
21331 insn
= THUMB_OP32 (opcode
);
21332 if ((old_op
>> 12) == 4 || (old_op
>> 12) == 9)
21334 insn
|= (old_op
& 0x700) << 4;
21338 insn
|= (old_op
& 7) << 12;
21339 insn
|= (old_op
& 0x38) << 13;
21341 insn
|= 0x00000c00;
21342 put_thumb32_insn (buf
, insn
);
21343 reloc_type
= BFD_RELOC_ARM_T32_OFFSET_IMM
;
21347 reloc_type
= BFD_RELOC_ARM_THUMB_OFFSET
;
21349 pc_rel
= (opcode
== T_MNEM_ldr_pc2
);
21352 if (fragp
->fr_var
== 4)
21354 insn
= THUMB_OP32 (opcode
);
21355 insn
|= (old_op
& 0xf0) << 4;
21356 put_thumb32_insn (buf
, insn
);
21357 reloc_type
= BFD_RELOC_ARM_T32_ADD_PC12
;
21361 reloc_type
= BFD_RELOC_ARM_THUMB_ADD
;
21362 exp
.X_add_number
-= 4;
21370 if (fragp
->fr_var
== 4)
21372 int r0off
= (opcode
== T_MNEM_mov
21373 || opcode
== T_MNEM_movs
) ? 0 : 8;
21374 insn
= THUMB_OP32 (opcode
);
21375 insn
= (insn
& 0xe1ffffff) | 0x10000000;
21376 insn
|= (old_op
& 0x700) << r0off
;
21377 put_thumb32_insn (buf
, insn
);
21378 reloc_type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
21382 reloc_type
= BFD_RELOC_ARM_THUMB_IMM
;
21387 if (fragp
->fr_var
== 4)
21389 insn
= THUMB_OP32(opcode
);
21390 put_thumb32_insn (buf
, insn
);
21391 reloc_type
= BFD_RELOC_THUMB_PCREL_BRANCH25
;
21394 reloc_type
= BFD_RELOC_THUMB_PCREL_BRANCH12
;
21398 if (fragp
->fr_var
== 4)
21400 insn
= THUMB_OP32(opcode
);
21401 insn
|= (old_op
& 0xf00) << 14;
21402 put_thumb32_insn (buf
, insn
);
21403 reloc_type
= BFD_RELOC_THUMB_PCREL_BRANCH20
;
21406 reloc_type
= BFD_RELOC_THUMB_PCREL_BRANCH9
;
21409 case T_MNEM_add_sp
:
21410 case T_MNEM_add_pc
:
21411 case T_MNEM_inc_sp
:
21412 case T_MNEM_dec_sp
:
21413 if (fragp
->fr_var
== 4)
21415 /* ??? Choose between add and addw. */
21416 insn
= THUMB_OP32 (opcode
);
21417 insn
|= (old_op
& 0xf0) << 4;
21418 put_thumb32_insn (buf
, insn
);
21419 if (opcode
== T_MNEM_add_pc
)
21420 reloc_type
= BFD_RELOC_ARM_T32_IMM12
;
21422 reloc_type
= BFD_RELOC_ARM_T32_ADD_IMM
;
21425 reloc_type
= BFD_RELOC_ARM_THUMB_ADD
;
21433 if (fragp
->fr_var
== 4)
21435 insn
= THUMB_OP32 (opcode
);
21436 insn
|= (old_op
& 0xf0) << 4;
21437 insn
|= (old_op
& 0xf) << 16;
21438 put_thumb32_insn (buf
, insn
);
21439 if (insn
& (1 << 20))
21440 reloc_type
= BFD_RELOC_ARM_T32_ADD_IMM
;
21442 reloc_type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
21445 reloc_type
= BFD_RELOC_ARM_THUMB_ADD
;
21451 fixp
= fix_new_exp (fragp
, fragp
->fr_fix
, fragp
->fr_var
, &exp
, pc_rel
,
21452 (enum bfd_reloc_code_real
) reloc_type
);
21453 fixp
->fx_file
= fragp
->fr_file
;
21454 fixp
->fx_line
= fragp
->fr_line
;
21455 fragp
->fr_fix
+= fragp
->fr_var
;
21457 /* Set whether we use thumb-2 ISA based on final relaxation results. */
21458 if (thumb_mode
&& fragp
->fr_var
== 4 && no_cpu_selected ()
21459 && !ARM_CPU_HAS_FEATURE (thumb_arch_used
, arm_arch_t2
))
21460 ARM_MERGE_FEATURE_SETS (arm_arch_used
, thumb_arch_used
, arm_ext_v6t2
);
21463 /* Return the size of a relaxable immediate operand instruction.
21464 SHIFT and SIZE specify the form of the allowable immediate. */
21466 relax_immediate (fragS
*fragp
, int size
, int shift
)
21472 /* ??? Should be able to do better than this. */
21473 if (fragp
->fr_symbol
)
21476 low
= (1 << shift
) - 1;
21477 mask
= (1 << (shift
+ size
)) - (1 << shift
);
21478 offset
= fragp
->fr_offset
;
21479 /* Force misaligned offsets to 32-bit variant. */
21482 if (offset
& ~mask
)
21487 /* Get the address of a symbol during relaxation. */
21489 relaxed_symbol_addr (fragS
*fragp
, long stretch
)
21495 sym
= fragp
->fr_symbol
;
21496 sym_frag
= symbol_get_frag (sym
);
21497 know (S_GET_SEGMENT (sym
) != absolute_section
21498 || sym_frag
== &zero_address_frag
);
21499 addr
= S_GET_VALUE (sym
) + fragp
->fr_offset
;
21501 /* If frag has yet to be reached on this pass, assume it will
21502 move by STRETCH just as we did. If this is not so, it will
21503 be because some frag between grows, and that will force
21507 && sym_frag
->relax_marker
!= fragp
->relax_marker
)
21511 /* Adjust stretch for any alignment frag. Note that if have
21512 been expanding the earlier code, the symbol may be
21513 defined in what appears to be an earlier frag. FIXME:
21514 This doesn't handle the fr_subtype field, which specifies
21515 a maximum number of bytes to skip when doing an
21517 for (f
= fragp
; f
!= NULL
&& f
!= sym_frag
; f
= f
->fr_next
)
21519 if (f
->fr_type
== rs_align
|| f
->fr_type
== rs_align_code
)
21522 stretch
= - ((- stretch
)
21523 & ~ ((1 << (int) f
->fr_offset
) - 1));
21525 stretch
&= ~ ((1 << (int) f
->fr_offset
) - 1);
21537 /* Return the size of a relaxable adr pseudo-instruction or PC-relative
21540 relax_adr (fragS
*fragp
, asection
*sec
, long stretch
)
21545 /* Assume worst case for symbols not known to be in the same section. */
21546 if (fragp
->fr_symbol
== NULL
21547 || !S_IS_DEFINED (fragp
->fr_symbol
)
21548 || sec
!= S_GET_SEGMENT (fragp
->fr_symbol
)
21549 || S_IS_WEAK (fragp
->fr_symbol
))
21552 val
= relaxed_symbol_addr (fragp
, stretch
);
21553 addr
= fragp
->fr_address
+ fragp
->fr_fix
;
21554 addr
= (addr
+ 4) & ~3;
21555 /* Force misaligned targets to 32-bit variant. */
21559 if (val
< 0 || val
> 1020)
21564 /* Return the size of a relaxable add/sub immediate instruction. */
21566 relax_addsub (fragS
*fragp
, asection
*sec
)
21571 buf
= fragp
->fr_literal
+ fragp
->fr_fix
;
21572 op
= bfd_get_16(sec
->owner
, buf
);
21573 if ((op
& 0xf) == ((op
>> 4) & 0xf))
21574 return relax_immediate (fragp
, 8, 0);
21576 return relax_immediate (fragp
, 3, 0);
21579 /* Return TRUE iff the definition of symbol S could be pre-empted
21580 (overridden) at link or load time. */
21582 symbol_preemptible (symbolS
*s
)
21584 /* Weak symbols can always be pre-empted. */
21588 /* Non-global symbols cannot be pre-empted. */
21589 if (! S_IS_EXTERNAL (s
))
21593 /* In ELF, a global symbol can be marked protected, or private. In that
21594 case it can't be pre-empted (other definitions in the same link unit
21595 would violate the ODR). */
21596 if (ELF_ST_VISIBILITY (S_GET_OTHER (s
)) > STV_DEFAULT
)
21600 /* Other global symbols might be pre-empted. */
21604 /* Return the size of a relaxable branch instruction. BITS is the
21605 size of the offset field in the narrow instruction. */
21608 relax_branch (fragS
*fragp
, asection
*sec
, int bits
, long stretch
)
21614 /* Assume worst case for symbols not known to be in the same section. */
21615 if (!S_IS_DEFINED (fragp
->fr_symbol
)
21616 || sec
!= S_GET_SEGMENT (fragp
->fr_symbol
)
21617 || S_IS_WEAK (fragp
->fr_symbol
))
21621 /* A branch to a function in ARM state will require interworking. */
21622 if (S_IS_DEFINED (fragp
->fr_symbol
)
21623 && ARM_IS_FUNC (fragp
->fr_symbol
))
21627 if (symbol_preemptible (fragp
->fr_symbol
))
21630 val
= relaxed_symbol_addr (fragp
, stretch
);
21631 addr
= fragp
->fr_address
+ fragp
->fr_fix
+ 4;
21634 /* Offset is a signed value *2 */
21636 if (val
>= limit
|| val
< -limit
)
21642 /* Relax a machine dependent frag. This returns the amount by which
21643 the current size of the frag should change. */
21646 arm_relax_frag (asection
*sec
, fragS
*fragp
, long stretch
)
21651 oldsize
= fragp
->fr_var
;
21652 switch (fragp
->fr_subtype
)
21654 case T_MNEM_ldr_pc2
:
21655 newsize
= relax_adr (fragp
, sec
, stretch
);
21657 case T_MNEM_ldr_pc
:
21658 case T_MNEM_ldr_sp
:
21659 case T_MNEM_str_sp
:
21660 newsize
= relax_immediate (fragp
, 8, 2);
21664 newsize
= relax_immediate (fragp
, 5, 2);
21668 newsize
= relax_immediate (fragp
, 5, 1);
21672 newsize
= relax_immediate (fragp
, 5, 0);
21675 newsize
= relax_adr (fragp
, sec
, stretch
);
21681 newsize
= relax_immediate (fragp
, 8, 0);
21684 newsize
= relax_branch (fragp
, sec
, 11, stretch
);
21687 newsize
= relax_branch (fragp
, sec
, 8, stretch
);
21689 case T_MNEM_add_sp
:
21690 case T_MNEM_add_pc
:
21691 newsize
= relax_immediate (fragp
, 8, 2);
21693 case T_MNEM_inc_sp
:
21694 case T_MNEM_dec_sp
:
21695 newsize
= relax_immediate (fragp
, 7, 2);
21701 newsize
= relax_addsub (fragp
, sec
);
21707 fragp
->fr_var
= newsize
;
21708 /* Freeze wide instructions that are at or before the same location as
21709 in the previous pass. This avoids infinite loops.
21710 Don't freeze them unconditionally because targets may be artificially
21711 misaligned by the expansion of preceding frags. */
21712 if (stretch
<= 0 && newsize
> 2)
21714 md_convert_frag (sec
->owner
, sec
, fragp
);
21718 return newsize
- oldsize
;
21721 /* Round up a section size to the appropriate boundary. */
21724 md_section_align (segT segment ATTRIBUTE_UNUSED
,
21727 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
21728 if (OUTPUT_FLAVOR
== bfd_target_aout_flavour
)
21730 /* For a.out, force the section size to be aligned. If we don't do
21731 this, BFD will align it for us, but it will not write out the
21732 final bytes of the section. This may be a bug in BFD, but it is
21733 easier to fix it here since that is how the other a.out targets
21737 align
= bfd_get_section_alignment (stdoutput
, segment
);
21738 size
= ((size
+ (1 << align
) - 1) & (-((valueT
) 1 << align
)));
21745 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
21746 of an rs_align_code fragment. */
21749 arm_handle_align (fragS
* fragP
)
21751 static unsigned char const arm_noop
[2][2][4] =
21754 {0x00, 0x00, 0xa0, 0xe1}, /* LE */
21755 {0xe1, 0xa0, 0x00, 0x00}, /* BE */
21758 {0x00, 0xf0, 0x20, 0xe3}, /* LE */
21759 {0xe3, 0x20, 0xf0, 0x00}, /* BE */
21762 static unsigned char const thumb_noop
[2][2][2] =
21765 {0xc0, 0x46}, /* LE */
21766 {0x46, 0xc0}, /* BE */
21769 {0x00, 0xbf}, /* LE */
21770 {0xbf, 0x00} /* BE */
21773 static unsigned char const wide_thumb_noop
[2][4] =
21774 { /* Wide Thumb-2 */
21775 {0xaf, 0xf3, 0x00, 0x80}, /* LE */
21776 {0xf3, 0xaf, 0x80, 0x00}, /* BE */
21779 unsigned bytes
, fix
, noop_size
;
21781 const unsigned char * noop
;
21782 const unsigned char *narrow_noop
= NULL
;
21787 if (fragP
->fr_type
!= rs_align_code
)
21790 bytes
= fragP
->fr_next
->fr_address
- fragP
->fr_address
- fragP
->fr_fix
;
21791 p
= fragP
->fr_literal
+ fragP
->fr_fix
;
21794 if (bytes
> MAX_MEM_FOR_RS_ALIGN_CODE
)
21795 bytes
&= MAX_MEM_FOR_RS_ALIGN_CODE
;
21797 gas_assert ((fragP
->tc_frag_data
.thumb_mode
& MODE_RECORDED
) != 0);
21799 if (fragP
->tc_frag_data
.thumb_mode
& (~ MODE_RECORDED
))
21801 if (ARM_CPU_HAS_FEATURE (selected_cpu_name
[0]
21802 ? selected_cpu
: arm_arch_none
, arm_ext_v6t2
))
21804 narrow_noop
= thumb_noop
[1][target_big_endian
];
21805 noop
= wide_thumb_noop
[target_big_endian
];
21808 noop
= thumb_noop
[0][target_big_endian
];
21816 noop
= arm_noop
[ARM_CPU_HAS_FEATURE (selected_cpu_name
[0]
21817 ? selected_cpu
: arm_arch_none
,
21819 [target_big_endian
];
21826 fragP
->fr_var
= noop_size
;
21828 if (bytes
& (noop_size
- 1))
21830 fix
= bytes
& (noop_size
- 1);
21832 insert_data_mapping_symbol (state
, fragP
->fr_fix
, fragP
, fix
);
21834 memset (p
, 0, fix
);
21841 if (bytes
& noop_size
)
21843 /* Insert a narrow noop. */
21844 memcpy (p
, narrow_noop
, noop_size
);
21846 bytes
-= noop_size
;
21850 /* Use wide noops for the remainder */
21854 while (bytes
>= noop_size
)
21856 memcpy (p
, noop
, noop_size
);
21858 bytes
-= noop_size
;
21862 fragP
->fr_fix
+= fix
;
21865 /* Called from md_do_align. Used to create an alignment
21866 frag in a code section. */
21869 arm_frag_align_code (int n
, int max
)
21873 /* We assume that there will never be a requirement
21874 to support alignments greater than MAX_MEM_FOR_RS_ALIGN_CODE bytes. */
21875 if (max
> MAX_MEM_FOR_RS_ALIGN_CODE
)
21880 _("alignments greater than %d bytes not supported in .text sections."),
21881 MAX_MEM_FOR_RS_ALIGN_CODE
+ 1);
21882 as_fatal ("%s", err_msg
);
21885 p
= frag_var (rs_align_code
,
21886 MAX_MEM_FOR_RS_ALIGN_CODE
,
21888 (relax_substateT
) max
,
21895 /* Perform target specific initialisation of a frag.
21896 Note - despite the name this initialisation is not done when the frag
21897 is created, but only when its type is assigned. A frag can be created
21898 and used a long time before its type is set, so beware of assuming that
21899 this initialisation is performed first. */
21903 arm_init_frag (fragS
* fragP
, int max_chars ATTRIBUTE_UNUSED
)
21905 /* Record whether this frag is in an ARM or a THUMB area. */
21906 fragP
->tc_frag_data
.thumb_mode
= thumb_mode
| MODE_RECORDED
;
21909 #else /* OBJ_ELF is defined. */
21911 arm_init_frag (fragS
* fragP
, int max_chars
)
21913 int frag_thumb_mode
;
21915 /* If the current ARM vs THUMB mode has not already
21916 been recorded into this frag then do so now. */
21917 if ((fragP
->tc_frag_data
.thumb_mode
& MODE_RECORDED
) == 0)
21918 fragP
->tc_frag_data
.thumb_mode
= thumb_mode
| MODE_RECORDED
;
21920 frag_thumb_mode
= fragP
->tc_frag_data
.thumb_mode
^ MODE_RECORDED
;
21922 /* Record a mapping symbol for alignment frags. We will delete this
21923 later if the alignment ends up empty. */
21924 switch (fragP
->fr_type
)
21927 case rs_align_test
:
21929 mapping_state_2 (MAP_DATA
, max_chars
);
21931 case rs_align_code
:
21932 mapping_state_2 (frag_thumb_mode
? MAP_THUMB
: MAP_ARM
, max_chars
);
21939 /* When we change sections we need to issue a new mapping symbol. */
21942 arm_elf_change_section (void)
21944 /* Link an unlinked unwind index table section to the .text section. */
21945 if (elf_section_type (now_seg
) == SHT_ARM_EXIDX
21946 && elf_linked_to_section (now_seg
) == NULL
)
21947 elf_linked_to_section (now_seg
) = text_section
;
21951 arm_elf_section_type (const char * str
, size_t len
)
21953 if (len
== 5 && strncmp (str
, "exidx", 5) == 0)
21954 return SHT_ARM_EXIDX
;
21959 /* Code to deal with unwinding tables. */
21961 static void add_unwind_adjustsp (offsetT
);
21963 /* Generate any deferred unwind frame offset. */
21966 flush_pending_unwind (void)
21970 offset
= unwind
.pending_offset
;
21971 unwind
.pending_offset
= 0;
21973 add_unwind_adjustsp (offset
);
21976 /* Add an opcode to this list for this function. Two-byte opcodes should
21977 be passed as op[0] << 8 | op[1]. The list of opcodes is built in reverse
21981 add_unwind_opcode (valueT op
, int length
)
21983 /* Add any deferred stack adjustment. */
21984 if (unwind
.pending_offset
)
21985 flush_pending_unwind ();
21987 unwind
.sp_restored
= 0;
21989 if (unwind
.opcode_count
+ length
> unwind
.opcode_alloc
)
21991 unwind
.opcode_alloc
+= ARM_OPCODE_CHUNK_SIZE
;
21992 if (unwind
.opcodes
)
21993 unwind
.opcodes
= XRESIZEVEC (unsigned char, unwind
.opcodes
,
21994 unwind
.opcode_alloc
);
21996 unwind
.opcodes
= XNEWVEC (unsigned char, unwind
.opcode_alloc
);
22001 unwind
.opcodes
[unwind
.opcode_count
] = op
& 0xff;
22003 unwind
.opcode_count
++;
22007 /* Add unwind opcodes to adjust the stack pointer. */
22010 add_unwind_adjustsp (offsetT offset
)
22014 if (offset
> 0x200)
22016 /* We need at most 5 bytes to hold a 32-bit value in a uleb128. */
22021 /* Long form: 0xb2, uleb128. */
22022 /* This might not fit in a word so add the individual bytes,
22023 remembering the list is built in reverse order. */
22024 o
= (valueT
) ((offset
- 0x204) >> 2);
22026 add_unwind_opcode (0, 1);
22028 /* Calculate the uleb128 encoding of the offset. */
22032 bytes
[n
] = o
& 0x7f;
22038 /* Add the insn. */
22040 add_unwind_opcode (bytes
[n
- 1], 1);
22041 add_unwind_opcode (0xb2, 1);
22043 else if (offset
> 0x100)
22045 /* Two short opcodes. */
22046 add_unwind_opcode (0x3f, 1);
22047 op
= (offset
- 0x104) >> 2;
22048 add_unwind_opcode (op
, 1);
22050 else if (offset
> 0)
22052 /* Short opcode. */
22053 op
= (offset
- 4) >> 2;
22054 add_unwind_opcode (op
, 1);
22056 else if (offset
< 0)
22059 while (offset
> 0x100)
22061 add_unwind_opcode (0x7f, 1);
22064 op
= ((offset
- 4) >> 2) | 0x40;
22065 add_unwind_opcode (op
, 1);
22069 /* Finish the list of unwind opcodes for this function. */
22071 finish_unwind_opcodes (void)
22075 if (unwind
.fp_used
)
22077 /* Adjust sp as necessary. */
22078 unwind
.pending_offset
+= unwind
.fp_offset
- unwind
.frame_size
;
22079 flush_pending_unwind ();
22081 /* After restoring sp from the frame pointer. */
22082 op
= 0x90 | unwind
.fp_reg
;
22083 add_unwind_opcode (op
, 1);
22086 flush_pending_unwind ();
22090 /* Start an exception table entry. If idx is nonzero this is an index table
22094 start_unwind_section (const segT text_seg
, int idx
)
22096 const char * text_name
;
22097 const char * prefix
;
22098 const char * prefix_once
;
22099 const char * group_name
;
22107 prefix
= ELF_STRING_ARM_unwind
;
22108 prefix_once
= ELF_STRING_ARM_unwind_once
;
22109 type
= SHT_ARM_EXIDX
;
22113 prefix
= ELF_STRING_ARM_unwind_info
;
22114 prefix_once
= ELF_STRING_ARM_unwind_info_once
;
22115 type
= SHT_PROGBITS
;
22118 text_name
= segment_name (text_seg
);
22119 if (streq (text_name
, ".text"))
22122 if (strncmp (text_name
, ".gnu.linkonce.t.",
22123 strlen (".gnu.linkonce.t.")) == 0)
22125 prefix
= prefix_once
;
22126 text_name
+= strlen (".gnu.linkonce.t.");
22129 sec_name
= concat (prefix
, text_name
, (char *) NULL
);
22135 /* Handle COMDAT group. */
22136 if (prefix
!= prefix_once
&& (text_seg
->flags
& SEC_LINK_ONCE
) != 0)
22138 group_name
= elf_group_name (text_seg
);
22139 if (group_name
== NULL
)
22141 as_bad (_("Group section `%s' has no group signature"),
22142 segment_name (text_seg
));
22143 ignore_rest_of_line ();
22146 flags
|= SHF_GROUP
;
22150 obj_elf_change_section (sec_name
, type
, 0, flags
, 0, group_name
,
22153 /* Set the section link for index tables. */
22155 elf_linked_to_section (now_seg
) = text_seg
;
22159 /* Start an unwind table entry. HAVE_DATA is nonzero if we have additional
22160 personality routine data. Returns zero, or the index table value for
22161 an inline entry. */
22164 create_unwind_entry (int have_data
)
22169 /* The current word of data. */
22171 /* The number of bytes left in this word. */
22174 finish_unwind_opcodes ();
22176 /* Remember the current text section. */
22177 unwind
.saved_seg
= now_seg
;
22178 unwind
.saved_subseg
= now_subseg
;
22180 start_unwind_section (now_seg
, 0);
22182 if (unwind
.personality_routine
== NULL
)
22184 if (unwind
.personality_index
== -2)
22187 as_bad (_("handlerdata in cantunwind frame"));
22188 return 1; /* EXIDX_CANTUNWIND. */
22191 /* Use a default personality routine if none is specified. */
22192 if (unwind
.personality_index
== -1)
22194 if (unwind
.opcode_count
> 3)
22195 unwind
.personality_index
= 1;
22197 unwind
.personality_index
= 0;
22200 /* Space for the personality routine entry. */
22201 if (unwind
.personality_index
== 0)
22203 if (unwind
.opcode_count
> 3)
22204 as_bad (_("too many unwind opcodes for personality routine 0"));
22208 /* All the data is inline in the index table. */
22211 while (unwind
.opcode_count
> 0)
22213 unwind
.opcode_count
--;
22214 data
= (data
<< 8) | unwind
.opcodes
[unwind
.opcode_count
];
22218 /* Pad with "finish" opcodes. */
22220 data
= (data
<< 8) | 0xb0;
22227 /* We get two opcodes "free" in the first word. */
22228 size
= unwind
.opcode_count
- 2;
22232 /* PR 16765: Missing or misplaced unwind directives can trigger this. */
22233 if (unwind
.personality_index
!= -1)
22235 as_bad (_("attempt to recreate an unwind entry"));
22239 /* An extra byte is required for the opcode count. */
22240 size
= unwind
.opcode_count
+ 1;
22243 size
= (size
+ 3) >> 2;
22245 as_bad (_("too many unwind opcodes"));
22247 frag_align (2, 0, 0);
22248 record_alignment (now_seg
, 2);
22249 unwind
.table_entry
= expr_build_dot ();
22251 /* Allocate the table entry. */
22252 ptr
= frag_more ((size
<< 2) + 4);
22253 /* PR 13449: Zero the table entries in case some of them are not used. */
22254 memset (ptr
, 0, (size
<< 2) + 4);
22255 where
= frag_now_fix () - ((size
<< 2) + 4);
22257 switch (unwind
.personality_index
)
22260 /* ??? Should this be a PLT generating relocation? */
22261 /* Custom personality routine. */
22262 fix_new (frag_now
, where
, 4, unwind
.personality_routine
, 0, 1,
22263 BFD_RELOC_ARM_PREL31
);
22268 /* Set the first byte to the number of additional words. */
22269 data
= size
> 0 ? size
- 1 : 0;
22273 /* ABI defined personality routines. */
22275 /* Three opcodes bytes are packed into the first word. */
22282 /* The size and first two opcode bytes go in the first word. */
22283 data
= ((0x80 + unwind
.personality_index
) << 8) | size
;
22288 /* Should never happen. */
22292 /* Pack the opcodes into words (MSB first), reversing the list at the same
22294 while (unwind
.opcode_count
> 0)
22298 md_number_to_chars (ptr
, data
, 4);
22303 unwind
.opcode_count
--;
22305 data
= (data
<< 8) | unwind
.opcodes
[unwind
.opcode_count
];
22308 /* Finish off the last word. */
22311 /* Pad with "finish" opcodes. */
22313 data
= (data
<< 8) | 0xb0;
22315 md_number_to_chars (ptr
, data
, 4);
22320 /* Add an empty descriptor if there is no user-specified data. */
22321 ptr
= frag_more (4);
22322 md_number_to_chars (ptr
, 0, 4);
22329 /* Initialize the DWARF-2 unwind information for this procedure. */
22332 tc_arm_frame_initial_instructions (void)
22334 cfi_add_CFA_def_cfa (REG_SP
, 0);
22336 #endif /* OBJ_ELF */
22338 /* Convert REGNAME to a DWARF-2 register number. */
22341 tc_arm_regname_to_dw2regnum (char *regname
)
22343 int reg
= arm_reg_parse (®name
, REG_TYPE_RN
);
22347 /* PR 16694: Allow VFP registers as well. */
22348 reg
= arm_reg_parse (®name
, REG_TYPE_VFS
);
22352 reg
= arm_reg_parse (®name
, REG_TYPE_VFD
);
22361 tc_pe_dwarf2_emit_offset (symbolS
*symbol
, unsigned int size
)
22365 exp
.X_op
= O_secrel
;
22366 exp
.X_add_symbol
= symbol
;
22367 exp
.X_add_number
= 0;
22368 emit_expr (&exp
, size
);
22372 /* MD interface: Symbol and relocation handling. */
22374 /* Return the address within the segment that a PC-relative fixup is
22375 relative to. For ARM, PC-relative fixups applied to instructions
22376 are generally relative to the location of the fixup plus 8 bytes.
22377 Thumb branches are offset by 4, and Thumb loads relative to PC
22378 require special handling. */
22381 md_pcrel_from_section (fixS
* fixP
, segT seg
)
22383 offsetT base
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
22385 /* If this is pc-relative and we are going to emit a relocation
22386 then we just want to put out any pipeline compensation that the linker
22387 will need. Otherwise we want to use the calculated base.
22388 For WinCE we skip the bias for externals as well, since this
22389 is how the MS ARM-CE assembler behaves and we want to be compatible. */
22391 && ((fixP
->fx_addsy
&& S_GET_SEGMENT (fixP
->fx_addsy
) != seg
)
22392 || (arm_force_relocation (fixP
)
22394 && !S_IS_EXTERNAL (fixP
->fx_addsy
)
22400 switch (fixP
->fx_r_type
)
22402 /* PC relative addressing on the Thumb is slightly odd as the
22403 bottom two bits of the PC are forced to zero for the
22404 calculation. This happens *after* application of the
22405 pipeline offset. However, Thumb adrl already adjusts for
22406 this, so we need not do it again. */
22407 case BFD_RELOC_ARM_THUMB_ADD
:
22410 case BFD_RELOC_ARM_THUMB_OFFSET
:
22411 case BFD_RELOC_ARM_T32_OFFSET_IMM
:
22412 case BFD_RELOC_ARM_T32_ADD_PC12
:
22413 case BFD_RELOC_ARM_T32_CP_OFF_IMM
:
22414 return (base
+ 4) & ~3;
22416 /* Thumb branches are simply offset by +4. */
22417 case BFD_RELOC_THUMB_PCREL_BRANCH7
:
22418 case BFD_RELOC_THUMB_PCREL_BRANCH9
:
22419 case BFD_RELOC_THUMB_PCREL_BRANCH12
:
22420 case BFD_RELOC_THUMB_PCREL_BRANCH20
:
22421 case BFD_RELOC_THUMB_PCREL_BRANCH25
:
22424 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
22426 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
22427 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
22428 && ARM_IS_FUNC (fixP
->fx_addsy
)
22429 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
22430 base
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
22433 /* BLX is like branches above, but forces the low two bits of PC to
22435 case BFD_RELOC_THUMB_PCREL_BLX
:
22437 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
22438 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
22439 && THUMB_IS_FUNC (fixP
->fx_addsy
)
22440 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
22441 base
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
22442 return (base
+ 4) & ~3;
22444 /* ARM mode branches are offset by +8. However, the Windows CE
22445 loader expects the relocation not to take this into account. */
22446 case BFD_RELOC_ARM_PCREL_BLX
:
22448 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
22449 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
22450 && ARM_IS_FUNC (fixP
->fx_addsy
)
22451 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
22452 base
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
22455 case BFD_RELOC_ARM_PCREL_CALL
:
22457 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
22458 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
22459 && THUMB_IS_FUNC (fixP
->fx_addsy
)
22460 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
22461 base
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
22464 case BFD_RELOC_ARM_PCREL_BRANCH
:
22465 case BFD_RELOC_ARM_PCREL_JUMP
:
22466 case BFD_RELOC_ARM_PLT32
:
22468 /* When handling fixups immediately, because we have already
22469 discovered the value of a symbol, or the address of the frag involved
22470 we must account for the offset by +8, as the OS loader will never see the reloc.
22471 see fixup_segment() in write.c
22472 The S_IS_EXTERNAL test handles the case of global symbols.
22473 Those need the calculated base, not just the pipe compensation the linker will need. */
22475 && fixP
->fx_addsy
!= NULL
22476 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
22477 && (S_IS_EXTERNAL (fixP
->fx_addsy
) || !arm_force_relocation (fixP
)))
22485 /* ARM mode loads relative to PC are also offset by +8. Unlike
22486 branches, the Windows CE loader *does* expect the relocation
22487 to take this into account. */
22488 case BFD_RELOC_ARM_OFFSET_IMM
:
22489 case BFD_RELOC_ARM_OFFSET_IMM8
:
22490 case BFD_RELOC_ARM_HWLITERAL
:
22491 case BFD_RELOC_ARM_LITERAL
:
22492 case BFD_RELOC_ARM_CP_OFF_IMM
:
22496 /* Other PC-relative relocations are un-offset. */
22502 static bfd_boolean flag_warn_syms
= TRUE
;
22505 arm_tc_equal_in_insn (int c ATTRIBUTE_UNUSED
, char * name
)
22507 /* PR 18347 - Warn if the user attempts to create a symbol with the same
22508 name as an ARM instruction. Whilst strictly speaking it is allowed, it
22509 does mean that the resulting code might be very confusing to the reader.
22510 Also this warning can be triggered if the user omits an operand before
22511 an immediate address, eg:
22515 GAS treats this as an assignment of the value of the symbol foo to a
22516 symbol LDR, and so (without this code) it will not issue any kind of
22517 warning or error message.
22519 Note - ARM instructions are case-insensitive but the strings in the hash
22520 table are all stored in lower case, so we must first ensure that name is
22522 if (flag_warn_syms
&& arm_ops_hsh
)
22524 char * nbuf
= strdup (name
);
22527 for (p
= nbuf
; *p
; p
++)
22529 if (hash_find (arm_ops_hsh
, nbuf
) != NULL
)
22531 static struct hash_control
* already_warned
= NULL
;
22533 if (already_warned
== NULL
)
22534 already_warned
= hash_new ();
22535 /* Only warn about the symbol once. To keep the code
22536 simple we let hash_insert do the lookup for us. */
22537 if (hash_insert (already_warned
, name
, NULL
) == NULL
)
22538 as_warn (_("[-mwarn-syms]: Assignment makes a symbol match an ARM instruction: %s"), name
);
22547 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
22548 Otherwise we have no need to default values of symbols. */
22551 md_undefined_symbol (char * name ATTRIBUTE_UNUSED
)
22554 if (name
[0] == '_' && name
[1] == 'G'
22555 && streq (name
, GLOBAL_OFFSET_TABLE_NAME
))
22559 if (symbol_find (name
))
22560 as_bad (_("GOT already in the symbol table"));
22562 GOT_symbol
= symbol_new (name
, undefined_section
,
22563 (valueT
) 0, & zero_address_frag
);
22573 /* Subroutine of md_apply_fix. Check to see if an immediate can be
22574 computed as two separate immediate values, added together. We
22575 already know that this value cannot be computed by just one ARM
22578 static unsigned int
22579 validate_immediate_twopart (unsigned int val
,
22580 unsigned int * highpart
)
22585 for (i
= 0; i
< 32; i
+= 2)
22586 if (((a
= rotate_left (val
, i
)) & 0xff) != 0)
22592 * highpart
= (a
>> 8) | ((i
+ 24) << 7);
22594 else if (a
& 0xff0000)
22596 if (a
& 0xff000000)
22598 * highpart
= (a
>> 16) | ((i
+ 16) << 7);
22602 gas_assert (a
& 0xff000000);
22603 * highpart
= (a
>> 24) | ((i
+ 8) << 7);
22606 return (a
& 0xff) | (i
<< 7);
22613 validate_offset_imm (unsigned int val
, int hwse
)
22615 if ((hwse
&& val
> 255) || val
> 4095)
22620 /* Subroutine of md_apply_fix. Do those data_ops which can take a
22621 negative immediate constant by altering the instruction. A bit of
22626 by inverting the second operand, and
22629 by negating the second operand. */
22632 negate_data_op (unsigned long * instruction
,
22633 unsigned long value
)
22636 unsigned long negated
, inverted
;
22638 negated
= encode_arm_immediate (-value
);
22639 inverted
= encode_arm_immediate (~value
);
22641 op
= (*instruction
>> DATA_OP_SHIFT
) & 0xf;
22644 /* First negates. */
22645 case OPCODE_SUB
: /* ADD <-> SUB */
22646 new_inst
= OPCODE_ADD
;
22651 new_inst
= OPCODE_SUB
;
22655 case OPCODE_CMP
: /* CMP <-> CMN */
22656 new_inst
= OPCODE_CMN
;
22661 new_inst
= OPCODE_CMP
;
22665 /* Now Inverted ops. */
22666 case OPCODE_MOV
: /* MOV <-> MVN */
22667 new_inst
= OPCODE_MVN
;
22672 new_inst
= OPCODE_MOV
;
22676 case OPCODE_AND
: /* AND <-> BIC */
22677 new_inst
= OPCODE_BIC
;
22682 new_inst
= OPCODE_AND
;
22686 case OPCODE_ADC
: /* ADC <-> SBC */
22687 new_inst
= OPCODE_SBC
;
22692 new_inst
= OPCODE_ADC
;
22696 /* We cannot do anything. */
22701 if (value
== (unsigned) FAIL
)
22704 *instruction
&= OPCODE_MASK
;
22705 *instruction
|= new_inst
<< DATA_OP_SHIFT
;
22709 /* Like negate_data_op, but for Thumb-2. */
22711 static unsigned int
22712 thumb32_negate_data_op (offsetT
*instruction
, unsigned int value
)
22716 unsigned int negated
, inverted
;
22718 negated
= encode_thumb32_immediate (-value
);
22719 inverted
= encode_thumb32_immediate (~value
);
22721 rd
= (*instruction
>> 8) & 0xf;
22722 op
= (*instruction
>> T2_DATA_OP_SHIFT
) & 0xf;
22725 /* ADD <-> SUB. Includes CMP <-> CMN. */
22726 case T2_OPCODE_SUB
:
22727 new_inst
= T2_OPCODE_ADD
;
22731 case T2_OPCODE_ADD
:
22732 new_inst
= T2_OPCODE_SUB
;
22736 /* ORR <-> ORN. Includes MOV <-> MVN. */
22737 case T2_OPCODE_ORR
:
22738 new_inst
= T2_OPCODE_ORN
;
22742 case T2_OPCODE_ORN
:
22743 new_inst
= T2_OPCODE_ORR
;
22747 /* AND <-> BIC. TST has no inverted equivalent. */
22748 case T2_OPCODE_AND
:
22749 new_inst
= T2_OPCODE_BIC
;
22756 case T2_OPCODE_BIC
:
22757 new_inst
= T2_OPCODE_AND
;
22762 case T2_OPCODE_ADC
:
22763 new_inst
= T2_OPCODE_SBC
;
22767 case T2_OPCODE_SBC
:
22768 new_inst
= T2_OPCODE_ADC
;
22772 /* We cannot do anything. */
22777 if (value
== (unsigned int)FAIL
)
22780 *instruction
&= T2_OPCODE_MASK
;
22781 *instruction
|= new_inst
<< T2_DATA_OP_SHIFT
;
22785 /* Read a 32-bit thumb instruction from buf. */
22786 static unsigned long
22787 get_thumb32_insn (char * buf
)
22789 unsigned long insn
;
22790 insn
= md_chars_to_number (buf
, THUMB_SIZE
) << 16;
22791 insn
|= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
22797 /* We usually want to set the low bit on the address of thumb function
22798 symbols. In particular .word foo - . should have the low bit set.
22799 Generic code tries to fold the difference of two symbols to
22800 a constant. Prevent this and force a relocation when the first symbols
22801 is a thumb function. */
22804 arm_optimize_expr (expressionS
*l
, operatorT op
, expressionS
*r
)
22806 if (op
== O_subtract
22807 && l
->X_op
== O_symbol
22808 && r
->X_op
== O_symbol
22809 && THUMB_IS_FUNC (l
->X_add_symbol
))
22811 l
->X_op
= O_subtract
;
22812 l
->X_op_symbol
= r
->X_add_symbol
;
22813 l
->X_add_number
-= r
->X_add_number
;
22817 /* Process as normal. */
22821 /* Encode Thumb2 unconditional branches and calls. The encoding
22822 for the 2 are identical for the immediate values. */
22825 encode_thumb2_b_bl_offset (char * buf
, offsetT value
)
22827 #define T2I1I2MASK ((1 << 13) | (1 << 11))
22830 addressT S
, I1
, I2
, lo
, hi
;
22832 S
= (value
>> 24) & 0x01;
22833 I1
= (value
>> 23) & 0x01;
22834 I2
= (value
>> 22) & 0x01;
22835 hi
= (value
>> 12) & 0x3ff;
22836 lo
= (value
>> 1) & 0x7ff;
22837 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
22838 newval2
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
22839 newval
|= (S
<< 10) | hi
;
22840 newval2
&= ~T2I1I2MASK
;
22841 newval2
|= (((I1
^ S
) << 13) | ((I2
^ S
) << 11) | lo
) ^ T2I1I2MASK
;
22842 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
22843 md_number_to_chars (buf
+ THUMB_SIZE
, newval2
, THUMB_SIZE
);
22847 md_apply_fix (fixS
* fixP
,
22851 offsetT value
= * valP
;
22853 unsigned int newimm
;
22854 unsigned long temp
;
22856 char * buf
= fixP
->fx_where
+ fixP
->fx_frag
->fr_literal
;
22858 gas_assert (fixP
->fx_r_type
<= BFD_RELOC_UNUSED
);
22860 /* Note whether this will delete the relocation. */
22862 if (fixP
->fx_addsy
== 0 && !fixP
->fx_pcrel
)
22865 /* On a 64-bit host, silently truncate 'value' to 32 bits for
22866 consistency with the behaviour on 32-bit hosts. Remember value
22868 value
&= 0xffffffff;
22869 value
^= 0x80000000;
22870 value
-= 0x80000000;
22873 fixP
->fx_addnumber
= value
;
22875 /* Same treatment for fixP->fx_offset. */
22876 fixP
->fx_offset
&= 0xffffffff;
22877 fixP
->fx_offset
^= 0x80000000;
22878 fixP
->fx_offset
-= 0x80000000;
22880 switch (fixP
->fx_r_type
)
22882 case BFD_RELOC_NONE
:
22883 /* This will need to go in the object file. */
22887 case BFD_RELOC_ARM_IMMEDIATE
:
22888 /* We claim that this fixup has been processed here,
22889 even if in fact we generate an error because we do
22890 not have a reloc for it, so tc_gen_reloc will reject it. */
22893 if (fixP
->fx_addsy
)
22895 const char *msg
= 0;
22897 if (! S_IS_DEFINED (fixP
->fx_addsy
))
22898 msg
= _("undefined symbol %s used as an immediate value");
22899 else if (S_GET_SEGMENT (fixP
->fx_addsy
) != seg
)
22900 msg
= _("symbol %s is in a different section");
22901 else if (S_IS_WEAK (fixP
->fx_addsy
))
22902 msg
= _("symbol %s is weak and may be overridden later");
22906 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22907 msg
, S_GET_NAME (fixP
->fx_addsy
));
22912 temp
= md_chars_to_number (buf
, INSN_SIZE
);
22914 /* If the offset is negative, we should use encoding A2 for ADR. */
22915 if ((temp
& 0xfff0000) == 0x28f0000 && value
< 0)
22916 newimm
= negate_data_op (&temp
, value
);
22919 newimm
= encode_arm_immediate (value
);
22921 /* If the instruction will fail, see if we can fix things up by
22922 changing the opcode. */
22923 if (newimm
== (unsigned int) FAIL
)
22924 newimm
= negate_data_op (&temp
, value
);
22925 /* MOV accepts both ARM modified immediate (A1 encoding) and
22926 UINT16 (A2 encoding) when possible, MOVW only accepts UINT16.
22927 When disassembling, MOV is preferred when there is no encoding
22929 if (newimm
== (unsigned int) FAIL
22930 && ((temp
>> DATA_OP_SHIFT
) & 0xf) == OPCODE_MOV
22931 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2
)
22932 && !((temp
>> SBIT_SHIFT
) & 0x1)
22933 && value
>= 0 && value
<= 0xffff)
22935 /* Clear bits[23:20] to change encoding from A1 to A2. */
22936 temp
&= 0xff0fffff;
22937 /* Encoding high 4bits imm. Code below will encode the remaining
22939 temp
|= (value
& 0x0000f000) << 4;
22940 newimm
= value
& 0x00000fff;
22944 if (newimm
== (unsigned int) FAIL
)
22946 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22947 _("invalid constant (%lx) after fixup"),
22948 (unsigned long) value
);
22952 newimm
|= (temp
& 0xfffff000);
22953 md_number_to_chars (buf
, (valueT
) newimm
, INSN_SIZE
);
22956 case BFD_RELOC_ARM_ADRL_IMMEDIATE
:
22958 unsigned int highpart
= 0;
22959 unsigned int newinsn
= 0xe1a00000; /* nop. */
22961 if (fixP
->fx_addsy
)
22963 const char *msg
= 0;
22965 if (! S_IS_DEFINED (fixP
->fx_addsy
))
22966 msg
= _("undefined symbol %s used as an immediate value");
22967 else if (S_GET_SEGMENT (fixP
->fx_addsy
) != seg
)
22968 msg
= _("symbol %s is in a different section");
22969 else if (S_IS_WEAK (fixP
->fx_addsy
))
22970 msg
= _("symbol %s is weak and may be overridden later");
22974 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22975 msg
, S_GET_NAME (fixP
->fx_addsy
));
22980 newimm
= encode_arm_immediate (value
);
22981 temp
= md_chars_to_number (buf
, INSN_SIZE
);
22983 /* If the instruction will fail, see if we can fix things up by
22984 changing the opcode. */
22985 if (newimm
== (unsigned int) FAIL
22986 && (newimm
= negate_data_op (& temp
, value
)) == (unsigned int) FAIL
)
22988 /* No ? OK - try using two ADD instructions to generate
22990 newimm
= validate_immediate_twopart (value
, & highpart
);
22992 /* Yes - then make sure that the second instruction is
22994 if (newimm
!= (unsigned int) FAIL
)
22996 /* Still No ? Try using a negated value. */
22997 else if ((newimm
= validate_immediate_twopart (- value
, & highpart
)) != (unsigned int) FAIL
)
22998 temp
= newinsn
= (temp
& OPCODE_MASK
) | OPCODE_SUB
<< DATA_OP_SHIFT
;
22999 /* Otherwise - give up. */
23002 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23003 _("unable to compute ADRL instructions for PC offset of 0x%lx"),
23008 /* Replace the first operand in the 2nd instruction (which
23009 is the PC) with the destination register. We have
23010 already added in the PC in the first instruction and we
23011 do not want to do it again. */
23012 newinsn
&= ~ 0xf0000;
23013 newinsn
|= ((newinsn
& 0x0f000) << 4);
23016 newimm
|= (temp
& 0xfffff000);
23017 md_number_to_chars (buf
, (valueT
) newimm
, INSN_SIZE
);
23019 highpart
|= (newinsn
& 0xfffff000);
23020 md_number_to_chars (buf
+ INSN_SIZE
, (valueT
) highpart
, INSN_SIZE
);
23024 case BFD_RELOC_ARM_OFFSET_IMM
:
23025 if (!fixP
->fx_done
&& seg
->use_rela_p
)
23027 /* Fall through. */
23029 case BFD_RELOC_ARM_LITERAL
:
23035 if (validate_offset_imm (value
, 0) == FAIL
)
23037 if (fixP
->fx_r_type
== BFD_RELOC_ARM_LITERAL
)
23038 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23039 _("invalid literal constant: pool needs to be closer"));
23041 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23042 _("bad immediate value for offset (%ld)"),
23047 newval
= md_chars_to_number (buf
, INSN_SIZE
);
23049 newval
&= 0xfffff000;
23052 newval
&= 0xff7ff000;
23053 newval
|= value
| (sign
? INDEX_UP
: 0);
23055 md_number_to_chars (buf
, newval
, INSN_SIZE
);
23058 case BFD_RELOC_ARM_OFFSET_IMM8
:
23059 case BFD_RELOC_ARM_HWLITERAL
:
23065 if (validate_offset_imm (value
, 1) == FAIL
)
23067 if (fixP
->fx_r_type
== BFD_RELOC_ARM_HWLITERAL
)
23068 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23069 _("invalid literal constant: pool needs to be closer"));
23071 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23072 _("bad immediate value for 8-bit offset (%ld)"),
23077 newval
= md_chars_to_number (buf
, INSN_SIZE
);
23079 newval
&= 0xfffff0f0;
23082 newval
&= 0xff7ff0f0;
23083 newval
|= ((value
>> 4) << 8) | (value
& 0xf) | (sign
? INDEX_UP
: 0);
23085 md_number_to_chars (buf
, newval
, INSN_SIZE
);
23088 case BFD_RELOC_ARM_T32_OFFSET_U8
:
23089 if (value
< 0 || value
> 1020 || value
% 4 != 0)
23090 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23091 _("bad immediate value for offset (%ld)"), (long) value
);
23094 newval
= md_chars_to_number (buf
+2, THUMB_SIZE
);
23096 md_number_to_chars (buf
+2, newval
, THUMB_SIZE
);
23099 case BFD_RELOC_ARM_T32_OFFSET_IMM
:
23100 /* This is a complicated relocation used for all varieties of Thumb32
23101 load/store instruction with immediate offset:
23103 1110 100P u1WL NNNN XXXX YYYY iiii iiii - +/-(U) pre/post(P) 8-bit,
23104 *4, optional writeback(W)
23105 (doubleword load/store)
23107 1111 100S uTTL 1111 XXXX iiii iiii iiii - +/-(U) 12-bit PC-rel
23108 1111 100S 0TTL NNNN XXXX 1Pu1 iiii iiii - +/-(U) pre/post(P) 8-bit
23109 1111 100S 0TTL NNNN XXXX 1110 iiii iiii - positive 8-bit (T instruction)
23110 1111 100S 1TTL NNNN XXXX iiii iiii iiii - positive 12-bit
23111 1111 100S 0TTL NNNN XXXX 1100 iiii iiii - negative 8-bit
23113 Uppercase letters indicate bits that are already encoded at
23114 this point. Lowercase letters are our problem. For the
23115 second block of instructions, the secondary opcode nybble
23116 (bits 8..11) is present, and bit 23 is zero, even if this is
23117 a PC-relative operation. */
23118 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
23120 newval
|= md_chars_to_number (buf
+THUMB_SIZE
, THUMB_SIZE
);
23122 if ((newval
& 0xf0000000) == 0xe0000000)
23124 /* Doubleword load/store: 8-bit offset, scaled by 4. */
23126 newval
|= (1 << 23);
23129 if (value
% 4 != 0)
23131 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23132 _("offset not a multiple of 4"));
23138 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23139 _("offset out of range"));
23144 else if ((newval
& 0x000f0000) == 0x000f0000)
23146 /* PC-relative, 12-bit offset. */
23148 newval
|= (1 << 23);
23153 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23154 _("offset out of range"));
23159 else if ((newval
& 0x00000100) == 0x00000100)
23161 /* Writeback: 8-bit, +/- offset. */
23163 newval
|= (1 << 9);
23168 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23169 _("offset out of range"));
23174 else if ((newval
& 0x00000f00) == 0x00000e00)
23176 /* T-instruction: positive 8-bit offset. */
23177 if (value
< 0 || value
> 0xff)
23179 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23180 _("offset out of range"));
23188 /* Positive 12-bit or negative 8-bit offset. */
23192 newval
|= (1 << 23);
23202 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23203 _("offset out of range"));
23210 md_number_to_chars (buf
, (newval
>> 16) & 0xffff, THUMB_SIZE
);
23211 md_number_to_chars (buf
+ THUMB_SIZE
, newval
& 0xffff, THUMB_SIZE
);
23214 case BFD_RELOC_ARM_SHIFT_IMM
:
23215 newval
= md_chars_to_number (buf
, INSN_SIZE
);
23216 if (((unsigned long) value
) > 32
23218 && (((newval
& 0x60) == 0) || (newval
& 0x60) == 0x60)))
23220 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23221 _("shift expression is too large"));
23226 /* Shifts of zero must be done as lsl. */
23228 else if (value
== 32)
23230 newval
&= 0xfffff07f;
23231 newval
|= (value
& 0x1f) << 7;
23232 md_number_to_chars (buf
, newval
, INSN_SIZE
);
23235 case BFD_RELOC_ARM_T32_IMMEDIATE
:
23236 case BFD_RELOC_ARM_T32_ADD_IMM
:
23237 case BFD_RELOC_ARM_T32_IMM12
:
23238 case BFD_RELOC_ARM_T32_ADD_PC12
:
23239 /* We claim that this fixup has been processed here,
23240 even if in fact we generate an error because we do
23241 not have a reloc for it, so tc_gen_reloc will reject it. */
23245 && ! S_IS_DEFINED (fixP
->fx_addsy
))
23247 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23248 _("undefined symbol %s used as an immediate value"),
23249 S_GET_NAME (fixP
->fx_addsy
));
23253 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
23255 newval
|= md_chars_to_number (buf
+2, THUMB_SIZE
);
23258 if ((fixP
->fx_r_type
== BFD_RELOC_ARM_T32_IMMEDIATE
23259 /* ARMv8-M Baseline MOV will reach here, but it doesn't support
23260 Thumb2 modified immediate encoding (T2). */
23261 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2
))
23262 || fixP
->fx_r_type
== BFD_RELOC_ARM_T32_ADD_IMM
)
23264 newimm
= encode_thumb32_immediate (value
);
23265 if (newimm
== (unsigned int) FAIL
)
23266 newimm
= thumb32_negate_data_op (&newval
, value
);
23268 if (newimm
== (unsigned int) FAIL
)
23270 if (fixP
->fx_r_type
!= BFD_RELOC_ARM_T32_IMMEDIATE
)
23272 /* Turn add/sum into addw/subw. */
23273 if (fixP
->fx_r_type
== BFD_RELOC_ARM_T32_ADD_IMM
)
23274 newval
= (newval
& 0xfeffffff) | 0x02000000;
23275 /* No flat 12-bit imm encoding for addsw/subsw. */
23276 if ((newval
& 0x00100000) == 0)
23278 /* 12 bit immediate for addw/subw. */
23282 newval
^= 0x00a00000;
23285 newimm
= (unsigned int) FAIL
;
23292 /* MOV accepts both Thumb2 modified immediate (T2 encoding) and
23293 UINT16 (T3 encoding), MOVW only accepts UINT16. When
23294 disassembling, MOV is preferred when there is no encoding
23296 NOTE: MOV is using ORR opcode under Thumb 2 mode. */
23297 if (((newval
>> T2_DATA_OP_SHIFT
) & 0xf) == T2_OPCODE_ORR
23298 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2_v8m
)
23299 && !((newval
>> T2_SBIT_SHIFT
) & 0x1)
23300 && value
>= 0 && value
<=0xffff)
23302 /* Toggle bit[25] to change encoding from T2 to T3. */
23304 /* Clear bits[19:16]. */
23305 newval
&= 0xfff0ffff;
23306 /* Encoding high 4bits imm. Code below will encode the
23307 remaining low 12bits. */
23308 newval
|= (value
& 0x0000f000) << 4;
23309 newimm
= value
& 0x00000fff;
23314 if (newimm
== (unsigned int)FAIL
)
23316 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23317 _("invalid constant (%lx) after fixup"),
23318 (unsigned long) value
);
23322 newval
|= (newimm
& 0x800) << 15;
23323 newval
|= (newimm
& 0x700) << 4;
23324 newval
|= (newimm
& 0x0ff);
23326 md_number_to_chars (buf
, (valueT
) ((newval
>> 16) & 0xffff), THUMB_SIZE
);
23327 md_number_to_chars (buf
+2, (valueT
) (newval
& 0xffff), THUMB_SIZE
);
23330 case BFD_RELOC_ARM_SMC
:
23331 if (((unsigned long) value
) > 0xffff)
23332 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23333 _("invalid smc expression"));
23334 newval
= md_chars_to_number (buf
, INSN_SIZE
);
23335 newval
|= (value
& 0xf) | ((value
& 0xfff0) << 4);
23336 md_number_to_chars (buf
, newval
, INSN_SIZE
);
23339 case BFD_RELOC_ARM_HVC
:
23340 if (((unsigned long) value
) > 0xffff)
23341 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23342 _("invalid hvc expression"));
23343 newval
= md_chars_to_number (buf
, INSN_SIZE
);
23344 newval
|= (value
& 0xf) | ((value
& 0xfff0) << 4);
23345 md_number_to_chars (buf
, newval
, INSN_SIZE
);
23348 case BFD_RELOC_ARM_SWI
:
23349 if (fixP
->tc_fix_data
!= 0)
23351 if (((unsigned long) value
) > 0xff)
23352 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23353 _("invalid swi expression"));
23354 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
23356 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
23360 if (((unsigned long) value
) > 0x00ffffff)
23361 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23362 _("invalid swi expression"));
23363 newval
= md_chars_to_number (buf
, INSN_SIZE
);
23365 md_number_to_chars (buf
, newval
, INSN_SIZE
);
23369 case BFD_RELOC_ARM_MULTI
:
23370 if (((unsigned long) value
) > 0xffff)
23371 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23372 _("invalid expression in load/store multiple"));
23373 newval
= value
| md_chars_to_number (buf
, INSN_SIZE
);
23374 md_number_to_chars (buf
, newval
, INSN_SIZE
);
23378 case BFD_RELOC_ARM_PCREL_CALL
:
23380 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
)
23382 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
23383 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
23384 && THUMB_IS_FUNC (fixP
->fx_addsy
))
23385 /* Flip the bl to blx. This is a simple flip
23386 bit here because we generate PCREL_CALL for
23387 unconditional bls. */
23389 newval
= md_chars_to_number (buf
, INSN_SIZE
);
23390 newval
= newval
| 0x10000000;
23391 md_number_to_chars (buf
, newval
, INSN_SIZE
);
23397 goto arm_branch_common
;
23399 case BFD_RELOC_ARM_PCREL_JUMP
:
23400 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
)
23402 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
23403 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
23404 && THUMB_IS_FUNC (fixP
->fx_addsy
))
23406 /* This would map to a bl<cond>, b<cond>,
23407 b<always> to a Thumb function. We
23408 need to force a relocation for this particular
23410 newval
= md_chars_to_number (buf
, INSN_SIZE
);
23413 /* Fall through. */
23415 case BFD_RELOC_ARM_PLT32
:
23417 case BFD_RELOC_ARM_PCREL_BRANCH
:
23419 goto arm_branch_common
;
23421 case BFD_RELOC_ARM_PCREL_BLX
:
23424 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
)
23426 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
23427 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
23428 && ARM_IS_FUNC (fixP
->fx_addsy
))
23430 /* Flip the blx to a bl and warn. */
23431 const char *name
= S_GET_NAME (fixP
->fx_addsy
);
23432 newval
= 0xeb000000;
23433 as_warn_where (fixP
->fx_file
, fixP
->fx_line
,
23434 _("blx to '%s' an ARM ISA state function changed to bl"),
23436 md_number_to_chars (buf
, newval
, INSN_SIZE
);
23442 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
)
23443 fixP
->fx_r_type
= BFD_RELOC_ARM_PCREL_CALL
;
23447 /* We are going to store value (shifted right by two) in the
23448 instruction, in a 24 bit, signed field. Bits 26 through 32 either
23449 all clear or all set and bit 0 must be clear. For B/BL bit 1 must
23450 also be be clear. */
23452 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23453 _("misaligned branch destination"));
23454 if ((value
& (offsetT
)0xfe000000) != (offsetT
)0
23455 && (value
& (offsetT
)0xfe000000) != (offsetT
)0xfe000000)
23456 as_bad_where (fixP
->fx_file
, fixP
->fx_line
, BAD_RANGE
);
23458 if (fixP
->fx_done
|| !seg
->use_rela_p
)
23460 newval
= md_chars_to_number (buf
, INSN_SIZE
);
23461 newval
|= (value
>> 2) & 0x00ffffff;
23462 /* Set the H bit on BLX instructions. */
23466 newval
|= 0x01000000;
23468 newval
&= ~0x01000000;
23470 md_number_to_chars (buf
, newval
, INSN_SIZE
);
23474 case BFD_RELOC_THUMB_PCREL_BRANCH7
: /* CBZ */
23475 /* CBZ can only branch forward. */
23477 /* Attempts to use CBZ to branch to the next instruction
23478 (which, strictly speaking, are prohibited) will be turned into
23481 FIXME: It may be better to remove the instruction completely and
23482 perform relaxation. */
23485 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
23486 newval
= 0xbf00; /* NOP encoding T1 */
23487 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
23492 as_bad_where (fixP
->fx_file
, fixP
->fx_line
, BAD_RANGE
);
23494 if (fixP
->fx_done
|| !seg
->use_rela_p
)
23496 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
23497 newval
|= ((value
& 0x3e) << 2) | ((value
& 0x40) << 3);
23498 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
23503 case BFD_RELOC_THUMB_PCREL_BRANCH9
: /* Conditional branch. */
23504 if ((value
& ~0xff) && ((value
& ~0xff) != ~0xff))
23505 as_bad_where (fixP
->fx_file
, fixP
->fx_line
, BAD_RANGE
);
23507 if (fixP
->fx_done
|| !seg
->use_rela_p
)
23509 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
23510 newval
|= (value
& 0x1ff) >> 1;
23511 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
23515 case BFD_RELOC_THUMB_PCREL_BRANCH12
: /* Unconditional branch. */
23516 if ((value
& ~0x7ff) && ((value
& ~0x7ff) != ~0x7ff))
23517 as_bad_where (fixP
->fx_file
, fixP
->fx_line
, BAD_RANGE
);
23519 if (fixP
->fx_done
|| !seg
->use_rela_p
)
23521 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
23522 newval
|= (value
& 0xfff) >> 1;
23523 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
23527 case BFD_RELOC_THUMB_PCREL_BRANCH20
:
23529 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
23530 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
23531 && ARM_IS_FUNC (fixP
->fx_addsy
)
23532 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
23534 /* Force a relocation for a branch 20 bits wide. */
23537 if ((value
& ~0x1fffff) && ((value
& ~0x0fffff) != ~0x0fffff))
23538 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23539 _("conditional branch out of range"));
23541 if (fixP
->fx_done
|| !seg
->use_rela_p
)
23544 addressT S
, J1
, J2
, lo
, hi
;
23546 S
= (value
& 0x00100000) >> 20;
23547 J2
= (value
& 0x00080000) >> 19;
23548 J1
= (value
& 0x00040000) >> 18;
23549 hi
= (value
& 0x0003f000) >> 12;
23550 lo
= (value
& 0x00000ffe) >> 1;
23552 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
23553 newval2
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
23554 newval
|= (S
<< 10) | hi
;
23555 newval2
|= (J1
<< 13) | (J2
<< 11) | lo
;
23556 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
23557 md_number_to_chars (buf
+ THUMB_SIZE
, newval2
, THUMB_SIZE
);
23561 case BFD_RELOC_THUMB_PCREL_BLX
:
23562 /* If there is a blx from a thumb state function to
23563 another thumb function flip this to a bl and warn
23567 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
23568 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
23569 && THUMB_IS_FUNC (fixP
->fx_addsy
))
23571 const char *name
= S_GET_NAME (fixP
->fx_addsy
);
23572 as_warn_where (fixP
->fx_file
, fixP
->fx_line
,
23573 _("blx to Thumb func '%s' from Thumb ISA state changed to bl"),
23575 newval
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
23576 newval
= newval
| 0x1000;
23577 md_number_to_chars (buf
+THUMB_SIZE
, newval
, THUMB_SIZE
);
23578 fixP
->fx_r_type
= BFD_RELOC_THUMB_PCREL_BRANCH23
;
23583 goto thumb_bl_common
;
23585 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
23586 /* A bl from Thumb state ISA to an internal ARM state function
23587 is converted to a blx. */
23589 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
23590 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
23591 && ARM_IS_FUNC (fixP
->fx_addsy
)
23592 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
23594 newval
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
23595 newval
= newval
& ~0x1000;
23596 md_number_to_chars (buf
+THUMB_SIZE
, newval
, THUMB_SIZE
);
23597 fixP
->fx_r_type
= BFD_RELOC_THUMB_PCREL_BLX
;
23603 if (fixP
->fx_r_type
== BFD_RELOC_THUMB_PCREL_BLX
)
23604 /* For a BLX instruction, make sure that the relocation is rounded up
23605 to a word boundary. This follows the semantics of the instruction
23606 which specifies that bit 1 of the target address will come from bit
23607 1 of the base address. */
23608 value
= (value
+ 3) & ~ 3;
23611 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
23612 && fixP
->fx_r_type
== BFD_RELOC_THUMB_PCREL_BLX
)
23613 fixP
->fx_r_type
= BFD_RELOC_THUMB_PCREL_BRANCH23
;
23616 if ((value
& ~0x3fffff) && ((value
& ~0x3fffff) != ~0x3fffff))
23618 if (!(ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2
)))
23619 as_bad_where (fixP
->fx_file
, fixP
->fx_line
, BAD_RANGE
);
23620 else if ((value
& ~0x1ffffff)
23621 && ((value
& ~0x1ffffff) != ~0x1ffffff))
23622 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23623 _("Thumb2 branch out of range"));
23626 if (fixP
->fx_done
|| !seg
->use_rela_p
)
23627 encode_thumb2_b_bl_offset (buf
, value
);
23631 case BFD_RELOC_THUMB_PCREL_BRANCH25
:
23632 if ((value
& ~0x0ffffff) && ((value
& ~0x0ffffff) != ~0x0ffffff))
23633 as_bad_where (fixP
->fx_file
, fixP
->fx_line
, BAD_RANGE
);
23635 if (fixP
->fx_done
|| !seg
->use_rela_p
)
23636 encode_thumb2_b_bl_offset (buf
, value
);
23641 if (fixP
->fx_done
|| !seg
->use_rela_p
)
23646 if (fixP
->fx_done
|| !seg
->use_rela_p
)
23647 md_number_to_chars (buf
, value
, 2);
23651 case BFD_RELOC_ARM_TLS_CALL
:
23652 case BFD_RELOC_ARM_THM_TLS_CALL
:
23653 case BFD_RELOC_ARM_TLS_DESCSEQ
:
23654 case BFD_RELOC_ARM_THM_TLS_DESCSEQ
:
23655 case BFD_RELOC_ARM_TLS_GOTDESC
:
23656 case BFD_RELOC_ARM_TLS_GD32
:
23657 case BFD_RELOC_ARM_TLS_LE32
:
23658 case BFD_RELOC_ARM_TLS_IE32
:
23659 case BFD_RELOC_ARM_TLS_LDM32
:
23660 case BFD_RELOC_ARM_TLS_LDO32
:
23661 S_SET_THREAD_LOCAL (fixP
->fx_addsy
);
23664 case BFD_RELOC_ARM_GOT32
:
23665 case BFD_RELOC_ARM_GOTOFF
:
23668 case BFD_RELOC_ARM_GOT_PREL
:
23669 if (fixP
->fx_done
|| !seg
->use_rela_p
)
23670 md_number_to_chars (buf
, value
, 4);
23673 case BFD_RELOC_ARM_TARGET2
:
23674 /* TARGET2 is not partial-inplace, so we need to write the
23675 addend here for REL targets, because it won't be written out
23676 during reloc processing later. */
23677 if (fixP
->fx_done
|| !seg
->use_rela_p
)
23678 md_number_to_chars (buf
, fixP
->fx_offset
, 4);
23682 case BFD_RELOC_RVA
:
23684 case BFD_RELOC_ARM_TARGET1
:
23685 case BFD_RELOC_ARM_ROSEGREL32
:
23686 case BFD_RELOC_ARM_SBREL32
:
23687 case BFD_RELOC_32_PCREL
:
23689 case BFD_RELOC_32_SECREL
:
23691 if (fixP
->fx_done
|| !seg
->use_rela_p
)
23693 /* For WinCE we only do this for pcrel fixups. */
23694 if (fixP
->fx_done
|| fixP
->fx_pcrel
)
23696 md_number_to_chars (buf
, value
, 4);
23700 case BFD_RELOC_ARM_PREL31
:
23701 if (fixP
->fx_done
|| !seg
->use_rela_p
)
23703 newval
= md_chars_to_number (buf
, 4) & 0x80000000;
23704 if ((value
^ (value
>> 1)) & 0x40000000)
23706 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23707 _("rel31 relocation overflow"));
23709 newval
|= value
& 0x7fffffff;
23710 md_number_to_chars (buf
, newval
, 4);
23715 case BFD_RELOC_ARM_CP_OFF_IMM
:
23716 case BFD_RELOC_ARM_T32_CP_OFF_IMM
:
23717 if (fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM
)
23718 newval
= md_chars_to_number (buf
, INSN_SIZE
);
23720 newval
= get_thumb32_insn (buf
);
23721 if ((newval
& 0x0f200f00) == 0x0d000900)
23723 /* This is a fp16 vstr/vldr. The immediate offset in the mnemonic
23724 has permitted values that are multiples of 2, in the range 0
23726 if (value
< -510 || value
> 510 || (value
& 1))
23727 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23728 _("co-processor offset out of range"));
23730 else if (value
< -1023 || value
> 1023 || (value
& 3))
23731 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23732 _("co-processor offset out of range"));
23737 if (fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM
23738 || fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM_S2
)
23739 newval
= md_chars_to_number (buf
, INSN_SIZE
);
23741 newval
= get_thumb32_insn (buf
);
23743 newval
&= 0xffffff00;
23746 newval
&= 0xff7fff00;
23747 if ((newval
& 0x0f200f00) == 0x0d000900)
23749 /* This is a fp16 vstr/vldr.
23751 It requires the immediate offset in the instruction is shifted
23752 left by 1 to be a half-word offset.
23754 Here, left shift by 1 first, and later right shift by 2
23755 should get the right offset. */
23758 newval
|= (value
>> 2) | (sign
? INDEX_UP
: 0);
23760 if (fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM
23761 || fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM_S2
)
23762 md_number_to_chars (buf
, newval
, INSN_SIZE
);
23764 put_thumb32_insn (buf
, newval
);
23767 case BFD_RELOC_ARM_CP_OFF_IMM_S2
:
23768 case BFD_RELOC_ARM_T32_CP_OFF_IMM_S2
:
23769 if (value
< -255 || value
> 255)
23770 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23771 _("co-processor offset out of range"));
23773 goto cp_off_common
;
23775 case BFD_RELOC_ARM_THUMB_OFFSET
:
23776 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
23777 /* Exactly what ranges, and where the offset is inserted depends
23778 on the type of instruction, we can establish this from the
23780 switch (newval
>> 12)
23782 case 4: /* PC load. */
23783 /* Thumb PC loads are somewhat odd, bit 1 of the PC is
23784 forced to zero for these loads; md_pcrel_from has already
23785 compensated for this. */
23787 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23788 _("invalid offset, target not word aligned (0x%08lX)"),
23789 (((unsigned long) fixP
->fx_frag
->fr_address
23790 + (unsigned long) fixP
->fx_where
) & ~3)
23791 + (unsigned long) value
);
23793 if (value
& ~0x3fc)
23794 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23795 _("invalid offset, value too big (0x%08lX)"),
23798 newval
|= value
>> 2;
23801 case 9: /* SP load/store. */
23802 if (value
& ~0x3fc)
23803 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23804 _("invalid offset, value too big (0x%08lX)"),
23806 newval
|= value
>> 2;
23809 case 6: /* Word load/store. */
23811 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23812 _("invalid offset, value too big (0x%08lX)"),
23814 newval
|= value
<< 4; /* 6 - 2. */
23817 case 7: /* Byte load/store. */
23819 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23820 _("invalid offset, value too big (0x%08lX)"),
23822 newval
|= value
<< 6;
23825 case 8: /* Halfword load/store. */
23827 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23828 _("invalid offset, value too big (0x%08lX)"),
23830 newval
|= value
<< 5; /* 6 - 1. */
23834 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23835 "Unable to process relocation for thumb opcode: %lx",
23836 (unsigned long) newval
);
23839 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
23842 case BFD_RELOC_ARM_THUMB_ADD
:
23843 /* This is a complicated relocation, since we use it for all of
23844 the following immediate relocations:
23848 9bit ADD/SUB SP word-aligned
23849 10bit ADD PC/SP word-aligned
23851 The type of instruction being processed is encoded in the
23858 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
23860 int rd
= (newval
>> 4) & 0xf;
23861 int rs
= newval
& 0xf;
23862 int subtract
= !!(newval
& 0x8000);
23864 /* Check for HI regs, only very restricted cases allowed:
23865 Adjusting SP, and using PC or SP to get an address. */
23866 if ((rd
> 7 && (rd
!= REG_SP
|| rs
!= REG_SP
))
23867 || (rs
> 7 && rs
!= REG_SP
&& rs
!= REG_PC
))
23868 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23869 _("invalid Hi register with immediate"));
23871 /* If value is negative, choose the opposite instruction. */
23875 subtract
= !subtract
;
23877 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23878 _("immediate value out of range"));
23883 if (value
& ~0x1fc)
23884 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23885 _("invalid immediate for stack address calculation"));
23886 newval
= subtract
? T_OPCODE_SUB_ST
: T_OPCODE_ADD_ST
;
23887 newval
|= value
>> 2;
23889 else if (rs
== REG_PC
|| rs
== REG_SP
)
23891 /* PR gas/18541. If the addition is for a defined symbol
23892 within range of an ADR instruction then accept it. */
23895 && fixP
->fx_addsy
!= NULL
)
23899 if (! S_IS_DEFINED (fixP
->fx_addsy
)
23900 || S_GET_SEGMENT (fixP
->fx_addsy
) != seg
23901 || S_IS_WEAK (fixP
->fx_addsy
))
23903 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23904 _("address calculation needs a strongly defined nearby symbol"));
23908 offsetT v
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
23910 /* Round up to the next 4-byte boundary. */
23915 v
= S_GET_VALUE (fixP
->fx_addsy
) - v
;
23919 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23920 _("symbol too far away"));
23930 if (subtract
|| value
& ~0x3fc)
23931 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23932 _("invalid immediate for address calculation (value = 0x%08lX)"),
23933 (unsigned long) (subtract
? - value
: value
));
23934 newval
= (rs
== REG_PC
? T_OPCODE_ADD_PC
: T_OPCODE_ADD_SP
);
23936 newval
|= value
>> 2;
23941 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23942 _("immediate value out of range"));
23943 newval
= subtract
? T_OPCODE_SUB_I8
: T_OPCODE_ADD_I8
;
23944 newval
|= (rd
<< 8) | value
;
23949 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23950 _("immediate value out of range"));
23951 newval
= subtract
? T_OPCODE_SUB_I3
: T_OPCODE_ADD_I3
;
23952 newval
|= rd
| (rs
<< 3) | (value
<< 6);
23955 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
23958 case BFD_RELOC_ARM_THUMB_IMM
:
23959 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
23960 if (value
< 0 || value
> 255)
23961 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23962 _("invalid immediate: %ld is out of range"),
23965 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
23968 case BFD_RELOC_ARM_THUMB_SHIFT
:
23969 /* 5bit shift value (0..32). LSL cannot take 32. */
23970 newval
= md_chars_to_number (buf
, THUMB_SIZE
) & 0xf83f;
23971 temp
= newval
& 0xf800;
23972 if (value
< 0 || value
> 32 || (value
== 32 && temp
== T_OPCODE_LSL_I
))
23973 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23974 _("invalid shift value: %ld"), (long) value
);
23975 /* Shifts of zero must be encoded as LSL. */
23977 newval
= (newval
& 0x003f) | T_OPCODE_LSL_I
;
23978 /* Shifts of 32 are encoded as zero. */
23979 else if (value
== 32)
23981 newval
|= value
<< 6;
23982 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
23985 case BFD_RELOC_VTABLE_INHERIT
:
23986 case BFD_RELOC_VTABLE_ENTRY
:
23990 case BFD_RELOC_ARM_MOVW
:
23991 case BFD_RELOC_ARM_MOVT
:
23992 case BFD_RELOC_ARM_THUMB_MOVW
:
23993 case BFD_RELOC_ARM_THUMB_MOVT
:
23994 if (fixP
->fx_done
|| !seg
->use_rela_p
)
23996 /* REL format relocations are limited to a 16-bit addend. */
23997 if (!fixP
->fx_done
)
23999 if (value
< -0x8000 || value
> 0x7fff)
24000 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24001 _("offset out of range"));
24003 else if (fixP
->fx_r_type
== BFD_RELOC_ARM_MOVT
24004 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVT
)
24009 if (fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVW
24010 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVT
)
24012 newval
= get_thumb32_insn (buf
);
24013 newval
&= 0xfbf08f00;
24014 newval
|= (value
& 0xf000) << 4;
24015 newval
|= (value
& 0x0800) << 15;
24016 newval
|= (value
& 0x0700) << 4;
24017 newval
|= (value
& 0x00ff);
24018 put_thumb32_insn (buf
, newval
);
24022 newval
= md_chars_to_number (buf
, 4);
24023 newval
&= 0xfff0f000;
24024 newval
|= value
& 0x0fff;
24025 newval
|= (value
& 0xf000) << 4;
24026 md_number_to_chars (buf
, newval
, 4);
24031 case BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
:
24032 case BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC
:
24033 case BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC
:
24034 case BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
:
24035 gas_assert (!fixP
->fx_done
);
24038 bfd_boolean is_mov
;
24039 bfd_vma encoded_addend
= value
;
24041 /* Check that addend can be encoded in instruction. */
24042 if (!seg
->use_rela_p
&& (value
< 0 || value
> 255))
24043 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24044 _("the offset 0x%08lX is not representable"),
24045 (unsigned long) encoded_addend
);
24047 /* Extract the instruction. */
24048 insn
= md_chars_to_number (buf
, THUMB_SIZE
);
24049 is_mov
= (insn
& 0xf800) == 0x2000;
24054 if (!seg
->use_rela_p
)
24055 insn
|= encoded_addend
;
24061 /* Extract the instruction. */
24062 /* Encoding is the following
24067 /* The following conditions must be true :
24072 rd
= (insn
>> 4) & 0xf;
24074 if ((insn
& 0x8000) || (rd
!= rs
) || rd
> 7)
24075 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24076 _("Unable to process relocation for thumb opcode: %lx"),
24077 (unsigned long) insn
);
24079 /* Encode as ADD immediate8 thumb 1 code. */
24080 insn
= 0x3000 | (rd
<< 8);
24082 /* Place the encoded addend into the first 8 bits of the
24084 if (!seg
->use_rela_p
)
24085 insn
|= encoded_addend
;
24088 /* Update the instruction. */
24089 md_number_to_chars (buf
, insn
, THUMB_SIZE
);
24093 case BFD_RELOC_ARM_ALU_PC_G0_NC
:
24094 case BFD_RELOC_ARM_ALU_PC_G0
:
24095 case BFD_RELOC_ARM_ALU_PC_G1_NC
:
24096 case BFD_RELOC_ARM_ALU_PC_G1
:
24097 case BFD_RELOC_ARM_ALU_PC_G2
:
24098 case BFD_RELOC_ARM_ALU_SB_G0_NC
:
24099 case BFD_RELOC_ARM_ALU_SB_G0
:
24100 case BFD_RELOC_ARM_ALU_SB_G1_NC
:
24101 case BFD_RELOC_ARM_ALU_SB_G1
:
24102 case BFD_RELOC_ARM_ALU_SB_G2
:
24103 gas_assert (!fixP
->fx_done
);
24104 if (!seg
->use_rela_p
)
24107 bfd_vma encoded_addend
;
24108 bfd_vma addend_abs
= abs (value
);
24110 /* Check that the absolute value of the addend can be
24111 expressed as an 8-bit constant plus a rotation. */
24112 encoded_addend
= encode_arm_immediate (addend_abs
);
24113 if (encoded_addend
== (unsigned int) FAIL
)
24114 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24115 _("the offset 0x%08lX is not representable"),
24116 (unsigned long) addend_abs
);
24118 /* Extract the instruction. */
24119 insn
= md_chars_to_number (buf
, INSN_SIZE
);
24121 /* If the addend is positive, use an ADD instruction.
24122 Otherwise use a SUB. Take care not to destroy the S bit. */
24123 insn
&= 0xff1fffff;
24129 /* Place the encoded addend into the first 12 bits of the
24131 insn
&= 0xfffff000;
24132 insn
|= encoded_addend
;
24134 /* Update the instruction. */
24135 md_number_to_chars (buf
, insn
, INSN_SIZE
);
24139 case BFD_RELOC_ARM_LDR_PC_G0
:
24140 case BFD_RELOC_ARM_LDR_PC_G1
:
24141 case BFD_RELOC_ARM_LDR_PC_G2
:
24142 case BFD_RELOC_ARM_LDR_SB_G0
:
24143 case BFD_RELOC_ARM_LDR_SB_G1
:
24144 case BFD_RELOC_ARM_LDR_SB_G2
:
24145 gas_assert (!fixP
->fx_done
);
24146 if (!seg
->use_rela_p
)
24149 bfd_vma addend_abs
= abs (value
);
24151 /* Check that the absolute value of the addend can be
24152 encoded in 12 bits. */
24153 if (addend_abs
>= 0x1000)
24154 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24155 _("bad offset 0x%08lX (only 12 bits available for the magnitude)"),
24156 (unsigned long) addend_abs
);
24158 /* Extract the instruction. */
24159 insn
= md_chars_to_number (buf
, INSN_SIZE
);
24161 /* If the addend is negative, clear bit 23 of the instruction.
24162 Otherwise set it. */
24164 insn
&= ~(1 << 23);
24168 /* Place the absolute value of the addend into the first 12 bits
24169 of the instruction. */
24170 insn
&= 0xfffff000;
24171 insn
|= addend_abs
;
24173 /* Update the instruction. */
24174 md_number_to_chars (buf
, insn
, INSN_SIZE
);
24178 case BFD_RELOC_ARM_LDRS_PC_G0
:
24179 case BFD_RELOC_ARM_LDRS_PC_G1
:
24180 case BFD_RELOC_ARM_LDRS_PC_G2
:
24181 case BFD_RELOC_ARM_LDRS_SB_G0
:
24182 case BFD_RELOC_ARM_LDRS_SB_G1
:
24183 case BFD_RELOC_ARM_LDRS_SB_G2
:
24184 gas_assert (!fixP
->fx_done
);
24185 if (!seg
->use_rela_p
)
24188 bfd_vma addend_abs
= abs (value
);
24190 /* Check that the absolute value of the addend can be
24191 encoded in 8 bits. */
24192 if (addend_abs
>= 0x100)
24193 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24194 _("bad offset 0x%08lX (only 8 bits available for the magnitude)"),
24195 (unsigned long) addend_abs
);
24197 /* Extract the instruction. */
24198 insn
= md_chars_to_number (buf
, INSN_SIZE
);
24200 /* If the addend is negative, clear bit 23 of the instruction.
24201 Otherwise set it. */
24203 insn
&= ~(1 << 23);
24207 /* Place the first four bits of the absolute value of the addend
24208 into the first 4 bits of the instruction, and the remaining
24209 four into bits 8 .. 11. */
24210 insn
&= 0xfffff0f0;
24211 insn
|= (addend_abs
& 0xf) | ((addend_abs
& 0xf0) << 4);
24213 /* Update the instruction. */
24214 md_number_to_chars (buf
, insn
, INSN_SIZE
);
24218 case BFD_RELOC_ARM_LDC_PC_G0
:
24219 case BFD_RELOC_ARM_LDC_PC_G1
:
24220 case BFD_RELOC_ARM_LDC_PC_G2
:
24221 case BFD_RELOC_ARM_LDC_SB_G0
:
24222 case BFD_RELOC_ARM_LDC_SB_G1
:
24223 case BFD_RELOC_ARM_LDC_SB_G2
:
24224 gas_assert (!fixP
->fx_done
);
24225 if (!seg
->use_rela_p
)
24228 bfd_vma addend_abs
= abs (value
);
24230 /* Check that the absolute value of the addend is a multiple of
24231 four and, when divided by four, fits in 8 bits. */
24232 if (addend_abs
& 0x3)
24233 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24234 _("bad offset 0x%08lX (must be word-aligned)"),
24235 (unsigned long) addend_abs
);
24237 if ((addend_abs
>> 2) > 0xff)
24238 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24239 _("bad offset 0x%08lX (must be an 8-bit number of words)"),
24240 (unsigned long) addend_abs
);
24242 /* Extract the instruction. */
24243 insn
= md_chars_to_number (buf
, INSN_SIZE
);
24245 /* If the addend is negative, clear bit 23 of the instruction.
24246 Otherwise set it. */
24248 insn
&= ~(1 << 23);
24252 /* Place the addend (divided by four) into the first eight
24253 bits of the instruction. */
24254 insn
&= 0xfffffff0;
24255 insn
|= addend_abs
>> 2;
24257 /* Update the instruction. */
24258 md_number_to_chars (buf
, insn
, INSN_SIZE
);
24262 case BFD_RELOC_ARM_V4BX
:
24263 /* This will need to go in the object file. */
24267 case BFD_RELOC_UNUSED
:
24269 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24270 _("bad relocation fixup type (%d)"), fixP
->fx_r_type
);
24274 /* Translate internal representation of relocation info to BFD target
24278 tc_gen_reloc (asection
*section
, fixS
*fixp
)
24281 bfd_reloc_code_real_type code
;
24283 reloc
= XNEW (arelent
);
24285 reloc
->sym_ptr_ptr
= XNEW (asymbol
*);
24286 *reloc
->sym_ptr_ptr
= symbol_get_bfdsym (fixp
->fx_addsy
);
24287 reloc
->address
= fixp
->fx_frag
->fr_address
+ fixp
->fx_where
;
24289 if (fixp
->fx_pcrel
)
24291 if (section
->use_rela_p
)
24292 fixp
->fx_offset
-= md_pcrel_from_section (fixp
, section
);
24294 fixp
->fx_offset
= reloc
->address
;
24296 reloc
->addend
= fixp
->fx_offset
;
24298 switch (fixp
->fx_r_type
)
24301 if (fixp
->fx_pcrel
)
24303 code
= BFD_RELOC_8_PCREL
;
24306 /* Fall through. */
24309 if (fixp
->fx_pcrel
)
24311 code
= BFD_RELOC_16_PCREL
;
24314 /* Fall through. */
24317 if (fixp
->fx_pcrel
)
24319 code
= BFD_RELOC_32_PCREL
;
24322 /* Fall through. */
24324 case BFD_RELOC_ARM_MOVW
:
24325 if (fixp
->fx_pcrel
)
24327 code
= BFD_RELOC_ARM_MOVW_PCREL
;
24330 /* Fall through. */
24332 case BFD_RELOC_ARM_MOVT
:
24333 if (fixp
->fx_pcrel
)
24335 code
= BFD_RELOC_ARM_MOVT_PCREL
;
24338 /* Fall through. */
24340 case BFD_RELOC_ARM_THUMB_MOVW
:
24341 if (fixp
->fx_pcrel
)
24343 code
= BFD_RELOC_ARM_THUMB_MOVW_PCREL
;
24346 /* Fall through. */
24348 case BFD_RELOC_ARM_THUMB_MOVT
:
24349 if (fixp
->fx_pcrel
)
24351 code
= BFD_RELOC_ARM_THUMB_MOVT_PCREL
;
24354 /* Fall through. */
24356 case BFD_RELOC_NONE
:
24357 case BFD_RELOC_ARM_PCREL_BRANCH
:
24358 case BFD_RELOC_ARM_PCREL_BLX
:
24359 case BFD_RELOC_RVA
:
24360 case BFD_RELOC_THUMB_PCREL_BRANCH7
:
24361 case BFD_RELOC_THUMB_PCREL_BRANCH9
:
24362 case BFD_RELOC_THUMB_PCREL_BRANCH12
:
24363 case BFD_RELOC_THUMB_PCREL_BRANCH20
:
24364 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
24365 case BFD_RELOC_THUMB_PCREL_BRANCH25
:
24366 case BFD_RELOC_VTABLE_ENTRY
:
24367 case BFD_RELOC_VTABLE_INHERIT
:
24369 case BFD_RELOC_32_SECREL
:
24371 code
= fixp
->fx_r_type
;
24374 case BFD_RELOC_THUMB_PCREL_BLX
:
24376 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
)
24377 code
= BFD_RELOC_THUMB_PCREL_BRANCH23
;
24380 code
= BFD_RELOC_THUMB_PCREL_BLX
;
24383 case BFD_RELOC_ARM_LITERAL
:
24384 case BFD_RELOC_ARM_HWLITERAL
:
24385 /* If this is called then the a literal has
24386 been referenced across a section boundary. */
24387 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
24388 _("literal referenced across section boundary"));
24392 case BFD_RELOC_ARM_TLS_CALL
:
24393 case BFD_RELOC_ARM_THM_TLS_CALL
:
24394 case BFD_RELOC_ARM_TLS_DESCSEQ
:
24395 case BFD_RELOC_ARM_THM_TLS_DESCSEQ
:
24396 case BFD_RELOC_ARM_GOT32
:
24397 case BFD_RELOC_ARM_GOTOFF
:
24398 case BFD_RELOC_ARM_GOT_PREL
:
24399 case BFD_RELOC_ARM_PLT32
:
24400 case BFD_RELOC_ARM_TARGET1
:
24401 case BFD_RELOC_ARM_ROSEGREL32
:
24402 case BFD_RELOC_ARM_SBREL32
:
24403 case BFD_RELOC_ARM_PREL31
:
24404 case BFD_RELOC_ARM_TARGET2
:
24405 case BFD_RELOC_ARM_TLS_LDO32
:
24406 case BFD_RELOC_ARM_PCREL_CALL
:
24407 case BFD_RELOC_ARM_PCREL_JUMP
:
24408 case BFD_RELOC_ARM_ALU_PC_G0_NC
:
24409 case BFD_RELOC_ARM_ALU_PC_G0
:
24410 case BFD_RELOC_ARM_ALU_PC_G1_NC
:
24411 case BFD_RELOC_ARM_ALU_PC_G1
:
24412 case BFD_RELOC_ARM_ALU_PC_G2
:
24413 case BFD_RELOC_ARM_LDR_PC_G0
:
24414 case BFD_RELOC_ARM_LDR_PC_G1
:
24415 case BFD_RELOC_ARM_LDR_PC_G2
:
24416 case BFD_RELOC_ARM_LDRS_PC_G0
:
24417 case BFD_RELOC_ARM_LDRS_PC_G1
:
24418 case BFD_RELOC_ARM_LDRS_PC_G2
:
24419 case BFD_RELOC_ARM_LDC_PC_G0
:
24420 case BFD_RELOC_ARM_LDC_PC_G1
:
24421 case BFD_RELOC_ARM_LDC_PC_G2
:
24422 case BFD_RELOC_ARM_ALU_SB_G0_NC
:
24423 case BFD_RELOC_ARM_ALU_SB_G0
:
24424 case BFD_RELOC_ARM_ALU_SB_G1_NC
:
24425 case BFD_RELOC_ARM_ALU_SB_G1
:
24426 case BFD_RELOC_ARM_ALU_SB_G2
:
24427 case BFD_RELOC_ARM_LDR_SB_G0
:
24428 case BFD_RELOC_ARM_LDR_SB_G1
:
24429 case BFD_RELOC_ARM_LDR_SB_G2
:
24430 case BFD_RELOC_ARM_LDRS_SB_G0
:
24431 case BFD_RELOC_ARM_LDRS_SB_G1
:
24432 case BFD_RELOC_ARM_LDRS_SB_G2
:
24433 case BFD_RELOC_ARM_LDC_SB_G0
:
24434 case BFD_RELOC_ARM_LDC_SB_G1
:
24435 case BFD_RELOC_ARM_LDC_SB_G2
:
24436 case BFD_RELOC_ARM_V4BX
:
24437 case BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
:
24438 case BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC
:
24439 case BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC
:
24440 case BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
:
24441 code
= fixp
->fx_r_type
;
24444 case BFD_RELOC_ARM_TLS_GOTDESC
:
24445 case BFD_RELOC_ARM_TLS_GD32
:
24446 case BFD_RELOC_ARM_TLS_LE32
:
24447 case BFD_RELOC_ARM_TLS_IE32
:
24448 case BFD_RELOC_ARM_TLS_LDM32
:
24449 /* BFD will include the symbol's address in the addend.
24450 But we don't want that, so subtract it out again here. */
24451 if (!S_IS_COMMON (fixp
->fx_addsy
))
24452 reloc
->addend
-= (*reloc
->sym_ptr_ptr
)->value
;
24453 code
= fixp
->fx_r_type
;
24457 case BFD_RELOC_ARM_IMMEDIATE
:
24458 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
24459 _("internal relocation (type: IMMEDIATE) not fixed up"));
24462 case BFD_RELOC_ARM_ADRL_IMMEDIATE
:
24463 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
24464 _("ADRL used for a symbol not defined in the same file"));
24467 case BFD_RELOC_ARM_OFFSET_IMM
:
24468 if (section
->use_rela_p
)
24470 code
= fixp
->fx_r_type
;
24474 if (fixp
->fx_addsy
!= NULL
24475 && !S_IS_DEFINED (fixp
->fx_addsy
)
24476 && S_IS_LOCAL (fixp
->fx_addsy
))
24478 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
24479 _("undefined local label `%s'"),
24480 S_GET_NAME (fixp
->fx_addsy
));
24484 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
24485 _("internal_relocation (type: OFFSET_IMM) not fixed up"));
24492 switch (fixp
->fx_r_type
)
24494 case BFD_RELOC_NONE
: type
= "NONE"; break;
24495 case BFD_RELOC_ARM_OFFSET_IMM8
: type
= "OFFSET_IMM8"; break;
24496 case BFD_RELOC_ARM_SHIFT_IMM
: type
= "SHIFT_IMM"; break;
24497 case BFD_RELOC_ARM_SMC
: type
= "SMC"; break;
24498 case BFD_RELOC_ARM_SWI
: type
= "SWI"; break;
24499 case BFD_RELOC_ARM_MULTI
: type
= "MULTI"; break;
24500 case BFD_RELOC_ARM_CP_OFF_IMM
: type
= "CP_OFF_IMM"; break;
24501 case BFD_RELOC_ARM_T32_OFFSET_IMM
: type
= "T32_OFFSET_IMM"; break;
24502 case BFD_RELOC_ARM_T32_CP_OFF_IMM
: type
= "T32_CP_OFF_IMM"; break;
24503 case BFD_RELOC_ARM_THUMB_ADD
: type
= "THUMB_ADD"; break;
24504 case BFD_RELOC_ARM_THUMB_SHIFT
: type
= "THUMB_SHIFT"; break;
24505 case BFD_RELOC_ARM_THUMB_IMM
: type
= "THUMB_IMM"; break;
24506 case BFD_RELOC_ARM_THUMB_OFFSET
: type
= "THUMB_OFFSET"; break;
24507 default: type
= _("<unknown>"); break;
24509 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
24510 _("cannot represent %s relocation in this object file format"),
24517 if ((code
== BFD_RELOC_32_PCREL
|| code
== BFD_RELOC_32
)
24519 && fixp
->fx_addsy
== GOT_symbol
)
24521 code
= BFD_RELOC_ARM_GOTPC
;
24522 reloc
->addend
= fixp
->fx_offset
= reloc
->address
;
24526 reloc
->howto
= bfd_reloc_type_lookup (stdoutput
, code
);
24528 if (reloc
->howto
== NULL
)
24530 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
24531 _("cannot represent %s relocation in this object file format"),
24532 bfd_get_reloc_code_name (code
));
24536 /* HACK: Since arm ELF uses Rel instead of Rela, encode the
24537 vtable entry to be used in the relocation's section offset. */
24538 if (fixp
->fx_r_type
== BFD_RELOC_VTABLE_ENTRY
)
24539 reloc
->address
= fixp
->fx_offset
;
24544 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
24547 cons_fix_new_arm (fragS
* frag
,
24551 bfd_reloc_code_real_type reloc
)
24556 FIXME: @@ Should look at CPU word size. */
24560 reloc
= BFD_RELOC_8
;
24563 reloc
= BFD_RELOC_16
;
24567 reloc
= BFD_RELOC_32
;
24570 reloc
= BFD_RELOC_64
;
24575 if (exp
->X_op
== O_secrel
)
24577 exp
->X_op
= O_symbol
;
24578 reloc
= BFD_RELOC_32_SECREL
;
24582 fix_new_exp (frag
, where
, size
, exp
, pcrel
, reloc
);
24585 #if defined (OBJ_COFF)
24587 arm_validate_fix (fixS
* fixP
)
24589 /* If the destination of the branch is a defined symbol which does not have
24590 the THUMB_FUNC attribute, then we must be calling a function which has
24591 the (interfacearm) attribute. We look for the Thumb entry point to that
24592 function and change the branch to refer to that function instead. */
24593 if (fixP
->fx_r_type
== BFD_RELOC_THUMB_PCREL_BRANCH23
24594 && fixP
->fx_addsy
!= NULL
24595 && S_IS_DEFINED (fixP
->fx_addsy
)
24596 && ! THUMB_IS_FUNC (fixP
->fx_addsy
))
24598 fixP
->fx_addsy
= find_real_start (fixP
->fx_addsy
);
24605 arm_force_relocation (struct fix
* fixp
)
24607 #if defined (OBJ_COFF) && defined (TE_PE)
24608 if (fixp
->fx_r_type
== BFD_RELOC_RVA
)
24612 /* In case we have a call or a branch to a function in ARM ISA mode from
24613 a thumb function or vice-versa force the relocation. These relocations
24614 are cleared off for some cores that might have blx and simple transformations
24618 switch (fixp
->fx_r_type
)
24620 case BFD_RELOC_ARM_PCREL_JUMP
:
24621 case BFD_RELOC_ARM_PCREL_CALL
:
24622 case BFD_RELOC_THUMB_PCREL_BLX
:
24623 if (THUMB_IS_FUNC (fixp
->fx_addsy
))
24627 case BFD_RELOC_ARM_PCREL_BLX
:
24628 case BFD_RELOC_THUMB_PCREL_BRANCH25
:
24629 case BFD_RELOC_THUMB_PCREL_BRANCH20
:
24630 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
24631 if (ARM_IS_FUNC (fixp
->fx_addsy
))
24640 /* Resolve these relocations even if the symbol is extern or weak.
24641 Technically this is probably wrong due to symbol preemption.
24642 In practice these relocations do not have enough range to be useful
24643 at dynamic link time, and some code (e.g. in the Linux kernel)
24644 expects these references to be resolved. */
24645 if (fixp
->fx_r_type
== BFD_RELOC_ARM_IMMEDIATE
24646 || fixp
->fx_r_type
== BFD_RELOC_ARM_OFFSET_IMM
24647 || fixp
->fx_r_type
== BFD_RELOC_ARM_OFFSET_IMM8
24648 || fixp
->fx_r_type
== BFD_RELOC_ARM_ADRL_IMMEDIATE
24649 || fixp
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM
24650 || fixp
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM_S2
24651 || fixp
->fx_r_type
== BFD_RELOC_ARM_THUMB_OFFSET
24652 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_ADD_IMM
24653 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_IMMEDIATE
24654 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_IMM12
24655 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_OFFSET_IMM
24656 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_ADD_PC12
24657 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_CP_OFF_IMM
24658 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_CP_OFF_IMM_S2
)
24661 /* Always leave these relocations for the linker. */
24662 if ((fixp
->fx_r_type
>= BFD_RELOC_ARM_ALU_PC_G0_NC
24663 && fixp
->fx_r_type
<= BFD_RELOC_ARM_LDC_SB_G2
)
24664 || fixp
->fx_r_type
== BFD_RELOC_ARM_LDR_PC_G0
)
24667 /* Always generate relocations against function symbols. */
24668 if (fixp
->fx_r_type
== BFD_RELOC_32
24670 && (symbol_get_bfdsym (fixp
->fx_addsy
)->flags
& BSF_FUNCTION
))
24673 return generic_force_reloc (fixp
);
24676 #if defined (OBJ_ELF) || defined (OBJ_COFF)
24677 /* Relocations against function names must be left unadjusted,
24678 so that the linker can use this information to generate interworking
24679 stubs. The MIPS version of this function
24680 also prevents relocations that are mips-16 specific, but I do not
24681 know why it does this.
24684 There is one other problem that ought to be addressed here, but
24685 which currently is not: Taking the address of a label (rather
24686 than a function) and then later jumping to that address. Such
24687 addresses also ought to have their bottom bit set (assuming that
24688 they reside in Thumb code), but at the moment they will not. */
24691 arm_fix_adjustable (fixS
* fixP
)
24693 if (fixP
->fx_addsy
== NULL
)
24696 /* Preserve relocations against symbols with function type. */
24697 if (symbol_get_bfdsym (fixP
->fx_addsy
)->flags
& BSF_FUNCTION
)
24700 if (THUMB_IS_FUNC (fixP
->fx_addsy
)
24701 && fixP
->fx_subsy
== NULL
)
24704 /* We need the symbol name for the VTABLE entries. */
24705 if ( fixP
->fx_r_type
== BFD_RELOC_VTABLE_INHERIT
24706 || fixP
->fx_r_type
== BFD_RELOC_VTABLE_ENTRY
)
24709 /* Don't allow symbols to be discarded on GOT related relocs. */
24710 if (fixP
->fx_r_type
== BFD_RELOC_ARM_PLT32
24711 || fixP
->fx_r_type
== BFD_RELOC_ARM_GOT32
24712 || fixP
->fx_r_type
== BFD_RELOC_ARM_GOTOFF
24713 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_GD32
24714 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_LE32
24715 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_IE32
24716 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_LDM32
24717 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_LDO32
24718 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_GOTDESC
24719 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_CALL
24720 || fixP
->fx_r_type
== BFD_RELOC_ARM_THM_TLS_CALL
24721 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_DESCSEQ
24722 || fixP
->fx_r_type
== BFD_RELOC_ARM_THM_TLS_DESCSEQ
24723 || fixP
->fx_r_type
== BFD_RELOC_ARM_TARGET2
)
24726 /* Similarly for group relocations. */
24727 if ((fixP
->fx_r_type
>= BFD_RELOC_ARM_ALU_PC_G0_NC
24728 && fixP
->fx_r_type
<= BFD_RELOC_ARM_LDC_SB_G2
)
24729 || fixP
->fx_r_type
== BFD_RELOC_ARM_LDR_PC_G0
)
24732 /* MOVW/MOVT REL relocations have limited offsets, so keep the symbols. */
24733 if (fixP
->fx_r_type
== BFD_RELOC_ARM_MOVW
24734 || fixP
->fx_r_type
== BFD_RELOC_ARM_MOVT
24735 || fixP
->fx_r_type
== BFD_RELOC_ARM_MOVW_PCREL
24736 || fixP
->fx_r_type
== BFD_RELOC_ARM_MOVT_PCREL
24737 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVW
24738 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVT
24739 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVW_PCREL
24740 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVT_PCREL
)
24743 /* BFD_RELOC_ARM_THUMB_ALU_ABS_Gx_NC relocations have VERY limited
24744 offsets, so keep these symbols. */
24745 if (fixP
->fx_r_type
>= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
24746 && fixP
->fx_r_type
<= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
)
24751 #endif /* defined (OBJ_ELF) || defined (OBJ_COFF) */
24755 elf32_arm_target_format (void)
24758 return (target_big_endian
24759 ? "elf32-bigarm-symbian"
24760 : "elf32-littlearm-symbian");
24761 #elif defined (TE_VXWORKS)
24762 return (target_big_endian
24763 ? "elf32-bigarm-vxworks"
24764 : "elf32-littlearm-vxworks");
24765 #elif defined (TE_NACL)
24766 return (target_big_endian
24767 ? "elf32-bigarm-nacl"
24768 : "elf32-littlearm-nacl");
24770 if (target_big_endian
)
24771 return "elf32-bigarm";
24773 return "elf32-littlearm";
24778 armelf_frob_symbol (symbolS
* symp
,
24781 elf_frob_symbol (symp
, puntp
);
24785 /* MD interface: Finalization. */
24790 literal_pool
* pool
;
24792 /* Ensure that all the IT blocks are properly closed. */
24793 check_it_blocks_finished ();
24795 for (pool
= list_of_pools
; pool
; pool
= pool
->next
)
24797 /* Put it at the end of the relevant section. */
24798 subseg_set (pool
->section
, pool
->sub_section
);
24800 arm_elf_change_section ();
24807 /* Remove any excess mapping symbols generated for alignment frags in
24808 SEC. We may have created a mapping symbol before a zero byte
24809 alignment; remove it if there's a mapping symbol after the
24812 check_mapping_symbols (bfd
*abfd ATTRIBUTE_UNUSED
, asection
*sec
,
24813 void *dummy ATTRIBUTE_UNUSED
)
24815 segment_info_type
*seginfo
= seg_info (sec
);
24818 if (seginfo
== NULL
|| seginfo
->frchainP
== NULL
)
24821 for (fragp
= seginfo
->frchainP
->frch_root
;
24823 fragp
= fragp
->fr_next
)
24825 symbolS
*sym
= fragp
->tc_frag_data
.last_map
;
24826 fragS
*next
= fragp
->fr_next
;
24828 /* Variable-sized frags have been converted to fixed size by
24829 this point. But if this was variable-sized to start with,
24830 there will be a fixed-size frag after it. So don't handle
24832 if (sym
== NULL
|| next
== NULL
)
24835 if (S_GET_VALUE (sym
) < next
->fr_address
)
24836 /* Not at the end of this frag. */
24838 know (S_GET_VALUE (sym
) == next
->fr_address
);
24842 if (next
->tc_frag_data
.first_map
!= NULL
)
24844 /* Next frag starts with a mapping symbol. Discard this
24846 symbol_remove (sym
, &symbol_rootP
, &symbol_lastP
);
24850 if (next
->fr_next
== NULL
)
24852 /* This mapping symbol is at the end of the section. Discard
24854 know (next
->fr_fix
== 0 && next
->fr_var
== 0);
24855 symbol_remove (sym
, &symbol_rootP
, &symbol_lastP
);
24859 /* As long as we have empty frags without any mapping symbols,
24861 /* If the next frag is non-empty and does not start with a
24862 mapping symbol, then this mapping symbol is required. */
24863 if (next
->fr_address
!= next
->fr_next
->fr_address
)
24866 next
= next
->fr_next
;
24868 while (next
!= NULL
);
/* Adjust the symbol table.  This marks Thumb symbols as distinct from
   normal ones.  For COFF this is done through the storage class; for
   ELF through the symbol's branch type / STT_ARM_16BIT info.  */

void
arm_adjust_symtab (void)
{
#ifdef OBJ_COFF
  symbolS * sym;

  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
    {
      if (ARM_IS_THUMB (sym))
	{
	  if (THUMB_IS_FUNC (sym))
	    {
	      /* Mark the symbol as a Thumb function.  */
	      if (   S_GET_STORAGE_CLASS (sym) == C_STAT
		  || S_GET_STORAGE_CLASS (sym) == C_LABEL)  /* This can happen!  */
		S_SET_STORAGE_CLASS (sym, C_THUMBSTATFUNC);
	      else if (S_GET_STORAGE_CLASS (sym) == C_EXT)
		S_SET_STORAGE_CLASS (sym, C_THUMBEXTFUNC);
	      else
		as_bad (_("%s: unexpected function type: %d"),
			S_GET_NAME (sym), S_GET_STORAGE_CLASS (sym));
	    }
	  else switch (S_GET_STORAGE_CLASS (sym))
	    {
	    case C_EXT:
	      S_SET_STORAGE_CLASS (sym, C_THUMBEXT);
	      break;
	    case C_STAT:
	      S_SET_STORAGE_CLASS (sym, C_THUMBSTAT);
	      break;
	    case C_LABEL:
	      S_SET_STORAGE_CLASS (sym, C_THUMBLABEL);
	      break;
	    default:
	      /* Do nothing.  */
	      break;
	    }
	}

      if (ARM_IS_INTERWORK (sym))
	coffsymbol (symbol_get_bfdsym (sym))->native->u.syment.n_flags = 0xFF;
    }
#endif
#ifdef OBJ_ELF
  symbolS * sym;
  char	    bind;

  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
    {
      if (ARM_IS_THUMB (sym))
	{
	  elf_symbol_type * elf_sym;

	  elf_sym = elf_symbol (symbol_get_bfdsym (sym));
	  bind = ELF_ST_BIND (elf_sym->internal_elf_sym.st_info);

	  if (! bfd_is_arm_special_symbol_name (elf_sym->symbol.name,
		BFD_ARM_SPECIAL_SYM_TYPE_ANY))
	    {
	      /* If it's a .thumb_func, declare it as so,
		 otherwise tag label as .code 16.  */
	      if (THUMB_IS_FUNC (sym))
		ARM_SET_SYM_BRANCH_TYPE (elf_sym->internal_elf_sym.st_target_internal,
					 ST_BRANCH_TO_THUMB);
	      else if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
		elf_sym->internal_elf_sym.st_info =
		  ELF_ST_INFO (bind, STT_ARM_16BIT);
	    }
	}
    }

  /* Remove any overlapping mapping symbols generated by alignment frags.  */
  bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
  /* Now do generic ELF adjustments.  */
  elf_adjust_symtab ();
#endif
}
24955 /* MD interface: Initialization. */
24958 set_constant_flonums (void)
24962 for (i
= 0; i
< NUM_FLOAT_VALS
; i
++)
24963 if (atof_ieee ((char *) fp_const
[i
], 'x', fp_values
[i
]) == NULL
)
24967 /* Auto-select Thumb mode if it's the only available instruction set for the
24968 given architecture. */
24971 autoselect_thumb_from_cpu_variant (void)
24973 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
))
24974 opcode_select (16);
24983 if ( (arm_ops_hsh
= hash_new ()) == NULL
24984 || (arm_cond_hsh
= hash_new ()) == NULL
24985 || (arm_shift_hsh
= hash_new ()) == NULL
24986 || (arm_psr_hsh
= hash_new ()) == NULL
24987 || (arm_v7m_psr_hsh
= hash_new ()) == NULL
24988 || (arm_reg_hsh
= hash_new ()) == NULL
24989 || (arm_reloc_hsh
= hash_new ()) == NULL
24990 || (arm_barrier_opt_hsh
= hash_new ()) == NULL
)
24991 as_fatal (_("virtual memory exhausted"));
24993 for (i
= 0; i
< sizeof (insns
) / sizeof (struct asm_opcode
); i
++)
24994 hash_insert (arm_ops_hsh
, insns
[i
].template_name
, (void *) (insns
+ i
));
24995 for (i
= 0; i
< sizeof (conds
) / sizeof (struct asm_cond
); i
++)
24996 hash_insert (arm_cond_hsh
, conds
[i
].template_name
, (void *) (conds
+ i
));
24997 for (i
= 0; i
< sizeof (shift_names
) / sizeof (struct asm_shift_name
); i
++)
24998 hash_insert (arm_shift_hsh
, shift_names
[i
].name
, (void *) (shift_names
+ i
));
24999 for (i
= 0; i
< sizeof (psrs
) / sizeof (struct asm_psr
); i
++)
25000 hash_insert (arm_psr_hsh
, psrs
[i
].template_name
, (void *) (psrs
+ i
));
25001 for (i
= 0; i
< sizeof (v7m_psrs
) / sizeof (struct asm_psr
); i
++)
25002 hash_insert (arm_v7m_psr_hsh
, v7m_psrs
[i
].template_name
,
25003 (void *) (v7m_psrs
+ i
));
25004 for (i
= 0; i
< sizeof (reg_names
) / sizeof (struct reg_entry
); i
++)
25005 hash_insert (arm_reg_hsh
, reg_names
[i
].name
, (void *) (reg_names
+ i
));
25007 i
< sizeof (barrier_opt_names
) / sizeof (struct asm_barrier_opt
);
25009 hash_insert (arm_barrier_opt_hsh
, barrier_opt_names
[i
].template_name
,
25010 (void *) (barrier_opt_names
+ i
));
25012 for (i
= 0; i
< ARRAY_SIZE (reloc_names
); i
++)
25014 struct reloc_entry
* entry
= reloc_names
+ i
;
25016 if (arm_is_eabi() && entry
->reloc
== BFD_RELOC_ARM_PLT32
)
25017 /* This makes encode_branch() use the EABI versions of this relocation. */
25018 entry
->reloc
= BFD_RELOC_UNUSED
;
25020 hash_insert (arm_reloc_hsh
, entry
->name
, (void *) entry
);
25024 set_constant_flonums ();
25026 /* Set the cpu variant based on the command-line options. We prefer
25027 -mcpu= over -march= if both are set (as for GCC); and we prefer
25028 -mfpu= over any other way of setting the floating point unit.
25029 Use of legacy options with new options are faulted. */
25032 if (mcpu_cpu_opt
|| march_cpu_opt
)
25033 as_bad (_("use of old and new-style options to set CPU type"));
25035 mcpu_cpu_opt
= legacy_cpu
;
25037 else if (!mcpu_cpu_opt
)
25039 mcpu_cpu_opt
= march_cpu_opt
;
25040 dyn_mcpu_ext_opt
= dyn_march_ext_opt
;
25041 /* Avoid double free in arm_md_end. */
25042 dyn_march_ext_opt
= NULL
;
25048 as_bad (_("use of old and new-style options to set FPU type"));
25050 mfpu_opt
= legacy_fpu
;
25052 else if (!mfpu_opt
)
25054 #if !(defined (EABI_DEFAULT) || defined (TE_LINUX) \
25055 || defined (TE_NetBSD) || defined (TE_VXWORKS))
25056 /* Some environments specify a default FPU. If they don't, infer it
25057 from the processor. */
25059 mfpu_opt
= mcpu_fpu_opt
;
25061 mfpu_opt
= march_fpu_opt
;
25063 mfpu_opt
= &fpu_default
;
25069 if (mcpu_cpu_opt
!= NULL
)
25070 mfpu_opt
= &fpu_default
;
25071 else if (mcpu_fpu_opt
!= NULL
&& ARM_CPU_HAS_FEATURE (*mcpu_fpu_opt
, arm_ext_v5
))
25072 mfpu_opt
= &fpu_arch_vfp_v2
;
25074 mfpu_opt
= &fpu_arch_fpa
;
25080 mcpu_cpu_opt
= &cpu_default
;
25081 selected_cpu
= cpu_default
;
25083 else if (dyn_mcpu_ext_opt
)
25084 ARM_MERGE_FEATURE_SETS (selected_cpu
, *mcpu_cpu_opt
, *dyn_mcpu_ext_opt
);
25086 selected_cpu
= *mcpu_cpu_opt
;
25088 if (mcpu_cpu_opt
&& dyn_mcpu_ext_opt
)
25089 ARM_MERGE_FEATURE_SETS (selected_cpu
, *mcpu_cpu_opt
, *dyn_mcpu_ext_opt
);
25090 else if (mcpu_cpu_opt
)
25091 selected_cpu
= *mcpu_cpu_opt
;
25093 mcpu_cpu_opt
= &arm_arch_any
;
25096 ARM_MERGE_FEATURE_SETS (cpu_variant
, *mcpu_cpu_opt
, *mfpu_opt
);
25097 if (dyn_mcpu_ext_opt
)
25098 ARM_MERGE_FEATURE_SETS (cpu_variant
, cpu_variant
, *dyn_mcpu_ext_opt
);
25100 autoselect_thumb_from_cpu_variant ();
25102 arm_arch_used
= thumb_arch_used
= arm_arch_none
;
25104 #if defined OBJ_COFF || defined OBJ_ELF
25106 unsigned int flags
= 0;
25108 #if defined OBJ_ELF
25109 flags
= meabi_flags
;
25111 switch (meabi_flags
)
25113 case EF_ARM_EABI_UNKNOWN
:
25115 /* Set the flags in the private structure. */
25116 if (uses_apcs_26
) flags
|= F_APCS26
;
25117 if (support_interwork
) flags
|= F_INTERWORK
;
25118 if (uses_apcs_float
) flags
|= F_APCS_FLOAT
;
25119 if (pic_code
) flags
|= F_PIC
;
25120 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_any_hard
))
25121 flags
|= F_SOFT_FLOAT
;
25123 switch (mfloat_abi_opt
)
25125 case ARM_FLOAT_ABI_SOFT
:
25126 case ARM_FLOAT_ABI_SOFTFP
:
25127 flags
|= F_SOFT_FLOAT
;
25130 case ARM_FLOAT_ABI_HARD
:
25131 if (flags
& F_SOFT_FLOAT
)
25132 as_bad (_("hard-float conflicts with specified fpu"));
25136 /* Using pure-endian doubles (even if soft-float). */
25137 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_endian_pure
))
25138 flags
|= F_VFP_FLOAT
;
25140 #if defined OBJ_ELF
25141 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_arch_maverick
))
25142 flags
|= EF_ARM_MAVERICK_FLOAT
;
25145 case EF_ARM_EABI_VER4
:
25146 case EF_ARM_EABI_VER5
:
25147 /* No additional flags to set. */
25154 bfd_set_private_flags (stdoutput
, flags
);
25156 /* We have run out flags in the COFF header to encode the
25157 status of ATPCS support, so instead we create a dummy,
25158 empty, debug section called .arm.atpcs. */
25163 sec
= bfd_make_section (stdoutput
, ".arm.atpcs");
25167 bfd_set_section_flags
25168 (stdoutput
, sec
, SEC_READONLY
| SEC_DEBUGGING
/* | SEC_HAS_CONTENTS */);
25169 bfd_set_section_size (stdoutput
, sec
, 0);
25170 bfd_set_section_contents (stdoutput
, sec
, NULL
, 0, 0);
25176 /* Record the CPU type as well. */
25177 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_iwmmxt2
))
25178 mach
= bfd_mach_arm_iWMMXt2
;
25179 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_iwmmxt
))
25180 mach
= bfd_mach_arm_iWMMXt
;
25181 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_xscale
))
25182 mach
= bfd_mach_arm_XScale
;
25183 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_maverick
))
25184 mach
= bfd_mach_arm_ep9312
;
25185 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v5e
))
25186 mach
= bfd_mach_arm_5TE
;
25187 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v5
))
25189 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v4t
))
25190 mach
= bfd_mach_arm_5T
;
25192 mach
= bfd_mach_arm_5
;
25194 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v4
))
25196 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v4t
))
25197 mach
= bfd_mach_arm_4T
;
25199 mach
= bfd_mach_arm_4
;
25201 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v3m
))
25202 mach
= bfd_mach_arm_3M
;
25203 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v3
))
25204 mach
= bfd_mach_arm_3
;
25205 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v2s
))
25206 mach
= bfd_mach_arm_2a
;
25207 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v2
))
25208 mach
= bfd_mach_arm_2
;
25210 mach
= bfd_mach_arm_unknown
;
25212 bfd_set_arch_mach (stdoutput
, TARGET_ARCH
, mach
);
/* Command line processing.  */

/* md_parse_option
      Invocation line includes a switch not recognized by the base assembler.
      See if it's a processor-specific option.

      This routine is somewhat complicated by the need for backwards
      compatibility (since older releases of gcc can't be changed).
      The new options try to make the interface as compatible as
      possible with GCC.

      New options (supported) are:

	      -mcpu=<cpu name>		 Assemble for selected processor
	      -march=<architecture name> Assemble for selected architecture
	      -mfpu=<fpu architecture>	 Assemble for selected FPU.
	      -EB/-mbig-endian		 Big-endian
	      -EL/-mlittle-endian	 Little-endian
	      -k			 Generate PIC code
	      -mthumb			 Start in Thumb mode
	      -mthumb-interwork		 Code supports ARM/Thumb interworking

	      -m[no-]warn-deprecated	 Warn about deprecated features
	      -m[no-]warn-syms		 Warn when symbols match instructions

      For now we will also provide support for:

	      -mapcs-32			 32-bit Program counter
	      -mapcs-26			 26-bit Program counter
	      -macps-float		 Floats passed in FP registers
	      -mapcs-reentrant		 Reentrant code

      (sometime these will probably be replaced with -mapcs=<list of options>
      and -matpcs=<list of options>)

      The remaining options are only supported for back-wards compatibility.
      Cpu variants, the arm part is optional:
	      -m[arm]1		      Currently not supported.
	      -m[arm]2, -m[arm]250    Arm 2 and Arm 250 processor
	      -m[arm]3		      Arm 3 processor
	      -m[arm]6[xx],	      Arm 6 processors
	      -m[arm]7[xx][t][[d]m]   Arm 7 processors
	      -m[arm]8[10]	      Arm 8 processors
	      -m[arm]9[20][tdmi]      Arm 9 processors
	      -mstrongarm[110[0]]     StrongARM processors
	      -mxscale		      XScale processors
	      -m[arm]v[2345[t[e]]]    Arm architectures
	      -mall		      All (except the ARM1)
      FP variants:
	      -mfpa10, -mfpa11	      FPA10 and 11 co-processor instructions
	      -mfpe-old		      (No float load/store multiples)
	      -mvfpxd		      VFP Single precision
	      -mno-fpu		      Disable all floating point instructions

      The following CPU names are recognized:
	      arm1, arm2, arm250, arm3, arm6, arm600, arm610, arm620,
	      arm7, arm7m, arm7d, arm7dm, arm7di, arm7dmi, arm70, arm700,
	      arm700i, arm710 arm710t, arm720, arm720t, arm740t, arm710c,
	      arm7100, arm7500, arm7500fe, arm7tdmi, arm8, arm810, arm9,
	      arm920, arm920t, arm940t, arm946, arm966, arm9tdmi, arm9e,
	      arm10t arm10e, arm1020t, arm1020e, arm10200e,
	      strongarm, strongarm110, strongarm1100, strongarm1110, xscale.  */
25281 const char * md_shortopts
= "m:k";
25283 #ifdef ARM_BI_ENDIAN
25284 #define OPTION_EB (OPTION_MD_BASE + 0)
25285 #define OPTION_EL (OPTION_MD_BASE + 1)
25287 #if TARGET_BYTES_BIG_ENDIAN
25288 #define OPTION_EB (OPTION_MD_BASE + 0)
25290 #define OPTION_EL (OPTION_MD_BASE + 1)
25293 #define OPTION_FIX_V4BX (OPTION_MD_BASE + 2)
25295 struct option md_longopts
[] =
25298 {"EB", no_argument
, NULL
, OPTION_EB
},
25301 {"EL", no_argument
, NULL
, OPTION_EL
},
25303 {"fix-v4bx", no_argument
, NULL
, OPTION_FIX_V4BX
},
25304 {NULL
, no_argument
, NULL
, 0}
25308 size_t md_longopts_size
= sizeof (md_longopts
);
/* Table entry describing a simple boolean/int command-line option.  */
struct arm_option_table
{
  const char *option;		/* Option name to match.  */
  const char *help;		/* Help information.  */
  int *var;			/* Variable to change.	*/
  int	value;			/* What to change it to.  */
  const char *deprecated;	/* If non-null, print this message.  */
};
25319 struct arm_option_table arm_opts
[] =
25321 {"k", N_("generate PIC code"), &pic_code
, 1, NULL
},
25322 {"mthumb", N_("assemble Thumb code"), &thumb_mode
, 1, NULL
},
25323 {"mthumb-interwork", N_("support ARM/Thumb interworking"),
25324 &support_interwork
, 1, NULL
},
25325 {"mapcs-32", N_("code uses 32-bit program counter"), &uses_apcs_26
, 0, NULL
},
25326 {"mapcs-26", N_("code uses 26-bit program counter"), &uses_apcs_26
, 1, NULL
},
25327 {"mapcs-float", N_("floating point args are in fp regs"), &uses_apcs_float
,
25329 {"mapcs-reentrant", N_("re-entrant code"), &pic_code
, 1, NULL
},
25330 {"matpcs", N_("code is ATPCS conformant"), &atpcs
, 1, NULL
},
25331 {"mbig-endian", N_("assemble for big-endian"), &target_big_endian
, 1, NULL
},
25332 {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian
, 0,
25335 /* These are recognized by the assembler, but have no affect on code. */
25336 {"mapcs-frame", N_("use frame pointer"), NULL
, 0, NULL
},
25337 {"mapcs-stack-check", N_("use stack size checking"), NULL
, 0, NULL
},
25339 {"mwarn-deprecated", NULL
, &warn_on_deprecated
, 1, NULL
},
25340 {"mno-warn-deprecated", N_("do not warn on use of deprecated feature"),
25341 &warn_on_deprecated
, 0, NULL
},
25342 {"mwarn-syms", N_("warn about symbols that match instruction names [default]"), (int *) (& flag_warn_syms
), TRUE
, NULL
},
25343 {"mno-warn-syms", N_("disable warnings about symobls that match instructions"), (int *) (& flag_warn_syms
), FALSE
, NULL
},
25344 {NULL
, NULL
, NULL
, 0, NULL
}
25347 struct arm_legacy_option_table
25349 const char *option
; /* Option name to match. */
25350 const arm_feature_set
**var
; /* Variable to change. */
25351 const arm_feature_set value
; /* What to change it to. */
25352 const char *deprecated
; /* If non-null, print this message. */
25355 const struct arm_legacy_option_table arm_legacy_opts
[] =
25357 /* DON'T add any new processors to this list -- we want the whole list
25358 to go away... Add them to the processors table instead. */
25359 {"marm1", &legacy_cpu
, ARM_ARCH_V1
, N_("use -mcpu=arm1")},
25360 {"m1", &legacy_cpu
, ARM_ARCH_V1
, N_("use -mcpu=arm1")},
25361 {"marm2", &legacy_cpu
, ARM_ARCH_V2
, N_("use -mcpu=arm2")},
25362 {"m2", &legacy_cpu
, ARM_ARCH_V2
, N_("use -mcpu=arm2")},
25363 {"marm250", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -mcpu=arm250")},
25364 {"m250", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -mcpu=arm250")},
25365 {"marm3", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -mcpu=arm3")},
25366 {"m3", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -mcpu=arm3")},
25367 {"marm6", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm6")},
25368 {"m6", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm6")},
25369 {"marm600", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm600")},
25370 {"m600", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm600")},
25371 {"marm610", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm610")},
25372 {"m610", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm610")},
25373 {"marm620", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm620")},
25374 {"m620", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm620")},
25375 {"marm7", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7")},
25376 {"m7", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7")},
25377 {"marm70", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm70")},
25378 {"m70", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm70")},
25379 {"marm700", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm700")},
25380 {"m700", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm700")},
25381 {"marm700i", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm700i")},
25382 {"m700i", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm700i")},
25383 {"marm710", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm710")},
25384 {"m710", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm710")},
25385 {"marm710c", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm710c")},
25386 {"m710c", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm710c")},
25387 {"marm720", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm720")},
25388 {"m720", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm720")},
25389 {"marm7d", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7d")},
25390 {"m7d", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7d")},
25391 {"marm7di", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7di")},
25392 {"m7di", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7di")},
25393 {"marm7m", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7m")},
25394 {"m7m", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7m")},
25395 {"marm7dm", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7dm")},
25396 {"m7dm", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7dm")},
25397 {"marm7dmi", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7dmi")},
25398 {"m7dmi", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7dmi")},
25399 {"marm7100", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7100")},
25400 {"m7100", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7100")},
25401 {"marm7500", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7500")},
25402 {"m7500", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7500")},
25403 {"marm7500fe", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7500fe")},
25404 {"m7500fe", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7500fe")},
25405 {"marm7t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm7tdmi")},
25406 {"m7t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm7tdmi")},
25407 {"marm7tdmi", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm7tdmi")},
25408 {"m7tdmi", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm7tdmi")},
25409 {"marm710t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm710t")},
25410 {"m710t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm710t")},
25411 {"marm720t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm720t")},
25412 {"m720t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm720t")},
25413 {"marm740t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm740t")},
25414 {"m740t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm740t")},
25415 {"marm8", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=arm8")},
25416 {"m8", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=arm8")},
25417 {"marm810", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=arm810")},
25418 {"m810", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=arm810")},
25419 {"marm9", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm9")},
25420 {"m9", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm9")},
25421 {"marm9tdmi", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm9tdmi")},
25422 {"m9tdmi", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm9tdmi")},
25423 {"marm920", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm920")},
25424 {"m920", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm920")},
25425 {"marm940", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm940")},
25426 {"m940", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm940")},
25427 {"mstrongarm", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=strongarm")},
25428 {"mstrongarm110", &legacy_cpu
, ARM_ARCH_V4
,
25429 N_("use -mcpu=strongarm110")},
25430 {"mstrongarm1100", &legacy_cpu
, ARM_ARCH_V4
,
25431 N_("use -mcpu=strongarm1100")},
25432 {"mstrongarm1110", &legacy_cpu
, ARM_ARCH_V4
,
25433 N_("use -mcpu=strongarm1110")},
25434 {"mxscale", &legacy_cpu
, ARM_ARCH_XSCALE
, N_("use -mcpu=xscale")},
25435 {"miwmmxt", &legacy_cpu
, ARM_ARCH_IWMMXT
, N_("use -mcpu=iwmmxt")},
25436 {"mall", &legacy_cpu
, ARM_ANY
, N_("use -mcpu=all")},
25438 /* Architecture variants -- don't add any more to this list either. */
25439 {"mv2", &legacy_cpu
, ARM_ARCH_V2
, N_("use -march=armv2")},
25440 {"marmv2", &legacy_cpu
, ARM_ARCH_V2
, N_("use -march=armv2")},
25441 {"mv2a", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -march=armv2a")},
25442 {"marmv2a", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -march=armv2a")},
25443 {"mv3", &legacy_cpu
, ARM_ARCH_V3
, N_("use -march=armv3")},
25444 {"marmv3", &legacy_cpu
, ARM_ARCH_V3
, N_("use -march=armv3")},
25445 {"mv3m", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -march=armv3m")},
25446 {"marmv3m", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -march=armv3m")},
25447 {"mv4", &legacy_cpu
, ARM_ARCH_V4
, N_("use -march=armv4")},
25448 {"marmv4", &legacy_cpu
, ARM_ARCH_V4
, N_("use -march=armv4")},
25449 {"mv4t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -march=armv4t")},
25450 {"marmv4t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -march=armv4t")},
25451 {"mv5", &legacy_cpu
, ARM_ARCH_V5
, N_("use -march=armv5")},
25452 {"marmv5", &legacy_cpu
, ARM_ARCH_V5
, N_("use -march=armv5")},
25453 {"mv5t", &legacy_cpu
, ARM_ARCH_V5T
, N_("use -march=armv5t")},
25454 {"marmv5t", &legacy_cpu
, ARM_ARCH_V5T
, N_("use -march=armv5t")},
25455 {"mv5e", &legacy_cpu
, ARM_ARCH_V5TE
, N_("use -march=armv5te")},
25456 {"marmv5e", &legacy_cpu
, ARM_ARCH_V5TE
, N_("use -march=armv5te")},
25458 /* Floating point variants -- don't add any more to this list either. */
25459 {"mfpe-old", &legacy_fpu
, FPU_ARCH_FPE
, N_("use -mfpu=fpe")},
25460 {"mfpa10", &legacy_fpu
, FPU_ARCH_FPA
, N_("use -mfpu=fpa10")},
25461 {"mfpa11", &legacy_fpu
, FPU_ARCH_FPA
, N_("use -mfpu=fpa11")},
25462 {"mno-fpu", &legacy_fpu
, ARM_ARCH_NONE
,
25463 N_("use either -mfpu=softfpa or -mfpu=softvfp")},
25465 {NULL
, NULL
, ARM_ARCH_NONE
, NULL
}
25468 struct arm_cpu_option_table
25472 const arm_feature_set value
;
25473 const arm_feature_set ext
;
25474 /* For some CPUs we assume an FPU unless the user explicitly sets
25476 const arm_feature_set default_fpu
;
25477 /* The canonical name of the CPU, or NULL to use NAME converted to upper
25479 const char *canonical_name
;
25482 /* This list should, at a minimum, contain all the cpu names
25483 recognized by GCC. */
25484 #define ARM_CPU_OPT(N, CN, V, E, DF) { N, sizeof (N) - 1, V, E, DF, CN }
25485 static const struct arm_cpu_option_table arm_cpus
[] =
25487 ARM_CPU_OPT ("all", NULL
, ARM_ANY
,
25490 ARM_CPU_OPT ("arm1", NULL
, ARM_ARCH_V1
,
25493 ARM_CPU_OPT ("arm2", NULL
, ARM_ARCH_V2
,
25496 ARM_CPU_OPT ("arm250", NULL
, ARM_ARCH_V2S
,
25499 ARM_CPU_OPT ("arm3", NULL
, ARM_ARCH_V2S
,
25502 ARM_CPU_OPT ("arm6", NULL
, ARM_ARCH_V3
,
25505 ARM_CPU_OPT ("arm60", NULL
, ARM_ARCH_V3
,
25508 ARM_CPU_OPT ("arm600", NULL
, ARM_ARCH_V3
,
25511 ARM_CPU_OPT ("arm610", NULL
, ARM_ARCH_V3
,
25514 ARM_CPU_OPT ("arm620", NULL
, ARM_ARCH_V3
,
25517 ARM_CPU_OPT ("arm7", NULL
, ARM_ARCH_V3
,
25520 ARM_CPU_OPT ("arm7m", NULL
, ARM_ARCH_V3M
,
25523 ARM_CPU_OPT ("arm7d", NULL
, ARM_ARCH_V3
,
25526 ARM_CPU_OPT ("arm7dm", NULL
, ARM_ARCH_V3M
,
25529 ARM_CPU_OPT ("arm7di", NULL
, ARM_ARCH_V3
,
25532 ARM_CPU_OPT ("arm7dmi", NULL
, ARM_ARCH_V3M
,
25535 ARM_CPU_OPT ("arm70", NULL
, ARM_ARCH_V3
,
25538 ARM_CPU_OPT ("arm700", NULL
, ARM_ARCH_V3
,
25541 ARM_CPU_OPT ("arm700i", NULL
, ARM_ARCH_V3
,
25544 ARM_CPU_OPT ("arm710", NULL
, ARM_ARCH_V3
,
25547 ARM_CPU_OPT ("arm710t", NULL
, ARM_ARCH_V4T
,
25550 ARM_CPU_OPT ("arm720", NULL
, ARM_ARCH_V3
,
25553 ARM_CPU_OPT ("arm720t", NULL
, ARM_ARCH_V4T
,
25556 ARM_CPU_OPT ("arm740t", NULL
, ARM_ARCH_V4T
,
25559 ARM_CPU_OPT ("arm710c", NULL
, ARM_ARCH_V3
,
25562 ARM_CPU_OPT ("arm7100", NULL
, ARM_ARCH_V3
,
25565 ARM_CPU_OPT ("arm7500", NULL
, ARM_ARCH_V3
,
25568 ARM_CPU_OPT ("arm7500fe", NULL
, ARM_ARCH_V3
,
25571 ARM_CPU_OPT ("arm7t", NULL
, ARM_ARCH_V4T
,
25574 ARM_CPU_OPT ("arm7tdmi", NULL
, ARM_ARCH_V4T
,
25577 ARM_CPU_OPT ("arm7tdmi-s", NULL
, ARM_ARCH_V4T
,
25580 ARM_CPU_OPT ("arm8", NULL
, ARM_ARCH_V4
,
25583 ARM_CPU_OPT ("arm810", NULL
, ARM_ARCH_V4
,
25586 ARM_CPU_OPT ("strongarm", NULL
, ARM_ARCH_V4
,
25589 ARM_CPU_OPT ("strongarm1", NULL
, ARM_ARCH_V4
,
25592 ARM_CPU_OPT ("strongarm110", NULL
, ARM_ARCH_V4
,
25595 ARM_CPU_OPT ("strongarm1100", NULL
, ARM_ARCH_V4
,
25598 ARM_CPU_OPT ("strongarm1110", NULL
, ARM_ARCH_V4
,
25601 ARM_CPU_OPT ("arm9", NULL
, ARM_ARCH_V4T
,
25604 ARM_CPU_OPT ("arm920", "ARM920T", ARM_ARCH_V4T
,
25607 ARM_CPU_OPT ("arm920t", NULL
, ARM_ARCH_V4T
,
25610 ARM_CPU_OPT ("arm922t", NULL
, ARM_ARCH_V4T
,
25613 ARM_CPU_OPT ("arm940t", NULL
, ARM_ARCH_V4T
,
25616 ARM_CPU_OPT ("arm9tdmi", NULL
, ARM_ARCH_V4T
,
25619 ARM_CPU_OPT ("fa526", NULL
, ARM_ARCH_V4
,
25622 ARM_CPU_OPT ("fa626", NULL
, ARM_ARCH_V4
,
25626 /* For V5 or later processors we default to using VFP; but the user
25627 should really set the FPU type explicitly. */
25628 ARM_CPU_OPT ("arm9e-r0", NULL
, ARM_ARCH_V5TExP
,
25631 ARM_CPU_OPT ("arm9e", NULL
, ARM_ARCH_V5TE
,
25634 ARM_CPU_OPT ("arm926ej", "ARM926EJ-S", ARM_ARCH_V5TEJ
,
25637 ARM_CPU_OPT ("arm926ejs", "ARM926EJ-S", ARM_ARCH_V5TEJ
,
25640 ARM_CPU_OPT ("arm926ej-s", NULL
, ARM_ARCH_V5TEJ
,
25643 ARM_CPU_OPT ("arm946e-r0", NULL
, ARM_ARCH_V5TExP
,
25646 ARM_CPU_OPT ("arm946e", "ARM946E-S", ARM_ARCH_V5TE
,
25649 ARM_CPU_OPT ("arm946e-s", NULL
, ARM_ARCH_V5TE
,
25652 ARM_CPU_OPT ("arm966e-r0", NULL
, ARM_ARCH_V5TExP
,
25655 ARM_CPU_OPT ("arm966e", "ARM966E-S", ARM_ARCH_V5TE
,
25658 ARM_CPU_OPT ("arm966e-s", NULL
, ARM_ARCH_V5TE
,
25661 ARM_CPU_OPT ("arm968e-s", NULL
, ARM_ARCH_V5TE
,
25664 ARM_CPU_OPT ("arm10t", NULL
, ARM_ARCH_V5T
,
25667 ARM_CPU_OPT ("arm10tdmi", NULL
, ARM_ARCH_V5T
,
25670 ARM_CPU_OPT ("arm10e", NULL
, ARM_ARCH_V5TE
,
25673 ARM_CPU_OPT ("arm1020", "ARM1020E", ARM_ARCH_V5TE
,
25676 ARM_CPU_OPT ("arm1020t", NULL
, ARM_ARCH_V5T
,
25679 ARM_CPU_OPT ("arm1020e", NULL
, ARM_ARCH_V5TE
,
25682 ARM_CPU_OPT ("arm1022e", NULL
, ARM_ARCH_V5TE
,
25685 ARM_CPU_OPT ("arm1026ejs", "ARM1026EJ-S", ARM_ARCH_V5TEJ
,
25688 ARM_CPU_OPT ("arm1026ej-s", NULL
, ARM_ARCH_V5TEJ
,
25691 ARM_CPU_OPT ("fa606te", NULL
, ARM_ARCH_V5TE
,
25694 ARM_CPU_OPT ("fa616te", NULL
, ARM_ARCH_V5TE
,
25697 ARM_CPU_OPT ("fa626te", NULL
, ARM_ARCH_V5TE
,
25700 ARM_CPU_OPT ("fmp626", NULL
, ARM_ARCH_V5TE
,
25703 ARM_CPU_OPT ("fa726te", NULL
, ARM_ARCH_V5TE
,
25706 ARM_CPU_OPT ("arm1136js", "ARM1136J-S", ARM_ARCH_V6
,
25709 ARM_CPU_OPT ("arm1136j-s", NULL
, ARM_ARCH_V6
,
25712 ARM_CPU_OPT ("arm1136jfs", "ARM1136JF-S", ARM_ARCH_V6
,
25715 ARM_CPU_OPT ("arm1136jf-s", NULL
, ARM_ARCH_V6
,
25718 ARM_CPU_OPT ("mpcore", "MPCore", ARM_ARCH_V6K
,
25721 ARM_CPU_OPT ("mpcorenovfp", "MPCore", ARM_ARCH_V6K
,
25724 ARM_CPU_OPT ("arm1156t2-s", NULL
, ARM_ARCH_V6T2
,
25727 ARM_CPU_OPT ("arm1156t2f-s", NULL
, ARM_ARCH_V6T2
,
25730 ARM_CPU_OPT ("arm1176jz-s", NULL
, ARM_ARCH_V6KZ
,
25733 ARM_CPU_OPT ("arm1176jzf-s", NULL
, ARM_ARCH_V6KZ
,
25736 ARM_CPU_OPT ("cortex-a5", "Cortex-A5", ARM_ARCH_V7A
,
25737 ARM_FEATURE_CORE_LOW (ARM_EXT_MP
| ARM_EXT_SEC
),
25739 ARM_CPU_OPT ("cortex-a7", "Cortex-A7", ARM_ARCH_V7VE
,
25741 FPU_ARCH_NEON_VFP_V4
),
25742 ARM_CPU_OPT ("cortex-a8", "Cortex-A8", ARM_ARCH_V7A
,
25743 ARM_FEATURE_CORE_LOW (ARM_EXT_SEC
),
25744 ARM_FEATURE_COPROC (FPU_VFP_V3
| FPU_NEON_EXT_V1
)),
25745 ARM_CPU_OPT ("cortex-a9", "Cortex-A9", ARM_ARCH_V7A
,
25746 ARM_FEATURE_CORE_LOW (ARM_EXT_MP
| ARM_EXT_SEC
),
25747 ARM_FEATURE_COPROC (FPU_VFP_V3
| FPU_NEON_EXT_V1
)),
25748 ARM_CPU_OPT ("cortex-a12", "Cortex-A12", ARM_ARCH_V7VE
,
25750 FPU_ARCH_NEON_VFP_V4
),
25751 ARM_CPU_OPT ("cortex-a15", "Cortex-A15", ARM_ARCH_V7VE
,
25753 FPU_ARCH_NEON_VFP_V4
),
25754 ARM_CPU_OPT ("cortex-a17", "Cortex-A17", ARM_ARCH_V7VE
,
25756 FPU_ARCH_NEON_VFP_V4
),
25757 ARM_CPU_OPT ("cortex-a32", "Cortex-A32", ARM_ARCH_V8A
,
25758 ARM_FEATURE_COPROC (CRC_EXT_ARMV8
),
25759 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
),
25760 ARM_CPU_OPT ("cortex-a35", "Cortex-A35", ARM_ARCH_V8A
,
25761 ARM_FEATURE_COPROC (CRC_EXT_ARMV8
),
25762 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
),
25763 ARM_CPU_OPT ("cortex-a53", "Cortex-A53", ARM_ARCH_V8A
,
25764 ARM_FEATURE_COPROC (CRC_EXT_ARMV8
),
25765 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
),
25766 ARM_CPU_OPT ("cortex-a57", "Cortex-A57", ARM_ARCH_V8A
,
25767 ARM_FEATURE_COPROC (CRC_EXT_ARMV8
),
25768 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
),
25769 ARM_CPU_OPT ("cortex-a72", "Cortex-A72", ARM_ARCH_V8A
,
25770 ARM_FEATURE_COPROC (CRC_EXT_ARMV8
),
25771 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
),
25772 ARM_CPU_OPT ("cortex-a73", "Cortex-A73", ARM_ARCH_V8A
,
25773 ARM_FEATURE_COPROC (CRC_EXT_ARMV8
),
25774 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
),
25775 ARM_CPU_OPT ("cortex-r4", "Cortex-R4", ARM_ARCH_V7R
,
25778 ARM_CPU_OPT ("cortex-r4f", "Cortex-R4F", ARM_ARCH_V7R
,
25780 FPU_ARCH_VFP_V3D16
),
25781 ARM_CPU_OPT ("cortex-r5", "Cortex-R5", ARM_ARCH_V7R
,
25782 ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV
),
25784 ARM_CPU_OPT ("cortex-r7", "Cortex-R7", ARM_ARCH_V7R
,
25785 ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV
),
25786 FPU_ARCH_VFP_V3D16
),
25787 ARM_CPU_OPT ("cortex-r8", "Cortex-R8", ARM_ARCH_V7R
,
25788 ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV
),
25789 FPU_ARCH_VFP_V3D16
),
25790 ARM_CPU_OPT ("cortex-m33", "Cortex-M33", ARM_ARCH_V8M_MAIN
,
25791 ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP
| ARM_EXT_V6_DSP
),
25793 ARM_CPU_OPT ("cortex-m23", "Cortex-M23", ARM_ARCH_V8M_BASE
,
25796 ARM_CPU_OPT ("cortex-m7", "Cortex-M7", ARM_ARCH_V7EM
,
25799 ARM_CPU_OPT ("cortex-m4", "Cortex-M4", ARM_ARCH_V7EM
,
25802 ARM_CPU_OPT ("cortex-m3", "Cortex-M3", ARM_ARCH_V7M
,
25805 ARM_CPU_OPT ("cortex-m1", "Cortex-M1", ARM_ARCH_V6SM
,
25808 ARM_CPU_OPT ("cortex-m0", "Cortex-M0", ARM_ARCH_V6SM
,
25811 ARM_CPU_OPT ("cortex-m0plus", "Cortex-M0+", ARM_ARCH_V6SM
,
25814 ARM_CPU_OPT ("exynos-m1", "Samsung Exynos M1", ARM_ARCH_V8A
,
25815 ARM_FEATURE_COPROC (CRC_EXT_ARMV8
),
25816 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
),
25818 /* ??? XSCALE is really an architecture. */
25819 ARM_CPU_OPT ("xscale", NULL
, ARM_ARCH_XSCALE
,
25823 /* ??? iwmmxt is not a processor. */
25824 ARM_CPU_OPT ("iwmmxt", NULL
, ARM_ARCH_IWMMXT
,
25827 ARM_CPU_OPT ("iwmmxt2", NULL
, ARM_ARCH_IWMMXT2
,
25830 ARM_CPU_OPT ("i80200", NULL
, ARM_ARCH_XSCALE
,
25835 ARM_CPU_OPT ("ep9312", "ARM920T",
25836 ARM_FEATURE_LOW (ARM_AEXT_V4T
, ARM_CEXT_MAVERICK
),
25837 ARM_ARCH_NONE
, FPU_ARCH_MAVERICK
),
25839 /* Marvell processors. */
25840 ARM_CPU_OPT ("marvell-pj4", NULL
, ARM_ARCH_V7A
,
25841 ARM_FEATURE_CORE_LOW (ARM_EXT_MP
| ARM_EXT_SEC
),
25842 FPU_ARCH_VFP_V3D16
),
25843 ARM_CPU_OPT ("marvell-whitney", NULL
, ARM_ARCH_V7A
,
25844 ARM_FEATURE_CORE_LOW (ARM_EXT_MP
| ARM_EXT_SEC
),
25845 FPU_ARCH_NEON_VFP_V4
),
25847 /* APM X-Gene family. */
25848 ARM_CPU_OPT ("xgene1", "APM X-Gene 1", ARM_ARCH_V8A
,
25850 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
),
25851 ARM_CPU_OPT ("xgene2", "APM X-Gene 2", ARM_ARCH_V8A
,
25852 ARM_FEATURE_COPROC (CRC_EXT_ARMV8
),
25853 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
),
25855 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
, ARM_ARCH_NONE
, NULL
}
25859 struct arm_arch_option_table
25863 const arm_feature_set value
;
25864 const arm_feature_set default_fpu
;
25867 /* This list should, at a minimum, contain all the architecture names
25868 recognized by GCC. */
25869 #define ARM_ARCH_OPT(N, V, DF) { N, sizeof (N) - 1, V, DF }
25870 static const struct arm_arch_option_table arm_archs
[] =
25872 ARM_ARCH_OPT ("all", ARM_ANY
, FPU_ARCH_FPA
),
25873 ARM_ARCH_OPT ("armv1", ARM_ARCH_V1
, FPU_ARCH_FPA
),
25874 ARM_ARCH_OPT ("armv2", ARM_ARCH_V2
, FPU_ARCH_FPA
),
25875 ARM_ARCH_OPT ("armv2a", ARM_ARCH_V2S
, FPU_ARCH_FPA
),
25876 ARM_ARCH_OPT ("armv2s", ARM_ARCH_V2S
, FPU_ARCH_FPA
),
25877 ARM_ARCH_OPT ("armv3", ARM_ARCH_V3
, FPU_ARCH_FPA
),
25878 ARM_ARCH_OPT ("armv3m", ARM_ARCH_V3M
, FPU_ARCH_FPA
),
25879 ARM_ARCH_OPT ("armv4", ARM_ARCH_V4
, FPU_ARCH_FPA
),
25880 ARM_ARCH_OPT ("armv4xm", ARM_ARCH_V4xM
, FPU_ARCH_FPA
),
25881 ARM_ARCH_OPT ("armv4t", ARM_ARCH_V4T
, FPU_ARCH_FPA
),
25882 ARM_ARCH_OPT ("armv4txm", ARM_ARCH_V4TxM
, FPU_ARCH_FPA
),
25883 ARM_ARCH_OPT ("armv5", ARM_ARCH_V5
, FPU_ARCH_VFP
),
25884 ARM_ARCH_OPT ("armv5t", ARM_ARCH_V5T
, FPU_ARCH_VFP
),
25885 ARM_ARCH_OPT ("armv5txm", ARM_ARCH_V5TxM
, FPU_ARCH_VFP
),
25886 ARM_ARCH_OPT ("armv5te", ARM_ARCH_V5TE
, FPU_ARCH_VFP
),
25887 ARM_ARCH_OPT ("armv5texp", ARM_ARCH_V5TExP
, FPU_ARCH_VFP
),
25888 ARM_ARCH_OPT ("armv5tej", ARM_ARCH_V5TEJ
, FPU_ARCH_VFP
),
25889 ARM_ARCH_OPT ("armv6", ARM_ARCH_V6
, FPU_ARCH_VFP
),
25890 ARM_ARCH_OPT ("armv6j", ARM_ARCH_V6
, FPU_ARCH_VFP
),
25891 ARM_ARCH_OPT ("armv6k", ARM_ARCH_V6K
, FPU_ARCH_VFP
),
25892 ARM_ARCH_OPT ("armv6z", ARM_ARCH_V6Z
, FPU_ARCH_VFP
),
25893 /* The official spelling of this variant is ARMv6KZ, the name "armv6zk" is
25894 kept to preserve existing behaviour. */
25895 ARM_ARCH_OPT ("armv6kz", ARM_ARCH_V6KZ
, FPU_ARCH_VFP
),
25896 ARM_ARCH_OPT ("armv6zk", ARM_ARCH_V6KZ
, FPU_ARCH_VFP
),
25897 ARM_ARCH_OPT ("armv6t2", ARM_ARCH_V6T2
, FPU_ARCH_VFP
),
25898 ARM_ARCH_OPT ("armv6kt2", ARM_ARCH_V6KT2
, FPU_ARCH_VFP
),
25899 ARM_ARCH_OPT ("armv6zt2", ARM_ARCH_V6ZT2
, FPU_ARCH_VFP
),
25900 /* The official spelling of this variant is ARMv6KZ, the name "armv6zkt2" is
25901 kept to preserve existing behaviour. */
25902 ARM_ARCH_OPT ("armv6kzt2", ARM_ARCH_V6KZT2
, FPU_ARCH_VFP
),
25903 ARM_ARCH_OPT ("armv6zkt2", ARM_ARCH_V6KZT2
, FPU_ARCH_VFP
),
25904 ARM_ARCH_OPT ("armv6-m", ARM_ARCH_V6M
, FPU_ARCH_VFP
),
25905 ARM_ARCH_OPT ("armv6s-m", ARM_ARCH_V6SM
, FPU_ARCH_VFP
),
25906 ARM_ARCH_OPT ("armv7", ARM_ARCH_V7
, FPU_ARCH_VFP
),
25907 /* The official spelling of the ARMv7 profile variants is the dashed form.
25908 Accept the non-dashed form for compatibility with old toolchains. */
25909 ARM_ARCH_OPT ("armv7a", ARM_ARCH_V7A
, FPU_ARCH_VFP
),
25910 ARM_ARCH_OPT ("armv7ve", ARM_ARCH_V7VE
, FPU_ARCH_VFP
),
25911 ARM_ARCH_OPT ("armv7r", ARM_ARCH_V7R
, FPU_ARCH_VFP
),
25912 ARM_ARCH_OPT ("armv7m", ARM_ARCH_V7M
, FPU_ARCH_VFP
),
25913 ARM_ARCH_OPT ("armv7-a", ARM_ARCH_V7A
, FPU_ARCH_VFP
),
25914 ARM_ARCH_OPT ("armv7-r", ARM_ARCH_V7R
, FPU_ARCH_VFP
),
25915 ARM_ARCH_OPT ("armv7-m", ARM_ARCH_V7M
, FPU_ARCH_VFP
),
25916 ARM_ARCH_OPT ("armv7e-m", ARM_ARCH_V7EM
, FPU_ARCH_VFP
),
25917 ARM_ARCH_OPT ("armv8-m.base", ARM_ARCH_V8M_BASE
, FPU_ARCH_VFP
),
25918 ARM_ARCH_OPT ("armv8-m.main", ARM_ARCH_V8M_MAIN
, FPU_ARCH_VFP
),
25919 ARM_ARCH_OPT ("armv8-a", ARM_ARCH_V8A
, FPU_ARCH_VFP
),
25920 ARM_ARCH_OPT ("armv8.1-a", ARM_ARCH_V8_1A
, FPU_ARCH_VFP
),
25921 ARM_ARCH_OPT ("armv8.2-a", ARM_ARCH_V8_2A
, FPU_ARCH_VFP
),
25922 ARM_ARCH_OPT ("armv8.3-a", ARM_ARCH_V8_3A
, FPU_ARCH_VFP
),
25923 ARM_ARCH_OPT ("xscale", ARM_ARCH_XSCALE
, FPU_ARCH_VFP
),
25924 ARM_ARCH_OPT ("iwmmxt", ARM_ARCH_IWMMXT
, FPU_ARCH_VFP
),
25925 ARM_ARCH_OPT ("iwmmxt2", ARM_ARCH_IWMMXT2
,FPU_ARCH_VFP
),
25926 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
25928 #undef ARM_ARCH_OPT
25930 /* ISA extensions in the co-processor and main instruction set space. */
25931 struct arm_option_extension_value_table
25935 const arm_feature_set merge_value
;
25936 const arm_feature_set clear_value
;
25937 /* List of architectures for which an extension is available. ARM_ARCH_NONE
25938 indicates that an extension is available for all architectures while
25939 ARM_ANY marks an empty entry. */
25940 const arm_feature_set allowed_archs
[2];
25943 /* The following table must be in alphabetical order with a NULL last entry.
25945 #define ARM_EXT_OPT(N, M, C, AA) { N, sizeof (N) - 1, M, C, { AA, ARM_ANY } }
25946 #define ARM_EXT_OPT2(N, M, C, AA1, AA2) { N, sizeof (N) - 1, M, C, {AA1, AA2} }
25947 static const struct arm_option_extension_value_table arm_extensions
[] =
25949 ARM_EXT_OPT ("crc", ARCH_CRC_ARMV8
, ARM_FEATURE_COPROC (CRC_EXT_ARMV8
),
25950 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
)),
25951 ARM_EXT_OPT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
,
25952 ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8
),
25953 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
)),
25954 ARM_EXT_OPT ("dsp", ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP
| ARM_EXT_V6_DSP
),
25955 ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP
| ARM_EXT_V6_DSP
),
25956 ARM_FEATURE_CORE (ARM_EXT_V7M
, ARM_EXT2_V8M
)),
25957 ARM_EXT_OPT ("fp", FPU_ARCH_VFP_ARMV8
, ARM_FEATURE_COPROC (FPU_VFP_ARMV8
),
25958 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
)),
25959 ARM_EXT_OPT ("fp16", ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
),
25960 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
),
25962 ARM_EXT_OPT2 ("idiv", ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV
| ARM_EXT_DIV
),
25963 ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV
| ARM_EXT_DIV
),
25964 ARM_FEATURE_CORE_LOW (ARM_EXT_V7A
),
25965 ARM_FEATURE_CORE_LOW (ARM_EXT_V7R
)),
25966 ARM_EXT_OPT ("iwmmxt",ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT
),
25967 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT
), ARM_ARCH_NONE
),
25968 ARM_EXT_OPT ("iwmmxt2", ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2
),
25969 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2
), ARM_ARCH_NONE
),
25970 ARM_EXT_OPT ("maverick", ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK
),
25971 ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK
), ARM_ARCH_NONE
),
25972 ARM_EXT_OPT2 ("mp", ARM_FEATURE_CORE_LOW (ARM_EXT_MP
),
25973 ARM_FEATURE_CORE_LOW (ARM_EXT_MP
),
25974 ARM_FEATURE_CORE_LOW (ARM_EXT_V7A
),
25975 ARM_FEATURE_CORE_LOW (ARM_EXT_V7R
)),
25976 ARM_EXT_OPT ("os", ARM_FEATURE_CORE_LOW (ARM_EXT_OS
),
25977 ARM_FEATURE_CORE_LOW (ARM_EXT_OS
),
25978 ARM_FEATURE_CORE_LOW (ARM_EXT_V6M
)),
25979 ARM_EXT_OPT ("pan", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PAN
),
25980 ARM_FEATURE (ARM_EXT_V8
, ARM_EXT2_PAN
, 0),
25981 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
)),
25982 ARM_EXT_OPT ("ras", ARM_FEATURE_CORE_HIGH (ARM_EXT2_RAS
),
25983 ARM_FEATURE (ARM_EXT_V8
, ARM_EXT2_RAS
, 0),
25984 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
)),
25985 ARM_EXT_OPT ("rdma", FPU_ARCH_NEON_VFP_ARMV8_1
,
25986 ARM_FEATURE_COPROC (FPU_NEON_ARMV8
| FPU_NEON_EXT_RDMA
),
25987 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
)),
25988 ARM_EXT_OPT2 ("sec", ARM_FEATURE_CORE_LOW (ARM_EXT_SEC
),
25989 ARM_FEATURE_CORE_LOW (ARM_EXT_SEC
),
25990 ARM_FEATURE_CORE_LOW (ARM_EXT_V6K
),
25991 ARM_FEATURE_CORE_LOW (ARM_EXT_V7A
)),
25992 ARM_EXT_OPT ("simd", FPU_ARCH_NEON_VFP_ARMV8
,
25993 ARM_FEATURE_COPROC (FPU_NEON_ARMV8
),
25994 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
)),
25995 ARM_EXT_OPT ("virt", ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT
| ARM_EXT_ADIV
25997 ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT
),
25998 ARM_FEATURE_CORE_LOW (ARM_EXT_V7A
)),
25999 ARM_EXT_OPT ("xscale",ARM_FEATURE_COPROC (ARM_CEXT_XSCALE
),
26000 ARM_FEATURE_COPROC (ARM_CEXT_XSCALE
), ARM_ARCH_NONE
),
26001 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
, { ARM_ARCH_NONE
, ARM_ARCH_NONE
} }
26005 /* ISA floating-point and Advanced SIMD extensions. */
26006 struct arm_option_fpu_value_table
26009 const arm_feature_set value
;
26012 /* This list should, at a minimum, contain all the fpu names
26013 recognized by GCC. */
26014 static const struct arm_option_fpu_value_table arm_fpus
[] =
26016 {"softfpa", FPU_NONE
},
26017 {"fpe", FPU_ARCH_FPE
},
26018 {"fpe2", FPU_ARCH_FPE
},
26019 {"fpe3", FPU_ARCH_FPA
}, /* Third release supports LFM/SFM. */
26020 {"fpa", FPU_ARCH_FPA
},
26021 {"fpa10", FPU_ARCH_FPA
},
26022 {"fpa11", FPU_ARCH_FPA
},
26023 {"arm7500fe", FPU_ARCH_FPA
},
26024 {"softvfp", FPU_ARCH_VFP
},
26025 {"softvfp+vfp", FPU_ARCH_VFP_V2
},
26026 {"vfp", FPU_ARCH_VFP_V2
},
26027 {"vfp9", FPU_ARCH_VFP_V2
},
26028 {"vfp3", FPU_ARCH_VFP_V3
}, /* Undocumented, use vfpv3. */
26029 {"vfp10", FPU_ARCH_VFP_V2
},
26030 {"vfp10-r0", FPU_ARCH_VFP_V1
},
26031 {"vfpxd", FPU_ARCH_VFP_V1xD
},
26032 {"vfpv2", FPU_ARCH_VFP_V2
},
26033 {"vfpv3", FPU_ARCH_VFP_V3
},
26034 {"vfpv3-fp16", FPU_ARCH_VFP_V3_FP16
},
26035 {"vfpv3-d16", FPU_ARCH_VFP_V3D16
},
26036 {"vfpv3-d16-fp16", FPU_ARCH_VFP_V3D16_FP16
},
26037 {"vfpv3xd", FPU_ARCH_VFP_V3xD
},
26038 {"vfpv3xd-fp16", FPU_ARCH_VFP_V3xD_FP16
},
26039 {"arm1020t", FPU_ARCH_VFP_V1
},
26040 {"arm1020e", FPU_ARCH_VFP_V2
},
26041 {"arm1136jfs", FPU_ARCH_VFP_V2
}, /* Undocumented, use arm1136jf-s. */
26042 {"arm1136jf-s", FPU_ARCH_VFP_V2
},
26043 {"maverick", FPU_ARCH_MAVERICK
},
26044 {"neon", FPU_ARCH_VFP_V3_PLUS_NEON_V1
},
26045 {"neon-vfpv3", FPU_ARCH_VFP_V3_PLUS_NEON_V1
},
26046 {"neon-fp16", FPU_ARCH_NEON_FP16
},
26047 {"vfpv4", FPU_ARCH_VFP_V4
},
26048 {"vfpv4-d16", FPU_ARCH_VFP_V4D16
},
26049 {"fpv4-sp-d16", FPU_ARCH_VFP_V4_SP_D16
},
26050 {"fpv5-d16", FPU_ARCH_VFP_V5D16
},
26051 {"fpv5-sp-d16", FPU_ARCH_VFP_V5_SP_D16
},
26052 {"neon-vfpv4", FPU_ARCH_NEON_VFP_V4
},
26053 {"fp-armv8", FPU_ARCH_VFP_ARMV8
},
26054 {"neon-fp-armv8", FPU_ARCH_NEON_VFP_ARMV8
},
26055 {"crypto-neon-fp-armv8",
26056 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
},
26057 {"neon-fp-armv8.1", FPU_ARCH_NEON_VFP_ARMV8_1
},
26058 {"crypto-neon-fp-armv8.1",
26059 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_1
},
26060 {NULL
, ARM_ARCH_NONE
}
26063 struct arm_option_value_table
26069 static const struct arm_option_value_table arm_float_abis
[] =
26071 {"hard", ARM_FLOAT_ABI_HARD
},
26072 {"softfp", ARM_FLOAT_ABI_SOFTFP
},
26073 {"soft", ARM_FLOAT_ABI_SOFT
},
26078 /* We only know how to output GNU and ver 4/5 (AAELF) formats. */
26079 static const struct arm_option_value_table arm_eabis
[] =
26081 {"gnu", EF_ARM_EABI_UNKNOWN
},
26082 {"4", EF_ARM_EABI_VER4
},
26083 {"5", EF_ARM_EABI_VER5
},
/* One entry of the long-option table: a prefix to match against the
   command line, its help text, a sub-option parser callback and an
   optional deprecation message.  */
struct arm_long_option_table
{
  const char * option;			/* Substring to match.  */
  const char * help;			/* Help information.  */
  int (* func) (const char * subopt);	/* Function to decode sub-option.  */
  const char * deprecated;		/* If non-null, print this message.  */
};
26097 arm_parse_extension (const char *str
, const arm_feature_set
*opt_set
,
26098 arm_feature_set
**ext_set_p
)
26100 /* We insist on extensions being specified in alphabetical order, and with
26101 extensions being added before being removed. We achieve this by having
26102 the global ARM_EXTENSIONS table in alphabetical order, and using the
26103 ADDING_VALUE variable to indicate whether we are adding an extension (1)
26104 or removing it (0) and only allowing it to change in the order
26106 const struct arm_option_extension_value_table
* opt
= NULL
;
26107 const arm_feature_set arm_any
= ARM_ANY
;
26108 int adding_value
= -1;
26112 *ext_set_p
= XNEW (arm_feature_set
);
26113 **ext_set_p
= arm_arch_none
;
26116 while (str
!= NULL
&& *str
!= 0)
26123 as_bad (_("invalid architectural extension"));
26128 ext
= strchr (str
, '+');
26133 len
= strlen (str
);
26135 if (len
>= 2 && strncmp (str
, "no", 2) == 0)
26137 if (adding_value
!= 0)
26140 opt
= arm_extensions
;
26148 if (adding_value
== -1)
26151 opt
= arm_extensions
;
26153 else if (adding_value
!= 1)
26155 as_bad (_("must specify extensions to add before specifying "
26156 "those to remove"));
26163 as_bad (_("missing architectural extension"));
26167 gas_assert (adding_value
!= -1);
26168 gas_assert (opt
!= NULL
);
26170 /* Scan over the options table trying to find an exact match. */
26171 for (; opt
->name
!= NULL
; opt
++)
26172 if (opt
->name_len
== len
&& strncmp (opt
->name
, str
, len
) == 0)
26174 int i
, nb_allowed_archs
=
26175 sizeof (opt
->allowed_archs
) / sizeof (opt
->allowed_archs
[0]);
26176 /* Check we can apply the extension to this architecture. */
26177 for (i
= 0; i
< nb_allowed_archs
; i
++)
26180 if (ARM_FEATURE_EQUAL (opt
->allowed_archs
[i
], arm_any
))
26182 if (ARM_FSET_CPU_SUBSET (opt
->allowed_archs
[i
], *opt_set
))
26185 if (i
== nb_allowed_archs
)
26187 as_bad (_("extension does not apply to the base architecture"));
26191 /* Add or remove the extension. */
26193 ARM_MERGE_FEATURE_SETS (**ext_set_p
, **ext_set_p
,
26196 ARM_CLEAR_FEATURE (**ext_set_p
, **ext_set_p
, opt
->clear_value
);
26201 if (opt
->name
== NULL
)
26203 /* Did we fail to find an extension because it wasn't specified in
26204 alphabetical order, or because it does not exist? */
26206 for (opt
= arm_extensions
; opt
->name
!= NULL
; opt
++)
26207 if (opt
->name_len
== len
&& strncmp (opt
->name
, str
, len
) == 0)
26210 if (opt
->name
== NULL
)
26211 as_bad (_("unknown architectural extension `%s'"), str
);
26213 as_bad (_("architectural extensions must be specified in "
26214 "alphabetical order"));
26220 /* We should skip the extension we've just matched the next time
26232 arm_parse_cpu (const char *str
)
26234 const struct arm_cpu_option_table
*opt
;
26235 const char *ext
= strchr (str
, '+');
26241 len
= strlen (str
);
26245 as_bad (_("missing cpu name `%s'"), str
);
26249 for (opt
= arm_cpus
; opt
->name
!= NULL
; opt
++)
26250 if (opt
->name_len
== len
&& strncmp (opt
->name
, str
, len
) == 0)
26252 mcpu_cpu_opt
= &opt
->value
;
26253 if (!dyn_mcpu_ext_opt
)
26254 dyn_mcpu_ext_opt
= XNEW (arm_feature_set
);
26255 *dyn_mcpu_ext_opt
= opt
->ext
;
26256 mcpu_fpu_opt
= &opt
->default_fpu
;
26257 if (opt
->canonical_name
)
26259 gas_assert (sizeof selected_cpu_name
> strlen (opt
->canonical_name
));
26260 strcpy (selected_cpu_name
, opt
->canonical_name
);
26266 if (len
>= sizeof selected_cpu_name
)
26267 len
= (sizeof selected_cpu_name
) - 1;
26269 for (i
= 0; i
< len
; i
++)
26270 selected_cpu_name
[i
] = TOUPPER (opt
->name
[i
]);
26271 selected_cpu_name
[i
] = 0;
26275 return arm_parse_extension (ext
, mcpu_cpu_opt
, &dyn_mcpu_ext_opt
);
26280 as_bad (_("unknown cpu `%s'"), str
);
26285 arm_parse_arch (const char *str
)
26287 const struct arm_arch_option_table
*opt
;
26288 const char *ext
= strchr (str
, '+');
26294 len
= strlen (str
);
26298 as_bad (_("missing architecture name `%s'"), str
);
26302 for (opt
= arm_archs
; opt
->name
!= NULL
; opt
++)
26303 if (opt
->name_len
== len
&& strncmp (opt
->name
, str
, len
) == 0)
26305 march_cpu_opt
= &opt
->value
;
26306 march_fpu_opt
= &opt
->default_fpu
;
26307 strcpy (selected_cpu_name
, opt
->name
);
26310 return arm_parse_extension (ext
, march_cpu_opt
, &dyn_march_ext_opt
);
26315 as_bad (_("unknown architecture `%s'\n"), str
);
26320 arm_parse_fpu (const char * str
)
26322 const struct arm_option_fpu_value_table
* opt
;
26324 for (opt
= arm_fpus
; opt
->name
!= NULL
; opt
++)
26325 if (streq (opt
->name
, str
))
26327 mfpu_opt
= &opt
->value
;
26331 as_bad (_("unknown floating point format `%s'\n"), str
);
26336 arm_parse_float_abi (const char * str
)
26338 const struct arm_option_value_table
* opt
;
26340 for (opt
= arm_float_abis
; opt
->name
!= NULL
; opt
++)
26341 if (streq (opt
->name
, str
))
26343 mfloat_abi_opt
= opt
->value
;
26347 as_bad (_("unknown floating point abi `%s'\n"), str
);
26353 arm_parse_eabi (const char * str
)
26355 const struct arm_option_value_table
*opt
;
26357 for (opt
= arm_eabis
; opt
->name
!= NULL
; opt
++)
26358 if (streq (opt
->name
, str
))
26360 meabi_flags
= opt
->value
;
26363 as_bad (_("unknown EABI `%s'\n"), str
);
26369 arm_parse_it_mode (const char * str
)
26371 bfd_boolean ret
= TRUE
;
26373 if (streq ("arm", str
))
26374 implicit_it_mode
= IMPLICIT_IT_MODE_ARM
;
26375 else if (streq ("thumb", str
))
26376 implicit_it_mode
= IMPLICIT_IT_MODE_THUMB
;
26377 else if (streq ("always", str
))
26378 implicit_it_mode
= IMPLICIT_IT_MODE_ALWAYS
;
26379 else if (streq ("never", str
))
26380 implicit_it_mode
= IMPLICIT_IT_MODE_NEVER
;
26383 as_bad (_("unknown implicit IT mode `%s', should be "\
26384 "arm, thumb, always, or never."), str
);
26392 arm_ccs_mode (const char * unused ATTRIBUTE_UNUSED
)
26394 codecomposer_syntax
= TRUE
;
26395 arm_comment_chars
[0] = ';';
26396 arm_line_separator_chars
[0] = 0;
26400 struct arm_long_option_table arm_long_opts
[] =
26402 {"mcpu=", N_("<cpu name>\t assemble for CPU <cpu name>"),
26403 arm_parse_cpu
, NULL
},
26404 {"march=", N_("<arch name>\t assemble for architecture <arch name>"),
26405 arm_parse_arch
, NULL
},
26406 {"mfpu=", N_("<fpu name>\t assemble for FPU architecture <fpu name>"),
26407 arm_parse_fpu
, NULL
},
26408 {"mfloat-abi=", N_("<abi>\t assemble for floating point ABI <abi>"),
26409 arm_parse_float_abi
, NULL
},
26411 {"meabi=", N_("<ver>\t\t assemble for eabi version <ver>"),
26412 arm_parse_eabi
, NULL
},
26414 {"mimplicit-it=", N_("<mode>\t controls implicit insertion of IT instructions"),
26415 arm_parse_it_mode
, NULL
},
26416 {"mccs", N_("\t\t\t TI CodeComposer Studio syntax compatibility mode"),
26417 arm_ccs_mode
, NULL
},
26418 {NULL
, NULL
, 0, NULL
}
26422 md_parse_option (int c
, const char * arg
)
26424 struct arm_option_table
*opt
;
26425 const struct arm_legacy_option_table
*fopt
;
26426 struct arm_long_option_table
*lopt
;
26432 target_big_endian
= 1;
26438 target_big_endian
= 0;
26442 case OPTION_FIX_V4BX
:
26447 /* Listing option. Just ignore these, we don't support additional
26452 for (opt
= arm_opts
; opt
->option
!= NULL
; opt
++)
26454 if (c
== opt
->option
[0]
26455 && ((arg
== NULL
&& opt
->option
[1] == 0)
26456 || streq (arg
, opt
->option
+ 1)))
26458 /* If the option is deprecated, tell the user. */
26459 if (warn_on_deprecated
&& opt
->deprecated
!= NULL
)
26460 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c
,
26461 arg
? arg
: "", _(opt
->deprecated
));
26463 if (opt
->var
!= NULL
)
26464 *opt
->var
= opt
->value
;
26470 for (fopt
= arm_legacy_opts
; fopt
->option
!= NULL
; fopt
++)
26472 if (c
== fopt
->option
[0]
26473 && ((arg
== NULL
&& fopt
->option
[1] == 0)
26474 || streq (arg
, fopt
->option
+ 1)))
26476 /* If the option is deprecated, tell the user. */
26477 if (warn_on_deprecated
&& fopt
->deprecated
!= NULL
)
26478 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c
,
26479 arg
? arg
: "", _(fopt
->deprecated
));
26481 if (fopt
->var
!= NULL
)
26482 *fopt
->var
= &fopt
->value
;
26488 for (lopt
= arm_long_opts
; lopt
->option
!= NULL
; lopt
++)
26490 /* These options are expected to have an argument. */
26491 if (c
== lopt
->option
[0]
26493 && strncmp (arg
, lopt
->option
+ 1,
26494 strlen (lopt
->option
+ 1)) == 0)
26496 /* If the option is deprecated, tell the user. */
26497 if (warn_on_deprecated
&& lopt
->deprecated
!= NULL
)
26498 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c
, arg
,
26499 _(lopt
->deprecated
));
26501 /* Call the sup-option parser. */
26502 return lopt
->func (arg
+ strlen (lopt
->option
) - 1);
26513 md_show_usage (FILE * fp
)
26515 struct arm_option_table
*opt
;
26516 struct arm_long_option_table
*lopt
;
26518 fprintf (fp
, _(" ARM-specific assembler options:\n"));
26520 for (opt
= arm_opts
; opt
->option
!= NULL
; opt
++)
26521 if (opt
->help
!= NULL
)
26522 fprintf (fp
, " -%-23s%s\n", opt
->option
, _(opt
->help
));
26524 for (lopt
= arm_long_opts
; lopt
->option
!= NULL
; lopt
++)
26525 if (lopt
->help
!= NULL
)
26526 fprintf (fp
, " -%s%s\n", lopt
->option
, _(lopt
->help
));
26530 -EB assemble code for a big-endian cpu\n"));
26535 -EL assemble code for a little-endian cpu\n"));
26539 --fix-v4bx Allow BX in ARMv4 code\n"));
26547 arm_feature_set flags
;
26548 } cpu_arch_ver_table
;
26550 /* Mapping from CPU features to EABI CPU arch values. As a general rule, table
26551 must be sorted least features first but some reordering is needed, eg. for
26552 Thumb-2 instructions to be detected as coming from ARMv6T2. */
26553 static const cpu_arch_ver_table cpu_arch_ver
[] =
26559 {4, ARM_ARCH_V5TE
},
26560 {5, ARM_ARCH_V5TEJ
},
26564 {11, ARM_ARCH_V6M
},
26565 {12, ARM_ARCH_V6SM
},
26566 {8, ARM_ARCH_V6T2
},
26567 {10, ARM_ARCH_V7VE
},
26568 {10, ARM_ARCH_V7R
},
26569 {10, ARM_ARCH_V7M
},
26570 {14, ARM_ARCH_V8A
},
26571 {16, ARM_ARCH_V8M_BASE
},
26572 {17, ARM_ARCH_V8M_MAIN
},
26576 /* Set an attribute if it has not already been set by the user. */
26578 aeabi_set_attribute_int (int tag
, int value
)
26581 || tag
>= NUM_KNOWN_OBJ_ATTRIBUTES
26582 || !attributes_set_explicitly
[tag
])
26583 bfd_elf_add_proc_attr_int (stdoutput
, tag
, value
);
26587 aeabi_set_attribute_string (int tag
, const char *value
)
26590 || tag
>= NUM_KNOWN_OBJ_ATTRIBUTES
26591 || !attributes_set_explicitly
[tag
])
26592 bfd_elf_add_proc_attr_string (stdoutput
, tag
, value
);
26595 /* Set the public EABI object attributes. */
26597 aeabi_set_public_attributes (void)
26602 int fp16_optional
= 0;
26603 arm_feature_set flags
;
26604 arm_feature_set tmp
;
26605 arm_feature_set arm_arch_v8m_base
= ARM_ARCH_V8M_BASE
;
26606 const cpu_arch_ver_table
*p
;
26608 /* Choose the architecture based on the capabilities of the requested cpu
26609 (if any) and/or the instructions actually used. */
26610 ARM_MERGE_FEATURE_SETS (flags
, arm_arch_used
, thumb_arch_used
);
26611 ARM_MERGE_FEATURE_SETS (flags
, flags
, *mfpu_opt
);
26612 ARM_MERGE_FEATURE_SETS (flags
, flags
, selected_cpu
);
26614 if (ARM_CPU_HAS_FEATURE (arm_arch_used
, arm_arch_any
))
26615 ARM_MERGE_FEATURE_SETS (flags
, flags
, arm_ext_v1
);
26617 if (ARM_CPU_HAS_FEATURE (thumb_arch_used
, arm_arch_any
))
26618 ARM_MERGE_FEATURE_SETS (flags
, flags
, arm_ext_v4t
);
26620 selected_cpu
= flags
;
26622 /* Allow the user to override the reported architecture. */
26625 ARM_CLEAR_FEATURE (flags
, flags
, arm_arch_any
);
26626 ARM_MERGE_FEATURE_SETS (flags
, flags
, *object_arch
);
26629 /* We need to make sure that the attributes do not identify us as v6S-M
26630 when the only v6S-M feature in use is the Operating System Extensions. */
26631 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_os
))
26632 if (!ARM_CPU_HAS_FEATURE (flags
, arm_arch_v6m_only
))
26633 ARM_CLEAR_FEATURE (flags
, flags
, arm_ext_os
);
26637 for (p
= cpu_arch_ver
; p
->val
; p
++)
26639 if (ARM_CPU_HAS_FEATURE (tmp
, p
->flags
))
26642 ARM_CLEAR_FEATURE (tmp
, tmp
, p
->flags
);
26646 /* The table lookup above finds the last architecture to contribute
26647 a new feature. Unfortunately, Tag13 is a subset of the union of
26648 v6T2 and v7-M, so it is never seen as contributing a new feature.
26649 We can not search for the last entry which is entirely used,
26650 because if no CPU is specified we build up only those flags
26651 actually used. Perhaps we should separate out the specified
26652 and implicit cases. Avoid taking this path for -march=all by
26653 checking for contradictory v7-A / v7-M features. */
26654 if (arch
== TAG_CPU_ARCH_V7
26655 && !ARM_CPU_HAS_FEATURE (flags
, arm_ext_v7a
)
26656 && ARM_CPU_HAS_FEATURE (flags
, arm_ext_v7m
)
26657 && ARM_CPU_HAS_FEATURE (flags
, arm_ext_v6_dsp
))
26658 arch
= TAG_CPU_ARCH_V7E_M
;
26660 ARM_CLEAR_FEATURE (tmp
, flags
, arm_arch_v8m_base
);
26661 if (arch
== TAG_CPU_ARCH_V8M_BASE
&& ARM_CPU_HAS_FEATURE (tmp
, arm_arch_any
))
26662 arch
= TAG_CPU_ARCH_V8M_MAIN
;
26664 /* In cpu_arch_ver ARMv8-A is before ARMv8-M for atomics to be detected as
26665 coming from ARMv8-A. However, since ARMv8-A has more instructions than
26666 ARMv8-M, -march=all must be detected as ARMv8-A. */
26667 if (arch
== TAG_CPU_ARCH_V8M_MAIN
26668 && ARM_FEATURE_CORE_EQUAL (selected_cpu
, arm_arch_any
))
26669 arch
= TAG_CPU_ARCH_V8
;
26671 /* Tag_CPU_name. */
26672 if (selected_cpu_name
[0])
26676 q
= selected_cpu_name
;
26677 if (strncmp (q
, "armv", 4) == 0)
26682 for (i
= 0; q
[i
]; i
++)
26683 q
[i
] = TOUPPER (q
[i
]);
26685 aeabi_set_attribute_string (Tag_CPU_name
, q
);
26688 /* Tag_CPU_arch. */
26689 aeabi_set_attribute_int (Tag_CPU_arch
, arch
);
26691 /* Tag_CPU_arch_profile. */
26692 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_v7a
)
26693 || ARM_CPU_HAS_FEATURE (flags
, arm_ext_v8
)
26694 || (ARM_CPU_HAS_FEATURE (flags
, arm_ext_atomics
)
26695 && !ARM_CPU_HAS_FEATURE (flags
, arm_ext_v8m_m_only
)))
26697 else if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_v7r
))
26699 else if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_m
))
26704 if (profile
!= '\0')
26705 aeabi_set_attribute_int (Tag_CPU_arch_profile
, profile
);
26707 /* Tag_DSP_extension. */
26708 if (dyn_mcpu_ext_opt
&& ARM_CPU_HAS_FEATURE (*dyn_mcpu_ext_opt
, arm_ext_dsp
))
26709 aeabi_set_attribute_int (Tag_DSP_extension
, 1);
26711 /* Tag_ARM_ISA_use. */
26712 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_v1
)
26714 aeabi_set_attribute_int (Tag_ARM_ISA_use
, 1);
26716 /* Tag_THUMB_ISA_use. */
26717 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_v4t
)
26722 if (!ARM_CPU_HAS_FEATURE (flags
, arm_ext_v8
)
26723 && ARM_CPU_HAS_FEATURE (flags
, arm_ext_v8m_m_only
))
26725 else if (ARM_CPU_HAS_FEATURE (flags
, arm_arch_t2
))
26729 aeabi_set_attribute_int (Tag_THUMB_ISA_use
, thumb_isa_use
);
26732 /* Tag_VFP_arch. */
26733 if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_armv8xd
))
26734 aeabi_set_attribute_int (Tag_VFP_arch
,
26735 ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_d32
)
26737 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_fma
))
26738 aeabi_set_attribute_int (Tag_VFP_arch
,
26739 ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_d32
)
26741 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_d32
))
26744 aeabi_set_attribute_int (Tag_VFP_arch
, 3);
26746 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v3xd
))
26748 aeabi_set_attribute_int (Tag_VFP_arch
, 4);
26751 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v2
))
26752 aeabi_set_attribute_int (Tag_VFP_arch
, 2);
26753 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v1
)
26754 || ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v1xd
))
26755 aeabi_set_attribute_int (Tag_VFP_arch
, 1);
26757 /* Tag_ABI_HardFP_use. */
26758 if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v1xd
)
26759 && !ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v1
))
26760 aeabi_set_attribute_int (Tag_ABI_HardFP_use
, 1);
26762 /* Tag_WMMX_arch. */
26763 if (ARM_CPU_HAS_FEATURE (flags
, arm_cext_iwmmxt2
))
26764 aeabi_set_attribute_int (Tag_WMMX_arch
, 2);
26765 else if (ARM_CPU_HAS_FEATURE (flags
, arm_cext_iwmmxt
))
26766 aeabi_set_attribute_int (Tag_WMMX_arch
, 1);
26768 /* Tag_Advanced_SIMD_arch (formerly Tag_NEON_arch). */
26769 if (ARM_CPU_HAS_FEATURE (flags
, fpu_neon_ext_v8_1
))
26770 aeabi_set_attribute_int (Tag_Advanced_SIMD_arch
, 4);
26771 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_neon_ext_armv8
))
26772 aeabi_set_attribute_int (Tag_Advanced_SIMD_arch
, 3);
26773 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_neon_ext_v1
))
26775 if (ARM_CPU_HAS_FEATURE (flags
, fpu_neon_ext_fma
))
26777 aeabi_set_attribute_int (Tag_Advanced_SIMD_arch
, 2);
26781 aeabi_set_attribute_int (Tag_Advanced_SIMD_arch
, 1);
26786 /* Tag_VFP_HP_extension (formerly Tag_NEON_FP16_arch). */
26787 if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_fp16
) && fp16_optional
)
26788 aeabi_set_attribute_int (Tag_VFP_HP_extension
, 1);
26792 We set Tag_DIV_use to two when integer divide instructions have been used
26793 in ARM state, or when Thumb integer divide instructions have been used,
26794 but we have no architecture profile set, nor have we any ARM instructions.
26796 For ARMv8-A and ARMv8-M we set the tag to 0 as integer divide is implied
26797 by the base architecture.
26799 For new architectures we will have to check these tests. */
26800 gas_assert (arch
<= TAG_CPU_ARCH_V8
26801 || (arch
>= TAG_CPU_ARCH_V8M_BASE
26802 && arch
<= TAG_CPU_ARCH_V8M_MAIN
));
26803 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_v8
)
26804 || ARM_CPU_HAS_FEATURE (flags
, arm_ext_v8m
))
26805 aeabi_set_attribute_int (Tag_DIV_use
, 0);
26806 else if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_adiv
)
26807 || (profile
== '\0'
26808 && ARM_CPU_HAS_FEATURE (flags
, arm_ext_div
)
26809 && !ARM_CPU_HAS_FEATURE (arm_arch_used
, arm_arch_any
)))
26810 aeabi_set_attribute_int (Tag_DIV_use
, 2);
26812 /* Tag_MP_extension_use. */
26813 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_mp
))
26814 aeabi_set_attribute_int (Tag_MPextension_use
, 1);
26816 /* Tag Virtualization_use. */
26817 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_sec
))
26819 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_virt
))
26822 aeabi_set_attribute_int (Tag_Virtualization_use
, virt_sec
);
26825 /* Post relaxation hook. Recompute ARM attributes now that relaxation is
26826 finished and free extension feature bits which will not be used anymore. */
26828 arm_md_post_relax (void)
26830 aeabi_set_public_attributes ();
26831 XDELETE (dyn_mcpu_ext_opt
);
26832 dyn_mcpu_ext_opt
= NULL
;
26833 XDELETE (dyn_march_ext_opt
);
26834 dyn_march_ext_opt
= NULL
;
26837 /* Add the default contents for the .ARM.attributes section. */
26841 if (EF_ARM_EABI_VERSION (meabi_flags
) < EF_ARM_EABI_VER4
)
26844 aeabi_set_public_attributes ();
26846 #endif /* OBJ_ELF */
26849 /* Parse a .cpu directive. */
26852 s_arm_cpu (int ignored ATTRIBUTE_UNUSED
)
26854 const struct arm_cpu_option_table
*opt
;
26858 name
= input_line_pointer
;
26859 while (*input_line_pointer
&& !ISSPACE (*input_line_pointer
))
26860 input_line_pointer
++;
26861 saved_char
= *input_line_pointer
;
26862 *input_line_pointer
= 0;
26864 /* Skip the first "all" entry. */
26865 for (opt
= arm_cpus
+ 1; opt
->name
!= NULL
; opt
++)
26866 if (streq (opt
->name
, name
))
26868 mcpu_cpu_opt
= &opt
->value
;
26869 if (!dyn_mcpu_ext_opt
)
26870 dyn_mcpu_ext_opt
= XNEW (arm_feature_set
);
26871 *dyn_mcpu_ext_opt
= opt
->ext
;
26872 ARM_MERGE_FEATURE_SETS (selected_cpu
, *mcpu_cpu_opt
, *dyn_mcpu_ext_opt
);
26873 if (opt
->canonical_name
)
26874 strcpy (selected_cpu_name
, opt
->canonical_name
);
26878 for (i
= 0; opt
->name
[i
]; i
++)
26879 selected_cpu_name
[i
] = TOUPPER (opt
->name
[i
]);
26881 selected_cpu_name
[i
] = 0;
26883 ARM_MERGE_FEATURE_SETS (cpu_variant
, *mcpu_cpu_opt
, *mfpu_opt
);
26884 if (dyn_mcpu_ext_opt
)
26885 ARM_MERGE_FEATURE_SETS (cpu_variant
, cpu_variant
, *dyn_mcpu_ext_opt
);
26886 *input_line_pointer
= saved_char
;
26887 demand_empty_rest_of_line ();
26890 as_bad (_("unknown cpu `%s'"), name
);
26891 *input_line_pointer
= saved_char
;
26892 ignore_rest_of_line ();
26896 /* Parse a .arch directive. */
26899 s_arm_arch (int ignored ATTRIBUTE_UNUSED
)
26901 const struct arm_arch_option_table
*opt
;
26905 name
= input_line_pointer
;
26906 while (*input_line_pointer
&& !ISSPACE (*input_line_pointer
))
26907 input_line_pointer
++;
26908 saved_char
= *input_line_pointer
;
26909 *input_line_pointer
= 0;
26911 /* Skip the first "all" entry. */
26912 for (opt
= arm_archs
+ 1; opt
->name
!= NULL
; opt
++)
26913 if (streq (opt
->name
, name
))
26915 mcpu_cpu_opt
= &opt
->value
;
26916 XDELETE (dyn_mcpu_ext_opt
);
26917 dyn_mcpu_ext_opt
= NULL
;
26918 selected_cpu
= *mcpu_cpu_opt
;
26919 strcpy (selected_cpu_name
, opt
->name
);
26920 ARM_MERGE_FEATURE_SETS (cpu_variant
, selected_cpu
, *mfpu_opt
);
26921 *input_line_pointer
= saved_char
;
26922 demand_empty_rest_of_line ();
26926 as_bad (_("unknown architecture `%s'\n"), name
);
26927 *input_line_pointer
= saved_char
;
26928 ignore_rest_of_line ();
26932 /* Parse a .object_arch directive. */
26935 s_arm_object_arch (int ignored ATTRIBUTE_UNUSED
)
26937 const struct arm_arch_option_table
*opt
;
26941 name
= input_line_pointer
;
26942 while (*input_line_pointer
&& !ISSPACE (*input_line_pointer
))
26943 input_line_pointer
++;
26944 saved_char
= *input_line_pointer
;
26945 *input_line_pointer
= 0;
26947 /* Skip the first "all" entry. */
26948 for (opt
= arm_archs
+ 1; opt
->name
!= NULL
; opt
++)
26949 if (streq (opt
->name
, name
))
26951 object_arch
= &opt
->value
;
26952 *input_line_pointer
= saved_char
;
26953 demand_empty_rest_of_line ();
26957 as_bad (_("unknown architecture `%s'\n"), name
);
26958 *input_line_pointer
= saved_char
;
26959 ignore_rest_of_line ();
26962 /* Parse a .arch_extension directive. */
26965 s_arm_arch_extension (int ignored ATTRIBUTE_UNUSED
)
26967 const struct arm_option_extension_value_table
*opt
;
26968 const arm_feature_set arm_any
= ARM_ANY
;
26971 int adding_value
= 1;
26973 name
= input_line_pointer
;
26974 while (*input_line_pointer
&& !ISSPACE (*input_line_pointer
))
26975 input_line_pointer
++;
26976 saved_char
= *input_line_pointer
;
26977 *input_line_pointer
= 0;
26979 if (strlen (name
) >= 2
26980 && strncmp (name
, "no", 2) == 0)
26986 for (opt
= arm_extensions
; opt
->name
!= NULL
; opt
++)
26987 if (streq (opt
->name
, name
))
26989 int i
, nb_allowed_archs
=
26990 sizeof (opt
->allowed_archs
) / sizeof (opt
->allowed_archs
[i
]);
26991 for (i
= 0; i
< nb_allowed_archs
; i
++)
26994 if (ARM_FEATURE_EQUAL (opt
->allowed_archs
[i
], arm_any
))
26996 if (ARM_FSET_CPU_SUBSET (opt
->allowed_archs
[i
], *mcpu_cpu_opt
))
27000 if (i
== nb_allowed_archs
)
27002 as_bad (_("architectural extension `%s' is not allowed for the "
27003 "current base architecture"), name
);
27007 if (!dyn_mcpu_ext_opt
)
27009 dyn_mcpu_ext_opt
= XNEW (arm_feature_set
);
27010 *dyn_mcpu_ext_opt
= arm_arch_none
;
27013 ARM_MERGE_FEATURE_SETS (*dyn_mcpu_ext_opt
, *dyn_mcpu_ext_opt
,
27016 ARM_CLEAR_FEATURE (*dyn_mcpu_ext_opt
, *dyn_mcpu_ext_opt
,
27019 ARM_MERGE_FEATURE_SETS (selected_cpu
, *mcpu_cpu_opt
, *dyn_mcpu_ext_opt
);
27020 ARM_MERGE_FEATURE_SETS (cpu_variant
, selected_cpu
, *mfpu_opt
);
27021 *input_line_pointer
= saved_char
;
27022 demand_empty_rest_of_line ();
27026 if (opt
->name
== NULL
)
27027 as_bad (_("unknown architecture extension `%s'\n"), name
);
27029 *input_line_pointer
= saved_char
;
27030 ignore_rest_of_line ();
27033 /* Parse a .fpu directive. */
27036 s_arm_fpu (int ignored ATTRIBUTE_UNUSED
)
27038 const struct arm_option_fpu_value_table
*opt
;
27042 name
= input_line_pointer
;
27043 while (*input_line_pointer
&& !ISSPACE (*input_line_pointer
))
27044 input_line_pointer
++;
27045 saved_char
= *input_line_pointer
;
27046 *input_line_pointer
= 0;
27048 for (opt
= arm_fpus
; opt
->name
!= NULL
; opt
++)
27049 if (streq (opt
->name
, name
))
27051 mfpu_opt
= &opt
->value
;
27052 ARM_MERGE_FEATURE_SETS (cpu_variant
, *mcpu_cpu_opt
, *mfpu_opt
);
27053 if (dyn_mcpu_ext_opt
)
27054 ARM_MERGE_FEATURE_SETS (cpu_variant
, cpu_variant
, *dyn_mcpu_ext_opt
);
27055 *input_line_pointer
= saved_char
;
27056 demand_empty_rest_of_line ();
27060 as_bad (_("unknown floating point format `%s'\n"), name
);
27061 *input_line_pointer
= saved_char
;
27062 ignore_rest_of_line ();
27065 /* Copy symbol information. */
27068 arm_copy_symbol_attributes (symbolS
*dest
, symbolS
*src
)
27070 ARM_GET_FLAG (dest
) = ARM_GET_FLAG (src
);
27074 /* Given a symbolic attribute NAME, return the proper integer value.
27075 Returns -1 if the attribute is not known. */
27078 arm_convert_symbolic_attribute (const char *name
)
27080 static const struct
27085 attribute_table
[] =
27087 /* When you modify this table you should
27088 also modify the list in doc/c-arm.texi. */
27089 #define T(tag) {#tag, tag}
27090 T (Tag_CPU_raw_name
),
27093 T (Tag_CPU_arch_profile
),
27094 T (Tag_ARM_ISA_use
),
27095 T (Tag_THUMB_ISA_use
),
27099 T (Tag_Advanced_SIMD_arch
),
27100 T (Tag_PCS_config
),
27101 T (Tag_ABI_PCS_R9_use
),
27102 T (Tag_ABI_PCS_RW_data
),
27103 T (Tag_ABI_PCS_RO_data
),
27104 T (Tag_ABI_PCS_GOT_use
),
27105 T (Tag_ABI_PCS_wchar_t
),
27106 T (Tag_ABI_FP_rounding
),
27107 T (Tag_ABI_FP_denormal
),
27108 T (Tag_ABI_FP_exceptions
),
27109 T (Tag_ABI_FP_user_exceptions
),
27110 T (Tag_ABI_FP_number_model
),
27111 T (Tag_ABI_align_needed
),
27112 T (Tag_ABI_align8_needed
),
27113 T (Tag_ABI_align_preserved
),
27114 T (Tag_ABI_align8_preserved
),
27115 T (Tag_ABI_enum_size
),
27116 T (Tag_ABI_HardFP_use
),
27117 T (Tag_ABI_VFP_args
),
27118 T (Tag_ABI_WMMX_args
),
27119 T (Tag_ABI_optimization_goals
),
27120 T (Tag_ABI_FP_optimization_goals
),
27121 T (Tag_compatibility
),
27122 T (Tag_CPU_unaligned_access
),
27123 T (Tag_FP_HP_extension
),
27124 T (Tag_VFP_HP_extension
),
27125 T (Tag_ABI_FP_16bit_format
),
27126 T (Tag_MPextension_use
),
27128 T (Tag_nodefaults
),
27129 T (Tag_also_compatible_with
),
27130 T (Tag_conformance
),
27132 T (Tag_Virtualization_use
),
27133 T (Tag_DSP_extension
),
27134 /* We deliberately do not include Tag_MPextension_use_legacy. */
27142 for (i
= 0; i
< ARRAY_SIZE (attribute_table
); i
++)
27143 if (streq (name
, attribute_table
[i
].name
))
27144 return attribute_table
[i
].tag
;
27150 /* Apply sym value for relocations only in the case that they are for
27151 local symbols in the same segment as the fixup and you have the
27152 respective architectural feature for blx and simple switches. */
27154 arm_apply_sym_value (struct fix
* fixP
, segT this_seg
)
27157 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
)
27158 /* PR 17444: If the local symbol is in a different section then a reloc
27159 will always be generated for it, so applying the symbol value now
27160 will result in a double offset being stored in the relocation. */
27161 && (S_GET_SEGMENT (fixP
->fx_addsy
) == this_seg
)
27162 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
))
27164 switch (fixP
->fx_r_type
)
27166 case BFD_RELOC_ARM_PCREL_BLX
:
27167 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
27168 if (ARM_IS_FUNC (fixP
->fx_addsy
))
27172 case BFD_RELOC_ARM_PCREL_CALL
:
27173 case BFD_RELOC_THUMB_PCREL_BLX
:
27174 if (THUMB_IS_FUNC (fixP
->fx_addsy
))
27185 #endif /* OBJ_ELF */