1 /* tc-arm.c -- Assemble for the ARM
2 Copyright (C) 1994-2019 Free Software Foundation, Inc.
3 Contributed by Richard Earnshaw (rwe@pegasus.esprit.ec.org)
4 Modified by David Taylor (dtaylor@armltd.co.uk)
5 Cirrus coprocessor mods by Aldy Hernandez (aldyh@redhat.com)
6 Cirrus coprocessor fixes by Petko Manolov (petkan@nucleusys.com)
7 Cirrus coprocessor fixes by Vladimir Ivanov (vladitx@nucleusys.com)
9 This file is part of GAS, the GNU Assembler.
11 GAS is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License as published by
13 the Free Software Foundation; either version 3, or (at your option)
16 GAS is distributed in the hope that it will be useful,
17 but WITHOUT ANY WARRANTY; without even the implied warranty of
18 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 GNU General Public License for more details.
21 You should have received a copy of the GNU General Public License
22 along with GAS; see the file COPYING. If not, write to the Free
23 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
30 #include "safe-ctype.h"
33 #include "libiberty.h"
34 #include "opcode/arm.h"
38 #include "dw2gencfi.h"
41 #include "dwarf2dbg.h"
44 /* Must be at least the size of the largest unwind opcode (currently two). */
45 #define ARM_OPCODE_CHUNK_SIZE 8
47 /* This structure holds the unwinding state. */
52 symbolS
* table_entry
;
53 symbolS
* personality_routine
;
54 int personality_index
;
55 /* The segment containing the function. */
58 /* Opcodes generated from this function. */
59 unsigned char * opcodes
;
62 /* The number of bytes pushed to the stack. */
64 /* We don't add stack adjustment opcodes immediately so that we can merge
65 multiple adjustments. We can also omit the final adjustment
66 when using a frame pointer. */
67 offsetT pending_offset
;
68 /* These two fields are set by both unwind_movsp and unwind_setfp. They
69 hold the reg+offset to use when restoring sp from a frame pointer. */
72 /* Nonzero if an unwind_setfp directive has been seen. */
74 /* Nonzero if the last opcode restores sp from fp_reg. */
75 unsigned sp_restored
:1;
78 /* Whether --fdpic was given. */
83 /* Results from operand parsing worker functions. */
87 PARSE_OPERAND_SUCCESS
,
89 PARSE_OPERAND_FAIL_NO_BACKTRACK
90 } parse_operand_result
;
99 /* Types of processor to assemble for. */
101 /* The code that was here used to select a default CPU depending on compiler
102 pre-defines which were only present when doing native builds, thus
103 changing gas' default behaviour depending upon the build host.
105 If you have a target that requires a default CPU option then you
106 should define CPU_DEFAULT here. */
111 # define FPU_DEFAULT FPU_ARCH_FPA
112 # elif defined (TE_NetBSD)
114 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, but VFP order. */
116 /* Legacy a.out format. */
117 # define FPU_DEFAULT FPU_ARCH_FPA /* Soft-float, but FPA order. */
119 # elif defined (TE_VXWORKS)
120 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, VFP order. */
122 /* For backwards compatibility, default to FPA. */
123 # define FPU_DEFAULT FPU_ARCH_FPA
125 #endif /* ifndef FPU_DEFAULT */
127 #define streq(a, b) (strcmp (a, b) == 0)
129 /* Current set of feature bits available (CPU+FPU). Different from
130 selected_cpu + selected_fpu in case of autodetection since the CPU
131 feature bits are then all set. */
132 static arm_feature_set cpu_variant
;
133 /* Feature bits used in each execution state. Used to set build attribute
134 (in particular Tag_*_ISA_use) in CPU autodetection mode. */
135 static arm_feature_set arm_arch_used
;
136 static arm_feature_set thumb_arch_used
;
138 /* Flags stored in private area of BFD structure. */
139 static int uses_apcs_26
= FALSE
;
140 static int atpcs
= FALSE
;
141 static int support_interwork
= FALSE
;
142 static int uses_apcs_float
= FALSE
;
143 static int pic_code
= FALSE
;
144 static int fix_v4bx
= FALSE
;
145 /* Warn on using deprecated features. */
146 static int warn_on_deprecated
= TRUE
;
148 /* Understand CodeComposer Studio assembly syntax. */
149 bfd_boolean codecomposer_syntax
= FALSE
;
151 /* Variables that we set while parsing command-line options. Once all
152 options have been read we re-process these values to set the real
155 /* CPU and FPU feature bits set for legacy CPU and FPU options (eg. -marm1
156 instead of -mcpu=arm1). */
157 static const arm_feature_set
*legacy_cpu
= NULL
;
158 static const arm_feature_set
*legacy_fpu
= NULL
;
160 /* CPU, extension and FPU feature bits selected by -mcpu. */
161 static const arm_feature_set
*mcpu_cpu_opt
= NULL
;
162 static arm_feature_set
*mcpu_ext_opt
= NULL
;
163 static const arm_feature_set
*mcpu_fpu_opt
= NULL
;
165 /* CPU, extension and FPU feature bits selected by -march. */
166 static const arm_feature_set
*march_cpu_opt
= NULL
;
167 static arm_feature_set
*march_ext_opt
= NULL
;
168 static const arm_feature_set
*march_fpu_opt
= NULL
;
170 /* Feature bits selected by -mfpu. */
171 static const arm_feature_set
*mfpu_opt
= NULL
;
173 /* Constants for known architecture features. */
174 static const arm_feature_set fpu_default
= FPU_DEFAULT
;
175 static const arm_feature_set fpu_arch_vfp_v1 ATTRIBUTE_UNUSED
= FPU_ARCH_VFP_V1
;
176 static const arm_feature_set fpu_arch_vfp_v2
= FPU_ARCH_VFP_V2
;
177 static const arm_feature_set fpu_arch_vfp_v3 ATTRIBUTE_UNUSED
= FPU_ARCH_VFP_V3
;
178 static const arm_feature_set fpu_arch_neon_v1 ATTRIBUTE_UNUSED
= FPU_ARCH_NEON_V1
;
179 static const arm_feature_set fpu_arch_fpa
= FPU_ARCH_FPA
;
180 static const arm_feature_set fpu_any_hard
= FPU_ANY_HARD
;
182 static const arm_feature_set fpu_arch_maverick
= FPU_ARCH_MAVERICK
;
184 static const arm_feature_set fpu_endian_pure
= FPU_ARCH_ENDIAN_PURE
;
187 static const arm_feature_set cpu_default
= CPU_DEFAULT
;
190 static const arm_feature_set arm_ext_v1
= ARM_FEATURE_CORE_LOW (ARM_EXT_V1
);
191 static const arm_feature_set arm_ext_v2
= ARM_FEATURE_CORE_LOW (ARM_EXT_V2
);
192 static const arm_feature_set arm_ext_v2s
= ARM_FEATURE_CORE_LOW (ARM_EXT_V2S
);
193 static const arm_feature_set arm_ext_v3
= ARM_FEATURE_CORE_LOW (ARM_EXT_V3
);
194 static const arm_feature_set arm_ext_v3m
= ARM_FEATURE_CORE_LOW (ARM_EXT_V3M
);
195 static const arm_feature_set arm_ext_v4
= ARM_FEATURE_CORE_LOW (ARM_EXT_V4
);
196 static const arm_feature_set arm_ext_v4t
= ARM_FEATURE_CORE_LOW (ARM_EXT_V4T
);
197 static const arm_feature_set arm_ext_v5
= ARM_FEATURE_CORE_LOW (ARM_EXT_V5
);
198 static const arm_feature_set arm_ext_v4t_5
=
199 ARM_FEATURE_CORE_LOW (ARM_EXT_V4T
| ARM_EXT_V5
);
200 static const arm_feature_set arm_ext_v5t
= ARM_FEATURE_CORE_LOW (ARM_EXT_V5T
);
201 static const arm_feature_set arm_ext_v5e
= ARM_FEATURE_CORE_LOW (ARM_EXT_V5E
);
202 static const arm_feature_set arm_ext_v5exp
= ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP
);
203 static const arm_feature_set arm_ext_v5j
= ARM_FEATURE_CORE_LOW (ARM_EXT_V5J
);
204 static const arm_feature_set arm_ext_v6
= ARM_FEATURE_CORE_LOW (ARM_EXT_V6
);
205 static const arm_feature_set arm_ext_v6k
= ARM_FEATURE_CORE_LOW (ARM_EXT_V6K
);
206 static const arm_feature_set arm_ext_v6t2
= ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2
);
207 /* Only for compatibility of hint instructions. */
208 static const arm_feature_set arm_ext_v6k_v6t2
=
209 ARM_FEATURE_CORE_LOW (ARM_EXT_V6K
| ARM_EXT_V6T2
);
210 static const arm_feature_set arm_ext_v6_notm
=
211 ARM_FEATURE_CORE_LOW (ARM_EXT_V6_NOTM
);
212 static const arm_feature_set arm_ext_v6_dsp
=
213 ARM_FEATURE_CORE_LOW (ARM_EXT_V6_DSP
);
214 static const arm_feature_set arm_ext_barrier
=
215 ARM_FEATURE_CORE_LOW (ARM_EXT_BARRIER
);
216 static const arm_feature_set arm_ext_msr
=
217 ARM_FEATURE_CORE_LOW (ARM_EXT_THUMB_MSR
);
218 static const arm_feature_set arm_ext_div
= ARM_FEATURE_CORE_LOW (ARM_EXT_DIV
);
219 static const arm_feature_set arm_ext_v7
= ARM_FEATURE_CORE_LOW (ARM_EXT_V7
);
220 static const arm_feature_set arm_ext_v7a
= ARM_FEATURE_CORE_LOW (ARM_EXT_V7A
);
221 static const arm_feature_set arm_ext_v7r
= ARM_FEATURE_CORE_LOW (ARM_EXT_V7R
);
223 static const arm_feature_set ATTRIBUTE_UNUSED arm_ext_v7m
= ARM_FEATURE_CORE_LOW (ARM_EXT_V7M
);
225 static const arm_feature_set arm_ext_v8
= ARM_FEATURE_CORE_LOW (ARM_EXT_V8
);
226 static const arm_feature_set arm_ext_m
=
227 ARM_FEATURE_CORE (ARM_EXT_V6M
| ARM_EXT_V7M
,
228 ARM_EXT2_V8M
| ARM_EXT2_V8M_MAIN
);
229 static const arm_feature_set arm_ext_mp
= ARM_FEATURE_CORE_LOW (ARM_EXT_MP
);
230 static const arm_feature_set arm_ext_sec
= ARM_FEATURE_CORE_LOW (ARM_EXT_SEC
);
231 static const arm_feature_set arm_ext_os
= ARM_FEATURE_CORE_LOW (ARM_EXT_OS
);
232 static const arm_feature_set arm_ext_adiv
= ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV
);
233 static const arm_feature_set arm_ext_virt
= ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT
);
234 static const arm_feature_set arm_ext_pan
= ARM_FEATURE_CORE_HIGH (ARM_EXT2_PAN
);
235 static const arm_feature_set arm_ext_v8m
= ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M
);
236 static const arm_feature_set arm_ext_v8m_main
=
237 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M_MAIN
);
238 static const arm_feature_set arm_ext_v8_1m_main
=
239 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_1M_MAIN
);
240 /* Instructions in ARMv8-M only found in M profile architectures. */
241 static const arm_feature_set arm_ext_v8m_m_only
=
242 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M
| ARM_EXT2_V8M_MAIN
);
243 static const arm_feature_set arm_ext_v6t2_v8m
=
244 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V6T2_V8M
);
245 /* Instructions shared between ARMv8-A and ARMv8-M. */
246 static const arm_feature_set arm_ext_atomics
=
247 ARM_FEATURE_CORE_HIGH (ARM_EXT2_ATOMICS
);
249 /* DSP instructions Tag_DSP_extension refers to. */
250 static const arm_feature_set arm_ext_dsp
=
251 ARM_FEATURE_CORE_LOW (ARM_EXT_V5E
| ARM_EXT_V5ExP
| ARM_EXT_V6_DSP
);
253 static const arm_feature_set arm_ext_ras
=
254 ARM_FEATURE_CORE_HIGH (ARM_EXT2_RAS
);
255 /* FP16 instructions. */
256 static const arm_feature_set arm_ext_fp16
=
257 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
);
258 static const arm_feature_set arm_ext_fp16_fml
=
259 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_FML
);
260 static const arm_feature_set arm_ext_v8_2
=
261 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_2A
);
262 static const arm_feature_set arm_ext_v8_3
=
263 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_3A
);
264 static const arm_feature_set arm_ext_sb
=
265 ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB
);
266 static const arm_feature_set arm_ext_predres
=
267 ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES
);
269 static const arm_feature_set arm_arch_any
= ARM_ANY
;
271 static const arm_feature_set fpu_any
= FPU_ANY
;
273 static const arm_feature_set arm_arch_full ATTRIBUTE_UNUSED
= ARM_FEATURE (-1, -1, -1);
274 static const arm_feature_set arm_arch_t2
= ARM_ARCH_THUMB2
;
275 static const arm_feature_set arm_arch_none
= ARM_ARCH_NONE
;
277 static const arm_feature_set arm_cext_iwmmxt2
=
278 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2
);
279 static const arm_feature_set arm_cext_iwmmxt
=
280 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT
);
281 static const arm_feature_set arm_cext_xscale
=
282 ARM_FEATURE_COPROC (ARM_CEXT_XSCALE
);
283 static const arm_feature_set arm_cext_maverick
=
284 ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK
);
285 static const arm_feature_set fpu_fpa_ext_v1
=
286 ARM_FEATURE_COPROC (FPU_FPA_EXT_V1
);
287 static const arm_feature_set fpu_fpa_ext_v2
=
288 ARM_FEATURE_COPROC (FPU_FPA_EXT_V2
);
289 static const arm_feature_set fpu_vfp_ext_v1xd
=
290 ARM_FEATURE_COPROC (FPU_VFP_EXT_V1xD
);
291 static const arm_feature_set fpu_vfp_ext_v1
=
292 ARM_FEATURE_COPROC (FPU_VFP_EXT_V1
);
293 static const arm_feature_set fpu_vfp_ext_v2
=
294 ARM_FEATURE_COPROC (FPU_VFP_EXT_V2
);
295 static const arm_feature_set fpu_vfp_ext_v3xd
=
296 ARM_FEATURE_COPROC (FPU_VFP_EXT_V3xD
);
297 static const arm_feature_set fpu_vfp_ext_v3
=
298 ARM_FEATURE_COPROC (FPU_VFP_EXT_V3
);
299 static const arm_feature_set fpu_vfp_ext_d32
=
300 ARM_FEATURE_COPROC (FPU_VFP_EXT_D32
);
301 static const arm_feature_set fpu_neon_ext_v1
=
302 ARM_FEATURE_COPROC (FPU_NEON_EXT_V1
);
303 static const arm_feature_set fpu_vfp_v3_or_neon_ext
=
304 ARM_FEATURE_COPROC (FPU_NEON_EXT_V1
| FPU_VFP_EXT_V3
);
306 static const arm_feature_set fpu_vfp_fp16
=
307 ARM_FEATURE_COPROC (FPU_VFP_EXT_FP16
);
308 static const arm_feature_set fpu_neon_ext_fma
=
309 ARM_FEATURE_COPROC (FPU_NEON_EXT_FMA
);
311 static const arm_feature_set fpu_vfp_ext_fma
=
312 ARM_FEATURE_COPROC (FPU_VFP_EXT_FMA
);
313 static const arm_feature_set fpu_vfp_ext_armv8
=
314 ARM_FEATURE_COPROC (FPU_VFP_EXT_ARMV8
);
315 static const arm_feature_set fpu_vfp_ext_armv8xd
=
316 ARM_FEATURE_COPROC (FPU_VFP_EXT_ARMV8xD
);
317 static const arm_feature_set fpu_neon_ext_armv8
=
318 ARM_FEATURE_COPROC (FPU_NEON_EXT_ARMV8
);
319 static const arm_feature_set fpu_crypto_ext_armv8
=
320 ARM_FEATURE_COPROC (FPU_CRYPTO_EXT_ARMV8
);
321 static const arm_feature_set crc_ext_armv8
=
322 ARM_FEATURE_COPROC (CRC_EXT_ARMV8
);
323 static const arm_feature_set fpu_neon_ext_v8_1
=
324 ARM_FEATURE_COPROC (FPU_NEON_EXT_RDMA
);
325 static const arm_feature_set fpu_neon_ext_dotprod
=
326 ARM_FEATURE_COPROC (FPU_NEON_EXT_DOTPROD
);
328 static int mfloat_abi_opt
= -1;
329 /* Architecture feature bits selected by the last -mcpu/-march or .cpu/.arch
331 static arm_feature_set selected_arch
= ARM_ARCH_NONE
;
332 /* Extension feature bits selected by the last -mcpu/-march or .arch_extension
334 static arm_feature_set selected_ext
= ARM_ARCH_NONE
;
335 /* Feature bits selected by the last -mcpu/-march or by the combination of the
336 last .cpu/.arch directive .arch_extension directives since that
338 static arm_feature_set selected_cpu
= ARM_ARCH_NONE
;
339 /* FPU feature bits selected by the last -mfpu or .fpu directive. */
340 static arm_feature_set selected_fpu
= FPU_NONE
;
341 /* Feature bits selected by the last .object_arch directive. */
342 static arm_feature_set selected_object_arch
= ARM_ARCH_NONE
;
343 /* Must be long enough to hold any of the names in arm_cpus. */
344 static char selected_cpu_name
[20];
346 extern FLONUM_TYPE generic_floating_point_number
;
348 /* Return if no cpu was selected on command-line. */
350 no_cpu_selected (void)
352 return ARM_FEATURE_EQUAL (selected_cpu
, arm_arch_none
);
357 static int meabi_flags
= EABI_DEFAULT
;
359 static int meabi_flags
= EF_ARM_EABI_UNKNOWN
;
362 static int attributes_set_explicitly
[NUM_KNOWN_OBJ_ATTRIBUTES
];
367 return (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
);
372 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
373 symbolS
* GOT_symbol
;
376 /* 0: assemble for ARM,
377 1: assemble for Thumb,
378 2: assemble for Thumb even though target CPU does not support thumb
380 static int thumb_mode
= 0;
381 /* A value distinct from the possible values for thumb_mode that we
382 can use to record whether thumb_mode has been copied into the
383 tc_frag_data field of a frag. */
384 #define MODE_RECORDED (1 << 4)
386 /* Specifies the intrinsic IT insn behavior mode. */
387 enum implicit_it_mode
389 IMPLICIT_IT_MODE_NEVER
= 0x00,
390 IMPLICIT_IT_MODE_ARM
= 0x01,
391 IMPLICIT_IT_MODE_THUMB
= 0x02,
392 IMPLICIT_IT_MODE_ALWAYS
= (IMPLICIT_IT_MODE_ARM
| IMPLICIT_IT_MODE_THUMB
)
394 static int implicit_it_mode
= IMPLICIT_IT_MODE_ARM
;
396 /* If unified_syntax is true, we are processing the new unified
397 ARM/Thumb syntax. Important differences from the old ARM mode:
399 - Immediate operands do not require a # prefix.
400 - Conditional affixes always appear at the end of the
401 instruction. (For backward compatibility, those instructions
402 that formerly had them in the middle, continue to accept them
404 - The IT instruction may appear, and if it does is validated
405 against subsequent conditional affixes. It does not generate
408 Important differences from the old Thumb mode:
410 - Immediate operands do not require a # prefix.
411 - Most of the V6T2 instructions are only available in unified mode.
412 - The .N and .W suffixes are recognized and honored (it is an error
413 if they cannot be honored).
414 - All instructions set the flags if and only if they have an 's' affix.
415 - Conditional affixes may be used. They are validated against
416 preceding IT instructions. Unlike ARM mode, you cannot use a
417 conditional affix except in the scope of an IT instruction. */
419 static bfd_boolean unified_syntax
= FALSE
;
421 /* An immediate operand can start with #, and ld*, st*, pld operands
422 can contain [ and ]. We need to tell APP not to elide whitespace
423 before a [, which can appear as the first operand for pld.
424 Likewise, a { can appear as the first operand for push, pop, vld*, etc. */
425 const char arm_symbol_chars
[] = "#[]{}";
440 enum neon_el_type type
;
444 #define NEON_MAX_TYPE_ELS 4
448 struct neon_type_el el
[NEON_MAX_TYPE_ELS
];
452 enum it_instruction_type
457 IF_INSIDE_IT_LAST_INSN
, /* Either outside or inside;
458 if inside, should be the last one. */
459 NEUTRAL_IT_INSN
, /* This could be either inside or outside,
460 i.e. BKPT and NOP. */
461 IT_INSN
/* The IT insn has been parsed. */
464 /* The maximum number of operands we need. */
465 #define ARM_IT_MAX_OPERANDS 6
466 #define ARM_IT_MAX_RELOCS 3
471 unsigned long instruction
;
475 /* "uncond_value" is set to the value in place of the conditional field in
476 unconditional versions of the instruction, or -1 if nothing is
479 struct neon_type vectype
;
480 /* This does not indicate an actual NEON instruction, only that
481 the mnemonic accepts neon-style type suffixes. */
483 /* Set to the opcode if the instruction needs relaxation.
484 Zero if the instruction is not relaxed. */
488 bfd_reloc_code_real_type type
;
491 } relocs
[ARM_IT_MAX_RELOCS
];
493 enum it_instruction_type it_insn_type
;
499 struct neon_type_el vectype
;
500 unsigned present
: 1; /* Operand present. */
501 unsigned isreg
: 1; /* Operand was a register. */
502 unsigned immisreg
: 1; /* .imm field is a second register. */
503 unsigned isscalar
: 1; /* Operand is a (Neon) scalar. */
504 unsigned immisalign
: 1; /* Immediate is an alignment specifier. */
505 unsigned immisfloat
: 1; /* Immediate was parsed as a float. */
506 /* Note: we abuse "regisimm" to mean "is Neon register" in VMOV
507 instructions. This allows us to disambiguate ARM <-> vector insns. */
508 unsigned regisimm
: 1; /* 64-bit immediate, reg forms high 32 bits. */
509 unsigned isvec
: 1; /* Is a single, double or quad VFP/Neon reg. */
510 unsigned isquad
: 1; /* Operand is Neon quad-precision register. */
511 unsigned issingle
: 1; /* Operand is VFP single-precision register. */
512 unsigned hasreloc
: 1; /* Operand has relocation suffix. */
513 unsigned writeback
: 1; /* Operand has trailing ! */
514 unsigned preind
: 1; /* Preindexed address. */
515 unsigned postind
: 1; /* Postindexed address. */
516 unsigned negative
: 1; /* Index register was negated. */
517 unsigned shifted
: 1; /* Shift applied to operation. */
518 unsigned shift_kind
: 3; /* Shift operation (enum shift_kind). */
519 } operands
[ARM_IT_MAX_OPERANDS
];
522 static struct arm_it inst
;
524 #define NUM_FLOAT_VALS 8
526 const char * fp_const
[] =
528 "0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "0.5", "10.0", 0
531 /* Number of littlenums required to hold an extended precision number. */
532 #define MAX_LITTLENUMS 6
534 LITTLENUM_TYPE fp_values
[NUM_FLOAT_VALS
][MAX_LITTLENUMS
];
544 #define CP_T_X 0x00008000
545 #define CP_T_Y 0x00400000
547 #define CONDS_BIT 0x00100000
548 #define LOAD_BIT 0x00100000
550 #define DOUBLE_LOAD_FLAG 0x00000001
554 const char * template_name
;
558 #define COND_ALWAYS 0xE
562 const char * template_name
;
566 struct asm_barrier_opt
568 const char * template_name
;
570 const arm_feature_set arch
;
573 /* The bit that distinguishes CPSR and SPSR. */
574 #define SPSR_BIT (1 << 22)
576 /* The individual PSR flag bits. */
577 #define PSR_c (1 << 16)
578 #define PSR_x (1 << 17)
579 #define PSR_s (1 << 18)
580 #define PSR_f (1 << 19)
585 bfd_reloc_code_real_type reloc
;
590 VFP_REG_Sd
, VFP_REG_Sm
, VFP_REG_Sn
,
591 VFP_REG_Dd
, VFP_REG_Dm
, VFP_REG_Dn
596 VFP_LDSTMIA
, VFP_LDSTMDB
, VFP_LDSTMIAX
, VFP_LDSTMDBX
599 /* Bits for DEFINED field in neon_typed_alias. */
600 #define NTA_HASTYPE 1
601 #define NTA_HASINDEX 2
603 struct neon_typed_alias
605 unsigned char defined
;
607 struct neon_type_el eltype
;
610 /* ARM register categories. This includes coprocessor numbers and various
611 architecture extensions' registers. Each entry should have an error message
612 in reg_expected_msgs below. */
640 /* Structure for a hash table entry for a register.
641 If TYPE is REG_TYPE_VFD or REG_TYPE_NQ, the NEON field can point to extra
642 information which states whether a vector type or index is specified (for a
643 register alias created with .dn or .qn). Otherwise NEON should be NULL. */
649 unsigned char builtin
;
650 struct neon_typed_alias
* neon
;
653 /* Diagnostics used when we don't get a register of the expected type. */
654 const char * const reg_expected_msgs
[] =
656 [REG_TYPE_RN
] = N_("ARM register expected"),
657 [REG_TYPE_CP
] = N_("bad or missing co-processor number"),
658 [REG_TYPE_CN
] = N_("co-processor register expected"),
659 [REG_TYPE_FN
] = N_("FPA register expected"),
660 [REG_TYPE_VFS
] = N_("VFP single precision register expected"),
661 [REG_TYPE_VFD
] = N_("VFP/Neon double precision register expected"),
662 [REG_TYPE_NQ
] = N_("Neon quad precision register expected"),
663 [REG_TYPE_VFSD
] = N_("VFP single or double precision register expected"),
664 [REG_TYPE_NDQ
] = N_("Neon double or quad precision register expected"),
665 [REG_TYPE_NSD
] = N_("Neon single or double precision register expected"),
666 [REG_TYPE_NSDQ
] = N_("VFP single, double or Neon quad precision register"
668 [REG_TYPE_VFC
] = N_("VFP system register expected"),
669 [REG_TYPE_MVF
] = N_("Maverick MVF register expected"),
670 [REG_TYPE_MVD
] = N_("Maverick MVD register expected"),
671 [REG_TYPE_MVFX
] = N_("Maverick MVFX register expected"),
672 [REG_TYPE_MVDX
] = N_("Maverick MVDX register expected"),
673 [REG_TYPE_MVAX
] = N_("Maverick MVAX register expected"),
674 [REG_TYPE_DSPSC
] = N_("Maverick DSPSC register expected"),
675 [REG_TYPE_MMXWR
] = N_("iWMMXt data register expected"),
676 [REG_TYPE_MMXWC
] = N_("iWMMXt control register expected"),
677 [REG_TYPE_MMXWCG
] = N_("iWMMXt scalar register expected"),
678 [REG_TYPE_XSCALE
] = N_("XScale accumulator register expected"),
679 [REG_TYPE_RNB
] = N_("")
682 /* Some well known registers that we refer to directly elsewhere. */
688 /* ARM instructions take 4bytes in the object file, Thumb instructions
694 /* Basic string to match. */
695 const char * template_name
;
697 /* Parameters to instruction. */
698 unsigned int operands
[8];
700 /* Conditional tag - see opcode_lookup. */
701 unsigned int tag
: 4;
703 /* Basic instruction code. */
704 unsigned int avalue
: 28;
706 /* Thumb-format instruction code. */
709 /* Which architecture variant provides this instruction. */
710 const arm_feature_set
* avariant
;
711 const arm_feature_set
* tvariant
;
713 /* Function to call to encode instruction in ARM format. */
714 void (* aencode
) (void);
716 /* Function to call to encode instruction in Thumb format. */
717 void (* tencode
) (void);
720 /* Defines for various bits that we will want to toggle. */
721 #define INST_IMMEDIATE 0x02000000
722 #define OFFSET_REG 0x02000000
723 #define HWOFFSET_IMM 0x00400000
724 #define SHIFT_BY_REG 0x00000010
725 #define PRE_INDEX 0x01000000
726 #define INDEX_UP 0x00800000
727 #define WRITE_BACK 0x00200000
728 #define LDM_TYPE_2_OR_3 0x00400000
729 #define CPSI_MMOD 0x00020000
731 #define LITERAL_MASK 0xf000f000
732 #define OPCODE_MASK 0xfe1fffff
733 #define V4_STR_BIT 0x00000020
734 #define VLDR_VMOV_SAME 0x0040f000
736 #define T2_SUBS_PC_LR 0xf3de8f00
738 #define DATA_OP_SHIFT 21
739 #define SBIT_SHIFT 20
741 #define T2_OPCODE_MASK 0xfe1fffff
742 #define T2_DATA_OP_SHIFT 21
743 #define T2_SBIT_SHIFT 20
745 #define A_COND_MASK 0xf0000000
746 #define A_PUSH_POP_OP_MASK 0x0fff0000
748 /* Opcodes for pushing/popping registers to/from the stack. */
749 #define A1_OPCODE_PUSH 0x092d0000
750 #define A2_OPCODE_PUSH 0x052d0004
751 #define A2_OPCODE_POP 0x049d0004
753 /* Codes to distinguish the arithmetic instructions. */
764 #define OPCODE_CMP 10
765 #define OPCODE_CMN 11
766 #define OPCODE_ORR 12
767 #define OPCODE_MOV 13
768 #define OPCODE_BIC 14
769 #define OPCODE_MVN 15
771 #define T2_OPCODE_AND 0
772 #define T2_OPCODE_BIC 1
773 #define T2_OPCODE_ORR 2
774 #define T2_OPCODE_ORN 3
775 #define T2_OPCODE_EOR 4
776 #define T2_OPCODE_ADD 8
777 #define T2_OPCODE_ADC 10
778 #define T2_OPCODE_SBC 11
779 #define T2_OPCODE_SUB 13
780 #define T2_OPCODE_RSB 14
782 #define T_OPCODE_MUL 0x4340
783 #define T_OPCODE_TST 0x4200
784 #define T_OPCODE_CMN 0x42c0
785 #define T_OPCODE_NEG 0x4240
786 #define T_OPCODE_MVN 0x43c0
788 #define T_OPCODE_ADD_R3 0x1800
789 #define T_OPCODE_SUB_R3 0x1a00
790 #define T_OPCODE_ADD_HI 0x4400
791 #define T_OPCODE_ADD_ST 0xb000
792 #define T_OPCODE_SUB_ST 0xb080
793 #define T_OPCODE_ADD_SP 0xa800
794 #define T_OPCODE_ADD_PC 0xa000
795 #define T_OPCODE_ADD_I8 0x3000
796 #define T_OPCODE_SUB_I8 0x3800
797 #define T_OPCODE_ADD_I3 0x1c00
798 #define T_OPCODE_SUB_I3 0x1e00
800 #define T_OPCODE_ASR_R 0x4100
801 #define T_OPCODE_LSL_R 0x4080
802 #define T_OPCODE_LSR_R 0x40c0
803 #define T_OPCODE_ROR_R 0x41c0
804 #define T_OPCODE_ASR_I 0x1000
805 #define T_OPCODE_LSL_I 0x0000
806 #define T_OPCODE_LSR_I 0x0800
808 #define T_OPCODE_MOV_I8 0x2000
809 #define T_OPCODE_CMP_I8 0x2800
810 #define T_OPCODE_CMP_LR 0x4280
811 #define T_OPCODE_MOV_HR 0x4600
812 #define T_OPCODE_CMP_HR 0x4500
814 #define T_OPCODE_LDR_PC 0x4800
815 #define T_OPCODE_LDR_SP 0x9800
816 #define T_OPCODE_STR_SP 0x9000
817 #define T_OPCODE_LDR_IW 0x6800
818 #define T_OPCODE_STR_IW 0x6000
819 #define T_OPCODE_LDR_IH 0x8800
820 #define T_OPCODE_STR_IH 0x8000
821 #define T_OPCODE_LDR_IB 0x7800
822 #define T_OPCODE_STR_IB 0x7000
823 #define T_OPCODE_LDR_RW 0x5800
824 #define T_OPCODE_STR_RW 0x5000
825 #define T_OPCODE_LDR_RH 0x5a00
826 #define T_OPCODE_STR_RH 0x5200
827 #define T_OPCODE_LDR_RB 0x5c00
828 #define T_OPCODE_STR_RB 0x5400
830 #define T_OPCODE_PUSH 0xb400
831 #define T_OPCODE_POP 0xbc00
833 #define T_OPCODE_BRANCH 0xe000
835 #define THUMB_SIZE 2 /* Size of thumb instruction. */
836 #define THUMB_PP_PC_LR 0x0100
837 #define THUMB_LOAD_BIT 0x0800
838 #define THUMB2_LOAD_BIT 0x00100000
840 #define BAD_ARGS _("bad arguments to instruction")
841 #define BAD_SP _("r13 not allowed here")
842 #define BAD_PC _("r15 not allowed here")
843 #define BAD_COND _("instruction cannot be conditional")
844 #define BAD_OVERLAP _("registers may not be the same")
845 #define BAD_HIREG _("lo register required")
846 #define BAD_THUMB32 _("instruction not supported in Thumb16 mode")
847 #define BAD_ADDR_MODE _("instruction does not accept this addressing mode")
848 #define BAD_BRANCH _("branch must be last instruction in IT block")
849 #define BAD_BRANCH_OFF _("branch out of range or not a multiple of 2")
850 #define BAD_NOT_IT _("instruction not allowed in IT block")
851 #define BAD_FPU _("selected FPU does not support instruction")
852 #define BAD_OUT_IT _("thumb conditional instruction should be in IT block")
853 #define BAD_IT_COND _("incorrect condition in IT block")
854 #define BAD_IT_IT _("IT falling in the range of a previous IT block")
855 #define MISSING_FNSTART _("missing .fnstart before unwinding directive")
856 #define BAD_PC_ADDRESSING \
857 _("cannot use register index with PC-relative addressing")
858 #define BAD_PC_WRITEBACK \
859 _("cannot use writeback with PC-relative addressing")
860 #define BAD_RANGE _("branch out of range")
861 #define BAD_FP16 _("selected processor does not support fp16 instruction")
862 #define UNPRED_REG(R) _("using " R " results in unpredictable behaviour")
863 #define THUMB1_RELOC_ONLY _("relocation valid in thumb1 code only")
865 static struct hash_control
* arm_ops_hsh
;
866 static struct hash_control
* arm_cond_hsh
;
867 static struct hash_control
* arm_shift_hsh
;
868 static struct hash_control
* arm_psr_hsh
;
869 static struct hash_control
* arm_v7m_psr_hsh
;
870 static struct hash_control
* arm_reg_hsh
;
871 static struct hash_control
* arm_reloc_hsh
;
872 static struct hash_control
* arm_barrier_opt_hsh
;
874 /* Stuff needed to resolve the label ambiguity
883 symbolS
* last_label_seen
;
884 static int label_is_thumb_function_name
= FALSE
;
886 /* Literal pool structure. Held on a per-section
887 and per-sub-section basis. */
889 #define MAX_LITERAL_POOL_SIZE 1024
890 typedef struct literal_pool
892 expressionS literals
[MAX_LITERAL_POOL_SIZE
];
893 unsigned int next_free_entry
;
899 struct dwarf2_line_info locs
[MAX_LITERAL_POOL_SIZE
];
901 struct literal_pool
* next
;
902 unsigned int alignment
;
905 /* Pointer to a linked list of literal pools. */
906 literal_pool
* list_of_pools
= NULL
;
908 typedef enum asmfunc_states
911 WAITING_ASMFUNC_NAME
,
915 static asmfunc_states asmfunc_state
= OUTSIDE_ASMFUNC
;
918 # define now_it seg_info (now_seg)->tc_segment_info_data.current_it
920 static struct current_it now_it
;
924 now_it_compatible (int cond
)
926 return (cond
& ~1) == (now_it
.cc
& ~1);
930 conditional_insn (void)
932 return inst
.cond
!= COND_ALWAYS
;
935 static int in_it_block (void);
937 static int handle_it_state (void);
939 static void force_automatic_it_block_close (void);
941 static void it_fsm_post_encode (void);
943 #define set_it_insn_type(type) \
946 inst.it_insn_type = type; \
947 if (handle_it_state () == FAIL) \
952 #define set_it_insn_type_nonvoid(type, failret) \
955 inst.it_insn_type = type; \
956 if (handle_it_state () == FAIL) \
961 #define set_it_insn_type_last() \
964 if (inst.cond == COND_ALWAYS) \
965 set_it_insn_type (IF_INSIDE_IT_LAST_INSN); \
967 set_it_insn_type (INSIDE_IT_LAST_INSN); \
973 /* This array holds the chars that always start a comment. If the
974 pre-processor is disabled, these aren't very useful. */
975 char arm_comment_chars
[] = "@";
977 /* This array holds the chars that only start a comment at the beginning of
978 a line. If the line seems to have the form '# 123 filename'
979 .line and .file directives will appear in the pre-processed output. */
980 /* Note that input_file.c hand checks for '#' at the beginning of the
981 first line of the input file. This is because the compiler outputs
982 #NO_APP at the beginning of its output. */
983 /* Also note that comments like this one will always work. */
984 const char line_comment_chars
[] = "#";
986 char arm_line_separator_chars
[] = ";";
988 /* Chars that can be used to separate mant
989 from exp in floating point numbers. */
990 const char EXP_CHARS
[] = "eE";
992 /* Chars that mean this number is a floating point constant. */
996 const char FLT_CHARS
[] = "rRsSfFdDxXeEpP";
998 /* Prefix characters that indicate the start of an immediate
1000 #define is_immediate_prefix(C) ((C) == '#' || (C) == '$')
1002 /* Separator character handling. */
1004 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
1007 skip_past_char (char ** str
, char c
)
1009 /* PR gas/14987: Allow for whitespace before the expected character. */
1010 skip_whitespace (*str
);
1021 #define skip_past_comma(str) skip_past_char (str, ',')
1023 /* Arithmetic expressions (possibly involving symbols). */
1025 /* Return TRUE if anything in the expression is a bignum. */
1028 walk_no_bignums (symbolS
* sp
)
1030 if (symbol_get_value_expression (sp
)->X_op
== O_big
)
1033 if (symbol_get_value_expression (sp
)->X_add_symbol
)
1035 return (walk_no_bignums (symbol_get_value_expression (sp
)->X_add_symbol
)
1036 || (symbol_get_value_expression (sp
)->X_op_symbol
1037 && walk_no_bignums (symbol_get_value_expression (sp
)->X_op_symbol
)));
1043 static bfd_boolean in_my_get_expression
= FALSE
;
1045 /* Third argument to my_get_expression. */
1046 #define GE_NO_PREFIX 0
1047 #define GE_IMM_PREFIX 1
1048 #define GE_OPT_PREFIX 2
1049 /* This is a bit of a hack. Use an optional prefix, and also allow big (64-bit)
1050 immediates, as can be used in Neon VMVN and VMOV immediate instructions. */
1051 #define GE_OPT_PREFIX_BIG 3
1054 my_get_expression (expressionS
* ep
, char ** str
, int prefix_mode
)
1058 /* In unified syntax, all prefixes are optional. */
1060 prefix_mode
= (prefix_mode
== GE_OPT_PREFIX_BIG
) ? prefix_mode
1063 switch (prefix_mode
)
1065 case GE_NO_PREFIX
: break;
1067 if (!is_immediate_prefix (**str
))
1069 inst
.error
= _("immediate expression requires a # prefix");
1075 case GE_OPT_PREFIX_BIG
:
1076 if (is_immediate_prefix (**str
))
1083 memset (ep
, 0, sizeof (expressionS
));
1085 save_in
= input_line_pointer
;
1086 input_line_pointer
= *str
;
1087 in_my_get_expression
= TRUE
;
1089 in_my_get_expression
= FALSE
;
1091 if (ep
->X_op
== O_illegal
|| ep
->X_op
== O_absent
)
1093 /* We found a bad or missing expression in md_operand(). */
1094 *str
= input_line_pointer
;
1095 input_line_pointer
= save_in
;
1096 if (inst
.error
== NULL
)
1097 inst
.error
= (ep
->X_op
== O_absent
1098 ? _("missing expression") :_("bad expression"));
1102 /* Get rid of any bignums now, so that we don't generate an error for which
1103 we can't establish a line number later on. Big numbers are never valid
1104 in instructions, which is where this routine is always called. */
1105 if (prefix_mode
!= GE_OPT_PREFIX_BIG
1106 && (ep
->X_op
== O_big
1107 || (ep
->X_add_symbol
1108 && (walk_no_bignums (ep
->X_add_symbol
)
1110 && walk_no_bignums (ep
->X_op_symbol
))))))
1112 inst
.error
= _("invalid constant");
1113 *str
= input_line_pointer
;
1114 input_line_pointer
= save_in
;
1118 *str
= input_line_pointer
;
1119 input_line_pointer
= save_in
;
1123 /* Turn a string in input_line_pointer into a floating point constant
1124 of type TYPE, and store the appropriate bytes in *LITP. The number
1125 of LITTLENUMS emitted is stored in *SIZEP. An error message is
1126 returned, or NULL on OK.
1128 Note that fp constants aren't represent in the normal way on the ARM.
1129 In big endian mode, things are as expected. However, in little endian
1130 mode fp constants are big-endian word-wise, and little-endian byte-wise
1131 within the words. For example, (double) 1.1 in big endian mode is
1132 the byte sequence 3f f1 99 99 99 99 99 9a, and in little endian mode is
1133 the byte sequence 99 99 f1 3f 9a 99 99 99.
1135 ??? The format of 12 byte floats is uncertain according to gcc's arm.h. */
1138 md_atof (int type
, char * litP
, int * sizeP
)
1141 LITTLENUM_TYPE words
[MAX_LITTLENUMS
];
1173 return _("Unrecognized or unsupported floating point constant");
1176 t
= atof_ieee (input_line_pointer
, type
, words
);
1178 input_line_pointer
= t
;
1179 *sizeP
= prec
* sizeof (LITTLENUM_TYPE
);
1181 if (target_big_endian
)
1183 for (i
= 0; i
< prec
; i
++)
1185 md_number_to_chars (litP
, (valueT
) words
[i
], sizeof (LITTLENUM_TYPE
));
1186 litP
+= sizeof (LITTLENUM_TYPE
);
1191 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_endian_pure
))
1192 for (i
= prec
- 1; i
>= 0; i
--)
1194 md_number_to_chars (litP
, (valueT
) words
[i
], sizeof (LITTLENUM_TYPE
));
1195 litP
+= sizeof (LITTLENUM_TYPE
);
1198 /* For a 4 byte float the order of elements in `words' is 1 0.
1199 For an 8 byte float the order is 1 0 3 2. */
1200 for (i
= 0; i
< prec
; i
+= 2)
1202 md_number_to_chars (litP
, (valueT
) words
[i
+ 1],
1203 sizeof (LITTLENUM_TYPE
));
1204 md_number_to_chars (litP
+ sizeof (LITTLENUM_TYPE
),
1205 (valueT
) words
[i
], sizeof (LITTLENUM_TYPE
));
1206 litP
+= 2 * sizeof (LITTLENUM_TYPE
);
1213 /* We handle all bad expressions here, so that we can report the faulty
1214 instruction in the error message. */
1217 md_operand (expressionS
* exp
)
1219 if (in_my_get_expression
)
1220 exp
->X_op
= O_illegal
;
1223 /* Immediate values. */
1226 /* Generic immediate-value read function for use in directives.
1227 Accepts anything that 'expression' can fold to a constant.
1228 *val receives the number. */
1231 immediate_for_directive (int *val
)
1234 exp
.X_op
= O_illegal
;
1236 if (is_immediate_prefix (*input_line_pointer
))
1238 input_line_pointer
++;
1242 if (exp
.X_op
!= O_constant
)
1244 as_bad (_("expected #constant"));
1245 ignore_rest_of_line ();
1248 *val
= exp
.X_add_number
;
1253 /* Register parsing. */
1255 /* Generic register parser. CCP points to what should be the
1256 beginning of a register name. If it is indeed a valid register
1257 name, advance CCP over it and return the reg_entry structure;
1258 otherwise return NULL. Does not issue diagnostics. */
1260 static struct reg_entry
*
1261 arm_reg_parse_multi (char **ccp
)
1265 struct reg_entry
*reg
;
1267 skip_whitespace (start
);
1269 #ifdef REGISTER_PREFIX
1270 if (*start
!= REGISTER_PREFIX
)
1274 #ifdef OPTIONAL_REGISTER_PREFIX
1275 if (*start
== OPTIONAL_REGISTER_PREFIX
)
1280 if (!ISALPHA (*p
) || !is_name_beginner (*p
))
1285 while (ISALPHA (*p
) || ISDIGIT (*p
) || *p
== '_');
1287 reg
= (struct reg_entry
*) hash_find_n (arm_reg_hsh
, start
, p
- start
);
1297 arm_reg_alt_syntax (char **ccp
, char *start
, struct reg_entry
*reg
,
1298 enum arm_reg_type type
)
1300 /* Alternative syntaxes are accepted for a few register classes. */
1307 /* Generic coprocessor register names are allowed for these. */
1308 if (reg
&& reg
->type
== REG_TYPE_CN
)
1313 /* For backward compatibility, a bare number is valid here. */
1315 unsigned long processor
= strtoul (start
, ccp
, 10);
1316 if (*ccp
!= start
&& processor
<= 15)
1321 case REG_TYPE_MMXWC
:
1322 /* WC includes WCG. ??? I'm not sure this is true for all
1323 instructions that take WC registers. */
1324 if (reg
&& reg
->type
== REG_TYPE_MMXWCG
)
1335 /* As arm_reg_parse_multi, but the register must be of type TYPE, and the
1336 return value is the register number or FAIL. */
1339 arm_reg_parse (char **ccp
, enum arm_reg_type type
)
1342 struct reg_entry
*reg
= arm_reg_parse_multi (ccp
);
1345 /* Do not allow a scalar (reg+index) to parse as a register. */
1346 if (reg
&& reg
->neon
&& (reg
->neon
->defined
& NTA_HASINDEX
))
1349 if (reg
&& reg
->type
== type
)
1352 if ((ret
= arm_reg_alt_syntax (ccp
, start
, reg
, type
)) != FAIL
)
1359 /* Parse a Neon type specifier. *STR should point at the leading '.'
1360 character. Does no verification at this stage that the type fits the opcode
1367 Can all be legally parsed by this function.
1369 Fills in neon_type struct pointer with parsed information, and updates STR
1370 to point after the parsed type specifier. Returns SUCCESS if this was a legal
1371 type, FAIL if not. */
1374 parse_neon_type (struct neon_type
*type
, char **str
)
1381 while (type
->elems
< NEON_MAX_TYPE_ELS
)
1383 enum neon_el_type thistype
= NT_untyped
;
1384 unsigned thissize
= -1u;
1391 /* Just a size without an explicit type. */
1395 switch (TOLOWER (*ptr
))
1397 case 'i': thistype
= NT_integer
; break;
1398 case 'f': thistype
= NT_float
; break;
1399 case 'p': thistype
= NT_poly
; break;
1400 case 's': thistype
= NT_signed
; break;
1401 case 'u': thistype
= NT_unsigned
; break;
1403 thistype
= NT_float
;
1408 as_bad (_("unexpected character `%c' in type specifier"), *ptr
);
1414 /* .f is an abbreviation for .f32. */
1415 if (thistype
== NT_float
&& !ISDIGIT (*ptr
))
1420 thissize
= strtoul (ptr
, &ptr
, 10);
1422 if (thissize
!= 8 && thissize
!= 16 && thissize
!= 32
1425 as_bad (_("bad size %d in type specifier"), thissize
);
1433 type
->el
[type
->elems
].type
= thistype
;
1434 type
->el
[type
->elems
].size
= thissize
;
1439 /* Empty/missing type is not a successful parse. */
1440 if (type
->elems
== 0)
1448 /* Errors may be set multiple times during parsing or bit encoding
1449 (particularly in the Neon bits), but usually the earliest error which is set
1450 will be the most meaningful. Avoid overwriting it with later (cascading)
1451 errors by calling this function. */
1454 first_error (const char *err
)
1460 /* Parse a single type, e.g. ".s32", leading period included. */
1462 parse_neon_operand_type (struct neon_type_el
*vectype
, char **ccp
)
1465 struct neon_type optype
;
1469 if (parse_neon_type (&optype
, &str
) == SUCCESS
)
1471 if (optype
.elems
== 1)
1472 *vectype
= optype
.el
[0];
1475 first_error (_("only one type should be specified for operand"));
1481 first_error (_("vector type expected"));
1493 /* Special meanings for indices (which have a range of 0-7), which will fit into
1496 #define NEON_ALL_LANES 15
1497 #define NEON_INTERLEAVE_LANES 14
1499 /* Parse either a register or a scalar, with an optional type. Return the
1500 register number, and optionally fill in the actual type of the register
1501 when multiple alternatives were given (NEON_TYPE_NDQ) in *RTYPE, and
1502 type/index information in *TYPEINFO. */
1505 parse_typed_reg_or_scalar (char **ccp
, enum arm_reg_type type
,
1506 enum arm_reg_type
*rtype
,
1507 struct neon_typed_alias
*typeinfo
)
1510 struct reg_entry
*reg
= arm_reg_parse_multi (&str
);
1511 struct neon_typed_alias atype
;
1512 struct neon_type_el parsetype
;
1516 atype
.eltype
.type
= NT_invtype
;
1517 atype
.eltype
.size
= -1;
1519 /* Try alternate syntax for some types of register. Note these are mutually
1520 exclusive with the Neon syntax extensions. */
1523 int altreg
= arm_reg_alt_syntax (&str
, *ccp
, reg
, type
);
1531 /* Undo polymorphism when a set of register types may be accepted. */
1532 if ((type
== REG_TYPE_NDQ
1533 && (reg
->type
== REG_TYPE_NQ
|| reg
->type
== REG_TYPE_VFD
))
1534 || (type
== REG_TYPE_VFSD
1535 && (reg
->type
== REG_TYPE_VFS
|| reg
->type
== REG_TYPE_VFD
))
1536 || (type
== REG_TYPE_NSDQ
1537 && (reg
->type
== REG_TYPE_VFS
|| reg
->type
== REG_TYPE_VFD
1538 || reg
->type
== REG_TYPE_NQ
))
1539 || (type
== REG_TYPE_NSD
1540 && (reg
->type
== REG_TYPE_VFS
|| reg
->type
== REG_TYPE_VFD
))
1541 || (type
== REG_TYPE_MMXWC
1542 && (reg
->type
== REG_TYPE_MMXWCG
)))
1543 type
= (enum arm_reg_type
) reg
->type
;
1545 if (type
!= reg
->type
)
1551 if (parse_neon_operand_type (&parsetype
, &str
) == SUCCESS
)
1553 if ((atype
.defined
& NTA_HASTYPE
) != 0)
1555 first_error (_("can't redefine type for operand"));
1558 atype
.defined
|= NTA_HASTYPE
;
1559 atype
.eltype
= parsetype
;
1562 if (skip_past_char (&str
, '[') == SUCCESS
)
1564 if (type
!= REG_TYPE_VFD
1565 && !(type
== REG_TYPE_VFS
1566 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8_2
)))
1568 first_error (_("only D registers may be indexed"));
1572 if ((atype
.defined
& NTA_HASINDEX
) != 0)
1574 first_error (_("can't change index for operand"));
1578 atype
.defined
|= NTA_HASINDEX
;
1580 if (skip_past_char (&str
, ']') == SUCCESS
)
1581 atype
.index
= NEON_ALL_LANES
;
1586 my_get_expression (&exp
, &str
, GE_NO_PREFIX
);
1588 if (exp
.X_op
!= O_constant
)
1590 first_error (_("constant expression required"));
1594 if (skip_past_char (&str
, ']') == FAIL
)
1597 atype
.index
= exp
.X_add_number
;
1612 /* Like arm_reg_parse, but allow allow the following extra features:
1613 - If RTYPE is non-zero, return the (possibly restricted) type of the
1614 register (e.g. Neon double or quad reg when either has been requested).
1615 - If this is a Neon vector type with additional type information, fill
1616 in the struct pointed to by VECTYPE (if non-NULL).
1617 This function will fault on encountering a scalar. */
1620 arm_typed_reg_parse (char **ccp
, enum arm_reg_type type
,
1621 enum arm_reg_type
*rtype
, struct neon_type_el
*vectype
)
1623 struct neon_typed_alias atype
;
1625 int reg
= parse_typed_reg_or_scalar (&str
, type
, rtype
, &atype
);
1630 /* Do not allow regname(... to parse as a register. */
1634 /* Do not allow a scalar (reg+index) to parse as a register. */
1635 if ((atype
.defined
& NTA_HASINDEX
) != 0)
1637 first_error (_("register operand expected, but got scalar"));
1642 *vectype
= atype
.eltype
;
1649 #define NEON_SCALAR_REG(X) ((X) >> 4)
1650 #define NEON_SCALAR_INDEX(X) ((X) & 15)
1652 /* Parse a Neon scalar. Most of the time when we're parsing a scalar, we don't
1653 have enough information to be able to do a good job bounds-checking. So, we
1654 just do easy checks here, and do further checks later. */
1657 parse_scalar (char **ccp
, int elsize
, struct neon_type_el
*type
)
1661 struct neon_typed_alias atype
;
1662 enum arm_reg_type reg_type
= REG_TYPE_VFD
;
1665 reg_type
= REG_TYPE_VFS
;
1667 reg
= parse_typed_reg_or_scalar (&str
, reg_type
, NULL
, &atype
);
1669 if (reg
== FAIL
|| (atype
.defined
& NTA_HASINDEX
) == 0)
1672 if (atype
.index
== NEON_ALL_LANES
)
1674 first_error (_("scalar must have an index"));
1677 else if (atype
.index
>= 64 / elsize
)
1679 first_error (_("scalar index out of range"));
1684 *type
= atype
.eltype
;
1688 return reg
* 16 + atype
.index
;
1691 /* Types of registers in a list. */
1702 /* Parse an ARM register list. Returns the bitmask, or FAIL. */
1705 parse_reg_list (char ** strp
, enum reg_list_els etype
)
1711 gas_assert (etype
== REGLIST_RN
|| etype
== REGLIST_CLRM
);
1713 /* We come back here if we get ranges concatenated by '+' or '|'. */
1716 skip_whitespace (str
);
1729 const char apsr_str
[] = "apsr";
1730 int apsr_str_len
= strlen (apsr_str
);
1732 reg
= arm_reg_parse (&str
, REGLIST_RN
);
1733 if (etype
== REGLIST_CLRM
)
1735 if (reg
== REG_SP
|| reg
== REG_PC
)
1737 else if (reg
== FAIL
1738 && !strncasecmp (str
, apsr_str
, apsr_str_len
)
1739 && !ISALPHA (*(str
+ apsr_str_len
)))
1742 str
+= apsr_str_len
;
1747 first_error (_("r0-r12, lr or APSR expected"));
1751 else /* etype == REGLIST_RN. */
1755 first_error (_(reg_expected_msgs
[REGLIST_RN
]));
1766 first_error (_("bad range in register list"));
1770 for (i
= cur_reg
+ 1; i
< reg
; i
++)
1772 if (range
& (1 << i
))
1774 (_("Warning: duplicated register (r%d) in register list"),
1782 if (range
& (1 << reg
))
1783 as_tsktsk (_("Warning: duplicated register (r%d) in register list"),
1785 else if (reg
<= cur_reg
)
1786 as_tsktsk (_("Warning: register range not in ascending order"));
1791 while (skip_past_comma (&str
) != FAIL
1792 || (in_range
= 1, *str
++ == '-'));
1795 if (skip_past_char (&str
, '}') == FAIL
)
1797 first_error (_("missing `}'"));
1801 else if (etype
== REGLIST_RN
)
1805 if (my_get_expression (&exp
, &str
, GE_NO_PREFIX
))
1808 if (exp
.X_op
== O_constant
)
1810 if (exp
.X_add_number
1811 != (exp
.X_add_number
& 0x0000ffff))
1813 inst
.error
= _("invalid register mask");
1817 if ((range
& exp
.X_add_number
) != 0)
1819 int regno
= range
& exp
.X_add_number
;
1822 regno
= (1 << regno
) - 1;
1824 (_("Warning: duplicated register (r%d) in register list"),
1828 range
|= exp
.X_add_number
;
1832 if (inst
.relocs
[0].type
!= 0)
1834 inst
.error
= _("expression too complex");
1838 memcpy (&inst
.relocs
[0].exp
, &exp
, sizeof (expressionS
));
1839 inst
.relocs
[0].type
= BFD_RELOC_ARM_MULTI
;
1840 inst
.relocs
[0].pc_rel
= 0;
1844 if (*str
== '|' || *str
== '+')
1850 while (another_range
);
1856 /* Parse a VFP register list. If the string is invalid return FAIL.
1857 Otherwise return the number of registers, and set PBASE to the first
1858 register. Parses registers of type ETYPE.
1859 If REGLIST_NEON_D is used, several syntax enhancements are enabled:
1860 - Q registers can be used to specify pairs of D registers
1861 - { } can be omitted from around a singleton register list
1862 FIXME: This is not implemented, as it would require backtracking in
1865 This could be done (the meaning isn't really ambiguous), but doesn't
1866 fit in well with the current parsing framework.
1867 - 32 D registers may be used (also true for VFPv3).
1868 FIXME: Types are ignored in these register lists, which is probably a
1872 parse_vfp_reg_list (char **ccp
, unsigned int *pbase
, enum reg_list_els etype
)
1877 enum arm_reg_type regtype
= (enum arm_reg_type
) 0;
1881 unsigned long mask
= 0;
1884 if (skip_past_char (&str
, '{') == FAIL
)
1886 inst
.error
= _("expecting {");
1893 regtype
= REG_TYPE_VFS
;
1898 regtype
= REG_TYPE_VFD
;
1901 case REGLIST_NEON_D
:
1902 regtype
= REG_TYPE_NDQ
;
1909 if (etype
!= REGLIST_VFP_S
)
1911 /* VFPv3 allows 32 D registers, except for the VFPv3-D16 variant. */
1912 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_d32
))
1916 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
1919 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
,
1926 base_reg
= max_regs
;
1930 int setmask
= 1, addregs
= 1;
1932 new_base
= arm_typed_reg_parse (&str
, regtype
, ®type
, NULL
);
1934 if (new_base
== FAIL
)
1936 first_error (_(reg_expected_msgs
[regtype
]));
1940 if (new_base
>= max_regs
)
1942 first_error (_("register out of range in list"));
1946 /* Note: a value of 2 * n is returned for the register Q<n>. */
1947 if (regtype
== REG_TYPE_NQ
)
1953 if (new_base
< base_reg
)
1954 base_reg
= new_base
;
1956 if (mask
& (setmask
<< new_base
))
1958 first_error (_("invalid register list"));
1962 if ((mask
>> new_base
) != 0 && ! warned
)
1964 as_tsktsk (_("register list not in ascending order"));
1968 mask
|= setmask
<< new_base
;
1971 if (*str
== '-') /* We have the start of a range expression */
1977 if ((high_range
= arm_typed_reg_parse (&str
, regtype
, NULL
, NULL
))
1980 inst
.error
= gettext (reg_expected_msgs
[regtype
]);
1984 if (high_range
>= max_regs
)
1986 first_error (_("register out of range in list"));
1990 if (regtype
== REG_TYPE_NQ
)
1991 high_range
= high_range
+ 1;
1993 if (high_range
<= new_base
)
1995 inst
.error
= _("register range not in ascending order");
1999 for (new_base
+= addregs
; new_base
<= high_range
; new_base
+= addregs
)
2001 if (mask
& (setmask
<< new_base
))
2003 inst
.error
= _("invalid register list");
2007 mask
|= setmask
<< new_base
;
2012 while (skip_past_comma (&str
) != FAIL
);
2016 /* Sanity check -- should have raised a parse error above. */
2017 if (count
== 0 || count
> max_regs
)
2022 /* Final test -- the registers must be consecutive. */
2024 for (i
= 0; i
< count
; i
++)
2026 if ((mask
& (1u << i
)) == 0)
2028 inst
.error
= _("non-contiguous register range");
2038 /* True if two alias types are the same. */
2041 neon_alias_types_same (struct neon_typed_alias
*a
, struct neon_typed_alias
*b
)
2049 if (a
->defined
!= b
->defined
)
2052 if ((a
->defined
& NTA_HASTYPE
) != 0
2053 && (a
->eltype
.type
!= b
->eltype
.type
2054 || a
->eltype
.size
!= b
->eltype
.size
))
2057 if ((a
->defined
& NTA_HASINDEX
) != 0
2058 && (a
->index
!= b
->index
))
2064 /* Parse element/structure lists for Neon VLD<n> and VST<n> instructions.
2065 The base register is put in *PBASE.
2066 The lane (or one of the NEON_*_LANES constants) is placed in bits [3:0] of
2068 The register stride (minus one) is put in bit 4 of the return value.
2069 Bits [6:5] encode the list length (minus one).
2070 The type of the list elements is put in *ELTYPE, if non-NULL. */
2072 #define NEON_LANE(X) ((X) & 0xf)
2073 #define NEON_REG_STRIDE(X) ((((X) >> 4) & 1) + 1)
2074 #define NEON_REGLIST_LENGTH(X) ((((X) >> 5) & 3) + 1)
2077 parse_neon_el_struct_list (char **str
, unsigned *pbase
,
2078 struct neon_type_el
*eltype
)
2085 int leading_brace
= 0;
2086 enum arm_reg_type rtype
= REG_TYPE_NDQ
;
2087 const char *const incr_error
= _("register stride must be 1 or 2");
2088 const char *const type_error
= _("mismatched element/structure types in list");
2089 struct neon_typed_alias firsttype
;
2090 firsttype
.defined
= 0;
2091 firsttype
.eltype
.type
= NT_invtype
;
2092 firsttype
.eltype
.size
= -1;
2093 firsttype
.index
= -1;
2095 if (skip_past_char (&ptr
, '{') == SUCCESS
)
2100 struct neon_typed_alias atype
;
2101 int getreg
= parse_typed_reg_or_scalar (&ptr
, rtype
, &rtype
, &atype
);
2105 first_error (_(reg_expected_msgs
[rtype
]));
2112 if (rtype
== REG_TYPE_NQ
)
2118 else if (reg_incr
== -1)
2120 reg_incr
= getreg
- base_reg
;
2121 if (reg_incr
< 1 || reg_incr
> 2)
2123 first_error (_(incr_error
));
2127 else if (getreg
!= base_reg
+ reg_incr
* count
)
2129 first_error (_(incr_error
));
2133 if (! neon_alias_types_same (&atype
, &firsttype
))
2135 first_error (_(type_error
));
2139 /* Handle Dn-Dm or Qn-Qm syntax. Can only be used with non-indexed list
2143 struct neon_typed_alias htype
;
2144 int hireg
, dregs
= (rtype
== REG_TYPE_NQ
) ? 2 : 1;
2146 lane
= NEON_INTERLEAVE_LANES
;
2147 else if (lane
!= NEON_INTERLEAVE_LANES
)
2149 first_error (_(type_error
));
2154 else if (reg_incr
!= 1)
2156 first_error (_("don't use Rn-Rm syntax with non-unit stride"));
2160 hireg
= parse_typed_reg_or_scalar (&ptr
, rtype
, NULL
, &htype
);
2163 first_error (_(reg_expected_msgs
[rtype
]));
2166 if (! neon_alias_types_same (&htype
, &firsttype
))
2168 first_error (_(type_error
));
2171 count
+= hireg
+ dregs
- getreg
;
2175 /* If we're using Q registers, we can't use [] or [n] syntax. */
2176 if (rtype
== REG_TYPE_NQ
)
2182 if ((atype
.defined
& NTA_HASINDEX
) != 0)
2186 else if (lane
!= atype
.index
)
2188 first_error (_(type_error
));
2192 else if (lane
== -1)
2193 lane
= NEON_INTERLEAVE_LANES
;
2194 else if (lane
!= NEON_INTERLEAVE_LANES
)
2196 first_error (_(type_error
));
2201 while ((count
!= 1 || leading_brace
) && skip_past_comma (&ptr
) != FAIL
);
2203 /* No lane set by [x]. We must be interleaving structures. */
2205 lane
= NEON_INTERLEAVE_LANES
;
2208 if (lane
== -1 || base_reg
== -1 || count
< 1 || count
> 4
2209 || (count
> 1 && reg_incr
== -1))
2211 first_error (_("error parsing element/structure list"));
2215 if ((count
> 1 || leading_brace
) && skip_past_char (&ptr
, '}') == FAIL
)
2217 first_error (_("expected }"));
2225 *eltype
= firsttype
.eltype
;
2230 return lane
| ((reg_incr
- 1) << 4) | ((count
- 1) << 5);
2233 /* Parse an explicit relocation suffix on an expression. This is
2234 either nothing, or a word in parentheses. Note that if !OBJ_ELF,
2235 arm_reloc_hsh contains no entries, so this function can only
2236 succeed if there is no () after the word. Returns -1 on error,
2237 BFD_RELOC_UNUSED if there wasn't any suffix. */
2240 parse_reloc (char **str
)
2242 struct reloc_entry
*r
;
2246 return BFD_RELOC_UNUSED
;
2251 while (*q
&& *q
!= ')' && *q
!= ',')
2256 if ((r
= (struct reloc_entry
*)
2257 hash_find_n (arm_reloc_hsh
, p
, q
- p
)) == NULL
)
2264 /* Directives: register aliases. */
2266 static struct reg_entry
*
2267 insert_reg_alias (char *str
, unsigned number
, int type
)
2269 struct reg_entry
*new_reg
;
2272 if ((new_reg
= (struct reg_entry
*) hash_find (arm_reg_hsh
, str
)) != 0)
2274 if (new_reg
->builtin
)
2275 as_warn (_("ignoring attempt to redefine built-in register '%s'"), str
);
2277 /* Only warn about a redefinition if it's not defined as the
2279 else if (new_reg
->number
!= number
|| new_reg
->type
!= type
)
2280 as_warn (_("ignoring redefinition of register alias '%s'"), str
);
2285 name
= xstrdup (str
);
2286 new_reg
= XNEW (struct reg_entry
);
2288 new_reg
->name
= name
;
2289 new_reg
->number
= number
;
2290 new_reg
->type
= type
;
2291 new_reg
->builtin
= FALSE
;
2292 new_reg
->neon
= NULL
;
2294 if (hash_insert (arm_reg_hsh
, name
, (void *) new_reg
))
2301 insert_neon_reg_alias (char *str
, int number
, int type
,
2302 struct neon_typed_alias
*atype
)
2304 struct reg_entry
*reg
= insert_reg_alias (str
, number
, type
);
2308 first_error (_("attempt to redefine typed alias"));
2314 reg
->neon
= XNEW (struct neon_typed_alias
);
2315 *reg
->neon
= *atype
;
2319 /* Look for the .req directive. This is of the form:
2321 new_register_name .req existing_register_name
2323 If we find one, or if it looks sufficiently like one that we want to
2324 handle any error here, return TRUE. Otherwise return FALSE. */
2327 create_register_alias (char * newname
, char *p
)
2329 struct reg_entry
*old
;
2330 char *oldname
, *nbuf
;
2333 /* The input scrubber ensures that whitespace after the mnemonic is
2334 collapsed to single spaces. */
2336 if (strncmp (oldname
, " .req ", 6) != 0)
2340 if (*oldname
== '\0')
2343 old
= (struct reg_entry
*) hash_find (arm_reg_hsh
, oldname
);
2346 as_warn (_("unknown register '%s' -- .req ignored"), oldname
);
2350 /* If TC_CASE_SENSITIVE is defined, then newname already points to
2351 the desired alias name, and p points to its end. If not, then
2352 the desired alias name is in the global original_case_string. */
2353 #ifdef TC_CASE_SENSITIVE
2356 newname
= original_case_string
;
2357 nlen
= strlen (newname
);
2360 nbuf
= xmemdup0 (newname
, nlen
);
2362 /* Create aliases under the new name as stated; an all-lowercase
2363 version of the new name; and an all-uppercase version of the new
2365 if (insert_reg_alias (nbuf
, old
->number
, old
->type
) != NULL
)
2367 for (p
= nbuf
; *p
; p
++)
2370 if (strncmp (nbuf
, newname
, nlen
))
2372 /* If this attempt to create an additional alias fails, do not bother
2373 trying to create the all-lower case alias. We will fail and issue
2374 a second, duplicate error message. This situation arises when the
2375 programmer does something like:
2378 The second .req creates the "Foo" alias but then fails to create
2379 the artificial FOO alias because it has already been created by the
2381 if (insert_reg_alias (nbuf
, old
->number
, old
->type
) == NULL
)
2388 for (p
= nbuf
; *p
; p
++)
2391 if (strncmp (nbuf
, newname
, nlen
))
2392 insert_reg_alias (nbuf
, old
->number
, old
->type
);
2399 /* Create a Neon typed/indexed register alias using directives, e.g.:
2404 These typed registers can be used instead of the types specified after the
2405 Neon mnemonic, so long as all operands given have types. Types can also be
2406 specified directly, e.g.:
2407 vadd d0.s32, d1.s32, d2.s32 */
2410 create_neon_reg_alias (char *newname
, char *p
)
2412 enum arm_reg_type basetype
;
2413 struct reg_entry
*basereg
;
2414 struct reg_entry mybasereg
;
2415 struct neon_type ntype
;
2416 struct neon_typed_alias typeinfo
;
2417 char *namebuf
, *nameend ATTRIBUTE_UNUSED
;
2420 typeinfo
.defined
= 0;
2421 typeinfo
.eltype
.type
= NT_invtype
;
2422 typeinfo
.eltype
.size
= -1;
2423 typeinfo
.index
= -1;
2427 if (strncmp (p
, " .dn ", 5) == 0)
2428 basetype
= REG_TYPE_VFD
;
2429 else if (strncmp (p
, " .qn ", 5) == 0)
2430 basetype
= REG_TYPE_NQ
;
2439 basereg
= arm_reg_parse_multi (&p
);
2441 if (basereg
&& basereg
->type
!= basetype
)
2443 as_bad (_("bad type for register"));
2447 if (basereg
== NULL
)
2450 /* Try parsing as an integer. */
2451 my_get_expression (&exp
, &p
, GE_NO_PREFIX
);
2452 if (exp
.X_op
!= O_constant
)
2454 as_bad (_("expression must be constant"));
2457 basereg
= &mybasereg
;
2458 basereg
->number
= (basetype
== REG_TYPE_NQ
) ? exp
.X_add_number
* 2
2464 typeinfo
= *basereg
->neon
;
2466 if (parse_neon_type (&ntype
, &p
) == SUCCESS
)
2468 /* We got a type. */
2469 if (typeinfo
.defined
& NTA_HASTYPE
)
2471 as_bad (_("can't redefine the type of a register alias"));
2475 typeinfo
.defined
|= NTA_HASTYPE
;
2476 if (ntype
.elems
!= 1)
2478 as_bad (_("you must specify a single type only"));
2481 typeinfo
.eltype
= ntype
.el
[0];
2484 if (skip_past_char (&p
, '[') == SUCCESS
)
2487 /* We got a scalar index. */
2489 if (typeinfo
.defined
& NTA_HASINDEX
)
2491 as_bad (_("can't redefine the index of a scalar alias"));
2495 my_get_expression (&exp
, &p
, GE_NO_PREFIX
);
2497 if (exp
.X_op
!= O_constant
)
2499 as_bad (_("scalar index must be constant"));
2503 typeinfo
.defined
|= NTA_HASINDEX
;
2504 typeinfo
.index
= exp
.X_add_number
;
2506 if (skip_past_char (&p
, ']') == FAIL
)
2508 as_bad (_("expecting ]"));
2513 /* If TC_CASE_SENSITIVE is defined, then newname already points to
2514 the desired alias name, and p points to its end. If not, then
2515 the desired alias name is in the global original_case_string. */
2516 #ifdef TC_CASE_SENSITIVE
2517 namelen
= nameend
- newname
;
2519 newname
= original_case_string
;
2520 namelen
= strlen (newname
);
2523 namebuf
= xmemdup0 (newname
, namelen
);
2525 insert_neon_reg_alias (namebuf
, basereg
->number
, basetype
,
2526 typeinfo
.defined
!= 0 ? &typeinfo
: NULL
);
2528 /* Insert name in all uppercase. */
2529 for (p
= namebuf
; *p
; p
++)
2532 if (strncmp (namebuf
, newname
, namelen
))
2533 insert_neon_reg_alias (namebuf
, basereg
->number
, basetype
,
2534 typeinfo
.defined
!= 0 ? &typeinfo
: NULL
);
2536 /* Insert name in all lowercase. */
2537 for (p
= namebuf
; *p
; p
++)
2540 if (strncmp (namebuf
, newname
, namelen
))
2541 insert_neon_reg_alias (namebuf
, basereg
->number
, basetype
,
2542 typeinfo
.defined
!= 0 ? &typeinfo
: NULL
);
2548 /* Should never be called, as .req goes between the alias and the
2549 register name, not at the beginning of the line. */
2552 s_req (int a ATTRIBUTE_UNUSED
)
2554 as_bad (_("invalid syntax for .req directive"));
2558 s_dn (int a ATTRIBUTE_UNUSED
)
2560 as_bad (_("invalid syntax for .dn directive"));
2564 s_qn (int a ATTRIBUTE_UNUSED
)
2566 as_bad (_("invalid syntax for .qn directive"));
2569 /* The .unreq directive deletes an alias which was previously defined
2570 by .req. For example:
2576 s_unreq (int a ATTRIBUTE_UNUSED
)
2581 name
= input_line_pointer
;
2583 while (*input_line_pointer
!= 0
2584 && *input_line_pointer
!= ' '
2585 && *input_line_pointer
!= '\n')
2586 ++input_line_pointer
;
2588 saved_char
= *input_line_pointer
;
2589 *input_line_pointer
= 0;
2592 as_bad (_("invalid syntax for .unreq directive"));
2595 struct reg_entry
*reg
= (struct reg_entry
*) hash_find (arm_reg_hsh
,
2599 as_bad (_("unknown register alias '%s'"), name
);
2600 else if (reg
->builtin
)
2601 as_warn (_("ignoring attempt to use .unreq on fixed register name: '%s'"),
2608 hash_delete (arm_reg_hsh
, name
, FALSE
);
2609 free ((char *) reg
->name
);
2614 /* Also locate the all upper case and all lower case versions.
2615 Do not complain if we cannot find one or the other as it
2616 was probably deleted above. */
2618 nbuf
= strdup (name
);
2619 for (p
= nbuf
; *p
; p
++)
2621 reg
= (struct reg_entry
*) hash_find (arm_reg_hsh
, nbuf
);
2624 hash_delete (arm_reg_hsh
, nbuf
, FALSE
);
2625 free ((char *) reg
->name
);
2631 for (p
= nbuf
; *p
; p
++)
2633 reg
= (struct reg_entry
*) hash_find (arm_reg_hsh
, nbuf
);
2636 hash_delete (arm_reg_hsh
, nbuf
, FALSE
);
2637 free ((char *) reg
->name
);
2647 *input_line_pointer
= saved_char
;
2648 demand_empty_rest_of_line ();
2651 /* Directives: Instruction set selection. */
2654 /* This code is to handle mapping symbols as defined in the ARM ELF spec.
2655 (See "Mapping symbols", section 4.5.5, ARM AAELF version 1.0).
2656 Note that previously, $a and $t has type STT_FUNC (BSF_OBJECT flag),
2657 and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
2659 /* Create a new mapping symbol for the transition to STATE. */
2662 make_mapping_symbol (enum mstate state
, valueT value
, fragS
*frag
)
2665 const char * symname
;
2672 type
= BSF_NO_FLAGS
;
2676 type
= BSF_NO_FLAGS
;
2680 type
= BSF_NO_FLAGS
;
2686 symbolP
= symbol_new (symname
, now_seg
, value
, frag
);
2687 symbol_get_bfdsym (symbolP
)->flags
|= type
| BSF_LOCAL
;
2692 THUMB_SET_FUNC (symbolP
, 0);
2693 ARM_SET_THUMB (symbolP
, 0);
2694 ARM_SET_INTERWORK (symbolP
, support_interwork
);
2698 THUMB_SET_FUNC (symbolP
, 1);
2699 ARM_SET_THUMB (symbolP
, 1);
2700 ARM_SET_INTERWORK (symbolP
, support_interwork
);
2708 /* Save the mapping symbols for future reference. Also check that
2709 we do not place two mapping symbols at the same offset within a
2710 frag. We'll handle overlap between frags in
2711 check_mapping_symbols.
2713 If .fill or other data filling directive generates zero sized data,
2714 the mapping symbol for the following code will have the same value
2715 as the one generated for the data filling directive. In this case,
2716 we replace the old symbol with the new one at the same address. */
2719 if (frag
->tc_frag_data
.first_map
!= NULL
)
2721 know (S_GET_VALUE (frag
->tc_frag_data
.first_map
) == 0);
2722 symbol_remove (frag
->tc_frag_data
.first_map
, &symbol_rootP
, &symbol_lastP
);
2724 frag
->tc_frag_data
.first_map
= symbolP
;
2726 if (frag
->tc_frag_data
.last_map
!= NULL
)
2728 know (S_GET_VALUE (frag
->tc_frag_data
.last_map
) <= S_GET_VALUE (symbolP
));
2729 if (S_GET_VALUE (frag
->tc_frag_data
.last_map
) == S_GET_VALUE (symbolP
))
2730 symbol_remove (frag
->tc_frag_data
.last_map
, &symbol_rootP
, &symbol_lastP
);
2732 frag
->tc_frag_data
.last_map
= symbolP
;
2735 /* We must sometimes convert a region marked as code to data during
2736 code alignment, if an odd number of bytes have to be padded. The
2737 code mapping symbol is pushed to an aligned address. */
2740 insert_data_mapping_symbol (enum mstate state
,
2741 valueT value
, fragS
*frag
, offsetT bytes
)
2743 /* If there was already a mapping symbol, remove it. */
2744 if (frag
->tc_frag_data
.last_map
!= NULL
2745 && S_GET_VALUE (frag
->tc_frag_data
.last_map
) == frag
->fr_address
+ value
)
2747 symbolS
*symp
= frag
->tc_frag_data
.last_map
;
2751 know (frag
->tc_frag_data
.first_map
== symp
);
2752 frag
->tc_frag_data
.first_map
= NULL
;
2754 frag
->tc_frag_data
.last_map
= NULL
;
2755 symbol_remove (symp
, &symbol_rootP
, &symbol_lastP
);
2758 make_mapping_symbol (MAP_DATA
, value
, frag
);
2759 make_mapping_symbol (state
, value
+ bytes
, frag
);
2762 static void mapping_state_2 (enum mstate state
, int max_chars
);
2764 /* Set the mapping state to STATE. Only call this when about to
2765 emit some STATE bytes to the file. */
2767 #define TRANSITION(from, to) (mapstate == (from) && state == (to))
2769 mapping_state (enum mstate state
)
2771 enum mstate mapstate
= seg_info (now_seg
)->tc_segment_info_data
.mapstate
;
2773 if (mapstate
== state
)
2774 /* The mapping symbol has already been emitted.
2775 There is nothing else to do. */
2778 if (state
== MAP_ARM
|| state
== MAP_THUMB
)
2780 All ARM instructions require 4-byte alignment.
2781 (Almost) all Thumb instructions require 2-byte alignment.
2783 When emitting instructions into any section, mark the section
2786 Some Thumb instructions are alignment-sensitive modulo 4 bytes,
2787 but themselves require 2-byte alignment; this applies to some
2788 PC- relative forms. However, these cases will involve implicit
2789 literal pool generation or an explicit .align >=2, both of
2790 which will cause the section to me marked with sufficient
2791 alignment. Thus, we don't handle those cases here. */
2792 record_alignment (now_seg
, state
== MAP_ARM
? 2 : 1);
2794 if (TRANSITION (MAP_UNDEFINED
, MAP_DATA
))
2795 /* This case will be evaluated later. */
2798 mapping_state_2 (state
, 0);
2801 /* Same as mapping_state, but MAX_CHARS bytes have already been
2802 allocated. Put the mapping symbol that far back. */
2805 mapping_state_2 (enum mstate state
, int max_chars
)
2807 enum mstate mapstate
= seg_info (now_seg
)->tc_segment_info_data
.mapstate
;
2809 if (!SEG_NORMAL (now_seg
))
2812 if (mapstate
== state
)
2813 /* The mapping symbol has already been emitted.
2814 There is nothing else to do. */
2817 if (TRANSITION (MAP_UNDEFINED
, MAP_ARM
)
2818 || TRANSITION (MAP_UNDEFINED
, MAP_THUMB
))
2820 struct frag
* const frag_first
= seg_info (now_seg
)->frchainP
->frch_root
;
2821 const int add_symbol
= (frag_now
!= frag_first
) || (frag_now_fix () > 0);
2824 make_mapping_symbol (MAP_DATA
, (valueT
) 0, frag_first
);
2827 seg_info (now_seg
)->tc_segment_info_data
.mapstate
= state
;
2828 make_mapping_symbol (state
, (valueT
) frag_now_fix () - max_chars
, frag_now
);
2832 #define mapping_state(x) ((void)0)
2833 #define mapping_state_2(x, y) ((void)0)
2836 /* Find the real, Thumb encoded start of a Thumb function. */
2840 find_real_start (symbolS
* symbolP
)
2843 const char * name
= S_GET_NAME (symbolP
);
2844 symbolS
* new_target
;
2846 /* This definition must agree with the one in gcc/config/arm/thumb.c. */
2847 #define STUB_NAME ".real_start_of"
2852 /* The compiler may generate BL instructions to local labels because
2853 it needs to perform a branch to a far away location. These labels
2854 do not have a corresponding ".real_start_of" label. We check
2855 both for S_IS_LOCAL and for a leading dot, to give a way to bypass
2856 the ".real_start_of" convention for nonlocal branches. */
2857 if (S_IS_LOCAL (symbolP
) || name
[0] == '.')
2860 real_start
= concat (STUB_NAME
, name
, NULL
);
2861 new_target
= symbol_find (real_start
);
2864 if (new_target
== NULL
)
2866 as_warn (_("Failed to find real start of function: %s\n"), name
);
2867 new_target
= symbolP
;
2875 opcode_select (int width
)
2882 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v4t
))
2883 as_bad (_("selected processor does not support THUMB opcodes"));
2886 /* No need to force the alignment, since we will have been
2887 coming from ARM mode, which is word-aligned. */
2888 record_alignment (now_seg
, 1);
2895 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
))
2896 as_bad (_("selected processor does not support ARM opcodes"));
2901 frag_align (2, 0, 0);
2903 record_alignment (now_seg
, 1);
2908 as_bad (_("invalid instruction size selected (%d)"), width
);
2913 s_arm (int ignore ATTRIBUTE_UNUSED
)
2916 demand_empty_rest_of_line ();
2920 s_thumb (int ignore ATTRIBUTE_UNUSED
)
2923 demand_empty_rest_of_line ();
2927 s_code (int unused ATTRIBUTE_UNUSED
)
2931 temp
= get_absolute_expression ();
2936 opcode_select (temp
);
2940 as_bad (_("invalid operand to .code directive (%d) (expecting 16 or 32)"), temp
);
2945 s_force_thumb (int ignore ATTRIBUTE_UNUSED
)
2947 /* If we are not already in thumb mode go into it, EVEN if
2948 the target processor does not support thumb instructions.
2949 This is used by gcc/config/arm/lib1funcs.asm for example
2950 to compile interworking support functions even if the
2951 target processor should not support interworking. */
2955 record_alignment (now_seg
, 1);
2958 demand_empty_rest_of_line ();
2962 s_thumb_func (int ignore ATTRIBUTE_UNUSED
)
2966 /* The following label is the name/address of the start of a Thumb function.
2967 We need to know this for the interworking support. */
2968 label_is_thumb_function_name
= TRUE
;
2971 /* Perform a .set directive, but also mark the alias as
2972 being a thumb function. */
2975 s_thumb_set (int equiv
)
2977 /* XXX the following is a duplicate of the code for s_set() in read.c
2978 We cannot just call that code as we need to get at the symbol that
2985 /* Especial apologies for the random logic:
2986 This just grew, and could be parsed much more simply!
2988 delim
= get_symbol_name (& name
);
2989 end_name
= input_line_pointer
;
2990 (void) restore_line_pointer (delim
);
2992 if (*input_line_pointer
!= ',')
2995 as_bad (_("expected comma after name \"%s\""), name
);
2997 ignore_rest_of_line ();
3001 input_line_pointer
++;
3004 if (name
[0] == '.' && name
[1] == '\0')
3006 /* XXX - this should not happen to .thumb_set. */
3010 if ((symbolP
= symbol_find (name
)) == NULL
3011 && (symbolP
= md_undefined_symbol (name
)) == NULL
)
3014 /* When doing symbol listings, play games with dummy fragments living
3015 outside the normal fragment chain to record the file and line info
3017 if (listing
& LISTING_SYMBOLS
)
3019 extern struct list_info_struct
* listing_tail
;
3020 fragS
* dummy_frag
= (fragS
* ) xmalloc (sizeof (fragS
));
3022 memset (dummy_frag
, 0, sizeof (fragS
));
3023 dummy_frag
->fr_type
= rs_fill
;
3024 dummy_frag
->line
= listing_tail
;
3025 symbolP
= symbol_new (name
, undefined_section
, 0, dummy_frag
);
3026 dummy_frag
->fr_symbol
= symbolP
;
3030 symbolP
= symbol_new (name
, undefined_section
, 0, &zero_address_frag
);
3033 /* "set" symbols are local unless otherwise specified. */
3034 SF_SET_LOCAL (symbolP
);
3035 #endif /* OBJ_COFF */
3036 } /* Make a new symbol. */
3038 symbol_table_insert (symbolP
);
3043 && S_IS_DEFINED (symbolP
)
3044 && S_GET_SEGMENT (symbolP
) != reg_section
)
3045 as_bad (_("symbol `%s' already defined"), S_GET_NAME (symbolP
));
3047 pseudo_set (symbolP
);
3049 demand_empty_rest_of_line ();
3051 /* XXX Now we come to the Thumb specific bit of code. */
3053 THUMB_SET_FUNC (symbolP
, 1);
3054 ARM_SET_THUMB (symbolP
, 1);
3055 #if defined OBJ_ELF || defined OBJ_COFF
3056 ARM_SET_INTERWORK (symbolP
, support_interwork
);
3060 /* Directives: Mode selection. */
3062 /* .syntax [unified|divided] - choose the new unified syntax
3063 (same for Arm and Thumb encoding, modulo slight differences in what
3064 can be represented) or the old divergent syntax for each mode. */
3066 s_syntax (int unused ATTRIBUTE_UNUSED
)
3070 delim
= get_symbol_name (& name
);
3072 if (!strcasecmp (name
, "unified"))
3073 unified_syntax
= TRUE
;
3074 else if (!strcasecmp (name
, "divided"))
3075 unified_syntax
= FALSE
;
3078 as_bad (_("unrecognized syntax mode \"%s\""), name
);
3081 (void) restore_line_pointer (delim
);
3082 demand_empty_rest_of_line ();
3085 /* Directives: sectioning and alignment. */
3088 s_bss (int ignore ATTRIBUTE_UNUSED
)
3090 /* We don't support putting frags in the BSS segment, we fake it by
3091 marking in_bss, then looking at s_skip for clues. */
3092 subseg_set (bss_section
, 0);
3093 demand_empty_rest_of_line ();
3095 #ifdef md_elf_section_change_hook
3096 md_elf_section_change_hook ();
3101 s_even (int ignore ATTRIBUTE_UNUSED
)
3103 /* Never make frag if expect extra pass. */
3105 frag_align (1, 0, 0);
3107 record_alignment (now_seg
, 1);
3109 demand_empty_rest_of_line ();
3112 /* Directives: CodeComposer Studio. */
3114 /* .ref (for CodeComposer Studio syntax only). */
3116 s_ccs_ref (int unused ATTRIBUTE_UNUSED
)
3118 if (codecomposer_syntax
)
3119 ignore_rest_of_line ();
3121 as_bad (_(".ref pseudo-op only available with -mccs flag."));
3124 /* If name is not NULL, then it is used for marking the beginning of a
3125 function, whereas if it is NULL then it means the function end. */
3127 asmfunc_debug (const char * name
)
3129 static const char * last_name
= NULL
;
3133 gas_assert (last_name
== NULL
);
3136 if (debug_type
== DEBUG_STABS
)
3137 stabs_generate_asm_func (name
, name
);
3141 gas_assert (last_name
!= NULL
);
3143 if (debug_type
== DEBUG_STABS
)
3144 stabs_generate_asm_endfunc (last_name
, last_name
);
3151 s_ccs_asmfunc (int unused ATTRIBUTE_UNUSED
)
3153 if (codecomposer_syntax
)
3155 switch (asmfunc_state
)
3157 case OUTSIDE_ASMFUNC
:
3158 asmfunc_state
= WAITING_ASMFUNC_NAME
;
3161 case WAITING_ASMFUNC_NAME
:
3162 as_bad (_(".asmfunc repeated."));
3165 case WAITING_ENDASMFUNC
:
3166 as_bad (_(".asmfunc without function."));
3169 demand_empty_rest_of_line ();
3172 as_bad (_(".asmfunc pseudo-op only available with -mccs flag."));
3176 s_ccs_endasmfunc (int unused ATTRIBUTE_UNUSED
)
3178 if (codecomposer_syntax
)
3180 switch (asmfunc_state
)
3182 case OUTSIDE_ASMFUNC
:
3183 as_bad (_(".endasmfunc without a .asmfunc."));
3186 case WAITING_ASMFUNC_NAME
:
3187 as_bad (_(".endasmfunc without function."));
3190 case WAITING_ENDASMFUNC
:
3191 asmfunc_state
= OUTSIDE_ASMFUNC
;
3192 asmfunc_debug (NULL
);
3195 demand_empty_rest_of_line ();
3198 as_bad (_(".endasmfunc pseudo-op only available with -mccs flag."));
3202 s_ccs_def (int name
)
3204 if (codecomposer_syntax
)
3207 as_bad (_(".def pseudo-op only available with -mccs flag."));
3210 /* Directives: Literal pools. */
3212 static literal_pool
*
3213 find_literal_pool (void)
3215 literal_pool
* pool
;
3217 for (pool
= list_of_pools
; pool
!= NULL
; pool
= pool
->next
)
3219 if (pool
->section
== now_seg
3220 && pool
->sub_section
== now_subseg
)
3227 static literal_pool
*
3228 find_or_make_literal_pool (void)
3230 /* Next literal pool ID number. */
3231 static unsigned int latest_pool_num
= 1;
3232 literal_pool
* pool
;
3234 pool
= find_literal_pool ();
3238 /* Create a new pool. */
3239 pool
= XNEW (literal_pool
);
3243 pool
->next_free_entry
= 0;
3244 pool
->section
= now_seg
;
3245 pool
->sub_section
= now_subseg
;
3246 pool
->next
= list_of_pools
;
3247 pool
->symbol
= NULL
;
3248 pool
->alignment
= 2;
3250 /* Add it to the list. */
3251 list_of_pools
= pool
;
3254 /* New pools, and emptied pools, will have a NULL symbol. */
3255 if (pool
->symbol
== NULL
)
3257 pool
->symbol
= symbol_create (FAKE_LABEL_NAME
, undefined_section
,
3258 (valueT
) 0, &zero_address_frag
);
3259 pool
->id
= latest_pool_num
++;
3266 /* Add the literal in the global 'inst'
3267 structure to the relevant literal pool. */
3270 add_to_lit_pool (unsigned int nbytes
)
3272 #define PADDING_SLOT 0x1
3273 #define LIT_ENTRY_SIZE_MASK 0xFF
3274 literal_pool
* pool
;
3275 unsigned int entry
, pool_size
= 0;
3276 bfd_boolean padding_slot_p
= FALSE
;
3282 imm1
= inst
.operands
[1].imm
;
3283 imm2
= (inst
.operands
[1].regisimm
? inst
.operands
[1].reg
3284 : inst
.relocs
[0].exp
.X_unsigned
? 0
3285 : ((bfd_int64_t
) inst
.operands
[1].imm
) >> 32);
3286 if (target_big_endian
)
3289 imm2
= inst
.operands
[1].imm
;
3293 pool
= find_or_make_literal_pool ();
3295 /* Check if this literal value is already in the pool. */
3296 for (entry
= 0; entry
< pool
->next_free_entry
; entry
++)
3300 if ((pool
->literals
[entry
].X_op
== inst
.relocs
[0].exp
.X_op
)
3301 && (inst
.relocs
[0].exp
.X_op
== O_constant
)
3302 && (pool
->literals
[entry
].X_add_number
3303 == inst
.relocs
[0].exp
.X_add_number
)
3304 && (pool
->literals
[entry
].X_md
== nbytes
)
3305 && (pool
->literals
[entry
].X_unsigned
3306 == inst
.relocs
[0].exp
.X_unsigned
))
3309 if ((pool
->literals
[entry
].X_op
== inst
.relocs
[0].exp
.X_op
)
3310 && (inst
.relocs
[0].exp
.X_op
== O_symbol
)
3311 && (pool
->literals
[entry
].X_add_number
3312 == inst
.relocs
[0].exp
.X_add_number
)
3313 && (pool
->literals
[entry
].X_add_symbol
3314 == inst
.relocs
[0].exp
.X_add_symbol
)
3315 && (pool
->literals
[entry
].X_op_symbol
3316 == inst
.relocs
[0].exp
.X_op_symbol
)
3317 && (pool
->literals
[entry
].X_md
== nbytes
))
3320 else if ((nbytes
== 8)
3321 && !(pool_size
& 0x7)
3322 && ((entry
+ 1) != pool
->next_free_entry
)
3323 && (pool
->literals
[entry
].X_op
== O_constant
)
3324 && (pool
->literals
[entry
].X_add_number
== (offsetT
) imm1
)
3325 && (pool
->literals
[entry
].X_unsigned
3326 == inst
.relocs
[0].exp
.X_unsigned
)
3327 && (pool
->literals
[entry
+ 1].X_op
== O_constant
)
3328 && (pool
->literals
[entry
+ 1].X_add_number
== (offsetT
) imm2
)
3329 && (pool
->literals
[entry
+ 1].X_unsigned
3330 == inst
.relocs
[0].exp
.X_unsigned
))
3333 padding_slot_p
= ((pool
->literals
[entry
].X_md
>> 8) == PADDING_SLOT
);
3334 if (padding_slot_p
&& (nbytes
== 4))
3340 /* Do we need to create a new entry? */
3341 if (entry
== pool
->next_free_entry
)
3343 if (entry
>= MAX_LITERAL_POOL_SIZE
)
3345 inst
.error
= _("literal pool overflow");
3351 /* For 8-byte entries, we align to an 8-byte boundary,
3352 and split it into two 4-byte entries, because on 32-bit
3353 host, 8-byte constants are treated as big num, thus
3354 saved in "generic_bignum" which will be overwritten
3355 by later assignments.
3357 We also need to make sure there is enough space for
3360 We also check to make sure the literal operand is a
3362 if (!(inst
.relocs
[0].exp
.X_op
== O_constant
3363 || inst
.relocs
[0].exp
.X_op
== O_big
))
3365 inst
.error
= _("invalid type for literal pool");
3368 else if (pool_size
& 0x7)
3370 if ((entry
+ 2) >= MAX_LITERAL_POOL_SIZE
)
3372 inst
.error
= _("literal pool overflow");
3376 pool
->literals
[entry
] = inst
.relocs
[0].exp
;
3377 pool
->literals
[entry
].X_op
= O_constant
;
3378 pool
->literals
[entry
].X_add_number
= 0;
3379 pool
->literals
[entry
++].X_md
= (PADDING_SLOT
<< 8) | 4;
3380 pool
->next_free_entry
+= 1;
3383 else if ((entry
+ 1) >= MAX_LITERAL_POOL_SIZE
)
3385 inst
.error
= _("literal pool overflow");
3389 pool
->literals
[entry
] = inst
.relocs
[0].exp
;
3390 pool
->literals
[entry
].X_op
= O_constant
;
3391 pool
->literals
[entry
].X_add_number
= imm1
;
3392 pool
->literals
[entry
].X_unsigned
= inst
.relocs
[0].exp
.X_unsigned
;
3393 pool
->literals
[entry
++].X_md
= 4;
3394 pool
->literals
[entry
] = inst
.relocs
[0].exp
;
3395 pool
->literals
[entry
].X_op
= O_constant
;
3396 pool
->literals
[entry
].X_add_number
= imm2
;
3397 pool
->literals
[entry
].X_unsigned
= inst
.relocs
[0].exp
.X_unsigned
;
3398 pool
->literals
[entry
].X_md
= 4;
3399 pool
->alignment
= 3;
3400 pool
->next_free_entry
+= 1;
3404 pool
->literals
[entry
] = inst
.relocs
[0].exp
;
3405 pool
->literals
[entry
].X_md
= 4;
3409 /* PR ld/12974: Record the location of the first source line to reference
3410 this entry in the literal pool. If it turns out during linking that the
3411 symbol does not exist we will be able to give an accurate line number for
3412 the (first use of the) missing reference. */
3413 if (debug_type
== DEBUG_DWARF2
)
3414 dwarf2_where (pool
->locs
+ entry
);
3416 pool
->next_free_entry
+= 1;
3418 else if (padding_slot_p
)
3420 pool
->literals
[entry
] = inst
.relocs
[0].exp
;
3421 pool
->literals
[entry
].X_md
= nbytes
;
3424 inst
.relocs
[0].exp
.X_op
= O_symbol
;
3425 inst
.relocs
[0].exp
.X_add_number
= pool_size
;
3426 inst
.relocs
[0].exp
.X_add_symbol
= pool
->symbol
;
3432 tc_start_label_without_colon (void)
3434 bfd_boolean ret
= TRUE
;
3436 if (codecomposer_syntax
&& asmfunc_state
== WAITING_ASMFUNC_NAME
)
3438 const char *label
= input_line_pointer
;
3440 while (!is_end_of_line
[(int) label
[-1]])
3445 as_bad (_("Invalid label '%s'"), label
);
3449 asmfunc_debug (label
);
3451 asmfunc_state
= WAITING_ENDASMFUNC
;
3457 /* Can't use symbol_new here, so have to create a symbol and then at
3458 a later date assign it a value. That's what these functions do. */
3461 symbol_locate (symbolS
* symbolP
,
3462 const char * name
, /* It is copied, the caller can modify. */
3463 segT segment
, /* Segment identifier (SEG_<something>). */
3464 valueT valu
, /* Symbol value. */
3465 fragS
* frag
) /* Associated fragment. */
3468 char * preserved_copy_of_name
;
3470 name_length
= strlen (name
) + 1; /* +1 for \0. */
3471 obstack_grow (¬es
, name
, name_length
);
3472 preserved_copy_of_name
= (char *) obstack_finish (¬es
);
3474 #ifdef tc_canonicalize_symbol_name
3475 preserved_copy_of_name
=
3476 tc_canonicalize_symbol_name (preserved_copy_of_name
);
3479 S_SET_NAME (symbolP
, preserved_copy_of_name
);
3481 S_SET_SEGMENT (symbolP
, segment
);
3482 S_SET_VALUE (symbolP
, valu
);
3483 symbol_clear_list_pointers (symbolP
);
3485 symbol_set_frag (symbolP
, frag
);
3487 /* Link to end of symbol chain. */
3489 extern int symbol_table_frozen
;
3491 if (symbol_table_frozen
)
3495 symbol_append (symbolP
, symbol_lastP
, & symbol_rootP
, & symbol_lastP
);
3497 obj_symbol_new_hook (symbolP
);
3499 #ifdef tc_symbol_new_hook
3500 tc_symbol_new_hook (symbolP
);
3504 verify_symbol_chain (symbol_rootP
, symbol_lastP
);
3505 #endif /* DEBUG_SYMS */
3509 s_ltorg (int ignored ATTRIBUTE_UNUSED
)
3512 literal_pool
* pool
;
3515 pool
= find_literal_pool ();
3517 || pool
->symbol
== NULL
3518 || pool
->next_free_entry
== 0)
3521 /* Align pool as you have word accesses.
3522 Only make a frag if we have to. */
3524 frag_align (pool
->alignment
, 0, 0);
3526 record_alignment (now_seg
, 2);
3529 seg_info (now_seg
)->tc_segment_info_data
.mapstate
= MAP_DATA
;
3530 make_mapping_symbol (MAP_DATA
, (valueT
) frag_now_fix (), frag_now
);
3532 sprintf (sym_name
, "$$lit_\002%x", pool
->id
);
3534 symbol_locate (pool
->symbol
, sym_name
, now_seg
,
3535 (valueT
) frag_now_fix (), frag_now
);
3536 symbol_table_insert (pool
->symbol
);
3538 ARM_SET_THUMB (pool
->symbol
, thumb_mode
);
3540 #if defined OBJ_COFF || defined OBJ_ELF
3541 ARM_SET_INTERWORK (pool
->symbol
, support_interwork
);
3544 for (entry
= 0; entry
< pool
->next_free_entry
; entry
++)
3547 if (debug_type
== DEBUG_DWARF2
)
3548 dwarf2_gen_line_info (frag_now_fix (), pool
->locs
+ entry
);
3550 /* First output the expression in the instruction to the pool. */
3551 emit_expr (&(pool
->literals
[entry
]),
3552 pool
->literals
[entry
].X_md
& LIT_ENTRY_SIZE_MASK
);
3555 /* Mark the pool as empty. */
3556 pool
->next_free_entry
= 0;
3557 pool
->symbol
= NULL
;
3561 /* Forward declarations for functions below, in the MD interface
3563 static void fix_new_arm (fragS
*, int, short, expressionS
*, int, int);
3564 static valueT
create_unwind_entry (int);
3565 static void start_unwind_section (const segT
, int);
3566 static void add_unwind_opcode (valueT
, int);
3567 static void flush_pending_unwind (void);
3569 /* Directives: Data. */
3572 s_arm_elf_cons (int nbytes
)
3576 #ifdef md_flush_pending_output
3577 md_flush_pending_output ();
3580 if (is_it_end_of_statement ())
3582 demand_empty_rest_of_line ();
3586 #ifdef md_cons_align
3587 md_cons_align (nbytes
);
3590 mapping_state (MAP_DATA
);
3594 char *base
= input_line_pointer
;
3598 if (exp
.X_op
!= O_symbol
)
3599 emit_expr (&exp
, (unsigned int) nbytes
);
3602 char *before_reloc
= input_line_pointer
;
3603 reloc
= parse_reloc (&input_line_pointer
);
3606 as_bad (_("unrecognized relocation suffix"));
3607 ignore_rest_of_line ();
3610 else if (reloc
== BFD_RELOC_UNUSED
)
3611 emit_expr (&exp
, (unsigned int) nbytes
);
3614 reloc_howto_type
*howto
= (reloc_howto_type
*)
3615 bfd_reloc_type_lookup (stdoutput
,
3616 (bfd_reloc_code_real_type
) reloc
);
3617 int size
= bfd_get_reloc_size (howto
);
3619 if (reloc
== BFD_RELOC_ARM_PLT32
)
3621 as_bad (_("(plt) is only valid on branch targets"));
3622 reloc
= BFD_RELOC_UNUSED
;
3627 as_bad (ngettext ("%s relocations do not fit in %d byte",
3628 "%s relocations do not fit in %d bytes",
3630 howto
->name
, nbytes
);
3633 /* We've parsed an expression stopping at O_symbol.
3634 But there may be more expression left now that we
3635 have parsed the relocation marker. Parse it again.
3636 XXX Surely there is a cleaner way to do this. */
3637 char *p
= input_line_pointer
;
3639 char *save_buf
= XNEWVEC (char, input_line_pointer
- base
);
3641 memcpy (save_buf
, base
, input_line_pointer
- base
);
3642 memmove (base
+ (input_line_pointer
- before_reloc
),
3643 base
, before_reloc
- base
);
3645 input_line_pointer
= base
+ (input_line_pointer
-before_reloc
);
3647 memcpy (base
, save_buf
, p
- base
);
3649 offset
= nbytes
- size
;
3650 p
= frag_more (nbytes
);
3651 memset (p
, 0, nbytes
);
3652 fix_new_exp (frag_now
, p
- frag_now
->fr_literal
+ offset
,
3653 size
, &exp
, 0, (enum bfd_reloc_code_real
) reloc
);
3659 while (*input_line_pointer
++ == ',');
3661 /* Put terminator back into stream. */
3662 input_line_pointer
--;
3663 demand_empty_rest_of_line ();
3666 /* Emit an expression containing a 32-bit thumb instruction.
3667 Implementation based on put_thumb32_insn. */
3670 emit_thumb32_expr (expressionS
* exp
)
3672 expressionS exp_high
= *exp
;
3674 exp_high
.X_add_number
= (unsigned long)exp_high
.X_add_number
>> 16;
3675 emit_expr (& exp_high
, (unsigned int) THUMB_SIZE
);
3676 exp
->X_add_number
&= 0xffff;
3677 emit_expr (exp
, (unsigned int) THUMB_SIZE
);
/* Guess the instruction size based on the opcode.  Returns 2 for a
   16-bit Thumb encoding, 4 for a 32-bit Thumb-2 encoding, and 0 when
   the size cannot be determined from the value alone.  */

static int
thumb_insn_size (int opcode)
{
  if ((unsigned int) opcode < 0xe800u)
    return 2;
  else if ((unsigned int) opcode >= 0xe8000000u)
    return 4;
  else
    return 0;
}
3694 emit_insn (expressionS
*exp
, int nbytes
)
3698 if (exp
->X_op
== O_constant
)
3703 size
= thumb_insn_size (exp
->X_add_number
);
3707 if (size
== 2 && (unsigned int)exp
->X_add_number
> 0xffffu
)
3709 as_bad (_(".inst.n operand too big. "\
3710 "Use .inst.w instead"));
3715 if (now_it
.state
== AUTOMATIC_IT_BLOCK
)
3716 set_it_insn_type_nonvoid (OUTSIDE_IT_INSN
, 0);
3718 set_it_insn_type_nonvoid (NEUTRAL_IT_INSN
, 0);
3720 if (thumb_mode
&& (size
> THUMB_SIZE
) && !target_big_endian
)
3721 emit_thumb32_expr (exp
);
3723 emit_expr (exp
, (unsigned int) size
);
3725 it_fsm_post_encode ();
3729 as_bad (_("cannot determine Thumb instruction size. " \
3730 "Use .inst.n/.inst.w instead"));
3733 as_bad (_("constant expression required"));
3738 /* Like s_arm_elf_cons but do not use md_cons_align and
3739 set the mapping state to MAP_ARM/MAP_THUMB. */
3742 s_arm_elf_inst (int nbytes
)
3744 if (is_it_end_of_statement ())
3746 demand_empty_rest_of_line ();
3750 /* Calling mapping_state () here will not change ARM/THUMB,
3751 but will ensure not to be in DATA state. */
3754 mapping_state (MAP_THUMB
);
3759 as_bad (_("width suffixes are invalid in ARM mode"));
3760 ignore_rest_of_line ();
3766 mapping_state (MAP_ARM
);
3775 if (! emit_insn (& exp
, nbytes
))
3777 ignore_rest_of_line ();
3781 while (*input_line_pointer
++ == ',');
3783 /* Put terminator back into stream. */
3784 input_line_pointer
--;
3785 demand_empty_rest_of_line ();
3788 /* Parse a .rel31 directive. */
3791 s_arm_rel31 (int ignored ATTRIBUTE_UNUSED
)
3798 if (*input_line_pointer
== '1')
3799 highbit
= 0x80000000;
3800 else if (*input_line_pointer
!= '0')
3801 as_bad (_("expected 0 or 1"));
3803 input_line_pointer
++;
3804 if (*input_line_pointer
!= ',')
3805 as_bad (_("missing comma"));
3806 input_line_pointer
++;
3808 #ifdef md_flush_pending_output
3809 md_flush_pending_output ();
3812 #ifdef md_cons_align
3816 mapping_state (MAP_DATA
);
3821 md_number_to_chars (p
, highbit
, 4);
3822 fix_new_arm (frag_now
, p
- frag_now
->fr_literal
, 4, &exp
, 1,
3823 BFD_RELOC_ARM_PREL31
);
3825 demand_empty_rest_of_line ();
3828 /* Directives: AEABI stack-unwind tables. */
3830 /* Parse an unwind_fnstart directive. Simply records the current location. */
3833 s_arm_unwind_fnstart (int ignored ATTRIBUTE_UNUSED
)
3835 demand_empty_rest_of_line ();
3836 if (unwind
.proc_start
)
3838 as_bad (_("duplicate .fnstart directive"));
3842 /* Mark the start of the function. */
3843 unwind
.proc_start
= expr_build_dot ();
3845 /* Reset the rest of the unwind info. */
3846 unwind
.opcode_count
= 0;
3847 unwind
.table_entry
= NULL
;
3848 unwind
.personality_routine
= NULL
;
3849 unwind
.personality_index
= -1;
3850 unwind
.frame_size
= 0;
3851 unwind
.fp_offset
= 0;
3852 unwind
.fp_reg
= REG_SP
;
3854 unwind
.sp_restored
= 0;
3858 /* Parse a handlerdata directive. Creates the exception handling table entry
3859 for the function. */
3862 s_arm_unwind_handlerdata (int ignored ATTRIBUTE_UNUSED
)
3864 demand_empty_rest_of_line ();
3865 if (!unwind
.proc_start
)
3866 as_bad (MISSING_FNSTART
);
3868 if (unwind
.table_entry
)
3869 as_bad (_("duplicate .handlerdata directive"));
3871 create_unwind_entry (1);
3874 /* Parse an unwind_fnend directive. Generates the index table entry. */
3877 s_arm_unwind_fnend (int ignored ATTRIBUTE_UNUSED
)
3882 unsigned int marked_pr_dependency
;
3884 demand_empty_rest_of_line ();
3886 if (!unwind
.proc_start
)
3888 as_bad (_(".fnend directive without .fnstart"));
3892 /* Add eh table entry. */
3893 if (unwind
.table_entry
== NULL
)
3894 val
= create_unwind_entry (0);
3898 /* Add index table entry. This is two words. */
3899 start_unwind_section (unwind
.saved_seg
, 1);
3900 frag_align (2, 0, 0);
3901 record_alignment (now_seg
, 2);
3903 ptr
= frag_more (8);
3905 where
= frag_now_fix () - 8;
3907 /* Self relative offset of the function start. */
3908 fix_new (frag_now
, where
, 4, unwind
.proc_start
, 0, 1,
3909 BFD_RELOC_ARM_PREL31
);
3911 /* Indicate dependency on EHABI-defined personality routines to the
3912 linker, if it hasn't been done already. */
3913 marked_pr_dependency
3914 = seg_info (now_seg
)->tc_segment_info_data
.marked_pr_dependency
;
3915 if (unwind
.personality_index
>= 0 && unwind
.personality_index
< 3
3916 && !(marked_pr_dependency
& (1 << unwind
.personality_index
)))
3918 static const char *const name
[] =
3920 "__aeabi_unwind_cpp_pr0",
3921 "__aeabi_unwind_cpp_pr1",
3922 "__aeabi_unwind_cpp_pr2"
3924 symbolS
*pr
= symbol_find_or_make (name
[unwind
.personality_index
]);
3925 fix_new (frag_now
, where
, 0, pr
, 0, 1, BFD_RELOC_NONE
);
3926 seg_info (now_seg
)->tc_segment_info_data
.marked_pr_dependency
3927 |= 1 << unwind
.personality_index
;
3931 /* Inline exception table entry. */
3932 md_number_to_chars (ptr
+ 4, val
, 4);
3934 /* Self relative offset of the table entry. */
3935 fix_new (frag_now
, where
+ 4, 4, unwind
.table_entry
, 0, 1,
3936 BFD_RELOC_ARM_PREL31
);
3938 /* Restore the original section. */
3939 subseg_set (unwind
.saved_seg
, unwind
.saved_subseg
);
3941 unwind
.proc_start
= NULL
;
3945 /* Parse an unwind_cantunwind directive. */
3948 s_arm_unwind_cantunwind (int ignored ATTRIBUTE_UNUSED
)
3950 demand_empty_rest_of_line ();
3951 if (!unwind
.proc_start
)
3952 as_bad (MISSING_FNSTART
);
3954 if (unwind
.personality_routine
|| unwind
.personality_index
!= -1)
3955 as_bad (_("personality routine specified for cantunwind frame"));
3957 unwind
.personality_index
= -2;
3961 /* Parse a personalityindex directive. */
3964 s_arm_unwind_personalityindex (int ignored ATTRIBUTE_UNUSED
)
3968 if (!unwind
.proc_start
)
3969 as_bad (MISSING_FNSTART
);
3971 if (unwind
.personality_routine
|| unwind
.personality_index
!= -1)
3972 as_bad (_("duplicate .personalityindex directive"));
3976 if (exp
.X_op
!= O_constant
3977 || exp
.X_add_number
< 0 || exp
.X_add_number
> 15)
3979 as_bad (_("bad personality routine number"));
3980 ignore_rest_of_line ();
3984 unwind
.personality_index
= exp
.X_add_number
;
3986 demand_empty_rest_of_line ();
3990 /* Parse a personality directive. */
3993 s_arm_unwind_personality (int ignored ATTRIBUTE_UNUSED
)
3997 if (!unwind
.proc_start
)
3998 as_bad (MISSING_FNSTART
);
4000 if (unwind
.personality_routine
|| unwind
.personality_index
!= -1)
4001 as_bad (_("duplicate .personality directive"));
4003 c
= get_symbol_name (& name
);
4004 p
= input_line_pointer
;
4006 ++ input_line_pointer
;
4007 unwind
.personality_routine
= symbol_find_or_make (name
);
4009 demand_empty_rest_of_line ();
4013 /* Parse a directive saving core registers. */
4016 s_arm_unwind_save_core (void)
4022 range
= parse_reg_list (&input_line_pointer
, REGLIST_RN
);
4025 as_bad (_("expected register list"));
4026 ignore_rest_of_line ();
4030 demand_empty_rest_of_line ();
4032 /* Turn .unwind_movsp ip followed by .unwind_save {..., ip, ...}
4033 into .unwind_save {..., sp...}. We aren't bothered about the value of
4034 ip because it is clobbered by calls. */
4035 if (unwind
.sp_restored
&& unwind
.fp_reg
== 12
4036 && (range
& 0x3000) == 0x1000)
4038 unwind
.opcode_count
--;
4039 unwind
.sp_restored
= 0;
4040 range
= (range
| 0x2000) & ~0x1000;
4041 unwind
.pending_offset
= 0;
4047 /* See if we can use the short opcodes. These pop a block of up to 8
4048 registers starting with r4, plus maybe r14. */
4049 for (n
= 0; n
< 8; n
++)
4051 /* Break at the first non-saved register. */
4052 if ((range
& (1 << (n
+ 4))) == 0)
4055 /* See if there are any other bits set. */
4056 if (n
== 0 || (range
& (0xfff0 << n
) & 0xbff0) != 0)
4058 /* Use the long form. */
4059 op
= 0x8000 | ((range
>> 4) & 0xfff);
4060 add_unwind_opcode (op
, 2);
4064 /* Use the short form. */
4066 op
= 0xa8; /* Pop r14. */
4068 op
= 0xa0; /* Do not pop r14. */
4070 add_unwind_opcode (op
, 1);
4077 op
= 0xb100 | (range
& 0xf);
4078 add_unwind_opcode (op
, 2);
4081 /* Record the number of bytes pushed. */
4082 for (n
= 0; n
< 16; n
++)
4084 if (range
& (1 << n
))
4085 unwind
.frame_size
+= 4;
4090 /* Parse a directive saving FPA registers. */
4093 s_arm_unwind_save_fpa (int reg
)
4099 /* Get Number of registers to transfer. */
4100 if (skip_past_comma (&input_line_pointer
) != FAIL
)
4103 exp
.X_op
= O_illegal
;
4105 if (exp
.X_op
!= O_constant
)
4107 as_bad (_("expected , <constant>"));
4108 ignore_rest_of_line ();
4112 num_regs
= exp
.X_add_number
;
4114 if (num_regs
< 1 || num_regs
> 4)
4116 as_bad (_("number of registers must be in the range [1:4]"));
4117 ignore_rest_of_line ();
4121 demand_empty_rest_of_line ();
4126 op
= 0xb4 | (num_regs
- 1);
4127 add_unwind_opcode (op
, 1);
4132 op
= 0xc800 | (reg
<< 4) | (num_regs
- 1);
4133 add_unwind_opcode (op
, 2);
4135 unwind
.frame_size
+= num_regs
* 12;
4139 /* Parse a directive saving VFP registers for ARMv6 and above. */
4142 s_arm_unwind_save_vfp_armv6 (void)
4147 int num_vfpv3_regs
= 0;
4148 int num_regs_below_16
;
4150 count
= parse_vfp_reg_list (&input_line_pointer
, &start
, REGLIST_VFP_D
);
4153 as_bad (_("expected register list"));
4154 ignore_rest_of_line ();
4158 demand_empty_rest_of_line ();
4160 /* We always generate FSTMD/FLDMD-style unwinding opcodes (rather
4161 than FSTMX/FLDMX-style ones). */
4163 /* Generate opcode for (VFPv3) registers numbered in the range 16 .. 31. */
4165 num_vfpv3_regs
= count
;
4166 else if (start
+ count
> 16)
4167 num_vfpv3_regs
= start
+ count
- 16;
4169 if (num_vfpv3_regs
> 0)
4171 int start_offset
= start
> 16 ? start
- 16 : 0;
4172 op
= 0xc800 | (start_offset
<< 4) | (num_vfpv3_regs
- 1);
4173 add_unwind_opcode (op
, 2);
4176 /* Generate opcode for registers numbered in the range 0 .. 15. */
4177 num_regs_below_16
= num_vfpv3_regs
> 0 ? 16 - (int) start
: count
;
4178 gas_assert (num_regs_below_16
+ num_vfpv3_regs
== count
);
4179 if (num_regs_below_16
> 0)
4181 op
= 0xc900 | (start
<< 4) | (num_regs_below_16
- 1);
4182 add_unwind_opcode (op
, 2);
4185 unwind
.frame_size
+= count
* 8;
4189 /* Parse a directive saving VFP registers for pre-ARMv6. */
4192 s_arm_unwind_save_vfp (void)
4198 count
= parse_vfp_reg_list (&input_line_pointer
, ®
, REGLIST_VFP_D
);
4201 as_bad (_("expected register list"));
4202 ignore_rest_of_line ();
4206 demand_empty_rest_of_line ();
4211 op
= 0xb8 | (count
- 1);
4212 add_unwind_opcode (op
, 1);
4217 op
= 0xb300 | (reg
<< 4) | (count
- 1);
4218 add_unwind_opcode (op
, 2);
4220 unwind
.frame_size
+= count
* 8 + 4;
4224 /* Parse a directive saving iWMMXt data registers. */
4227 s_arm_unwind_save_mmxwr (void)
4235 if (*input_line_pointer
== '{')
4236 input_line_pointer
++;
4240 reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWR
);
4244 as_bad ("%s", _(reg_expected_msgs
[REG_TYPE_MMXWR
]));
4249 as_tsktsk (_("register list not in ascending order"));
4252 if (*input_line_pointer
== '-')
4254 input_line_pointer
++;
4255 hi_reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWR
);
4258 as_bad ("%s", _(reg_expected_msgs
[REG_TYPE_MMXWR
]));
4261 else if (reg
>= hi_reg
)
4263 as_bad (_("bad register range"));
4266 for (; reg
< hi_reg
; reg
++)
4270 while (skip_past_comma (&input_line_pointer
) != FAIL
);
4272 skip_past_char (&input_line_pointer
, '}');
4274 demand_empty_rest_of_line ();
4276 /* Generate any deferred opcodes because we're going to be looking at
4278 flush_pending_unwind ();
4280 for (i
= 0; i
< 16; i
++)
4282 if (mask
& (1 << i
))
4283 unwind
.frame_size
+= 8;
4286 /* Attempt to combine with a previous opcode. We do this because gcc
4287 likes to output separate unwind directives for a single block of
4289 if (unwind
.opcode_count
> 0)
4291 i
= unwind
.opcodes
[unwind
.opcode_count
- 1];
4292 if ((i
& 0xf8) == 0xc0)
4295 /* Only merge if the blocks are contiguous. */
4298 if ((mask
& 0xfe00) == (1 << 9))
4300 mask
|= ((1 << (i
+ 11)) - 1) & 0xfc00;
4301 unwind
.opcode_count
--;
4304 else if (i
== 6 && unwind
.opcode_count
>= 2)
4306 i
= unwind
.opcodes
[unwind
.opcode_count
- 2];
4310 op
= 0xffff << (reg
- 1);
4312 && ((mask
& op
) == (1u << (reg
- 1))))
4314 op
= (1 << (reg
+ i
+ 1)) - 1;
4315 op
&= ~((1 << reg
) - 1);
4317 unwind
.opcode_count
-= 2;
4324 /* We want to generate opcodes in the order the registers have been
4325 saved, ie. descending order. */
4326 for (reg
= 15; reg
>= -1; reg
--)
4328 /* Save registers in blocks. */
4330 || !(mask
& (1 << reg
)))
4332 /* We found an unsaved reg. Generate opcodes to save the
4339 op
= 0xc0 | (hi_reg
- 10);
4340 add_unwind_opcode (op
, 1);
4345 op
= 0xc600 | ((reg
+ 1) << 4) | ((hi_reg
- reg
) - 1);
4346 add_unwind_opcode (op
, 2);
4355 ignore_rest_of_line ();
4359 s_arm_unwind_save_mmxwcg (void)
4366 if (*input_line_pointer
== '{')
4367 input_line_pointer
++;
4369 skip_whitespace (input_line_pointer
);
4373 reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWCG
);
4377 as_bad ("%s", _(reg_expected_msgs
[REG_TYPE_MMXWCG
]));
4383 as_tsktsk (_("register list not in ascending order"));
4386 if (*input_line_pointer
== '-')
4388 input_line_pointer
++;
4389 hi_reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWCG
);
4392 as_bad ("%s", _(reg_expected_msgs
[REG_TYPE_MMXWCG
]));
4395 else if (reg
>= hi_reg
)
4397 as_bad (_("bad register range"));
4400 for (; reg
< hi_reg
; reg
++)
4404 while (skip_past_comma (&input_line_pointer
) != FAIL
);
4406 skip_past_char (&input_line_pointer
, '}');
4408 demand_empty_rest_of_line ();
4410 /* Generate any deferred opcodes because we're going to be looking at
4412 flush_pending_unwind ();
4414 for (reg
= 0; reg
< 16; reg
++)
4416 if (mask
& (1 << reg
))
4417 unwind
.frame_size
+= 4;
4420 add_unwind_opcode (op
, 2);
4423 ignore_rest_of_line ();
4427 /* Parse an unwind_save directive.
4428 If the argument is non-zero, this is a .vsave directive. */
4431 s_arm_unwind_save (int arch_v6
)
4434 struct reg_entry
*reg
;
4435 bfd_boolean had_brace
= FALSE
;
4437 if (!unwind
.proc_start
)
4438 as_bad (MISSING_FNSTART
);
4440 /* Figure out what sort of save we have. */
4441 peek
= input_line_pointer
;
4449 reg
= arm_reg_parse_multi (&peek
);
4453 as_bad (_("register expected"));
4454 ignore_rest_of_line ();
4463 as_bad (_("FPA .unwind_save does not take a register list"));
4464 ignore_rest_of_line ();
4467 input_line_pointer
= peek
;
4468 s_arm_unwind_save_fpa (reg
->number
);
4472 s_arm_unwind_save_core ();
4477 s_arm_unwind_save_vfp_armv6 ();
4479 s_arm_unwind_save_vfp ();
4482 case REG_TYPE_MMXWR
:
4483 s_arm_unwind_save_mmxwr ();
4486 case REG_TYPE_MMXWCG
:
4487 s_arm_unwind_save_mmxwcg ();
4491 as_bad (_(".unwind_save does not support this kind of register"));
4492 ignore_rest_of_line ();
4497 /* Parse an unwind_movsp directive. */
4500 s_arm_unwind_movsp (int ignored ATTRIBUTE_UNUSED
)
4506 if (!unwind
.proc_start
)
4507 as_bad (MISSING_FNSTART
);
4509 reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_RN
);
4512 as_bad ("%s", _(reg_expected_msgs
[REG_TYPE_RN
]));
4513 ignore_rest_of_line ();
4517 /* Optional constant. */
4518 if (skip_past_comma (&input_line_pointer
) != FAIL
)
4520 if (immediate_for_directive (&offset
) == FAIL
)
4526 demand_empty_rest_of_line ();
4528 if (reg
== REG_SP
|| reg
== REG_PC
)
4530 as_bad (_("SP and PC not permitted in .unwind_movsp directive"));
4534 if (unwind
.fp_reg
!= REG_SP
)
4535 as_bad (_("unexpected .unwind_movsp directive"));
4537 /* Generate opcode to restore the value. */
4539 add_unwind_opcode (op
, 1);
4541 /* Record the information for later. */
4542 unwind
.fp_reg
= reg
;
4543 unwind
.fp_offset
= unwind
.frame_size
- offset
;
4544 unwind
.sp_restored
= 1;
4547 /* Parse an unwind_pad directive. */
4550 s_arm_unwind_pad (int ignored ATTRIBUTE_UNUSED
)
4554 if (!unwind
.proc_start
)
4555 as_bad (MISSING_FNSTART
);
4557 if (immediate_for_directive (&offset
) == FAIL
)
4562 as_bad (_("stack increment must be multiple of 4"));
4563 ignore_rest_of_line ();
4567 /* Don't generate any opcodes, just record the details for later. */
4568 unwind
.frame_size
+= offset
;
4569 unwind
.pending_offset
+= offset
;
4571 demand_empty_rest_of_line ();
4574 /* Parse an unwind_setfp directive. */
4577 s_arm_unwind_setfp (int ignored ATTRIBUTE_UNUSED
)
4583 if (!unwind
.proc_start
)
4584 as_bad (MISSING_FNSTART
);
4586 fp_reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_RN
);
4587 if (skip_past_comma (&input_line_pointer
) == FAIL
)
4590 sp_reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_RN
);
4592 if (fp_reg
== FAIL
|| sp_reg
== FAIL
)
4594 as_bad (_("expected <reg>, <reg>"));
4595 ignore_rest_of_line ();
4599 /* Optional constant. */
4600 if (skip_past_comma (&input_line_pointer
) != FAIL
)
4602 if (immediate_for_directive (&offset
) == FAIL
)
4608 demand_empty_rest_of_line ();
4610 if (sp_reg
!= REG_SP
&& sp_reg
!= unwind
.fp_reg
)
4612 as_bad (_("register must be either sp or set by a previous"
4613 "unwind_movsp directive"));
4617 /* Don't generate any opcodes, just record the information for later. */
4618 unwind
.fp_reg
= fp_reg
;
4620 if (sp_reg
== REG_SP
)
4621 unwind
.fp_offset
= unwind
.frame_size
- offset
;
4623 unwind
.fp_offset
-= offset
;
4626 /* Parse an unwind_raw directive. */
4629 s_arm_unwind_raw (int ignored ATTRIBUTE_UNUSED
)
4632 /* This is an arbitrary limit. */
4633 unsigned char op
[16];
4636 if (!unwind
.proc_start
)
4637 as_bad (MISSING_FNSTART
);
4640 if (exp
.X_op
== O_constant
4641 && skip_past_comma (&input_line_pointer
) != FAIL
)
4643 unwind
.frame_size
+= exp
.X_add_number
;
4647 exp
.X_op
= O_illegal
;
4649 if (exp
.X_op
!= O_constant
)
4651 as_bad (_("expected <offset>, <opcode>"));
4652 ignore_rest_of_line ();
4658 /* Parse the opcode. */
4663 as_bad (_("unwind opcode too long"));
4664 ignore_rest_of_line ();
4666 if (exp
.X_op
!= O_constant
|| exp
.X_add_number
& ~0xff)
4668 as_bad (_("invalid unwind opcode"));
4669 ignore_rest_of_line ();
4672 op
[count
++] = exp
.X_add_number
;
4674 /* Parse the next byte. */
4675 if (skip_past_comma (&input_line_pointer
) == FAIL
)
4681 /* Add the opcode bytes in reverse order. */
4683 add_unwind_opcode (op
[count
], 1);
4685 demand_empty_rest_of_line ();
4689 /* Parse a .eabi_attribute directive. */
4692 s_arm_eabi_attribute (int ignored ATTRIBUTE_UNUSED
)
4694 int tag
= obj_elf_vendor_attribute (OBJ_ATTR_PROC
);
4696 if (tag
< NUM_KNOWN_OBJ_ATTRIBUTES
)
4697 attributes_set_explicitly
[tag
] = 1;
4700 /* Emit a tls fix for the symbol. */
4703 s_arm_tls_descseq (int ignored ATTRIBUTE_UNUSED
)
4707 #ifdef md_flush_pending_output
4708 md_flush_pending_output ();
4711 #ifdef md_cons_align
4715 /* Since we're just labelling the code, there's no need to define a
4718 p
= obstack_next_free (&frchain_now
->frch_obstack
);
4719 fix_new_arm (frag_now
, p
- frag_now
->fr_literal
, 4, &exp
, 0,
4720 thumb_mode
? BFD_RELOC_ARM_THM_TLS_DESCSEQ
4721 : BFD_RELOC_ARM_TLS_DESCSEQ
);
4723 #endif /* OBJ_ELF */
4725 static void s_arm_arch (int);
4726 static void s_arm_object_arch (int);
4727 static void s_arm_cpu (int);
4728 static void s_arm_fpu (int);
4729 static void s_arm_arch_extension (int);
4734 pe_directive_secrel (int dummy ATTRIBUTE_UNUSED
)
4741 if (exp
.X_op
== O_symbol
)
4742 exp
.X_op
= O_secrel
;
4744 emit_expr (&exp
, 4);
4746 while (*input_line_pointer
++ == ',');
4748 input_line_pointer
--;
4749 demand_empty_rest_of_line ();
4753 /* This table describes all the machine specific pseudo-ops the assembler
4754 has to support. The fields are:
4755 pseudo-op name without dot
4756 function to call to execute this pseudo-op
4757 Integer arg to pass to the function. */
4759 const pseudo_typeS md_pseudo_table
[] =
4761 /* Never called because '.req' does not start a line. */
4762 { "req", s_req
, 0 },
4763 /* Following two are likewise never called. */
4766 { "unreq", s_unreq
, 0 },
4767 { "bss", s_bss
, 0 },
4768 { "align", s_align_ptwo
, 2 },
4769 { "arm", s_arm
, 0 },
4770 { "thumb", s_thumb
, 0 },
4771 { "code", s_code
, 0 },
4772 { "force_thumb", s_force_thumb
, 0 },
4773 { "thumb_func", s_thumb_func
, 0 },
4774 { "thumb_set", s_thumb_set
, 0 },
4775 { "even", s_even
, 0 },
4776 { "ltorg", s_ltorg
, 0 },
4777 { "pool", s_ltorg
, 0 },
4778 { "syntax", s_syntax
, 0 },
4779 { "cpu", s_arm_cpu
, 0 },
4780 { "arch", s_arm_arch
, 0 },
4781 { "object_arch", s_arm_object_arch
, 0 },
4782 { "fpu", s_arm_fpu
, 0 },
4783 { "arch_extension", s_arm_arch_extension
, 0 },
4785 { "word", s_arm_elf_cons
, 4 },
4786 { "long", s_arm_elf_cons
, 4 },
4787 { "inst.n", s_arm_elf_inst
, 2 },
4788 { "inst.w", s_arm_elf_inst
, 4 },
4789 { "inst", s_arm_elf_inst
, 0 },
4790 { "rel31", s_arm_rel31
, 0 },
4791 { "fnstart", s_arm_unwind_fnstart
, 0 },
4792 { "fnend", s_arm_unwind_fnend
, 0 },
4793 { "cantunwind", s_arm_unwind_cantunwind
, 0 },
4794 { "personality", s_arm_unwind_personality
, 0 },
4795 { "personalityindex", s_arm_unwind_personalityindex
, 0 },
4796 { "handlerdata", s_arm_unwind_handlerdata
, 0 },
4797 { "save", s_arm_unwind_save
, 0 },
4798 { "vsave", s_arm_unwind_save
, 1 },
4799 { "movsp", s_arm_unwind_movsp
, 0 },
4800 { "pad", s_arm_unwind_pad
, 0 },
4801 { "setfp", s_arm_unwind_setfp
, 0 },
4802 { "unwind_raw", s_arm_unwind_raw
, 0 },
4803 { "eabi_attribute", s_arm_eabi_attribute
, 0 },
4804 { "tlsdescseq", s_arm_tls_descseq
, 0 },
4808 /* These are used for dwarf. */
4812 /* These are used for dwarf2. */
4813 { "file", dwarf2_directive_file
, 0 },
4814 { "loc", dwarf2_directive_loc
, 0 },
4815 { "loc_mark_labels", dwarf2_directive_loc_mark_labels
, 0 },
4817 { "extend", float_cons
, 'x' },
4818 { "ldouble", float_cons
, 'x' },
4819 { "packed", float_cons
, 'p' },
4821 {"secrel32", pe_directive_secrel
, 0},
4824 /* These are for compatibility with CodeComposer Studio. */
4825 {"ref", s_ccs_ref
, 0},
4826 {"def", s_ccs_def
, 0},
4827 {"asmfunc", s_ccs_asmfunc
, 0},
4828 {"endasmfunc", s_ccs_endasmfunc
, 0},
4833 /* Parser functions used exclusively in instruction operands. */
4835 /* Generic immediate-value read function for use in insn parsing.
4836 STR points to the beginning of the immediate (the leading #);
4837 VAL receives the value; if the value is outside [MIN, MAX]
4838 issue an error. PREFIX_OPT is true if the immediate prefix is
4842 parse_immediate (char **str
, int *val
, int min
, int max
,
4843 bfd_boolean prefix_opt
)
4847 my_get_expression (&exp
, str
, prefix_opt
? GE_OPT_PREFIX
: GE_IMM_PREFIX
);
4848 if (exp
.X_op
!= O_constant
)
4850 inst
.error
= _("constant expression required");
4854 if (exp
.X_add_number
< min
|| exp
.X_add_number
> max
)
4856 inst
.error
= _("immediate value out of range");
4860 *val
= exp
.X_add_number
;
4864 /* Less-generic immediate-value read function with the possibility of loading a
4865 big (64-bit) immediate, as required by Neon VMOV, VMVN and logic immediate
4866 instructions. Puts the result directly in inst.operands[i]. */
4869 parse_big_immediate (char **str
, int i
, expressionS
*in_exp
,
4870 bfd_boolean allow_symbol_p
)
4873 expressionS
*exp_p
= in_exp
? in_exp
: &exp
;
4876 my_get_expression (exp_p
, &ptr
, GE_OPT_PREFIX_BIG
);
4878 if (exp_p
->X_op
== O_constant
)
4880 inst
.operands
[i
].imm
= exp_p
->X_add_number
& 0xffffffff;
4881 /* If we're on a 64-bit host, then a 64-bit number can be returned using
4882 O_constant. We have to be careful not to break compilation for
4883 32-bit X_add_number, though. */
4884 if ((exp_p
->X_add_number
& ~(offsetT
)(0xffffffffU
)) != 0)
4886 /* X >> 32 is illegal if sizeof (exp_p->X_add_number) == 4. */
4887 inst
.operands
[i
].reg
= (((exp_p
->X_add_number
>> 16) >> 16)
4889 inst
.operands
[i
].regisimm
= 1;
4892 else if (exp_p
->X_op
== O_big
4893 && LITTLENUM_NUMBER_OF_BITS
* exp_p
->X_add_number
> 32)
4895 unsigned parts
= 32 / LITTLENUM_NUMBER_OF_BITS
, j
, idx
= 0;
4897 /* Bignums have their least significant bits in
4898 generic_bignum[0]. Make sure we put 32 bits in imm and
4899 32 bits in reg, in a (hopefully) portable way. */
4900 gas_assert (parts
!= 0);
4902 /* Make sure that the number is not too big.
4903 PR 11972: Bignums can now be sign-extended to the
4904 size of a .octa so check that the out of range bits
4905 are all zero or all one. */
4906 if (LITTLENUM_NUMBER_OF_BITS
* exp_p
->X_add_number
> 64)
4908 LITTLENUM_TYPE m
= -1;
4910 if (generic_bignum
[parts
* 2] != 0
4911 && generic_bignum
[parts
* 2] != m
)
4914 for (j
= parts
* 2 + 1; j
< (unsigned) exp_p
->X_add_number
; j
++)
4915 if (generic_bignum
[j
] != generic_bignum
[j
-1])
4919 inst
.operands
[i
].imm
= 0;
4920 for (j
= 0; j
< parts
; j
++, idx
++)
4921 inst
.operands
[i
].imm
|= generic_bignum
[idx
]
4922 << (LITTLENUM_NUMBER_OF_BITS
* j
);
4923 inst
.operands
[i
].reg
= 0;
4924 for (j
= 0; j
< parts
; j
++, idx
++)
4925 inst
.operands
[i
].reg
|= generic_bignum
[idx
]
4926 << (LITTLENUM_NUMBER_OF_BITS
* j
);
4927 inst
.operands
[i
].regisimm
= 1;
4929 else if (!(exp_p
->X_op
== O_symbol
&& allow_symbol_p
))
4937 /* Returns the pseudo-register number of an FPA immediate constant,
4938 or FAIL if there isn't a valid constant here. */
4941 parse_fpa_immediate (char ** str
)
4943 LITTLENUM_TYPE words
[MAX_LITTLENUMS
];
4949 /* First try and match exact strings, this is to guarantee
4950 that some formats will work even for cross assembly. */
4952 for (i
= 0; fp_const
[i
]; i
++)
4954 if (strncmp (*str
, fp_const
[i
], strlen (fp_const
[i
])) == 0)
4958 *str
+= strlen (fp_const
[i
]);
4959 if (is_end_of_line
[(unsigned char) **str
])
4965 /* Just because we didn't get a match doesn't mean that the constant
4966 isn't valid, just that it is in a format that we don't
4967 automatically recognize. Try parsing it with the standard
4968 expression routines. */
4970 memset (words
, 0, MAX_LITTLENUMS
* sizeof (LITTLENUM_TYPE
));
4972 /* Look for a raw floating point number. */
4973 if ((save_in
= atof_ieee (*str
, 'x', words
)) != NULL
4974 && is_end_of_line
[(unsigned char) *save_in
])
4976 for (i
= 0; i
< NUM_FLOAT_VALS
; i
++)
4978 for (j
= 0; j
< MAX_LITTLENUMS
; j
++)
4980 if (words
[j
] != fp_values
[i
][j
])
4984 if (j
== MAX_LITTLENUMS
)
4992 /* Try and parse a more complex expression, this will probably fail
4993 unless the code uses a floating point prefix (eg "0f"). */
4994 save_in
= input_line_pointer
;
4995 input_line_pointer
= *str
;
4996 if (expression (&exp
) == absolute_section
4997 && exp
.X_op
== O_big
4998 && exp
.X_add_number
< 0)
5000 /* FIXME: 5 = X_PRECISION, should be #define'd where we can use it.
5002 #define X_PRECISION 5
5003 #define E_PRECISION 15L
5004 if (gen_to_words (words
, X_PRECISION
, E_PRECISION
) == 0)
5006 for (i
= 0; i
< NUM_FLOAT_VALS
; i
++)
5008 for (j
= 0; j
< MAX_LITTLENUMS
; j
++)
5010 if (words
[j
] != fp_values
[i
][j
])
5014 if (j
== MAX_LITTLENUMS
)
5016 *str
= input_line_pointer
;
5017 input_line_pointer
= save_in
;
5024 *str
= input_line_pointer
;
5025 input_line_pointer
= save_in
;
5026 inst
.error
= _("invalid FPA immediate expression");
5041 /* Detect the presence of a floating point or integer zero constant,
5045 parse_ifimm_zero (char **in
)
5049 if (!is_immediate_prefix (**in
))
5051 /* In unified syntax, all prefixes are optional. */
5052 if (!unified_syntax
)
5058 /* Accept #0x0 as a synonym for #0. */
5059 if (strncmp (*in
, "0x", 2) == 0)
5062 if (parse_immediate (in
, &val
, 0, 0, TRUE
) == FAIL
)
5067 error_code
= atof_generic (in
, ".", EXP_CHARS
,
5068 &generic_floating_point_number
);
5071 && generic_floating_point_number
.sign
== '+'
5072 && (generic_floating_point_number
.low
5073 > generic_floating_point_number
.leader
))
5079 /* Parse an 8-bit "quarter-precision" floating point number of the form:
5080 0baBbbbbbc defgh000 00000000 00000000.
5081 The zero and minus-zero cases need special handling, since they can't be
5082 encoded in the "quarter-precision" float format, but can nonetheless be
5083 loaded as integer constants. */
5086 parse_qfloat_immediate (char **ccp
, int *immed
)
5090 LITTLENUM_TYPE words
[MAX_LITTLENUMS
];
5091 int found_fpchar
= 0;
5093 skip_past_char (&str
, '#');
5095 /* We must not accidentally parse an integer as a floating-point number. Make
5096 sure that the value we parse is not an integer by checking for special
5097 characters '.' or 'e'.
5098 FIXME: This is a horrible hack, but doing better is tricky because type
5099 information isn't in a very usable state at parse time. */
5101 skip_whitespace (fpnum
);
5103 if (strncmp (fpnum
, "0x", 2) == 0)
5107 for (; *fpnum
!= '\0' && *fpnum
!= ' ' && *fpnum
!= '\n'; fpnum
++)
5108 if (*fpnum
== '.' || *fpnum
== 'e' || *fpnum
== 'E')
5118 if ((str
= atof_ieee (str
, 's', words
)) != NULL
)
5120 unsigned fpword
= 0;
5123 /* Our FP word must be 32 bits (single-precision FP). */
5124 for (i
= 0; i
< 32 / LITTLENUM_NUMBER_OF_BITS
; i
++)
5126 fpword
<<= LITTLENUM_NUMBER_OF_BITS
;
5130 if (is_quarter_float (fpword
) || (fpword
& 0x7fffffff) == 0)
5143 /* Shift operands. */
5146 SHIFT_LSL
, SHIFT_LSR
, SHIFT_ASR
, SHIFT_ROR
, SHIFT_RRX
5149 struct asm_shift_name
5152 enum shift_kind kind
;
5155 /* Third argument to parse_shift. */
5156 enum parse_shift_mode
5158 NO_SHIFT_RESTRICT
, /* Any kind of shift is accepted. */
5159 SHIFT_IMMEDIATE
, /* Shift operand must be an immediate. */
5160 SHIFT_LSL_OR_ASR_IMMEDIATE
, /* Shift must be LSL or ASR immediate. */
5161 SHIFT_ASR_IMMEDIATE
, /* Shift must be ASR immediate. */
5162 SHIFT_LSL_IMMEDIATE
, /* Shift must be LSL immediate. */
5165 /* Parse a <shift> specifier on an ARM data processing instruction.
5166 This has three forms:
5168 (LSL|LSR|ASL|ASR|ROR) Rs
5169 (LSL|LSR|ASL|ASR|ROR) #imm
5172 Note that ASL is assimilated to LSL in the instruction encoding, and
5173 RRX to ROR #0 (which cannot be written as such). */
5176 parse_shift (char **str
, int i
, enum parse_shift_mode mode
)
5178 const struct asm_shift_name
*shift_name
;
5179 enum shift_kind shift
;
5184 for (p
= *str
; ISALPHA (*p
); p
++)
5189 inst
.error
= _("shift expression expected");
5193 shift_name
= (const struct asm_shift_name
*) hash_find_n (arm_shift_hsh
, *str
,
5196 if (shift_name
== NULL
)
5198 inst
.error
= _("shift expression expected");
5202 shift
= shift_name
->kind
;
5206 case NO_SHIFT_RESTRICT
:
5207 case SHIFT_IMMEDIATE
: break;
5209 case SHIFT_LSL_OR_ASR_IMMEDIATE
:
5210 if (shift
!= SHIFT_LSL
&& shift
!= SHIFT_ASR
)
5212 inst
.error
= _("'LSL' or 'ASR' required");
5217 case SHIFT_LSL_IMMEDIATE
:
5218 if (shift
!= SHIFT_LSL
)
5220 inst
.error
= _("'LSL' required");
5225 case SHIFT_ASR_IMMEDIATE
:
5226 if (shift
!= SHIFT_ASR
)
5228 inst
.error
= _("'ASR' required");
5236 if (shift
!= SHIFT_RRX
)
5238 /* Whitespace can appear here if the next thing is a bare digit. */
5239 skip_whitespace (p
);
5241 if (mode
== NO_SHIFT_RESTRICT
5242 && (reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) != FAIL
)
5244 inst
.operands
[i
].imm
= reg
;
5245 inst
.operands
[i
].immisreg
= 1;
5247 else if (my_get_expression (&inst
.relocs
[0].exp
, &p
, GE_IMM_PREFIX
))
5250 inst
.operands
[i
].shift_kind
= shift
;
5251 inst
.operands
[i
].shifted
= 1;
5256 /* Parse a <shifter_operand> for an ARM data processing instruction:
5259 #<immediate>, <rotate>
5263 where <shift> is defined by parse_shift above, and <rotate> is a
5264 multiple of 2 between 0 and 30. Validation of immediate operands
5265 is deferred to md_apply_fix. */
5268 parse_shifter_operand (char **str
, int i
)
5273 if ((value
= arm_reg_parse (str
, REG_TYPE_RN
)) != FAIL
)
5275 inst
.operands
[i
].reg
= value
;
5276 inst
.operands
[i
].isreg
= 1;
5278 /* parse_shift will override this if appropriate */
5279 inst
.relocs
[0].exp
.X_op
= O_constant
;
5280 inst
.relocs
[0].exp
.X_add_number
= 0;
5282 if (skip_past_comma (str
) == FAIL
)
5285 /* Shift operation on register. */
5286 return parse_shift (str
, i
, NO_SHIFT_RESTRICT
);
5289 if (my_get_expression (&inst
.relocs
[0].exp
, str
, GE_IMM_PREFIX
))
5292 if (skip_past_comma (str
) == SUCCESS
)
5294 /* #x, y -- ie explicit rotation by Y. */
5295 if (my_get_expression (&exp
, str
, GE_NO_PREFIX
))
5298 if (exp
.X_op
!= O_constant
|| inst
.relocs
[0].exp
.X_op
!= O_constant
)
5300 inst
.error
= _("constant expression expected");
5304 value
= exp
.X_add_number
;
5305 if (value
< 0 || value
> 30 || value
% 2 != 0)
5307 inst
.error
= _("invalid rotation");
5310 if (inst
.relocs
[0].exp
.X_add_number
< 0
5311 || inst
.relocs
[0].exp
.X_add_number
> 255)
5313 inst
.error
= _("invalid constant");
5317 /* Encode as specified. */
5318 inst
.operands
[i
].imm
= inst
.relocs
[0].exp
.X_add_number
| value
<< 7;
5322 inst
.relocs
[0].type
= BFD_RELOC_ARM_IMMEDIATE
;
5323 inst
.relocs
[0].pc_rel
= 0;
5327 /* Group relocation information. Each entry in the table contains the
5328 textual name of the relocation as may appear in assembler source
5329 and must end with a colon.
5330 Along with this textual name are the relocation codes to be used if
5331 the corresponding instruction is an ALU instruction (ADD or SUB only),
5332 an LDR, an LDRS, or an LDC. */
5334 struct group_reloc_table_entry
5345 /* Varieties of non-ALU group relocation. */
5352 static struct group_reloc_table_entry group_reloc_table
[] =
5353 { /* Program counter relative: */
5355 BFD_RELOC_ARM_ALU_PC_G0_NC
, /* ALU */
5360 BFD_RELOC_ARM_ALU_PC_G0
, /* ALU */
5361 BFD_RELOC_ARM_LDR_PC_G0
, /* LDR */
5362 BFD_RELOC_ARM_LDRS_PC_G0
, /* LDRS */
5363 BFD_RELOC_ARM_LDC_PC_G0
}, /* LDC */
5365 BFD_RELOC_ARM_ALU_PC_G1_NC
, /* ALU */
5370 BFD_RELOC_ARM_ALU_PC_G1
, /* ALU */
5371 BFD_RELOC_ARM_LDR_PC_G1
, /* LDR */
5372 BFD_RELOC_ARM_LDRS_PC_G1
, /* LDRS */
5373 BFD_RELOC_ARM_LDC_PC_G1
}, /* LDC */
5375 BFD_RELOC_ARM_ALU_PC_G2
, /* ALU */
5376 BFD_RELOC_ARM_LDR_PC_G2
, /* LDR */
5377 BFD_RELOC_ARM_LDRS_PC_G2
, /* LDRS */
5378 BFD_RELOC_ARM_LDC_PC_G2
}, /* LDC */
5379 /* Section base relative */
5381 BFD_RELOC_ARM_ALU_SB_G0_NC
, /* ALU */
5386 BFD_RELOC_ARM_ALU_SB_G0
, /* ALU */
5387 BFD_RELOC_ARM_LDR_SB_G0
, /* LDR */
5388 BFD_RELOC_ARM_LDRS_SB_G0
, /* LDRS */
5389 BFD_RELOC_ARM_LDC_SB_G0
}, /* LDC */
5391 BFD_RELOC_ARM_ALU_SB_G1_NC
, /* ALU */
5396 BFD_RELOC_ARM_ALU_SB_G1
, /* ALU */
5397 BFD_RELOC_ARM_LDR_SB_G1
, /* LDR */
5398 BFD_RELOC_ARM_LDRS_SB_G1
, /* LDRS */
5399 BFD_RELOC_ARM_LDC_SB_G1
}, /* LDC */
5401 BFD_RELOC_ARM_ALU_SB_G2
, /* ALU */
5402 BFD_RELOC_ARM_LDR_SB_G2
, /* LDR */
5403 BFD_RELOC_ARM_LDRS_SB_G2
, /* LDRS */
5404 BFD_RELOC_ARM_LDC_SB_G2
}, /* LDC */
5405 /* Absolute thumb alu relocations. */
5407 BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
,/* ALU. */
5412 BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC
,/* ALU. */
5417 BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC
,/* ALU. */
5422 BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
,/* ALU. */
5427 /* Given the address of a pointer pointing to the textual name of a group
5428 relocation as may appear in assembler source, attempt to find its details
5429 in group_reloc_table. The pointer will be updated to the character after
5430 the trailing colon. On failure, FAIL will be returned; SUCCESS
5431 otherwise. On success, *entry will be updated to point at the relevant
5432 group_reloc_table entry. */
5435 find_group_reloc_table_entry (char **str
, struct group_reloc_table_entry
**out
)
5438 for (i
= 0; i
< ARRAY_SIZE (group_reloc_table
); i
++)
5440 int length
= strlen (group_reloc_table
[i
].name
);
5442 if (strncasecmp (group_reloc_table
[i
].name
, *str
, length
) == 0
5443 && (*str
)[length
] == ':')
5445 *out
= &group_reloc_table
[i
];
5446 *str
+= (length
+ 1);
5454 /* Parse a <shifter_operand> for an ARM data processing instruction
5455 (as for parse_shifter_operand) where group relocations are allowed:
5458 #<immediate>, <rotate>
5459 #:<group_reloc>:<expression>
5463 where <group_reloc> is one of the strings defined in group_reloc_table.
5464 The hashes are optional.
5466 Everything else is as for parse_shifter_operand. */
5468 static parse_operand_result
5469 parse_shifter_operand_group_reloc (char **str
, int i
)
5471 /* Determine if we have the sequence of characters #: or just :
5472 coming next. If we do, then we check for a group relocation.
5473 If we don't, punt the whole lot to parse_shifter_operand. */
5475 if (((*str
)[0] == '#' && (*str
)[1] == ':')
5476 || (*str
)[0] == ':')
5478 struct group_reloc_table_entry
*entry
;
5480 if ((*str
)[0] == '#')
5485 /* Try to parse a group relocation. Anything else is an error. */
5486 if (find_group_reloc_table_entry (str
, &entry
) == FAIL
)
5488 inst
.error
= _("unknown group relocation");
5489 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
5492 /* We now have the group relocation table entry corresponding to
5493 the name in the assembler source. Next, we parse the expression. */
5494 if (my_get_expression (&inst
.relocs
[0].exp
, str
, GE_NO_PREFIX
))
5495 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
5497 /* Record the relocation type (always the ALU variant here). */
5498 inst
.relocs
[0].type
= (bfd_reloc_code_real_type
) entry
->alu_code
;
5499 gas_assert (inst
.relocs
[0].type
!= 0);
5501 return PARSE_OPERAND_SUCCESS
;
5504 return parse_shifter_operand (str
, i
) == SUCCESS
5505 ? PARSE_OPERAND_SUCCESS
: PARSE_OPERAND_FAIL
;
5507 /* Never reached. */
5510 /* Parse a Neon alignment expression. Information is written to
5511 inst.operands[i]. We assume the initial ':' has been skipped.
5513 align .imm = align << 8, .immisalign=1, .preind=0 */
5514 static parse_operand_result
5515 parse_neon_alignment (char **str
, int i
)
5520 my_get_expression (&exp
, &p
, GE_NO_PREFIX
);
5522 if (exp
.X_op
!= O_constant
)
5524 inst
.error
= _("alignment must be constant");
5525 return PARSE_OPERAND_FAIL
;
5528 inst
.operands
[i
].imm
= exp
.X_add_number
<< 8;
5529 inst
.operands
[i
].immisalign
= 1;
5530 /* Alignments are not pre-indexes. */
5531 inst
.operands
[i
].preind
= 0;
5534 return PARSE_OPERAND_SUCCESS
;
5537 /* Parse all forms of an ARM address expression. Information is written
5538 to inst.operands[i] and/or inst.relocs[0].
5540 Preindexed addressing (.preind=1):
5542 [Rn, #offset] .reg=Rn .relocs[0].exp=offset
5543 [Rn, +/-Rm] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5544 [Rn, +/-Rm, shift] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5545 .shift_kind=shift .relocs[0].exp=shift_imm
5547 These three may have a trailing ! which causes .writeback to be set also.
5549 Postindexed addressing (.postind=1, .writeback=1):
5551 [Rn], #offset .reg=Rn .relocs[0].exp=offset
5552 [Rn], +/-Rm .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5553 [Rn], +/-Rm, shift .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5554 .shift_kind=shift .relocs[0].exp=shift_imm
5556 Unindexed addressing (.preind=0, .postind=0):
5558 [Rn], {option} .reg=Rn .imm=option .immisreg=0
5562 [Rn]{!} shorthand for [Rn,#0]{!}
5563 =immediate .isreg=0 .relocs[0].exp=immediate
5564 label .reg=PC .relocs[0].pc_rel=1 .relocs[0].exp=label
5566 It is the caller's responsibility to check for addressing modes not
5567 supported by the instruction, and to set inst.relocs[0].type. */
5569 static parse_operand_result
5570 parse_address_main (char **str
, int i
, int group_relocations
,
5571 group_reloc_type group_type
)
5576 if (skip_past_char (&p
, '[') == FAIL
)
5578 if (skip_past_char (&p
, '=') == FAIL
)
5580 /* Bare address - translate to PC-relative offset. */
5581 inst
.relocs
[0].pc_rel
= 1;
5582 inst
.operands
[i
].reg
= REG_PC
;
5583 inst
.operands
[i
].isreg
= 1;
5584 inst
.operands
[i
].preind
= 1;
5586 if (my_get_expression (&inst
.relocs
[0].exp
, &p
, GE_OPT_PREFIX_BIG
))
5587 return PARSE_OPERAND_FAIL
;
5589 else if (parse_big_immediate (&p
, i
, &inst
.relocs
[0].exp
,
5590 /*allow_symbol_p=*/TRUE
))
5591 return PARSE_OPERAND_FAIL
;
5594 return PARSE_OPERAND_SUCCESS
;
5597 /* PR gas/14887: Allow for whitespace after the opening bracket. */
5598 skip_whitespace (p
);
5600 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) == FAIL
)
5602 inst
.error
= _(reg_expected_msgs
[REG_TYPE_RN
]);
5603 return PARSE_OPERAND_FAIL
;
5605 inst
.operands
[i
].reg
= reg
;
5606 inst
.operands
[i
].isreg
= 1;
5608 if (skip_past_comma (&p
) == SUCCESS
)
5610 inst
.operands
[i
].preind
= 1;
5613 else if (*p
== '-') p
++, inst
.operands
[i
].negative
= 1;
5615 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) != FAIL
)
5617 inst
.operands
[i
].imm
= reg
;
5618 inst
.operands
[i
].immisreg
= 1;
5620 if (skip_past_comma (&p
) == SUCCESS
)
5621 if (parse_shift (&p
, i
, SHIFT_IMMEDIATE
) == FAIL
)
5622 return PARSE_OPERAND_FAIL
;
5624 else if (skip_past_char (&p
, ':') == SUCCESS
)
5626 /* FIXME: '@' should be used here, but it's filtered out by generic
5627 code before we get to see it here. This may be subject to
5629 parse_operand_result result
= parse_neon_alignment (&p
, i
);
5631 if (result
!= PARSE_OPERAND_SUCCESS
)
5636 if (inst
.operands
[i
].negative
)
5638 inst
.operands
[i
].negative
= 0;
5642 if (group_relocations
5643 && ((*p
== '#' && *(p
+ 1) == ':') || *p
== ':'))
5645 struct group_reloc_table_entry
*entry
;
5647 /* Skip over the #: or : sequence. */
5653 /* Try to parse a group relocation. Anything else is an
5655 if (find_group_reloc_table_entry (&p
, &entry
) == FAIL
)
5657 inst
.error
= _("unknown group relocation");
5658 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
5661 /* We now have the group relocation table entry corresponding to
5662 the name in the assembler source. Next, we parse the
5664 if (my_get_expression (&inst
.relocs
[0].exp
, &p
, GE_NO_PREFIX
))
5665 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
5667 /* Record the relocation type. */
5672 = (bfd_reloc_code_real_type
) entry
->ldr_code
;
5677 = (bfd_reloc_code_real_type
) entry
->ldrs_code
;
5682 = (bfd_reloc_code_real_type
) entry
->ldc_code
;
5689 if (inst
.relocs
[0].type
== 0)
5691 inst
.error
= _("this group relocation is not allowed on this instruction");
5692 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
5699 if (my_get_expression (&inst
.relocs
[0].exp
, &p
, GE_IMM_PREFIX
))
5700 return PARSE_OPERAND_FAIL
;
5701 /* If the offset is 0, find out if it's a +0 or -0. */
5702 if (inst
.relocs
[0].exp
.X_op
== O_constant
5703 && inst
.relocs
[0].exp
.X_add_number
== 0)
5705 skip_whitespace (q
);
5709 skip_whitespace (q
);
5712 inst
.operands
[i
].negative
= 1;
5717 else if (skip_past_char (&p
, ':') == SUCCESS
)
5719 /* FIXME: '@' should be used here, but it's filtered out by generic code
5720 before we get to see it here. This may be subject to change. */
5721 parse_operand_result result
= parse_neon_alignment (&p
, i
);
5723 if (result
!= PARSE_OPERAND_SUCCESS
)
5727 if (skip_past_char (&p
, ']') == FAIL
)
5729 inst
.error
= _("']' expected");
5730 return PARSE_OPERAND_FAIL
;
5733 if (skip_past_char (&p
, '!') == SUCCESS
)
5734 inst
.operands
[i
].writeback
= 1;
5736 else if (skip_past_comma (&p
) == SUCCESS
)
5738 if (skip_past_char (&p
, '{') == SUCCESS
)
5740 /* [Rn], {expr} - unindexed, with option */
5741 if (parse_immediate (&p
, &inst
.operands
[i
].imm
,
5742 0, 255, TRUE
) == FAIL
)
5743 return PARSE_OPERAND_FAIL
;
5745 if (skip_past_char (&p
, '}') == FAIL
)
5747 inst
.error
= _("'}' expected at end of 'option' field");
5748 return PARSE_OPERAND_FAIL
;
5750 if (inst
.operands
[i
].preind
)
5752 inst
.error
= _("cannot combine index with option");
5753 return PARSE_OPERAND_FAIL
;
5756 return PARSE_OPERAND_SUCCESS
;
5760 inst
.operands
[i
].postind
= 1;
5761 inst
.operands
[i
].writeback
= 1;
5763 if (inst
.operands
[i
].preind
)
5765 inst
.error
= _("cannot combine pre- and post-indexing");
5766 return PARSE_OPERAND_FAIL
;
5770 else if (*p
== '-') p
++, inst
.operands
[i
].negative
= 1;
5772 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) != FAIL
)
5774 /* We might be using the immediate for alignment already. If we
5775 are, OR the register number into the low-order bits. */
5776 if (inst
.operands
[i
].immisalign
)
5777 inst
.operands
[i
].imm
|= reg
;
5779 inst
.operands
[i
].imm
= reg
;
5780 inst
.operands
[i
].immisreg
= 1;
5782 if (skip_past_comma (&p
) == SUCCESS
)
5783 if (parse_shift (&p
, i
, SHIFT_IMMEDIATE
) == FAIL
)
5784 return PARSE_OPERAND_FAIL
;
5790 if (inst
.operands
[i
].negative
)
5792 inst
.operands
[i
].negative
= 0;
5795 if (my_get_expression (&inst
.relocs
[0].exp
, &p
, GE_IMM_PREFIX
))
5796 return PARSE_OPERAND_FAIL
;
5797 /* If the offset is 0, find out if it's a +0 or -0. */
5798 if (inst
.relocs
[0].exp
.X_op
== O_constant
5799 && inst
.relocs
[0].exp
.X_add_number
== 0)
5801 skip_whitespace (q
);
5805 skip_whitespace (q
);
5808 inst
.operands
[i
].negative
= 1;
5814 /* If at this point neither .preind nor .postind is set, we have a
5815 bare [Rn]{!}, which is shorthand for [Rn,#0]{!}. */
5816 if (inst
.operands
[i
].preind
== 0 && inst
.operands
[i
].postind
== 0)
5818 inst
.operands
[i
].preind
= 1;
5819 inst
.relocs
[0].exp
.X_op
= O_constant
;
5820 inst
.relocs
[0].exp
.X_add_number
= 0;
5823 return PARSE_OPERAND_SUCCESS
;
5827 parse_address (char **str
, int i
)
5829 return parse_address_main (str
, i
, 0, GROUP_LDR
) == PARSE_OPERAND_SUCCESS
5833 static parse_operand_result
5834 parse_address_group_reloc (char **str
, int i
, group_reloc_type type
)
5836 return parse_address_main (str
, i
, 1, type
);
5839 /* Parse an operand for a MOVW or MOVT instruction. */
5841 parse_half (char **str
)
5846 skip_past_char (&p
, '#');
5847 if (strncasecmp (p
, ":lower16:", 9) == 0)
5848 inst
.relocs
[0].type
= BFD_RELOC_ARM_MOVW
;
5849 else if (strncasecmp (p
, ":upper16:", 9) == 0)
5850 inst
.relocs
[0].type
= BFD_RELOC_ARM_MOVT
;
5852 if (inst
.relocs
[0].type
!= BFD_RELOC_UNUSED
)
5855 skip_whitespace (p
);
5858 if (my_get_expression (&inst
.relocs
[0].exp
, &p
, GE_NO_PREFIX
))
5861 if (inst
.relocs
[0].type
== BFD_RELOC_UNUSED
)
5863 if (inst
.relocs
[0].exp
.X_op
!= O_constant
)
5865 inst
.error
= _("constant expression expected");
5868 if (inst
.relocs
[0].exp
.X_add_number
< 0
5869 || inst
.relocs
[0].exp
.X_add_number
> 0xffff)
5871 inst
.error
= _("immediate value out of range");
5879 /* Miscellaneous. */
5881 /* Parse a PSR flag operand. The value returned is FAIL on syntax error,
5882 or a bitmask suitable to be or-ed into the ARM msr instruction. */
5884 parse_psr (char **str
, bfd_boolean lhs
)
5887 unsigned long psr_field
;
5888 const struct asm_psr
*psr
;
5890 bfd_boolean is_apsr
= FALSE
;
5891 bfd_boolean m_profile
= ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_m
);
5893 /* PR gas/12698: If the user has specified -march=all then m_profile will
5894 be TRUE, but we want to ignore it in this case as we are building for any
5895 CPU type, including non-m variants. */
5896 if (ARM_FEATURE_CORE_EQUAL (selected_cpu
, arm_arch_any
))
5899 /* CPSR's and SPSR's can now be lowercase. This is just a convenience
5900 feature for ease of use and backwards compatibility. */
5902 if (strncasecmp (p
, "SPSR", 4) == 0)
5905 goto unsupported_psr
;
5907 psr_field
= SPSR_BIT
;
5909 else if (strncasecmp (p
, "CPSR", 4) == 0)
5912 goto unsupported_psr
;
5916 else if (strncasecmp (p
, "APSR", 4) == 0)
5918 /* APSR[_<bits>] can be used as a synonym for CPSR[_<flags>] on ARMv7-A
5919 and ARMv7-R architecture CPUs. */
5928 while (ISALNUM (*p
) || *p
== '_');
5930 if (strncasecmp (start
, "iapsr", 5) == 0
5931 || strncasecmp (start
, "eapsr", 5) == 0
5932 || strncasecmp (start
, "xpsr", 4) == 0
5933 || strncasecmp (start
, "psr", 3) == 0)
5934 p
= start
+ strcspn (start
, "rR") + 1;
5936 psr
= (const struct asm_psr
*) hash_find_n (arm_v7m_psr_hsh
, start
,
5942 /* If APSR is being written, a bitfield may be specified. Note that
5943 APSR itself is handled above. */
5944 if (psr
->field
<= 3)
5946 psr_field
= psr
->field
;
5952 /* M-profile MSR instructions have the mask field set to "10", except
5953 *PSR variants which modify APSR, which may use a different mask (and
5954 have been handled already). Do that by setting the PSR_f field
5956 return psr
->field
| (lhs
? PSR_f
: 0);
5959 goto unsupported_psr
;
5965 /* A suffix follows. */
5971 while (ISALNUM (*p
) || *p
== '_');
5975 /* APSR uses a notation for bits, rather than fields. */
5976 unsigned int nzcvq_bits
= 0;
5977 unsigned int g_bit
= 0;
5980 for (bit
= start
; bit
!= p
; bit
++)
5982 switch (TOLOWER (*bit
))
5985 nzcvq_bits
|= (nzcvq_bits
& 0x01) ? 0x20 : 0x01;
5989 nzcvq_bits
|= (nzcvq_bits
& 0x02) ? 0x20 : 0x02;
5993 nzcvq_bits
|= (nzcvq_bits
& 0x04) ? 0x20 : 0x04;
5997 nzcvq_bits
|= (nzcvq_bits
& 0x08) ? 0x20 : 0x08;
6001 nzcvq_bits
|= (nzcvq_bits
& 0x10) ? 0x20 : 0x10;
6005 g_bit
|= (g_bit
& 0x1) ? 0x2 : 0x1;
6009 inst
.error
= _("unexpected bit specified after APSR");
6014 if (nzcvq_bits
== 0x1f)
6019 if (!ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6_dsp
))
6021 inst
.error
= _("selected processor does not "
6022 "support DSP extension");
6029 if ((nzcvq_bits
& 0x20) != 0
6030 || (nzcvq_bits
!= 0x1f && nzcvq_bits
!= 0)
6031 || (g_bit
& 0x2) != 0)
6033 inst
.error
= _("bad bitmask specified after APSR");
6039 psr
= (const struct asm_psr
*) hash_find_n (arm_psr_hsh
, start
,
6044 psr_field
|= psr
->field
;
6050 goto error
; /* Garbage after "[CS]PSR". */
6052 /* Unadorned APSR is equivalent to APSR_nzcvq/CPSR_f (for writes). This
6053 is deprecated, but allow it anyway. */
6057 as_tsktsk (_("writing to APSR without specifying a bitmask is "
6060 else if (!m_profile
)
6061 /* These bits are never right for M-profile devices: don't set them
6062 (only code paths which read/write APSR reach here). */
6063 psr_field
|= (PSR_c
| PSR_f
);
6069 inst
.error
= _("selected processor does not support requested special "
6070 "purpose register");
6074 inst
.error
= _("flag for {c}psr instruction expected");
6078 /* Parse the flags argument to CPSI[ED]. Returns FAIL on error, or a
6079 value suitable for splatting into the AIF field of the instruction. */
6082 parse_cps_flags (char **str
)
6091 case '\0': case ',':
6094 case 'a': case 'A': saw_a_flag
= 1; val
|= 0x4; break;
6095 case 'i': case 'I': saw_a_flag
= 1; val
|= 0x2; break;
6096 case 'f': case 'F': saw_a_flag
= 1; val
|= 0x1; break;
6099 inst
.error
= _("unrecognized CPS flag");
6104 if (saw_a_flag
== 0)
6106 inst
.error
= _("missing CPS flags");
6114 /* Parse an endian specifier ("BE" or "LE", case insensitive);
6115 returns 0 for big-endian, 1 for little-endian, FAIL for an error. */
6118 parse_endian_specifier (char **str
)
6123 if (strncasecmp (s
, "BE", 2))
6125 else if (strncasecmp (s
, "LE", 2))
6129 inst
.error
= _("valid endian specifiers are be or le");
6133 if (ISALNUM (s
[2]) || s
[2] == '_')
6135 inst
.error
= _("valid endian specifiers are be or le");
6140 return little_endian
;
6143 /* Parse a rotation specifier: ROR #0, #8, #16, #24. *val receives a
6144 value suitable for poking into the rotate field of an sxt or sxta
6145 instruction, or FAIL on error. */
6148 parse_ror (char **str
)
6153 if (strncasecmp (s
, "ROR", 3) == 0)
6157 inst
.error
= _("missing rotation field after comma");
6161 if (parse_immediate (&s
, &rot
, 0, 24, FALSE
) == FAIL
)
6166 case 0: *str
= s
; return 0x0;
6167 case 8: *str
= s
; return 0x1;
6168 case 16: *str
= s
; return 0x2;
6169 case 24: *str
= s
; return 0x3;
6172 inst
.error
= _("rotation can only be 0, 8, 16, or 24");
6177 /* Parse a conditional code (from conds[] below). The value returned is in the
6178 range 0 .. 14, or FAIL. */
6180 parse_cond (char **str
)
6183 const struct asm_cond
*c
;
6185 /* Condition codes are always 2 characters, so matching up to
6186 3 characters is sufficient. */
6191 while (ISALPHA (*q
) && n
< 3)
6193 cond
[n
] = TOLOWER (*q
);
6198 c
= (const struct asm_cond
*) hash_find_n (arm_cond_hsh
, cond
, n
);
6201 inst
.error
= _("condition required");
6209 /* Record a use of the given feature. */
6211 record_feature_use (const arm_feature_set
*feature
)
6214 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
, *feature
);
6216 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
, *feature
);
6219 /* If the given feature is currently allowed, mark it as used and return TRUE.
6220 Return FALSE otherwise. */
6222 mark_feature_used (const arm_feature_set
*feature
)
6224 /* Ensure the option is currently allowed. */
6225 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, *feature
))
6228 /* Add the appropriate architecture feature for the barrier option used. */
6229 record_feature_use (feature
);
6234 /* Parse an option for a barrier instruction. Returns the encoding for the
6237 parse_barrier (char **str
)
6240 const struct asm_barrier_opt
*o
;
6243 while (ISALPHA (*q
))
6246 o
= (const struct asm_barrier_opt
*) hash_find_n (arm_barrier_opt_hsh
, p
,
6251 if (!mark_feature_used (&o
->arch
))
6258 /* Parse the operands of a table branch instruction. Similar to a memory
6261 parse_tb (char **str
)
6266 if (skip_past_char (&p
, '[') == FAIL
)
6268 inst
.error
= _("'[' expected");
6272 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) == FAIL
)
6274 inst
.error
= _(reg_expected_msgs
[REG_TYPE_RN
]);
6277 inst
.operands
[0].reg
= reg
;
6279 if (skip_past_comma (&p
) == FAIL
)
6281 inst
.error
= _("',' expected");
6285 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) == FAIL
)
6287 inst
.error
= _(reg_expected_msgs
[REG_TYPE_RN
]);
6290 inst
.operands
[0].imm
= reg
;
6292 if (skip_past_comma (&p
) == SUCCESS
)
6294 if (parse_shift (&p
, 0, SHIFT_LSL_IMMEDIATE
) == FAIL
)
6296 if (inst
.relocs
[0].exp
.X_add_number
!= 1)
6298 inst
.error
= _("invalid shift");
6301 inst
.operands
[0].shifted
= 1;
6304 if (skip_past_char (&p
, ']') == FAIL
)
6306 inst
.error
= _("']' expected");
6313 /* Parse the operands of a Neon VMOV instruction. See do_neon_mov for more
6314 information on the types the operands can take and how they are encoded.
6315 Up to four operands may be read; this function handles setting the
6316 ".present" field for each read operand itself.
6317 Updates STR and WHICH_OPERAND if parsing is successful and returns SUCCESS,
6318 else returns FAIL. */
6321 parse_neon_mov (char **str
, int *which_operand
)
6323 int i
= *which_operand
, val
;
6324 enum arm_reg_type rtype
;
6326 struct neon_type_el optype
;
6328 if ((val
= parse_scalar (&ptr
, 8, &optype
)) != FAIL
)
6330 /* Case 4: VMOV<c><q>.<size> <Dn[x]>, <Rd>. */
6331 inst
.operands
[i
].reg
= val
;
6332 inst
.operands
[i
].isscalar
= 1;
6333 inst
.operands
[i
].vectype
= optype
;
6334 inst
.operands
[i
++].present
= 1;
6336 if (skip_past_comma (&ptr
) == FAIL
)
6339 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) == FAIL
)
6342 inst
.operands
[i
].reg
= val
;
6343 inst
.operands
[i
].isreg
= 1;
6344 inst
.operands
[i
].present
= 1;
6346 else if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_NSDQ
, &rtype
, &optype
))
6349 /* Cases 0, 1, 2, 3, 5 (D only). */
6350 if (skip_past_comma (&ptr
) == FAIL
)
6353 inst
.operands
[i
].reg
= val
;
6354 inst
.operands
[i
].isreg
= 1;
6355 inst
.operands
[i
].isquad
= (rtype
== REG_TYPE_NQ
);
6356 inst
.operands
[i
].issingle
= (rtype
== REG_TYPE_VFS
);
6357 inst
.operands
[i
].isvec
= 1;
6358 inst
.operands
[i
].vectype
= optype
;
6359 inst
.operands
[i
++].present
= 1;
6361 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) != FAIL
)
6363 /* Case 5: VMOV<c><q> <Dm>, <Rd>, <Rn>.
6364 Case 13: VMOV <Sd>, <Rm> */
6365 inst
.operands
[i
].reg
= val
;
6366 inst
.operands
[i
].isreg
= 1;
6367 inst
.operands
[i
].present
= 1;
6369 if (rtype
== REG_TYPE_NQ
)
6371 first_error (_("can't use Neon quad register here"));
6374 else if (rtype
!= REG_TYPE_VFS
)
6377 if (skip_past_comma (&ptr
) == FAIL
)
6379 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) == FAIL
)
6381 inst
.operands
[i
].reg
= val
;
6382 inst
.operands
[i
].isreg
= 1;
6383 inst
.operands
[i
].present
= 1;
6386 else if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_NSDQ
, &rtype
,
6389 /* Case 0: VMOV<c><q> <Qd>, <Qm>
6390 Case 1: VMOV<c><q> <Dd>, <Dm>
6391 Case 8: VMOV.F32 <Sd>, <Sm>
6392 Case 15: VMOV <Sd>, <Se>, <Rn>, <Rm> */
6394 inst
.operands
[i
].reg
= val
;
6395 inst
.operands
[i
].isreg
= 1;
6396 inst
.operands
[i
].isquad
= (rtype
== REG_TYPE_NQ
);
6397 inst
.operands
[i
].issingle
= (rtype
== REG_TYPE_VFS
);
6398 inst
.operands
[i
].isvec
= 1;
6399 inst
.operands
[i
].vectype
= optype
;
6400 inst
.operands
[i
].present
= 1;
6402 if (skip_past_comma (&ptr
) == SUCCESS
)
6407 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) == FAIL
)
6410 inst
.operands
[i
].reg
= val
;
6411 inst
.operands
[i
].isreg
= 1;
6412 inst
.operands
[i
++].present
= 1;
6414 if (skip_past_comma (&ptr
) == FAIL
)
6417 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) == FAIL
)
6420 inst
.operands
[i
].reg
= val
;
6421 inst
.operands
[i
].isreg
= 1;
6422 inst
.operands
[i
].present
= 1;
6425 else if (parse_qfloat_immediate (&ptr
, &inst
.operands
[i
].imm
) == SUCCESS
)
6426 /* Case 2: VMOV<c><q>.<dt> <Qd>, #<float-imm>
6427 Case 3: VMOV<c><q>.<dt> <Dd>, #<float-imm>
6428 Case 10: VMOV.F32 <Sd>, #<imm>
6429 Case 11: VMOV.F64 <Dd>, #<imm> */
6430 inst
.operands
[i
].immisfloat
= 1;
6431 else if (parse_big_immediate (&ptr
, i
, NULL
, /*allow_symbol_p=*/FALSE
)
6433 /* Case 2: VMOV<c><q>.<dt> <Qd>, #<imm>
6434 Case 3: VMOV<c><q>.<dt> <Dd>, #<imm> */
6438 first_error (_("expected <Rm> or <Dm> or <Qm> operand"));
6442 else if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) != FAIL
)
6445 inst
.operands
[i
].reg
= val
;
6446 inst
.operands
[i
].isreg
= 1;
6447 inst
.operands
[i
++].present
= 1;
6449 if (skip_past_comma (&ptr
) == FAIL
)
6452 if ((val
= parse_scalar (&ptr
, 8, &optype
)) != FAIL
)
6454 /* Case 6: VMOV<c><q>.<dt> <Rd>, <Dn[x]> */
6455 inst
.operands
[i
].reg
= val
;
6456 inst
.operands
[i
].isscalar
= 1;
6457 inst
.operands
[i
].present
= 1;
6458 inst
.operands
[i
].vectype
= optype
;
6460 else if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) != FAIL
)
6462 /* Case 7: VMOV<c><q> <Rd>, <Rn>, <Dm> */
6463 inst
.operands
[i
].reg
= val
;
6464 inst
.operands
[i
].isreg
= 1;
6465 inst
.operands
[i
++].present
= 1;
6467 if (skip_past_comma (&ptr
) == FAIL
)
6470 if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_VFSD
, &rtype
, &optype
))
6473 first_error (_(reg_expected_msgs
[REG_TYPE_VFSD
]));
6477 inst
.operands
[i
].reg
= val
;
6478 inst
.operands
[i
].isreg
= 1;
6479 inst
.operands
[i
].isvec
= 1;
6480 inst
.operands
[i
].issingle
= (rtype
== REG_TYPE_VFS
);
6481 inst
.operands
[i
].vectype
= optype
;
6482 inst
.operands
[i
].present
= 1;
6484 if (rtype
== REG_TYPE_VFS
)
6488 if (skip_past_comma (&ptr
) == FAIL
)
6490 if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_VFS
, NULL
,
6493 first_error (_(reg_expected_msgs
[REG_TYPE_VFS
]));
6496 inst
.operands
[i
].reg
= val
;
6497 inst
.operands
[i
].isreg
= 1;
6498 inst
.operands
[i
].isvec
= 1;
6499 inst
.operands
[i
].issingle
= 1;
6500 inst
.operands
[i
].vectype
= optype
;
6501 inst
.operands
[i
].present
= 1;
6504 else if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_VFS
, NULL
, &optype
))
6508 inst
.operands
[i
].reg
= val
;
6509 inst
.operands
[i
].isreg
= 1;
6510 inst
.operands
[i
].isvec
= 1;
6511 inst
.operands
[i
].issingle
= 1;
6512 inst
.operands
[i
].vectype
= optype
;
6513 inst
.operands
[i
].present
= 1;
6518 first_error (_("parse error"));
6522 /* Successfully parsed the operands. Update args. */
6528 first_error (_("expected comma"));
6532 first_error (_(reg_expected_msgs
[REG_TYPE_RN
]));
6536 /* Use this macro when the operand constraints are different
6537 for ARM and THUMB (e.g. ldrd). */
6538 #define MIX_ARM_THUMB_OPERANDS(arm_operand, thumb_operand) \
6539 ((arm_operand) | ((thumb_operand) << 16))
6541 /* Matcher codes for parse_operands. */
6542 enum operand_parse_code
6544 OP_stop
, /* end of line */
6546 OP_RR
, /* ARM register */
6547 OP_RRnpc
, /* ARM register, not r15 */
6548 OP_RRnpcsp
, /* ARM register, neither r15 nor r13 (a.k.a. 'BadReg') */
6549 OP_RRnpcb
, /* ARM register, not r15, in square brackets */
6550 OP_RRnpctw
, /* ARM register, not r15 in Thumb-state or with writeback,
6551 optional trailing ! */
6552 OP_RRw
, /* ARM register, not r15, optional trailing ! */
6553 OP_RCP
, /* Coprocessor number */
6554 OP_RCN
, /* Coprocessor register */
6555 OP_RF
, /* FPA register */
6556 OP_RVS
, /* VFP single precision register */
6557 OP_RVD
, /* VFP double precision register (0..15) */
6558 OP_RND
, /* Neon double precision register (0..31) */
6559 OP_RNQ
, /* Neon quad precision register */
6560 OP_RVSD
, /* VFP single or double precision register */
6561 OP_RNSD
, /* Neon single or double precision register */
6562 OP_RNDQ
, /* Neon double or quad precision register */
6563 OP_RNSDQ
, /* Neon single, double or quad precision register */
6564 OP_RNSC
, /* Neon scalar D[X] */
6565 OP_RVC
, /* VFP control register */
6566 OP_RMF
, /* Maverick F register */
6567 OP_RMD
, /* Maverick D register */
6568 OP_RMFX
, /* Maverick FX register */
6569 OP_RMDX
, /* Maverick DX register */
6570 OP_RMAX
, /* Maverick AX register */
6571 OP_RMDS
, /* Maverick DSPSC register */
6572 OP_RIWR
, /* iWMMXt wR register */
6573 OP_RIWC
, /* iWMMXt wC register */
6574 OP_RIWG
, /* iWMMXt wCG register */
6575 OP_RXA
, /* XScale accumulator register */
6577 /* New operands for Armv8.1-M Mainline. */
6578 OP_LR
, /* ARM LR register */
6579 OP_RRnpcsp_I32
, /* ARM register (no BadReg) or literal 1 .. 32 */
6581 OP_REGLST
, /* ARM register list */
6582 OP_CLRMLST
, /* CLRM register list */
6583 OP_VRSLST
, /* VFP single-precision register list */
6584 OP_VRDLST
, /* VFP double-precision register list */
6585 OP_VRSDLST
, /* VFP single or double-precision register list (& quad) */
6586 OP_NRDLST
, /* Neon double-precision register list (d0-d31, qN aliases) */
6587 OP_NSTRLST
, /* Neon element/structure list */
6589 OP_RNDQ_I0
, /* Neon D or Q reg, or immediate zero. */
6590 OP_RVSD_I0
, /* VFP S or D reg, or immediate zero. */
6591 OP_RSVD_FI0
, /* VFP S or D reg, or floating point immediate zero. */
6592 OP_RR_RNSC
, /* ARM reg or Neon scalar. */
6593 OP_RNSD_RNSC
, /* Neon S or D reg, or Neon scalar. */
6594 OP_RNSDQ_RNSC
, /* Vector S, D or Q reg, or Neon scalar. */
6595 OP_RNDQ_RNSC
, /* Neon D or Q reg, or Neon scalar. */
6596 OP_RND_RNSC
, /* Neon D reg, or Neon scalar. */
6597 OP_VMOV
, /* Neon VMOV operands. */
6598 OP_RNDQ_Ibig
, /* Neon D or Q reg, or big immediate for logic and VMVN. */
6599 OP_RNDQ_I63b
, /* Neon D or Q reg, or immediate for shift. */
6600 OP_RIWR_I32z
, /* iWMMXt wR register, or immediate 0 .. 32 for iWMMXt2. */
6602 OP_I0
, /* immediate zero */
6603 OP_I7
, /* immediate value 0 .. 7 */
6604 OP_I15
, /* 0 .. 15 */
6605 OP_I16
, /* 1 .. 16 */
6606 OP_I16z
, /* 0 .. 16 */
6607 OP_I31
, /* 0 .. 31 */
6608 OP_I31w
, /* 0 .. 31, optional trailing ! */
6609 OP_I32
, /* 1 .. 32 */
6610 OP_I32z
, /* 0 .. 32 */
6611 OP_I63
, /* 0 .. 63 */
6612 OP_I63s
, /* -64 .. 63 */
6613 OP_I64
, /* 1 .. 64 */
6614 OP_I64z
, /* 0 .. 64 */
6615 OP_I255
, /* 0 .. 255 */
6617 OP_I4b
, /* immediate, prefix optional, 1 .. 4 */
6618 OP_I7b
, /* 0 .. 7 */
6619 OP_I15b
, /* 0 .. 15 */
6620 OP_I31b
, /* 0 .. 31 */
6622 OP_SH
, /* shifter operand */
6623 OP_SHG
, /* shifter operand with possible group relocation */
6624 OP_ADDR
, /* Memory address expression (any mode) */
6625 OP_ADDRGLDR
, /* Mem addr expr (any mode) with possible LDR group reloc */
6626 OP_ADDRGLDRS
, /* Mem addr expr (any mode) with possible LDRS group reloc */
6627 OP_ADDRGLDC
, /* Mem addr expr (any mode) with possible LDC group reloc */
6628 OP_EXP
, /* arbitrary expression */
6629 OP_EXPi
, /* same, with optional immediate prefix */
6630 OP_EXPr
, /* same, with optional relocation suffix */
6631 OP_EXPs
, /* same, with optional non-first operand relocation suffix */
6632 OP_HALF
, /* 0 .. 65535 or low/high reloc. */
6633 OP_IROT1
, /* VCADD rotate immediate: 90, 270. */
6634 OP_IROT2
, /* VCMLA rotate immediate: 0, 90, 180, 270. */
6636 OP_CPSF
, /* CPS flags */
6637 OP_ENDI
, /* Endianness specifier */
6638 OP_wPSR
, /* CPSR/SPSR/APSR mask for msr (writing). */
6639 OP_rPSR
, /* CPSR/SPSR/APSR mask for msr (reading). */
6640 OP_COND
, /* conditional code */
6641 OP_TB
, /* Table branch. */
6643 OP_APSR_RR
, /* ARM register or "APSR_nzcv". */
6645 OP_RRnpc_I0
, /* ARM register or literal 0 */
6646 OP_RR_EXr
, /* ARM register or expression with opt. reloc stuff. */
6647 OP_RR_EXi
, /* ARM register or expression with imm prefix */
6648 OP_RF_IF
, /* FPA register or immediate */
6649 OP_RIWR_RIWC
, /* iWMMXt R or C reg */
6650 OP_RIWC_RIWG
, /* iWMMXt wC or wCG reg */
6652 /* Optional operands. */
6653 OP_oI7b
, /* immediate, prefix optional, 0 .. 7 */
6654 OP_oI31b
, /* 0 .. 31 */
6655 OP_oI32b
, /* 1 .. 32 */
6656 OP_oI32z
, /* 0 .. 32 */
6657 OP_oIffffb
, /* 0 .. 65535 */
6658 OP_oI255c
, /* curly-brace enclosed, 0 .. 255 */
6660 OP_oRR
, /* ARM register */
6661 OP_oLR
, /* ARM LR register */
6662 OP_oRRnpc
, /* ARM register, not the PC */
6663 OP_oRRnpcsp
, /* ARM register, neither the PC nor the SP (a.k.a. BadReg) */
6664 OP_oRRw
, /* ARM register, not r15, optional trailing ! */
6665 OP_oRND
, /* Optional Neon double precision register */
6666 OP_oRNQ
, /* Optional Neon quad precision register */
6667 OP_oRNDQ
, /* Optional Neon double or quad precision register */
6668 OP_oRNSDQ
, /* Optional single, double or quad precision vector register */
6669 OP_oSHll
, /* LSL immediate */
6670 OP_oSHar
, /* ASR immediate */
6671 OP_oSHllar
, /* LSL or ASR immediate */
6672 OP_oROR
, /* ROR 0/8/16/24 */
6673 OP_oBARRIER_I15
, /* Option argument for a barrier instruction. */
6675 /* Some pre-defined mixed (ARM/THUMB) operands. */
6676 OP_RR_npcsp
= MIX_ARM_THUMB_OPERANDS (OP_RR
, OP_RRnpcsp
),
6677 OP_RRnpc_npcsp
= MIX_ARM_THUMB_OPERANDS (OP_RRnpc
, OP_RRnpcsp
),
6678 OP_oRRnpc_npcsp
= MIX_ARM_THUMB_OPERANDS (OP_oRRnpc
, OP_oRRnpcsp
),
6680 OP_FIRST_OPTIONAL
= OP_oI7b
6683 /* Generic instruction operand parser. This does no encoding and no
6684 semantic validation; it merely squirrels values away in the inst
6685 structure. Returns SUCCESS or FAIL depending on whether the
6686 specified grammar matched. */
6688 parse_operands (char *str
, const unsigned int *pattern
, bfd_boolean thumb
)
6690 unsigned const int *upat
= pattern
;
6691 char *backtrack_pos
= 0;
6692 const char *backtrack_error
= 0;
6693 int i
, val
= 0, backtrack_index
= 0;
6694 enum arm_reg_type rtype
;
6695 parse_operand_result result
;
6696 unsigned int op_parse_code
;
6698 #define po_char_or_fail(chr) \
6701 if (skip_past_char (&str, chr) == FAIL) \
6706 #define po_reg_or_fail(regtype) \
6709 val = arm_typed_reg_parse (& str, regtype, & rtype, \
6710 & inst.operands[i].vectype); \
6713 first_error (_(reg_expected_msgs[regtype])); \
6716 inst.operands[i].reg = val; \
6717 inst.operands[i].isreg = 1; \
6718 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
6719 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
6720 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
6721 || rtype == REG_TYPE_VFD \
6722 || rtype == REG_TYPE_NQ); \
6726 #define po_reg_or_goto(regtype, label) \
6729 val = arm_typed_reg_parse (& str, regtype, & rtype, \
6730 & inst.operands[i].vectype); \
6734 inst.operands[i].reg = val; \
6735 inst.operands[i].isreg = 1; \
6736 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
6737 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
6738 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
6739 || rtype == REG_TYPE_VFD \
6740 || rtype == REG_TYPE_NQ); \
6744 #define po_imm_or_fail(min, max, popt) \
6747 if (parse_immediate (&str, &val, min, max, popt) == FAIL) \
6749 inst.operands[i].imm = val; \
6753 #define po_scalar_or_goto(elsz, label) \
6756 val = parse_scalar (& str, elsz, & inst.operands[i].vectype); \
6759 inst.operands[i].reg = val; \
6760 inst.operands[i].isscalar = 1; \
6764 #define po_misc_or_fail(expr) \
6772 #define po_misc_or_fail_no_backtrack(expr) \
6776 if (result == PARSE_OPERAND_FAIL_NO_BACKTRACK) \
6777 backtrack_pos = 0; \
6778 if (result != PARSE_OPERAND_SUCCESS) \
6783 #define po_barrier_or_imm(str) \
6786 val = parse_barrier (&str); \
6787 if (val == FAIL && ! ISALPHA (*str)) \
6790 /* ISB can only take SY as an option. */ \
6791 || ((inst.instruction & 0xf0) == 0x60 \
6794 inst.error = _("invalid barrier type"); \
6795 backtrack_pos = 0; \
6801 skip_whitespace (str
);
6803 for (i
= 0; upat
[i
] != OP_stop
; i
++)
6805 op_parse_code
= upat
[i
];
6806 if (op_parse_code
>= 1<<16)
6807 op_parse_code
= thumb
? (op_parse_code
>> 16)
6808 : (op_parse_code
& ((1<<16)-1));
6810 if (op_parse_code
>= OP_FIRST_OPTIONAL
)
6812 /* Remember where we are in case we need to backtrack. */
6813 gas_assert (!backtrack_pos
);
6814 backtrack_pos
= str
;
6815 backtrack_error
= inst
.error
;
6816 backtrack_index
= i
;
6819 if (i
> 0 && (i
> 1 || inst
.operands
[0].present
))
6820 po_char_or_fail (',');
6822 switch (op_parse_code
)
6832 case OP_RR
: po_reg_or_fail (REG_TYPE_RN
); break;
6833 case OP_RCP
: po_reg_or_fail (REG_TYPE_CP
); break;
6834 case OP_RCN
: po_reg_or_fail (REG_TYPE_CN
); break;
6835 case OP_RF
: po_reg_or_fail (REG_TYPE_FN
); break;
6836 case OP_RVS
: po_reg_or_fail (REG_TYPE_VFS
); break;
6837 case OP_RVD
: po_reg_or_fail (REG_TYPE_VFD
); break;
6839 case OP_RND
: po_reg_or_fail (REG_TYPE_VFD
); break;
6841 po_reg_or_goto (REG_TYPE_VFC
, coproc_reg
);
6843 /* Also accept generic coprocessor regs for unknown registers. */
6845 po_reg_or_fail (REG_TYPE_CN
);
6847 case OP_RMF
: po_reg_or_fail (REG_TYPE_MVF
); break;
6848 case OP_RMD
: po_reg_or_fail (REG_TYPE_MVD
); break;
6849 case OP_RMFX
: po_reg_or_fail (REG_TYPE_MVFX
); break;
6850 case OP_RMDX
: po_reg_or_fail (REG_TYPE_MVDX
); break;
6851 case OP_RMAX
: po_reg_or_fail (REG_TYPE_MVAX
); break;
6852 case OP_RMDS
: po_reg_or_fail (REG_TYPE_DSPSC
); break;
6853 case OP_RIWR
: po_reg_or_fail (REG_TYPE_MMXWR
); break;
6854 case OP_RIWC
: po_reg_or_fail (REG_TYPE_MMXWC
); break;
6855 case OP_RIWG
: po_reg_or_fail (REG_TYPE_MMXWCG
); break;
6856 case OP_RXA
: po_reg_or_fail (REG_TYPE_XSCALE
); break;
6858 case OP_RNQ
: po_reg_or_fail (REG_TYPE_NQ
); break;
6859 case OP_RNSD
: po_reg_or_fail (REG_TYPE_NSD
); break;
6861 case OP_RNDQ
: po_reg_or_fail (REG_TYPE_NDQ
); break;
6862 case OP_RVSD
: po_reg_or_fail (REG_TYPE_VFSD
); break;
6864 case OP_RNSDQ
: po_reg_or_fail (REG_TYPE_NSDQ
); break;
6866 /* Neon scalar. Using an element size of 8 means that some invalid
6867 scalars are accepted here, so deal with those in later code. */
6868 case OP_RNSC
: po_scalar_or_goto (8, failure
); break;
6872 po_reg_or_goto (REG_TYPE_NDQ
, try_imm0
);
6875 po_imm_or_fail (0, 0, TRUE
);
6880 po_reg_or_goto (REG_TYPE_VFSD
, try_imm0
);
6885 po_reg_or_goto (REG_TYPE_VFSD
, try_ifimm0
);
6888 if (parse_ifimm_zero (&str
))
6889 inst
.operands
[i
].imm
= 0;
6893 = _("only floating point zero is allowed as immediate value");
6901 po_scalar_or_goto (8, try_rr
);
6904 po_reg_or_fail (REG_TYPE_RN
);
6910 po_scalar_or_goto (8, try_nsdq
);
6913 po_reg_or_fail (REG_TYPE_NSDQ
);
6919 po_scalar_or_goto (8, try_s_scalar
);
6922 po_scalar_or_goto (4, try_nsd
);
6925 po_reg_or_fail (REG_TYPE_NSD
);
6931 po_scalar_or_goto (8, try_ndq
);
6934 po_reg_or_fail (REG_TYPE_NDQ
);
6940 po_scalar_or_goto (8, try_vfd
);
6943 po_reg_or_fail (REG_TYPE_VFD
);
6948 /* WARNING: parse_neon_mov can move the operand counter, i. If we're
6949 not careful then bad things might happen. */
6950 po_misc_or_fail (parse_neon_mov (&str
, &i
) == FAIL
);
6955 po_reg_or_goto (REG_TYPE_NDQ
, try_immbig
);
6958 /* There's a possibility of getting a 64-bit immediate here, so
6959 we need special handling. */
6960 if (parse_big_immediate (&str
, i
, NULL
, /*allow_symbol_p=*/FALSE
)
6963 inst
.error
= _("immediate value is out of range");
6971 po_reg_or_goto (REG_TYPE_NDQ
, try_shimm
);
6974 po_imm_or_fail (0, 63, TRUE
);
6979 po_char_or_fail ('[');
6980 po_reg_or_fail (REG_TYPE_RN
);
6981 po_char_or_fail (']');
6987 po_reg_or_fail (REG_TYPE_RN
);
6988 if (skip_past_char (&str
, '!') == SUCCESS
)
6989 inst
.operands
[i
].writeback
= 1;
6993 case OP_I7
: po_imm_or_fail ( 0, 7, FALSE
); break;
6994 case OP_I15
: po_imm_or_fail ( 0, 15, FALSE
); break;
6995 case OP_I16
: po_imm_or_fail ( 1, 16, FALSE
); break;
6996 case OP_I16z
: po_imm_or_fail ( 0, 16, FALSE
); break;
6997 case OP_I31
: po_imm_or_fail ( 0, 31, FALSE
); break;
6998 case OP_I32
: po_imm_or_fail ( 1, 32, FALSE
); break;
6999 case OP_I32z
: po_imm_or_fail ( 0, 32, FALSE
); break;
7000 case OP_I63s
: po_imm_or_fail (-64, 63, FALSE
); break;
7001 case OP_I63
: po_imm_or_fail ( 0, 63, FALSE
); break;
7002 case OP_I64
: po_imm_or_fail ( 1, 64, FALSE
); break;
7003 case OP_I64z
: po_imm_or_fail ( 0, 64, FALSE
); break;
7004 case OP_I255
: po_imm_or_fail ( 0, 255, FALSE
); break;
7006 case OP_I4b
: po_imm_or_fail ( 1, 4, TRUE
); break;
7008 case OP_I7b
: po_imm_or_fail ( 0, 7, TRUE
); break;
7009 case OP_I15b
: po_imm_or_fail ( 0, 15, TRUE
); break;
7011 case OP_I31b
: po_imm_or_fail ( 0, 31, TRUE
); break;
7012 case OP_oI32b
: po_imm_or_fail ( 1, 32, TRUE
); break;
7013 case OP_oI32z
: po_imm_or_fail ( 0, 32, TRUE
); break;
7014 case OP_oIffffb
: po_imm_or_fail ( 0, 0xffff, TRUE
); break;
7016 /* Immediate variants */
7018 po_char_or_fail ('{');
7019 po_imm_or_fail (0, 255, TRUE
);
7020 po_char_or_fail ('}');
7024 /* The expression parser chokes on a trailing !, so we have
7025 to find it first and zap it. */
7028 while (*s
&& *s
!= ',')
7033 inst
.operands
[i
].writeback
= 1;
7035 po_imm_or_fail (0, 31, TRUE
);
7043 po_misc_or_fail (my_get_expression (&inst
.relocs
[0].exp
, &str
,
7048 po_misc_or_fail (my_get_expression (&inst
.relocs
[0].exp
, &str
,
7053 po_misc_or_fail (my_get_expression (&inst
.relocs
[0].exp
, &str
,
7055 if (inst
.relocs
[0].exp
.X_op
== O_symbol
)
7057 val
= parse_reloc (&str
);
7060 inst
.error
= _("unrecognized relocation suffix");
7063 else if (val
!= BFD_RELOC_UNUSED
)
7065 inst
.operands
[i
].imm
= val
;
7066 inst
.operands
[i
].hasreloc
= 1;
7072 po_misc_or_fail (my_get_expression (&inst
.relocs
[i
].exp
, &str
,
7074 if (inst
.relocs
[i
].exp
.X_op
== O_symbol
)
7076 inst
.operands
[i
].hasreloc
= 1;
7078 else if (inst
.relocs
[i
].exp
.X_op
== O_constant
)
7080 inst
.operands
[i
].imm
= inst
.relocs
[i
].exp
.X_add_number
;
7081 inst
.operands
[i
].hasreloc
= 0;
7085 /* Operand for MOVW or MOVT. */
7087 po_misc_or_fail (parse_half (&str
));
7090 /* Register or expression. */
7091 case OP_RR_EXr
: po_reg_or_goto (REG_TYPE_RN
, EXPr
); break;
7092 case OP_RR_EXi
: po_reg_or_goto (REG_TYPE_RN
, EXPi
); break;
7094 /* Register or immediate. */
7095 case OP_RRnpc_I0
: po_reg_or_goto (REG_TYPE_RN
, I0
); break;
7096 I0
: po_imm_or_fail (0, 0, FALSE
); break;
7098 case OP_RF_IF
: po_reg_or_goto (REG_TYPE_FN
, IF
); break;
7100 if (!is_immediate_prefix (*str
))
7103 val
= parse_fpa_immediate (&str
);
7106 /* FPA immediates are encoded as registers 8-15.
7107 parse_fpa_immediate has already applied the offset. */
7108 inst
.operands
[i
].reg
= val
;
7109 inst
.operands
[i
].isreg
= 1;
7112 case OP_RIWR_I32z
: po_reg_or_goto (REG_TYPE_MMXWR
, I32z
); break;
7113 I32z
: po_imm_or_fail (0, 32, FALSE
); break;
7115 /* Two kinds of register. */
7118 struct reg_entry
*rege
= arm_reg_parse_multi (&str
);
7120 || (rege
->type
!= REG_TYPE_MMXWR
7121 && rege
->type
!= REG_TYPE_MMXWC
7122 && rege
->type
!= REG_TYPE_MMXWCG
))
7124 inst
.error
= _("iWMMXt data or control register expected");
7127 inst
.operands
[i
].reg
= rege
->number
;
7128 inst
.operands
[i
].isreg
= (rege
->type
== REG_TYPE_MMXWR
);
7134 struct reg_entry
*rege
= arm_reg_parse_multi (&str
);
7136 || (rege
->type
!= REG_TYPE_MMXWC
7137 && rege
->type
!= REG_TYPE_MMXWCG
))
7139 inst
.error
= _("iWMMXt control register expected");
7142 inst
.operands
[i
].reg
= rege
->number
;
7143 inst
.operands
[i
].isreg
= 1;
7148 case OP_CPSF
: val
= parse_cps_flags (&str
); break;
7149 case OP_ENDI
: val
= parse_endian_specifier (&str
); break;
7150 case OP_oROR
: val
= parse_ror (&str
); break;
7151 case OP_COND
: val
= parse_cond (&str
); break;
7152 case OP_oBARRIER_I15
:
7153 po_barrier_or_imm (str
); break;
7155 if (parse_immediate (&str
, &val
, 0, 15, TRUE
) == FAIL
)
7161 po_reg_or_goto (REG_TYPE_RNB
, try_psr
);
7162 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_virt
))
7164 inst
.error
= _("Banked registers are not available with this "
7170 val
= parse_psr (&str
, op_parse_code
== OP_wPSR
);
7174 po_reg_or_goto (REG_TYPE_RN
, try_apsr
);
7177 /* Parse "APSR_nvzc" operand (for FMSTAT-equivalent MRS
7179 if (strncasecmp (str
, "APSR_", 5) == 0)
7186 case 'c': found
= (found
& 1) ? 16 : found
| 1; break;
7187 case 'n': found
= (found
& 2) ? 16 : found
| 2; break;
7188 case 'z': found
= (found
& 4) ? 16 : found
| 4; break;
7189 case 'v': found
= (found
& 8) ? 16 : found
| 8; break;
7190 default: found
= 16;
7194 inst
.operands
[i
].isvec
= 1;
7195 /* APSR_nzcv is encoded in instructions as if it were the REG_PC. */
7196 inst
.operands
[i
].reg
= REG_PC
;
7203 po_misc_or_fail (parse_tb (&str
));
7206 /* Register lists. */
7208 val
= parse_reg_list (&str
, REGLIST_RN
);
7211 inst
.operands
[i
].writeback
= 1;
7217 val
= parse_reg_list (&str
, REGLIST_CLRM
);
7221 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
, REGLIST_VFP_S
);
7225 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
, REGLIST_VFP_D
);
7229 /* Allow Q registers too. */
7230 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
,
7235 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
,
7237 inst
.operands
[i
].issingle
= 1;
7242 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
,
7247 val
= parse_neon_el_struct_list (&str
, &inst
.operands
[i
].reg
,
7248 &inst
.operands
[i
].vectype
);
7251 /* Addressing modes */
7253 po_misc_or_fail (parse_address (&str
, i
));
7257 po_misc_or_fail_no_backtrack (
7258 parse_address_group_reloc (&str
, i
, GROUP_LDR
));
7262 po_misc_or_fail_no_backtrack (
7263 parse_address_group_reloc (&str
, i
, GROUP_LDRS
));
7267 po_misc_or_fail_no_backtrack (
7268 parse_address_group_reloc (&str
, i
, GROUP_LDC
));
7272 po_misc_or_fail (parse_shifter_operand (&str
, i
));
7276 po_misc_or_fail_no_backtrack (
7277 parse_shifter_operand_group_reloc (&str
, i
));
7281 po_misc_or_fail (parse_shift (&str
, i
, SHIFT_LSL_IMMEDIATE
));
7285 po_misc_or_fail (parse_shift (&str
, i
, SHIFT_ASR_IMMEDIATE
));
7289 po_misc_or_fail (parse_shift (&str
, i
, SHIFT_LSL_OR_ASR_IMMEDIATE
));
7293 as_fatal (_("unhandled operand code %d"), op_parse_code
);
7296 /* Various value-based sanity checks and shared operations. We
7297 do not signal immediate failures for the register constraints;
7298 this allows a syntax error to take precedence. */
7299 switch (op_parse_code
)
7307 if (inst
.operands
[i
].isreg
&& inst
.operands
[i
].reg
== REG_PC
)
7308 inst
.error
= BAD_PC
;
7313 if (inst
.operands
[i
].isreg
)
7315 if (inst
.operands
[i
].reg
== REG_PC
)
7316 inst
.error
= BAD_PC
;
7317 else if (inst
.operands
[i
].reg
== REG_SP
7318 /* The restriction on Rd/Rt/Rt2 on Thumb mode has been
7319 relaxed since ARMv8-A. */
7320 && !ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8
))
7323 inst
.error
= BAD_SP
;
7329 if (inst
.operands
[i
].isreg
7330 && inst
.operands
[i
].reg
== REG_PC
7331 && (inst
.operands
[i
].writeback
|| thumb
))
7332 inst
.error
= BAD_PC
;
7341 case OP_oBARRIER_I15
:
7351 inst
.operands
[i
].imm
= val
;
7356 if (inst
.operands
[i
].reg
!= REG_LR
)
7357 inst
.error
= _("operand must be LR register");
7364 /* If we get here, this operand was successfully parsed. */
7365 inst
.operands
[i
].present
= 1;
7369 inst
.error
= BAD_ARGS
;
7374 /* The parse routine should already have set inst.error, but set a
7375 default here just in case. */
7377 inst
.error
= _("syntax error");
7381 /* Do not backtrack over a trailing optional argument that
7382 absorbed some text. We will only fail again, with the
7383 'garbage following instruction' error message, which is
7384 probably less helpful than the current one. */
7385 if (backtrack_index
== i
&& backtrack_pos
!= str
7386 && upat
[i
+1] == OP_stop
)
7389 inst
.error
= _("syntax error");
7393 /* Try again, skipping the optional argument at backtrack_pos. */
7394 str
= backtrack_pos
;
7395 inst
.error
= backtrack_error
;
7396 inst
.operands
[backtrack_index
].present
= 0;
7397 i
= backtrack_index
;
7401 /* Check that we have parsed all the arguments. */
7402 if (*str
!= '\0' && !inst
.error
)
7403 inst
.error
= _("garbage following instruction");
7405 return inst
.error
? FAIL
: SUCCESS
;
7408 #undef po_char_or_fail
7409 #undef po_reg_or_fail
7410 #undef po_reg_or_goto
7411 #undef po_imm_or_fail
7412 #undef po_scalar_or_fail
7413 #undef po_barrier_or_imm
7415 /* Shorthand macro for instruction encoding functions issuing errors. */
7416 #define constraint(expr, err) \
7427 /* Reject "bad registers" for Thumb-2 instructions. Many Thumb-2
7428 instructions are unpredictable if these registers are used. This
7429 is the BadReg predicate in ARM's Thumb-2 documentation.
7431 Before ARMv8-A, REG_PC and REG_SP were not allowed in quite a few
7432 places, while the restriction on REG_SP was relaxed since ARMv8-A. */
7433 #define reject_bad_reg(reg) \
7435 if (reg == REG_PC) \
7437 inst.error = BAD_PC; \
7440 else if (reg == REG_SP \
7441 && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8)) \
7443 inst.error = BAD_SP; \
7448 /* If REG is R13 (the stack pointer), warn that its use is
7450 #define warn_deprecated_sp(reg) \
7452 if (warn_on_deprecated && reg == REG_SP) \
7453 as_tsktsk (_("use of r13 is deprecated")); \
7456 /* Functions for operand encoding. ARM, then Thumb. */
/* Rotate the 32-bit value V left by N bit positions (N taken modulo 32).
   Arguments are fully parenthesized so that expression arguments such as
   rotate_left (val, i + 2) expand correctly; note that V and N are still
   evaluated more than once, so they must be side-effect free.  */
#define rotate_left(v, n) ((v) << ((n) & 31) | (v) >> ((32 - (n)) & 31))
7460 /* If the current inst is scalar ARMv8.2 fp16 instruction, do special encoding.
7462 The only binary encoding difference is the Coprocessor number. Coprocessor
7463 9 is used for half-precision calculations or conversions. The format of the
7464 instruction is the same as the equivalent Coprocessor 10 instruction that
7465 exists for Single-Precision operation. */
7468 do_scalar_fp16_v82_encode (void)
7470 if (inst
.cond
!= COND_ALWAYS
)
7471 as_warn (_("ARMv8.2 scalar fp16 instruction cannot be conditional,"
7472 " the behaviour is UNPREDICTABLE"));
7473 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_fp16
),
7476 inst
.instruction
= (inst
.instruction
& 0xfffff0ff) | 0x900;
7477 mark_feature_used (&arm_ext_fp16
);
7480 /* If VAL can be encoded in the immediate field of an ARM instruction,
7481 return the encoded form. Otherwise, return FAIL. */
7484 encode_arm_immediate (unsigned int val
)
7491 for (i
= 2; i
< 32; i
+= 2)
7492 if ((a
= rotate_left (val
, i
)) <= 0xff)
7493 return a
| (i
<< 7); /* 12-bit pack: [shift-cnt,const]. */
7498 /* If VAL can be encoded in the immediate field of a Thumb32 instruction,
7499 return the encoded form. Otherwise, return FAIL. */
7501 encode_thumb32_immediate (unsigned int val
)
7508 for (i
= 1; i
<= 24; i
++)
7511 if ((val
& ~(0xff << i
)) == 0)
7512 return ((val
>> i
) & 0x7f) | ((32 - i
) << 7);
7516 if (val
== ((a
<< 16) | a
))
7518 if (val
== ((a
<< 24) | (a
<< 16) | (a
<< 8) | a
))
7522 if (val
== ((a
<< 16) | a
))
7523 return 0x200 | (a
>> 8);
7527 /* Encode a VFP SP or DP register number into inst.instruction. */
7530 encode_arm_vfp_reg (int reg
, enum vfp_reg_pos pos
)
7532 if ((pos
== VFP_REG_Dd
|| pos
== VFP_REG_Dn
|| pos
== VFP_REG_Dm
)
7535 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_d32
))
7538 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
7541 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
,
7546 first_error (_("D register out of range for selected VFP version"));
7554 inst
.instruction
|= ((reg
>> 1) << 12) | ((reg
& 1) << 22);
7558 inst
.instruction
|= ((reg
>> 1) << 16) | ((reg
& 1) << 7);
7562 inst
.instruction
|= ((reg
>> 1) << 0) | ((reg
& 1) << 5);
7566 inst
.instruction
|= ((reg
& 15) << 12) | ((reg
>> 4) << 22);
7570 inst
.instruction
|= ((reg
& 15) << 16) | ((reg
>> 4) << 7);
7574 inst
.instruction
|= (reg
& 15) | ((reg
>> 4) << 5);
7582 /* Encode a <shift> in an ARM-format instruction. The immediate,
7583 if any, is handled by md_apply_fix. */
7585 encode_arm_shift (int i
)
7587 /* register-shifted register. */
7588 if (inst
.operands
[i
].immisreg
)
7591 for (op_index
= 0; op_index
<= i
; ++op_index
)
7593 /* Check the operand only when it's presented. In pre-UAL syntax,
7594 if the destination register is the same as the first operand, two
7595 register form of the instruction can be used. */
7596 if (inst
.operands
[op_index
].present
&& inst
.operands
[op_index
].isreg
7597 && inst
.operands
[op_index
].reg
== REG_PC
)
7598 as_warn (UNPRED_REG ("r15"));
7601 if (inst
.operands
[i
].imm
== REG_PC
)
7602 as_warn (UNPRED_REG ("r15"));
7605 if (inst
.operands
[i
].shift_kind
== SHIFT_RRX
)
7606 inst
.instruction
|= SHIFT_ROR
<< 5;
7609 inst
.instruction
|= inst
.operands
[i
].shift_kind
<< 5;
7610 if (inst
.operands
[i
].immisreg
)
7612 inst
.instruction
|= SHIFT_BY_REG
;
7613 inst
.instruction
|= inst
.operands
[i
].imm
<< 8;
7616 inst
.relocs
[0].type
= BFD_RELOC_ARM_SHIFT_IMM
;
7621 encode_arm_shifter_operand (int i
)
7623 if (inst
.operands
[i
].isreg
)
7625 inst
.instruction
|= inst
.operands
[i
].reg
;
7626 encode_arm_shift (i
);
7630 inst
.instruction
|= INST_IMMEDIATE
;
7631 if (inst
.relocs
[0].type
!= BFD_RELOC_ARM_IMMEDIATE
)
7632 inst
.instruction
|= inst
.operands
[i
].imm
;
7636 /* Subroutine of encode_arm_addr_mode_2 and encode_arm_addr_mode_3. */
7638 encode_arm_addr_mode_common (int i
, bfd_boolean is_t
)
7641 Generate an error if the operand is not a register. */
7642 constraint (!inst
.operands
[i
].isreg
,
7643 _("Instruction does not support =N addresses"));
7645 inst
.instruction
|= inst
.operands
[i
].reg
<< 16;
7647 if (inst
.operands
[i
].preind
)
7651 inst
.error
= _("instruction does not accept preindexed addressing");
7654 inst
.instruction
|= PRE_INDEX
;
7655 if (inst
.operands
[i
].writeback
)
7656 inst
.instruction
|= WRITE_BACK
;
7659 else if (inst
.operands
[i
].postind
)
7661 gas_assert (inst
.operands
[i
].writeback
);
7663 inst
.instruction
|= WRITE_BACK
;
7665 else /* unindexed - only for coprocessor */
7667 inst
.error
= _("instruction does not accept unindexed addressing");
7671 if (((inst
.instruction
& WRITE_BACK
) || !(inst
.instruction
& PRE_INDEX
))
7672 && (((inst
.instruction
& 0x000f0000) >> 16)
7673 == ((inst
.instruction
& 0x0000f000) >> 12)))
7674 as_warn ((inst
.instruction
& LOAD_BIT
)
7675 ? _("destination register same as write-back base")
7676 : _("source register same as write-back base"));
7679 /* inst.operands[i] was set up by parse_address. Encode it into an
7680 ARM-format mode 2 load or store instruction. If is_t is true,
7681 reject forms that cannot be used with a T instruction (i.e. not
7684 encode_arm_addr_mode_2 (int i
, bfd_boolean is_t
)
7686 const bfd_boolean is_pc
= (inst
.operands
[i
].reg
== REG_PC
);
7688 encode_arm_addr_mode_common (i
, is_t
);
7690 if (inst
.operands
[i
].immisreg
)
7692 constraint ((inst
.operands
[i
].imm
== REG_PC
7693 || (is_pc
&& inst
.operands
[i
].writeback
)),
7695 inst
.instruction
|= INST_IMMEDIATE
; /* yes, this is backwards */
7696 inst
.instruction
|= inst
.operands
[i
].imm
;
7697 if (!inst
.operands
[i
].negative
)
7698 inst
.instruction
|= INDEX_UP
;
7699 if (inst
.operands
[i
].shifted
)
7701 if (inst
.operands
[i
].shift_kind
== SHIFT_RRX
)
7702 inst
.instruction
|= SHIFT_ROR
<< 5;
7705 inst
.instruction
|= inst
.operands
[i
].shift_kind
<< 5;
7706 inst
.relocs
[0].type
= BFD_RELOC_ARM_SHIFT_IMM
;
7710 else /* immediate offset in inst.relocs[0] */
7712 if (is_pc
&& !inst
.relocs
[0].pc_rel
)
7714 const bfd_boolean is_load
= ((inst
.instruction
& LOAD_BIT
) != 0);
7716 /* If is_t is TRUE, it's called from do_ldstt. ldrt/strt
7717 cannot use PC in addressing.
7718 PC cannot be used in writeback addressing, either. */
7719 constraint ((is_t
|| inst
.operands
[i
].writeback
),
7722 /* Use of PC in str is deprecated for ARMv7. */
7723 if (warn_on_deprecated
7725 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v7
))
7726 as_tsktsk (_("use of PC in this instruction is deprecated"));
7729 if (inst
.relocs
[0].type
== BFD_RELOC_UNUSED
)
7731 /* Prefer + for zero encoded value. */
7732 if (!inst
.operands
[i
].negative
)
7733 inst
.instruction
|= INDEX_UP
;
7734 inst
.relocs
[0].type
= BFD_RELOC_ARM_OFFSET_IMM
;
7739 /* inst.operands[i] was set up by parse_address. Encode it into an
7740 ARM-format mode 3 load or store instruction. Reject forms that
7741 cannot be used with such instructions. If is_t is true, reject
7742 forms that cannot be used with a T instruction (i.e. not
7745 encode_arm_addr_mode_3 (int i
, bfd_boolean is_t
)
7747 if (inst
.operands
[i
].immisreg
&& inst
.operands
[i
].shifted
)
7749 inst
.error
= _("instruction does not accept scaled register index");
7753 encode_arm_addr_mode_common (i
, is_t
);
7755 if (inst
.operands
[i
].immisreg
)
7757 constraint ((inst
.operands
[i
].imm
== REG_PC
7758 || (is_t
&& inst
.operands
[i
].reg
== REG_PC
)),
7760 constraint (inst
.operands
[i
].reg
== REG_PC
&& inst
.operands
[i
].writeback
,
7762 inst
.instruction
|= inst
.operands
[i
].imm
;
7763 if (!inst
.operands
[i
].negative
)
7764 inst
.instruction
|= INDEX_UP
;
7766 else /* immediate offset in inst.relocs[0] */
7768 constraint ((inst
.operands
[i
].reg
== REG_PC
&& !inst
.relocs
[0].pc_rel
7769 && inst
.operands
[i
].writeback
),
7771 inst
.instruction
|= HWOFFSET_IMM
;
7772 if (inst
.relocs
[0].type
== BFD_RELOC_UNUSED
)
7774 /* Prefer + for zero encoded value. */
7775 if (!inst
.operands
[i
].negative
)
7776 inst
.instruction
|= INDEX_UP
;
7778 inst
.relocs
[0].type
= BFD_RELOC_ARM_OFFSET_IMM8
;
7783 /* Write immediate bits [7:0] to the following locations:
7785 |28/24|23 19|18 16|15 4|3 0|
7786 | a |x x x x x|b c d|x x x x x x x x x x x x|e f g h|
7788 This function is used by VMOV/VMVN/VORR/VBIC. */
7791 neon_write_immbits (unsigned immbits
)
7793 inst
.instruction
|= immbits
& 0xf;
7794 inst
.instruction
|= ((immbits
>> 4) & 0x7) << 16;
7795 inst
.instruction
|= ((immbits
>> 7) & 0x1) << (thumb_mode
? 28 : 24);
/* Invert low-order SIZE bits of XHI:XLO.  Either pointer may be NULL, in
   which case that half is treated as zero and not written back.  */

static void
neon_invert_size (unsigned *xlo, unsigned *xhi, int size)
{
  unsigned immlo = xlo ? *xlo : 0;
  unsigned immhi = xhi ? *xhi : 0;

  switch (size)
    {
    case 8:
      immlo = (~immlo) & 0xff;
      break;

    case 16:
      immlo = (~immlo) & 0xffff;
      break;

    case 64:
      immhi = (~immhi) & 0xffffffff;
      /* fall through.  */

    case 32:
      immlo = (~immlo) & 0xffffffff;
      break;

    default:
      abort ();
    }

  if (xlo)
    *xlo = immlo;

  if (xhi)
    *xhi = immhi;
}
/* True if IMM has form 0bAAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD for bits
   A, B, C, D -- i.e. every byte is either 0x00 or 0xff.  */

static int
neon_bits_same_in_bytes (unsigned imm)
{
  int byte;

  for (byte = 0; byte < 4; byte++)
    {
      unsigned mask = 0xffu << (byte * 8);

      if ((imm & mask) != 0 && (imm & mask) != mask)
	return 0;
    }

  return 1;
}
/* For an immediate of the above form, collapse each byte to its low bit,
   returning 0bABCD.  */

static unsigned
neon_squash_bits (unsigned imm)
{
  unsigned result = 0;
  int byte;

  /* Bit 0 of byte N becomes bit N of the result.  */
  for (byte = 0; byte < 4; byte++)
    result |= ((imm >> (byte * 8)) & 1) << byte;

  return result;
}
/* Compress quarter-float representation to 0b...000 abcdefgh.
   IMM is a single-precision bit pattern; the result packs its sign into
   bit 7 and the exponent lsb + top mantissa bits into bits 6:0.  */

static unsigned
neon_qfloat_bits (unsigned imm)
{
  unsigned low_bits = (imm >> 19) & 0x7f;	/* Exponent lsb + mantissa.  */
  unsigned sign_bit = (imm >> 24) & 0x80;	/* Sign, moved to bit 7.  */

  return sign_bit | low_bits;
}
7864 /* Returns CMODE. IMMBITS [7:0] is set to bits suitable for inserting into
7865 the instruction. *OP is passed as the initial value of the op field, and
7866 may be set to a different value depending on the constant (i.e.
7867 "MOV I64, 0bAAAAAAAABBBB..." which uses OP = 1 despite being MOV not
7868 MVN). If the immediate looks like a repeated pattern then also
7869 try smaller element sizes. */
7872 neon_cmode_for_move_imm (unsigned immlo
, unsigned immhi
, int float_p
,
7873 unsigned *immbits
, int *op
, int size
,
7874 enum neon_el_type type
)
7876 /* Only permit float immediates (including 0.0/-0.0) if the operand type is
7878 if (type
== NT_float
&& !float_p
)
7881 if (type
== NT_float
&& is_quarter_float (immlo
) && immhi
== 0)
7883 if (size
!= 32 || *op
== 1)
7885 *immbits
= neon_qfloat_bits (immlo
);
7891 if (neon_bits_same_in_bytes (immhi
)
7892 && neon_bits_same_in_bytes (immlo
))
7896 *immbits
= (neon_squash_bits (immhi
) << 4)
7897 | neon_squash_bits (immlo
);
7908 if (immlo
== (immlo
& 0x000000ff))
7913 else if (immlo
== (immlo
& 0x0000ff00))
7915 *immbits
= immlo
>> 8;
7918 else if (immlo
== (immlo
& 0x00ff0000))
7920 *immbits
= immlo
>> 16;
7923 else if (immlo
== (immlo
& 0xff000000))
7925 *immbits
= immlo
>> 24;
7928 else if (immlo
== ((immlo
& 0x0000ff00) | 0x000000ff))
7930 *immbits
= (immlo
>> 8) & 0xff;
7933 else if (immlo
== ((immlo
& 0x00ff0000) | 0x0000ffff))
7935 *immbits
= (immlo
>> 16) & 0xff;
7939 if ((immlo
& 0xffff) != (immlo
>> 16))
7946 if (immlo
== (immlo
& 0x000000ff))
7951 else if (immlo
== (immlo
& 0x0000ff00))
7953 *immbits
= immlo
>> 8;
7957 if ((immlo
& 0xff) != (immlo
>> 8))
7962 if (immlo
== (immlo
& 0x000000ff))
7964 /* Don't allow MVN with 8-bit immediate. */
7974 #if defined BFD_HOST_64_BIT
7975 /* Returns TRUE if double precision value V may be cast
7976 to single precision without loss of accuracy. */
7979 is_double_a_single (bfd_int64_t v
)
7981 int exp
= (int)((v
>> 52) & 0x7FF);
7982 bfd_int64_t mantissa
= (v
& (bfd_int64_t
)0xFFFFFFFFFFFFFULL
);
7984 return (exp
== 0 || exp
== 0x7FF
7985 || (exp
>= 1023 - 126 && exp
<= 1023 + 127))
7986 && (mantissa
& 0x1FFFFFFFl
) == 0;
7989 /* Returns a double precision value casted to single precision
7990 (ignoring the least significant bits in exponent and mantissa). */
7993 double_to_single (bfd_int64_t v
)
7995 int sign
= (int) ((v
>> 63) & 1l);
7996 int exp
= (int) ((v
>> 52) & 0x7FF);
7997 bfd_int64_t mantissa
= (v
& (bfd_int64_t
)0xFFFFFFFFFFFFFULL
);
8003 exp
= exp
- 1023 + 127;
8012 /* No denormalized numbers. */
8018 return (sign
<< 31) | (exp
<< 23) | mantissa
;
8020 #endif /* BFD_HOST_64_BIT */
8029 static void do_vfp_nsyn_opcode (const char *);
8031 /* inst.relocs[0].exp describes an "=expr" load pseudo-operation.
8032 Determine whether it can be performed with a move instruction; if
8033 it can, convert inst.instruction to that move instruction and
8034 return TRUE; if it can't, convert inst.instruction to a literal-pool
8035 load and return FALSE. If this is not a valid thing to do in the
8036 current context, set inst.error and return TRUE.
8038 inst.operands[i] describes the destination register. */
8041 move_or_literal_pool (int i
, enum lit_type t
, bfd_boolean mode_3
)
8044 bfd_boolean thumb_p
= (t
== CONST_THUMB
);
8045 bfd_boolean arm_p
= (t
== CONST_ARM
);
8048 tbit
= (inst
.instruction
> 0xffff) ? THUMB2_LOAD_BIT
: THUMB_LOAD_BIT
;
8052 if ((inst
.instruction
& tbit
) == 0)
8054 inst
.error
= _("invalid pseudo operation");
8058 if (inst
.relocs
[0].exp
.X_op
!= O_constant
8059 && inst
.relocs
[0].exp
.X_op
!= O_symbol
8060 && inst
.relocs
[0].exp
.X_op
!= O_big
)
8062 inst
.error
= _("constant expression expected");
8066 if (inst
.relocs
[0].exp
.X_op
== O_constant
8067 || inst
.relocs
[0].exp
.X_op
== O_big
)
8069 #if defined BFD_HOST_64_BIT
8074 if (inst
.relocs
[0].exp
.X_op
== O_big
)
8076 LITTLENUM_TYPE w
[X_PRECISION
];
8079 if (inst
.relocs
[0].exp
.X_add_number
== -1)
8081 gen_to_words (w
, X_PRECISION
, E_PRECISION
);
8083 /* FIXME: Should we check words w[2..5] ? */
8088 #if defined BFD_HOST_64_BIT
8090 ((((((((bfd_int64_t
) l
[3] & LITTLENUM_MASK
)
8091 << LITTLENUM_NUMBER_OF_BITS
)
8092 | ((bfd_int64_t
) l
[2] & LITTLENUM_MASK
))
8093 << LITTLENUM_NUMBER_OF_BITS
)
8094 | ((bfd_int64_t
) l
[1] & LITTLENUM_MASK
))
8095 << LITTLENUM_NUMBER_OF_BITS
)
8096 | ((bfd_int64_t
) l
[0] & LITTLENUM_MASK
));
8098 v
= ((l
[1] & LITTLENUM_MASK
) << LITTLENUM_NUMBER_OF_BITS
)
8099 | (l
[0] & LITTLENUM_MASK
);
8103 v
= inst
.relocs
[0].exp
.X_add_number
;
8105 if (!inst
.operands
[i
].issingle
)
8109 /* LDR should not use lead in a flag-setting instruction being
8110 chosen so we do not check whether movs can be used. */
8112 if ((ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2
)
8113 || ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2_v8m
))
8114 && inst
.operands
[i
].reg
!= 13
8115 && inst
.operands
[i
].reg
!= 15)
8117 /* Check if on thumb2 it can be done with a mov.w, mvn or
8118 movw instruction. */
8119 unsigned int newimm
;
8120 bfd_boolean isNegated
;
8122 newimm
= encode_thumb32_immediate (v
);
8123 if (newimm
!= (unsigned int) FAIL
)
8127 newimm
= encode_thumb32_immediate (~v
);
8128 if (newimm
!= (unsigned int) FAIL
)
8132 /* The number can be loaded with a mov.w or mvn
8134 if (newimm
!= (unsigned int) FAIL
8135 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2
))
8137 inst
.instruction
= (0xf04f0000 /* MOV.W. */
8138 | (inst
.operands
[i
].reg
<< 8));
8139 /* Change to MOVN. */
8140 inst
.instruction
|= (isNegated
? 0x200000 : 0);
8141 inst
.instruction
|= (newimm
& 0x800) << 15;
8142 inst
.instruction
|= (newimm
& 0x700) << 4;
8143 inst
.instruction
|= (newimm
& 0x0ff);
8146 /* The number can be loaded with a movw instruction. */
8147 else if ((v
& ~0xFFFF) == 0
8148 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2_v8m
))
8150 int imm
= v
& 0xFFFF;
8152 inst
.instruction
= 0xf2400000; /* MOVW. */
8153 inst
.instruction
|= (inst
.operands
[i
].reg
<< 8);
8154 inst
.instruction
|= (imm
& 0xf000) << 4;
8155 inst
.instruction
|= (imm
& 0x0800) << 15;
8156 inst
.instruction
|= (imm
& 0x0700) << 4;
8157 inst
.instruction
|= (imm
& 0x00ff);
8164 int value
= encode_arm_immediate (v
);
8168 /* This can be done with a mov instruction. */
8169 inst
.instruction
&= LITERAL_MASK
;
8170 inst
.instruction
|= INST_IMMEDIATE
| (OPCODE_MOV
<< DATA_OP_SHIFT
);
8171 inst
.instruction
|= value
& 0xfff;
8175 value
= encode_arm_immediate (~ v
);
8178 /* This can be done with a mvn instruction. */
8179 inst
.instruction
&= LITERAL_MASK
;
8180 inst
.instruction
|= INST_IMMEDIATE
| (OPCODE_MVN
<< DATA_OP_SHIFT
);
8181 inst
.instruction
|= value
& 0xfff;
8185 else if (t
== CONST_VEC
&& ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_v1
))
8188 unsigned immbits
= 0;
8189 unsigned immlo
= inst
.operands
[1].imm
;
8190 unsigned immhi
= inst
.operands
[1].regisimm
8191 ? inst
.operands
[1].reg
8192 : inst
.relocs
[0].exp
.X_unsigned
8194 : ((bfd_int64_t
)((int) immlo
)) >> 32;
8195 int cmode
= neon_cmode_for_move_imm (immlo
, immhi
, FALSE
, &immbits
,
8196 &op
, 64, NT_invtype
);
8200 neon_invert_size (&immlo
, &immhi
, 64);
8202 cmode
= neon_cmode_for_move_imm (immlo
, immhi
, FALSE
, &immbits
,
8203 &op
, 64, NT_invtype
);
8208 inst
.instruction
= (inst
.instruction
& VLDR_VMOV_SAME
)
8214 /* Fill other bits in vmov encoding for both thumb and arm. */
8216 inst
.instruction
|= (0x7U
<< 29) | (0xF << 24);
8218 inst
.instruction
|= (0xFU
<< 28) | (0x1 << 25);
8219 neon_write_immbits (immbits
);
8227 /* Check if vldr Rx, =constant could be optimized to vmov Rx, #constant. */
8228 if (inst
.operands
[i
].issingle
8229 && is_quarter_float (inst
.operands
[1].imm
)
8230 && ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v3xd
))
8232 inst
.operands
[1].imm
=
8233 neon_qfloat_bits (v
);
8234 do_vfp_nsyn_opcode ("fconsts");
8238 /* If our host does not support a 64-bit type then we cannot perform
8239 the following optimization. This mean that there will be a
8240 discrepancy between the output produced by an assembler built for
8241 a 32-bit-only host and the output produced from a 64-bit host, but
8242 this cannot be helped. */
8243 #if defined BFD_HOST_64_BIT
8244 else if (!inst
.operands
[1].issingle
8245 && ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v3
))
8247 if (is_double_a_single (v
)
8248 && is_quarter_float (double_to_single (v
)))
8250 inst
.operands
[1].imm
=
8251 neon_qfloat_bits (double_to_single (v
));
8252 do_vfp_nsyn_opcode ("fconstd");
8260 if (add_to_lit_pool ((!inst
.operands
[i
].isvec
8261 || inst
.operands
[i
].issingle
) ? 4 : 8) == FAIL
)
8264 inst
.operands
[1].reg
= REG_PC
;
8265 inst
.operands
[1].isreg
= 1;
8266 inst
.operands
[1].preind
= 1;
8267 inst
.relocs
[0].pc_rel
= 1;
8268 inst
.relocs
[0].type
= (thumb_p
8269 ? BFD_RELOC_ARM_THUMB_OFFSET
8271 ? BFD_RELOC_ARM_HWLITERAL
8272 : BFD_RELOC_ARM_LITERAL
));
8276 /* inst.operands[i] was set up by parse_address. Encode it into an
8277 ARM-format instruction. Reject all forms which cannot be encoded
8278 into a coprocessor load/store instruction. If wb_ok is false,
8279 reject use of writeback; if unind_ok is false, reject use of
8280 unindexed addressing. If reloc_override is not 0, use it instead
8281 of BFD_ARM_CP_OFF_IMM, unless the initial relocation is a group one
8282 (in which case it is preserved). */
8285 encode_arm_cp_address (int i
, int wb_ok
, int unind_ok
, int reloc_override
)
8287 if (!inst
.operands
[i
].isreg
)
8290 if (! inst
.operands
[0].isvec
)
8292 inst
.error
= _("invalid co-processor operand");
8295 if (move_or_literal_pool (0, CONST_VEC
, /*mode_3=*/FALSE
))
8299 inst
.instruction
|= inst
.operands
[i
].reg
<< 16;
8301 gas_assert (!(inst
.operands
[i
].preind
&& inst
.operands
[i
].postind
));
8303 if (!inst
.operands
[i
].preind
&& !inst
.operands
[i
].postind
) /* unindexed */
8305 gas_assert (!inst
.operands
[i
].writeback
);
8308 inst
.error
= _("instruction does not support unindexed addressing");
8311 inst
.instruction
|= inst
.operands
[i
].imm
;
8312 inst
.instruction
|= INDEX_UP
;
8316 if (inst
.operands
[i
].preind
)
8317 inst
.instruction
|= PRE_INDEX
;
8319 if (inst
.operands
[i
].writeback
)
8321 if (inst
.operands
[i
].reg
== REG_PC
)
8323 inst
.error
= _("pc may not be used with write-back");
8328 inst
.error
= _("instruction does not support writeback");
8331 inst
.instruction
|= WRITE_BACK
;
8335 inst
.relocs
[0].type
= (bfd_reloc_code_real_type
) reloc_override
;
8336 else if ((inst
.relocs
[0].type
< BFD_RELOC_ARM_ALU_PC_G0_NC
8337 || inst
.relocs
[0].type
> BFD_RELOC_ARM_LDC_SB_G2
)
8338 && inst
.relocs
[0].type
!= BFD_RELOC_ARM_LDR_PC_G0
)
8341 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_CP_OFF_IMM
;
8343 inst
.relocs
[0].type
= BFD_RELOC_ARM_CP_OFF_IMM
;
8346 /* Prefer + for zero encoded value. */
8347 if (!inst
.operands
[i
].negative
)
8348 inst
.instruction
|= INDEX_UP
;
8353 /* Functions for instruction encoding, sorted by sub-architecture.
8354 First some generics; their names are taken from the conventional
8355 bit positions for register arguments in ARM format instructions. */
8365 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8371 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
8377 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8378 inst
.instruction
|= inst
.operands
[1].reg
;
8384 inst
.instruction
|= inst
.operands
[0].reg
;
8385 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8391 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8392 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8398 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
8399 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
8405 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8406 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8410 check_obsolete (const arm_feature_set
*feature
, const char *msg
)
8412 if (ARM_CPU_IS_ANY (cpu_variant
))
8414 as_tsktsk ("%s", msg
);
8417 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, *feature
))
8429 unsigned Rn
= inst
.operands
[2].reg
;
8430 /* Enforce restrictions on SWP instruction. */
8431 if ((inst
.instruction
& 0x0fbfffff) == 0x01000090)
8433 constraint (Rn
== inst
.operands
[0].reg
|| Rn
== inst
.operands
[1].reg
,
8434 _("Rn must not overlap other operands"));
8436 /* SWP{b} is obsolete for ARMv8-A, and deprecated for ARMv6* and ARMv7.
8438 if (!check_obsolete (&arm_ext_v8
,
8439 _("swp{b} use is obsoleted for ARMv8 and later"))
8440 && warn_on_deprecated
8441 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6
))
8442 as_tsktsk (_("swp{b} use is deprecated for ARMv6 and ARMv7"));
8445 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8446 inst
.instruction
|= inst
.operands
[1].reg
;
8447 inst
.instruction
|= Rn
<< 16;
8453 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8454 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8455 inst
.instruction
|= inst
.operands
[2].reg
;
8461 constraint ((inst
.operands
[2].reg
== REG_PC
), BAD_PC
);
8462 constraint (((inst
.relocs
[0].exp
.X_op
!= O_constant
8463 && inst
.relocs
[0].exp
.X_op
!= O_illegal
)
8464 || inst
.relocs
[0].exp
.X_add_number
!= 0),
8466 inst
.instruction
|= inst
.operands
[0].reg
;
8467 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
8468 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
8474 inst
.instruction
|= inst
.operands
[0].imm
;
8480 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8481 encode_arm_cp_address (1, TRUE
, TRUE
, 0);
8484 /* ARM instructions, in alphabetical order by function name (except
8485 that wrapper functions appear immediately after the function they
8488 /* This is a pseudo-op of the form "adr rd, label" to be converted
8489 into a relative address of the form "add rd, pc, #label-.-8". */
8494 inst
.instruction
|= (inst
.operands
[0].reg
<< 12); /* Rd */
8496 /* Frag hacking will turn this into a sub instruction if the offset turns
8497 out to be negative. */
8498 inst
.relocs
[0].type
= BFD_RELOC_ARM_IMMEDIATE
;
8499 inst
.relocs
[0].pc_rel
= 1;
8500 inst
.relocs
[0].exp
.X_add_number
-= 8;
8502 if (support_interwork
8503 && inst
.relocs
[0].exp
.X_op
== O_symbol
8504 && inst
.relocs
[0].exp
.X_add_symbol
!= NULL
8505 && S_IS_DEFINED (inst
.relocs
[0].exp
.X_add_symbol
)
8506 && THUMB_IS_FUNC (inst
.relocs
[0].exp
.X_add_symbol
))
8507 inst
.relocs
[0].exp
.X_add_number
|= 1;
8510 /* This is a pseudo-op of the form "adrl rd, label" to be converted
8511 into a relative address of the form:
8512 add rd, pc, #low(label-.-8)"
8513 add rd, rd, #high(label-.-8)" */
8518 inst
.instruction
|= (inst
.operands
[0].reg
<< 12); /* Rd */
8520 /* Frag hacking will turn this into a sub instruction if the offset turns
8521 out to be negative. */
8522 inst
.relocs
[0].type
= BFD_RELOC_ARM_ADRL_IMMEDIATE
;
8523 inst
.relocs
[0].pc_rel
= 1;
8524 inst
.size
= INSN_SIZE
* 2;
8525 inst
.relocs
[0].exp
.X_add_number
-= 8;
8527 if (support_interwork
8528 && inst
.relocs
[0].exp
.X_op
== O_symbol
8529 && inst
.relocs
[0].exp
.X_add_symbol
!= NULL
8530 && S_IS_DEFINED (inst
.relocs
[0].exp
.X_add_symbol
)
8531 && THUMB_IS_FUNC (inst
.relocs
[0].exp
.X_add_symbol
))
8532 inst
.relocs
[0].exp
.X_add_number
|= 1;
8538 constraint (inst
.relocs
[0].type
>= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
8539 && inst
.relocs
[0].type
<= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
,
8541 if (!inst
.operands
[1].present
)
8542 inst
.operands
[1].reg
= inst
.operands
[0].reg
;
8543 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8544 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8545 encode_arm_shifter_operand (2);
8551 if (inst
.operands
[0].present
)
8552 inst
.instruction
|= inst
.operands
[0].imm
;
8554 inst
.instruction
|= 0xf;
8560 unsigned int msb
= inst
.operands
[1].imm
+ inst
.operands
[2].imm
;
8561 constraint (msb
> 32, _("bit-field extends past end of register"));
8562 /* The instruction encoding stores the LSB and MSB,
8563 not the LSB and width. */
8564 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8565 inst
.instruction
|= inst
.operands
[1].imm
<< 7;
8566 inst
.instruction
|= (msb
- 1) << 16;
8574 /* #0 in second position is alternative syntax for bfc, which is
8575 the same instruction but with REG_PC in the Rm field. */
8576 if (!inst
.operands
[1].isreg
)
8577 inst
.operands
[1].reg
= REG_PC
;
8579 msb
= inst
.operands
[2].imm
+ inst
.operands
[3].imm
;
8580 constraint (msb
> 32, _("bit-field extends past end of register"));
8581 /* The instruction encoding stores the LSB and MSB,
8582 not the LSB and width. */
8583 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8584 inst
.instruction
|= inst
.operands
[1].reg
;
8585 inst
.instruction
|= inst
.operands
[2].imm
<< 7;
8586 inst
.instruction
|= (msb
- 1) << 16;
8592 constraint (inst
.operands
[2].imm
+ inst
.operands
[3].imm
> 32,
8593 _("bit-field extends past end of register"));
8594 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8595 inst
.instruction
|= inst
.operands
[1].reg
;
8596 inst
.instruction
|= inst
.operands
[2].imm
<< 7;
8597 inst
.instruction
|= (inst
.operands
[3].imm
- 1) << 16;
8600 /* ARM V5 breakpoint instruction (argument parse)
8601 BKPT <16 bit unsigned immediate>
8602 Instruction is not conditional.
8603 The bit pattern given in insns[] has the COND_ALWAYS condition,
8604 and it is an error if the caller tried to override that. */
8609 /* Top 12 of 16 bits to bits 19:8. */
8610 inst
.instruction
|= (inst
.operands
[0].imm
& 0xfff0) << 4;
8612 /* Bottom 4 of 16 bits to bits 3:0. */
8613 inst
.instruction
|= inst
.operands
[0].imm
& 0xf;
8617 encode_branch (int default_reloc
)
8619 if (inst
.operands
[0].hasreloc
)
8621 constraint (inst
.operands
[0].imm
!= BFD_RELOC_ARM_PLT32
8622 && inst
.operands
[0].imm
!= BFD_RELOC_ARM_TLS_CALL
,
8623 _("the only valid suffixes here are '(plt)' and '(tlscall)'"));
8624 inst
.relocs
[0].type
= inst
.operands
[0].imm
== BFD_RELOC_ARM_PLT32
8625 ? BFD_RELOC_ARM_PLT32
8626 : thumb_mode
? BFD_RELOC_ARM_THM_TLS_CALL
: BFD_RELOC_ARM_TLS_CALL
;
8629 inst
.relocs
[0].type
= (bfd_reloc_code_real_type
) default_reloc
;
8630 inst
.relocs
[0].pc_rel
= 1;
8637 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
)
8638 encode_branch (BFD_RELOC_ARM_PCREL_JUMP
);
8641 encode_branch (BFD_RELOC_ARM_PCREL_BRANCH
);
8648 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
)
8650 if (inst
.cond
== COND_ALWAYS
)
8651 encode_branch (BFD_RELOC_ARM_PCREL_CALL
);
8653 encode_branch (BFD_RELOC_ARM_PCREL_JUMP
);
8657 encode_branch (BFD_RELOC_ARM_PCREL_BRANCH
);
8660 /* ARM V5 branch-link-exchange instruction (argument parse)
8661 BLX <target_addr> ie BLX(1)
8662 BLX{<condition>} <Rm> ie BLX(2)
8663 Unfortunately, there are two different opcodes for this mnemonic.
8664 So, the insns[].value is not used, and the code here zaps values
8665 into inst.instruction.
8666 Also, the <target_addr> can be 25 bits, hence has its own reloc. */
8671 if (inst
.operands
[0].isreg
)
8673 /* Arg is a register; the opcode provided by insns[] is correct.
8674 It is not illegal to do "blx pc", just useless. */
8675 if (inst
.operands
[0].reg
== REG_PC
)
8676 as_tsktsk (_("use of r15 in blx in ARM mode is not really useful"));
8678 inst
.instruction
|= inst
.operands
[0].reg
;
8682 /* Arg is an address; this instruction cannot be executed
8683 conditionally, and the opcode must be adjusted.
8684 We retain the BFD_RELOC_ARM_PCREL_BLX till the very end
8685 where we generate out a BFD_RELOC_ARM_PCREL_CALL instead. */
8686 constraint (inst
.cond
!= COND_ALWAYS
, BAD_COND
);
8687 inst
.instruction
= 0xfa000000;
8688 encode_branch (BFD_RELOC_ARM_PCREL_BLX
);
8695 bfd_boolean want_reloc
;
8697 if (inst
.operands
[0].reg
== REG_PC
)
8698 as_tsktsk (_("use of r15 in bx in ARM mode is not really useful"));
8700 inst
.instruction
|= inst
.operands
[0].reg
;
8701 /* Output R_ARM_V4BX relocations if is an EABI object that looks like
8702 it is for ARMv4t or earlier. */
8703 want_reloc
= !ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5
);
8704 if (!ARM_FEATURE_ZERO (selected_object_arch
)
8705 && !ARM_CPU_HAS_FEATURE (selected_object_arch
, arm_ext_v5
))
8709 if (EF_ARM_EABI_VERSION (meabi_flags
) < EF_ARM_EABI_VER4
)
8714 inst
.relocs
[0].type
= BFD_RELOC_ARM_V4BX
;
8718 /* ARM v5TEJ. Jump to Jazelle code. */
8723 if (inst
.operands
[0].reg
== REG_PC
)
8724 as_tsktsk (_("use of r15 in bxj is not really useful"));
8726 inst
.instruction
|= inst
.operands
[0].reg
;
8729 /* Co-processor data operation:
8730 CDP{cond} <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}
8731 CDP2 <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>} */
8735 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8736 inst
.instruction
|= inst
.operands
[1].imm
<< 20;
8737 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
8738 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
8739 inst
.instruction
|= inst
.operands
[4].reg
;
8740 inst
.instruction
|= inst
.operands
[5].imm
<< 5;
8746 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
8747 encode_arm_shifter_operand (1);
8750 /* Transfer between coprocessor and ARM registers.
8751 MRC{cond} <coproc>, <opcode_1>, <Rd>, <CRn>, <CRm>{, <opcode_2>}
8756 No special properties. */
8758 struct deprecated_coproc_regs_s
8765 arm_feature_set deprecated
;
8766 arm_feature_set obsoleted
;
8767 const char *dep_msg
;
8768 const char *obs_msg
;
8771 #define DEPR_ACCESS_V8 \
8772 N_("This coprocessor register access is deprecated in ARMv8")
8774 /* Table of all deprecated coprocessor registers. */
8775 static struct deprecated_coproc_regs_s deprecated_coproc_regs
[] =
8777 {15, 0, 7, 10, 5, /* CP15DMB. */
8778 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
), ARM_ARCH_NONE
,
8779 DEPR_ACCESS_V8
, NULL
},
8780 {15, 0, 7, 10, 4, /* CP15DSB. */
8781 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
), ARM_ARCH_NONE
,
8782 DEPR_ACCESS_V8
, NULL
},
8783 {15, 0, 7, 5, 4, /* CP15ISB. */
8784 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
), ARM_ARCH_NONE
,
8785 DEPR_ACCESS_V8
, NULL
},
8786 {14, 6, 1, 0, 0, /* TEEHBR. */
8787 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
), ARM_ARCH_NONE
,
8788 DEPR_ACCESS_V8
, NULL
},
8789 {14, 6, 0, 0, 0, /* TEECR. */
8790 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
), ARM_ARCH_NONE
,
8791 DEPR_ACCESS_V8
, NULL
},
8794 #undef DEPR_ACCESS_V8
8796 static const size_t deprecated_coproc_reg_count
=
8797 sizeof (deprecated_coproc_regs
) / sizeof (deprecated_coproc_regs
[0]);
8805 Rd
= inst
.operands
[2].reg
;
8808 if (inst
.instruction
== 0xee000010
8809 || inst
.instruction
== 0xfe000010)
8811 reject_bad_reg (Rd
);
8812 else if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8
))
8814 constraint (Rd
== REG_SP
, BAD_SP
);
8819 if (inst
.instruction
== 0xe000010)
8820 constraint (Rd
== REG_PC
, BAD_PC
);
8823 for (i
= 0; i
< deprecated_coproc_reg_count
; ++i
)
8825 const struct deprecated_coproc_regs_s
*r
=
8826 deprecated_coproc_regs
+ i
;
8828 if (inst
.operands
[0].reg
== r
->cp
8829 && inst
.operands
[1].imm
== r
->opc1
8830 && inst
.operands
[3].reg
== r
->crn
8831 && inst
.operands
[4].reg
== r
->crm
8832 && inst
.operands
[5].imm
== r
->opc2
)
8834 if (! ARM_CPU_IS_ANY (cpu_variant
)
8835 && warn_on_deprecated
8836 && ARM_CPU_HAS_FEATURE (cpu_variant
, r
->deprecated
))
8837 as_tsktsk ("%s", r
->dep_msg
);
8841 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8842 inst
.instruction
|= inst
.operands
[1].imm
<< 21;
8843 inst
.instruction
|= Rd
<< 12;
8844 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
8845 inst
.instruction
|= inst
.operands
[4].reg
;
8846 inst
.instruction
|= inst
.operands
[5].imm
<< 5;
8849 /* Transfer between coprocessor register and pair of ARM registers.
8850 MCRR{cond} <coproc>, <opcode>, <Rd>, <Rn>, <CRm>.
8855 Two XScale instructions are special cases of these:
8857 MAR{cond} acc0, <RdLo>, <RdHi> == MCRR{cond} p0, #0, <RdLo>, <RdHi>, c0
8858 MRA{cond} acc0, <RdLo>, <RdHi> == MRRC{cond} p0, #0, <RdLo>, <RdHi>, c0
8860 Result unpredictable if Rd or Rn is R15. */
8867 Rd
= inst
.operands
[2].reg
;
8868 Rn
= inst
.operands
[3].reg
;
8872 reject_bad_reg (Rd
);
8873 reject_bad_reg (Rn
);
8877 constraint (Rd
== REG_PC
, BAD_PC
);
8878 constraint (Rn
== REG_PC
, BAD_PC
);
8881 /* Only check the MRRC{2} variants. */
8882 if ((inst
.instruction
& 0x0FF00000) == 0x0C500000)
8884 /* If Rd == Rn, error that the operation is
8885 unpredictable (example MRRC p3,#1,r1,r1,c4). */
8886 constraint (Rd
== Rn
, BAD_OVERLAP
);
8889 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8890 inst
.instruction
|= inst
.operands
[1].imm
<< 4;
8891 inst
.instruction
|= Rd
<< 12;
8892 inst
.instruction
|= Rn
<< 16;
8893 inst
.instruction
|= inst
.operands
[4].reg
;
8899 inst
.instruction
|= inst
.operands
[0].imm
<< 6;
8900 if (inst
.operands
[1].present
)
8902 inst
.instruction
|= CPSI_MMOD
;
8903 inst
.instruction
|= inst
.operands
[1].imm
;
8910 inst
.instruction
|= inst
.operands
[0].imm
;
8916 unsigned Rd
, Rn
, Rm
;
8918 Rd
= inst
.operands
[0].reg
;
8919 Rn
= (inst
.operands
[1].present
8920 ? inst
.operands
[1].reg
: Rd
);
8921 Rm
= inst
.operands
[2].reg
;
8923 constraint ((Rd
== REG_PC
), BAD_PC
);
8924 constraint ((Rn
== REG_PC
), BAD_PC
);
8925 constraint ((Rm
== REG_PC
), BAD_PC
);
8927 inst
.instruction
|= Rd
<< 16;
8928 inst
.instruction
|= Rn
<< 0;
8929 inst
.instruction
|= Rm
<< 8;
8935 /* There is no IT instruction in ARM mode. We
8936 process it to do the validation as if in
8937 thumb mode, just in case the code gets
8938 assembled for thumb using the unified syntax. */
8943 set_it_insn_type (IT_INSN
);
8944 now_it
.mask
= (inst
.instruction
& 0xf) | 0x10;
8945 now_it
.cc
= inst
.operands
[0].imm
;
/* If there is only one register in the register list,
   then return its register number.  Otherwise return -1.
   RANGE is a 16-bit register-list bitmask.  */
static int
only_one_reg_in_list (int range)
{
  int i = ffs (range) - 1;

  /* ffs() returns 0 for an empty list, giving i == -1; the original
     code then evaluated (1 << -1), which is undefined behavior.  Guard
     the empty list explicitly: it has no single register either.  */
  return (i < 0 || i > 15 || range != (1 << i)) ? -1 : i;
}
8959 encode_ldmstm(int from_push_pop_mnem
)
8961 int base_reg
= inst
.operands
[0].reg
;
8962 int range
= inst
.operands
[1].imm
;
8965 inst
.instruction
|= base_reg
<< 16;
8966 inst
.instruction
|= range
;
8968 if (inst
.operands
[1].writeback
)
8969 inst
.instruction
|= LDM_TYPE_2_OR_3
;
8971 if (inst
.operands
[0].writeback
)
8973 inst
.instruction
|= WRITE_BACK
;
8974 /* Check for unpredictable uses of writeback. */
8975 if (inst
.instruction
& LOAD_BIT
)
8977 /* Not allowed in LDM type 2. */
8978 if ((inst
.instruction
& LDM_TYPE_2_OR_3
)
8979 && ((range
& (1 << REG_PC
)) == 0))
8980 as_warn (_("writeback of base register is UNPREDICTABLE"));
8981 /* Only allowed if base reg not in list for other types. */
8982 else if (range
& (1 << base_reg
))
8983 as_warn (_("writeback of base register when in register list is UNPREDICTABLE"));
8987 /* Not allowed for type 2. */
8988 if (inst
.instruction
& LDM_TYPE_2_OR_3
)
8989 as_warn (_("writeback of base register is UNPREDICTABLE"));
8990 /* Only allowed if base reg not in list, or first in list. */
8991 else if ((range
& (1 << base_reg
))
8992 && (range
& ((1 << base_reg
) - 1)))
8993 as_warn (_("if writeback register is in list, it must be the lowest reg in the list"));
8997 /* If PUSH/POP has only one register, then use the A2 encoding. */
8998 one_reg
= only_one_reg_in_list (range
);
8999 if (from_push_pop_mnem
&& one_reg
>= 0)
9001 int is_push
= (inst
.instruction
& A_PUSH_POP_OP_MASK
) == A1_OPCODE_PUSH
;
9003 if (is_push
&& one_reg
== 13 /* SP */)
9004 /* PR 22483: The A2 encoding cannot be used when
9005 pushing the stack pointer as this is UNPREDICTABLE. */
9008 inst
.instruction
&= A_COND_MASK
;
9009 inst
.instruction
|= is_push
? A2_OPCODE_PUSH
: A2_OPCODE_POP
;
9010 inst
.instruction
|= one_reg
<< 12;
9017 encode_ldmstm (/*from_push_pop_mnem=*/FALSE
);
9020 /* ARMv5TE load-consecutive (argument parse)
9029 constraint (inst
.operands
[0].reg
% 2 != 0,
9030 _("first transfer register must be even"));
9031 constraint (inst
.operands
[1].present
9032 && inst
.operands
[1].reg
!= inst
.operands
[0].reg
+ 1,
9033 _("can only transfer two consecutive registers"));
9034 constraint (inst
.operands
[0].reg
== REG_LR
, _("r14 not allowed here"));
9035 constraint (!inst
.operands
[2].isreg
, _("'[' expected"));
9037 if (!inst
.operands
[1].present
)
9038 inst
.operands
[1].reg
= inst
.operands
[0].reg
+ 1;
9040 /* encode_arm_addr_mode_3 will diagnose overlap between the base
9041 register and the first register written; we have to diagnose
9042 overlap between the base and the second register written here. */
9044 if (inst
.operands
[2].reg
== inst
.operands
[1].reg
9045 && (inst
.operands
[2].writeback
|| inst
.operands
[2].postind
))
9046 as_warn (_("base register written back, and overlaps "
9047 "second transfer register"));
9049 if (!(inst
.instruction
& V4_STR_BIT
))
9051 /* For an index-register load, the index register must not overlap the
9052 destination (even if not write-back). */
9053 if (inst
.operands
[2].immisreg
9054 && ((unsigned) inst
.operands
[2].imm
== inst
.operands
[0].reg
9055 || (unsigned) inst
.operands
[2].imm
== inst
.operands
[1].reg
))
9056 as_warn (_("index register overlaps transfer register"));
9058 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9059 encode_arm_addr_mode_3 (2, /*is_t=*/FALSE
);
9065 constraint (!inst
.operands
[1].isreg
|| !inst
.operands
[1].preind
9066 || inst
.operands
[1].postind
|| inst
.operands
[1].writeback
9067 || inst
.operands
[1].immisreg
|| inst
.operands
[1].shifted
9068 || inst
.operands
[1].negative
9069 /* This can arise if the programmer has written
9071 or if they have mistakenly used a register name as the last
9074 It is very difficult to distinguish between these two cases
9075 because "rX" might actually be a label. ie the register
9076 name has been occluded by a symbol of the same name. So we
9077 just generate a general 'bad addressing mode' type error
9078 message and leave it up to the programmer to discover the
9079 true cause and fix their mistake. */
9080 || (inst
.operands
[1].reg
== REG_PC
),
9083 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
9084 || inst
.relocs
[0].exp
.X_add_number
!= 0,
9085 _("offset must be zero in ARM encoding"));
9087 constraint ((inst
.operands
[1].reg
== REG_PC
), BAD_PC
);
9089 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9090 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9091 inst
.relocs
[0].type
= BFD_RELOC_UNUSED
;
9097 constraint (inst
.operands
[0].reg
% 2 != 0,
9098 _("even register required"));
9099 constraint (inst
.operands
[1].present
9100 && inst
.operands
[1].reg
!= inst
.operands
[0].reg
+ 1,
9101 _("can only load two consecutive registers"));
9102 /* If op 1 were present and equal to PC, this function wouldn't
9103 have been called in the first place. */
9104 constraint (inst
.operands
[0].reg
== REG_LR
, _("r14 not allowed here"));
9106 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9107 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
9110 /* In both ARM and thumb state 'ldr pc, #imm' with an immediate
9111 which is not a multiple of four is UNPREDICTABLE. */
9113 check_ldr_r15_aligned (void)
9115 constraint (!(inst
.operands
[1].immisreg
)
9116 && (inst
.operands
[0].reg
== REG_PC
9117 && inst
.operands
[1].reg
== REG_PC
9118 && (inst
.relocs
[0].exp
.X_add_number
& 0x3)),
9119 _("ldr to register 15 must be 4-byte aligned"));
9125 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9126 if (!inst
.operands
[1].isreg
)
9127 if (move_or_literal_pool (0, CONST_ARM
, /*mode_3=*/FALSE
))
9129 encode_arm_addr_mode_2 (1, /*is_t=*/FALSE
);
9130 check_ldr_r15_aligned ();
9136 /* ldrt/strt always use post-indexed addressing. Turn [Rn] into [Rn]! and
9138 if (inst
.operands
[1].preind
)
9140 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
9141 || inst
.relocs
[0].exp
.X_add_number
!= 0,
9142 _("this instruction requires a post-indexed address"));
9144 inst
.operands
[1].preind
= 0;
9145 inst
.operands
[1].postind
= 1;
9146 inst
.operands
[1].writeback
= 1;
9148 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9149 encode_arm_addr_mode_2 (1, /*is_t=*/TRUE
);
9152 /* Halfword and signed-byte load/store operations. */
9157 constraint (inst
.operands
[0].reg
== REG_PC
, BAD_PC
);
9158 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9159 if (!inst
.operands
[1].isreg
)
9160 if (move_or_literal_pool (0, CONST_ARM
, /*mode_3=*/TRUE
))
9162 encode_arm_addr_mode_3 (1, /*is_t=*/FALSE
);
9168 /* ldrt/strt always use post-indexed addressing. Turn [Rn] into [Rn]! and
9170 if (inst
.operands
[1].preind
)
9172 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
9173 || inst
.relocs
[0].exp
.X_add_number
!= 0,
9174 _("this instruction requires a post-indexed address"));
9176 inst
.operands
[1].preind
= 0;
9177 inst
.operands
[1].postind
= 1;
9178 inst
.operands
[1].writeback
= 1;
9180 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9181 encode_arm_addr_mode_3 (1, /*is_t=*/TRUE
);
9184 /* Co-processor register load/store.
9185 Format: <LDC|STC>{cond}[L] CP#,CRd,<address> */
9189 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9190 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
9191 encode_arm_cp_address (2, TRUE
, TRUE
, 0);
9197 /* This restriction does not apply to mls (nor to mla in v6 or later). */
9198 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
9199 && !ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6
)
9200 && !(inst
.instruction
& 0x00400000))
9201 as_tsktsk (_("Rd and Rm should be different in mla"));
9203 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9204 inst
.instruction
|= inst
.operands
[1].reg
;
9205 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
9206 inst
.instruction
|= inst
.operands
[3].reg
<< 12;
9212 constraint (inst
.relocs
[0].type
>= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
9213 && inst
.relocs
[0].type
<= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
,
9215 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9216 encode_arm_shifter_operand (1);
9219 /* ARM V6T2 16-bit immediate register load: MOV[WT]{cond} Rd, #<imm16>. */
9226 top
= (inst
.instruction
& 0x00400000) != 0;
9227 constraint (top
&& inst
.relocs
[0].type
== BFD_RELOC_ARM_MOVW
,
9228 _(":lower16: not allowed in this instruction"));
9229 constraint (!top
&& inst
.relocs
[0].type
== BFD_RELOC_ARM_MOVT
,
9230 _(":upper16: not allowed in this instruction"));
9231 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9232 if (inst
.relocs
[0].type
== BFD_RELOC_UNUSED
)
9234 imm
= inst
.relocs
[0].exp
.X_add_number
;
9235 /* The value is in two pieces: 0:11, 16:19. */
9236 inst
.instruction
|= (imm
& 0x00000fff);
9237 inst
.instruction
|= (imm
& 0x0000f000) << 4;
9242 do_vfp_nsyn_mrs (void)
9244 if (inst
.operands
[0].isvec
)
9246 if (inst
.operands
[1].reg
!= 1)
9247 first_error (_("operand 1 must be FPSCR"));
9248 memset (&inst
.operands
[0], '\0', sizeof (inst
.operands
[0]));
9249 memset (&inst
.operands
[1], '\0', sizeof (inst
.operands
[1]));
9250 do_vfp_nsyn_opcode ("fmstat");
9252 else if (inst
.operands
[1].isvec
)
9253 do_vfp_nsyn_opcode ("fmrx");
9261 do_vfp_nsyn_msr (void)
9263 if (inst
.operands
[0].isvec
)
9264 do_vfp_nsyn_opcode ("fmxr");
9274 unsigned Rt
= inst
.operands
[0].reg
;
9276 if (thumb_mode
&& Rt
== REG_SP
)
9278 inst
.error
= BAD_SP
;
9282 /* MVFR2 is only valid at ARMv8-A. */
9283 if (inst
.operands
[1].reg
== 5)
9284 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
9287 /* APSR_ sets isvec. All other refs to PC are illegal. */
9288 if (!inst
.operands
[0].isvec
&& Rt
== REG_PC
)
9290 inst
.error
= BAD_PC
;
9294 /* If we get through parsing the register name, we just insert the number
9295 generated into the instruction without further validation. */
9296 inst
.instruction
|= (inst
.operands
[1].reg
<< 16);
9297 inst
.instruction
|= (Rt
<< 12);
9303 unsigned Rt
= inst
.operands
[1].reg
;
9306 reject_bad_reg (Rt
);
9307 else if (Rt
== REG_PC
)
9309 inst
.error
= BAD_PC
;
9313 /* MVFR2 is only valid for ARMv8-A. */
9314 if (inst
.operands
[0].reg
== 5)
9315 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
9318 /* If we get through parsing the register name, we just insert the number
9319 generated into the instruction without further validation. */
9320 inst
.instruction
|= (inst
.operands
[0].reg
<< 16);
9321 inst
.instruction
|= (Rt
<< 12);
9329 if (do_vfp_nsyn_mrs () == SUCCESS
)
9332 constraint (inst
.operands
[0].reg
== REG_PC
, BAD_PC
);
9333 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9335 if (inst
.operands
[1].isreg
)
9337 br
= inst
.operands
[1].reg
;
9338 if (((br
& 0x200) == 0) && ((br
& 0xf0000) != 0xf0000))
9339 as_bad (_("bad register for mrs"));
9343 /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all. */
9344 constraint ((inst
.operands
[1].imm
& (PSR_c
|PSR_x
|PSR_s
|PSR_f
))
9346 _("'APSR', 'CPSR' or 'SPSR' expected"));
9347 br
= (15<<16) | (inst
.operands
[1].imm
& SPSR_BIT
);
9350 inst
.instruction
|= br
;
9353 /* Two possible forms:
9354 "{C|S}PSR_<field>, Rm",
9355 "{C|S}PSR_f, #expression". */
9360 if (do_vfp_nsyn_msr () == SUCCESS
)
9363 inst
.instruction
|= inst
.operands
[0].imm
;
9364 if (inst
.operands
[1].isreg
)
9365 inst
.instruction
|= inst
.operands
[1].reg
;
9368 inst
.instruction
|= INST_IMMEDIATE
;
9369 inst
.relocs
[0].type
= BFD_RELOC_ARM_IMMEDIATE
;
9370 inst
.relocs
[0].pc_rel
= 0;
9377 constraint (inst
.operands
[2].reg
== REG_PC
, BAD_PC
);
9379 if (!inst
.operands
[2].present
)
9380 inst
.operands
[2].reg
= inst
.operands
[0].reg
;
9381 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9382 inst
.instruction
|= inst
.operands
[1].reg
;
9383 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
9385 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
9386 && !ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6
))
9387 as_tsktsk (_("Rd and Rm should be different in mul"));
9390 /* Long Multiply Parser
9391 UMULL RdLo, RdHi, Rm, Rs
9392 SMULL RdLo, RdHi, Rm, Rs
9393 UMLAL RdLo, RdHi, Rm, Rs
9394 SMLAL RdLo, RdHi, Rm, Rs. */
9399 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9400 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9401 inst
.instruction
|= inst
.operands
[2].reg
;
9402 inst
.instruction
|= inst
.operands
[3].reg
<< 8;
9404 /* rdhi and rdlo must be different. */
9405 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
)
9406 as_tsktsk (_("rdhi and rdlo must be different"));
9408 /* rdhi, rdlo and rm must all be different before armv6. */
9409 if ((inst
.operands
[0].reg
== inst
.operands
[2].reg
9410 || inst
.operands
[1].reg
== inst
.operands
[2].reg
)
9411 && !ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6
))
9412 as_tsktsk (_("rdhi, rdlo and rm must all be different"));
9418 if (inst
.operands
[0].present
9419 || ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6k
))
9421 /* Architectural NOP hints are CPSR sets with no bits selected. */
9422 inst
.instruction
&= 0xf0000000;
9423 inst
.instruction
|= 0x0320f000;
9424 if (inst
.operands
[0].present
)
9425 inst
.instruction
|= inst
.operands
[0].imm
;
9429 /* ARM V6 Pack Halfword Bottom Top instruction (argument parse).
9430 PKHBT {<cond>} <Rd>, <Rn>, <Rm> {, LSL #<shift_imm>}
9431 Condition defaults to COND_ALWAYS.
9432 Error if Rd, Rn or Rm are R15. */
9437 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9438 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9439 inst
.instruction
|= inst
.operands
[2].reg
;
9440 if (inst
.operands
[3].present
)
9441 encode_arm_shift (3);
9444 /* ARM V6 PKHTB (Argument Parse). */
9449 if (!inst
.operands
[3].present
)
9451 /* If the shift specifier is omitted, turn the instruction
9452 into pkhbt rd, rm, rn. */
9453 inst
.instruction
&= 0xfff00010;
9454 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9455 inst
.instruction
|= inst
.operands
[1].reg
;
9456 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
9460 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9461 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9462 inst
.instruction
|= inst
.operands
[2].reg
;
9463 encode_arm_shift (3);
9467 /* ARMv5TE: Preload-Cache
9468 MP Extensions: Preload for write
9472 Syntactically, like LDR with B=1, W=0, L=1. */
9477 constraint (!inst
.operands
[0].isreg
,
9478 _("'[' expected after PLD mnemonic"));
9479 constraint (inst
.operands
[0].postind
,
9480 _("post-indexed expression used in preload instruction"));
9481 constraint (inst
.operands
[0].writeback
,
9482 _("writeback used in preload instruction"));
9483 constraint (!inst
.operands
[0].preind
,
9484 _("unindexed addressing used in preload instruction"));
9485 encode_arm_addr_mode_2 (0, /*is_t=*/FALSE
);
9488 /* ARMv7: PLI <addr_mode> */
9492 constraint (!inst
.operands
[0].isreg
,
9493 _("'[' expected after PLI mnemonic"));
9494 constraint (inst
.operands
[0].postind
,
9495 _("post-indexed expression used in preload instruction"));
9496 constraint (inst
.operands
[0].writeback
,
9497 _("writeback used in preload instruction"));
9498 constraint (!inst
.operands
[0].preind
,
9499 _("unindexed addressing used in preload instruction"));
9500 encode_arm_addr_mode_2 (0, /*is_t=*/FALSE
);
9501 inst
.instruction
&= ~PRE_INDEX
;
9507 constraint (inst
.operands
[0].writeback
,
9508 _("push/pop do not support {reglist}^"));
9509 inst
.operands
[1] = inst
.operands
[0];
9510 memset (&inst
.operands
[0], 0, sizeof inst
.operands
[0]);
9511 inst
.operands
[0].isreg
= 1;
9512 inst
.operands
[0].writeback
= 1;
9513 inst
.operands
[0].reg
= REG_SP
;
9514 encode_ldmstm (/*from_push_pop_mnem=*/TRUE
);
9517 /* ARM V6 RFE (Return from Exception) loads the PC and CPSR from the
9518 word at the specified address and the following word
9520 Unconditionally executed.
9521 Error if Rn is R15. */
9526 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9527 if (inst
.operands
[0].writeback
)
9528 inst
.instruction
|= WRITE_BACK
;
9531 /* ARM V6 ssat (argument parse). */
9536 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9537 inst
.instruction
|= (inst
.operands
[1].imm
- 1) << 16;
9538 inst
.instruction
|= inst
.operands
[2].reg
;
9540 if (inst
.operands
[3].present
)
9541 encode_arm_shift (3);
9544 /* ARM V6 usat (argument parse). */
9549 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9550 inst
.instruction
|= inst
.operands
[1].imm
<< 16;
9551 inst
.instruction
|= inst
.operands
[2].reg
;
9553 if (inst
.operands
[3].present
)
9554 encode_arm_shift (3);
9557 /* ARM V6 ssat16 (argument parse). */
9562 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9563 inst
.instruction
|= ((inst
.operands
[1].imm
- 1) << 16);
9564 inst
.instruction
|= inst
.operands
[2].reg
;
9570 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9571 inst
.instruction
|= inst
.operands
[1].imm
<< 16;
9572 inst
.instruction
|= inst
.operands
[2].reg
;
9575 /* ARM V6 SETEND (argument parse). Sets the E bit in the CPSR while
9576 preserving the other bits.
9578 setend <endian_specifier>, where <endian_specifier> is either
9584 if (warn_on_deprecated
9585 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8
))
9586 as_tsktsk (_("setend use is deprecated for ARMv8"));
9588 if (inst
.operands
[0].imm
)
9589 inst
.instruction
|= 0x200;
9595 unsigned int Rm
= (inst
.operands
[1].present
9596 ? inst
.operands
[1].reg
9597 : inst
.operands
[0].reg
);
9599 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9600 inst
.instruction
|= Rm
;
9601 if (inst
.operands
[2].isreg
) /* Rd, {Rm,} Rs */
9603 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
9604 inst
.instruction
|= SHIFT_BY_REG
;
9605 /* PR 12854: Error on extraneous shifts. */
9606 constraint (inst
.operands
[2].shifted
,
9607 _("extraneous shift as part of operand to shift insn"));
9610 inst
.relocs
[0].type
= BFD_RELOC_ARM_SHIFT_IMM
;
9616 inst
.relocs
[0].type
= BFD_RELOC_ARM_SMC
;
9617 inst
.relocs
[0].pc_rel
= 0;
9623 inst
.relocs
[0].type
= BFD_RELOC_ARM_HVC
;
9624 inst
.relocs
[0].pc_rel
= 0;
9630 inst
.relocs
[0].type
= BFD_RELOC_ARM_SWI
;
9631 inst
.relocs
[0].pc_rel
= 0;
9637 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_pan
),
9638 _("selected processor does not support SETPAN instruction"));
9640 inst
.instruction
|= ((inst
.operands
[0].imm
& 1) << 9);
9646 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_pan
),
9647 _("selected processor does not support SETPAN instruction"));
9649 inst
.instruction
|= (inst
.operands
[0].imm
<< 3);
9652 /* ARM V5E (El Segundo) signed-multiply-accumulate (argument parse)
9653 SMLAxy{cond} Rd,Rm,Rs,Rn
9654 SMLAWy{cond} Rd,Rm,Rs,Rn
9655 Error if any register is R15. */
9660 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9661 inst
.instruction
|= inst
.operands
[1].reg
;
9662 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
9663 inst
.instruction
|= inst
.operands
[3].reg
<< 12;
9666 /* ARM V5E (El Segundo) signed-multiply-accumulate-long (argument parse)
9667 SMLALxy{cond} Rdlo,Rdhi,Rm,Rs
9668 Error if any register is R15.
9669 Warning if Rdlo == Rdhi. */
9674 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9675 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9676 inst
.instruction
|= inst
.operands
[2].reg
;
9677 inst
.instruction
|= inst
.operands
[3].reg
<< 8;
9679 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
)
9680 as_tsktsk (_("rdhi and rdlo must be different"));
9683 /* ARM V5E (El Segundo) signed-multiply (argument parse)
9684 SMULxy{cond} Rd,Rm,Rs
9685 Error if any register is R15. */
9690 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9691 inst
.instruction
|= inst
.operands
[1].reg
;
9692 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
9695 /* ARM V6 srs (argument parse). The variable fields in the encoding are
9696 the same for both ARM and Thumb-2. */
9703 if (inst
.operands
[0].present
)
9705 reg
= inst
.operands
[0].reg
;
9706 constraint (reg
!= REG_SP
, _("SRS base register must be r13"));
9711 inst
.instruction
|= reg
<< 16;
9712 inst
.instruction
|= inst
.operands
[1].imm
;
9713 if (inst
.operands
[0].writeback
|| inst
.operands
[1].writeback
)
9714 inst
.instruction
|= WRITE_BACK
;
9717 /* ARM V6 strex (argument parse). */
9722 constraint (!inst
.operands
[2].isreg
|| !inst
.operands
[2].preind
9723 || inst
.operands
[2].postind
|| inst
.operands
[2].writeback
9724 || inst
.operands
[2].immisreg
|| inst
.operands
[2].shifted
9725 || inst
.operands
[2].negative
9726 /* See comment in do_ldrex(). */
9727 || (inst
.operands
[2].reg
== REG_PC
),
9730 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
9731 || inst
.operands
[0].reg
== inst
.operands
[2].reg
, BAD_OVERLAP
);
9733 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
9734 || inst
.relocs
[0].exp
.X_add_number
!= 0,
9735 _("offset must be zero in ARM encoding"));
9737 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9738 inst
.instruction
|= inst
.operands
[1].reg
;
9739 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
9740 inst
.relocs
[0].type
= BFD_RELOC_UNUSED
;
9746 constraint (!inst
.operands
[2].isreg
|| !inst
.operands
[2].preind
9747 || inst
.operands
[2].postind
|| inst
.operands
[2].writeback
9748 || inst
.operands
[2].immisreg
|| inst
.operands
[2].shifted
9749 || inst
.operands
[2].negative
,
9752 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
9753 || inst
.operands
[0].reg
== inst
.operands
[2].reg
, BAD_OVERLAP
);
9761 constraint (inst
.operands
[1].reg
% 2 != 0,
9762 _("even register required"));
9763 constraint (inst
.operands
[2].present
9764 && inst
.operands
[2].reg
!= inst
.operands
[1].reg
+ 1,
9765 _("can only store two consecutive registers"));
9766 /* If op 2 were present and equal to PC, this function wouldn't
9767 have been called in the first place. */
9768 constraint (inst
.operands
[1].reg
== REG_LR
, _("r14 not allowed here"));
9770 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
9771 || inst
.operands
[0].reg
== inst
.operands
[1].reg
+ 1
9772 || inst
.operands
[0].reg
== inst
.operands
[3].reg
,
9775 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9776 inst
.instruction
|= inst
.operands
[1].reg
;
9777 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
9784 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
9785 || inst
.operands
[0].reg
== inst
.operands
[2].reg
, BAD_OVERLAP
);
9793 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
9794 || inst
.operands
[0].reg
== inst
.operands
[2].reg
, BAD_OVERLAP
);
9799 /* ARM V6 SXTAH extracts a 16-bit value from a register, sign
9800 extends it to 32-bits, and adds the result to a value in another
9801 register. You can specify a rotation by 0, 8, 16, or 24 bits
9802 before extracting the 16-bit value.
9803 SXTAH{<cond>} <Rd>, <Rn>, <Rm>{, <rotation>}
9804 Condition defaults to COND_ALWAYS.
9805 Error if any register uses R15. */
9810 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9811 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9812 inst
.instruction
|= inst
.operands
[2].reg
;
9813 inst
.instruction
|= inst
.operands
[3].imm
<< 10;
9818 SXTH {<cond>} <Rd>, <Rm>{, <rotation>}
9819 Condition defaults to COND_ALWAYS.
9820 Error if any register uses R15. */
9825 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9826 inst
.instruction
|= inst
.operands
[1].reg
;
9827 inst
.instruction
|= inst
.operands
[2].imm
<< 10;
9830 /* VFP instructions. In a logical order: SP variant first, monad
9831 before dyad, arithmetic then move then load/store. */
9834 do_vfp_sp_monadic (void)
9836 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
9837 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sm
);
9841 do_vfp_sp_dyadic (void)
9843 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
9844 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sn
);
9845 encode_arm_vfp_reg (inst
.operands
[2].reg
, VFP_REG_Sm
);
9849 do_vfp_sp_compare_z (void)
9851 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
9855 do_vfp_dp_sp_cvt (void)
9857 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
9858 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sm
);
9862 do_vfp_sp_dp_cvt (void)
9864 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
9865 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dm
);
9869 do_vfp_reg_from_sp (void)
9871 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9872 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sn
);
9876 do_vfp_reg2_from_sp2 (void)
9878 constraint (inst
.operands
[2].imm
!= 2,
9879 _("only two consecutive VFP SP registers allowed here"));
9880 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9881 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9882 encode_arm_vfp_reg (inst
.operands
[2].reg
, VFP_REG_Sm
);
9886 do_vfp_sp_from_reg (void)
9888 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sn
);
9889 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
9893 do_vfp_sp2_from_reg2 (void)
9895 constraint (inst
.operands
[0].imm
!= 2,
9896 _("only two consecutive VFP SP registers allowed here"));
9897 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sm
);
9898 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
9899 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
9903 do_vfp_sp_ldst (void)
9905 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
9906 encode_arm_cp_address (1, FALSE
, TRUE
, 0);
9910 do_vfp_dp_ldst (void)
9912 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
9913 encode_arm_cp_address (1, FALSE
, TRUE
, 0);
9918 vfp_sp_ldstm (enum vfp_ldstm_type ldstm_type
)
9920 if (inst
.operands
[0].writeback
)
9921 inst
.instruction
|= WRITE_BACK
;
9923 constraint (ldstm_type
!= VFP_LDSTMIA
,
9924 _("this addressing mode requires base-register writeback"));
9925 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9926 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sd
);
9927 inst
.instruction
|= inst
.operands
[1].imm
;
9931 vfp_dp_ldstm (enum vfp_ldstm_type ldstm_type
)
9935 if (inst
.operands
[0].writeback
)
9936 inst
.instruction
|= WRITE_BACK
;
9938 constraint (ldstm_type
!= VFP_LDSTMIA
&& ldstm_type
!= VFP_LDSTMIAX
,
9939 _("this addressing mode requires base-register writeback"));
9941 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9942 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dd
);
9944 count
= inst
.operands
[1].imm
<< 1;
9945 if (ldstm_type
== VFP_LDSTMIAX
|| ldstm_type
== VFP_LDSTMDBX
)
9948 inst
.instruction
|= count
;
9952 do_vfp_sp_ldstmia (void)
9954 vfp_sp_ldstm (VFP_LDSTMIA
);
9958 do_vfp_sp_ldstmdb (void)
9960 vfp_sp_ldstm (VFP_LDSTMDB
);
9964 do_vfp_dp_ldstmia (void)
9966 vfp_dp_ldstm (VFP_LDSTMIA
);
9970 do_vfp_dp_ldstmdb (void)
9972 vfp_dp_ldstm (VFP_LDSTMDB
);
9976 do_vfp_xp_ldstmia (void)
9978 vfp_dp_ldstm (VFP_LDSTMIAX
);
9982 do_vfp_xp_ldstmdb (void)
9984 vfp_dp_ldstm (VFP_LDSTMDBX
);
9988 do_vfp_dp_rd_rm (void)
9990 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
9991 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dm
);
9995 do_vfp_dp_rn_rd (void)
9997 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dn
);
9998 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dd
);
10002 do_vfp_dp_rd_rn (void)
10004 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
10005 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dn
);
10009 do_vfp_dp_rd_rn_rm (void)
10011 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
10012 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dn
);
10013 encode_arm_vfp_reg (inst
.operands
[2].reg
, VFP_REG_Dm
);
10017 do_vfp_dp_rd (void)
10019 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
10023 do_vfp_dp_rm_rd_rn (void)
10025 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dm
);
10026 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dd
);
10027 encode_arm_vfp_reg (inst
.operands
[2].reg
, VFP_REG_Dn
);
10030 /* VFPv3 instructions. */
10032 do_vfp_sp_const (void)
10034 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
10035 inst
.instruction
|= (inst
.operands
[1].imm
& 0xf0) << 12;
10036 inst
.instruction
|= (inst
.operands
[1].imm
& 0x0f);
10040 do_vfp_dp_const (void)
10042 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
10043 inst
.instruction
|= (inst
.operands
[1].imm
& 0xf0) << 12;
10044 inst
.instruction
|= (inst
.operands
[1].imm
& 0x0f);
10048 vfp_conv (int srcsize
)
10050 int immbits
= srcsize
- inst
.operands
[1].imm
;
10052 if (srcsize
== 16 && !(immbits
>= 0 && immbits
<= srcsize
))
10054 /* If srcsize is 16, inst.operands[1].imm must be in the range 0-16.
10055 i.e. immbits must be in range 0 - 16. */
10056 inst
.error
= _("immediate value out of range, expected range [0, 16]");
10059 else if (srcsize
== 32 && !(immbits
>= 0 && immbits
< srcsize
))
10061 /* If srcsize is 32, inst.operands[1].imm must be in the range 1-32.
10062 i.e. immbits must be in range 0 - 31. */
10063 inst
.error
= _("immediate value out of range, expected range [1, 32]");
10067 inst
.instruction
|= (immbits
& 1) << 5;
10068 inst
.instruction
|= (immbits
>> 1);
10072 do_vfp_sp_conv_16 (void)
10074 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
10079 do_vfp_dp_conv_16 (void)
10081 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
10086 do_vfp_sp_conv_32 (void)
10088 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
10093 do_vfp_dp_conv_32 (void)
10095 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
10099 /* FPA instructions. Also in a logical order. */
10104 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
10105 inst
.instruction
|= inst
.operands
[1].reg
;
10109 do_fpa_ldmstm (void)
10111 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10112 switch (inst
.operands
[1].imm
)
10114 case 1: inst
.instruction
|= CP_T_X
; break;
10115 case 2: inst
.instruction
|= CP_T_Y
; break;
10116 case 3: inst
.instruction
|= CP_T_Y
| CP_T_X
; break;
10121 if (inst
.instruction
& (PRE_INDEX
| INDEX_UP
))
10123 /* The instruction specified "ea" or "fd", so we can only accept
10124 [Rn]{!}. The instruction does not really support stacking or
10125 unstacking, so we have to emulate these by setting appropriate
10126 bits and offsets. */
10127 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
10128 || inst
.relocs
[0].exp
.X_add_number
!= 0,
10129 _("this instruction does not support indexing"));
10131 if ((inst
.instruction
& PRE_INDEX
) || inst
.operands
[2].writeback
)
10132 inst
.relocs
[0].exp
.X_add_number
= 12 * inst
.operands
[1].imm
;
10134 if (!(inst
.instruction
& INDEX_UP
))
10135 inst
.relocs
[0].exp
.X_add_number
= -inst
.relocs
[0].exp
.X_add_number
;
10137 if (!(inst
.instruction
& PRE_INDEX
) && inst
.operands
[2].writeback
)
10139 inst
.operands
[2].preind
= 0;
10140 inst
.operands
[2].postind
= 1;
10144 encode_arm_cp_address (2, TRUE
, TRUE
, 0);
10147 /* iWMMXt instructions: strictly in alphabetical order. */
10150 do_iwmmxt_tandorc (void)
10152 constraint (inst
.operands
[0].reg
!= REG_PC
, _("only r15 allowed here"));
10156 do_iwmmxt_textrc (void)
10158 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10159 inst
.instruction
|= inst
.operands
[1].imm
;
10163 do_iwmmxt_textrm (void)
10165 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10166 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
10167 inst
.instruction
|= inst
.operands
[2].imm
;
10171 do_iwmmxt_tinsr (void)
10173 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
10174 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
10175 inst
.instruction
|= inst
.operands
[2].imm
;
10179 do_iwmmxt_tmia (void)
10181 inst
.instruction
|= inst
.operands
[0].reg
<< 5;
10182 inst
.instruction
|= inst
.operands
[1].reg
;
10183 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
10187 do_iwmmxt_waligni (void)
10189 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10190 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
10191 inst
.instruction
|= inst
.operands
[2].reg
;
10192 inst
.instruction
|= inst
.operands
[3].imm
<< 20;
10196 do_iwmmxt_wmerge (void)
10198 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10199 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
10200 inst
.instruction
|= inst
.operands
[2].reg
;
10201 inst
.instruction
|= inst
.operands
[3].imm
<< 21;
10205 do_iwmmxt_wmov (void)
10207 /* WMOV rD, rN is an alias for WOR rD, rN, rN. */
10208 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10209 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
10210 inst
.instruction
|= inst
.operands
[1].reg
;
10214 do_iwmmxt_wldstbh (void)
10217 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10219 reloc
= BFD_RELOC_ARM_T32_CP_OFF_IMM_S2
;
10221 reloc
= BFD_RELOC_ARM_CP_OFF_IMM_S2
;
10222 encode_arm_cp_address (1, TRUE
, FALSE
, reloc
);
10226 do_iwmmxt_wldstw (void)
10228 /* RIWR_RIWC clears .isreg for a control register. */
10229 if (!inst
.operands
[0].isreg
)
10231 constraint (inst
.cond
!= COND_ALWAYS
, BAD_COND
);
10232 inst
.instruction
|= 0xf0000000;
10235 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10236 encode_arm_cp_address (1, TRUE
, TRUE
, 0);
10240 do_iwmmxt_wldstd (void)
10242 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10243 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_iwmmxt2
)
10244 && inst
.operands
[1].immisreg
)
10246 inst
.instruction
&= ~0x1a000ff;
10247 inst
.instruction
|= (0xfU
<< 28);
10248 if (inst
.operands
[1].preind
)
10249 inst
.instruction
|= PRE_INDEX
;
10250 if (!inst
.operands
[1].negative
)
10251 inst
.instruction
|= INDEX_UP
;
10252 if (inst
.operands
[1].writeback
)
10253 inst
.instruction
|= WRITE_BACK
;
10254 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
10255 inst
.instruction
|= inst
.relocs
[0].exp
.X_add_number
<< 4;
10256 inst
.instruction
|= inst
.operands
[1].imm
;
10259 encode_arm_cp_address (1, TRUE
, FALSE
, 0);
10263 do_iwmmxt_wshufh (void)
10265 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10266 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
10267 inst
.instruction
|= ((inst
.operands
[2].imm
& 0xf0) << 16);
10268 inst
.instruction
|= (inst
.operands
[2].imm
& 0x0f);
10272 do_iwmmxt_wzero (void)
10274 /* WZERO reg is an alias for WANDN reg, reg, reg. */
10275 inst
.instruction
|= inst
.operands
[0].reg
;
10276 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10277 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
10281 do_iwmmxt_wrwrwr_or_imm5 (void)
10283 if (inst
.operands
[2].isreg
)
10286 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_iwmmxt2
),
10287 _("immediate operand requires iWMMXt2"));
10289 if (inst
.operands
[2].imm
== 0)
10291 switch ((inst
.instruction
>> 20) & 0xf)
10297 /* w...h wrd, wrn, #0 -> wrorh wrd, wrn, #16. */
10298 inst
.operands
[2].imm
= 16;
10299 inst
.instruction
= (inst
.instruction
& 0xff0fffff) | (0x7 << 20);
10305 /* w...w wrd, wrn, #0 -> wrorw wrd, wrn, #32. */
10306 inst
.operands
[2].imm
= 32;
10307 inst
.instruction
= (inst
.instruction
& 0xff0fffff) | (0xb << 20);
10314 /* w...d wrd, wrn, #0 -> wor wrd, wrn, wrn. */
10316 wrn
= (inst
.instruction
>> 16) & 0xf;
10317 inst
.instruction
&= 0xff0fff0f;
10318 inst
.instruction
|= wrn
;
10319 /* Bail out here; the instruction is now assembled. */
10324 /* Map 32 -> 0, etc. */
10325 inst
.operands
[2].imm
&= 0x1f;
10326 inst
.instruction
|= (0xfU
<< 28) | ((inst
.operands
[2].imm
& 0x10) << 4) | (inst
.operands
[2].imm
& 0xf);
10330 /* Cirrus Maverick instructions. Simple 2-, 3-, and 4-register
10331 operations first, then control, shift, and load/store. */
10333 /* Insns like "foo X,Y,Z". */
10336 do_mav_triple (void)
10338 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
10339 inst
.instruction
|= inst
.operands
[1].reg
;
10340 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
10343 /* Insns like "foo W,X,Y,Z".
10344 where W=MVAX[0:3] and X,Y,Z=MVFX[0:15]. */
10349 inst
.instruction
|= inst
.operands
[0].reg
<< 5;
10350 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
10351 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
10352 inst
.instruction
|= inst
.operands
[3].reg
;
10355 /* cfmvsc32<cond> DSPSC,MVDX[15:0]. */
10357 do_mav_dspsc (void)
10359 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
10362 /* Maverick shift immediate instructions.
10363 cfsh32<cond> MVFX[15:0],MVFX[15:0],Shift[6:0].
10364 cfsh64<cond> MVDX[15:0],MVDX[15:0],Shift[6:0]. */
10367 do_mav_shift (void)
10369 int imm
= inst
.operands
[2].imm
;
10371 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10372 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
10374 /* Bits 0-3 of the insn should have bits 0-3 of the immediate.
10375 Bits 5-7 of the insn should have bits 4-6 of the immediate.
10376 Bit 4 should be 0. */
10377 imm
= (imm
& 0xf) | ((imm
& 0x70) << 1);
10379 inst
.instruction
|= imm
;
10382 /* XScale instructions. Also sorted arithmetic before move. */
10384 /* Xscale multiply-accumulate (argument parse)
10387 MIAxycc acc0,Rm,Rs. */
10392 inst
.instruction
|= inst
.operands
[1].reg
;
10393 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
10396 /* Xscale move-accumulator-register (argument parse)
10398 MARcc acc0,RdLo,RdHi. */
10403 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
10404 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
10407 /* Xscale move-register-accumulator (argument parse)
10409 MRAcc RdLo,RdHi,acc0. */
10414 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
, BAD_OVERLAP
);
10415 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10416 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
10419 /* Encoding functions relevant only to Thumb. */
10421 /* inst.operands[i] is a shifted-register operand; encode
10422 it into inst.instruction in the format used by Thumb32. */
10425 encode_thumb32_shifted_operand (int i
)
10427 unsigned int value
= inst
.relocs
[0].exp
.X_add_number
;
10428 unsigned int shift
= inst
.operands
[i
].shift_kind
;
10430 constraint (inst
.operands
[i
].immisreg
,
10431 _("shift by register not allowed in thumb mode"));
10432 inst
.instruction
|= inst
.operands
[i
].reg
;
10433 if (shift
== SHIFT_RRX
)
10434 inst
.instruction
|= SHIFT_ROR
<< 4;
10437 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
,
10438 _("expression too complex"));
10440 constraint (value
> 32
10441 || (value
== 32 && (shift
== SHIFT_LSL
10442 || shift
== SHIFT_ROR
)),
10443 _("shift expression is too large"));
10447 else if (value
== 32)
10450 inst
.instruction
|= shift
<< 4;
10451 inst
.instruction
|= (value
& 0x1c) << 10;
10452 inst
.instruction
|= (value
& 0x03) << 6;
10457 /* inst.operands[i] was set up by parse_address. Encode it into a
10458 Thumb32 format load or store instruction. Reject forms that cannot
10459 be used with such instructions. If is_t is true, reject forms that
10460 cannot be used with a T instruction; if is_d is true, reject forms
10461 that cannot be used with a D instruction. If it is a store insn,
10462 reject PC in Rn. */
10465 encode_thumb32_addr_mode (int i
, bfd_boolean is_t
, bfd_boolean is_d
)
10467 const bfd_boolean is_pc
= (inst
.operands
[i
].reg
== REG_PC
);
10469 constraint (!inst
.operands
[i
].isreg
,
10470 _("Instruction does not support =N addresses"));
10472 inst
.instruction
|= inst
.operands
[i
].reg
<< 16;
10473 if (inst
.operands
[i
].immisreg
)
10475 constraint (is_pc
, BAD_PC_ADDRESSING
);
10476 constraint (is_t
|| is_d
, _("cannot use register index with this instruction"));
10477 constraint (inst
.operands
[i
].negative
,
10478 _("Thumb does not support negative register indexing"));
10479 constraint (inst
.operands
[i
].postind
,
10480 _("Thumb does not support register post-indexing"));
10481 constraint (inst
.operands
[i
].writeback
,
10482 _("Thumb does not support register indexing with writeback"));
10483 constraint (inst
.operands
[i
].shifted
&& inst
.operands
[i
].shift_kind
!= SHIFT_LSL
,
10484 _("Thumb supports only LSL in shifted register indexing"));
10486 inst
.instruction
|= inst
.operands
[i
].imm
;
10487 if (inst
.operands
[i
].shifted
)
10489 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
,
10490 _("expression too complex"));
10491 constraint (inst
.relocs
[0].exp
.X_add_number
< 0
10492 || inst
.relocs
[0].exp
.X_add_number
> 3,
10493 _("shift out of range"));
10494 inst
.instruction
|= inst
.relocs
[0].exp
.X_add_number
<< 4;
10496 inst
.relocs
[0].type
= BFD_RELOC_UNUSED
;
10498 else if (inst
.operands
[i
].preind
)
10500 constraint (is_pc
&& inst
.operands
[i
].writeback
, BAD_PC_WRITEBACK
);
10501 constraint (is_t
&& inst
.operands
[i
].writeback
,
10502 _("cannot use writeback with this instruction"));
10503 constraint (is_pc
&& ((inst
.instruction
& THUMB2_LOAD_BIT
) == 0),
10504 BAD_PC_ADDRESSING
);
10508 inst
.instruction
|= 0x01000000;
10509 if (inst
.operands
[i
].writeback
)
10510 inst
.instruction
|= 0x00200000;
10514 inst
.instruction
|= 0x00000c00;
10515 if (inst
.operands
[i
].writeback
)
10516 inst
.instruction
|= 0x00000100;
10518 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_OFFSET_IMM
;
10520 else if (inst
.operands
[i
].postind
)
10522 gas_assert (inst
.operands
[i
].writeback
);
10523 constraint (is_pc
, _("cannot use post-indexing with PC-relative addressing"));
10524 constraint (is_t
, _("cannot use post-indexing with this instruction"));
10527 inst
.instruction
|= 0x00200000;
10529 inst
.instruction
|= 0x00000900;
10530 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_OFFSET_IMM
;
10532 else /* unindexed - only for coprocessor */
10533 inst
.error
= _("instruction does not accept unindexed addressing");
10536 /* Table of Thumb instructions which exist in both 16- and 32-bit
10537 encodings (the latter only in post-V6T2 cores). The index is the
10538 value used in the insns table below. When there is more than one
10539 possible 16-bit encoding for the instruction, this table always
10541 Also contains several pseudo-instructions used during relaxation. */
10542 #define T16_32_TAB \
10543 X(_adc, 4140, eb400000), \
10544 X(_adcs, 4140, eb500000), \
10545 X(_add, 1c00, eb000000), \
10546 X(_adds, 1c00, eb100000), \
10547 X(_addi, 0000, f1000000), \
10548 X(_addis, 0000, f1100000), \
10549 X(_add_pc,000f, f20f0000), \
10550 X(_add_sp,000d, f10d0000), \
10551 X(_adr, 000f, f20f0000), \
10552 X(_and, 4000, ea000000), \
10553 X(_ands, 4000, ea100000), \
10554 X(_asr, 1000, fa40f000), \
10555 X(_asrs, 1000, fa50f000), \
10556 X(_b, e000, f000b000), \
10557 X(_bcond, d000, f0008000), \
10558 X(_bf, 0000, f040e001), \
10559 X(_bfcsel,0000, f000e001), \
10560 X(_bfx, 0000, f060e001), \
10561 X(_bfl, 0000, f000c001), \
10562 X(_bflx, 0000, f070e001), \
10563 X(_bic, 4380, ea200000), \
10564 X(_bics, 4380, ea300000), \
10565 X(_cmn, 42c0, eb100f00), \
10566 X(_cmp, 2800, ebb00f00), \
10567 X(_cpsie, b660, f3af8400), \
10568 X(_cpsid, b670, f3af8600), \
10569 X(_cpy, 4600, ea4f0000), \
10570 X(_dec_sp,80dd, f1ad0d00), \
10571 X(_dls, 0000, f040e001), \
10572 X(_eor, 4040, ea800000), \
10573 X(_eors, 4040, ea900000), \
10574 X(_inc_sp,00dd, f10d0d00), \
10575 X(_ldmia, c800, e8900000), \
10576 X(_ldr, 6800, f8500000), \
10577 X(_ldrb, 7800, f8100000), \
10578 X(_ldrh, 8800, f8300000), \
10579 X(_ldrsb, 5600, f9100000), \
10580 X(_ldrsh, 5e00, f9300000), \
10581 X(_ldr_pc,4800, f85f0000), \
10582 X(_ldr_pc2,4800, f85f0000), \
10583 X(_ldr_sp,9800, f85d0000), \
10584 X(_le, 0000, f00fc001), \
10585 X(_lsl, 0000, fa00f000), \
10586 X(_lsls, 0000, fa10f000), \
10587 X(_lsr, 0800, fa20f000), \
10588 X(_lsrs, 0800, fa30f000), \
10589 X(_mov, 2000, ea4f0000), \
10590 X(_movs, 2000, ea5f0000), \
10591 X(_mul, 4340, fb00f000), \
10592 X(_muls, 4340, ffffffff), /* no 32b muls */ \
10593 X(_mvn, 43c0, ea6f0000), \
10594 X(_mvns, 43c0, ea7f0000), \
10595 X(_neg, 4240, f1c00000), /* rsb #0 */ \
10596 X(_negs, 4240, f1d00000), /* rsbs #0 */ \
10597 X(_orr, 4300, ea400000), \
10598 X(_orrs, 4300, ea500000), \
10599 X(_pop, bc00, e8bd0000), /* ldmia sp!,... */ \
10600 X(_push, b400, e92d0000), /* stmdb sp!,... */ \
10601 X(_rev, ba00, fa90f080), \
10602 X(_rev16, ba40, fa90f090), \
10603 X(_revsh, bac0, fa90f0b0), \
10604 X(_ror, 41c0, fa60f000), \
10605 X(_rors, 41c0, fa70f000), \
10606 X(_sbc, 4180, eb600000), \
10607 X(_sbcs, 4180, eb700000), \
10608 X(_stmia, c000, e8800000), \
10609 X(_str, 6000, f8400000), \
10610 X(_strb, 7000, f8000000), \
10611 X(_strh, 8000, f8200000), \
10612 X(_str_sp,9000, f84d0000), \
10613 X(_sub, 1e00, eba00000), \
10614 X(_subs, 1e00, ebb00000), \
10615 X(_subi, 8000, f1a00000), \
10616 X(_subis, 8000, f1b00000), \
10617 X(_sxtb, b240, fa4ff080), \
10618 X(_sxth, b200, fa0ff080), \
10619 X(_tst, 4200, ea100f00), \
10620 X(_uxtb, b2c0, fa5ff080), \
10621 X(_uxth, b280, fa1ff080), \
10622 X(_nop, bf00, f3af8000), \
10623 X(_yield, bf10, f3af8001), \
10624 X(_wfe, bf20, f3af8002), \
10625 X(_wfi, bf30, f3af8003), \
10626 X(_wls, 0000, f040c001), \
10627 X(_sev, bf40, f3af8004), \
10628 X(_sevl, bf50, f3af8005), \
10629 X(_udf, de00, f7f0a000)
10631 /* To catch errors in encoding functions, the codes are all offset by
10632 0xF800, putting them in one of the 32-bit prefix ranges, ergo undefined
10633 as 16-bit instructions. */
10634 #define X(a,b,c) T_MNEM##a
10635 enum t16_32_codes
{ T16_32_OFFSET
= 0xF7FF, T16_32_TAB
};
10638 #define X(a,b,c) 0x##b
10639 static const unsigned short thumb_op16
[] = { T16_32_TAB
};
10640 #define THUMB_OP16(n) (thumb_op16[(n) - (T16_32_OFFSET + 1)])
10643 #define X(a,b,c) 0x##c
10644 static const unsigned int thumb_op32
[] = { T16_32_TAB
};
10645 #define THUMB_OP32(n) (thumb_op32[(n) - (T16_32_OFFSET + 1)])
10646 #define THUMB_SETS_FLAGS(n) (THUMB_OP32 (n) & 0x00100000)
10650 /* Thumb instruction encoders, in alphabetical order. */
10652 /* ADDW or SUBW. */
10655 do_t_add_sub_w (void)
10659 Rd
= inst
.operands
[0].reg
;
10660 Rn
= inst
.operands
[1].reg
;
10662 /* If Rn is REG_PC, this is ADR; if Rn is REG_SP, then this
10663 is the SP-{plus,minus}-immediate form of the instruction. */
10665 constraint (Rd
== REG_PC
, BAD_PC
);
10667 reject_bad_reg (Rd
);
10669 inst
.instruction
|= (Rn
<< 16) | (Rd
<< 8);
10670 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_IMM12
;
10673 /* Parse an add or subtract instruction. We get here with inst.instruction
10674 equaling any of THUMB_OPCODE_add, adds, sub, or subs. */
10677 do_t_add_sub (void)
10681 Rd
= inst
.operands
[0].reg
;
10682 Rs
= (inst
.operands
[1].present
10683 ? inst
.operands
[1].reg
/* Rd, Rs, foo */
10684 : inst
.operands
[0].reg
); /* Rd, foo -> Rd, Rd, foo */
10687 set_it_insn_type_last ();
10689 if (unified_syntax
)
10692 bfd_boolean narrow
;
10695 flags
= (inst
.instruction
== T_MNEM_adds
10696 || inst
.instruction
== T_MNEM_subs
);
10698 narrow
= !in_it_block ();
10700 narrow
= in_it_block ();
10701 if (!inst
.operands
[2].isreg
)
10705 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8
))
10706 constraint (Rd
== REG_SP
&& Rs
!= REG_SP
, BAD_SP
);
10708 add
= (inst
.instruction
== T_MNEM_add
10709 || inst
.instruction
== T_MNEM_adds
);
10711 if (inst
.size_req
!= 4)
10713 /* Attempt to use a narrow opcode, with relaxation if
10715 if (Rd
== REG_SP
&& Rs
== REG_SP
&& !flags
)
10716 opcode
= add
? T_MNEM_inc_sp
: T_MNEM_dec_sp
;
10717 else if (Rd
<= 7 && Rs
== REG_SP
&& add
&& !flags
)
10718 opcode
= T_MNEM_add_sp
;
10719 else if (Rd
<= 7 && Rs
== REG_PC
&& add
&& !flags
)
10720 opcode
= T_MNEM_add_pc
;
10721 else if (Rd
<= 7 && Rs
<= 7 && narrow
)
10724 opcode
= add
? T_MNEM_addis
: T_MNEM_subis
;
10726 opcode
= add
? T_MNEM_addi
: T_MNEM_subi
;
10730 inst
.instruction
= THUMB_OP16(opcode
);
10731 inst
.instruction
|= (Rd
<< 4) | Rs
;
10732 if (inst
.relocs
[0].type
< BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
10733 || (inst
.relocs
[0].type
10734 > BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
))
10736 if (inst
.size_req
== 2)
10737 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_ADD
;
10739 inst
.relax
= opcode
;
10743 constraint (inst
.size_req
== 2, BAD_HIREG
);
10745 if (inst
.size_req
== 4
10746 || (inst
.size_req
!= 2 && !opcode
))
10748 constraint ((inst
.relocs
[0].type
10749 >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
)
10750 && (inst
.relocs
[0].type
10751 <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
) ,
10752 THUMB1_RELOC_ONLY
);
10755 constraint (add
, BAD_PC
);
10756 constraint (Rs
!= REG_LR
|| inst
.instruction
!= T_MNEM_subs
,
10757 _("only SUBS PC, LR, #const allowed"));
10758 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
,
10759 _("expression too complex"));
10760 constraint (inst
.relocs
[0].exp
.X_add_number
< 0
10761 || inst
.relocs
[0].exp
.X_add_number
> 0xff,
10762 _("immediate value out of range"));
10763 inst
.instruction
= T2_SUBS_PC_LR
10764 | inst
.relocs
[0].exp
.X_add_number
;
10765 inst
.relocs
[0].type
= BFD_RELOC_UNUSED
;
10768 else if (Rs
== REG_PC
)
10770 /* Always use addw/subw. */
10771 inst
.instruction
= add
? 0xf20f0000 : 0xf2af0000;
10772 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_IMM12
;
10776 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
10777 inst
.instruction
= (inst
.instruction
& 0xe1ffffff)
10780 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
10782 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_ADD_IMM
;
10784 inst
.instruction
|= Rd
<< 8;
10785 inst
.instruction
|= Rs
<< 16;
10790 unsigned int value
= inst
.relocs
[0].exp
.X_add_number
;
10791 unsigned int shift
= inst
.operands
[2].shift_kind
;
10793 Rn
= inst
.operands
[2].reg
;
10794 /* See if we can do this with a 16-bit instruction. */
10795 if (!inst
.operands
[2].shifted
&& inst
.size_req
!= 4)
10797 if (Rd
> 7 || Rs
> 7 || Rn
> 7)
10802 inst
.instruction
= ((inst
.instruction
== T_MNEM_adds
10803 || inst
.instruction
== T_MNEM_add
)
10805 : T_OPCODE_SUB_R3
);
10806 inst
.instruction
|= Rd
| (Rs
<< 3) | (Rn
<< 6);
10810 if (inst
.instruction
== T_MNEM_add
&& (Rd
== Rs
|| Rd
== Rn
))
10812 /* Thumb-1 cores (except v6-M) require at least one high
10813 register in a narrow non flag setting add. */
10814 if (Rd
> 7 || Rn
> 7
10815 || ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6t2
)
10816 || ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_msr
))
10823 inst
.instruction
= T_OPCODE_ADD_HI
;
10824 inst
.instruction
|= (Rd
& 8) << 4;
10825 inst
.instruction
|= (Rd
& 7);
10826 inst
.instruction
|= Rn
<< 3;
10832 constraint (Rd
== REG_PC
, BAD_PC
);
10833 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8
))
10834 constraint (Rd
== REG_SP
&& Rs
!= REG_SP
, BAD_SP
);
10835 constraint (Rs
== REG_PC
, BAD_PC
);
10836 reject_bad_reg (Rn
);
10838 /* If we get here, it can't be done in 16 bits. */
10839 constraint (inst
.operands
[2].shifted
&& inst
.operands
[2].immisreg
,
10840 _("shift must be constant"));
10841 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
10842 inst
.instruction
|= Rd
<< 8;
10843 inst
.instruction
|= Rs
<< 16;
10844 constraint (Rd
== REG_SP
&& Rs
== REG_SP
&& value
> 3,
10845 _("shift value over 3 not allowed in thumb mode"));
10846 constraint (Rd
== REG_SP
&& Rs
== REG_SP
&& shift
!= SHIFT_LSL
,
10847 _("only LSL shift allowed in thumb mode"));
10848 encode_thumb32_shifted_operand (2);
10853 constraint (inst
.instruction
== T_MNEM_adds
10854 || inst
.instruction
== T_MNEM_subs
,
10857 if (!inst
.operands
[2].isreg
) /* Rd, Rs, #imm */
10859 constraint ((Rd
> 7 && (Rd
!= REG_SP
|| Rs
!= REG_SP
))
10860 || (Rs
> 7 && Rs
!= REG_SP
&& Rs
!= REG_PC
),
10863 inst
.instruction
= (inst
.instruction
== T_MNEM_add
10864 ? 0x0000 : 0x8000);
10865 inst
.instruction
|= (Rd
<< 4) | Rs
;
10866 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_ADD
;
10870 Rn
= inst
.operands
[2].reg
;
10871 constraint (inst
.operands
[2].shifted
, _("unshifted register required"));
10873 /* We now have Rd, Rs, and Rn set to registers. */
10874 if (Rd
> 7 || Rs
> 7 || Rn
> 7)
10876 /* Can't do this for SUB. */
10877 constraint (inst
.instruction
== T_MNEM_sub
, BAD_HIREG
);
10878 inst
.instruction
= T_OPCODE_ADD_HI
;
10879 inst
.instruction
|= (Rd
& 8) << 4;
10880 inst
.instruction
|= (Rd
& 7);
10882 inst
.instruction
|= Rn
<< 3;
10884 inst
.instruction
|= Rs
<< 3;
10886 constraint (1, _("dest must overlap one source register"));
10890 inst
.instruction
= (inst
.instruction
== T_MNEM_add
10891 ? T_OPCODE_ADD_R3
: T_OPCODE_SUB_R3
);
10892 inst
.instruction
|= Rd
| (Rs
<< 3) | (Rn
<< 6);
10902 Rd
= inst
.operands
[0].reg
;
10903 reject_bad_reg (Rd
);
10905 if (unified_syntax
&& inst
.size_req
== 0 && Rd
<= 7)
10907 /* Defer to section relaxation. */
10908 inst
.relax
= inst
.instruction
;
10909 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
10910 inst
.instruction
|= Rd
<< 4;
10912 else if (unified_syntax
&& inst
.size_req
!= 2)
10914 /* Generate a 32-bit opcode. */
10915 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
10916 inst
.instruction
|= Rd
<< 8;
10917 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_ADD_PC12
;
10918 inst
.relocs
[0].pc_rel
= 1;
10922 /* Generate a 16-bit opcode. */
10923 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
10924 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_ADD
;
10925 inst
.relocs
[0].exp
.X_add_number
-= 4; /* PC relative adjust. */
10926 inst
.relocs
[0].pc_rel
= 1;
10927 inst
.instruction
|= Rd
<< 4;
10930 if (inst
.relocs
[0].exp
.X_op
== O_symbol
10931 && inst
.relocs
[0].exp
.X_add_symbol
!= NULL
10932 && S_IS_DEFINED (inst
.relocs
[0].exp
.X_add_symbol
)
10933 && THUMB_IS_FUNC (inst
.relocs
[0].exp
.X_add_symbol
))
10934 inst
.relocs
[0].exp
.X_add_number
+= 1;
10937 /* Arithmetic instructions for which there is just one 16-bit
10938 instruction encoding, and it allows only two low registers.
10939 For maximal compatibility with ARM syntax, we allow three register
10940 operands even when Thumb-32 instructions are not available, as long
10941 as the first two are identical. For instance, both "sbc r0,r1" and
10942 "sbc r0,r0,r1" are allowed. */
10948 Rd
= inst
.operands
[0].reg
;
10949 Rs
= (inst
.operands
[1].present
10950 ? inst
.operands
[1].reg
/* Rd, Rs, foo */
10951 : inst
.operands
[0].reg
); /* Rd, foo -> Rd, Rd, foo */
10952 Rn
= inst
.operands
[2].reg
;
10954 reject_bad_reg (Rd
);
10955 reject_bad_reg (Rs
);
10956 if (inst
.operands
[2].isreg
)
10957 reject_bad_reg (Rn
);
10959 if (unified_syntax
)
10961 if (!inst
.operands
[2].isreg
)
10963 /* For an immediate, we always generate a 32-bit opcode;
10964 section relaxation will shrink it later if possible. */
10965 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
10966 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
10967 inst
.instruction
|= Rd
<< 8;
10968 inst
.instruction
|= Rs
<< 16;
10969 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
10973 bfd_boolean narrow
;
10975 /* See if we can do this with a 16-bit instruction. */
10976 if (THUMB_SETS_FLAGS (inst
.instruction
))
10977 narrow
= !in_it_block ();
10979 narrow
= in_it_block ();
10981 if (Rd
> 7 || Rn
> 7 || Rs
> 7)
10983 if (inst
.operands
[2].shifted
)
10985 if (inst
.size_req
== 4)
10991 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
10992 inst
.instruction
|= Rd
;
10993 inst
.instruction
|= Rn
<< 3;
10997 /* If we get here, it can't be done in 16 bits. */
10998 constraint (inst
.operands
[2].shifted
10999 && inst
.operands
[2].immisreg
,
11000 _("shift must be constant"));
11001 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
11002 inst
.instruction
|= Rd
<< 8;
11003 inst
.instruction
|= Rs
<< 16;
11004 encode_thumb32_shifted_operand (2);
11009 /* On its face this is a lie - the instruction does set the
11010 flags. However, the only supported mnemonic in this mode
11011 says it doesn't. */
11012 constraint (THUMB_SETS_FLAGS (inst
.instruction
), BAD_THUMB32
);
11014 constraint (!inst
.operands
[2].isreg
|| inst
.operands
[2].shifted
,
11015 _("unshifted register required"));
11016 constraint (Rd
> 7 || Rs
> 7 || Rn
> 7, BAD_HIREG
);
11017 constraint (Rd
!= Rs
,
11018 _("dest and source1 must be the same register"));
11020 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11021 inst
.instruction
|= Rd
;
11022 inst
.instruction
|= Rn
<< 3;
11026 /* Similarly, but for instructions where the arithmetic operation is
11027 commutative, so we can allow either of them to be different from
11028 the destination operand in a 16-bit instruction. For instance, all
11029 three of "adc r0,r1", "adc r0,r0,r1", and "adc r0,r1,r0" are
11036 Rd
= inst
.operands
[0].reg
;
11037 Rs
= (inst
.operands
[1].present
11038 ? inst
.operands
[1].reg
/* Rd, Rs, foo */
11039 : inst
.operands
[0].reg
); /* Rd, foo -> Rd, Rd, foo */
11040 Rn
= inst
.operands
[2].reg
;
11042 reject_bad_reg (Rd
);
11043 reject_bad_reg (Rs
);
11044 if (inst
.operands
[2].isreg
)
11045 reject_bad_reg (Rn
);
11047 if (unified_syntax
)
11049 if (!inst
.operands
[2].isreg
)
11051 /* For an immediate, we always generate a 32-bit opcode;
11052 section relaxation will shrink it later if possible. */
11053 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
11054 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
11055 inst
.instruction
|= Rd
<< 8;
11056 inst
.instruction
|= Rs
<< 16;
11057 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
11061 bfd_boolean narrow
;
11063 /* See if we can do this with a 16-bit instruction. */
11064 if (THUMB_SETS_FLAGS (inst
.instruction
))
11065 narrow
= !in_it_block ();
11067 narrow
= in_it_block ();
11069 if (Rd
> 7 || Rn
> 7 || Rs
> 7)
11071 if (inst
.operands
[2].shifted
)
11073 if (inst
.size_req
== 4)
11080 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11081 inst
.instruction
|= Rd
;
11082 inst
.instruction
|= Rn
<< 3;
11087 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11088 inst
.instruction
|= Rd
;
11089 inst
.instruction
|= Rs
<< 3;
11094 /* If we get here, it can't be done in 16 bits. */
11095 constraint (inst
.operands
[2].shifted
11096 && inst
.operands
[2].immisreg
,
11097 _("shift must be constant"));
11098 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
11099 inst
.instruction
|= Rd
<< 8;
11100 inst
.instruction
|= Rs
<< 16;
11101 encode_thumb32_shifted_operand (2);
11106 /* On its face this is a lie - the instruction does set the
11107 flags. However, the only supported mnemonic in this mode
11108 says it doesn't. */
11109 constraint (THUMB_SETS_FLAGS (inst
.instruction
), BAD_THUMB32
);
11111 constraint (!inst
.operands
[2].isreg
|| inst
.operands
[2].shifted
,
11112 _("unshifted register required"));
11113 constraint (Rd
> 7 || Rs
> 7 || Rn
> 7, BAD_HIREG
);
11115 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11116 inst
.instruction
|= Rd
;
11119 inst
.instruction
|= Rn
<< 3;
11121 inst
.instruction
|= Rs
<< 3;
11123 constraint (1, _("dest must overlap one source register"));
11131 unsigned int msb
= inst
.operands
[1].imm
+ inst
.operands
[2].imm
;
11132 constraint (msb
> 32, _("bit-field extends past end of register"));
11133 /* The instruction encoding stores the LSB and MSB,
11134 not the LSB and width. */
11135 Rd
= inst
.operands
[0].reg
;
11136 reject_bad_reg (Rd
);
11137 inst
.instruction
|= Rd
<< 8;
11138 inst
.instruction
|= (inst
.operands
[1].imm
& 0x1c) << 10;
11139 inst
.instruction
|= (inst
.operands
[1].imm
& 0x03) << 6;
11140 inst
.instruction
|= msb
- 1;
11149 Rd
= inst
.operands
[0].reg
;
11150 reject_bad_reg (Rd
);
11152 /* #0 in second position is alternative syntax for bfc, which is
11153 the same instruction but with REG_PC in the Rm field. */
11154 if (!inst
.operands
[1].isreg
)
11158 Rn
= inst
.operands
[1].reg
;
11159 reject_bad_reg (Rn
);
11162 msb
= inst
.operands
[2].imm
+ inst
.operands
[3].imm
;
11163 constraint (msb
> 32, _("bit-field extends past end of register"));
11164 /* The instruction encoding stores the LSB and MSB,
11165 not the LSB and width. */
11166 inst
.instruction
|= Rd
<< 8;
11167 inst
.instruction
|= Rn
<< 16;
11168 inst
.instruction
|= (inst
.operands
[2].imm
& 0x1c) << 10;
11169 inst
.instruction
|= (inst
.operands
[2].imm
& 0x03) << 6;
11170 inst
.instruction
|= msb
- 1;
11178 Rd
= inst
.operands
[0].reg
;
11179 Rn
= inst
.operands
[1].reg
;
11181 reject_bad_reg (Rd
);
11182 reject_bad_reg (Rn
);
11184 constraint (inst
.operands
[2].imm
+ inst
.operands
[3].imm
> 32,
11185 _("bit-field extends past end of register"));
11186 inst
.instruction
|= Rd
<< 8;
11187 inst
.instruction
|= Rn
<< 16;
11188 inst
.instruction
|= (inst
.operands
[2].imm
& 0x1c) << 10;
11189 inst
.instruction
|= (inst
.operands
[2].imm
& 0x03) << 6;
11190 inst
.instruction
|= inst
.operands
[3].imm
- 1;
11193 /* ARM V5 Thumb BLX (argument parse)
11194 BLX <target_addr> which is BLX(1)
11195 BLX <Rm> which is BLX(2)
11196 Unfortunately, there are two different opcodes for this mnemonic.
11197 So, the insns[].value is not used, and the code here zaps values
11198 into inst.instruction.
11200 ??? How to take advantage of the additional two bits of displacement
11201 available in Thumb32 mode? Need new relocation? */
11206 set_it_insn_type_last ();
11208 if (inst
.operands
[0].isreg
)
11210 constraint (inst
.operands
[0].reg
== REG_PC
, BAD_PC
);
11211 /* We have a register, so this is BLX(2). */
11212 inst
.instruction
|= inst
.operands
[0].reg
<< 3;
11216 /* No register. This must be BLX(1). */
11217 inst
.instruction
= 0xf000e800;
11218 encode_branch (BFD_RELOC_THUMB_PCREL_BLX
);
11227 bfd_reloc_code_real_type reloc
;
11230 set_it_insn_type (IF_INSIDE_IT_LAST_INSN
);
11232 if (in_it_block ())
11234 /* Conditional branches inside IT blocks are encoded as unconditional
11236 cond
= COND_ALWAYS
;
11241 if (cond
!= COND_ALWAYS
)
11242 opcode
= T_MNEM_bcond
;
11244 opcode
= inst
.instruction
;
11247 && (inst
.size_req
== 4
11248 || (inst
.size_req
!= 2
11249 && (inst
.operands
[0].hasreloc
11250 || inst
.relocs
[0].exp
.X_op
== O_constant
))))
11252 inst
.instruction
= THUMB_OP32(opcode
);
11253 if (cond
== COND_ALWAYS
)
11254 reloc
= BFD_RELOC_THUMB_PCREL_BRANCH25
;
11257 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2
),
11258 _("selected architecture does not support "
11259 "wide conditional branch instruction"));
11261 gas_assert (cond
!= 0xF);
11262 inst
.instruction
|= cond
<< 22;
11263 reloc
= BFD_RELOC_THUMB_PCREL_BRANCH20
;
11268 inst
.instruction
= THUMB_OP16(opcode
);
11269 if (cond
== COND_ALWAYS
)
11270 reloc
= BFD_RELOC_THUMB_PCREL_BRANCH12
;
11273 inst
.instruction
|= cond
<< 8;
11274 reloc
= BFD_RELOC_THUMB_PCREL_BRANCH9
;
11276 /* Allow section relaxation. */
11277 if (unified_syntax
&& inst
.size_req
!= 2)
11278 inst
.relax
= opcode
;
11280 inst
.relocs
[0].type
= reloc
;
11281 inst
.relocs
[0].pc_rel
= 1;
11284 /* Actually do the work for Thumb state bkpt and hlt. The only difference
11285 between the two is the maximum immediate allowed - which is passed in
11288 do_t_bkpt_hlt1 (int range
)
11290 constraint (inst
.cond
!= COND_ALWAYS
,
11291 _("instruction is always unconditional"));
11292 if (inst
.operands
[0].present
)
11294 constraint (inst
.operands
[0].imm
> range
,
11295 _("immediate value out of range"));
11296 inst
.instruction
|= inst
.operands
[0].imm
;
11299 set_it_insn_type (NEUTRAL_IT_INSN
);
/* Thumb HLT: immediate limited to 63.  */

static void
do_t_hlt (void)
{
  do_t_bkpt_hlt1 (63);
}
/* Thumb BKPT: immediate limited to 255.  */

static void
do_t_bkpt (void)
{
  do_t_bkpt_hlt1 (255);
}
11315 do_t_branch23 (void)
11317 set_it_insn_type_last ();
11318 encode_branch (BFD_RELOC_THUMB_PCREL_BRANCH23
);
11320 /* md_apply_fix blows up with 'bl foo(PLT)' where foo is defined in
11321 this file. We used to simply ignore the PLT reloc type here --
11322 the branch encoding is now needed to deal with TLSCALL relocs.
11323 So if we see a PLT reloc now, put it back to how it used to be to
11324 keep the preexisting behaviour. */
11325 if (inst
.relocs
[0].type
== BFD_RELOC_ARM_PLT32
)
11326 inst
.relocs
[0].type
= BFD_RELOC_THUMB_PCREL_BRANCH23
;
11328 #if defined(OBJ_COFF)
11329 /* If the destination of the branch is a defined symbol which does not have
11330 the THUMB_FUNC attribute, then we must be calling a function which has
11331 the (interfacearm) attribute. We look for the Thumb entry point to that
11332 function and change the branch to refer to that function instead. */
11333 if ( inst
.relocs
[0].exp
.X_op
== O_symbol
11334 && inst
.relocs
[0].exp
.X_add_symbol
!= NULL
11335 && S_IS_DEFINED (inst
.relocs
[0].exp
.X_add_symbol
)
11336 && ! THUMB_IS_FUNC (inst
.relocs
[0].exp
.X_add_symbol
))
11337 inst
.relocs
[0].exp
.X_add_symbol
11338 = find_real_start (inst
.relocs
[0].exp
.X_add_symbol
);
11345 set_it_insn_type_last ();
11346 inst
.instruction
|= inst
.operands
[0].reg
<< 3;
11347 /* ??? FIXME: Should add a hacky reloc here if reg is REG_PC. The reloc
11348 should cause the alignment to be checked once it is known. This is
11349 because BX PC only works if the instruction is word aligned. */
11357 set_it_insn_type_last ();
11358 Rm
= inst
.operands
[0].reg
;
11359 reject_bad_reg (Rm
);
11360 inst
.instruction
|= Rm
<< 16;
11369 Rd
= inst
.operands
[0].reg
;
11370 Rm
= inst
.operands
[1].reg
;
11372 reject_bad_reg (Rd
);
11373 reject_bad_reg (Rm
);
11375 inst
.instruction
|= Rd
<< 8;
11376 inst
.instruction
|= Rm
<< 16;
11377 inst
.instruction
|= Rm
;
11383 set_it_insn_type (OUTSIDE_IT_INSN
);
11389 set_it_insn_type (OUTSIDE_IT_INSN
);
11390 inst
.instruction
|= inst
.operands
[0].imm
;
11396 set_it_insn_type (OUTSIDE_IT_INSN
);
11398 && (inst
.operands
[1].present
|| inst
.size_req
== 4)
11399 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6_notm
))
11401 unsigned int imod
= (inst
.instruction
& 0x0030) >> 4;
11402 inst
.instruction
= 0xf3af8000;
11403 inst
.instruction
|= imod
<< 9;
11404 inst
.instruction
|= inst
.operands
[0].imm
<< 5;
11405 if (inst
.operands
[1].present
)
11406 inst
.instruction
|= 0x100 | inst
.operands
[1].imm
;
11410 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
)
11411 && (inst
.operands
[0].imm
& 4),
11412 _("selected processor does not support 'A' form "
11413 "of this instruction"));
11414 constraint (inst
.operands
[1].present
|| inst
.size_req
== 4,
11415 _("Thumb does not support the 2-argument "
11416 "form of this instruction"));
11417 inst
.instruction
|= inst
.operands
[0].imm
;
11421 /* THUMB CPY instruction (argument parse). */
11426 if (inst
.size_req
== 4)
11428 inst
.instruction
= THUMB_OP32 (T_MNEM_mov
);
11429 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
11430 inst
.instruction
|= inst
.operands
[1].reg
;
11434 inst
.instruction
|= (inst
.operands
[0].reg
& 0x8) << 4;
11435 inst
.instruction
|= (inst
.operands
[0].reg
& 0x7);
11436 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
11443 set_it_insn_type (OUTSIDE_IT_INSN
);
11444 constraint (inst
.operands
[0].reg
> 7, BAD_HIREG
);
11445 inst
.instruction
|= inst
.operands
[0].reg
;
11446 inst
.relocs
[0].pc_rel
= 1;
11447 inst
.relocs
[0].type
= BFD_RELOC_THUMB_PCREL_BRANCH7
;
11453 inst
.instruction
|= inst
.operands
[0].imm
;
11459 unsigned Rd
, Rn
, Rm
;
11461 Rd
= inst
.operands
[0].reg
;
11462 Rn
= (inst
.operands
[1].present
11463 ? inst
.operands
[1].reg
: Rd
);
11464 Rm
= inst
.operands
[2].reg
;
11466 reject_bad_reg (Rd
);
11467 reject_bad_reg (Rn
);
11468 reject_bad_reg (Rm
);
11470 inst
.instruction
|= Rd
<< 8;
11471 inst
.instruction
|= Rn
<< 16;
11472 inst
.instruction
|= Rm
;
11478 if (unified_syntax
&& inst
.size_req
== 4)
11479 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
11481 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11487 unsigned int cond
= inst
.operands
[0].imm
;
11489 set_it_insn_type (IT_INSN
);
11490 now_it
.mask
= (inst
.instruction
& 0xf) | 0x10;
11492 now_it
.warn_deprecated
= FALSE
;
11494 /* If the condition is a negative condition, invert the mask. */
11495 if ((cond
& 0x1) == 0x0)
11497 unsigned int mask
= inst
.instruction
& 0x000f;
11499 if ((mask
& 0x7) == 0)
11501 /* No conversion needed. */
11502 now_it
.block_length
= 1;
11504 else if ((mask
& 0x3) == 0)
11507 now_it
.block_length
= 2;
11509 else if ((mask
& 0x1) == 0)
11512 now_it
.block_length
= 3;
11517 now_it
.block_length
= 4;
11520 inst
.instruction
&= 0xfff0;
11521 inst
.instruction
|= mask
;
11524 inst
.instruction
|= cond
<< 4;
11527 /* Helper function used for both push/pop and ldm/stm. */
11529 encode_thumb2_multi (bfd_boolean do_io
, int base
, unsigned mask
,
11530 bfd_boolean writeback
)
11532 bfd_boolean load
, store
;
11534 gas_assert (base
!= -1 || !do_io
);
11535 load
= do_io
&& ((inst
.instruction
& (1 << 20)) != 0);
11536 store
= do_io
&& !load
;
11538 if (mask
& (1 << 13))
11539 inst
.error
= _("SP not allowed in register list");
11541 if (do_io
&& (mask
& (1 << base
)) != 0
11543 inst
.error
= _("having the base register in the register list when "
11544 "using write back is UNPREDICTABLE");
11548 if (mask
& (1 << 15))
11550 if (mask
& (1 << 14))
11551 inst
.error
= _("LR and PC should not both be in register list");
11553 set_it_insn_type_last ();
11558 if (mask
& (1 << 15))
11559 inst
.error
= _("PC not allowed in register list");
11562 if (do_io
&& ((mask
& (mask
- 1)) == 0))
11564 /* Single register transfers implemented as str/ldr. */
11567 if (inst
.instruction
& (1 << 23))
11568 inst
.instruction
= 0x00000b04; /* ia! -> [base], #4 */
11570 inst
.instruction
= 0x00000d04; /* db! -> [base, #-4]! */
11574 if (inst
.instruction
& (1 << 23))
11575 inst
.instruction
= 0x00800000; /* ia -> [base] */
11577 inst
.instruction
= 0x00000c04; /* db -> [base, #-4] */
11580 inst
.instruction
|= 0xf8400000;
11582 inst
.instruction
|= 0x00100000;
11584 mask
= ffs (mask
) - 1;
11587 else if (writeback
)
11588 inst
.instruction
|= WRITE_BACK
;
11590 inst
.instruction
|= mask
;
11592 inst
.instruction
|= base
<< 16;
11598 /* This really doesn't seem worth it. */
11599 constraint (inst
.relocs
[0].type
!= BFD_RELOC_UNUSED
,
11600 _("expression too complex"));
11601 constraint (inst
.operands
[1].writeback
,
11602 _("Thumb load/store multiple does not support {reglist}^"));
11604 if (unified_syntax
)
11606 bfd_boolean narrow
;
11610 /* See if we can use a 16-bit instruction. */
11611 if (inst
.instruction
< 0xffff /* not ldmdb/stmdb */
11612 && inst
.size_req
!= 4
11613 && !(inst
.operands
[1].imm
& ~0xff))
11615 mask
= 1 << inst
.operands
[0].reg
;
11617 if (inst
.operands
[0].reg
<= 7)
11619 if (inst
.instruction
== T_MNEM_stmia
11620 ? inst
.operands
[0].writeback
11621 : (inst
.operands
[0].writeback
11622 == !(inst
.operands
[1].imm
& mask
)))
11624 if (inst
.instruction
== T_MNEM_stmia
11625 && (inst
.operands
[1].imm
& mask
)
11626 && (inst
.operands
[1].imm
& (mask
- 1)))
11627 as_warn (_("value stored for r%d is UNKNOWN"),
11628 inst
.operands
[0].reg
);
11630 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11631 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
11632 inst
.instruction
|= inst
.operands
[1].imm
;
11635 else if ((inst
.operands
[1].imm
& (inst
.operands
[1].imm
-1)) == 0)
11637 /* This means 1 register in reg list one of 3 situations:
11638 1. Instruction is stmia, but without writeback.
11639 2. lmdia without writeback, but with Rn not in
11641 3. ldmia with writeback, but with Rn in reglist.
11642 Case 3 is UNPREDICTABLE behaviour, so we handle
11643 case 1 and 2 which can be converted into a 16-bit
11644 str or ldr. The SP cases are handled below. */
11645 unsigned long opcode
;
11646 /* First, record an error for Case 3. */
11647 if (inst
.operands
[1].imm
& mask
11648 && inst
.operands
[0].writeback
)
11650 _("having the base register in the register list when "
11651 "using write back is UNPREDICTABLE");
11653 opcode
= (inst
.instruction
== T_MNEM_stmia
? T_MNEM_str
11655 inst
.instruction
= THUMB_OP16 (opcode
);
11656 inst
.instruction
|= inst
.operands
[0].reg
<< 3;
11657 inst
.instruction
|= (ffs (inst
.operands
[1].imm
)-1);
11661 else if (inst
.operands
[0] .reg
== REG_SP
)
11663 if (inst
.operands
[0].writeback
)
11666 THUMB_OP16 (inst
.instruction
== T_MNEM_stmia
11667 ? T_MNEM_push
: T_MNEM_pop
);
11668 inst
.instruction
|= inst
.operands
[1].imm
;
11671 else if ((inst
.operands
[1].imm
& (inst
.operands
[1].imm
-1)) == 0)
11674 THUMB_OP16 (inst
.instruction
== T_MNEM_stmia
11675 ? T_MNEM_str_sp
: T_MNEM_ldr_sp
);
11676 inst
.instruction
|= ((ffs (inst
.operands
[1].imm
)-1) << 8);
11684 if (inst
.instruction
< 0xffff)
11685 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
11687 encode_thumb2_multi (TRUE
/* do_io */, inst
.operands
[0].reg
,
11688 inst
.operands
[1].imm
,
11689 inst
.operands
[0].writeback
);
11694 constraint (inst
.operands
[0].reg
> 7
11695 || (inst
.operands
[1].imm
& ~0xff), BAD_HIREG
);
11696 constraint (inst
.instruction
!= T_MNEM_ldmia
11697 && inst
.instruction
!= T_MNEM_stmia
,
11698 _("Thumb-2 instruction only valid in unified syntax"));
11699 if (inst
.instruction
== T_MNEM_stmia
)
11701 if (!inst
.operands
[0].writeback
)
11702 as_warn (_("this instruction will write back the base register"));
11703 if ((inst
.operands
[1].imm
& (1 << inst
.operands
[0].reg
))
11704 && (inst
.operands
[1].imm
& ((1 << inst
.operands
[0].reg
) - 1)))
11705 as_warn (_("value stored for r%d is UNKNOWN"),
11706 inst
.operands
[0].reg
);
11710 if (!inst
.operands
[0].writeback
11711 && !(inst
.operands
[1].imm
& (1 << inst
.operands
[0].reg
)))
11712 as_warn (_("this instruction will write back the base register"));
11713 else if (inst
.operands
[0].writeback
11714 && (inst
.operands
[1].imm
& (1 << inst
.operands
[0].reg
)))
11715 as_warn (_("this instruction will not write back the base register"));
11718 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11719 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
11720 inst
.instruction
|= inst
.operands
[1].imm
;
11727 constraint (!inst
.operands
[1].isreg
|| !inst
.operands
[1].preind
11728 || inst
.operands
[1].postind
|| inst
.operands
[1].writeback
11729 || inst
.operands
[1].immisreg
|| inst
.operands
[1].shifted
11730 || inst
.operands
[1].negative
,
11733 constraint ((inst
.operands
[1].reg
== REG_PC
), BAD_PC
);
11735 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
11736 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
11737 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_OFFSET_U8
;
11743 if (!inst
.operands
[1].present
)
11745 constraint (inst
.operands
[0].reg
== REG_LR
,
11746 _("r14 not allowed as first register "
11747 "when second register is omitted"));
11748 inst
.operands
[1].reg
= inst
.operands
[0].reg
+ 1;
11750 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
,
11753 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
11754 inst
.instruction
|= inst
.operands
[1].reg
<< 8;
11755 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
11761 unsigned long opcode
;
11764 if (inst
.operands
[0].isreg
11765 && !inst
.operands
[0].preind
11766 && inst
.operands
[0].reg
== REG_PC
)
11767 set_it_insn_type_last ();
11769 opcode
= inst
.instruction
;
11770 if (unified_syntax
)
11772 if (!inst
.operands
[1].isreg
)
11774 if (opcode
<= 0xffff)
11775 inst
.instruction
= THUMB_OP32 (opcode
);
11776 if (move_or_literal_pool (0, CONST_THUMB
, /*mode_3=*/FALSE
))
11779 if (inst
.operands
[1].isreg
11780 && !inst
.operands
[1].writeback
11781 && !inst
.operands
[1].shifted
&& !inst
.operands
[1].postind
11782 && !inst
.operands
[1].negative
&& inst
.operands
[0].reg
<= 7
11783 && opcode
<= 0xffff
11784 && inst
.size_req
!= 4)
11786 /* Insn may have a 16-bit form. */
11787 Rn
= inst
.operands
[1].reg
;
11788 if (inst
.operands
[1].immisreg
)
11790 inst
.instruction
= THUMB_OP16 (opcode
);
11792 if (Rn
<= 7 && inst
.operands
[1].imm
<= 7)
11794 else if (opcode
!= T_MNEM_ldr
&& opcode
!= T_MNEM_str
)
11795 reject_bad_reg (inst
.operands
[1].imm
);
11797 else if ((Rn
<= 7 && opcode
!= T_MNEM_ldrsh
11798 && opcode
!= T_MNEM_ldrsb
)
11799 || ((Rn
== REG_PC
|| Rn
== REG_SP
) && opcode
== T_MNEM_ldr
)
11800 || (Rn
== REG_SP
&& opcode
== T_MNEM_str
))
11807 if (inst
.relocs
[0].pc_rel
)
11808 opcode
= T_MNEM_ldr_pc2
;
11810 opcode
= T_MNEM_ldr_pc
;
11814 if (opcode
== T_MNEM_ldr
)
11815 opcode
= T_MNEM_ldr_sp
;
11817 opcode
= T_MNEM_str_sp
;
11819 inst
.instruction
= inst
.operands
[0].reg
<< 8;
11823 inst
.instruction
= inst
.operands
[0].reg
;
11824 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
11826 inst
.instruction
|= THUMB_OP16 (opcode
);
11827 if (inst
.size_req
== 2)
11828 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_OFFSET
;
11830 inst
.relax
= opcode
;
11834 /* Definitely a 32-bit variant. */
11836 /* Warning for Erratum 752419. */
11837 if (opcode
== T_MNEM_ldr
11838 && inst
.operands
[0].reg
== REG_SP
11839 && inst
.operands
[1].writeback
== 1
11840 && !inst
.operands
[1].immisreg
)
11842 if (no_cpu_selected ()
11843 || (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v7
)
11844 && !ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v7a
)
11845 && !ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v7r
)))
11846 as_warn (_("This instruction may be unpredictable "
11847 "if executed on M-profile cores "
11848 "with interrupts enabled."));
11851 /* Do some validations regarding addressing modes. */
11852 if (inst
.operands
[1].immisreg
)
11853 reject_bad_reg (inst
.operands
[1].imm
);
11855 constraint (inst
.operands
[1].writeback
== 1
11856 && inst
.operands
[0].reg
== inst
.operands
[1].reg
,
11859 inst
.instruction
= THUMB_OP32 (opcode
);
11860 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
11861 encode_thumb32_addr_mode (1, /*is_t=*/FALSE
, /*is_d=*/FALSE
);
11862 check_ldr_r15_aligned ();
11866 constraint (inst
.operands
[0].reg
> 7, BAD_HIREG
);
11868 if (inst
.instruction
== T_MNEM_ldrsh
|| inst
.instruction
== T_MNEM_ldrsb
)
11870 /* Only [Rn,Rm] is acceptable. */
11871 constraint (inst
.operands
[1].reg
> 7 || inst
.operands
[1].imm
> 7, BAD_HIREG
);
11872 constraint (!inst
.operands
[1].isreg
|| !inst
.operands
[1].immisreg
11873 || inst
.operands
[1].postind
|| inst
.operands
[1].shifted
11874 || inst
.operands
[1].negative
,
11875 _("Thumb does not support this addressing mode"));
11876 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11880 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11881 if (!inst
.operands
[1].isreg
)
11882 if (move_or_literal_pool (0, CONST_THUMB
, /*mode_3=*/FALSE
))
11885 constraint (!inst
.operands
[1].preind
11886 || inst
.operands
[1].shifted
11887 || inst
.operands
[1].writeback
,
11888 _("Thumb does not support this addressing mode"));
11889 if (inst
.operands
[1].reg
== REG_PC
|| inst
.operands
[1].reg
== REG_SP
)
11891 constraint (inst
.instruction
& 0x0600,
11892 _("byte or halfword not valid for base register"));
11893 constraint (inst
.operands
[1].reg
== REG_PC
11894 && !(inst
.instruction
& THUMB_LOAD_BIT
),
11895 _("r15 based store not allowed"));
11896 constraint (inst
.operands
[1].immisreg
,
11897 _("invalid base register for register offset"));
11899 if (inst
.operands
[1].reg
== REG_PC
)
11900 inst
.instruction
= T_OPCODE_LDR_PC
;
11901 else if (inst
.instruction
& THUMB_LOAD_BIT
)
11902 inst
.instruction
= T_OPCODE_LDR_SP
;
11904 inst
.instruction
= T_OPCODE_STR_SP
;
11906 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
11907 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_OFFSET
;
11911 constraint (inst
.operands
[1].reg
> 7, BAD_HIREG
);
11912 if (!inst
.operands
[1].immisreg
)
11914 /* Immediate offset. */
11915 inst
.instruction
|= inst
.operands
[0].reg
;
11916 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
11917 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_OFFSET
;
11921 /* Register offset. */
11922 constraint (inst
.operands
[1].imm
> 7, BAD_HIREG
);
11923 constraint (inst
.operands
[1].negative
,
11924 _("Thumb does not support this addressing mode"));
11927 switch (inst
.instruction
)
11929 case T_OPCODE_STR_IW
: inst
.instruction
= T_OPCODE_STR_RW
; break;
11930 case T_OPCODE_STR_IH
: inst
.instruction
= T_OPCODE_STR_RH
; break;
11931 case T_OPCODE_STR_IB
: inst
.instruction
= T_OPCODE_STR_RB
; break;
11932 case T_OPCODE_LDR_IW
: inst
.instruction
= T_OPCODE_LDR_RW
; break;
11933 case T_OPCODE_LDR_IH
: inst
.instruction
= T_OPCODE_LDR_RH
; break;
11934 case T_OPCODE_LDR_IB
: inst
.instruction
= T_OPCODE_LDR_RB
; break;
11935 case 0x5600 /* ldrsb */:
11936 case 0x5e00 /* ldrsh */: break;
11940 inst
.instruction
|= inst
.operands
[0].reg
;
11941 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
11942 inst
.instruction
|= inst
.operands
[1].imm
<< 6;
11948 if (!inst
.operands
[1].present
)
11950 inst
.operands
[1].reg
= inst
.operands
[0].reg
+ 1;
11951 constraint (inst
.operands
[0].reg
== REG_LR
,
11952 _("r14 not allowed here"));
11953 constraint (inst
.operands
[0].reg
== REG_R12
,
11954 _("r12 not allowed here"));
11957 if (inst
.operands
[2].writeback
11958 && (inst
.operands
[0].reg
== inst
.operands
[2].reg
11959 || inst
.operands
[1].reg
== inst
.operands
[2].reg
))
11960 as_warn (_("base register written back, and overlaps "
11961 "one of transfer registers"));
11963 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
11964 inst
.instruction
|= inst
.operands
[1].reg
<< 8;
11965 encode_thumb32_addr_mode (2, /*is_t=*/FALSE
, /*is_d=*/TRUE
);
11971 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
11972 encode_thumb32_addr_mode (1, /*is_t=*/TRUE
, /*is_d=*/FALSE
);
11978 unsigned Rd
, Rn
, Rm
, Ra
;
11980 Rd
= inst
.operands
[0].reg
;
11981 Rn
= inst
.operands
[1].reg
;
11982 Rm
= inst
.operands
[2].reg
;
11983 Ra
= inst
.operands
[3].reg
;
11985 reject_bad_reg (Rd
);
11986 reject_bad_reg (Rn
);
11987 reject_bad_reg (Rm
);
11988 reject_bad_reg (Ra
);
11990 inst
.instruction
|= Rd
<< 8;
11991 inst
.instruction
|= Rn
<< 16;
11992 inst
.instruction
|= Rm
;
11993 inst
.instruction
|= Ra
<< 12;
11999 unsigned RdLo
, RdHi
, Rn
, Rm
;
12001 RdLo
= inst
.operands
[0].reg
;
12002 RdHi
= inst
.operands
[1].reg
;
12003 Rn
= inst
.operands
[2].reg
;
12004 Rm
= inst
.operands
[3].reg
;
12006 reject_bad_reg (RdLo
);
12007 reject_bad_reg (RdHi
);
12008 reject_bad_reg (Rn
);
12009 reject_bad_reg (Rm
);
12011 inst
.instruction
|= RdLo
<< 12;
12012 inst
.instruction
|= RdHi
<< 8;
12013 inst
.instruction
|= Rn
<< 16;
12014 inst
.instruction
|= Rm
;
12018 do_t_mov_cmp (void)
12022 Rn
= inst
.operands
[0].reg
;
12023 Rm
= inst
.operands
[1].reg
;
12026 set_it_insn_type_last ();
12028 if (unified_syntax
)
12030 int r0off
= (inst
.instruction
== T_MNEM_mov
12031 || inst
.instruction
== T_MNEM_movs
) ? 8 : 16;
12032 unsigned long opcode
;
12033 bfd_boolean narrow
;
12034 bfd_boolean low_regs
;
12036 low_regs
= (Rn
<= 7 && Rm
<= 7);
12037 opcode
= inst
.instruction
;
12038 if (in_it_block ())
12039 narrow
= opcode
!= T_MNEM_movs
;
12041 narrow
= opcode
!= T_MNEM_movs
|| low_regs
;
12042 if (inst
.size_req
== 4
12043 || inst
.operands
[1].shifted
)
12046 /* MOVS PC, LR is encoded as SUBS PC, LR, #0. */
12047 if (opcode
== T_MNEM_movs
&& inst
.operands
[1].isreg
12048 && !inst
.operands
[1].shifted
12052 inst
.instruction
= T2_SUBS_PC_LR
;
12056 if (opcode
== T_MNEM_cmp
)
12058 constraint (Rn
== REG_PC
, BAD_PC
);
12061 /* In the Thumb-2 ISA, use of R13 as Rm is deprecated,
12063 warn_deprecated_sp (Rm
);
12064 /* R15 was documented as a valid choice for Rm in ARMv6,
12065 but as UNPREDICTABLE in ARMv7. ARM's proprietary
12066 tools reject R15, so we do too. */
12067 constraint (Rm
== REG_PC
, BAD_PC
);
12070 reject_bad_reg (Rm
);
12072 else if (opcode
== T_MNEM_mov
12073 || opcode
== T_MNEM_movs
)
12075 if (inst
.operands
[1].isreg
)
12077 if (opcode
== T_MNEM_movs
)
12079 reject_bad_reg (Rn
);
12080 reject_bad_reg (Rm
);
12084 /* This is mov.n. */
12085 if ((Rn
== REG_SP
|| Rn
== REG_PC
)
12086 && (Rm
== REG_SP
|| Rm
== REG_PC
))
12088 as_tsktsk (_("Use of r%u as a source register is "
12089 "deprecated when r%u is the destination "
12090 "register."), Rm
, Rn
);
12095 /* This is mov.w. */
12096 constraint (Rn
== REG_PC
, BAD_PC
);
12097 constraint (Rm
== REG_PC
, BAD_PC
);
12098 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8
))
12099 constraint (Rn
== REG_SP
&& Rm
== REG_SP
, BAD_SP
);
12103 reject_bad_reg (Rn
);
12106 if (!inst
.operands
[1].isreg
)
12108 /* Immediate operand. */
12109 if (!in_it_block () && opcode
== T_MNEM_mov
)
12111 if (low_regs
&& narrow
)
12113 inst
.instruction
= THUMB_OP16 (opcode
);
12114 inst
.instruction
|= Rn
<< 8;
12115 if (inst
.relocs
[0].type
< BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
12116 || inst
.relocs
[0].type
> BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
)
12118 if (inst
.size_req
== 2)
12119 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_IMM
;
12121 inst
.relax
= opcode
;
12126 constraint ((inst
.relocs
[0].type
12127 >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
)
12128 && (inst
.relocs
[0].type
12129 <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
) ,
12130 THUMB1_RELOC_ONLY
);
12132 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12133 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
12134 inst
.instruction
|= Rn
<< r0off
;
12135 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
12138 else if (inst
.operands
[1].shifted
&& inst
.operands
[1].immisreg
12139 && (inst
.instruction
== T_MNEM_mov
12140 || inst
.instruction
== T_MNEM_movs
))
12142 /* Register shifts are encoded as separate shift instructions. */
12143 bfd_boolean flags
= (inst
.instruction
== T_MNEM_movs
);
12145 if (in_it_block ())
12150 if (inst
.size_req
== 4)
12153 if (!low_regs
|| inst
.operands
[1].imm
> 7)
12159 switch (inst
.operands
[1].shift_kind
)
12162 opcode
= narrow
? T_OPCODE_LSL_R
: THUMB_OP32 (T_MNEM_lsl
);
12165 opcode
= narrow
? T_OPCODE_ASR_R
: THUMB_OP32 (T_MNEM_asr
);
12168 opcode
= narrow
? T_OPCODE_LSR_R
: THUMB_OP32 (T_MNEM_lsr
);
12171 opcode
= narrow
? T_OPCODE_ROR_R
: THUMB_OP32 (T_MNEM_ror
);
12177 inst
.instruction
= opcode
;
12180 inst
.instruction
|= Rn
;
12181 inst
.instruction
|= inst
.operands
[1].imm
<< 3;
12186 inst
.instruction
|= CONDS_BIT
;
12188 inst
.instruction
|= Rn
<< 8;
12189 inst
.instruction
|= Rm
<< 16;
12190 inst
.instruction
|= inst
.operands
[1].imm
;
12195 /* Some mov with immediate shift have narrow variants.
12196 Register shifts are handled above. */
12197 if (low_regs
&& inst
.operands
[1].shifted
12198 && (inst
.instruction
== T_MNEM_mov
12199 || inst
.instruction
== T_MNEM_movs
))
12201 if (in_it_block ())
12202 narrow
= (inst
.instruction
== T_MNEM_mov
);
12204 narrow
= (inst
.instruction
== T_MNEM_movs
);
12209 switch (inst
.operands
[1].shift_kind
)
12211 case SHIFT_LSL
: inst
.instruction
= T_OPCODE_LSL_I
; break;
12212 case SHIFT_LSR
: inst
.instruction
= T_OPCODE_LSR_I
; break;
12213 case SHIFT_ASR
: inst
.instruction
= T_OPCODE_ASR_I
; break;
12214 default: narrow
= FALSE
; break;
12220 inst
.instruction
|= Rn
;
12221 inst
.instruction
|= Rm
<< 3;
12222 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_SHIFT
;
12226 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12227 inst
.instruction
|= Rn
<< r0off
;
12228 encode_thumb32_shifted_operand (1);
12232 switch (inst
.instruction
)
12235 /* In v4t or v5t a move of two lowregs produces unpredictable
12236 results. Don't allow this. */
12239 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6
),
12240 "MOV Rd, Rs with two low registers is not "
12241 "permitted on this architecture");
12242 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
12246 inst
.instruction
= T_OPCODE_MOV_HR
;
12247 inst
.instruction
|= (Rn
& 0x8) << 4;
12248 inst
.instruction
|= (Rn
& 0x7);
12249 inst
.instruction
|= Rm
<< 3;
12253 /* We know we have low registers at this point.
12254 Generate LSLS Rd, Rs, #0. */
12255 inst
.instruction
= T_OPCODE_LSL_I
;
12256 inst
.instruction
|= Rn
;
12257 inst
.instruction
|= Rm
<< 3;
12263 inst
.instruction
= T_OPCODE_CMP_LR
;
12264 inst
.instruction
|= Rn
;
12265 inst
.instruction
|= Rm
<< 3;
12269 inst
.instruction
= T_OPCODE_CMP_HR
;
12270 inst
.instruction
|= (Rn
& 0x8) << 4;
12271 inst
.instruction
|= (Rn
& 0x7);
12272 inst
.instruction
|= Rm
<< 3;
12279 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12281 /* PR 10443: Do not silently ignore shifted operands. */
12282 constraint (inst
.operands
[1].shifted
,
12283 _("shifts in CMP/MOV instructions are only supported in unified syntax"));
12285 if (inst
.operands
[1].isreg
)
12287 if (Rn
< 8 && Rm
< 8)
12289 /* A move of two lowregs is encoded as ADD Rd, Rs, #0
12290 since a MOV instruction produces unpredictable results. */
12291 if (inst
.instruction
== T_OPCODE_MOV_I8
)
12292 inst
.instruction
= T_OPCODE_ADD_I3
;
12294 inst
.instruction
= T_OPCODE_CMP_LR
;
12296 inst
.instruction
|= Rn
;
12297 inst
.instruction
|= Rm
<< 3;
12301 if (inst
.instruction
== T_OPCODE_MOV_I8
)
12302 inst
.instruction
= T_OPCODE_MOV_HR
;
12304 inst
.instruction
= T_OPCODE_CMP_HR
;
12310 constraint (Rn
> 7,
12311 _("only lo regs allowed with immediate"));
12312 inst
.instruction
|= Rn
<< 8;
12313 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_IMM
;
12324 top
= (inst
.instruction
& 0x00800000) != 0;
12325 if (inst
.relocs
[0].type
== BFD_RELOC_ARM_MOVW
)
12327 constraint (top
, _(":lower16: not allowed in this instruction"));
12328 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_MOVW
;
12330 else if (inst
.relocs
[0].type
== BFD_RELOC_ARM_MOVT
)
12332 constraint (!top
, _(":upper16: not allowed in this instruction"));
12333 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_MOVT
;
12336 Rd
= inst
.operands
[0].reg
;
12337 reject_bad_reg (Rd
);
12339 inst
.instruction
|= Rd
<< 8;
12340 if (inst
.relocs
[0].type
== BFD_RELOC_UNUSED
)
12342 imm
= inst
.relocs
[0].exp
.X_add_number
;
12343 inst
.instruction
|= (imm
& 0xf000) << 4;
12344 inst
.instruction
|= (imm
& 0x0800) << 15;
12345 inst
.instruction
|= (imm
& 0x0700) << 4;
12346 inst
.instruction
|= (imm
& 0x00ff);
12351 do_t_mvn_tst (void)
12355 Rn
= inst
.operands
[0].reg
;
12356 Rm
= inst
.operands
[1].reg
;
12358 if (inst
.instruction
== T_MNEM_cmp
12359 || inst
.instruction
== T_MNEM_cmn
)
12360 constraint (Rn
== REG_PC
, BAD_PC
);
12362 reject_bad_reg (Rn
);
12363 reject_bad_reg (Rm
);
12365 if (unified_syntax
)
12367 int r0off
= (inst
.instruction
== T_MNEM_mvn
12368 || inst
.instruction
== T_MNEM_mvns
) ? 8 : 16;
12369 bfd_boolean narrow
;
12371 if (inst
.size_req
== 4
12372 || inst
.instruction
> 0xffff
12373 || inst
.operands
[1].shifted
12374 || Rn
> 7 || Rm
> 7)
12376 else if (inst
.instruction
== T_MNEM_cmn
12377 || inst
.instruction
== T_MNEM_tst
)
12379 else if (THUMB_SETS_FLAGS (inst
.instruction
))
12380 narrow
= !in_it_block ();
12382 narrow
= in_it_block ();
12384 if (!inst
.operands
[1].isreg
)
12386 /* For an immediate, we always generate a 32-bit opcode;
12387 section relaxation will shrink it later if possible. */
12388 if (inst
.instruction
< 0xffff)
12389 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12390 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
12391 inst
.instruction
|= Rn
<< r0off
;
12392 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
12396 /* See if we can do this with a 16-bit instruction. */
12399 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12400 inst
.instruction
|= Rn
;
12401 inst
.instruction
|= Rm
<< 3;
12405 constraint (inst
.operands
[1].shifted
12406 && inst
.operands
[1].immisreg
,
12407 _("shift must be constant"));
12408 if (inst
.instruction
< 0xffff)
12409 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12410 inst
.instruction
|= Rn
<< r0off
;
12411 encode_thumb32_shifted_operand (1);
12417 constraint (inst
.instruction
> 0xffff
12418 || inst
.instruction
== T_MNEM_mvns
, BAD_THUMB32
);
12419 constraint (!inst
.operands
[1].isreg
|| inst
.operands
[1].shifted
,
12420 _("unshifted register required"));
12421 constraint (Rn
> 7 || Rm
> 7,
12424 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12425 inst
.instruction
|= Rn
;
12426 inst
.instruction
|= Rm
<< 3;
12435 if (do_vfp_nsyn_mrs () == SUCCESS
)
12438 Rd
= inst
.operands
[0].reg
;
12439 reject_bad_reg (Rd
);
12440 inst
.instruction
|= Rd
<< 8;
12442 if (inst
.operands
[1].isreg
)
12444 unsigned br
= inst
.operands
[1].reg
;
12445 if (((br
& 0x200) == 0) && ((br
& 0xf000) != 0xf000))
12446 as_bad (_("bad register for mrs"));
12448 inst
.instruction
|= br
& (0xf << 16);
12449 inst
.instruction
|= (br
& 0x300) >> 4;
12450 inst
.instruction
|= (br
& SPSR_BIT
) >> 2;
12454 int flags
= inst
.operands
[1].imm
& (PSR_c
|PSR_x
|PSR_s
|PSR_f
|SPSR_BIT
);
12456 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_m
))
12458 /* PR gas/12698: The constraint is only applied for m_profile.
12459 If the user has specified -march=all, we want to ignore it as
12460 we are building for any CPU type, including non-m variants. */
12461 bfd_boolean m_profile
=
12462 !ARM_FEATURE_CORE_EQUAL (selected_cpu
, arm_arch_any
);
12463 constraint ((flags
!= 0) && m_profile
, _("selected processor does "
12464 "not support requested special purpose register"));
12467 /* mrs only accepts APSR/CPSR/SPSR/CPSR_all/SPSR_all (for non-M profile
12469 constraint ((flags
& ~SPSR_BIT
) != (PSR_c
|PSR_f
),
12470 _("'APSR', 'CPSR' or 'SPSR' expected"));
12472 inst
.instruction
|= (flags
& SPSR_BIT
) >> 2;
12473 inst
.instruction
|= inst
.operands
[1].imm
& 0xff;
12474 inst
.instruction
|= 0xf0000;
12484 if (do_vfp_nsyn_msr () == SUCCESS
)
12487 constraint (!inst
.operands
[1].isreg
,
12488 _("Thumb encoding does not support an immediate here"));
12490 if (inst
.operands
[0].isreg
)
12491 flags
= (int)(inst
.operands
[0].reg
);
12493 flags
= inst
.operands
[0].imm
;
12495 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_m
))
12497 int bits
= inst
.operands
[0].imm
& (PSR_c
|PSR_x
|PSR_s
|PSR_f
|SPSR_BIT
);
12499 /* PR gas/12698: The constraint is only applied for m_profile.
12500 If the user has specified -march=all, we want to ignore it as
12501 we are building for any CPU type, including non-m variants. */
12502 bfd_boolean m_profile
=
12503 !ARM_FEATURE_CORE_EQUAL (selected_cpu
, arm_arch_any
);
12504 constraint (((ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6_dsp
)
12505 && (bits
& ~(PSR_s
| PSR_f
)) != 0)
12506 || (!ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6_dsp
)
12507 && bits
!= PSR_f
)) && m_profile
,
12508 _("selected processor does not support requested special "
12509 "purpose register"));
12512 constraint ((flags
& 0xff) != 0, _("selected processor does not support "
12513 "requested special purpose register"));
12515 Rn
= inst
.operands
[1].reg
;
12516 reject_bad_reg (Rn
);
12518 inst
.instruction
|= (flags
& SPSR_BIT
) >> 2;
12519 inst
.instruction
|= (flags
& 0xf0000) >> 8;
12520 inst
.instruction
|= (flags
& 0x300) >> 4;
12521 inst
.instruction
|= (flags
& 0xff);
12522 inst
.instruction
|= Rn
<< 16;
12528 bfd_boolean narrow
;
12529 unsigned Rd
, Rn
, Rm
;
12531 if (!inst
.operands
[2].present
)
12532 inst
.operands
[2].reg
= inst
.operands
[0].reg
;
12534 Rd
= inst
.operands
[0].reg
;
12535 Rn
= inst
.operands
[1].reg
;
12536 Rm
= inst
.operands
[2].reg
;
12538 if (unified_syntax
)
12540 if (inst
.size_req
== 4
12546 else if (inst
.instruction
== T_MNEM_muls
)
12547 narrow
= !in_it_block ();
12549 narrow
= in_it_block ();
12553 constraint (inst
.instruction
== T_MNEM_muls
, BAD_THUMB32
);
12554 constraint (Rn
> 7 || Rm
> 7,
12561 /* 16-bit MULS/Conditional MUL. */
12562 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12563 inst
.instruction
|= Rd
;
12566 inst
.instruction
|= Rm
<< 3;
12568 inst
.instruction
|= Rn
<< 3;
12570 constraint (1, _("dest must overlap one source register"));
12574 constraint (inst
.instruction
!= T_MNEM_mul
,
12575 _("Thumb-2 MUL must not set flags"));
12577 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12578 inst
.instruction
|= Rd
<< 8;
12579 inst
.instruction
|= Rn
<< 16;
12580 inst
.instruction
|= Rm
<< 0;
12582 reject_bad_reg (Rd
);
12583 reject_bad_reg (Rn
);
12584 reject_bad_reg (Rm
);
12591 unsigned RdLo
, RdHi
, Rn
, Rm
;
12593 RdLo
= inst
.operands
[0].reg
;
12594 RdHi
= inst
.operands
[1].reg
;
12595 Rn
= inst
.operands
[2].reg
;
12596 Rm
= inst
.operands
[3].reg
;
12598 reject_bad_reg (RdLo
);
12599 reject_bad_reg (RdHi
);
12600 reject_bad_reg (Rn
);
12601 reject_bad_reg (Rm
);
12603 inst
.instruction
|= RdLo
<< 12;
12604 inst
.instruction
|= RdHi
<< 8;
12605 inst
.instruction
|= Rn
<< 16;
12606 inst
.instruction
|= Rm
;
12609 as_tsktsk (_("rdhi and rdlo must be different"));
12615 set_it_insn_type (NEUTRAL_IT_INSN
);
12617 if (unified_syntax
)
12619 if (inst
.size_req
== 4 || inst
.operands
[0].imm
> 15)
12621 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12622 inst
.instruction
|= inst
.operands
[0].imm
;
12626 /* PR9722: Check for Thumb2 availability before
12627 generating a thumb2 nop instruction. */
12628 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6t2
))
12630 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12631 inst
.instruction
|= inst
.operands
[0].imm
<< 4;
12634 inst
.instruction
= 0x46c0;
12639 constraint (inst
.operands
[0].present
,
12640 _("Thumb does not support NOP with hints"));
12641 inst
.instruction
= 0x46c0;
12648 if (unified_syntax
)
12650 bfd_boolean narrow
;
12652 if (THUMB_SETS_FLAGS (inst
.instruction
))
12653 narrow
= !in_it_block ();
12655 narrow
= in_it_block ();
12656 if (inst
.operands
[0].reg
> 7 || inst
.operands
[1].reg
> 7)
12658 if (inst
.size_req
== 4)
12663 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12664 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
12665 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
12669 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12670 inst
.instruction
|= inst
.operands
[0].reg
;
12671 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
12676 constraint (inst
.operands
[0].reg
> 7 || inst
.operands
[1].reg
> 7,
12678 constraint (THUMB_SETS_FLAGS (inst
.instruction
), BAD_THUMB32
);
12680 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12681 inst
.instruction
|= inst
.operands
[0].reg
;
12682 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
12691 Rd
= inst
.operands
[0].reg
;
12692 Rn
= inst
.operands
[1].present
? inst
.operands
[1].reg
: Rd
;
12694 reject_bad_reg (Rd
);
12695 /* Rn == REG_SP is unpredictable; Rn == REG_PC is MVN. */
12696 reject_bad_reg (Rn
);
12698 inst
.instruction
|= Rd
<< 8;
12699 inst
.instruction
|= Rn
<< 16;
12701 if (!inst
.operands
[2].isreg
)
12703 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
12704 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
12710 Rm
= inst
.operands
[2].reg
;
12711 reject_bad_reg (Rm
);
12713 constraint (inst
.operands
[2].shifted
12714 && inst
.operands
[2].immisreg
,
12715 _("shift must be constant"));
12716 encode_thumb32_shifted_operand (2);
12723 unsigned Rd
, Rn
, Rm
;
12725 Rd
= inst
.operands
[0].reg
;
12726 Rn
= inst
.operands
[1].reg
;
12727 Rm
= inst
.operands
[2].reg
;
12729 reject_bad_reg (Rd
);
12730 reject_bad_reg (Rn
);
12731 reject_bad_reg (Rm
);
12733 inst
.instruction
|= Rd
<< 8;
12734 inst
.instruction
|= Rn
<< 16;
12735 inst
.instruction
|= Rm
;
12736 if (inst
.operands
[3].present
)
12738 unsigned int val
= inst
.relocs
[0].exp
.X_add_number
;
12739 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
,
12740 _("expression too complex"));
12741 inst
.instruction
|= (val
& 0x1c) << 10;
12742 inst
.instruction
|= (val
& 0x03) << 6;
12749 if (!inst
.operands
[3].present
)
12753 inst
.instruction
&= ~0x00000020;
12755 /* PR 10168. Swap the Rm and Rn registers. */
12756 Rtmp
= inst
.operands
[1].reg
;
12757 inst
.operands
[1].reg
= inst
.operands
[2].reg
;
12758 inst
.operands
[2].reg
= Rtmp
;
12766 if (inst
.operands
[0].immisreg
)
12767 reject_bad_reg (inst
.operands
[0].imm
);
12769 encode_thumb32_addr_mode (0, /*is_t=*/FALSE
, /*is_d=*/FALSE
);
12773 do_t_push_pop (void)
12777 constraint (inst
.operands
[0].writeback
,
12778 _("push/pop do not support {reglist}^"));
12779 constraint (inst
.relocs
[0].type
!= BFD_RELOC_UNUSED
,
12780 _("expression too complex"));
12782 mask
= inst
.operands
[0].imm
;
12783 if (inst
.size_req
!= 4 && (mask
& ~0xff) == 0)
12784 inst
.instruction
= THUMB_OP16 (inst
.instruction
) | mask
;
12785 else if (inst
.size_req
!= 4
12786 && (mask
& ~0xff) == (1U << (inst
.instruction
== T_MNEM_push
12787 ? REG_LR
: REG_PC
)))
12789 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12790 inst
.instruction
|= THUMB_PP_PC_LR
;
12791 inst
.instruction
|= mask
& 0xff;
12793 else if (unified_syntax
)
12795 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12796 encode_thumb2_multi (TRUE
/* do_io */, 13, mask
, TRUE
);
12800 inst
.error
= _("invalid register list to push/pop instruction");
12808 if (unified_syntax
)
12809 encode_thumb2_multi (FALSE
/* do_io */, -1, inst
.operands
[0].imm
, FALSE
);
12812 inst
.error
= _("invalid register list to push/pop instruction");
12822 Rd
= inst
.operands
[0].reg
;
12823 Rm
= inst
.operands
[1].reg
;
12825 reject_bad_reg (Rd
);
12826 reject_bad_reg (Rm
);
12828 inst
.instruction
|= Rd
<< 8;
12829 inst
.instruction
|= Rm
<< 16;
12830 inst
.instruction
|= Rm
;
12838 Rd
= inst
.operands
[0].reg
;
12839 Rm
= inst
.operands
[1].reg
;
12841 reject_bad_reg (Rd
);
12842 reject_bad_reg (Rm
);
12844 if (Rd
<= 7 && Rm
<= 7
12845 && inst
.size_req
!= 4)
12847 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12848 inst
.instruction
|= Rd
;
12849 inst
.instruction
|= Rm
<< 3;
12851 else if (unified_syntax
)
12853 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12854 inst
.instruction
|= Rd
<< 8;
12855 inst
.instruction
|= Rm
<< 16;
12856 inst
.instruction
|= Rm
;
12859 inst
.error
= BAD_HIREG
;
12867 Rd
= inst
.operands
[0].reg
;
12868 Rm
= inst
.operands
[1].reg
;
12870 reject_bad_reg (Rd
);
12871 reject_bad_reg (Rm
);
12873 inst
.instruction
|= Rd
<< 8;
12874 inst
.instruction
|= Rm
;
12882 Rd
= inst
.operands
[0].reg
;
12883 Rs
= (inst
.operands
[1].present
12884 ? inst
.operands
[1].reg
/* Rd, Rs, foo */
12885 : inst
.operands
[0].reg
); /* Rd, foo -> Rd, Rd, foo */
12887 reject_bad_reg (Rd
);
12888 reject_bad_reg (Rs
);
12889 if (inst
.operands
[2].isreg
)
12890 reject_bad_reg (inst
.operands
[2].reg
);
12892 inst
.instruction
|= Rd
<< 8;
12893 inst
.instruction
|= Rs
<< 16;
12894 if (!inst
.operands
[2].isreg
)
12896 bfd_boolean narrow
;
12898 if ((inst
.instruction
& 0x00100000) != 0)
12899 narrow
= !in_it_block ();
12901 narrow
= in_it_block ();
12903 if (Rd
> 7 || Rs
> 7)
12906 if (inst
.size_req
== 4 || !unified_syntax
)
12909 if (inst
.relocs
[0].exp
.X_op
!= O_constant
12910 || inst
.relocs
[0].exp
.X_add_number
!= 0)
12913 /* Turn rsb #0 into 16-bit neg. We should probably do this via
12914 relaxation, but it doesn't seem worth the hassle. */
12917 inst
.relocs
[0].type
= BFD_RELOC_UNUSED
;
12918 inst
.instruction
= THUMB_OP16 (T_MNEM_negs
);
12919 inst
.instruction
|= Rs
<< 3;
12920 inst
.instruction
|= Rd
;
12924 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
12925 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
12929 encode_thumb32_shifted_operand (2);
12935 if (warn_on_deprecated
12936 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8
))
12937 as_tsktsk (_("setend use is deprecated for ARMv8"));
12939 set_it_insn_type (OUTSIDE_IT_INSN
);
12940 if (inst
.operands
[0].imm
)
12941 inst
.instruction
|= 0x8;
12947 if (!inst
.operands
[1].present
)
12948 inst
.operands
[1].reg
= inst
.operands
[0].reg
;
12950 if (unified_syntax
)
12952 bfd_boolean narrow
;
12955 switch (inst
.instruction
)
12958 case T_MNEM_asrs
: shift_kind
= SHIFT_ASR
; break;
12960 case T_MNEM_lsls
: shift_kind
= SHIFT_LSL
; break;
12962 case T_MNEM_lsrs
: shift_kind
= SHIFT_LSR
; break;
12964 case T_MNEM_rors
: shift_kind
= SHIFT_ROR
; break;
12968 if (THUMB_SETS_FLAGS (inst
.instruction
))
12969 narrow
= !in_it_block ();
12971 narrow
= in_it_block ();
12972 if (inst
.operands
[0].reg
> 7 || inst
.operands
[1].reg
> 7)
12974 if (!inst
.operands
[2].isreg
&& shift_kind
== SHIFT_ROR
)
12976 if (inst
.operands
[2].isreg
12977 && (inst
.operands
[1].reg
!= inst
.operands
[0].reg
12978 || inst
.operands
[2].reg
> 7))
12980 if (inst
.size_req
== 4)
12983 reject_bad_reg (inst
.operands
[0].reg
);
12984 reject_bad_reg (inst
.operands
[1].reg
);
12988 if (inst
.operands
[2].isreg
)
12990 reject_bad_reg (inst
.operands
[2].reg
);
12991 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12992 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
12993 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
12994 inst
.instruction
|= inst
.operands
[2].reg
;
12996 /* PR 12854: Error on extraneous shifts. */
12997 constraint (inst
.operands
[2].shifted
,
12998 _("extraneous shift as part of operand to shift insn"));
13002 inst
.operands
[1].shifted
= 1;
13003 inst
.operands
[1].shift_kind
= shift_kind
;
13004 inst
.instruction
= THUMB_OP32 (THUMB_SETS_FLAGS (inst
.instruction
)
13005 ? T_MNEM_movs
: T_MNEM_mov
);
13006 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
13007 encode_thumb32_shifted_operand (1);
13008 /* Prevent the incorrect generation of an ARM_IMMEDIATE fixup. */
13009 inst
.relocs
[0].type
= BFD_RELOC_UNUSED
;
13014 if (inst
.operands
[2].isreg
)
13016 switch (shift_kind
)
13018 case SHIFT_ASR
: inst
.instruction
= T_OPCODE_ASR_R
; break;
13019 case SHIFT_LSL
: inst
.instruction
= T_OPCODE_LSL_R
; break;
13020 case SHIFT_LSR
: inst
.instruction
= T_OPCODE_LSR_R
; break;
13021 case SHIFT_ROR
: inst
.instruction
= T_OPCODE_ROR_R
; break;
13025 inst
.instruction
|= inst
.operands
[0].reg
;
13026 inst
.instruction
|= inst
.operands
[2].reg
<< 3;
13028 /* PR 12854: Error on extraneous shifts. */
13029 constraint (inst
.operands
[2].shifted
,
13030 _("extraneous shift as part of operand to shift insn"));
13034 switch (shift_kind
)
13036 case SHIFT_ASR
: inst
.instruction
= T_OPCODE_ASR_I
; break;
13037 case SHIFT_LSL
: inst
.instruction
= T_OPCODE_LSL_I
; break;
13038 case SHIFT_LSR
: inst
.instruction
= T_OPCODE_LSR_I
; break;
13041 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_SHIFT
;
13042 inst
.instruction
|= inst
.operands
[0].reg
;
13043 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
13049 constraint (inst
.operands
[0].reg
> 7
13050 || inst
.operands
[1].reg
> 7, BAD_HIREG
);
13051 constraint (THUMB_SETS_FLAGS (inst
.instruction
), BAD_THUMB32
);
13053 if (inst
.operands
[2].isreg
) /* Rd, {Rs,} Rn */
13055 constraint (inst
.operands
[2].reg
> 7, BAD_HIREG
);
13056 constraint (inst
.operands
[0].reg
!= inst
.operands
[1].reg
,
13057 _("source1 and dest must be same register"));
13059 switch (inst
.instruction
)
13061 case T_MNEM_asr
: inst
.instruction
= T_OPCODE_ASR_R
; break;
13062 case T_MNEM_lsl
: inst
.instruction
= T_OPCODE_LSL_R
; break;
13063 case T_MNEM_lsr
: inst
.instruction
= T_OPCODE_LSR_R
; break;
13064 case T_MNEM_ror
: inst
.instruction
= T_OPCODE_ROR_R
; break;
13068 inst
.instruction
|= inst
.operands
[0].reg
;
13069 inst
.instruction
|= inst
.operands
[2].reg
<< 3;
13071 /* PR 12854: Error on extraneous shifts. */
13072 constraint (inst
.operands
[2].shifted
,
13073 _("extraneous shift as part of operand to shift insn"));
13077 switch (inst
.instruction
)
13079 case T_MNEM_asr
: inst
.instruction
= T_OPCODE_ASR_I
; break;
13080 case T_MNEM_lsl
: inst
.instruction
= T_OPCODE_LSL_I
; break;
13081 case T_MNEM_lsr
: inst
.instruction
= T_OPCODE_LSR_I
; break;
13082 case T_MNEM_ror
: inst
.error
= _("ror #imm not supported"); return;
13085 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_SHIFT
;
13086 inst
.instruction
|= inst
.operands
[0].reg
;
13087 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
13095 unsigned Rd
, Rn
, Rm
;
13097 Rd
= inst
.operands
[0].reg
;
13098 Rn
= inst
.operands
[1].reg
;
13099 Rm
= inst
.operands
[2].reg
;
13101 reject_bad_reg (Rd
);
13102 reject_bad_reg (Rn
);
13103 reject_bad_reg (Rm
);
13105 inst
.instruction
|= Rd
<< 8;
13106 inst
.instruction
|= Rn
<< 16;
13107 inst
.instruction
|= Rm
;
13113 unsigned Rd
, Rn
, Rm
;
13115 Rd
= inst
.operands
[0].reg
;
13116 Rm
= inst
.operands
[1].reg
;
13117 Rn
= inst
.operands
[2].reg
;
13119 reject_bad_reg (Rd
);
13120 reject_bad_reg (Rn
);
13121 reject_bad_reg (Rm
);
13123 inst
.instruction
|= Rd
<< 8;
13124 inst
.instruction
|= Rn
<< 16;
13125 inst
.instruction
|= Rm
;
13131 unsigned int value
= inst
.relocs
[0].exp
.X_add_number
;
13132 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v7a
),
13133 _("SMC is not permitted on this architecture"));
13134 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
,
13135 _("expression too complex"));
13136 inst
.relocs
[0].type
= BFD_RELOC_UNUSED
;
13137 inst
.instruction
|= (value
& 0xf000) >> 12;
13138 inst
.instruction
|= (value
& 0x0ff0);
13139 inst
.instruction
|= (value
& 0x000f) << 16;
13140 /* PR gas/15623: SMC instructions must be last in an IT block. */
13141 set_it_insn_type_last ();
13147 unsigned int value
= inst
.relocs
[0].exp
.X_add_number
;
13149 inst
.relocs
[0].type
= BFD_RELOC_UNUSED
;
13150 inst
.instruction
|= (value
& 0x0fff);
13151 inst
.instruction
|= (value
& 0xf000) << 4;
13155 do_t_ssat_usat (int bias
)
13159 Rd
= inst
.operands
[0].reg
;
13160 Rn
= inst
.operands
[2].reg
;
13162 reject_bad_reg (Rd
);
13163 reject_bad_reg (Rn
);
13165 inst
.instruction
|= Rd
<< 8;
13166 inst
.instruction
|= inst
.operands
[1].imm
- bias
;
13167 inst
.instruction
|= Rn
<< 16;
13169 if (inst
.operands
[3].present
)
13171 offsetT shift_amount
= inst
.relocs
[0].exp
.X_add_number
;
13173 inst
.relocs
[0].type
= BFD_RELOC_UNUSED
;
13175 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
,
13176 _("expression too complex"));
13178 if (shift_amount
!= 0)
13180 constraint (shift_amount
> 31,
13181 _("shift expression is too large"));
13183 if (inst
.operands
[3].shift_kind
== SHIFT_ASR
)
13184 inst
.instruction
|= 0x00200000; /* sh bit. */
13186 inst
.instruction
|= (shift_amount
& 0x1c) << 10;
13187 inst
.instruction
|= (shift_amount
& 0x03) << 6;
/* Thumb encoding of SSAT: signed saturate (1-based position).
   (Function header reconstructed — the extraction dropped it.)  */
static void
do_t_ssat (void)
{
  do_t_ssat_usat (1);
}
13203 Rd
= inst
.operands
[0].reg
;
13204 Rn
= inst
.operands
[2].reg
;
13206 reject_bad_reg (Rd
);
13207 reject_bad_reg (Rn
);
13209 inst
.instruction
|= Rd
<< 8;
13210 inst
.instruction
|= inst
.operands
[1].imm
- 1;
13211 inst
.instruction
|= Rn
<< 16;
13217 constraint (!inst
.operands
[2].isreg
|| !inst
.operands
[2].preind
13218 || inst
.operands
[2].postind
|| inst
.operands
[2].writeback
13219 || inst
.operands
[2].immisreg
|| inst
.operands
[2].shifted
13220 || inst
.operands
[2].negative
,
13223 constraint (inst
.operands
[2].reg
== REG_PC
, BAD_PC
);
13225 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
13226 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
13227 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
13228 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_OFFSET_U8
;
13234 if (!inst
.operands
[2].present
)
13235 inst
.operands
[2].reg
= inst
.operands
[1].reg
+ 1;
13237 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
13238 || inst
.operands
[0].reg
== inst
.operands
[2].reg
13239 || inst
.operands
[0].reg
== inst
.operands
[3].reg
,
13242 inst
.instruction
|= inst
.operands
[0].reg
;
13243 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
13244 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
13245 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
13251 unsigned Rd
, Rn
, Rm
;
13253 Rd
= inst
.operands
[0].reg
;
13254 Rn
= inst
.operands
[1].reg
;
13255 Rm
= inst
.operands
[2].reg
;
13257 reject_bad_reg (Rd
);
13258 reject_bad_reg (Rn
);
13259 reject_bad_reg (Rm
);
13261 inst
.instruction
|= Rd
<< 8;
13262 inst
.instruction
|= Rn
<< 16;
13263 inst
.instruction
|= Rm
;
13264 inst
.instruction
|= inst
.operands
[3].imm
<< 4;
13272 Rd
= inst
.operands
[0].reg
;
13273 Rm
= inst
.operands
[1].reg
;
13275 reject_bad_reg (Rd
);
13276 reject_bad_reg (Rm
);
13278 if (inst
.instruction
<= 0xffff
13279 && inst
.size_req
!= 4
13280 && Rd
<= 7 && Rm
<= 7
13281 && (!inst
.operands
[2].present
|| inst
.operands
[2].imm
== 0))
13283 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
13284 inst
.instruction
|= Rd
;
13285 inst
.instruction
|= Rm
<< 3;
13287 else if (unified_syntax
)
13289 if (inst
.instruction
<= 0xffff)
13290 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
13291 inst
.instruction
|= Rd
<< 8;
13292 inst
.instruction
|= Rm
;
13293 inst
.instruction
|= inst
.operands
[2].imm
<< 4;
13297 constraint (inst
.operands
[2].present
&& inst
.operands
[2].imm
!= 0,
13298 _("Thumb encoding does not support rotation"));
13299 constraint (1, BAD_HIREG
);
13306 inst
.relocs
[0].type
= BFD_RELOC_ARM_SWI
;
13315 half
= (inst
.instruction
& 0x10) != 0;
13316 set_it_insn_type_last ();
13317 constraint (inst
.operands
[0].immisreg
,
13318 _("instruction requires register index"));
13320 Rn
= inst
.operands
[0].reg
;
13321 Rm
= inst
.operands
[0].imm
;
13323 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8
))
13324 constraint (Rn
== REG_SP
, BAD_SP
);
13325 reject_bad_reg (Rm
);
13327 constraint (!half
&& inst
.operands
[0].shifted
,
13328 _("instruction does not allow shifted index"));
13329 inst
.instruction
|= (Rn
<< 16) | Rm
;
13335 if (!inst
.operands
[0].present
)
13336 inst
.operands
[0].imm
= 0;
13338 if ((unsigned int) inst
.operands
[0].imm
> 255 || inst
.size_req
== 4)
13340 constraint (inst
.size_req
== 2,
13341 _("immediate value out of range"));
13342 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
13343 inst
.instruction
|= (inst
.operands
[0].imm
& 0xf000u
) << 4;
13344 inst
.instruction
|= (inst
.operands
[0].imm
& 0x0fffu
) << 0;
13348 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
13349 inst
.instruction
|= inst
.operands
[0].imm
;
13352 set_it_insn_type (NEUTRAL_IT_INSN
);
/* Thumb encoding of USAT: unsigned saturate (0-based position).
   (Function header reconstructed — the extraction dropped it.)  */
static void
do_t_usat (void)
{
  do_t_ssat_usat (0);
}
13367 Rd
= inst
.operands
[0].reg
;
13368 Rn
= inst
.operands
[2].reg
;
13370 reject_bad_reg (Rd
);
13371 reject_bad_reg (Rn
);
13373 inst
.instruction
|= Rd
<< 8;
13374 inst
.instruction
|= inst
.operands
[1].imm
;
13375 inst
.instruction
|= Rn
<< 16;
13378 /* Checking the range of the branch offset (VAL) with NBITS bits
13379 and IS_SIGNED signedness. Also checks the LSB to be 0. */
13381 v8_1_branch_value_check (int val
, int nbits
, int is_signed
)
13383 gas_assert (nbits
> 0 && nbits
<= 32);
13386 int cmp
= (1 << (nbits
- 1));
13387 if ((val
< -cmp
) || (val
>= cmp
) || (val
& 0x01))
13392 if ((val
<= 0) || (val
>= (1 << nbits
)) || (val
& 0x1))
13398 /* For branches in Armv8.1-M Mainline. */
13400 do_t_branch_future (void)
13402 unsigned long insn
= inst
.instruction
;
13404 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
13405 if (inst
.operands
[0].hasreloc
== 0)
13407 if (v8_1_branch_value_check (inst
.operands
[0].imm
, 5, FALSE
) == FAIL
)
13408 as_bad (BAD_BRANCH_OFF
);
13410 inst
.instruction
|= ((inst
.operands
[0].imm
& 0x1f) >> 1) << 23;
13414 inst
.relocs
[0].type
= BFD_RELOC_THUMB_PCREL_BRANCH5
;
13415 inst
.relocs
[0].pc_rel
= 1;
13421 if (inst
.operands
[1].hasreloc
== 0)
13423 int val
= inst
.operands
[1].imm
;
13424 if (v8_1_branch_value_check (inst
.operands
[1].imm
, 17, TRUE
) == FAIL
)
13425 as_bad (BAD_BRANCH_OFF
);
13427 int immA
= (val
& 0x0001f000) >> 12;
13428 int immB
= (val
& 0x00000ffc) >> 2;
13429 int immC
= (val
& 0x00000002) >> 1;
13430 inst
.instruction
|= (immA
<< 16) | (immB
<< 1) | (immC
<< 11);
13434 inst
.relocs
[1].type
= BFD_RELOC_ARM_THUMB_BF17
;
13435 inst
.relocs
[1].pc_rel
= 1;
13440 if (inst
.operands
[1].hasreloc
== 0)
13442 int val
= inst
.operands
[1].imm
;
13443 if (v8_1_branch_value_check (inst
.operands
[1].imm
, 19, TRUE
) == FAIL
)
13444 as_bad (BAD_BRANCH_OFF
);
13446 int immA
= (val
& 0x0007f000) >> 12;
13447 int immB
= (val
& 0x00000ffc) >> 2;
13448 int immC
= (val
& 0x00000002) >> 1;
13449 inst
.instruction
|= (immA
<< 16) | (immB
<< 1) | (immC
<< 11);
13453 inst
.relocs
[1].type
= BFD_RELOC_ARM_THUMB_BF19
;
13454 inst
.relocs
[1].pc_rel
= 1;
13458 case T_MNEM_bfcsel
:
13460 if (inst
.operands
[1].hasreloc
== 0)
13462 int val
= inst
.operands
[1].imm
;
13463 int immA
= (val
& 0x00001000) >> 12;
13464 int immB
= (val
& 0x00000ffc) >> 2;
13465 int immC
= (val
& 0x00000002) >> 1;
13466 inst
.instruction
|= (immA
<< 16) | (immB
<< 1) | (immC
<< 11);
13470 inst
.relocs
[1].type
= BFD_RELOC_ARM_THUMB_BF13
;
13471 inst
.relocs
[1].pc_rel
= 1;
13475 if (inst
.operands
[2].hasreloc
== 0)
13477 constraint ((inst
.operands
[0].hasreloc
!= 0), BAD_ARGS
);
13478 int val2
= inst
.operands
[2].imm
;
13479 int val0
= inst
.operands
[0].imm
& 0x1f;
13480 int diff
= val2
- val0
;
13482 inst
.instruction
|= 1 << 17; /* T bit. */
13483 else if (diff
!= 2)
13484 as_bad (_("out of range label-relative fixup value"));
13488 constraint ((inst
.operands
[0].hasreloc
== 0), BAD_ARGS
);
13489 inst
.relocs
[2].type
= BFD_RELOC_THUMB_PCREL_BFCSEL
;
13490 inst
.relocs
[2].pc_rel
= 1;
13494 constraint (inst
.cond
!= COND_ALWAYS
, BAD_COND
);
13495 inst
.instruction
|= (inst
.operands
[3].imm
& 0xf) << 18;
13500 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
13507 /* Helper function for do_t_loloop to handle relocations. */
13509 v8_1_loop_reloc (int is_le
)
13511 if (inst
.relocs
[0].exp
.X_op
== O_constant
)
13513 int value
= inst
.relocs
[0].exp
.X_add_number
;
13514 value
= (is_le
) ? -value
: value
;
13516 if (v8_1_branch_value_check (value
, 12, FALSE
) == FAIL
)
13517 as_bad (BAD_BRANCH_OFF
);
13521 immh
= (value
& 0x00000ffc) >> 2;
13522 imml
= (value
& 0x00000002) >> 1;
13524 inst
.instruction
|= (imml
<< 11) | (immh
<< 1);
13528 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_LOOP12
;
13529 inst
.relocs
[0].pc_rel
= 1;
13533 /* To handle the Scalar Low Overhead Loop instructions
13534 in Armv8.1-M Mainline. */
/* NOTE(review): the extraction dropped this function's header (name,
   storage class), its braces, and the switch statement with the
   T_MNEM_{le,dls,wls} case labels that select between the branches
   below (original lines 13535-13537, 13539, 13542-13546, 13549-13556)
   -- restore from upstream before compiling.  */
13538 unsigned long insn
= inst
.instruction
;
/* These insns are not permitted inside an IT block.  */
13540 set_it_insn_type (OUTSIDE_IT_INSN
);
/* All low-overhead-loop encodings are 32-bit Thumb.  */
13541 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
/* LE with no explicit loop-count register operand sets bit 21 (LE without
   LR decrement).  */
13547 if (!inst
.operands
[0].present
)
13548 inst
.instruction
|= 1 << 21;
/* LE: backwards branch reloc.  */
13550 v8_1_loop_reloc (TRUE
);
/* WLS: forwards branch reloc.  */
13554 v8_1_loop_reloc (FALSE
);
13555 /* Fall through. */
/* DLS/WLS: operand 1 must be a register (the loop count).  */
13557 constraint (inst
.operands
[1].isreg
!= 1, BAD_ARGS
);
13558 inst
.instruction
|= (inst
.operands
[1].reg
<< 16);
13565 /* Neon instruction encoder helpers. */
13567 /* Encodings for the different types for various Neon opcodes. */
13569 /* An "invalid" code for the following tables. */
/* One row of NEON_ENC_TAB: the base encoding for each variant of an
   overloaded Neon mnemonic.
   NOTE(review): the extraction dropped the opening brace, the first field
   ('unsigned integer;', original lines 13573-13574) and the closing
   brace/semicolon (13577) -- restore from upstream before compiling.  */
13572 struct neon_tab_entry
/* Encoding for the floating-point (or polynomial) variant.  */
13575 unsigned float_or_poly
;
/* Encoding for the scalar (or immediate) variant.  */
13576 unsigned scalar_or_imm
;
/* Map overloaded Neon opcodes to their respective encodings.  Each row is
   X(mnemonic, integer-variant, float-or-poly-variant, scalar-or-imm-variant);
   N_INV marks a variant that does not exist for that mnemonic.  The table is
   expanded twice below: once to build the N_MNEM_* enumeration and once to
   initialise neon_enc_tab[].  (Reconstructed: the extracted text had source
   line numbers fused into the code.)  */
#define NEON_ENC_TAB \
  X(vabd,     0x0000700, 0x1200d00, N_INV), \
  X(vmax,     0x0000600, 0x0000f00, N_INV), \
  X(vmin,     0x0000610, 0x0200f00, N_INV), \
  X(vpadd,    0x0000b10, 0x1000d00, N_INV), \
  X(vpmax,    0x0000a00, 0x1000f00, N_INV), \
  X(vpmin,    0x0000a10, 0x1200f00, N_INV), \
  X(vadd,     0x0000800, 0x0000d00, N_INV), \
  X(vsub,     0x1000800, 0x0200d00, N_INV), \
  X(vceq,     0x1000810, 0x0000e00, 0x1b10100), \
  X(vcge,     0x0000310, 0x1000e00, 0x1b10080), \
  X(vcgt,     0x0000300, 0x1200e00, 0x1b10000), \
  /* Register variants of the following two instructions are encoded as
     vcge / vcgt with the operands reversed.  */ \
  X(vclt,     0x0000300, 0x1200e00, 0x1b10200), \
  X(vcle,     0x0000310, 0x1000e00, 0x1b10180), \
  X(vfma,     N_INV,     0x0000c10, N_INV), \
  X(vfms,     N_INV,     0x0200c10, N_INV), \
  X(vmla,     0x0000900, 0x0000d10, 0x0800040), \
  X(vmls,     0x1000900, 0x0200d10, 0x0800440), \
  X(vmul,     0x0000910, 0x1000d10, 0x0800840), \
  X(vmull,    0x0800c00, 0x0800e00, 0x0800a40), /* polynomial not float.  */ \
  X(vmlal,    0x0800800, N_INV,     0x0800240), \
  X(vmlsl,    0x0800a00, N_INV,     0x0800640), \
  X(vqdmlal,  0x0800900, N_INV,     0x0800340), \
  X(vqdmlsl,  0x0800b00, N_INV,     0x0800740), \
  X(vqdmull,  0x0800d00, N_INV,     0x0800b40), \
  X(vqdmulh,  0x0000b00, N_INV,     0x0800c40), \
  X(vqrdmulh, 0x1000b00, N_INV,     0x0800d40), \
  X(vqrdmlah, 0x3000b10, N_INV,     0x0800e40), \
  X(vqrdmlsh, 0x3000c10, N_INV,     0x0800f40), \
  X(vshl,     0x0000400, N_INV,     0x0800510), \
  X(vqshl,    0x0000410, N_INV,     0x0800710), \
  X(vand,     0x0000110, N_INV,     0x0800030), \
  X(vbic,     0x0100110, N_INV,     0x0800030), \
  X(veor,     0x1000110, N_INV,     N_INV), \
  X(vorn,     0x0300110, N_INV,     0x0800010), \
  X(vorr,     0x0200110, N_INV,     0x0800010), \
  X(vmvn,     0x1b00580, N_INV,     0x0800030), \
  X(vshll,    0x1b20300, N_INV,     0x0800a10), /* max shift, immediate.  */ \
  X(vcvt,     0x1b30600, N_INV,     0x0800e10), /* integer, fixed-point.  */ \
  X(vdup,     0xe800b10, N_INV,     0x1b00c00), /* arm, scalar.  */ \
  X(vld1,     0x0200000, 0x0a00000, 0x0a00c00), /* interlv, lane, dup.  */ \
  X(vst1,     0x0000000, 0x0800000, N_INV), \
  X(vld2,     0x0200100, 0x0a00100, 0x0a00d00), \
  X(vst2,     0x0000100, 0x0800100, N_INV), \
  X(vld3,     0x0200200, 0x0a00200, 0x0a00e00), \
  X(vst3,     0x0000200, 0x0800200, N_INV), \
  X(vld4,     0x0200300, 0x0a00300, 0x0a00f00), \
  X(vst4,     0x0000300, 0x0800300, N_INV), \
  X(vmovn,    0x1b20200, N_INV,     N_INV), \
  X(vtrn,     0x1b20080, N_INV,     N_INV), \
  X(vqmovn,   0x1b20200, N_INV,     N_INV), \
  X(vqmovun,  0x1b20240, N_INV,     N_INV), \
  X(vnmul,    0xe200a40, 0xe200b40, N_INV), \
  X(vnmla,    0xe100a40, 0xe100b40, N_INV), \
  X(vnmls,    0xe100a00, 0xe100b00, N_INV), \
  X(vfnma,    0xe900a40, 0xe900b40, N_INV), \
  X(vfnms,    0xe900a00, 0xe900b00, N_INV), \
  X(vcmp,     0xeb40a40, 0xeb40b40, N_INV), \
  X(vcmpz,    0xeb50a40, 0xeb50b40, N_INV), \
  X(vcmpe,    0xeb40ac0, 0xeb40bc0, N_INV), \
  X(vcmpez,   0xeb50ac0, 0xeb50bc0, N_INV), \
  X(vseleq,   0xe000a00, N_INV,     N_INV), \
  X(vselvs,   0xe100a00, N_INV,     N_INV), \
  X(vselge,   0xe200a00, N_INV,     N_INV), \
  X(vselgt,   0xe300a00, N_INV,     N_INV), \
  X(vmaxnm,   0xe800a00, 0x3000f10, N_INV), \
  X(vminnm,   0xe800a40, 0x3200f10, N_INV), \
  X(vcvta,    0xebc0a40, 0x3bb0000, N_INV), \
  X(vrintr,   0xeb60a40, 0x3ba0400, N_INV), \
  X(vrinta,   0xeb80a40, 0x3ba0400, N_INV), \
  X(aes,      0x3b00300, N_INV,     N_INV), \
  X(sha3op,   0x2000c00, N_INV,     N_INV), \
  X(sha1h,    0x3b902c0, N_INV,     N_INV), \
  X(sha2op,   0x3ba0380, N_INV,     N_INV)
/* First expansion of NEON_ENC_TAB: each row becomes an N_MNEM_* enumerator.
   NOTE(review): the enclosing enum declaration and the #undef X between the
   two expansions (around original lines 13656-13663, 13665) were dropped by
   the extraction -- restore from upstream before compiling.  */
13659 #define X(OPC,I,F,S) N_MNEM_##OPC
/* Encoding table, indexed by the N_MNEM_* value produced above.  */
13664 static const struct neon_tab_entry neon_enc_tab
[] =
/* Second expansion: each row becomes a brace-initialised neon_tab_entry.  */
13666 #define X(OPC,I,F,S) { (I), (F), (S) }
13671 /* Do not use these macros; instead, use NEON_ENCODE defined below. */
13672 #define NEON_ENC_INTEGER_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
13673 #define NEON_ENC_ARMREG_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
13674 #define NEON_ENC_POLY_(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
13675 #define NEON_ENC_FLOAT_(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
13676 #define NEON_ENC_SCALAR_(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
13677 #define NEON_ENC_IMMED_(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
13678 #define NEON_ENC_INTERLV_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
13679 #define NEON_ENC_LANE_(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
13680 #define NEON_ENC_DUP_(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
13681 #define NEON_ENC_SINGLE_(X) \
13682 ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf0000000))
13683 #define NEON_ENC_DOUBLE_(X) \
13684 ((neon_enc_tab[(X) & 0x0fffffff].float_or_poly) | ((X) & 0xf0000000))
13685 #define NEON_ENC_FPV8_(X) \
13686 ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf000000))
/* Select the encoding variant TYPE (INTEGER, FLOAT, SCALAR, ...) for the
   mnemonic currently in inst.instruction, and mark the insn as Neon so that
   check_neon_suffixes below accepts its type suffix.
   NOTE(review): the do { ... } while (0) wrapper lines of both macros
   (original lines 13689-13690, 13693-13695, 13697-13698, 13700, 13702-13705)
   were dropped by the extraction -- restore from upstream.  */
13688 #define NEON_ENCODE(type, inst) \
13691 inst.instruction = NEON_ENC_##type##_ (inst.instruction); \
13692 inst.is_neon = 1; \
/* Diagnose a .type suffix on an instruction that never went through a Neon
   encoder.  */
13696 #define check_neon_suffixes \
13699 if (!inst.error && inst.vectype.elems > 0 && !inst.is_neon) \
13701 as_bad (_("invalid neon suffix for non neon instruction")); \
13707 /* Define shapes for instruction operands. The following mnemonic characters
13708 are used in this table:
13710 F - VFP S<n> register
13711 D - Neon D<n> register
13712 Q - Neon Q<n> register
13716 L - D<n> register list
13718 This table is used to generate various data:
13719 - enumerations of the form NS_DDR to be used as arguments to
13721 - a table classifying shapes into single, double, quad, mixed.
13722 - a table used to drive neon_select_shape. */
/* Operand-shape table: each row is X(operand-count, (operand kinds), class).
   Expanded below to build the NS_* enumeration, the shape classification
   table and the table driving neon_select_shape.  (Reconstructed: the
   extracted text had source line numbers fused into the code.)  */
#define NEON_SHAPE_DEF \
  X(3, (D, D, D), DOUBLE), \
  X(3, (Q, Q, Q), QUAD), \
  X(3, (D, D, I), DOUBLE), \
  X(3, (Q, Q, I), QUAD), \
  X(3, (D, D, S), DOUBLE), \
  X(3, (Q, Q, S), QUAD), \
  X(2, (D, D), DOUBLE), \
  X(2, (Q, Q), QUAD), \
  X(2, (D, S), DOUBLE), \
  X(2, (Q, S), QUAD), \
  X(2, (D, R), DOUBLE), \
  X(2, (Q, R), QUAD), \
  X(2, (D, I), DOUBLE), \
  X(2, (Q, I), QUAD), \
  X(3, (D, L, D), DOUBLE), \
  X(2, (D, Q), MIXED), \
  X(2, (Q, D), MIXED), \
  X(3, (D, Q, I), MIXED), \
  X(3, (Q, D, I), MIXED), \
  X(3, (Q, D, D), MIXED), \
  X(3, (D, Q, Q), MIXED), \
  X(3, (Q, Q, D), MIXED), \
  X(3, (Q, D, S), MIXED), \
  X(3, (D, Q, S), MIXED), \
  X(4, (D, D, D, I), DOUBLE), \
  X(4, (Q, Q, Q, I), QUAD), \
  X(4, (D, D, S, I), DOUBLE), \
  X(4, (Q, Q, S, I), QUAD), \
  X(2, (F, F), SINGLE), \
  X(3, (F, F, F), SINGLE), \
  X(2, (F, I), SINGLE), \
  X(2, (F, D), MIXED), \
  X(2, (D, F), MIXED), \
  X(3, (F, F, I), MIXED), \
  X(4, (R, R, F, F), SINGLE), \
  X(4, (F, F, R, R), SINGLE), \
  X(3, (D, R, R), DOUBLE), \
  X(3, (R, R, D), DOUBLE), \
  X(2, (S, R), SINGLE), \
  X(2, (R, S), SINGLE), \
  X(2, (F, R), SINGLE), \
  X(2, (R, F), SINGLE), \
  /* Half float shape supported so far.  */ \
  X (2, (H, D), MIXED), \
  X (2, (D, H), MIXED), \
  X (2, (H, F), MIXED), \
  X (2, (F, H), MIXED), \
  X (2, (H, H), HALF), \
  X (2, (H, R), HALF), \
  X (2, (R, H), HALF), \
  X (2, (H, I), HALF), \
  X (3, (H, H, H), HALF), \
  X (3, (H, F, I), MIXED), \
  X (3, (F, H, I), MIXED), \
  X (3, (D, H, H), MIXED), \
  X (3, (D, H, S), MIXED)
/* Paste operand-kind letters into an NS_* shape enumerator name, e.g.
   S3(D,D,D) -> NS_DDD.  (Reconstructed: the extracted text had source line
   numbers fused into the code.)  */
#define S2(A,B)     NS_##A##B
#define S3(A,B,C)   NS_##A##B##C
#define S4(A,B,C,D) NS_##A##B##C##D

/* Expand one NEON_SHAPE_DEF row into its NS_* enumerator; the class
   argument C is unused here.  */
#define X(N, L, C) S##N L
/* Classification of each shape as single/double/quad/mixed/half.
   NOTE(review): the extraction dropped the enum body, #undef lines, and the
   initialisers ({ NEON_SHAPE_DEF }) of both tables, plus the 'int els;'
   field of struct neon_shape_info (original lines 13800-13807, 13809,
   13811-13828, 13831-13841, 13843-13844, 13846) -- restore from upstream.  */
13799 enum neon_shape_class
/* Expansion mapping each NEON_SHAPE_DEF row to its SC_* class.  */
13808 #define X(N, L, C) SC_##C
/* Per-shape class table, indexed by NS_* value.  */
13810 static enum neon_shape_class neon_shape_class
[] =
13829 /* Register widths of above. */
13830 static unsigned neon_shape_el_size
[] =
/* Per-shape operand-kind list used by neon_select_shape.  */
13842 struct neon_shape_info
13845 enum neon_shape_el el
[NEON_MAX_TYPE_ELS
];
/* Paste operand-kind letters into brace-initialiser lists of SE_*
   enumerators, for building neon_shape_tab[], e.g. S2(D,D) ->
   { SE_D, SE_D }.  (Reconstructed: the extracted text had source line
   numbers fused into the code.)  */
#define S2(A,B)       { SE_##A, SE_##B }
#define S3(A,B,C)     { SE_##A, SE_##B, SE_##C }
#define S4(A,B,C,D)   { SE_##A, SE_##B, SE_##C, SE_##D }

/* Expand one NEON_SHAPE_DEF row into a neon_shape_info initialiser:
   the element count followed by the SE_* list.  */
#define X(N, L, C) { N, S##N L }
13854 static struct neon_shape_info neon_shape_tab
[] =
13864 /* Bit masks used in type checking given instructions.
13865 'N_EQK' means the type must be the same as (or based on in some way) the key
13866 type, which itself is marked with the 'N_KEY' bit. If the 'N_EQK' bit is
13867 set, various other bits can be set as well in order to modify the meaning of
13868 the type constraint. */
/* NOTE(review): the extraction dropped the enum's opening brace and all the
   per-type enumerators N_S8 ... N_P64 (original lines 13871-13893), as well
   as the closing brace (13907) -- restore from upstream before compiling.  */
13870 enum neon_type_mask
13894 N_KEY
= 0x1000000, /* Key element (main type specifier). */
13895 N_EQK
= 0x2000000, /* Given operand has the same type & size as the key. */
13896 N_VFP
= 0x4000000, /* VFP mode: operand size must match register width. */
13897 N_UNT
= 0x8000000, /* Must be explicitly untyped. */
13898 N_DBL
= 0x0000001, /* If N_EQK, this operand is twice the size. */
13899 N_HLF
= 0x0000002, /* If N_EQK, this operand is half the size. */
13900 N_SGN
= 0x0000004, /* If N_EQK, this operand is forced to be signed. */
13901 N_UNS
= 0x0000008, /* If N_EQK, this operand is forced to be unsigned. */
13902 N_INT
= 0x0000010, /* If N_EQK, this operand is forced to be integer. */
13903 N_FLT
= 0x0000020, /* If N_EQK, this operand is forced to be float. */
13904 N_SIZ
= 0x0000040, /* If N_EQK, this operand is forced to be size-only. */
/* Largest non-special (non-N_KEY/N_EQK/...) mask bit, used as a loop bound
   by modify_types_allowed.  */
13906 N_MAX_NONSPECIAL
= N_P64
/* Convenience combinations of the neon_type_mask bits above.
   (Reconstructed: the extracted text had source line numbers fused into the
   code.)  */
#define N_ALLMODS  (N_DBL | N_HLF | N_SGN | N_UNS | N_INT | N_FLT | N_SIZ)

#define N_SU_ALL   (N_S8 | N_S16 | N_S32 | N_S64 | N_U8 | N_U16 | N_U32 | N_U64)
#define N_SU_32    (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)
#define N_SU_16_64 (N_S16 | N_S32 | N_S64 | N_U16 | N_U32 | N_U64)
#define N_S_32     (N_S8 | N_S16 | N_S32)
#define N_F_16_32  (N_F16 | N_F32)
#define N_SUF_32   (N_SU_32 | N_F_16_32)
#define N_I_ALL    (N_I8 | N_I16 | N_I32 | N_I64)
#define N_IF_32    (N_I8 | N_I16 | N_I32 | N_F16 | N_F32)
#define N_F_ALL    (N_F16 | N_F32 | N_F64)
/* Pass this as the first type argument to neon_check_type to ignore types
   altogether.  (Reconstructed: the extracted text had source line numbers
   fused into the code and a truncated comment.)  */
#define N_IGNORE_TYPE (N_KEY | N_EQK)
13925 /* Select a "shape" for the current instruction (describing register types or
13926 sizes) from a list of alternatives. Return NS_NULL if the current instruction
13927 doesn't fit. For non-polymorphic shapes, checking is usually done as a
13928 function of operand parsing, so this function doesn't need to be called.
13929 Shapes should be listed in order of decreasing length. */
13931 static enum neon_shape
13932 neon_select_shape (enum neon_shape shape
, ...)
13935 enum neon_shape first_shape
= shape
;
13937 /* Fix missing optional operands. FIXME: we don't know at this point how
13938 many arguments we should have, so this makes the assumption that we have
13939 > 1. This is true of all current Neon opcodes, I think, but may not be
13940 true in the future. */
13941 if (!inst
.operands
[1].present
)
13942 inst
.operands
[1] = inst
.operands
[0];
13944 va_start (ap
, shape
);
13946 for (; shape
!= NS_NULL
; shape
= (enum neon_shape
) va_arg (ap
, int))
13951 for (j
= 0; j
< neon_shape_tab
[shape
].els
; j
++)
13953 if (!inst
.operands
[j
].present
)
13959 switch (neon_shape_tab
[shape
].el
[j
])
13961 /* If a .f16, .16, .u16, .s16 type specifier is given over
13962 a VFP single precision register operand, it's essentially
13963 means only half of the register is used.
13965 If the type specifier is given after the mnemonics, the
13966 information is stored in inst.vectype. If the type specifier
13967 is given after register operand, the information is stored
13968 in inst.operands[].vectype.
13970 When there is only one type specifier, and all the register
13971 operands are the same type of hardware register, the type
13972 specifier applies to all register operands.
13974 If no type specifier is given, the shape is inferred from
13975 operand information.
13978 vadd.f16 s0, s1, s2: NS_HHH
13979 vabs.f16 s0, s1: NS_HH
13980 vmov.f16 s0, r1: NS_HR
13981 vmov.f16 r0, s1: NS_RH
13982 vcvt.f16 r0, s1: NS_RH
13983 vcvt.f16.s32 s2, s2, #29: NS_HFI
13984 vcvt.f16.s32 s2, s2: NS_HF
13987 if (!(inst
.operands
[j
].isreg
13988 && inst
.operands
[j
].isvec
13989 && inst
.operands
[j
].issingle
13990 && !inst
.operands
[j
].isquad
13991 && ((inst
.vectype
.elems
== 1
13992 && inst
.vectype
.el
[0].size
== 16)
13993 || (inst
.vectype
.elems
> 1
13994 && inst
.vectype
.el
[j
].size
== 16)
13995 || (inst
.vectype
.elems
== 0
13996 && inst
.operands
[j
].vectype
.type
!= NT_invtype
13997 && inst
.operands
[j
].vectype
.size
== 16))))
14002 if (!(inst
.operands
[j
].isreg
14003 && inst
.operands
[j
].isvec
14004 && inst
.operands
[j
].issingle
14005 && !inst
.operands
[j
].isquad
14006 && ((inst
.vectype
.elems
== 1 && inst
.vectype
.el
[0].size
== 32)
14007 || (inst
.vectype
.elems
> 1 && inst
.vectype
.el
[j
].size
== 32)
14008 || (inst
.vectype
.elems
== 0
14009 && (inst
.operands
[j
].vectype
.size
== 32
14010 || inst
.operands
[j
].vectype
.type
== NT_invtype
)))))
14015 if (!(inst
.operands
[j
].isreg
14016 && inst
.operands
[j
].isvec
14017 && !inst
.operands
[j
].isquad
14018 && !inst
.operands
[j
].issingle
))
14023 if (!(inst
.operands
[j
].isreg
14024 && !inst
.operands
[j
].isvec
))
14029 if (!(inst
.operands
[j
].isreg
14030 && inst
.operands
[j
].isvec
14031 && inst
.operands
[j
].isquad
14032 && !inst
.operands
[j
].issingle
))
14037 if (!(!inst
.operands
[j
].isreg
14038 && !inst
.operands
[j
].isscalar
))
14043 if (!(!inst
.operands
[j
].isreg
14044 && inst
.operands
[j
].isscalar
))
14054 if (matches
&& (j
>= ARM_IT_MAX_OPERANDS
|| !inst
.operands
[j
].present
))
14055 /* We've matched all the entries in the shape table, and we don't
14056 have any left over operands which have not been matched. */
14062 if (shape
== NS_NULL
&& first_shape
!= NS_NULL
)
14063 first_error (_("invalid instruction shape"));
14068 /* True if SHAPE is predominantly a quadword operation (most of the time, this
14069 means the Q bit should be set). */
14072 neon_quad (enum neon_shape shape
)
14074 return neon_shape_class
[shape
] == SC_QUAD
;
14078 neon_modify_type_size (unsigned typebits
, enum neon_el_type
*g_type
,
14081 /* Allow modification to be made to types which are constrained to be
14082 based on the key element, based on bits set alongside N_EQK. */
14083 if ((typebits
& N_EQK
) != 0)
14085 if ((typebits
& N_HLF
) != 0)
14087 else if ((typebits
& N_DBL
) != 0)
14089 if ((typebits
& N_SGN
) != 0)
14090 *g_type
= NT_signed
;
14091 else if ((typebits
& N_UNS
) != 0)
14092 *g_type
= NT_unsigned
;
14093 else if ((typebits
& N_INT
) != 0)
14094 *g_type
= NT_integer
;
14095 else if ((typebits
& N_FLT
) != 0)
14096 *g_type
= NT_float
;
14097 else if ((typebits
& N_SIZ
) != 0)
14098 *g_type
= NT_untyped
;
14102 /* Return operand OPNO promoted by bits set in THISARG. KEY should be the "key"
14103 operand type, i.e. the single type specified in a Neon instruction when it
14104 is the only one given. */
14106 static struct neon_type_el
14107 neon_type_promote (struct neon_type_el
*key
, unsigned thisarg
)
14109 struct neon_type_el dest
= *key
;
14111 gas_assert ((thisarg
& N_EQK
) != 0);
14113 neon_modify_type_size (thisarg
, &dest
.type
, &dest
.size
);
14118 /* Convert Neon type and size into compact bitmask representation. */
14120 static enum neon_type_mask
14121 type_chk_of_el_type (enum neon_el_type type
, unsigned size
)
14128 case 8: return N_8
;
14129 case 16: return N_16
;
14130 case 32: return N_32
;
14131 case 64: return N_64
;
14139 case 8: return N_I8
;
14140 case 16: return N_I16
;
14141 case 32: return N_I32
;
14142 case 64: return N_I64
;
14150 case 16: return N_F16
;
14151 case 32: return N_F32
;
14152 case 64: return N_F64
;
14160 case 8: return N_P8
;
14161 case 16: return N_P16
;
14162 case 64: return N_P64
;
14170 case 8: return N_S8
;
14171 case 16: return N_S16
;
14172 case 32: return N_S32
;
14173 case 64: return N_S64
;
14181 case 8: return N_U8
;
14182 case 16: return N_U16
;
14183 case 32: return N_U32
;
14184 case 64: return N_U64
;
14195 /* Convert compact Neon bitmask type representation to a type and size. Only
14196 handles the case where a single bit is set in the mask. */
14199 el_type_of_type_chk (enum neon_el_type
*type
, unsigned *size
,
14200 enum neon_type_mask mask
)
14202 if ((mask
& N_EQK
) != 0)
14205 if ((mask
& (N_S8
| N_U8
| N_I8
| N_8
| N_P8
)) != 0)
14207 else if ((mask
& (N_S16
| N_U16
| N_I16
| N_16
| N_F16
| N_P16
)) != 0)
14209 else if ((mask
& (N_S32
| N_U32
| N_I32
| N_32
| N_F32
)) != 0)
14211 else if ((mask
& (N_S64
| N_U64
| N_I64
| N_64
| N_F64
| N_P64
)) != 0)
14216 if ((mask
& (N_S8
| N_S16
| N_S32
| N_S64
)) != 0)
14218 else if ((mask
& (N_U8
| N_U16
| N_U32
| N_U64
)) != 0)
14219 *type
= NT_unsigned
;
14220 else if ((mask
& (N_I8
| N_I16
| N_I32
| N_I64
)) != 0)
14221 *type
= NT_integer
;
14222 else if ((mask
& (N_8
| N_16
| N_32
| N_64
)) != 0)
14223 *type
= NT_untyped
;
14224 else if ((mask
& (N_P8
| N_P16
| N_P64
)) != 0)
14226 else if ((mask
& (N_F_ALL
)) != 0)
14234 /* Modify a bitmask of allowed types. This is only needed for type
14238 modify_types_allowed (unsigned allowed
, unsigned mods
)
14241 enum neon_el_type type
;
14247 for (i
= 1; i
<= N_MAX_NONSPECIAL
; i
<<= 1)
14249 if (el_type_of_type_chk (&type
, &size
,
14250 (enum neon_type_mask
) (allowed
& i
)) == SUCCESS
)
14252 neon_modify_type_size (mods
, &type
, &size
);
14253 destmask
|= type_chk_of_el_type (type
, size
);
14260 /* Check type and return type classification.
14261 The manual states (paraphrase): If one datatype is given, it indicates the
14263 - the second operand, if there is one
14264 - the operand, if there is no second operand
14265 - the result, if there are no operands.
14266 This isn't quite good enough though, so we use a concept of a "key" datatype
14267 which is set on a per-instruction basis, which is the one which matters when
14268 only one data type is written.
14269 Note: this function has side-effects (e.g. filling in missing operands). All
14270 Neon instructions should call it before performing bit encoding. */
14272 static struct neon_type_el
14273 neon_check_type (unsigned els
, enum neon_shape ns
, ...)
14276 unsigned i
, pass
, key_el
= 0;
14277 unsigned types
[NEON_MAX_TYPE_ELS
];
14278 enum neon_el_type k_type
= NT_invtype
;
14279 unsigned k_size
= -1u;
14280 struct neon_type_el badtype
= {NT_invtype
, -1};
14281 unsigned key_allowed
= 0;
14283 /* Optional registers in Neon instructions are always (not) in operand 1.
14284 Fill in the missing operand here, if it was omitted. */
14285 if (els
> 1 && !inst
.operands
[1].present
)
14286 inst
.operands
[1] = inst
.operands
[0];
14288 /* Suck up all the varargs. */
14290 for (i
= 0; i
< els
; i
++)
14292 unsigned thisarg
= va_arg (ap
, unsigned);
14293 if (thisarg
== N_IGNORE_TYPE
)
14298 types
[i
] = thisarg
;
14299 if ((thisarg
& N_KEY
) != 0)
14304 if (inst
.vectype
.elems
> 0)
14305 for (i
= 0; i
< els
; i
++)
14306 if (inst
.operands
[i
].vectype
.type
!= NT_invtype
)
14308 first_error (_("types specified in both the mnemonic and operands"));
14312 /* Duplicate inst.vectype elements here as necessary.
14313 FIXME: No idea if this is exactly the same as the ARM assembler,
14314 particularly when an insn takes one register and one non-register
14316 if (inst
.vectype
.elems
== 1 && els
> 1)
14319 inst
.vectype
.elems
= els
;
14320 inst
.vectype
.el
[key_el
] = inst
.vectype
.el
[0];
14321 for (j
= 0; j
< els
; j
++)
14323 inst
.vectype
.el
[j
] = neon_type_promote (&inst
.vectype
.el
[key_el
],
14326 else if (inst
.vectype
.elems
== 0 && els
> 0)
14329 /* No types were given after the mnemonic, so look for types specified
14330 after each operand. We allow some flexibility here; as long as the
14331 "key" operand has a type, we can infer the others. */
14332 for (j
= 0; j
< els
; j
++)
14333 if (inst
.operands
[j
].vectype
.type
!= NT_invtype
)
14334 inst
.vectype
.el
[j
] = inst
.operands
[j
].vectype
;
14336 if (inst
.operands
[key_el
].vectype
.type
!= NT_invtype
)
14338 for (j
= 0; j
< els
; j
++)
14339 if (inst
.operands
[j
].vectype
.type
== NT_invtype
)
14340 inst
.vectype
.el
[j
] = neon_type_promote (&inst
.vectype
.el
[key_el
],
14345 first_error (_("operand types can't be inferred"));
14349 else if (inst
.vectype
.elems
!= els
)
14351 first_error (_("type specifier has the wrong number of parts"));
14355 for (pass
= 0; pass
< 2; pass
++)
14357 for (i
= 0; i
< els
; i
++)
14359 unsigned thisarg
= types
[i
];
14360 unsigned types_allowed
= ((thisarg
& N_EQK
) != 0 && pass
!= 0)
14361 ? modify_types_allowed (key_allowed
, thisarg
) : thisarg
;
14362 enum neon_el_type g_type
= inst
.vectype
.el
[i
].type
;
14363 unsigned g_size
= inst
.vectype
.el
[i
].size
;
14365 /* Decay more-specific signed & unsigned types to sign-insensitive
14366 integer types if sign-specific variants are unavailable. */
14367 if ((g_type
== NT_signed
|| g_type
== NT_unsigned
)
14368 && (types_allowed
& N_SU_ALL
) == 0)
14369 g_type
= NT_integer
;
14371 /* If only untyped args are allowed, decay any more specific types to
14372 them. Some instructions only care about signs for some element
14373 sizes, so handle that properly. */
14374 if (((types_allowed
& N_UNT
) == 0)
14375 && ((g_size
== 8 && (types_allowed
& N_8
) != 0)
14376 || (g_size
== 16 && (types_allowed
& N_16
) != 0)
14377 || (g_size
== 32 && (types_allowed
& N_32
) != 0)
14378 || (g_size
== 64 && (types_allowed
& N_64
) != 0)))
14379 g_type
= NT_untyped
;
14383 if ((thisarg
& N_KEY
) != 0)
14387 key_allowed
= thisarg
& ~N_KEY
;
14389 /* Check architecture constraint on FP16 extension. */
14391 && k_type
== NT_float
14392 && ! ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_fp16
))
14394 inst
.error
= _(BAD_FP16
);
14401 if ((thisarg
& N_VFP
) != 0)
14403 enum neon_shape_el regshape
;
14404 unsigned regwidth
, match
;
14406 /* PR 11136: Catch the case where we are passed a shape of NS_NULL. */
14409 first_error (_("invalid instruction shape"));
14412 regshape
= neon_shape_tab
[ns
].el
[i
];
14413 regwidth
= neon_shape_el_size
[regshape
];
14415 /* In VFP mode, operands must match register widths. If we
14416 have a key operand, use its width, else use the width of
14417 the current operand. */
14423 /* FP16 will use a single precision register. */
14424 if (regwidth
== 32 && match
== 16)
14426 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_fp16
))
14430 inst
.error
= _(BAD_FP16
);
14435 if (regwidth
!= match
)
14437 first_error (_("operand size must match register width"));
14442 if ((thisarg
& N_EQK
) == 0)
14444 unsigned given_type
= type_chk_of_el_type (g_type
, g_size
);
14446 if ((given_type
& types_allowed
) == 0)
14448 first_error (_("bad type in Neon instruction"));
14454 enum neon_el_type mod_k_type
= k_type
;
14455 unsigned mod_k_size
= k_size
;
14456 neon_modify_type_size (thisarg
, &mod_k_type
, &mod_k_size
);
14457 if (g_type
!= mod_k_type
|| g_size
!= mod_k_size
)
14459 first_error (_("inconsistent types in Neon instruction"));
14467 return inst
.vectype
.el
[key_el
];
14470 /* Neon-style VFP instruction forwarding. */
14472 /* Thumb VFP instructions have 0xE in the condition field. */
/* NOTE(review): the extraction dropped this function's storage-class line,
   braces, and the thumb_mode if/else that selects between the two stores
   below (original lines 14473-14479, 14481, 14483) -- restore from
   upstream.  In Thumb mode the condition field is forced to 0xE; in ARM
   mode the parsed condition is placed in bits 28-31.  */
14475 do_vfp_cond_or_thumb (void)
14480 inst
.instruction
|= 0xe0000000;
14482 inst
.instruction
|= inst
.cond
<< 28;
14485 /* Look up and encode a simple mnemonic, for use as a helper function for the
14486 Neon-style VFP syntax. This avoids duplication of bits of the insns table,
14487 etc. It is assumed that operand parsing has already been done, and that the
14488 operands are in the form expected by the given opcode (this isn't necessarily
14489 the same as the form in which they were parsed, hence some massaging must
14490 take place before this function is called).
14491 Checks current arch version against that in the looked-up opcode. */
14494 do_vfp_nsyn_opcode (const char *opname
)
14496 const struct asm_opcode
*opcode
;
14498 opcode
= (const struct asm_opcode
*) hash_find (arm_ops_hsh
, opname
);
14503 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
,
14504 thumb_mode
? *opcode
->tvariant
: *opcode
->avariant
),
14511 inst
.instruction
= opcode
->tvalue
;
14512 opcode
->tencode ();
14516 inst
.instruction
= (inst
.cond
<< 28) | opcode
->avalue
;
14517 opcode
->aencode ();
14522 do_vfp_nsyn_add_sub (enum neon_shape rs
)
14524 int is_add
= (inst
.instruction
& 0x0fffffff) == N_MNEM_vadd
;
14526 if (rs
== NS_FFF
|| rs
== NS_HHH
)
14529 do_vfp_nsyn_opcode ("fadds");
14531 do_vfp_nsyn_opcode ("fsubs");
14533 /* ARMv8.2 fp16 instruction. */
14535 do_scalar_fp16_v82_encode ();
14540 do_vfp_nsyn_opcode ("faddd");
14542 do_vfp_nsyn_opcode ("fsubd");
14546 /* Check operand types to see if this is a VFP instruction, and if so call
14550 try_vfp_nsyn (int args
, void (*pfn
) (enum neon_shape
))
14552 enum neon_shape rs
;
14553 struct neon_type_el et
;
14558 rs
= neon_select_shape (NS_HH
, NS_FF
, NS_DD
, NS_NULL
);
14559 et
= neon_check_type (2, rs
, N_EQK
| N_VFP
, N_F_ALL
| N_KEY
| N_VFP
);
14563 rs
= neon_select_shape (NS_HHH
, NS_FFF
, NS_DDD
, NS_NULL
);
14564 et
= neon_check_type (3, rs
, N_EQK
| N_VFP
, N_EQK
| N_VFP
,
14565 N_F_ALL
| N_KEY
| N_VFP
);
14572 if (et
.type
!= NT_invtype
)
14583 do_vfp_nsyn_mla_mls (enum neon_shape rs
)
14585 int is_mla
= (inst
.instruction
& 0x0fffffff) == N_MNEM_vmla
;
14587 if (rs
== NS_FFF
|| rs
== NS_HHH
)
14590 do_vfp_nsyn_opcode ("fmacs");
14592 do_vfp_nsyn_opcode ("fnmacs");
14594 /* ARMv8.2 fp16 instruction. */
14596 do_scalar_fp16_v82_encode ();
14601 do_vfp_nsyn_opcode ("fmacd");
14603 do_vfp_nsyn_opcode ("fnmacd");
14608 do_vfp_nsyn_fma_fms (enum neon_shape rs
)
14610 int is_fma
= (inst
.instruction
& 0x0fffffff) == N_MNEM_vfma
;
14612 if (rs
== NS_FFF
|| rs
== NS_HHH
)
14615 do_vfp_nsyn_opcode ("ffmas");
14617 do_vfp_nsyn_opcode ("ffnmas");
14619 /* ARMv8.2 fp16 instruction. */
14621 do_scalar_fp16_v82_encode ();
14626 do_vfp_nsyn_opcode ("ffmad");
14628 do_vfp_nsyn_opcode ("ffnmad");
14633 do_vfp_nsyn_mul (enum neon_shape rs
)
14635 if (rs
== NS_FFF
|| rs
== NS_HHH
)
14637 do_vfp_nsyn_opcode ("fmuls");
14639 /* ARMv8.2 fp16 instruction. */
14641 do_scalar_fp16_v82_encode ();
14644 do_vfp_nsyn_opcode ("fmuld");
14648 do_vfp_nsyn_abs_neg (enum neon_shape rs
)
14650 int is_neg
= (inst
.instruction
& 0x80) != 0;
14651 neon_check_type (2, rs
, N_EQK
| N_VFP
, N_F_ALL
| N_VFP
| N_KEY
);
14653 if (rs
== NS_FF
|| rs
== NS_HH
)
14656 do_vfp_nsyn_opcode ("fnegs");
14658 do_vfp_nsyn_opcode ("fabss");
14660 /* ARMv8.2 fp16 instruction. */
14662 do_scalar_fp16_v82_encode ();
14667 do_vfp_nsyn_opcode ("fnegd");
14669 do_vfp_nsyn_opcode ("fabsd");
14673 /* Encode single-precision (only!) VFP fldm/fstm instructions. Double precision
14674 insns belong to Neon, and are handled elsewhere. */
14677 do_vfp_nsyn_ldm_stm (int is_dbmode
)
14679 int is_ldm
= (inst
.instruction
& (1 << 20)) != 0;
14683 do_vfp_nsyn_opcode ("fldmdbs");
14685 do_vfp_nsyn_opcode ("fldmias");
14690 do_vfp_nsyn_opcode ("fstmdbs");
14692 do_vfp_nsyn_opcode ("fstmias");
14697 do_vfp_nsyn_sqrt (void)
14699 enum neon_shape rs
= neon_select_shape (NS_HH
, NS_FF
, NS_DD
, NS_NULL
);
14700 neon_check_type (2, rs
, N_EQK
| N_VFP
, N_F_ALL
| N_KEY
| N_VFP
);
14702 if (rs
== NS_FF
|| rs
== NS_HH
)
14704 do_vfp_nsyn_opcode ("fsqrts");
14706 /* ARMv8.2 fp16 instruction. */
14708 do_scalar_fp16_v82_encode ();
14711 do_vfp_nsyn_opcode ("fsqrtd");
14715 do_vfp_nsyn_div (void)
14717 enum neon_shape rs
= neon_select_shape (NS_HHH
, NS_FFF
, NS_DDD
, NS_NULL
);
14718 neon_check_type (3, rs
, N_EQK
| N_VFP
, N_EQK
| N_VFP
,
14719 N_F_ALL
| N_KEY
| N_VFP
);
14721 if (rs
== NS_FFF
|| rs
== NS_HHH
)
14723 do_vfp_nsyn_opcode ("fdivs");
14725 /* ARMv8.2 fp16 instruction. */
14727 do_scalar_fp16_v82_encode ();
14730 do_vfp_nsyn_opcode ("fdivd");
14734 do_vfp_nsyn_nmul (void)
14736 enum neon_shape rs
= neon_select_shape (NS_HHH
, NS_FFF
, NS_DDD
, NS_NULL
);
14737 neon_check_type (3, rs
, N_EQK
| N_VFP
, N_EQK
| N_VFP
,
14738 N_F_ALL
| N_KEY
| N_VFP
);
14740 if (rs
== NS_FFF
|| rs
== NS_HHH
)
14742 NEON_ENCODE (SINGLE
, inst
);
14743 do_vfp_sp_dyadic ();
14745 /* ARMv8.2 fp16 instruction. */
14747 do_scalar_fp16_v82_encode ();
14751 NEON_ENCODE (DOUBLE
, inst
);
14752 do_vfp_dp_rd_rn_rm ();
14754 do_vfp_cond_or_thumb ();
14759 do_vfp_nsyn_cmp (void)
14761 enum neon_shape rs
;
14762 if (inst
.operands
[1].isreg
)
14764 rs
= neon_select_shape (NS_HH
, NS_FF
, NS_DD
, NS_NULL
);
14765 neon_check_type (2, rs
, N_EQK
| N_VFP
, N_F_ALL
| N_KEY
| N_VFP
);
14767 if (rs
== NS_FF
|| rs
== NS_HH
)
14769 NEON_ENCODE (SINGLE
, inst
);
14770 do_vfp_sp_monadic ();
14774 NEON_ENCODE (DOUBLE
, inst
);
14775 do_vfp_dp_rd_rm ();
14780 rs
= neon_select_shape (NS_HI
, NS_FI
, NS_DI
, NS_NULL
);
14781 neon_check_type (2, rs
, N_F_ALL
| N_KEY
| N_VFP
, N_EQK
);
14783 switch (inst
.instruction
& 0x0fffffff)
14786 inst
.instruction
+= N_MNEM_vcmpz
- N_MNEM_vcmp
;
14789 inst
.instruction
+= N_MNEM_vcmpez
- N_MNEM_vcmpe
;
14795 if (rs
== NS_FI
|| rs
== NS_HI
)
14797 NEON_ENCODE (SINGLE
, inst
);
14798 do_vfp_sp_compare_z ();
14802 NEON_ENCODE (DOUBLE
, inst
);
14806 do_vfp_cond_or_thumb ();
14808 /* ARMv8.2 fp16 instruction. */
14809 if (rs
== NS_HI
|| rs
== NS_HH
)
14810 do_scalar_fp16_v82_encode ();
14814 nsyn_insert_sp (void)
14816 inst
.operands
[1] = inst
.operands
[0];
14817 memset (&inst
.operands
[0], '\0', sizeof (inst
.operands
[0]));
14818 inst
.operands
[0].reg
= REG_SP
;
14819 inst
.operands
[0].isreg
= 1;
14820 inst
.operands
[0].writeback
= 1;
14821 inst
.operands
[0].present
= 1;
14825 do_vfp_nsyn_push (void)
14829 constraint (inst
.operands
[1].imm
< 1 || inst
.operands
[1].imm
> 16,
14830 _("register list must contain at least 1 and at most 16 "
14833 if (inst
.operands
[1].issingle
)
14834 do_vfp_nsyn_opcode ("fstmdbs");
14836 do_vfp_nsyn_opcode ("fstmdbd");
14840 do_vfp_nsyn_pop (void)
14844 constraint (inst
.operands
[1].imm
< 1 || inst
.operands
[1].imm
> 16,
14845 _("register list must contain at least 1 and at most 16 "
14848 if (inst
.operands
[1].issingle
)
14849 do_vfp_nsyn_opcode ("fldmias");
14851 do_vfp_nsyn_opcode ("fldmiad");
14854 /* Fix up Neon data-processing instructions, ORing in the correct bits for
14855 ARM mode or Thumb mode and moving the encoded bit 24 to bit 28. */
14858 neon_dp_fixup (struct arm_it
* insn
)
14860 unsigned int i
= insn
->instruction
;
14865 /* The U bit is at bit 24 by default. Move to bit 28 in Thumb mode. */
14876 insn
->instruction
= i
;
/* Turn a size (8, 16, 32, 64) into the respective bit number minus 3
   (0, 1, 2, 3), via the position of the lowest set bit.  (Reconstructed
   from a garbled extraction; the return-type line was missing --
   'static unsigned' assumed, verify against upstream.)  */
static unsigned
neon_logbits (unsigned x)
{
  return ffs (x) - 4;
}
/* Split a 5-bit Neon register number into the 4-bit field (LOW4) and the
   extension bit (HI1) used by the D/N/M encoding positions.  (Reconstructed:
   the extracted text had source line numbers fused into the code.)  */
#define LOW4(R) ((R) & 0xf)
#define HI1(R) (((R) >> 4) & 1)
14891 /* Encode insns with bit pattern:
14893 |28/24|23|22 |21 20|19 16|15 12|11 8|7|6|5|4|3 0|
14894 | U |x |D |size | Rn | Rd |x x x x|N|Q|M|x| Rm |
14896 SIZE is passed in bits. -1 means size field isn't changed, in case it has a
14897 different meaning for some instruction. */
14900 neon_three_same (int isquad
, int ubit
, int size
)
14902 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
14903 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
14904 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
14905 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
14906 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
14907 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
14908 inst
.instruction
|= (isquad
!= 0) << 6;
14909 inst
.instruction
|= (ubit
!= 0) << 24;
14911 inst
.instruction
|= neon_logbits (size
) << 20;
14913 neon_dp_fixup (&inst
);
14916 /* Encode instructions of the form:
14918 |28/24|23|22|21 20|19 18|17 16|15 12|11 7|6|5|4|3 0|
14919 | U |x |D |x x |size |x x | Rd |x x x x x|Q|M|x| Rm |
14921 Don't write size if SIZE == -1. */
14924 neon_two_same (int qbit
, int ubit
, int size
)
14926 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
14927 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
14928 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
14929 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
14930 inst
.instruction
|= (qbit
!= 0) << 6;
14931 inst
.instruction
|= (ubit
!= 0) << 24;
14934 inst
.instruction
|= neon_logbits (size
) << 18;
14936 neon_dp_fixup (&inst
);
14939 /* Neon instruction encoders, in approximate order of appearance. */
14942 do_neon_dyadic_i_su (void)
14944 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
14945 struct neon_type_el et
= neon_check_type (3, rs
,
14946 N_EQK
, N_EQK
, N_SU_32
| N_KEY
);
14947 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
14951 do_neon_dyadic_i64_su (void)
14953 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
14954 struct neon_type_el et
= neon_check_type (3, rs
,
14955 N_EQK
, N_EQK
, N_SU_ALL
| N_KEY
);
14956 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
14960 neon_imm_shift (int write_ubit
, int uval
, int isquad
, struct neon_type_el et
,
14963 unsigned size
= et
.size
>> 3;
14964 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
14965 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
14966 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
14967 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
14968 inst
.instruction
|= (isquad
!= 0) << 6;
14969 inst
.instruction
|= immbits
<< 16;
14970 inst
.instruction
|= (size
>> 3) << 7;
14971 inst
.instruction
|= (size
& 0x7) << 19;
14973 inst
.instruction
|= (uval
!= 0) << 24;
14975 neon_dp_fixup (&inst
);
14979 do_neon_shl_imm (void)
14981 if (!inst
.operands
[2].isreg
)
14983 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
14984 struct neon_type_el et
= neon_check_type (2, rs
, N_EQK
, N_KEY
| N_I_ALL
);
14985 int imm
= inst
.operands
[2].imm
;
14987 constraint (imm
< 0 || (unsigned)imm
>= et
.size
,
14988 _("immediate out of range for shift"));
14989 NEON_ENCODE (IMMED
, inst
);
14990 neon_imm_shift (FALSE
, 0, neon_quad (rs
), et
, imm
);
14994 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
14995 struct neon_type_el et
= neon_check_type (3, rs
,
14996 N_EQK
, N_SU_ALL
| N_KEY
, N_EQK
| N_SGN
);
14999 /* VSHL/VQSHL 3-register variants have syntax such as:
15001 whereas other 3-register operations encoded by neon_three_same have
15004 (i.e. with Dn & Dm reversed). Swap operands[1].reg and operands[2].reg
15006 tmp
= inst
.operands
[2].reg
;
15007 inst
.operands
[2].reg
= inst
.operands
[1].reg
;
15008 inst
.operands
[1].reg
= tmp
;
15009 NEON_ENCODE (INTEGER
, inst
);
15010 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
15015 do_neon_qshl_imm (void)
15017 if (!inst
.operands
[2].isreg
)
15019 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
15020 struct neon_type_el et
= neon_check_type (2, rs
, N_EQK
, N_SU_ALL
| N_KEY
);
15021 int imm
= inst
.operands
[2].imm
;
15023 constraint (imm
< 0 || (unsigned)imm
>= et
.size
,
15024 _("immediate out of range for shift"));
15025 NEON_ENCODE (IMMED
, inst
);
15026 neon_imm_shift (TRUE
, et
.type
== NT_unsigned
, neon_quad (rs
), et
, imm
);
15030 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
15031 struct neon_type_el et
= neon_check_type (3, rs
,
15032 N_EQK
, N_SU_ALL
| N_KEY
, N_EQK
| N_SGN
);
15035 /* See note in do_neon_shl_imm. */
15036 tmp
= inst
.operands
[2].reg
;
15037 inst
.operands
[2].reg
= inst
.operands
[1].reg
;
15038 inst
.operands
[1].reg
= tmp
;
15039 NEON_ENCODE (INTEGER
, inst
);
15040 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
15045 do_neon_rshl (void)
15047 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
15048 struct neon_type_el et
= neon_check_type (3, rs
,
15049 N_EQK
, N_EQK
, N_SU_ALL
| N_KEY
);
15052 tmp
= inst
.operands
[2].reg
;
15053 inst
.operands
[2].reg
= inst
.operands
[1].reg
;
15054 inst
.operands
[1].reg
= tmp
;
15055 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
15059 neon_cmode_for_logic_imm (unsigned immediate
, unsigned *immbits
, int size
)
15061 /* Handle .I8 pseudo-instructions. */
15064 /* Unfortunately, this will make everything apart from zero out-of-range.
15065 FIXME is this the intended semantics? There doesn't seem much point in
15066 accepting .I8 if so. */
15067 immediate
|= immediate
<< 8;
15073 if (immediate
== (immediate
& 0x000000ff))
15075 *immbits
= immediate
;
15078 else if (immediate
== (immediate
& 0x0000ff00))
15080 *immbits
= immediate
>> 8;
15083 else if (immediate
== (immediate
& 0x00ff0000))
15085 *immbits
= immediate
>> 16;
15088 else if (immediate
== (immediate
& 0xff000000))
15090 *immbits
= immediate
>> 24;
15093 if ((immediate
& 0xffff) != (immediate
>> 16))
15094 goto bad_immediate
;
15095 immediate
&= 0xffff;
15098 if (immediate
== (immediate
& 0x000000ff))
15100 *immbits
= immediate
;
15103 else if (immediate
== (immediate
& 0x0000ff00))
15105 *immbits
= immediate
>> 8;
15110 first_error (_("immediate value out of range"));
15115 do_neon_logic (void)
15117 if (inst
.operands
[2].present
&& inst
.operands
[2].isreg
)
15119 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
15120 neon_check_type (3, rs
, N_IGNORE_TYPE
);
15121 /* U bit and size field were set as part of the bitmask. */
15122 NEON_ENCODE (INTEGER
, inst
);
15123 neon_three_same (neon_quad (rs
), 0, -1);
15127 const int three_ops_form
= (inst
.operands
[2].present
15128 && !inst
.operands
[2].isreg
);
15129 const int immoperand
= (three_ops_form
? 2 : 1);
15130 enum neon_shape rs
= (three_ops_form
15131 ? neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
)
15132 : neon_select_shape (NS_DI
, NS_QI
, NS_NULL
));
15133 struct neon_type_el et
= neon_check_type (2, rs
,
15134 N_I8
| N_I16
| N_I32
| N_I64
| N_F32
| N_KEY
, N_EQK
);
15135 enum neon_opc opcode
= (enum neon_opc
) inst
.instruction
& 0x0fffffff;
15139 if (et
.type
== NT_invtype
)
15142 if (three_ops_form
)
15143 constraint (inst
.operands
[0].reg
!= inst
.operands
[1].reg
,
15144 _("first and second operands shall be the same register"));
15146 NEON_ENCODE (IMMED
, inst
);
15148 immbits
= inst
.operands
[immoperand
].imm
;
15151 /* .i64 is a pseudo-op, so the immediate must be a repeating
15153 if (immbits
!= (inst
.operands
[immoperand
].regisimm
?
15154 inst
.operands
[immoperand
].reg
: 0))
15156 /* Set immbits to an invalid constant. */
15157 immbits
= 0xdeadbeef;
15164 cmode
= neon_cmode_for_logic_imm (immbits
, &immbits
, et
.size
);
15168 cmode
= neon_cmode_for_logic_imm (immbits
, &immbits
, et
.size
);
15172 /* Pseudo-instruction for VBIC. */
15173 neon_invert_size (&immbits
, 0, et
.size
);
15174 cmode
= neon_cmode_for_logic_imm (immbits
, &immbits
, et
.size
);
15178 /* Pseudo-instruction for VORR. */
15179 neon_invert_size (&immbits
, 0, et
.size
);
15180 cmode
= neon_cmode_for_logic_imm (immbits
, &immbits
, et
.size
);
15190 inst
.instruction
|= neon_quad (rs
) << 6;
15191 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15192 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15193 inst
.instruction
|= cmode
<< 8;
15194 neon_write_immbits (immbits
);
15196 neon_dp_fixup (&inst
);
15201 do_neon_bitfield (void)
15203 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
15204 neon_check_type (3, rs
, N_IGNORE_TYPE
);
15205 neon_three_same (neon_quad (rs
), 0, -1);
15209 neon_dyadic_misc (enum neon_el_type ubit_meaning
, unsigned types
,
15212 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
15213 struct neon_type_el et
= neon_check_type (3, rs
, N_EQK
| destbits
, N_EQK
,
15215 if (et
.type
== NT_float
)
15217 NEON_ENCODE (FLOAT
, inst
);
15218 neon_three_same (neon_quad (rs
), 0, et
.size
== 16 ? (int) et
.size
: -1);
15222 NEON_ENCODE (INTEGER
, inst
);
15223 neon_three_same (neon_quad (rs
), et
.type
== ubit_meaning
, et
.size
);
15228 do_neon_dyadic_if_su (void)
15230 neon_dyadic_misc (NT_unsigned
, N_SUF_32
, 0);
15234 do_neon_dyadic_if_su_d (void)
15236 /* This version only allow D registers, but that constraint is enforced during
15237 operand parsing so we don't need to do anything extra here. */
15238 neon_dyadic_misc (NT_unsigned
, N_SUF_32
, 0);
15242 do_neon_dyadic_if_i_d (void)
15244 /* The "untyped" case can't happen. Do this to stop the "U" bit being
15245 affected if we specify unsigned args. */
15246 neon_dyadic_misc (NT_untyped
, N_IF_32
, 0);
/* Bitmask of checks that vfp_or_neon_is_neon may perform.  */
enum vfp_or_neon_is_neon_bits
{
  NEON_CHECK_CC = 1,
  NEON_CHECK_ARCH = 2,
  NEON_CHECK_ARCH8 = 4
};
15256 /* Call this function if an instruction which may have belonged to the VFP or
15257 Neon instruction sets, but turned out to be a Neon instruction (due to the
15258 operand types involved, etc.). We have to check and/or fix-up a couple of
15261 - Make sure the user hasn't attempted to make a Neon instruction
15263 - Alter the value in the condition code field if necessary.
15264 - Make sure that the arch supports Neon instructions.
15266 Which of these operations take place depends on bits from enum
15267 vfp_or_neon_is_neon_bits.
15269 WARNING: This function has side effects! If NEON_CHECK_CC is used and the
15270 current instruction's condition is COND_ALWAYS, the condition field is
15271 changed to inst.uncond_value. This is necessary because instructions shared
15272 between VFP and Neon may be conditional for the VFP variants only, and the
15273 unconditional Neon version must have, e.g., 0xF in the condition field. */
15276 vfp_or_neon_is_neon (unsigned check
)
15278 /* Conditions are always legal in Thumb mode (IT blocks). */
15279 if (!thumb_mode
&& (check
& NEON_CHECK_CC
))
15281 if (inst
.cond
!= COND_ALWAYS
)
15283 first_error (_(BAD_COND
));
15286 if (inst
.uncond_value
!= -1)
15287 inst
.instruction
|= inst
.uncond_value
<< 28;
15290 if ((check
& NEON_CHECK_ARCH
)
15291 && !mark_feature_used (&fpu_neon_ext_v1
))
15293 first_error (_(BAD_FPU
));
15297 if ((check
& NEON_CHECK_ARCH8
)
15298 && !mark_feature_used (&fpu_neon_ext_armv8
))
15300 first_error (_(BAD_FPU
));
15308 do_neon_addsub_if_i (void)
15310 if (try_vfp_nsyn (3, do_vfp_nsyn_add_sub
) == SUCCESS
)
15313 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
15316 /* The "untyped" case can't happen. Do this to stop the "U" bit being
15317 affected if we specify unsigned args. */
15318 neon_dyadic_misc (NT_untyped
, N_IF_32
| N_I64
, 0);
15321 /* Swaps operands 1 and 2. If operand 1 (optional arg) was omitted, we want the
15323 V<op> A,B (A is operand 0, B is operand 2)
15328 so handle that case specially. */
15331 neon_exchange_operands (void)
15333 if (inst
.operands
[1].present
)
15335 void *scratch
= xmalloc (sizeof (inst
.operands
[0]));
15337 /* Swap operands[1] and operands[2]. */
15338 memcpy (scratch
, &inst
.operands
[1], sizeof (inst
.operands
[0]));
15339 inst
.operands
[1] = inst
.operands
[2];
15340 memcpy (&inst
.operands
[2], scratch
, sizeof (inst
.operands
[0]));
15345 inst
.operands
[1] = inst
.operands
[2];
15346 inst
.operands
[2] = inst
.operands
[0];
15351 neon_compare (unsigned regtypes
, unsigned immtypes
, int invert
)
15353 if (inst
.operands
[2].isreg
)
15356 neon_exchange_operands ();
15357 neon_dyadic_misc (NT_unsigned
, regtypes
, N_SIZ
);
15361 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
15362 struct neon_type_el et
= neon_check_type (2, rs
,
15363 N_EQK
| N_SIZ
, immtypes
| N_KEY
);
15365 NEON_ENCODE (IMMED
, inst
);
15366 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15367 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15368 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
15369 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
15370 inst
.instruction
|= neon_quad (rs
) << 6;
15371 inst
.instruction
|= (et
.type
== NT_float
) << 10;
15372 inst
.instruction
|= neon_logbits (et
.size
) << 18;
15374 neon_dp_fixup (&inst
);
15381 neon_compare (N_SUF_32
, N_S_32
| N_F_16_32
, FALSE
);
15385 do_neon_cmp_inv (void)
15387 neon_compare (N_SUF_32
, N_S_32
| N_F_16_32
, TRUE
);
15393 neon_compare (N_IF_32
, N_IF_32
, FALSE
);
/* For multiply instructions, we have the possibility of 16-bit or 32-bit
   scalars, which are encoded in 5 bits, M : Rm.
   For 16-bit scalars, the register is encoded in Rm[2:0] and the index in
   M:Rm[3], and for 32-bit scalars, the register is encoded in Rm[3:0] and the
   index in M.

   Dot Product instructions are similar to multiply instructions except elsize
   should always be 32.

   This function translates SCALAR, which is GAS's internal encoding of indexed
   scalar register, to raw encoding.  There is also register and index range
   check based on ELSIZE.  */

static unsigned
neon_scalar_for_mul (unsigned scalar, unsigned elsize)
{
  unsigned regno = NEON_SCALAR_REG (scalar);
  unsigned elno = NEON_SCALAR_INDEX (scalar);

  switch (elsize)
    {
    case 16:
      if (regno > 7 || elno > 3)
	goto bad_scalar;
      return regno | (elno << 3);

    case 32:
      if (regno > 15 || elno > 1)
	goto bad_scalar;
      return regno | (elno << 4);

    default:
    bad_scalar:
      first_error (_("scalar out of range for multiply instruction"));
    }

  return 0;
}
15435 /* Encode multiply / multiply-accumulate scalar instructions. */
15438 neon_mul_mac (struct neon_type_el et
, int ubit
)
15442 /* Give a more helpful error message if we have an invalid type. */
15443 if (et
.type
== NT_invtype
)
15446 scalar
= neon_scalar_for_mul (inst
.operands
[2].reg
, et
.size
);
15447 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15448 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15449 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
15450 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
15451 inst
.instruction
|= LOW4 (scalar
);
15452 inst
.instruction
|= HI1 (scalar
) << 5;
15453 inst
.instruction
|= (et
.type
== NT_float
) << 8;
15454 inst
.instruction
|= neon_logbits (et
.size
) << 20;
15455 inst
.instruction
|= (ubit
!= 0) << 24;
15457 neon_dp_fixup (&inst
);
15461 do_neon_mac_maybe_scalar (void)
15463 if (try_vfp_nsyn (3, do_vfp_nsyn_mla_mls
) == SUCCESS
)
15466 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
15469 if (inst
.operands
[2].isscalar
)
15471 enum neon_shape rs
= neon_select_shape (NS_DDS
, NS_QQS
, NS_NULL
);
15472 struct neon_type_el et
= neon_check_type (3, rs
,
15473 N_EQK
, N_EQK
, N_I16
| N_I32
| N_F_16_32
| N_KEY
);
15474 NEON_ENCODE (SCALAR
, inst
);
15475 neon_mul_mac (et
, neon_quad (rs
));
15479 /* The "untyped" case can't happen. Do this to stop the "U" bit being
15480 affected if we specify unsigned args. */
15481 neon_dyadic_misc (NT_untyped
, N_IF_32
, 0);
15486 do_neon_fmac (void)
15488 if (try_vfp_nsyn (3, do_vfp_nsyn_fma_fms
) == SUCCESS
)
15491 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
15494 neon_dyadic_misc (NT_untyped
, N_IF_32
, 0);
15500 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
15501 struct neon_type_el et
= neon_check_type (3, rs
,
15502 N_EQK
, N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
15503 neon_three_same (neon_quad (rs
), 0, et
.size
);
15506 /* VMUL with 3 registers allows the P8 type. The scalar version supports the
15507 same types as the MAC equivalents. The polynomial type for this instruction
15508 is encoded the same as the integer type. */
15513 if (try_vfp_nsyn (3, do_vfp_nsyn_mul
) == SUCCESS
)
15516 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
15519 if (inst
.operands
[2].isscalar
)
15520 do_neon_mac_maybe_scalar ();
15522 neon_dyadic_misc (NT_poly
, N_I8
| N_I16
| N_I32
| N_F16
| N_F32
| N_P8
, 0);
15526 do_neon_qdmulh (void)
15528 if (inst
.operands
[2].isscalar
)
15530 enum neon_shape rs
= neon_select_shape (NS_DDS
, NS_QQS
, NS_NULL
);
15531 struct neon_type_el et
= neon_check_type (3, rs
,
15532 N_EQK
, N_EQK
, N_S16
| N_S32
| N_KEY
);
15533 NEON_ENCODE (SCALAR
, inst
);
15534 neon_mul_mac (et
, neon_quad (rs
));
15538 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
15539 struct neon_type_el et
= neon_check_type (3, rs
,
15540 N_EQK
, N_EQK
, N_S16
| N_S32
| N_KEY
);
15541 NEON_ENCODE (INTEGER
, inst
);
15542 /* The U bit (rounding) comes from bit mask. */
15543 neon_three_same (neon_quad (rs
), 0, et
.size
);
15548 do_neon_qrdmlah (void)
15550 /* Check we're on the correct architecture. */
15551 if (!mark_feature_used (&fpu_neon_ext_armv8
))
15553 _("instruction form not available on this architecture.");
15554 else if (!mark_feature_used (&fpu_neon_ext_v8_1
))
15556 as_warn (_("this instruction implies use of ARMv8.1 AdvSIMD."));
15557 record_feature_use (&fpu_neon_ext_v8_1
);
15560 if (inst
.operands
[2].isscalar
)
15562 enum neon_shape rs
= neon_select_shape (NS_DDS
, NS_QQS
, NS_NULL
);
15563 struct neon_type_el et
= neon_check_type (3, rs
,
15564 N_EQK
, N_EQK
, N_S16
| N_S32
| N_KEY
);
15565 NEON_ENCODE (SCALAR
, inst
);
15566 neon_mul_mac (et
, neon_quad (rs
));
15570 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
15571 struct neon_type_el et
= neon_check_type (3, rs
,
15572 N_EQK
, N_EQK
, N_S16
| N_S32
| N_KEY
);
15573 NEON_ENCODE (INTEGER
, inst
);
15574 /* The U bit (rounding) comes from bit mask. */
15575 neon_three_same (neon_quad (rs
), 0, et
.size
);
15580 do_neon_fcmp_absolute (void)
15582 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
15583 struct neon_type_el et
= neon_check_type (3, rs
, N_EQK
, N_EQK
,
15584 N_F_16_32
| N_KEY
);
15585 /* Size field comes from bit mask. */
15586 neon_three_same (neon_quad (rs
), 1, et
.size
== 16 ? (int) et
.size
: -1);
static void
do_neon_fcmp_absolute_inv (void)
{
  neon_exchange_operands ();
  do_neon_fcmp_absolute ();
}
15597 do_neon_step (void)
15599 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
15600 struct neon_type_el et
= neon_check_type (3, rs
, N_EQK
, N_EQK
,
15601 N_F_16_32
| N_KEY
);
15602 neon_three_same (neon_quad (rs
), 0, et
.size
== 16 ? (int) et
.size
: -1);
15606 do_neon_abs_neg (void)
15608 enum neon_shape rs
;
15609 struct neon_type_el et
;
15611 if (try_vfp_nsyn (2, do_vfp_nsyn_abs_neg
) == SUCCESS
)
15614 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
15617 rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
15618 et
= neon_check_type (2, rs
, N_EQK
, N_S_32
| N_F_16_32
| N_KEY
);
15620 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15621 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15622 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
15623 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
15624 inst
.instruction
|= neon_quad (rs
) << 6;
15625 inst
.instruction
|= (et
.type
== NT_float
) << 10;
15626 inst
.instruction
|= neon_logbits (et
.size
) << 18;
15628 neon_dp_fixup (&inst
);
15634 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
15635 struct neon_type_el et
= neon_check_type (2, rs
,
15636 N_EQK
, N_8
| N_16
| N_32
| N_64
| N_KEY
);
15637 int imm
= inst
.operands
[2].imm
;
15638 constraint (imm
< 0 || (unsigned)imm
>= et
.size
,
15639 _("immediate out of range for insert"));
15640 neon_imm_shift (FALSE
, 0, neon_quad (rs
), et
, imm
);
15646 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
15647 struct neon_type_el et
= neon_check_type (2, rs
,
15648 N_EQK
, N_8
| N_16
| N_32
| N_64
| N_KEY
);
15649 int imm
= inst
.operands
[2].imm
;
15650 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
15651 _("immediate out of range for insert"));
15652 neon_imm_shift (FALSE
, 0, neon_quad (rs
), et
, et
.size
- imm
);
15656 do_neon_qshlu_imm (void)
15658 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
15659 struct neon_type_el et
= neon_check_type (2, rs
,
15660 N_EQK
| N_UNS
, N_S8
| N_S16
| N_S32
| N_S64
| N_KEY
);
15661 int imm
= inst
.operands
[2].imm
;
15662 constraint (imm
< 0 || (unsigned)imm
>= et
.size
,
15663 _("immediate out of range for shift"));
15664 /* Only encodes the 'U present' variant of the instruction.
15665 In this case, signed types have OP (bit 8) set to 0.
15666 Unsigned types have OP set to 1. */
15667 inst
.instruction
|= (et
.type
== NT_unsigned
) << 8;
15668 /* The rest of the bits are the same as other immediate shifts. */
15669 neon_imm_shift (FALSE
, 0, neon_quad (rs
), et
, imm
);
15673 do_neon_qmovn (void)
15675 struct neon_type_el et
= neon_check_type (2, NS_DQ
,
15676 N_EQK
| N_HLF
, N_SU_16_64
| N_KEY
);
15677 /* Saturating move where operands can be signed or unsigned, and the
15678 destination has the same signedness. */
15679 NEON_ENCODE (INTEGER
, inst
);
15680 if (et
.type
== NT_unsigned
)
15681 inst
.instruction
|= 0xc0;
15683 inst
.instruction
|= 0x80;
15684 neon_two_same (0, 1, et
.size
/ 2);
15688 do_neon_qmovun (void)
15690 struct neon_type_el et
= neon_check_type (2, NS_DQ
,
15691 N_EQK
| N_HLF
| N_UNS
, N_S16
| N_S32
| N_S64
| N_KEY
);
15692 /* Saturating move with unsigned results. Operands must be signed. */
15693 NEON_ENCODE (INTEGER
, inst
);
15694 neon_two_same (0, 1, et
.size
/ 2);
15698 do_neon_rshift_sat_narrow (void)
15700 /* FIXME: Types for narrowing. If operands are signed, results can be signed
15701 or unsigned. If operands are unsigned, results must also be unsigned. */
15702 struct neon_type_el et
= neon_check_type (2, NS_DQI
,
15703 N_EQK
| N_HLF
, N_SU_16_64
| N_KEY
);
15704 int imm
= inst
.operands
[2].imm
;
15705 /* This gets the bounds check, size encoding and immediate bits calculation
15709 /* VQ{R}SHRN.I<size> <Dd>, <Qm>, #0 is a synonym for
15710 VQMOVN.I<size> <Dd>, <Qm>. */
15713 inst
.operands
[2].present
= 0;
15714 inst
.instruction
= N_MNEM_vqmovn
;
15719 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
15720 _("immediate out of range"));
15721 neon_imm_shift (TRUE
, et
.type
== NT_unsigned
, 0, et
, et
.size
- imm
);
15725 do_neon_rshift_sat_narrow_u (void)
15727 /* FIXME: Types for narrowing. If operands are signed, results can be signed
15728 or unsigned. If operands are unsigned, results must also be unsigned. */
15729 struct neon_type_el et
= neon_check_type (2, NS_DQI
,
15730 N_EQK
| N_HLF
| N_UNS
, N_S16
| N_S32
| N_S64
| N_KEY
);
15731 int imm
= inst
.operands
[2].imm
;
15732 /* This gets the bounds check, size encoding and immediate bits calculation
15736 /* VQSHRUN.I<size> <Dd>, <Qm>, #0 is a synonym for
15737 VQMOVUN.I<size> <Dd>, <Qm>. */
15740 inst
.operands
[2].present
= 0;
15741 inst
.instruction
= N_MNEM_vqmovun
;
15746 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
15747 _("immediate out of range"));
15748 /* FIXME: The manual is kind of unclear about what value U should have in
15749 VQ{R}SHRUN instructions, but U=0, op=0 definitely encodes VRSHR, so it
15751 neon_imm_shift (TRUE
, 1, 0, et
, et
.size
- imm
);
15755 do_neon_movn (void)
15757 struct neon_type_el et
= neon_check_type (2, NS_DQ
,
15758 N_EQK
| N_HLF
, N_I16
| N_I32
| N_I64
| N_KEY
);
15759 NEON_ENCODE (INTEGER
, inst
);
15760 neon_two_same (0, 1, et
.size
/ 2);
15764 do_neon_rshift_narrow (void)
15766 struct neon_type_el et
= neon_check_type (2, NS_DQI
,
15767 N_EQK
| N_HLF
, N_I16
| N_I32
| N_I64
| N_KEY
);
15768 int imm
= inst
.operands
[2].imm
;
15769 /* This gets the bounds check, size encoding and immediate bits calculation
15773 /* If immediate is zero then we are a pseudo-instruction for
15774 VMOVN.I<size> <Dd>, <Qm> */
15777 inst
.operands
[2].present
= 0;
15778 inst
.instruction
= N_MNEM_vmovn
;
15783 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
15784 _("immediate out of range for narrowing operation"));
15785 neon_imm_shift (FALSE
, 0, 0, et
, et
.size
- imm
);
15789 do_neon_shll (void)
15791 /* FIXME: Type checking when lengthening. */
15792 struct neon_type_el et
= neon_check_type (2, NS_QDI
,
15793 N_EQK
| N_DBL
, N_I8
| N_I16
| N_I32
| N_KEY
);
15794 unsigned imm
= inst
.operands
[2].imm
;
15796 if (imm
== et
.size
)
15798 /* Maximum shift variant. */
15799 NEON_ENCODE (INTEGER
, inst
);
15800 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15801 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15802 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
15803 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
15804 inst
.instruction
|= neon_logbits (et
.size
) << 18;
15806 neon_dp_fixup (&inst
);
15810 /* A more-specific type check for non-max versions. */
15811 et
= neon_check_type (2, NS_QDI
,
15812 N_EQK
| N_DBL
, N_SU_32
| N_KEY
);
15813 NEON_ENCODE (IMMED
, inst
);
15814 neon_imm_shift (TRUE
, et
.type
== NT_unsigned
, 0, et
, imm
);
/* Check the various types for the VCVT instruction, and return which version
   the current instruction is.  */

#define CVT_FLAVOUR_VAR							      \
  CVT_VAR (s32_f32, N_S32, N_F32, whole_reg,   "ftosls", "ftosis", "ftosizs") \
  CVT_VAR (u32_f32, N_U32, N_F32, whole_reg,   "ftouls", "ftouis", "ftouizs") \
  CVT_VAR (f32_s32, N_F32, N_S32, whole_reg,   "fsltos", "fsitos", NULL)      \
  CVT_VAR (f32_u32, N_F32, N_U32, whole_reg,   "fultos", "fuitos", NULL)      \
  /* Half-precision conversions.  */					      \
  CVT_VAR (s16_f16, N_S16, N_F16 | N_KEY, whole_reg, NULL, NULL, NULL)	      \
  CVT_VAR (u16_f16, N_U16, N_F16 | N_KEY, whole_reg, NULL, NULL, NULL)	      \
  CVT_VAR (f16_s16, N_F16 | N_KEY, N_S16, whole_reg, NULL, NULL, NULL)	      \
  CVT_VAR (f16_u16, N_F16 | N_KEY, N_U16, whole_reg, NULL, NULL, NULL)	      \
  CVT_VAR (f32_f16, N_F32, N_F16, whole_reg,   NULL,     NULL,     NULL)      \
  CVT_VAR (f16_f32, N_F16, N_F32, whole_reg,   NULL,     NULL,     NULL)      \
  /* New VCVT instructions introduced by ARMv8.2 fp16 extension.	      \
     Compared with single/double precision variants, only the co-processor    \
     field is different, so the encoding flow is reused here.  */	      \
  CVT_VAR (f16_s32, N_F16 | N_KEY, N_S32, N_VFP, "fsltos", "fsitos", NULL)    \
  CVT_VAR (f16_u32, N_F16 | N_KEY, N_U32, N_VFP, "fultos", "fuitos", NULL)    \
  CVT_VAR (u32_f16, N_U32, N_F16 | N_KEY, N_VFP, "ftouls", "ftouis", "ftouizs")\
  CVT_VAR (s32_f16, N_S32, N_F16 | N_KEY, N_VFP, "ftosls", "ftosis", "ftosizs")\
  /* VFP instructions.  */						      \
  CVT_VAR (f32_f64, N_F32, N_F64, N_VFP,       NULL,     "fcvtsd", NULL)      \
  CVT_VAR (f64_f32, N_F64, N_F32, N_VFP,       NULL,     "fcvtds", NULL)      \
  CVT_VAR (s32_f64, N_S32, N_F64 | key, N_VFP, "ftosld", "ftosid", "ftosizd") \
  CVT_VAR (u32_f64, N_U32, N_F64 | key, N_VFP, "ftould", "ftouid", "ftouizd") \
  CVT_VAR (f64_s32, N_F64 | key, N_S32, N_VFP, "fsltod", "fsitod", NULL)      \
  CVT_VAR (f64_u32, N_F64 | key, N_U32, N_VFP, "fultod", "fuitod", NULL)      \
  /* VFP instructions with bitshift.  */				      \
  CVT_VAR (f32_s16, N_F32 | key, N_S16, N_VFP, "fshtos", NULL,     NULL)      \
  CVT_VAR (f32_u16, N_F32 | key, N_U16, N_VFP, "fuhtos", NULL,     NULL)      \
  CVT_VAR (f64_s16, N_F64 | key, N_S16, N_VFP, "fshtod", NULL,     NULL)      \
  CVT_VAR (f64_u16, N_F64 | key, N_U16, N_VFP, "fuhtod", NULL,     NULL)      \
  CVT_VAR (s16_f32, N_S16, N_F32 | key, N_VFP, "ftoshs", NULL,     NULL)      \
  CVT_VAR (u16_f32, N_U16, N_F32 | key, N_VFP, "ftouhs", NULL,     NULL)      \
  CVT_VAR (s16_f64, N_S16, N_F64 | key, N_VFP, "ftoshd", NULL,     NULL)      \
  CVT_VAR (u16_f64, N_U16, N_F64 | key, N_VFP, "ftouhd", NULL,     NULL)

#define CVT_VAR(C, X, Y, R, BSN, CN, ZN) \
  neon_cvt_flavour_##C,

/* The different types of conversions we can do.  */
enum neon_cvt_flavour
{
  CVT_FLAVOUR_VAR
  neon_cvt_flavour_invalid,
  neon_cvt_flavour_first_fp = neon_cvt_flavour_f32_f64
};

#undef CVT_VAR
15870 static enum neon_cvt_flavour
15871 get_neon_cvt_flavour (enum neon_shape rs
)
15873 #define CVT_VAR(C,X,Y,R,BSN,CN,ZN) \
15874 et = neon_check_type (2, rs, (R) | (X), (R) | (Y)); \
15875 if (et.type != NT_invtype) \
15877 inst.error = NULL; \
15878 return (neon_cvt_flavour_##C); \
15881 struct neon_type_el et
;
15882 unsigned whole_reg
= (rs
== NS_FFI
|| rs
== NS_FD
|| rs
== NS_DF
15883 || rs
== NS_FF
) ? N_VFP
: 0;
15884 /* The instruction versions which take an immediate take one register
15885 argument, which is extended to the width of the full register. Thus the
15886 "source" and "destination" registers must have the same width. Hack that
15887 here by making the size equal to the key (wider, in this case) operand. */
15888 unsigned key
= (rs
== NS_QQI
|| rs
== NS_DDI
|| rs
== NS_FFI
) ? N_KEY
: 0;
15892 return neon_cvt_flavour_invalid
;
15907 /* Neon-syntax VFP conversions. */
15910 do_vfp_nsyn_cvt (enum neon_shape rs
, enum neon_cvt_flavour flavour
)
15912 const char *opname
= 0;
15914 if (rs
== NS_DDI
|| rs
== NS_QQI
|| rs
== NS_FFI
15915 || rs
== NS_FHI
|| rs
== NS_HFI
)
15917 /* Conversions with immediate bitshift. */
15918 const char *enc
[] =
15920 #define CVT_VAR(C,A,B,R,BSN,CN,ZN) BSN,
15926 if (flavour
< (int) ARRAY_SIZE (enc
))
15928 opname
= enc
[flavour
];
15929 constraint (inst
.operands
[0].reg
!= inst
.operands
[1].reg
,
15930 _("operands 0 and 1 must be the same register"));
15931 inst
.operands
[1] = inst
.operands
[2];
15932 memset (&inst
.operands
[2], '\0', sizeof (inst
.operands
[2]));
15937 /* Conversions without bitshift. */
15938 const char *enc
[] =
15940 #define CVT_VAR(C,A,B,R,BSN,CN,ZN) CN,
15946 if (flavour
< (int) ARRAY_SIZE (enc
))
15947 opname
= enc
[flavour
];
15951 do_vfp_nsyn_opcode (opname
);
15953 /* ARMv8.2 fp16 VCVT instruction. */
15954 if (flavour
== neon_cvt_flavour_s32_f16
15955 || flavour
== neon_cvt_flavour_u32_f16
15956 || flavour
== neon_cvt_flavour_f16_u32
15957 || flavour
== neon_cvt_flavour_f16_s32
)
15958 do_scalar_fp16_v82_encode ();
15962 do_vfp_nsyn_cvtz (void)
15964 enum neon_shape rs
= neon_select_shape (NS_FH
, NS_FF
, NS_FD
, NS_NULL
);
15965 enum neon_cvt_flavour flavour
= get_neon_cvt_flavour (rs
);
15966 const char *enc
[] =
15968 #define CVT_VAR(C,A,B,R,BSN,CN,ZN) ZN,
15974 if (flavour
< (int) ARRAY_SIZE (enc
) && enc
[flavour
])
15975 do_vfp_nsyn_opcode (enc
[flavour
]);
15979 do_vfp_nsyn_cvt_fpv8 (enum neon_cvt_flavour flavour
,
15980 enum neon_cvt_mode mode
)
15985 /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
15986 D register operands. */
15987 if (flavour
== neon_cvt_flavour_s32_f64
15988 || flavour
== neon_cvt_flavour_u32_f64
)
15989 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
15992 if (flavour
== neon_cvt_flavour_s32_f16
15993 || flavour
== neon_cvt_flavour_u32_f16
)
15994 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_fp16
),
15997 set_it_insn_type (OUTSIDE_IT_INSN
);
16001 case neon_cvt_flavour_s32_f64
:
16005 case neon_cvt_flavour_s32_f32
:
16009 case neon_cvt_flavour_s32_f16
:
16013 case neon_cvt_flavour_u32_f64
:
16017 case neon_cvt_flavour_u32_f32
:
16021 case neon_cvt_flavour_u32_f16
:
16026 first_error (_("invalid instruction shape"));
16032 case neon_cvt_mode_a
: rm
= 0; break;
16033 case neon_cvt_mode_n
: rm
= 1; break;
16034 case neon_cvt_mode_p
: rm
= 2; break;
16035 case neon_cvt_mode_m
: rm
= 3; break;
16036 default: first_error (_("invalid rounding mode")); return;
16039 NEON_ENCODE (FPV8
, inst
);
16040 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
16041 encode_arm_vfp_reg (inst
.operands
[1].reg
, sz
== 1 ? VFP_REG_Dm
: VFP_REG_Sm
);
16042 inst
.instruction
|= sz
<< 8;
16044 /* ARMv8.2 fp16 VCVT instruction. */
16045 if (flavour
== neon_cvt_flavour_s32_f16
16046 ||flavour
== neon_cvt_flavour_u32_f16
)
16047 do_scalar_fp16_v82_encode ();
16048 inst
.instruction
|= op
<< 7;
16049 inst
.instruction
|= rm
<< 16;
16050 inst
.instruction
|= 0xf0000000;
16051 inst
.is_neon
= TRUE
;
16055 do_neon_cvt_1 (enum neon_cvt_mode mode
)
16057 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_FFI
, NS_DD
, NS_QQ
,
16058 NS_FD
, NS_DF
, NS_FF
, NS_QD
, NS_DQ
,
16059 NS_FH
, NS_HF
, NS_FHI
, NS_HFI
,
16061 enum neon_cvt_flavour flavour
= get_neon_cvt_flavour (rs
);
16063 if (flavour
== neon_cvt_flavour_invalid
)
16066 /* PR11109: Handle round-to-zero for VCVT conversions. */
16067 if (mode
== neon_cvt_mode_z
16068 && ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_arch_vfp_v2
)
16069 && (flavour
== neon_cvt_flavour_s16_f16
16070 || flavour
== neon_cvt_flavour_u16_f16
16071 || flavour
== neon_cvt_flavour_s32_f32
16072 || flavour
== neon_cvt_flavour_u32_f32
16073 || flavour
== neon_cvt_flavour_s32_f64
16074 || flavour
== neon_cvt_flavour_u32_f64
)
16075 && (rs
== NS_FD
|| rs
== NS_FF
))
16077 do_vfp_nsyn_cvtz ();
16081 /* ARMv8.2 fp16 VCVT conversions. */
16082 if (mode
== neon_cvt_mode_z
16083 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_fp16
)
16084 && (flavour
== neon_cvt_flavour_s32_f16
16085 || flavour
== neon_cvt_flavour_u32_f16
)
16088 do_vfp_nsyn_cvtz ();
16089 do_scalar_fp16_v82_encode ();
16093 /* VFP rather than Neon conversions. */
16094 if (flavour
>= neon_cvt_flavour_first_fp
)
16096 if (mode
== neon_cvt_mode_x
|| mode
== neon_cvt_mode_z
)
16097 do_vfp_nsyn_cvt (rs
, flavour
);
16099 do_vfp_nsyn_cvt_fpv8 (flavour
, mode
);
16110 unsigned enctab
[] = {0x0000100, 0x1000100, 0x0, 0x1000000,
16111 0x0000100, 0x1000100, 0x0, 0x1000000};
16113 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
16116 /* Fixed-point conversion with #0 immediate is encoded as an
16117 integer conversion. */
16118 if (inst
.operands
[2].present
&& inst
.operands
[2].imm
== 0)
16120 NEON_ENCODE (IMMED
, inst
);
16121 if (flavour
!= neon_cvt_flavour_invalid
)
16122 inst
.instruction
|= enctab
[flavour
];
16123 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16124 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16125 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
16126 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
16127 inst
.instruction
|= neon_quad (rs
) << 6;
16128 inst
.instruction
|= 1 << 21;
16129 if (flavour
< neon_cvt_flavour_s16_f16
)
16131 inst
.instruction
|= 1 << 21;
16132 immbits
= 32 - inst
.operands
[2].imm
;
16133 inst
.instruction
|= immbits
<< 16;
16137 inst
.instruction
|= 3 << 20;
16138 immbits
= 16 - inst
.operands
[2].imm
;
16139 inst
.instruction
|= immbits
<< 16;
16140 inst
.instruction
&= ~(1 << 9);
16143 neon_dp_fixup (&inst
);
16149 if (mode
!= neon_cvt_mode_x
&& mode
!= neon_cvt_mode_z
)
16151 NEON_ENCODE (FLOAT
, inst
);
16152 set_it_insn_type (OUTSIDE_IT_INSN
);
16154 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH8
) == FAIL
)
16157 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16158 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16159 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
16160 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
16161 inst
.instruction
|= neon_quad (rs
) << 6;
16162 inst
.instruction
|= (flavour
== neon_cvt_flavour_u16_f16
16163 || flavour
== neon_cvt_flavour_u32_f32
) << 7;
16164 inst
.instruction
|= mode
<< 8;
16165 if (flavour
== neon_cvt_flavour_u16_f16
16166 || flavour
== neon_cvt_flavour_s16_f16
)
16167 /* Mask off the original size bits and reencode them. */
16168 inst
.instruction
= ((inst
.instruction
& 0xfff3ffff) | (1 << 18));
16171 inst
.instruction
|= 0xfc000000;
16173 inst
.instruction
|= 0xf0000000;
16179 unsigned enctab
[] = { 0x100, 0x180, 0x0, 0x080,
16180 0x100, 0x180, 0x0, 0x080};
16182 NEON_ENCODE (INTEGER
, inst
);
16184 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
16187 if (flavour
!= neon_cvt_flavour_invalid
)
16188 inst
.instruction
|= enctab
[flavour
];
16190 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16191 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16192 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
16193 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
16194 inst
.instruction
|= neon_quad (rs
) << 6;
16195 if (flavour
>= neon_cvt_flavour_s16_f16
16196 && flavour
<= neon_cvt_flavour_f16_u16
)
16197 /* Half precision. */
16198 inst
.instruction
|= 1 << 18;
16200 inst
.instruction
|= 2 << 18;
16202 neon_dp_fixup (&inst
);
16207 /* Half-precision conversions for Advanced SIMD -- neon. */
16210 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
16214 && (inst
.vectype
.el
[0].size
!= 16 || inst
.vectype
.el
[1].size
!= 32))
16216 as_bad (_("operand size must match register width"));
16221 && ((inst
.vectype
.el
[0].size
!= 32 || inst
.vectype
.el
[1].size
!= 16)))
16223 as_bad (_("operand size must match register width"));
16228 inst
.instruction
= 0x3b60600;
16230 inst
.instruction
= 0x3b60700;
16232 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16233 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16234 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
16235 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
16236 neon_dp_fixup (&inst
);
16240 /* Some VFP conversions go here (s32 <-> f32, u32 <-> f32). */
16241 if (mode
== neon_cvt_mode_x
|| mode
== neon_cvt_mode_z
)
16242 do_vfp_nsyn_cvt (rs
, flavour
);
16244 do_vfp_nsyn_cvt_fpv8 (flavour
, mode
);
16249 do_neon_cvtr (void)
16251 do_neon_cvt_1 (neon_cvt_mode_x
);
16257 do_neon_cvt_1 (neon_cvt_mode_z
);
16261 do_neon_cvta (void)
16263 do_neon_cvt_1 (neon_cvt_mode_a
);
16267 do_neon_cvtn (void)
16269 do_neon_cvt_1 (neon_cvt_mode_n
);
16273 do_neon_cvtp (void)
16275 do_neon_cvt_1 (neon_cvt_mode_p
);
16279 do_neon_cvtm (void)
16281 do_neon_cvt_1 (neon_cvt_mode_m
);
16285 do_neon_cvttb_2 (bfd_boolean t
, bfd_boolean to
, bfd_boolean is_double
)
16288 mark_feature_used (&fpu_vfp_ext_armv8
);
16290 encode_arm_vfp_reg (inst
.operands
[0].reg
,
16291 (is_double
&& !to
) ? VFP_REG_Dd
: VFP_REG_Sd
);
16292 encode_arm_vfp_reg (inst
.operands
[1].reg
,
16293 (is_double
&& to
) ? VFP_REG_Dm
: VFP_REG_Sm
);
16294 inst
.instruction
|= to
? 0x10000 : 0;
16295 inst
.instruction
|= t
? 0x80 : 0;
16296 inst
.instruction
|= is_double
? 0x100 : 0;
16297 do_vfp_cond_or_thumb ();
16301 do_neon_cvttb_1 (bfd_boolean t
)
16303 enum neon_shape rs
= neon_select_shape (NS_HF
, NS_HD
, NS_FH
, NS_FF
, NS_FD
,
16304 NS_DF
, NS_DH
, NS_NULL
);
16308 else if (neon_check_type (2, rs
, N_F16
, N_F32
| N_VFP
).type
!= NT_invtype
)
16311 do_neon_cvttb_2 (t
, /*to=*/TRUE
, /*is_double=*/FALSE
);
16313 else if (neon_check_type (2, rs
, N_F32
| N_VFP
, N_F16
).type
!= NT_invtype
)
16316 do_neon_cvttb_2 (t
, /*to=*/FALSE
, /*is_double=*/FALSE
);
16318 else if (neon_check_type (2, rs
, N_F16
, N_F64
| N_VFP
).type
!= NT_invtype
)
16320 /* The VCVTB and VCVTT instructions with D-register operands
16321 don't work for SP only targets. */
16322 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
16326 do_neon_cvttb_2 (t
, /*to=*/TRUE
, /*is_double=*/TRUE
);
16328 else if (neon_check_type (2, rs
, N_F64
| N_VFP
, N_F16
).type
!= NT_invtype
)
16330 /* The VCVTB and VCVTT instructions with D-register operands
16331 don't work for SP only targets. */
16332 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
16336 do_neon_cvttb_2 (t
, /*to=*/FALSE
, /*is_double=*/TRUE
);
16343 do_neon_cvtb (void)
16345 do_neon_cvttb_1 (FALSE
);
16350 do_neon_cvtt (void)
16352 do_neon_cvttb_1 (TRUE
);
16356 neon_move_immediate (void)
16358 enum neon_shape rs
= neon_select_shape (NS_DI
, NS_QI
, NS_NULL
);
16359 struct neon_type_el et
= neon_check_type (2, rs
,
16360 N_I8
| N_I16
| N_I32
| N_I64
| N_F32
| N_KEY
, N_EQK
);
16361 unsigned immlo
, immhi
= 0, immbits
;
16362 int op
, cmode
, float_p
;
16364 constraint (et
.type
== NT_invtype
,
16365 _("operand size must be specified for immediate VMOV"));
16367 /* We start out as an MVN instruction if OP = 1, MOV otherwise. */
16368 op
= (inst
.instruction
& (1 << 5)) != 0;
16370 immlo
= inst
.operands
[1].imm
;
16371 if (inst
.operands
[1].regisimm
)
16372 immhi
= inst
.operands
[1].reg
;
16374 constraint (et
.size
< 32 && (immlo
& ~((1 << et
.size
) - 1)) != 0,
16375 _("immediate has bits set outside the operand size"));
16377 float_p
= inst
.operands
[1].immisfloat
;
16379 if ((cmode
= neon_cmode_for_move_imm (immlo
, immhi
, float_p
, &immbits
, &op
,
16380 et
.size
, et
.type
)) == FAIL
)
16382 /* Invert relevant bits only. */
16383 neon_invert_size (&immlo
, &immhi
, et
.size
);
16384 /* Flip from VMOV/VMVN to VMVN/VMOV. Some immediate types are unavailable
16385 with one or the other; those cases are caught by
16386 neon_cmode_for_move_imm. */
16388 if ((cmode
= neon_cmode_for_move_imm (immlo
, immhi
, float_p
, &immbits
,
16389 &op
, et
.size
, et
.type
)) == FAIL
)
16391 first_error (_("immediate out of range"));
16396 inst
.instruction
&= ~(1 << 5);
16397 inst
.instruction
|= op
<< 5;
16399 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16400 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16401 inst
.instruction
|= neon_quad (rs
) << 6;
16402 inst
.instruction
|= cmode
<< 8;
16404 neon_write_immbits (immbits
);
16410 if (inst
.operands
[1].isreg
)
16412 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
16414 NEON_ENCODE (INTEGER
, inst
);
16415 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16416 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16417 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
16418 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
16419 inst
.instruction
|= neon_quad (rs
) << 6;
16423 NEON_ENCODE (IMMED
, inst
);
16424 neon_move_immediate ();
16427 neon_dp_fixup (&inst
);
16430 /* Encode instructions of form:
16432 |28/24|23|22|21 20|19 16|15 12|11 8|7|6|5|4|3 0|
16433 | U |x |D |size | Rn | Rd |x x x x|N|x|M|x| Rm | */
16436 neon_mixed_length (struct neon_type_el et
, unsigned size
)
16438 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16439 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16440 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
16441 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
16442 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
16443 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
16444 inst
.instruction
|= (et
.type
== NT_unsigned
) << 24;
16445 inst
.instruction
|= neon_logbits (size
) << 20;
16447 neon_dp_fixup (&inst
);
16451 do_neon_dyadic_long (void)
16453 /* FIXME: Type checking for lengthening op. */
16454 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
16455 N_EQK
| N_DBL
, N_EQK
, N_SU_32
| N_KEY
);
16456 neon_mixed_length (et
, et
.size
);
16460 do_neon_abal (void)
16462 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
16463 N_EQK
| N_INT
| N_DBL
, N_EQK
, N_SU_32
| N_KEY
);
16464 neon_mixed_length (et
, et
.size
);
16468 neon_mac_reg_scalar_long (unsigned regtypes
, unsigned scalartypes
)
16470 if (inst
.operands
[2].isscalar
)
16472 struct neon_type_el et
= neon_check_type (3, NS_QDS
,
16473 N_EQK
| N_DBL
, N_EQK
, regtypes
| N_KEY
);
16474 NEON_ENCODE (SCALAR
, inst
);
16475 neon_mul_mac (et
, et
.type
== NT_unsigned
);
16479 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
16480 N_EQK
| N_DBL
, N_EQK
, scalartypes
| N_KEY
);
16481 NEON_ENCODE (INTEGER
, inst
);
16482 neon_mixed_length (et
, et
.size
);
16487 do_neon_mac_maybe_scalar_long (void)
16489 neon_mac_reg_scalar_long (N_S16
| N_S32
| N_U16
| N_U32
, N_SU_32
);
/* Like neon_scalar_for_mul, this function generate Rm encoding from GAS's
   internal SCALAR.  QUAD_P is 1 if it's for Q format, otherwise it's 0.
   Returns 0 (after reporting an error) when the scalar's register or
   element index is outside the encodable range for the chosen format.  */

static unsigned
neon_scalar_for_fmac_fp16_long (unsigned scalar, unsigned quad_p)
{
  unsigned regno = NEON_SCALAR_REG (scalar);
  unsigned elno = NEON_SCALAR_INDEX (scalar);

  if (quad_p)
    {
      if (regno > 7 || elno > 3)
	goto bad_scalar;

      return ((regno & 0x7)
	      | ((elno & 0x1) << 3)
	      | (((elno >> 1) & 0x1) << 5));
    }
  else
    {
      if (regno > 15 || elno > 1)
	goto bad_scalar;

      return (((regno & 0x1) << 5)
	      | ((regno >> 1) & 0x7)
	      | ((elno & 0x1) << 3));
    }

bad_scalar:
  first_error (_("scalar out of range for multiply instruction"));
  return 0;
}
16526 do_neon_fmac_maybe_scalar_long (int subtype
)
16528 enum neon_shape rs
;
16530 /* NOTE: vfmal/vfmsl use slightly different NEON three-same encoding. 'size"
16531 field (bits[21:20]) has different meaning. For scalar index variant, it's
16532 used to differentiate add and subtract, otherwise it's with fixed value
16536 if (inst
.cond
!= COND_ALWAYS
)
16537 as_warn (_("vfmal/vfmsl with FP16 type cannot be conditional, the "
16538 "behaviour is UNPREDICTABLE"));
16540 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_fp16_fml
),
16543 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_armv8
),
16546 /* vfmal/vfmsl are in three-same D/Q register format or the third operand can
16547 be a scalar index register. */
16548 if (inst
.operands
[2].isscalar
)
16550 high8
= 0xfe000000;
16553 rs
= neon_select_shape (NS_DHS
, NS_QDS
, NS_NULL
);
16557 high8
= 0xfc000000;
16560 inst
.instruction
|= (0x1 << 23);
16561 rs
= neon_select_shape (NS_DHH
, NS_QDD
, NS_NULL
);
16564 neon_check_type (3, rs
, N_EQK
, N_EQK
, N_KEY
| N_F16
);
16566 /* "opcode" from template has included "ubit", so simply pass 0 here. Also,
16567 the "S" bit in size field has been reused to differentiate vfmal and vfmsl,
16568 so we simply pass -1 as size. */
16569 unsigned quad_p
= (rs
== NS_QDD
|| rs
== NS_QDS
);
16570 neon_three_same (quad_p
, 0, size
);
16572 /* Undo neon_dp_fixup. Redo the high eight bits. */
16573 inst
.instruction
&= 0x00ffffff;
16574 inst
.instruction
|= high8
;
16576 #define LOW1(R) ((R) & 0x1)
16577 #define HI4(R) (((R) >> 1) & 0xf)
16578 /* Unlike usually NEON three-same, encoding for Vn and Vm will depend on
16579 whether the instruction is in Q form and whether Vm is a scalar indexed
16581 if (inst
.operands
[2].isscalar
)
16584 = neon_scalar_for_fmac_fp16_long (inst
.operands
[2].reg
, quad_p
);
16585 inst
.instruction
&= 0xffffffd0;
16586 inst
.instruction
|= rm
;
16590 /* Redo Rn as well. */
16591 inst
.instruction
&= 0xfff0ff7f;
16592 inst
.instruction
|= HI4 (inst
.operands
[1].reg
) << 16;
16593 inst
.instruction
|= LOW1 (inst
.operands
[1].reg
) << 7;
16598 /* Redo Rn and Rm. */
16599 inst
.instruction
&= 0xfff0ff50;
16600 inst
.instruction
|= HI4 (inst
.operands
[1].reg
) << 16;
16601 inst
.instruction
|= LOW1 (inst
.operands
[1].reg
) << 7;
16602 inst
.instruction
|= HI4 (inst
.operands
[2].reg
);
16603 inst
.instruction
|= LOW1 (inst
.operands
[2].reg
) << 5;
/* VFMAL: FP16 fused multiply-add long.  */

static void
do_neon_vfmal (void)
{
  return do_neon_fmac_maybe_scalar_long (0);
}
/* VFMSL: FP16 fused multiply-subtract long.  */

static void
do_neon_vfmsl (void)
{
  return do_neon_fmac_maybe_scalar_long (1);
}
16620 do_neon_dyadic_wide (void)
16622 struct neon_type_el et
= neon_check_type (3, NS_QQD
,
16623 N_EQK
| N_DBL
, N_EQK
| N_DBL
, N_SU_32
| N_KEY
);
16624 neon_mixed_length (et
, et
.size
);
16628 do_neon_dyadic_narrow (void)
16630 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
16631 N_EQK
| N_DBL
, N_EQK
, N_I16
| N_I32
| N_I64
| N_KEY
);
16632 /* Operand sign is unimportant, and the U bit is part of the opcode,
16633 so force the operand type to integer. */
16634 et
.type
= NT_integer
;
16635 neon_mixed_length (et
, et
.size
/ 2);
16639 do_neon_mul_sat_scalar_long (void)
16641 neon_mac_reg_scalar_long (N_S16
| N_S32
, N_S16
| N_S32
);
16645 do_neon_vmull (void)
16647 if (inst
.operands
[2].isscalar
)
16648 do_neon_mac_maybe_scalar_long ();
16651 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
16652 N_EQK
| N_DBL
, N_EQK
, N_SU_32
| N_P8
| N_P64
| N_KEY
);
16654 if (et
.type
== NT_poly
)
16655 NEON_ENCODE (POLY
, inst
);
16657 NEON_ENCODE (INTEGER
, inst
);
16659 /* For polynomial encoding the U bit must be zero, and the size must
16660 be 8 (encoded as 0b00) or, on ARMv8 or later 64 (encoded, non
16661 obviously, as 0b10). */
16664 /* Check we're on the correct architecture. */
16665 if (!mark_feature_used (&fpu_crypto_ext_armv8
))
16667 _("Instruction form not available on this architecture.");
16672 neon_mixed_length (et
, et
.size
);
16679 enum neon_shape rs
= neon_select_shape (NS_DDDI
, NS_QQQI
, NS_NULL
);
16680 struct neon_type_el et
= neon_check_type (3, rs
,
16681 N_EQK
, N_EQK
, N_8
| N_16
| N_32
| N_64
| N_KEY
);
16682 unsigned imm
= (inst
.operands
[3].imm
* et
.size
) / 8;
16684 constraint (imm
>= (unsigned) (neon_quad (rs
) ? 16 : 8),
16685 _("shift out of range"));
16686 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16687 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16688 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
16689 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
16690 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
16691 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
16692 inst
.instruction
|= neon_quad (rs
) << 6;
16693 inst
.instruction
|= imm
<< 8;
16695 neon_dp_fixup (&inst
);
16701 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
16702 struct neon_type_el et
= neon_check_type (2, rs
,
16703 N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
16704 unsigned op
= (inst
.instruction
>> 7) & 3;
16705 /* N (width of reversed regions) is encoded as part of the bitmask. We
16706 extract it here to check the elements to be reversed are smaller.
16707 Otherwise we'd get a reserved instruction. */
16708 unsigned elsize
= (op
== 2) ? 16 : (op
== 1) ? 32 : (op
== 0) ? 64 : 0;
16709 gas_assert (elsize
!= 0);
16710 constraint (et
.size
>= elsize
,
16711 _("elements must be smaller than reversal region"));
16712 neon_two_same (neon_quad (rs
), 1, et
.size
);
16718 if (inst
.operands
[1].isscalar
)
16720 enum neon_shape rs
= neon_select_shape (NS_DS
, NS_QS
, NS_NULL
);
16721 struct neon_type_el et
= neon_check_type (2, rs
,
16722 N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
16723 unsigned sizebits
= et
.size
>> 3;
16724 unsigned dm
= NEON_SCALAR_REG (inst
.operands
[1].reg
);
16725 int logsize
= neon_logbits (et
.size
);
16726 unsigned x
= NEON_SCALAR_INDEX (inst
.operands
[1].reg
) << logsize
;
16728 if (vfp_or_neon_is_neon (NEON_CHECK_CC
) == FAIL
)
16731 NEON_ENCODE (SCALAR
, inst
);
16732 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16733 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16734 inst
.instruction
|= LOW4 (dm
);
16735 inst
.instruction
|= HI1 (dm
) << 5;
16736 inst
.instruction
|= neon_quad (rs
) << 6;
16737 inst
.instruction
|= x
<< 17;
16738 inst
.instruction
|= sizebits
<< 16;
16740 neon_dp_fixup (&inst
);
16744 enum neon_shape rs
= neon_select_shape (NS_DR
, NS_QR
, NS_NULL
);
16745 struct neon_type_el et
= neon_check_type (2, rs
,
16746 N_8
| N_16
| N_32
| N_KEY
, N_EQK
);
16747 /* Duplicate ARM register to lanes of vector. */
16748 NEON_ENCODE (ARMREG
, inst
);
16751 case 8: inst
.instruction
|= 0x400000; break;
16752 case 16: inst
.instruction
|= 0x000020; break;
16753 case 32: inst
.instruction
|= 0x000000; break;
16756 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 12;
16757 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 16;
16758 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 7;
16759 inst
.instruction
|= neon_quad (rs
) << 21;
16760 /* The encoding for this instruction is identical for the ARM and Thumb
16761 variants, except for the condition field. */
16762 do_vfp_cond_or_thumb ();
16766 /* VMOV has particularly many variations. It can be one of:
16767 0. VMOV<c><q> <Qd>, <Qm>
16768 1. VMOV<c><q> <Dd>, <Dm>
16769 (Register operations, which are VORR with Rm = Rn.)
16770 2. VMOV<c><q>.<dt> <Qd>, #<imm>
16771 3. VMOV<c><q>.<dt> <Dd>, #<imm>
16773 4. VMOV<c><q>.<size> <Dn[x]>, <Rd>
16774 (ARM register to scalar.)
16775 5. VMOV<c><q> <Dm>, <Rd>, <Rn>
16776 (Two ARM registers to vector.)
16777 6. VMOV<c><q>.<dt> <Rd>, <Dn[x]>
16778 (Scalar to ARM register.)
16779 7. VMOV<c><q> <Rd>, <Rn>, <Dm>
16780 (Vector to two ARM registers.)
16781 8. VMOV.F32 <Sd>, <Sm>
16782 9. VMOV.F64 <Dd>, <Dm>
16783 (VFP register moves.)
16784 10. VMOV.F32 <Sd>, #imm
16785 11. VMOV.F64 <Dd>, #imm
16786 (VFP float immediate load.)
16787 12. VMOV <Rd>, <Sm>
16788 (VFP single to ARM reg.)
16789 13. VMOV <Sd>, <Rm>
16790 (ARM reg to VFP single.)
16791 14. VMOV <Rd>, <Re>, <Sn>, <Sm>
16792 (Two ARM regs to two VFP singles.)
16793 15. VMOV <Sd>, <Se>, <Rn>, <Rm>
16794 (Two VFP singles to two ARM regs.)
16796 These cases can be disambiguated using neon_select_shape, except cases 1/9
16797 and 3/11 which depend on the operand type too.
16799 All the encoded bits are hardcoded by this function.
16801 Cases 4, 6 may be used with VFPv1 and above (only 32-bit transfers!).
16802 Cases 5, 7 may be used with VFPv2 and above.
16804 FIXME: Some of the checking may be a bit sloppy (in a couple of cases you
16805 can specify a type where it doesn't make sense to, and is ignored). */
16810 enum neon_shape rs
= neon_select_shape (NS_RRFF
, NS_FFRR
, NS_DRR
, NS_RRD
,
16811 NS_QQ
, NS_DD
, NS_QI
, NS_DI
, NS_SR
,
16812 NS_RS
, NS_FF
, NS_FI
, NS_RF
, NS_FR
,
16813 NS_HR
, NS_RH
, NS_HI
, NS_NULL
);
16814 struct neon_type_el et
;
16815 const char *ldconst
= 0;
16819 case NS_DD
: /* case 1/9. */
16820 et
= neon_check_type (2, rs
, N_EQK
, N_F64
| N_KEY
);
16821 /* It is not an error here if no type is given. */
16823 if (et
.type
== NT_float
&& et
.size
== 64)
16825 do_vfp_nsyn_opcode ("fcpyd");
16828 /* fall through. */
16830 case NS_QQ
: /* case 0/1. */
16832 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
16834 /* The architecture manual I have doesn't explicitly state which
16835 value the U bit should have for register->register moves, but
16836 the equivalent VORR instruction has U = 0, so do that. */
16837 inst
.instruction
= 0x0200110;
16838 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16839 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16840 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
16841 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
16842 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
16843 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
16844 inst
.instruction
|= neon_quad (rs
) << 6;
16846 neon_dp_fixup (&inst
);
16850 case NS_DI
: /* case 3/11. */
16851 et
= neon_check_type (2, rs
, N_EQK
, N_F64
| N_KEY
);
16853 if (et
.type
== NT_float
&& et
.size
== 64)
16855 /* case 11 (fconstd). */
16856 ldconst
= "fconstd";
16857 goto encode_fconstd
;
16859 /* fall through. */
16861 case NS_QI
: /* case 2/3. */
16862 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
16864 inst
.instruction
= 0x0800010;
16865 neon_move_immediate ();
16866 neon_dp_fixup (&inst
);
16869 case NS_SR
: /* case 4. */
16871 unsigned bcdebits
= 0;
16873 unsigned dn
= NEON_SCALAR_REG (inst
.operands
[0].reg
);
16874 unsigned x
= NEON_SCALAR_INDEX (inst
.operands
[0].reg
);
16876 /* .<size> is optional here, defaulting to .32. */
16877 if (inst
.vectype
.elems
== 0
16878 && inst
.operands
[0].vectype
.type
== NT_invtype
16879 && inst
.operands
[1].vectype
.type
== NT_invtype
)
16881 inst
.vectype
.el
[0].type
= NT_untyped
;
16882 inst
.vectype
.el
[0].size
= 32;
16883 inst
.vectype
.elems
= 1;
16886 et
= neon_check_type (2, NS_NULL
, N_8
| N_16
| N_32
| N_KEY
, N_EQK
);
16887 logsize
= neon_logbits (et
.size
);
16889 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v1
),
16891 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_v1
)
16892 && et
.size
!= 32, _(BAD_FPU
));
16893 constraint (et
.type
== NT_invtype
, _("bad type for scalar"));
16894 constraint (x
>= 64 / et
.size
, _("scalar index out of range"));
16898 case 8: bcdebits
= 0x8; break;
16899 case 16: bcdebits
= 0x1; break;
16900 case 32: bcdebits
= 0x0; break;
16904 bcdebits
|= x
<< logsize
;
16906 inst
.instruction
= 0xe000b10;
16907 do_vfp_cond_or_thumb ();
16908 inst
.instruction
|= LOW4 (dn
) << 16;
16909 inst
.instruction
|= HI1 (dn
) << 7;
16910 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
16911 inst
.instruction
|= (bcdebits
& 3) << 5;
16912 inst
.instruction
|= (bcdebits
>> 2) << 21;
16916 case NS_DRR
: /* case 5 (fmdrr). */
16917 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v2
),
16920 inst
.instruction
= 0xc400b10;
16921 do_vfp_cond_or_thumb ();
16922 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
);
16923 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 5;
16924 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
16925 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
16928 case NS_RS
: /* case 6. */
16931 unsigned dn
= NEON_SCALAR_REG (inst
.operands
[1].reg
);
16932 unsigned x
= NEON_SCALAR_INDEX (inst
.operands
[1].reg
);
16933 unsigned abcdebits
= 0;
16935 /* .<dt> is optional here, defaulting to .32. */
16936 if (inst
.vectype
.elems
== 0
16937 && inst
.operands
[0].vectype
.type
== NT_invtype
16938 && inst
.operands
[1].vectype
.type
== NT_invtype
)
16940 inst
.vectype
.el
[0].type
= NT_untyped
;
16941 inst
.vectype
.el
[0].size
= 32;
16942 inst
.vectype
.elems
= 1;
16945 et
= neon_check_type (2, NS_NULL
,
16946 N_EQK
, N_S8
| N_S16
| N_U8
| N_U16
| N_32
| N_KEY
);
16947 logsize
= neon_logbits (et
.size
);
16949 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v1
),
16951 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_v1
)
16952 && et
.size
!= 32, _(BAD_FPU
));
16953 constraint (et
.type
== NT_invtype
, _("bad type for scalar"));
16954 constraint (x
>= 64 / et
.size
, _("scalar index out of range"));
16958 case 8: abcdebits
= (et
.type
== NT_signed
) ? 0x08 : 0x18; break;
16959 case 16: abcdebits
= (et
.type
== NT_signed
) ? 0x01 : 0x11; break;
16960 case 32: abcdebits
= 0x00; break;
16964 abcdebits
|= x
<< logsize
;
16965 inst
.instruction
= 0xe100b10;
16966 do_vfp_cond_or_thumb ();
16967 inst
.instruction
|= LOW4 (dn
) << 16;
16968 inst
.instruction
|= HI1 (dn
) << 7;
16969 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
16970 inst
.instruction
|= (abcdebits
& 3) << 5;
16971 inst
.instruction
|= (abcdebits
>> 2) << 21;
16975 case NS_RRD
: /* case 7 (fmrrd). */
16976 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v2
),
16979 inst
.instruction
= 0xc500b10;
16980 do_vfp_cond_or_thumb ();
16981 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
16982 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
16983 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
16984 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
16987 case NS_FF
: /* case 8 (fcpys). */
16988 do_vfp_nsyn_opcode ("fcpys");
16992 case NS_FI
: /* case 10 (fconsts). */
16993 ldconst
= "fconsts";
16995 if (!inst
.operands
[1].immisfloat
)
16998 /* Immediate has to fit in 8 bits so float is enough. */
16999 float imm
= (float) inst
.operands
[1].imm
;
17000 memcpy (&new_imm
, &imm
, sizeof (float));
17001 /* But the assembly may have been written to provide an integer
17002 bit pattern that equates to a float, so check that the
17003 conversion has worked. */
17004 if (is_quarter_float (new_imm
))
17006 if (is_quarter_float (inst
.operands
[1].imm
))
17007 as_warn (_("immediate constant is valid both as a bit-pattern and a floating point value (using the fp value)"));
17009 inst
.operands
[1].imm
= new_imm
;
17010 inst
.operands
[1].immisfloat
= 1;
17014 if (is_quarter_float (inst
.operands
[1].imm
))
17016 inst
.operands
[1].imm
= neon_qfloat_bits (inst
.operands
[1].imm
);
17017 do_vfp_nsyn_opcode (ldconst
);
17019 /* ARMv8.2 fp16 vmov.f16 instruction. */
17021 do_scalar_fp16_v82_encode ();
17024 first_error (_("immediate out of range"));
17028 case NS_RF
: /* case 12 (fmrs). */
17029 do_vfp_nsyn_opcode ("fmrs");
17030 /* ARMv8.2 fp16 vmov.f16 instruction. */
17032 do_scalar_fp16_v82_encode ();
17036 case NS_FR
: /* case 13 (fmsr). */
17037 do_vfp_nsyn_opcode ("fmsr");
17038 /* ARMv8.2 fp16 vmov.f16 instruction. */
17040 do_scalar_fp16_v82_encode ();
17043 /* The encoders for the fmrrs and fmsrr instructions expect three operands
17044 (one of which is a list), but we have parsed four. Do some fiddling to
17045 make the operands what do_vfp_reg2_from_sp2 and do_vfp_sp2_from_reg2
17047 case NS_RRFF
: /* case 14 (fmrrs). */
17048 constraint (inst
.operands
[3].reg
!= inst
.operands
[2].reg
+ 1,
17049 _("VFP registers must be adjacent"));
17050 inst
.operands
[2].imm
= 2;
17051 memset (&inst
.operands
[3], '\0', sizeof (inst
.operands
[3]));
17052 do_vfp_nsyn_opcode ("fmrrs");
17055 case NS_FFRR
: /* case 15 (fmsrr). */
17056 constraint (inst
.operands
[1].reg
!= inst
.operands
[0].reg
+ 1,
17057 _("VFP registers must be adjacent"));
17058 inst
.operands
[1] = inst
.operands
[2];
17059 inst
.operands
[2] = inst
.operands
[3];
17060 inst
.operands
[0].imm
= 2;
17061 memset (&inst
.operands
[3], '\0', sizeof (inst
.operands
[3]));
17062 do_vfp_nsyn_opcode ("fmsrr");
17066 /* neon_select_shape has determined that the instruction
17067 shape is wrong and has already set the error message. */
17076 do_neon_rshift_round_imm (void)
17078 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
17079 struct neon_type_el et
= neon_check_type (2, rs
, N_EQK
, N_SU_ALL
| N_KEY
);
17080 int imm
= inst
.operands
[2].imm
;
17082 /* imm == 0 case is encoded as VMOV for V{R}SHR. */
17085 inst
.operands
[2].present
= 0;
17090 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
17091 _("immediate out of range for shift"));
17092 neon_imm_shift (TRUE
, et
.type
== NT_unsigned
, neon_quad (rs
), et
,
17097 do_neon_movhf (void)
17099 enum neon_shape rs
= neon_select_shape (NS_HH
, NS_NULL
);
17100 constraint (rs
!= NS_HH
, _("invalid suffix"));
17102 if (inst
.cond
!= COND_ALWAYS
)
17106 as_warn (_("ARMv8.2 scalar fp16 instruction cannot be conditional,"
17107 " the behaviour is UNPREDICTABLE"));
17111 inst
.error
= BAD_COND
;
17116 do_vfp_sp_monadic ();
17119 inst
.instruction
|= 0xf0000000;
17123 do_neon_movl (void)
17125 struct neon_type_el et
= neon_check_type (2, NS_QD
,
17126 N_EQK
| N_DBL
, N_SU_32
| N_KEY
);
17127 unsigned sizebits
= et
.size
>> 3;
17128 inst
.instruction
|= sizebits
<< 19;
17129 neon_two_same (0, et
.type
== NT_unsigned
, -1);
17135 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
17136 struct neon_type_el et
= neon_check_type (2, rs
,
17137 N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
17138 NEON_ENCODE (INTEGER
, inst
);
17139 neon_two_same (neon_quad (rs
), 1, et
.size
);
17143 do_neon_zip_uzp (void)
17145 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
17146 struct neon_type_el et
= neon_check_type (2, rs
,
17147 N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
17148 if (rs
== NS_DD
&& et
.size
== 32)
17150 /* Special case: encode as VTRN.32 <Dd>, <Dm>. */
17151 inst
.instruction
= N_MNEM_vtrn
;
17155 neon_two_same (neon_quad (rs
), 1, et
.size
);
17159 do_neon_sat_abs_neg (void)
17161 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
17162 struct neon_type_el et
= neon_check_type (2, rs
,
17163 N_EQK
, N_S8
| N_S16
| N_S32
| N_KEY
);
17164 neon_two_same (neon_quad (rs
), 1, et
.size
);
17168 do_neon_pair_long (void)
17170 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
17171 struct neon_type_el et
= neon_check_type (2, rs
, N_EQK
, N_SU_32
| N_KEY
);
17172 /* Unsigned is encoded in OP field (bit 7) for these instruction. */
17173 inst
.instruction
|= (et
.type
== NT_unsigned
) << 7;
17174 neon_two_same (neon_quad (rs
), 1, et
.size
);
17178 do_neon_recip_est (void)
17180 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
17181 struct neon_type_el et
= neon_check_type (2, rs
,
17182 N_EQK
| N_FLT
, N_F_16_32
| N_U32
| N_KEY
);
17183 inst
.instruction
|= (et
.type
== NT_float
) << 8;
17184 neon_two_same (neon_quad (rs
), 1, et
.size
);
17190 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
17191 struct neon_type_el et
= neon_check_type (2, rs
,
17192 N_EQK
, N_S8
| N_S16
| N_S32
| N_KEY
);
17193 neon_two_same (neon_quad (rs
), 1, et
.size
);
17199 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
17200 struct neon_type_el et
= neon_check_type (2, rs
,
17201 N_EQK
, N_I8
| N_I16
| N_I32
| N_KEY
);
17202 neon_two_same (neon_quad (rs
), 1, et
.size
);
17208 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
17209 struct neon_type_el et
= neon_check_type (2, rs
,
17210 N_EQK
| N_INT
, N_8
| N_KEY
);
17211 neon_two_same (neon_quad (rs
), 1, et
.size
);
17217 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
17218 neon_two_same (neon_quad (rs
), 1, -1);
17222 do_neon_tbl_tbx (void)
17224 unsigned listlenbits
;
17225 neon_check_type (3, NS_DLD
, N_EQK
, N_EQK
, N_8
| N_KEY
);
17227 if (inst
.operands
[1].imm
< 1 || inst
.operands
[1].imm
> 4)
17229 first_error (_("bad list length for table lookup"));
17233 listlenbits
= inst
.operands
[1].imm
- 1;
17234 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
17235 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
17236 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
17237 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
17238 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
17239 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
17240 inst
.instruction
|= listlenbits
<< 8;
17242 neon_dp_fixup (&inst
);
17246 do_neon_ldm_stm (void)
17248 /* P, U and L bits are part of bitmask. */
17249 int is_dbmode
= (inst
.instruction
& (1 << 24)) != 0;
17250 unsigned offsetbits
= inst
.operands
[1].imm
* 2;
17252 if (inst
.operands
[1].issingle
)
17254 do_vfp_nsyn_ldm_stm (is_dbmode
);
17258 constraint (is_dbmode
&& !inst
.operands
[0].writeback
,
17259 _("writeback (!) must be used for VLDMDB and VSTMDB"));
17261 constraint (inst
.operands
[1].imm
< 1 || inst
.operands
[1].imm
> 16,
17262 _("register list must contain at least 1 and at most 16 "
17265 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
17266 inst
.instruction
|= inst
.operands
[0].writeback
<< 21;
17267 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 12;
17268 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 22;
17270 inst
.instruction
|= offsetbits
;
17272 do_vfp_cond_or_thumb ();
17276 do_neon_ldr_str (void)
17278 int is_ldr
= (inst
.instruction
& (1 << 20)) != 0;
17280 /* Use of PC in vstr in ARM mode is deprecated in ARMv7.
17281 And is UNPREDICTABLE in thumb mode. */
17283 && inst
.operands
[1].reg
== REG_PC
17284 && (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v7
) || thumb_mode
))
17287 inst
.error
= _("Use of PC here is UNPREDICTABLE");
17288 else if (warn_on_deprecated
)
17289 as_tsktsk (_("Use of PC here is deprecated"));
17292 if (inst
.operands
[0].issingle
)
17295 do_vfp_nsyn_opcode ("flds");
17297 do_vfp_nsyn_opcode ("fsts");
17299 /* ARMv8.2 vldr.16/vstr.16 instruction. */
17300 if (inst
.vectype
.el
[0].size
== 16)
17301 do_scalar_fp16_v82_encode ();
17306 do_vfp_nsyn_opcode ("fldd");
17308 do_vfp_nsyn_opcode ("fstd");
17312 /* "interleave" version also handles non-interleaving register VLD1/VST1
17316 do_neon_ld_st_interleave (void)
17318 struct neon_type_el et
= neon_check_type (1, NS_NULL
,
17319 N_8
| N_16
| N_32
| N_64
);
17320 unsigned alignbits
= 0;
17322 /* The bits in this table go:
17323 0: register stride of one (0) or two (1)
17324 1,2: register list length, minus one (1, 2, 3, 4).
17325 3,4: <n> in instruction type, minus one (VLD<n> / VST<n>).
17326 We use -1 for invalid entries. */
17327 const int typetable
[] =
17329 0x7, -1, 0xa, -1, 0x6, -1, 0x2, -1, /* VLD1 / VST1. */
17330 -1, -1, 0x8, 0x9, -1, -1, 0x3, -1, /* VLD2 / VST2. */
17331 -1, -1, -1, -1, 0x4, 0x5, -1, -1, /* VLD3 / VST3. */
17332 -1, -1, -1, -1, -1, -1, 0x0, 0x1 /* VLD4 / VST4. */
17336 if (et
.type
== NT_invtype
)
17339 if (inst
.operands
[1].immisalign
)
17340 switch (inst
.operands
[1].imm
>> 8)
17342 case 64: alignbits
= 1; break;
17344 if (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 2
17345 && NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 4)
17346 goto bad_alignment
;
17350 if (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 4)
17351 goto bad_alignment
;
17356 first_error (_("bad alignment"));
17360 inst
.instruction
|= alignbits
<< 4;
17361 inst
.instruction
|= neon_logbits (et
.size
) << 6;
17363 /* Bits [4:6] of the immediate in a list specifier encode register stride
17364 (minus 1) in bit 4, and list length in bits [5:6]. We put the <n> of
17365 VLD<n>/VST<n> in bits [9:8] of the initial bitmask. Suck it out here, look
17366 up the right value for "type" in a table based on this value and the given
17367 list style, then stick it back. */
17368 idx
= ((inst
.operands
[0].imm
>> 4) & 7)
17369 | (((inst
.instruction
>> 8) & 3) << 3);
17371 typebits
= typetable
[idx
];
17373 constraint (typebits
== -1, _("bad list type for instruction"));
17374 constraint (((inst
.instruction
>> 8) & 3) && et
.size
== 64,
17375 _("bad element type for instruction"));
17377 inst
.instruction
&= ~0xf00;
17378 inst
.instruction
|= typebits
<< 8;
17381 /* Check alignment is valid for do_neon_ld_st_lane and do_neon_ld_dup.
17382 *DO_ALIGN is set to 1 if the relevant alignment bit should be set, 0
17383 otherwise. The variable arguments are a list of pairs of legal (size, align)
17384 values, terminated with -1. */
17387 neon_alignment_bit (int size
, int align
, int *do_alignment
, ...)
17390 int result
= FAIL
, thissize
, thisalign
;
17392 if (!inst
.operands
[1].immisalign
)
17398 va_start (ap
, do_alignment
);
17402 thissize
= va_arg (ap
, int);
17403 if (thissize
== -1)
17405 thisalign
= va_arg (ap
, int);
17407 if (size
== thissize
&& align
== thisalign
)
17410 while (result
!= SUCCESS
);
17414 if (result
== SUCCESS
)
17417 first_error (_("unsupported alignment for instruction"));
17423 do_neon_ld_st_lane (void)
17425 struct neon_type_el et
= neon_check_type (1, NS_NULL
, N_8
| N_16
| N_32
);
17426 int align_good
, do_alignment
= 0;
17427 int logsize
= neon_logbits (et
.size
);
17428 int align
= inst
.operands
[1].imm
>> 8;
17429 int n
= (inst
.instruction
>> 8) & 3;
17430 int max_el
= 64 / et
.size
;
17432 if (et
.type
== NT_invtype
)
17435 constraint (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != n
+ 1,
17436 _("bad list length"));
17437 constraint (NEON_LANE (inst
.operands
[0].imm
) >= max_el
,
17438 _("scalar index out of range"));
17439 constraint (n
!= 0 && NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2
17441 _("stride of 2 unavailable when element size is 8"));
17445 case 0: /* VLD1 / VST1. */
17446 align_good
= neon_alignment_bit (et
.size
, align
, &do_alignment
, 16, 16,
17448 if (align_good
== FAIL
)
17452 unsigned alignbits
= 0;
17455 case 16: alignbits
= 0x1; break;
17456 case 32: alignbits
= 0x3; break;
17459 inst
.instruction
|= alignbits
<< 4;
17463 case 1: /* VLD2 / VST2. */
17464 align_good
= neon_alignment_bit (et
.size
, align
, &do_alignment
, 8, 16,
17465 16, 32, 32, 64, -1);
17466 if (align_good
== FAIL
)
17469 inst
.instruction
|= 1 << 4;
17472 case 2: /* VLD3 / VST3. */
17473 constraint (inst
.operands
[1].immisalign
,
17474 _("can't use alignment with this instruction"));
17477 case 3: /* VLD4 / VST4. */
17478 align_good
= neon_alignment_bit (et
.size
, align
, &do_alignment
, 8, 32,
17479 16, 64, 32, 64, 32, 128, -1);
17480 if (align_good
== FAIL
)
17484 unsigned alignbits
= 0;
17487 case 8: alignbits
= 0x1; break;
17488 case 16: alignbits
= 0x1; break;
17489 case 32: alignbits
= (align
== 64) ? 0x1 : 0x2; break;
17492 inst
.instruction
|= alignbits
<< 4;
17499 /* Reg stride of 2 is encoded in bit 5 when size==16, bit 6 when size==32. */
17500 if (n
!= 0 && NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2)
17501 inst
.instruction
|= 1 << (4 + logsize
);
17503 inst
.instruction
|= NEON_LANE (inst
.operands
[0].imm
) << (logsize
+ 5);
17504 inst
.instruction
|= logsize
<< 10;
17507 /* Encode single n-element structure to all lanes VLD<n> instructions. */
17510 do_neon_ld_dup (void)
17512 struct neon_type_el et
= neon_check_type (1, NS_NULL
, N_8
| N_16
| N_32
);
17513 int align_good
, do_alignment
= 0;
17515 if (et
.type
== NT_invtype
)
17518 switch ((inst
.instruction
>> 8) & 3)
17520 case 0: /* VLD1. */
17521 gas_assert (NEON_REG_STRIDE (inst
.operands
[0].imm
) != 2);
17522 align_good
= neon_alignment_bit (et
.size
, inst
.operands
[1].imm
>> 8,
17523 &do_alignment
, 16, 16, 32, 32, -1);
17524 if (align_good
== FAIL
)
17526 switch (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
))
17529 case 2: inst
.instruction
|= 1 << 5; break;
17530 default: first_error (_("bad list length")); return;
17532 inst
.instruction
|= neon_logbits (et
.size
) << 6;
17535 case 1: /* VLD2. */
17536 align_good
= neon_alignment_bit (et
.size
, inst
.operands
[1].imm
>> 8,
17537 &do_alignment
, 8, 16, 16, 32, 32, 64,
17539 if (align_good
== FAIL
)
17541 constraint (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 2,
17542 _("bad list length"));
17543 if (NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2)
17544 inst
.instruction
|= 1 << 5;
17545 inst
.instruction
|= neon_logbits (et
.size
) << 6;
17548 case 2: /* VLD3. */
17549 constraint (inst
.operands
[1].immisalign
,
17550 _("can't use alignment with this instruction"));
17551 constraint (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 3,
17552 _("bad list length"));
17553 if (NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2)
17554 inst
.instruction
|= 1 << 5;
17555 inst
.instruction
|= neon_logbits (et
.size
) << 6;
17558 case 3: /* VLD4. */
17560 int align
= inst
.operands
[1].imm
>> 8;
17561 align_good
= neon_alignment_bit (et
.size
, align
, &do_alignment
, 8, 32,
17562 16, 64, 32, 64, 32, 128, -1);
17563 if (align_good
== FAIL
)
17565 constraint (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 4,
17566 _("bad list length"));
17567 if (NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2)
17568 inst
.instruction
|= 1 << 5;
17569 if (et
.size
== 32 && align
== 128)
17570 inst
.instruction
|= 0x3 << 6;
17572 inst
.instruction
|= neon_logbits (et
.size
) << 6;
17579 inst
.instruction
|= do_alignment
<< 4;
17582 /* Disambiguate VLD<n> and VST<n> instructions, and fill in common bits (those
17583 apart from bits [11:4]. */
17586 do_neon_ldx_stx (void)
17588 if (inst
.operands
[1].isreg
)
17589 constraint (inst
.operands
[1].reg
== REG_PC
, BAD_PC
);
17591 switch (NEON_LANE (inst
.operands
[0].imm
))
17593 case NEON_INTERLEAVE_LANES
:
17594 NEON_ENCODE (INTERLV
, inst
);
17595 do_neon_ld_st_interleave ();
17598 case NEON_ALL_LANES
:
17599 NEON_ENCODE (DUP
, inst
);
17600 if (inst
.instruction
== N_INV
)
17602 first_error ("only loads support such operands");
17609 NEON_ENCODE (LANE
, inst
);
17610 do_neon_ld_st_lane ();
17613 /* L bit comes from bit mask. */
17614 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
17615 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
17616 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
17618 if (inst
.operands
[1].postind
)
17620 int postreg
= inst
.operands
[1].imm
& 0xf;
17621 constraint (!inst
.operands
[1].immisreg
,
17622 _("post-index must be a register"));
17623 constraint (postreg
== 0xd || postreg
== 0xf,
17624 _("bad register for post-index"));
17625 inst
.instruction
|= postreg
;
17629 constraint (inst
.operands
[1].immisreg
, BAD_ADDR_MODE
);
17630 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
17631 || inst
.relocs
[0].exp
.X_add_number
!= 0,
17634 if (inst
.operands
[1].writeback
)
17636 inst
.instruction
|= 0xd;
17639 inst
.instruction
|= 0xf;
17643 inst
.instruction
|= 0xf9000000;
17645 inst
.instruction
|= 0xf4000000;
17650 do_vfp_nsyn_fpv8 (enum neon_shape rs
)
17652 /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
17653 D register operands. */
17654 if (neon_shape_class
[rs
] == SC_DOUBLE
)
17655 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
17658 NEON_ENCODE (FPV8
, inst
);
17660 if (rs
== NS_FFF
|| rs
== NS_HHH
)
17662 do_vfp_sp_dyadic ();
17664 /* ARMv8.2 fp16 instruction. */
17666 do_scalar_fp16_v82_encode ();
17669 do_vfp_dp_rd_rn_rm ();
17672 inst
.instruction
|= 0x100;
17674 inst
.instruction
|= 0xf0000000;
17680 set_it_insn_type (OUTSIDE_IT_INSN
);
17682 if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8
) != SUCCESS
)
17683 first_error (_("invalid instruction shape"));
17689 set_it_insn_type (OUTSIDE_IT_INSN
);
17691 if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8
) == SUCCESS
)
17694 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH8
) == FAIL
)
17697 neon_dyadic_misc (NT_untyped
, N_F_16_32
, 0);
17701 do_vrint_1 (enum neon_cvt_mode mode
)
17703 enum neon_shape rs
= neon_select_shape (NS_HH
, NS_FF
, NS_DD
, NS_QQ
, NS_NULL
);
17704 struct neon_type_el et
;
17709 /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
17710 D register operands. */
17711 if (neon_shape_class
[rs
] == SC_DOUBLE
)
17712 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
17715 et
= neon_check_type (2, rs
, N_EQK
| N_VFP
, N_F_ALL
| N_KEY
17717 if (et
.type
!= NT_invtype
)
17719 /* VFP encodings. */
17720 if (mode
== neon_cvt_mode_a
|| mode
== neon_cvt_mode_n
17721 || mode
== neon_cvt_mode_p
|| mode
== neon_cvt_mode_m
)
17722 set_it_insn_type (OUTSIDE_IT_INSN
);
17724 NEON_ENCODE (FPV8
, inst
);
17725 if (rs
== NS_FF
|| rs
== NS_HH
)
17726 do_vfp_sp_monadic ();
17728 do_vfp_dp_rd_rm ();
17732 case neon_cvt_mode_r
: inst
.instruction
|= 0x00000000; break;
17733 case neon_cvt_mode_z
: inst
.instruction
|= 0x00000080; break;
17734 case neon_cvt_mode_x
: inst
.instruction
|= 0x00010000; break;
17735 case neon_cvt_mode_a
: inst
.instruction
|= 0xf0000000; break;
17736 case neon_cvt_mode_n
: inst
.instruction
|= 0xf0010000; break;
17737 case neon_cvt_mode_p
: inst
.instruction
|= 0xf0020000; break;
17738 case neon_cvt_mode_m
: inst
.instruction
|= 0xf0030000; break;
17742 inst
.instruction
|= (rs
== NS_DD
) << 8;
17743 do_vfp_cond_or_thumb ();
17745 /* ARMv8.2 fp16 vrint instruction. */
17747 do_scalar_fp16_v82_encode ();
17751 /* Neon encodings (or something broken...). */
17753 et
= neon_check_type (2, rs
, N_EQK
, N_F_16_32
| N_KEY
);
17755 if (et
.type
== NT_invtype
)
17758 set_it_insn_type (OUTSIDE_IT_INSN
);
17759 NEON_ENCODE (FLOAT
, inst
);
17761 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH8
) == FAIL
)
17764 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
17765 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
17766 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
17767 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
17768 inst
.instruction
|= neon_quad (rs
) << 6;
17769 /* Mask off the original size bits and reencode them. */
17770 inst
.instruction
= ((inst
.instruction
& 0xfff3ffff)
17771 | neon_logbits (et
.size
) << 18);
17775 case neon_cvt_mode_z
: inst
.instruction
|= 3 << 7; break;
17776 case neon_cvt_mode_x
: inst
.instruction
|= 1 << 7; break;
17777 case neon_cvt_mode_a
: inst
.instruction
|= 2 << 7; break;
17778 case neon_cvt_mode_n
: inst
.instruction
|= 0 << 7; break;
17779 case neon_cvt_mode_p
: inst
.instruction
|= 7 << 7; break;
17780 case neon_cvt_mode_m
: inst
.instruction
|= 5 << 7; break;
17781 case neon_cvt_mode_r
: inst
.error
= _("invalid rounding mode"); break;
17786 inst
.instruction
|= 0xfc000000;
17788 inst
.instruction
|= 0xf0000000;
17795 do_vrint_1 (neon_cvt_mode_x
);
17801 do_vrint_1 (neon_cvt_mode_z
);
17807 do_vrint_1 (neon_cvt_mode_r
);
17813 do_vrint_1 (neon_cvt_mode_a
);
17819 do_vrint_1 (neon_cvt_mode_n
);
17825 do_vrint_1 (neon_cvt_mode_p
);
17831 do_vrint_1 (neon_cvt_mode_m
);
/* Translate GAS's internal scalar encoding OPND into the VCMLA scalar
   field for the given element size, or report an error for scalars the
   instruction cannot encode.  */

static unsigned
neon_scalar_for_vcmla (unsigned opnd, unsigned elsize)
{
  unsigned regno = NEON_SCALAR_REG (opnd);
  unsigned elno = NEON_SCALAR_INDEX (opnd);

  if (elsize == 16 && elno < 2 && regno < 16)
    return regno | (elno << 4);
  else if (elsize == 32 && elno == 0)
    return regno;

  first_error (_("scalar out of range"));
  return 0;
}
17852 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_armv8
),
17854 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
,
17855 _("expression too complex"));
17856 unsigned rot
= inst
.relocs
[0].exp
.X_add_number
;
17857 constraint (rot
!= 0 && rot
!= 90 && rot
!= 180 && rot
!= 270,
17858 _("immediate out of range"));
17860 if (inst
.operands
[2].isscalar
)
17862 enum neon_shape rs
= neon_select_shape (NS_DDSI
, NS_QQSI
, NS_NULL
);
17863 unsigned size
= neon_check_type (3, rs
, N_EQK
, N_EQK
,
17864 N_KEY
| N_F16
| N_F32
).size
;
17865 unsigned m
= neon_scalar_for_vcmla (inst
.operands
[2].reg
, size
);
17867 inst
.instruction
= 0xfe000800;
17868 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
17869 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
17870 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
17871 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
17872 inst
.instruction
|= LOW4 (m
);
17873 inst
.instruction
|= HI1 (m
) << 5;
17874 inst
.instruction
|= neon_quad (rs
) << 6;
17875 inst
.instruction
|= rot
<< 20;
17876 inst
.instruction
|= (size
== 32) << 23;
17880 enum neon_shape rs
= neon_select_shape (NS_DDDI
, NS_QQQI
, NS_NULL
);
17881 unsigned size
= neon_check_type (3, rs
, N_EQK
, N_EQK
,
17882 N_KEY
| N_F16
| N_F32
).size
;
17883 neon_three_same (neon_quad (rs
), 0, -1);
17884 inst
.instruction
&= 0x00ffffff; /* Undo neon_dp_fixup. */
17885 inst
.instruction
|= 0xfc200800;
17886 inst
.instruction
|= rot
<< 23;
17887 inst
.instruction
|= (size
== 32) << 20;
17894 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_armv8
),
17896 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
,
17897 _("expression too complex"));
17898 unsigned rot
= inst
.relocs
[0].exp
.X_add_number
;
17899 constraint (rot
!= 90 && rot
!= 270, _("immediate out of range"));
17900 enum neon_shape rs
= neon_select_shape (NS_DDDI
, NS_QQQI
, NS_NULL
);
17901 unsigned size
= neon_check_type (3, rs
, N_EQK
, N_EQK
,
17902 N_KEY
| N_F16
| N_F32
).size
;
17903 neon_three_same (neon_quad (rs
), 0, -1);
17904 inst
.instruction
&= 0x00ffffff; /* Undo neon_dp_fixup. */
17905 inst
.instruction
|= 0xfc800800;
17906 inst
.instruction
|= (rot
== 270) << 24;
17907 inst
.instruction
|= (size
== 32) << 20;
17910 /* Dot Product instructions encoding support. */
17913 do_neon_dotproduct (int unsigned_p
)
17915 enum neon_shape rs
;
17916 unsigned scalar_oprd2
= 0;
17919 if (inst
.cond
!= COND_ALWAYS
)
17920 as_warn (_("Dot Product instructions cannot be conditional, the behaviour "
17921 "is UNPREDICTABLE"));
17923 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_armv8
),
17926 /* Dot Product instructions are in three-same D/Q register format or the third
17927 operand can be a scalar index register. */
17928 if (inst
.operands
[2].isscalar
)
17930 scalar_oprd2
= neon_scalar_for_mul (inst
.operands
[2].reg
, 32);
17931 high8
= 0xfe000000;
17932 rs
= neon_select_shape (NS_DDS
, NS_QQS
, NS_NULL
);
17936 high8
= 0xfc000000;
17937 rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
17941 neon_check_type (3, rs
, N_EQK
, N_EQK
, N_KEY
| N_U8
);
17943 neon_check_type (3, rs
, N_EQK
, N_EQK
, N_KEY
| N_S8
);
17945 /* The "U" bit in traditional Three Same encoding is fixed to 0 for Dot
17946 Product instruction, so we pass 0 as the "ubit" parameter. And the
17947 "Size" field are fixed to 0x2, so we pass 32 as the "size" parameter. */
17948 neon_three_same (neon_quad (rs
), 0, 32);
17950 /* Undo neon_dp_fixup. Dot Product instructions are using a slightly
17951 different NEON three-same encoding. */
17952 inst
.instruction
&= 0x00ffffff;
17953 inst
.instruction
|= high8
;
17954 /* Encode 'U' bit which indicates signedness. */
17955 inst
.instruction
|= (unsigned_p
? 1 : 0) << 4;
17956 /* Re-encode operand2 if it's indexed scalar operand. What has been encoded
17957 from inst.operand[2].reg in neon_three_same is GAS's internal encoding, not
17958 the instruction encoding. */
17959 if (inst
.operands
[2].isscalar
)
17961 inst
.instruction
&= 0xffffffd0;
17962 inst
.instruction
|= LOW4 (scalar_oprd2
);
17963 inst
.instruction
|= HI1 (scalar_oprd2
) << 5;
/* Dot Product instructions for signed integer.  */

static void
do_neon_dotproduct_s (void)
{
  return do_neon_dotproduct (0);
}

/* Dot Product instructions for unsigned integer.  */

static void
do_neon_dotproduct_u (void)
{
  return do_neon_dotproduct (1);
}
17983 /* Crypto v1 instructions. */
17985 do_crypto_2op_1 (unsigned elttype
, int op
)
17987 set_it_insn_type (OUTSIDE_IT_INSN
);
17989 if (neon_check_type (2, NS_QQ
, N_EQK
| N_UNT
, elttype
| N_UNT
| N_KEY
).type
17995 NEON_ENCODE (INTEGER
, inst
);
17996 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
17997 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
17998 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
17999 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
18001 inst
.instruction
|= op
<< 6;
18004 inst
.instruction
|= 0xfc000000;
18006 inst
.instruction
|= 0xf0000000;
18010 do_crypto_3op_1 (int u
, int op
)
18012 set_it_insn_type (OUTSIDE_IT_INSN
);
18014 if (neon_check_type (3, NS_QQQ
, N_EQK
| N_UNT
, N_EQK
| N_UNT
,
18015 N_32
| N_UNT
| N_KEY
).type
== NT_invtype
)
18020 NEON_ENCODE (INTEGER
, inst
);
18021 neon_three_same (1, u
, 8 << op
);
18027 do_crypto_2op_1 (N_8
, 0);
18033 do_crypto_2op_1 (N_8
, 1);
18039 do_crypto_2op_1 (N_8
, 2);
18045 do_crypto_2op_1 (N_8
, 3);
18051 do_crypto_3op_1 (0, 0);
18057 do_crypto_3op_1 (0, 1);
18063 do_crypto_3op_1 (0, 2);
18069 do_crypto_3op_1 (0, 3);
18075 do_crypto_3op_1 (1, 0);
18081 do_crypto_3op_1 (1, 1);
18085 do_sha256su1 (void)
18087 do_crypto_3op_1 (1, 2);
18093 do_crypto_2op_1 (N_32
, -1);
18099 do_crypto_2op_1 (N_32
, 0);
18103 do_sha256su0 (void)
18105 do_crypto_2op_1 (N_32
, 1);
18109 do_crc32_1 (unsigned int poly
, unsigned int sz
)
18111 unsigned int Rd
= inst
.operands
[0].reg
;
18112 unsigned int Rn
= inst
.operands
[1].reg
;
18113 unsigned int Rm
= inst
.operands
[2].reg
;
18115 set_it_insn_type (OUTSIDE_IT_INSN
);
18116 inst
.instruction
|= LOW4 (Rd
) << (thumb_mode
? 8 : 12);
18117 inst
.instruction
|= LOW4 (Rn
) << 16;
18118 inst
.instruction
|= LOW4 (Rm
);
18119 inst
.instruction
|= sz
<< (thumb_mode
? 4 : 21);
18120 inst
.instruction
|= poly
<< (thumb_mode
? 20 : 9);
18122 if (Rd
== REG_PC
|| Rn
== REG_PC
|| Rm
== REG_PC
)
18123 as_warn (UNPRED_REG ("r15"));
18165 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
18167 neon_check_type (2, NS_FD
, N_S32
, N_F64
);
18168 do_vfp_sp_dp_cvt ();
18169 do_vfp_cond_or_thumb ();
18173 /* Overall per-instruction processing. */
18175 /* We need to be able to fix up arbitrary expressions in some statements.
18176 This is so that we can handle symbols that are an arbitrary distance from
18177 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
18178 which returns part of an address in a form which will be valid for
18179 a data instruction. We do this by pushing the expression into a symbol
18180 in the expr_section, and creating a fix for that. */
18183 fix_new_arm (fragS
* frag
,
18197 /* Create an absolute valued symbol, so we have something to
18198 refer to in the object file. Unfortunately for us, gas's
18199 generic expression parsing will already have folded out
18200 any use of .set foo/.type foo %function that may have
18201 been used to set type information of the target location,
18202 that's being specified symbolically. We have to presume
18203 the user knows what they are doing. */
18207 sprintf (name
, "*ABS*0x%lx", (unsigned long)exp
->X_add_number
);
18209 symbol
= symbol_find_or_make (name
);
18210 S_SET_SEGMENT (symbol
, absolute_section
);
18211 symbol_set_frag (symbol
, &zero_address_frag
);
18212 S_SET_VALUE (symbol
, exp
->X_add_number
);
18213 exp
->X_op
= O_symbol
;
18214 exp
->X_add_symbol
= symbol
;
18215 exp
->X_add_number
= 0;
18221 new_fix
= fix_new_exp (frag
, where
, size
, exp
, pc_rel
,
18222 (enum bfd_reloc_code_real
) reloc
);
18226 new_fix
= (fixS
*) fix_new (frag
, where
, size
, make_expr_symbol (exp
), 0,
18227 pc_rel
, (enum bfd_reloc_code_real
) reloc
);
18231 /* Mark whether the fix is to a THUMB instruction, or an ARM
18233 new_fix
->tc_fix_data
= thumb_mode
;
18236 /* Create a frg for an instruction requiring relaxation. */
18238 output_relax_insn (void)
18244 /* The size of the instruction is unknown, so tie the debug info to the
18245 start of the instruction. */
18246 dwarf2_emit_insn (0);
18248 switch (inst
.relocs
[0].exp
.X_op
)
18251 sym
= inst
.relocs
[0].exp
.X_add_symbol
;
18252 offset
= inst
.relocs
[0].exp
.X_add_number
;
18256 offset
= inst
.relocs
[0].exp
.X_add_number
;
18259 sym
= make_expr_symbol (&inst
.relocs
[0].exp
);
18263 to
= frag_var (rs_machine_dependent
, INSN_SIZE
, THUMB_SIZE
,
18264 inst
.relax
, sym
, offset
, NULL
/*offset, opcode*/);
18265 md_number_to_chars (to
, inst
.instruction
, THUMB_SIZE
);
18268 /* Write a 32-bit thumb instruction to buf. */
18270 put_thumb32_insn (char * buf
, unsigned long insn
)
18272 md_number_to_chars (buf
, insn
>> 16, THUMB_SIZE
);
18273 md_number_to_chars (buf
+ THUMB_SIZE
, insn
, THUMB_SIZE
);
18277 output_inst (const char * str
)
18283 as_bad ("%s -- `%s'", inst
.error
, str
);
18288 output_relax_insn ();
18291 if (inst
.size
== 0)
18294 to
= frag_more (inst
.size
);
18295 /* PR 9814: Record the thumb mode into the current frag so that we know
18296 what type of NOP padding to use, if necessary. We override any previous
18297 setting so that if the mode has changed then the NOPS that we use will
18298 match the encoding of the last instruction in the frag. */
18299 frag_now
->tc_frag_data
.thumb_mode
= thumb_mode
| MODE_RECORDED
;
18301 if (thumb_mode
&& (inst
.size
> THUMB_SIZE
))
18303 gas_assert (inst
.size
== (2 * THUMB_SIZE
));
18304 put_thumb32_insn (to
, inst
.instruction
);
18306 else if (inst
.size
> INSN_SIZE
)
18308 gas_assert (inst
.size
== (2 * INSN_SIZE
));
18309 md_number_to_chars (to
, inst
.instruction
, INSN_SIZE
);
18310 md_number_to_chars (to
+ INSN_SIZE
, inst
.instruction
, INSN_SIZE
);
18313 md_number_to_chars (to
, inst
.instruction
, inst
.size
);
18316 for (r
= 0; r
< ARM_IT_MAX_RELOCS
; r
++)
18318 if (inst
.relocs
[r
].type
!= BFD_RELOC_UNUSED
)
18319 fix_new_arm (frag_now
, to
- frag_now
->fr_literal
,
18320 inst
.size
, & inst
.relocs
[r
].exp
, inst
.relocs
[r
].pc_rel
,
18321 inst
.relocs
[r
].type
);
18324 dwarf2_emit_insn (inst
.size
);
18328 output_it_inst (int cond
, int mask
, char * to
)
18330 unsigned long instruction
= 0xbf00;
18333 instruction
|= mask
;
18334 instruction
|= cond
<< 4;
18338 to
= frag_more (2);
18340 dwarf2_emit_insn (2);
18344 md_number_to_chars (to
, instruction
, 2);
/* Tag values used in struct asm_opcode's tag field.  */
enum opcode_tag
{
  OT_unconditional,	/* Instruction cannot be conditionalized.
			   The ARM condition field is still 0xE.  */
  OT_unconditionalF,	/* Instruction cannot be conditionalized
			   and carries 0xF in its ARM condition field.  */
  OT_csuffix,		/* Instruction takes a conditional suffix.  */
  OT_csuffixF,		/* Some forms of the instruction take a conditional
			   suffix, others place 0xF where the condition field
			   would be.  */
  OT_cinfix3,		/* Instruction takes a conditional infix,
			   beginning at character index 3.  (In
			   unified mode, it becomes a suffix.)  */
  OT_cinfix3_deprecated, /* The same as OT_cinfix3.  This is used for
			    tsts, cmps, cmns, and teqs.  */
  OT_cinfix3_legacy,	/* Legacy instruction takes a conditional infix at
			   character index 3, even in unified mode.  Used for
			   legacy instructions where suffix and infix forms
			   may be ambiguous.  */
  OT_csuf_or_in3,	/* Instruction takes either a conditional
			   suffix or an infix at character index 3.  */
  OT_odd_infix_unc,	/* This is the unconditional variant of an
			   instruction that takes a conditional infix
			   at an unusual position.  In unified mode,
			   this variant will accept a suffix.  */
  OT_odd_infix_0	/* Values greater than or equal to OT_odd_infix_0
			   are the conditional variants of instructions that
			   take conditional infixes in unusual positions.
			   The infix appears at character index
			   (tag - OT_odd_infix_0).  These are not accepted
			   in unified mode.  */
};
18383 /* Subroutine of md_assemble, responsible for looking up the primary
18384 opcode from the mnemonic the user wrote. STR points to the
18385 beginning of the mnemonic.
18387 This is not simply a hash table lookup, because of conditional
18388 variants. Most instructions have conditional variants, which are
18389 expressed with a _conditional affix_ to the mnemonic. If we were
18390 to encode each conditional variant as a literal string in the opcode
18391 table, it would have approximately 20,000 entries.
18393 Most mnemonics take this affix as a suffix, and in unified syntax,
18394 'most' is upgraded to 'all'. However, in the divided syntax, some
18395 instructions take the affix as an infix, notably the s-variants of
18396 the arithmetic instructions. Of those instructions, all but six
18397 have the infix appear after the third character of the mnemonic.
18399 Accordingly, the algorithm for looking up primary opcodes given
18402 1. Look up the identifier in the opcode table.
18403 If we find a match, go to step U.
18405 2. Look up the last two characters of the identifier in the
18406 conditions table. If we find a match, look up the first N-2
18407 characters of the identifier in the opcode table. If we
18408 find a match, go to step CE.
18410 3. Look up the fourth and fifth characters of the identifier in
18411 the conditions table. If we find a match, extract those
18412 characters from the identifier, and look up the remaining
18413 characters in the opcode table. If we find a match, go
18418 U. Examine the tag field of the opcode structure, in case this is
18419 one of the six instructions with its conditional infix in an
18420 unusual place. If it is, the tag tells us where to find the
18421 infix; look it up in the conditions table and set inst.cond
18422 accordingly. Otherwise, this is an unconditional instruction.
18423 Again set inst.cond accordingly. Return the opcode structure.
18425 CE. Examine the tag field to make sure this is an instruction that
18426 should receive a conditional suffix. If it is not, fail.
18427 Otherwise, set inst.cond from the suffix we already looked up,
18428 and return the opcode structure.
18430 CM. Examine the tag field to make sure this is an instruction that
18431 should receive a conditional infix after the third character.
18432 If it is not, fail. Otherwise, undo the edits to the current
18433 line of input and proceed as for case CE. */
18435 static const struct asm_opcode
*
18436 opcode_lookup (char **str
)
18440 const struct asm_opcode
*opcode
;
18441 const struct asm_cond
*cond
;
18444 /* Scan up to the end of the mnemonic, which must end in white space,
18445 '.' (in unified mode, or for Neon/VFP instructions), or end of string. */
18446 for (base
= end
= *str
; *end
!= '\0'; end
++)
18447 if (*end
== ' ' || *end
== '.')
18453 /* Handle a possible width suffix and/or Neon type suffix. */
18458 /* The .w and .n suffixes are only valid if the unified syntax is in
18460 if (unified_syntax
&& end
[1] == 'w')
18462 else if (unified_syntax
&& end
[1] == 'n')
18467 inst
.vectype
.elems
= 0;
18469 *str
= end
+ offset
;
18471 if (end
[offset
] == '.')
18473 /* See if we have a Neon type suffix (possible in either unified or
18474 non-unified ARM syntax mode). */
18475 if (parse_neon_type (&inst
.vectype
, str
) == FAIL
)
18478 else if (end
[offset
] != '\0' && end
[offset
] != ' ')
18484 /* Look for unaffixed or special-case affixed mnemonic. */
18485 opcode
= (const struct asm_opcode
*) hash_find_n (arm_ops_hsh
, base
,
18490 if (opcode
->tag
< OT_odd_infix_0
)
18492 inst
.cond
= COND_ALWAYS
;
18496 if (warn_on_deprecated
&& unified_syntax
)
18497 as_tsktsk (_("conditional infixes are deprecated in unified syntax"));
18498 affix
= base
+ (opcode
->tag
- OT_odd_infix_0
);
18499 cond
= (const struct asm_cond
*) hash_find_n (arm_cond_hsh
, affix
, 2);
18502 inst
.cond
= cond
->value
;
18506 /* Cannot have a conditional suffix on a mnemonic of less than two
18508 if (end
- base
< 3)
18511 /* Look for suffixed mnemonic. */
18513 cond
= (const struct asm_cond
*) hash_find_n (arm_cond_hsh
, affix
, 2);
18514 opcode
= (const struct asm_opcode
*) hash_find_n (arm_ops_hsh
, base
,
18516 if (opcode
&& cond
)
18519 switch (opcode
->tag
)
18521 case OT_cinfix3_legacy
:
18522 /* Ignore conditional suffixes matched on infix only mnemonics. */
18526 case OT_cinfix3_deprecated
:
18527 case OT_odd_infix_unc
:
18528 if (!unified_syntax
)
18530 /* Fall through. */
18534 case OT_csuf_or_in3
:
18535 inst
.cond
= cond
->value
;
18538 case OT_unconditional
:
18539 case OT_unconditionalF
:
18541 inst
.cond
= cond
->value
;
18544 /* Delayed diagnostic. */
18545 inst
.error
= BAD_COND
;
18546 inst
.cond
= COND_ALWAYS
;
18555 /* Cannot have a usual-position infix on a mnemonic of less than
18556 six characters (five would be a suffix). */
18557 if (end
- base
< 6)
18560 /* Look for infixed mnemonic in the usual position. */
18562 cond
= (const struct asm_cond
*) hash_find_n (arm_cond_hsh
, affix
, 2);
18566 memcpy (save
, affix
, 2);
18567 memmove (affix
, affix
+ 2, (end
- affix
) - 2);
18568 opcode
= (const struct asm_opcode
*) hash_find_n (arm_ops_hsh
, base
,
18570 memmove (affix
+ 2, affix
, (end
- affix
) - 2);
18571 memcpy (affix
, save
, 2);
18574 && (opcode
->tag
== OT_cinfix3
18575 || opcode
->tag
== OT_cinfix3_deprecated
18576 || opcode
->tag
== OT_csuf_or_in3
18577 || opcode
->tag
== OT_cinfix3_legacy
))
18580 if (warn_on_deprecated
&& unified_syntax
18581 && (opcode
->tag
== OT_cinfix3
18582 || opcode
->tag
== OT_cinfix3_deprecated
))
18583 as_tsktsk (_("conditional infixes are deprecated in unified syntax"));
18585 inst
.cond
= cond
->value
;
18592 /* This function generates an initial IT instruction, leaving its block
18593 virtually open for the new instructions. Eventually,
18594 the mask will be updated by now_it_add_mask () each time
18595 a new instruction needs to be included in the IT block.
18596 Finally, the block is closed with close_automatic_it_block ().
18597 The block closure can be requested either from md_assemble (),
18598 a tencode (), or due to a label hook. */
18601 new_automatic_it_block (int cond
)
18603 now_it
.state
= AUTOMATIC_IT_BLOCK
;
18604 now_it
.mask
= 0x18;
18606 now_it
.block_length
= 1;
18607 mapping_state (MAP_THUMB
);
18608 now_it
.insn
= output_it_inst (cond
, now_it
.mask
, NULL
);
18609 now_it
.warn_deprecated
= FALSE
;
18610 now_it
.insn_cond
= TRUE
;
18613 /* Close an automatic IT block.
18614 See comments in new_automatic_it_block (). */
18617 close_automatic_it_block (void)
18619 now_it
.mask
= 0x10;
18620 now_it
.block_length
= 0;
18623 /* Update the mask of the current automatically-generated IT
18624 instruction. See comments in new_automatic_it_block (). */
18627 now_it_add_mask (int cond
)
18629 #define CLEAR_BIT(value, nbit) ((value) & ~(1 << (nbit)))
18630 #define SET_BIT_VALUE(value, bitvalue, nbit) (CLEAR_BIT (value, nbit) \
18631 | ((bitvalue) << (nbit)))
18632 const int resulting_bit
= (cond
& 1);
18634 now_it
.mask
&= 0xf;
18635 now_it
.mask
= SET_BIT_VALUE (now_it
.mask
,
18637 (5 - now_it
.block_length
));
18638 now_it
.mask
= SET_BIT_VALUE (now_it
.mask
,
18640 ((5 - now_it
.block_length
) - 1) );
18641 output_it_inst (now_it
.cc
, now_it
.mask
, now_it
.insn
);
18644 #undef SET_BIT_VALUE
18647 /* The IT blocks handling machinery is accessed through the these functions:
18648 it_fsm_pre_encode () from md_assemble ()
18649 set_it_insn_type () optional, from the tencode functions
18650 set_it_insn_type_last () ditto
18651 in_it_block () ditto
18652 it_fsm_post_encode () from md_assemble ()
18653 force_automatic_it_block_close () from label handling functions
18656 1) md_assemble () calls it_fsm_pre_encode () before calling tencode (),
18657 initializing the IT insn type with a generic initial value depending
18658 on the inst.condition.
18659 2) During the tencode function, two things may happen:
18660 a) The tencode function overrides the IT insn type by
18661 calling either set_it_insn_type (type) or set_it_insn_type_last ().
18662 b) The tencode function queries the IT block state by
18663 calling in_it_block () (i.e. to determine narrow/not narrow mode).
18665 Both set_it_insn_type and in_it_block run the internal FSM state
18666 handling function (handle_it_state), because: a) setting the IT insn
18667 type may incur in an invalid state (exiting the function),
18668 and b) querying the state requires the FSM to be updated.
18669 Specifically we want to avoid creating an IT block for conditional
18670 branches, so it_fsm_pre_encode is actually a guess and we can't
18671 determine whether an IT block is required until the tencode () routine
18672 has decided what type of instruction this actually it.
18673 Because of this, if set_it_insn_type and in_it_block have to be used,
18674 set_it_insn_type has to be called first.
18676 set_it_insn_type_last () is a wrapper of set_it_insn_type (type), that
18677 determines the insn IT type depending on the inst.cond code.
18678 When a tencode () routine encodes an instruction that can be
18679 either outside an IT block, or, in the case of being inside, has to be
18680 the last one, set_it_insn_type_last () will determine the proper
18681 IT instruction type based on the inst.cond code. Otherwise,
18682 set_it_insn_type can be called for overriding that logic or
18683 for covering other cases.
18685 Calling handle_it_state () may not transition the IT block state to
18686 OUTSIDE_IT_BLOCK immediately, since the (current) state could be
18687 still queried. Instead, if the FSM determines that the state should
18688 be transitioned to OUTSIDE_IT_BLOCK, a flag is marked to be closed
18689 after the tencode () function: that's what it_fsm_post_encode () does.
18691 Since in_it_block () calls the state handling function to get an
18692 updated state, an error may occur (due to invalid insns combination).
18693 In that case, inst.error is set.
18694 Therefore, inst.error has to be checked after the execution of
18695 the tencode () routine.
18697 3) Back in md_assemble(), it_fsm_post_encode () is called to commit
18698 any pending state change (if any) that didn't take place in
18699 handle_it_state () as explained above. */
18702 it_fsm_pre_encode (void)
18704 if (inst
.cond
!= COND_ALWAYS
)
18705 inst
.it_insn_type
= INSIDE_IT_INSN
;
18707 inst
.it_insn_type
= OUTSIDE_IT_INSN
;
18709 now_it
.state_handled
= 0;
18712 /* IT state FSM handling function. */
18715 handle_it_state (void)
18717 now_it
.state_handled
= 1;
18718 now_it
.insn_cond
= FALSE
;
18720 switch (now_it
.state
)
18722 case OUTSIDE_IT_BLOCK
:
18723 switch (inst
.it_insn_type
)
18725 case OUTSIDE_IT_INSN
:
18728 case INSIDE_IT_INSN
:
18729 case INSIDE_IT_LAST_INSN
:
18730 if (thumb_mode
== 0)
18733 && !(implicit_it_mode
& IMPLICIT_IT_MODE_ARM
))
18734 as_tsktsk (_("Warning: conditional outside an IT block"\
18739 if ((implicit_it_mode
& IMPLICIT_IT_MODE_THUMB
)
18740 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2
))
18742 /* Automatically generate the IT instruction. */
18743 new_automatic_it_block (inst
.cond
);
18744 if (inst
.it_insn_type
== INSIDE_IT_LAST_INSN
)
18745 close_automatic_it_block ();
18749 inst
.error
= BAD_OUT_IT
;
18755 case IF_INSIDE_IT_LAST_INSN
:
18756 case NEUTRAL_IT_INSN
:
18760 now_it
.state
= MANUAL_IT_BLOCK
;
18761 now_it
.block_length
= 0;
18766 case AUTOMATIC_IT_BLOCK
:
18767 /* Three things may happen now:
18768 a) We should increment current it block size;
18769 b) We should close current it block (closing insn or 4 insns);
18770 c) We should close current it block and start a new one (due
18771 to incompatible conditions or
18772 4 insns-length block reached). */
18774 switch (inst
.it_insn_type
)
18776 case OUTSIDE_IT_INSN
:
18777 /* The closure of the block shall happen immediately,
18778 so any in_it_block () call reports the block as closed. */
18779 force_automatic_it_block_close ();
18782 case INSIDE_IT_INSN
:
18783 case INSIDE_IT_LAST_INSN
:
18784 case IF_INSIDE_IT_LAST_INSN
:
18785 now_it
.block_length
++;
18787 if (now_it
.block_length
> 4
18788 || !now_it_compatible (inst
.cond
))
18790 force_automatic_it_block_close ();
18791 if (inst
.it_insn_type
!= IF_INSIDE_IT_LAST_INSN
)
18792 new_automatic_it_block (inst
.cond
);
18796 now_it
.insn_cond
= TRUE
;
18797 now_it_add_mask (inst
.cond
);
18800 if (now_it
.state
== AUTOMATIC_IT_BLOCK
18801 && (inst
.it_insn_type
== INSIDE_IT_LAST_INSN
18802 || inst
.it_insn_type
== IF_INSIDE_IT_LAST_INSN
))
18803 close_automatic_it_block ();
18806 case NEUTRAL_IT_INSN
:
18807 now_it
.block_length
++;
18808 now_it
.insn_cond
= TRUE
;
18810 if (now_it
.block_length
> 4)
18811 force_automatic_it_block_close ();
18813 now_it_add_mask (now_it
.cc
& 1);
18817 close_automatic_it_block ();
18818 now_it
.state
= MANUAL_IT_BLOCK
;
18823 case MANUAL_IT_BLOCK
:
18825 /* Check conditional suffixes. */
18826 const int cond
= now_it
.cc
^ ((now_it
.mask
>> 4) & 1) ^ 1;
18829 now_it
.mask
&= 0x1f;
18830 is_last
= (now_it
.mask
== 0x10);
18831 now_it
.insn_cond
= TRUE
;
18833 switch (inst
.it_insn_type
)
18835 case OUTSIDE_IT_INSN
:
18836 inst
.error
= BAD_NOT_IT
;
18839 case INSIDE_IT_INSN
:
18840 if (cond
!= inst
.cond
)
18842 inst
.error
= BAD_IT_COND
;
18847 case INSIDE_IT_LAST_INSN
:
18848 case IF_INSIDE_IT_LAST_INSN
:
18849 if (cond
!= inst
.cond
)
18851 inst
.error
= BAD_IT_COND
;
18856 inst
.error
= BAD_BRANCH
;
18861 case NEUTRAL_IT_INSN
:
18862 /* The BKPT instruction is unconditional even in an IT block. */
18866 inst
.error
= BAD_IT_IT
;
18876 struct depr_insn_mask
18878 unsigned long pattern
;
18879 unsigned long mask
;
18880 const char* description
;
18883 /* List of 16-bit instruction patterns deprecated in an IT block in
18885 static const struct depr_insn_mask depr_it_insns
[] = {
18886 { 0xc000, 0xc000, N_("Short branches, Undefined, SVC, LDM/STM") },
18887 { 0xb000, 0xb000, N_("Miscellaneous 16-bit instructions") },
18888 { 0xa000, 0xb800, N_("ADR") },
18889 { 0x4800, 0xf800, N_("Literal loads") },
18890 { 0x4478, 0xf478, N_("Hi-register ADD, MOV, CMP, BX, BLX using pc") },
18891 { 0x4487, 0xfc87, N_("Hi-register ADD, MOV, CMP using pc") },
18892 /* NOTE: 0x00dd is not the real encoding, instead, it is the 'tvalue'
18893 field in asm_opcode. 'tvalue' is used at the stage this check happen. */
18894 { 0x00dd, 0x7fff, N_("ADD/SUB sp, sp #imm") },
18899 it_fsm_post_encode (void)
18903 if (!now_it
.state_handled
)
18904 handle_it_state ();
18906 if (now_it
.insn_cond
18907 && !now_it
.warn_deprecated
18908 && warn_on_deprecated
18909 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8
)
18910 && !ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_m
))
18912 if (inst
.instruction
>= 0x10000)
18914 as_tsktsk (_("IT blocks containing 32-bit Thumb instructions are "
18915 "performance deprecated in ARMv8-A and ARMv8-R"));
18916 now_it
.warn_deprecated
= TRUE
;
18920 const struct depr_insn_mask
*p
= depr_it_insns
;
18922 while (p
->mask
!= 0)
18924 if ((inst
.instruction
& p
->mask
) == p
->pattern
)
18926 as_tsktsk (_("IT blocks containing 16-bit Thumb "
18927 "instructions of the following class are "
18928 "performance deprecated in ARMv8-A and "
18929 "ARMv8-R: %s"), p
->description
);
18930 now_it
.warn_deprecated
= TRUE
;
18938 if (now_it
.block_length
> 1)
18940 as_tsktsk (_("IT blocks containing more than one conditional "
18941 "instruction are performance deprecated in ARMv8-A and "
18943 now_it
.warn_deprecated
= TRUE
;
18947 is_last
= (now_it
.mask
== 0x10);
18950 now_it
.state
= OUTSIDE_IT_BLOCK
;
18956 force_automatic_it_block_close (void)
18958 if (now_it
.state
== AUTOMATIC_IT_BLOCK
)
18960 close_automatic_it_block ();
18961 now_it
.state
= OUTSIDE_IT_BLOCK
;
18969 if (!now_it
.state_handled
)
18970 handle_it_state ();
18972 return now_it
.state
!= OUTSIDE_IT_BLOCK
;
18975 /* Whether OPCODE only has T32 encoding. Since this function is only used by
18976 t32_insn_ok, OPCODE enabled by v6t2 extension bit do not need to be listed
18977 here, hence the "known" in the function name. */
18980 known_t32_only_insn (const struct asm_opcode
*opcode
)
18982 /* Original Thumb-1 wide instruction. */
18983 if (opcode
->tencode
== do_t_blx
18984 || opcode
->tencode
== do_t_branch23
18985 || ARM_CPU_HAS_FEATURE (*opcode
->tvariant
, arm_ext_msr
)
18986 || ARM_CPU_HAS_FEATURE (*opcode
->tvariant
, arm_ext_barrier
))
18989 /* Wide-only instruction added to ARMv8-M Baseline. */
18990 if (ARM_CPU_HAS_FEATURE (*opcode
->tvariant
, arm_ext_v8m_m_only
)
18991 || ARM_CPU_HAS_FEATURE (*opcode
->tvariant
, arm_ext_atomics
)
18992 || ARM_CPU_HAS_FEATURE (*opcode
->tvariant
, arm_ext_v6t2_v8m
)
18993 || ARM_CPU_HAS_FEATURE (*opcode
->tvariant
, arm_ext_div
))
18999 /* Whether wide instruction variant can be used if available for a valid OPCODE
19003 t32_insn_ok (arm_feature_set arch
, const struct asm_opcode
*opcode
)
19005 if (known_t32_only_insn (opcode
))
19008 /* Instruction with narrow and wide encoding added to ARMv8-M. Availability
19009 of variant T3 of B.W is checked in do_t_branch. */
19010 if (ARM_CPU_HAS_FEATURE (arch
, arm_ext_v8m
)
19011 && opcode
->tencode
== do_t_branch
)
19014 /* MOV accepts T1/T3 encodings under Baseline, T3 encoding is 32bit. */
19015 if (ARM_CPU_HAS_FEATURE (arch
, arm_ext_v8m
)
19016 && opcode
->tencode
== do_t_mov_cmp
19017 /* Make sure CMP instruction is not affected. */
19018 && opcode
->aencode
== do_mov
)
19021 /* Wide instruction variants of all instructions with narrow *and* wide
19022 variants become available with ARMv6t2. Other opcodes are either
19023 narrow-only or wide-only and are thus available if OPCODE is valid. */
19024 if (ARM_CPU_HAS_FEATURE (arch
, arm_ext_v6t2
))
19027 /* OPCODE with narrow only instruction variant or wide variant not
19033 md_assemble (char *str
)
19036 const struct asm_opcode
* opcode
;
19038 /* Align the previous label if needed. */
19039 if (last_label_seen
!= NULL
)
19041 symbol_set_frag (last_label_seen
, frag_now
);
19042 S_SET_VALUE (last_label_seen
, (valueT
) frag_now_fix ());
19043 S_SET_SEGMENT (last_label_seen
, now_seg
);
19046 memset (&inst
, '\0', sizeof (inst
));
19048 for (r
= 0; r
< ARM_IT_MAX_RELOCS
; r
++)
19049 inst
.relocs
[r
].type
= BFD_RELOC_UNUSED
;
19051 opcode
= opcode_lookup (&p
);
19054 /* It wasn't an instruction, but it might be a register alias of
19055 the form alias .req reg, or a Neon .dn/.qn directive. */
19056 if (! create_register_alias (str
, p
)
19057 && ! create_neon_reg_alias (str
, p
))
19058 as_bad (_("bad instruction `%s'"), str
);
19063 if (warn_on_deprecated
&& opcode
->tag
== OT_cinfix3_deprecated
)
19064 as_tsktsk (_("s suffix on comparison instruction is deprecated"));
19066 /* The value which unconditional instructions should have in place of the
19067 condition field. */
19068 inst
.uncond_value
= (opcode
->tag
== OT_csuffixF
) ? 0xf : -1;
19072 arm_feature_set variant
;
19074 variant
= cpu_variant
;
19075 /* Only allow coprocessor instructions on Thumb-2 capable devices. */
19076 if (!ARM_CPU_HAS_FEATURE (variant
, arm_arch_t2
))
19077 ARM_CLEAR_FEATURE (variant
, variant
, fpu_any_hard
);
19078 /* Check that this instruction is supported for this CPU. */
19079 if (!opcode
->tvariant
19080 || (thumb_mode
== 1
19081 && !ARM_CPU_HAS_FEATURE (variant
, *opcode
->tvariant
)))
19083 if (opcode
->tencode
== do_t_swi
)
19084 as_bad (_("SVC is not permitted on this architecture"));
19086 as_bad (_("selected processor does not support `%s' in Thumb mode"), str
);
19089 if (inst
.cond
!= COND_ALWAYS
&& !unified_syntax
19090 && opcode
->tencode
!= do_t_branch
)
19092 as_bad (_("Thumb does not support conditional execution"));
19096 /* Two things are addressed here:
19097 1) Implicit require narrow instructions on Thumb-1.
19098 This avoids relaxation accidentally introducing Thumb-2
19100 2) Reject wide instructions in non Thumb-2 cores.
19102 Only instructions with narrow and wide variants need to be handled
19103 but selecting all non wide-only instructions is easier. */
19104 if (!ARM_CPU_HAS_FEATURE (variant
, arm_ext_v6t2
)
19105 && !t32_insn_ok (variant
, opcode
))
19107 if (inst
.size_req
== 0)
19109 else if (inst
.size_req
== 4)
19111 if (ARM_CPU_HAS_FEATURE (variant
, arm_ext_v8m
))
19112 as_bad (_("selected processor does not support 32bit wide "
19113 "variant of instruction `%s'"), str
);
19115 as_bad (_("selected processor does not support `%s' in "
19116 "Thumb-2 mode"), str
);
19121 inst
.instruction
= opcode
->tvalue
;
19123 if (!parse_operands (p
, opcode
->operands
, /*thumb=*/TRUE
))
19125 /* Prepare the it_insn_type for those encodings that don't set
19127 it_fsm_pre_encode ();
19129 opcode
->tencode ();
19131 it_fsm_post_encode ();
19134 if (!(inst
.error
|| inst
.relax
))
19136 gas_assert (inst
.instruction
< 0xe800 || inst
.instruction
> 0xffff);
19137 inst
.size
= (inst
.instruction
> 0xffff ? 4 : 2);
19138 if (inst
.size_req
&& inst
.size_req
!= inst
.size
)
19140 as_bad (_("cannot honor width suffix -- `%s'"), str
);
19145 /* Something has gone badly wrong if we try to relax a fixed size
19147 gas_assert (inst
.size_req
== 0 || !inst
.relax
);
19149 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
19150 *opcode
->tvariant
);
19151 /* Many Thumb-2 instructions also have Thumb-1 variants, so explicitly
19152 set those bits when Thumb-2 32-bit instructions are seen. The impact
19153 of relaxable instructions will be considered later after we finish all
19155 if (ARM_FEATURE_CORE_EQUAL (cpu_variant
, arm_arch_any
))
19156 variant
= arm_arch_none
;
19158 variant
= cpu_variant
;
19159 if (inst
.size
== 4 && !t32_insn_ok (variant
, opcode
))
19160 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
19163 check_neon_suffixes
;
19167 mapping_state (MAP_THUMB
);
19170 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
))
19174 /* bx is allowed on v5 cores, and sometimes on v4 cores. */
19175 is_bx
= (opcode
->aencode
== do_bx
);
19177 /* Check that this instruction is supported for this CPU. */
19178 if (!(is_bx
&& fix_v4bx
)
19179 && !(opcode
->avariant
&&
19180 ARM_CPU_HAS_FEATURE (cpu_variant
, *opcode
->avariant
)))
19182 as_bad (_("selected processor does not support `%s' in ARM mode"), str
);
19187 as_bad (_("width suffixes are invalid in ARM mode -- `%s'"), str
);
19191 inst
.instruction
= opcode
->avalue
;
19192 if (opcode
->tag
== OT_unconditionalF
)
19193 inst
.instruction
|= 0xFU
<< 28;
19195 inst
.instruction
|= inst
.cond
<< 28;
19196 inst
.size
= INSN_SIZE
;
19197 if (!parse_operands (p
, opcode
->operands
, /*thumb=*/FALSE
))
19199 it_fsm_pre_encode ();
19200 opcode
->aencode ();
19201 it_fsm_post_encode ();
19203 /* Arm mode bx is marked as both v4T and v5 because it's still required
19204 on a hypothetical non-thumb v5 core. */
19206 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
, arm_ext_v4t
);
19208 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
,
19209 *opcode
->avariant
);
19211 check_neon_suffixes
;
19215 mapping_state (MAP_ARM
);
19220 as_bad (_("attempt to use an ARM instruction on a Thumb-only processor "
19228 check_it_blocks_finished (void)
19233 for (sect
= stdoutput
->sections
; sect
!= NULL
; sect
= sect
->next
)
19234 if (seg_info (sect
)->tc_segment_info_data
.current_it
.state
19235 == MANUAL_IT_BLOCK
)
19237 as_warn (_("section '%s' finished with an open IT block."),
19241 if (now_it
.state
== MANUAL_IT_BLOCK
)
19242 as_warn (_("file finished with an open IT block."));
19246 /* Various frobbings of labels and their addresses. */
19249 arm_start_line_hook (void)
19251 last_label_seen
= NULL
;
19255 arm_frob_label (symbolS
* sym
)
19257 last_label_seen
= sym
;
19259 ARM_SET_THUMB (sym
, thumb_mode
);
19261 #if defined OBJ_COFF || defined OBJ_ELF
19262 ARM_SET_INTERWORK (sym
, support_interwork
);
19265 force_automatic_it_block_close ();
19267 /* Note - do not allow local symbols (.Lxxx) to be labelled
19268 as Thumb functions. This is because these labels, whilst
19269 they exist inside Thumb code, are not the entry points for
19270 possible ARM->Thumb calls. Also, these labels can be used
19271 as part of a computed goto or switch statement. eg gcc
19272 can generate code that looks like this:
19274 ldr r2, [pc, .Laaa]
19284 The first instruction loads the address of the jump table.
19285 The second instruction converts a table index into a byte offset.
19286 The third instruction gets the jump address out of the table.
19287 The fourth instruction performs the jump.
19289 If the address stored at .Laaa is that of a symbol which has the
19290 Thumb_Func bit set, then the linker will arrange for this address
19291 to have the bottom bit set, which in turn would mean that the
19292 address computation performed by the third instruction would end
19293 up with the bottom bit set. Since the ARM is capable of unaligned
19294 word loads, the instruction would then load the incorrect address
19295 out of the jump table, and chaos would ensue. */
19296 if (label_is_thumb_function_name
19297 && (S_GET_NAME (sym
)[0] != '.' || S_GET_NAME (sym
)[1] != 'L')
19298 && (bfd_get_section_flags (stdoutput
, now_seg
) & SEC_CODE
) != 0)
19300 /* When the address of a Thumb function is taken the bottom
19301 bit of that address should be set. This will allow
19302 interworking between Arm and Thumb functions to work
19305 THUMB_SET_FUNC (sym
, 1);
19307 label_is_thumb_function_name
= FALSE
;
19310 dwarf2_emit_label (sym
);
19314 arm_data_in_code (void)
19316 if (thumb_mode
&& ! strncmp (input_line_pointer
+ 1, "data:", 5))
19318 *input_line_pointer
= '/';
19319 input_line_pointer
+= 5;
19320 *input_line_pointer
= 0;
19328 arm_canonicalize_symbol_name (char * name
)
19332 if (thumb_mode
&& (len
= strlen (name
)) > 5
19333 && streq (name
+ len
- 5, "/data"))
19334 *(name
+ len
- 5) = 0;
19339 /* Table of all register names defined by default. The user can
19340 define additional names with .req. Note that all register names
19341 should appear in both upper and lowercase variants. Some registers
19342 also have mixed-case names. */
19344 #define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE, 0 }
19345 #define REGNUM(p,n,t) REGDEF(p##n, n, t)
19346 #define REGNUM2(p,n,t) REGDEF(p##n, 2 * n, t)
19347 #define REGSET(p,t) \
19348 REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
19349 REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
19350 REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
19351 REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
19352 #define REGSETH(p,t) \
19353 REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
19354 REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
19355 REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
19356 REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t), REGNUM(p,31,t)
19357 #define REGSET2(p,t) \
19358 REGNUM2(p, 0,t), REGNUM2(p, 1,t), REGNUM2(p, 2,t), REGNUM2(p, 3,t), \
19359 REGNUM2(p, 4,t), REGNUM2(p, 5,t), REGNUM2(p, 6,t), REGNUM2(p, 7,t), \
19360 REGNUM2(p, 8,t), REGNUM2(p, 9,t), REGNUM2(p,10,t), REGNUM2(p,11,t), \
19361 REGNUM2(p,12,t), REGNUM2(p,13,t), REGNUM2(p,14,t), REGNUM2(p,15,t)
19362 #define SPLRBANK(base,bank,t) \
19363 REGDEF(lr_##bank, 768|((base+0)<<16), t), \
19364 REGDEF(sp_##bank, 768|((base+1)<<16), t), \
19365 REGDEF(spsr_##bank, 768|(base<<16)|SPSR_BIT, t), \
19366 REGDEF(LR_##bank, 768|((base+0)<<16), t), \
19367 REGDEF(SP_##bank, 768|((base+1)<<16), t), \
19368 REGDEF(SPSR_##bank, 768|(base<<16)|SPSR_BIT, t)
19370 static const struct reg_entry reg_names
[] =
19372 /* ARM integer registers. */
19373 REGSET(r
, RN
), REGSET(R
, RN
),
19375 /* ATPCS synonyms. */
19376 REGDEF(a1
,0,RN
), REGDEF(a2
,1,RN
), REGDEF(a3
, 2,RN
), REGDEF(a4
, 3,RN
),
19377 REGDEF(v1
,4,RN
), REGDEF(v2
,5,RN
), REGDEF(v3
, 6,RN
), REGDEF(v4
, 7,RN
),
19378 REGDEF(v5
,8,RN
), REGDEF(v6
,9,RN
), REGDEF(v7
,10,RN
), REGDEF(v8
,11,RN
),
19380 REGDEF(A1
,0,RN
), REGDEF(A2
,1,RN
), REGDEF(A3
, 2,RN
), REGDEF(A4
, 3,RN
),
19381 REGDEF(V1
,4,RN
), REGDEF(V2
,5,RN
), REGDEF(V3
, 6,RN
), REGDEF(V4
, 7,RN
),
19382 REGDEF(V5
,8,RN
), REGDEF(V6
,9,RN
), REGDEF(V7
,10,RN
), REGDEF(V8
,11,RN
),
19384 /* Well-known aliases. */
19385 REGDEF(wr
, 7,RN
), REGDEF(sb
, 9,RN
), REGDEF(sl
,10,RN
), REGDEF(fp
,11,RN
),
19386 REGDEF(ip
,12,RN
), REGDEF(sp
,13,RN
), REGDEF(lr
,14,RN
), REGDEF(pc
,15,RN
),
19388 REGDEF(WR
, 7,RN
), REGDEF(SB
, 9,RN
), REGDEF(SL
,10,RN
), REGDEF(FP
,11,RN
),
19389 REGDEF(IP
,12,RN
), REGDEF(SP
,13,RN
), REGDEF(LR
,14,RN
), REGDEF(PC
,15,RN
),
19391 /* Coprocessor numbers. */
19392 REGSET(p
, CP
), REGSET(P
, CP
),
19394 /* Coprocessor register numbers. The "cr" variants are for backward
19396 REGSET(c
, CN
), REGSET(C
, CN
),
19397 REGSET(cr
, CN
), REGSET(CR
, CN
),
19399 /* ARM banked registers. */
19400 REGDEF(R8_usr
,512|(0<<16),RNB
), REGDEF(r8_usr
,512|(0<<16),RNB
),
19401 REGDEF(R9_usr
,512|(1<<16),RNB
), REGDEF(r9_usr
,512|(1<<16),RNB
),
19402 REGDEF(R10_usr
,512|(2<<16),RNB
), REGDEF(r10_usr
,512|(2<<16),RNB
),
19403 REGDEF(R11_usr
,512|(3<<16),RNB
), REGDEF(r11_usr
,512|(3<<16),RNB
),
19404 REGDEF(R12_usr
,512|(4<<16),RNB
), REGDEF(r12_usr
,512|(4<<16),RNB
),
19405 REGDEF(SP_usr
,512|(5<<16),RNB
), REGDEF(sp_usr
,512|(5<<16),RNB
),
19406 REGDEF(LR_usr
,512|(6<<16),RNB
), REGDEF(lr_usr
,512|(6<<16),RNB
),
19408 REGDEF(R8_fiq
,512|(8<<16),RNB
), REGDEF(r8_fiq
,512|(8<<16),RNB
),
19409 REGDEF(R9_fiq
,512|(9<<16),RNB
), REGDEF(r9_fiq
,512|(9<<16),RNB
),
19410 REGDEF(R10_fiq
,512|(10<<16),RNB
), REGDEF(r10_fiq
,512|(10<<16),RNB
),
19411 REGDEF(R11_fiq
,512|(11<<16),RNB
), REGDEF(r11_fiq
,512|(11<<16),RNB
),
19412 REGDEF(R12_fiq
,512|(12<<16),RNB
), REGDEF(r12_fiq
,512|(12<<16),RNB
),
19413 REGDEF(SP_fiq
,512|(13<<16),RNB
), REGDEF(sp_fiq
,512|(13<<16),RNB
),
19414 REGDEF(LR_fiq
,512|(14<<16),RNB
), REGDEF(lr_fiq
,512|(14<<16),RNB
),
19415 REGDEF(SPSR_fiq
,512|(14<<16)|SPSR_BIT
,RNB
), REGDEF(spsr_fiq
,512|(14<<16)|SPSR_BIT
,RNB
),
19417 SPLRBANK(0,IRQ
,RNB
), SPLRBANK(0,irq
,RNB
),
19418 SPLRBANK(2,SVC
,RNB
), SPLRBANK(2,svc
,RNB
),
19419 SPLRBANK(4,ABT
,RNB
), SPLRBANK(4,abt
,RNB
),
19420 SPLRBANK(6,UND
,RNB
), SPLRBANK(6,und
,RNB
),
19421 SPLRBANK(12,MON
,RNB
), SPLRBANK(12,mon
,RNB
),
19422 REGDEF(elr_hyp
,768|(14<<16),RNB
), REGDEF(ELR_hyp
,768|(14<<16),RNB
),
19423 REGDEF(sp_hyp
,768|(15<<16),RNB
), REGDEF(SP_hyp
,768|(15<<16),RNB
),
19424 REGDEF(spsr_hyp
,768|(14<<16)|SPSR_BIT
,RNB
),
19425 REGDEF(SPSR_hyp
,768|(14<<16)|SPSR_BIT
,RNB
),
19427 /* FPA registers. */
19428 REGNUM(f
,0,FN
), REGNUM(f
,1,FN
), REGNUM(f
,2,FN
), REGNUM(f
,3,FN
),
19429 REGNUM(f
,4,FN
), REGNUM(f
,5,FN
), REGNUM(f
,6,FN
), REGNUM(f
,7, FN
),
19431 REGNUM(F
,0,FN
), REGNUM(F
,1,FN
), REGNUM(F
,2,FN
), REGNUM(F
,3,FN
),
19432 REGNUM(F
,4,FN
), REGNUM(F
,5,FN
), REGNUM(F
,6,FN
), REGNUM(F
,7, FN
),
19434 /* VFP SP registers. */
19435 REGSET(s
,VFS
), REGSET(S
,VFS
),
19436 REGSETH(s
,VFS
), REGSETH(S
,VFS
),
19438 /* VFP DP Registers. */
19439 REGSET(d
,VFD
), REGSET(D
,VFD
),
19440 /* Extra Neon DP registers. */
19441 REGSETH(d
,VFD
), REGSETH(D
,VFD
),
19443 /* Neon QP registers. */
19444 REGSET2(q
,NQ
), REGSET2(Q
,NQ
),
19446 /* VFP control registers. */
19447 REGDEF(fpsid
,0,VFC
), REGDEF(fpscr
,1,VFC
), REGDEF(fpexc
,8,VFC
),
19448 REGDEF(FPSID
,0,VFC
), REGDEF(FPSCR
,1,VFC
), REGDEF(FPEXC
,8,VFC
),
19449 REGDEF(fpinst
,9,VFC
), REGDEF(fpinst2
,10,VFC
),
19450 REGDEF(FPINST
,9,VFC
), REGDEF(FPINST2
,10,VFC
),
19451 REGDEF(mvfr0
,7,VFC
), REGDEF(mvfr1
,6,VFC
),
19452 REGDEF(MVFR0
,7,VFC
), REGDEF(MVFR1
,6,VFC
),
19453 REGDEF(mvfr2
,5,VFC
), REGDEF(MVFR2
,5,VFC
),
19455 /* Maverick DSP coprocessor registers. */
19456 REGSET(mvf
,MVF
), REGSET(mvd
,MVD
), REGSET(mvfx
,MVFX
), REGSET(mvdx
,MVDX
),
19457 REGSET(MVF
,MVF
), REGSET(MVD
,MVD
), REGSET(MVFX
,MVFX
), REGSET(MVDX
,MVDX
),
19459 REGNUM(mvax
,0,MVAX
), REGNUM(mvax
,1,MVAX
),
19460 REGNUM(mvax
,2,MVAX
), REGNUM(mvax
,3,MVAX
),
19461 REGDEF(dspsc
,0,DSPSC
),
19463 REGNUM(MVAX
,0,MVAX
), REGNUM(MVAX
,1,MVAX
),
19464 REGNUM(MVAX
,2,MVAX
), REGNUM(MVAX
,3,MVAX
),
19465 REGDEF(DSPSC
,0,DSPSC
),
19467 /* iWMMXt data registers - p0, c0-15. */
19468 REGSET(wr
,MMXWR
), REGSET(wR
,MMXWR
), REGSET(WR
, MMXWR
),
19470 /* iWMMXt control registers - p1, c0-3. */
19471 REGDEF(wcid
, 0,MMXWC
), REGDEF(wCID
, 0,MMXWC
), REGDEF(WCID
, 0,MMXWC
),
19472 REGDEF(wcon
, 1,MMXWC
), REGDEF(wCon
, 1,MMXWC
), REGDEF(WCON
, 1,MMXWC
),
19473 REGDEF(wcssf
, 2,MMXWC
), REGDEF(wCSSF
, 2,MMXWC
), REGDEF(WCSSF
, 2,MMXWC
),
19474 REGDEF(wcasf
, 3,MMXWC
), REGDEF(wCASF
, 3,MMXWC
), REGDEF(WCASF
, 3,MMXWC
),
19476 /* iWMMXt scalar (constant/offset) registers - p1, c8-11. */
19477 REGDEF(wcgr0
, 8,MMXWCG
), REGDEF(wCGR0
, 8,MMXWCG
), REGDEF(WCGR0
, 8,MMXWCG
),
19478 REGDEF(wcgr1
, 9,MMXWCG
), REGDEF(wCGR1
, 9,MMXWCG
), REGDEF(WCGR1
, 9,MMXWCG
),
19479 REGDEF(wcgr2
,10,MMXWCG
), REGDEF(wCGR2
,10,MMXWCG
), REGDEF(WCGR2
,10,MMXWCG
),
19480 REGDEF(wcgr3
,11,MMXWCG
), REGDEF(wCGR3
,11,MMXWCG
), REGDEF(WCGR3
,11,MMXWCG
),
19482 /* XScale accumulator registers. */
19483 REGNUM(acc
,0,XSCALE
), REGNUM(ACC
,0,XSCALE
),
19489 /* Table of all PSR suffixes. Bare "CPSR" and "SPSR" are handled
19490 within psr_required_here. */
19491 static const struct asm_psr psrs
[] =
19493 /* Backward compatibility notation. Note that "all" is no longer
19494 truly all possible PSR bits. */
19495 {"all", PSR_c
| PSR_f
},
19499 /* Individual flags. */
19505 /* Combinations of flags. */
19506 {"fs", PSR_f
| PSR_s
},
19507 {"fx", PSR_f
| PSR_x
},
19508 {"fc", PSR_f
| PSR_c
},
19509 {"sf", PSR_s
| PSR_f
},
19510 {"sx", PSR_s
| PSR_x
},
19511 {"sc", PSR_s
| PSR_c
},
19512 {"xf", PSR_x
| PSR_f
},
19513 {"xs", PSR_x
| PSR_s
},
19514 {"xc", PSR_x
| PSR_c
},
19515 {"cf", PSR_c
| PSR_f
},
19516 {"cs", PSR_c
| PSR_s
},
19517 {"cx", PSR_c
| PSR_x
},
19518 {"fsx", PSR_f
| PSR_s
| PSR_x
},
19519 {"fsc", PSR_f
| PSR_s
| PSR_c
},
19520 {"fxs", PSR_f
| PSR_x
| PSR_s
},
19521 {"fxc", PSR_f
| PSR_x
| PSR_c
},
19522 {"fcs", PSR_f
| PSR_c
| PSR_s
},
19523 {"fcx", PSR_f
| PSR_c
| PSR_x
},
19524 {"sfx", PSR_s
| PSR_f
| PSR_x
},
19525 {"sfc", PSR_s
| PSR_f
| PSR_c
},
19526 {"sxf", PSR_s
| PSR_x
| PSR_f
},
19527 {"sxc", PSR_s
| PSR_x
| PSR_c
},
19528 {"scf", PSR_s
| PSR_c
| PSR_f
},
19529 {"scx", PSR_s
| PSR_c
| PSR_x
},
19530 {"xfs", PSR_x
| PSR_f
| PSR_s
},
19531 {"xfc", PSR_x
| PSR_f
| PSR_c
},
19532 {"xsf", PSR_x
| PSR_s
| PSR_f
},
19533 {"xsc", PSR_x
| PSR_s
| PSR_c
},
19534 {"xcf", PSR_x
| PSR_c
| PSR_f
},
19535 {"xcs", PSR_x
| PSR_c
| PSR_s
},
19536 {"cfs", PSR_c
| PSR_f
| PSR_s
},
19537 {"cfx", PSR_c
| PSR_f
| PSR_x
},
19538 {"csf", PSR_c
| PSR_s
| PSR_f
},
19539 {"csx", PSR_c
| PSR_s
| PSR_x
},
19540 {"cxf", PSR_c
| PSR_x
| PSR_f
},
19541 {"cxs", PSR_c
| PSR_x
| PSR_s
},
19542 {"fsxc", PSR_f
| PSR_s
| PSR_x
| PSR_c
},
19543 {"fscx", PSR_f
| PSR_s
| PSR_c
| PSR_x
},
19544 {"fxsc", PSR_f
| PSR_x
| PSR_s
| PSR_c
},
19545 {"fxcs", PSR_f
| PSR_x
| PSR_c
| PSR_s
},
19546 {"fcsx", PSR_f
| PSR_c
| PSR_s
| PSR_x
},
19547 {"fcxs", PSR_f
| PSR_c
| PSR_x
| PSR_s
},
19548 {"sfxc", PSR_s
| PSR_f
| PSR_x
| PSR_c
},
19549 {"sfcx", PSR_s
| PSR_f
| PSR_c
| PSR_x
},
19550 {"sxfc", PSR_s
| PSR_x
| PSR_f
| PSR_c
},
19551 {"sxcf", PSR_s
| PSR_x
| PSR_c
| PSR_f
},
19552 {"scfx", PSR_s
| PSR_c
| PSR_f
| PSR_x
},
19553 {"scxf", PSR_s
| PSR_c
| PSR_x
| PSR_f
},
19554 {"xfsc", PSR_x
| PSR_f
| PSR_s
| PSR_c
},
19555 {"xfcs", PSR_x
| PSR_f
| PSR_c
| PSR_s
},
19556 {"xsfc", PSR_x
| PSR_s
| PSR_f
| PSR_c
},
19557 {"xscf", PSR_x
| PSR_s
| PSR_c
| PSR_f
},
19558 {"xcfs", PSR_x
| PSR_c
| PSR_f
| PSR_s
},
19559 {"xcsf", PSR_x
| PSR_c
| PSR_s
| PSR_f
},
19560 {"cfsx", PSR_c
| PSR_f
| PSR_s
| PSR_x
},
19561 {"cfxs", PSR_c
| PSR_f
| PSR_x
| PSR_s
},
19562 {"csfx", PSR_c
| PSR_s
| PSR_f
| PSR_x
},
19563 {"csxf", PSR_c
| PSR_s
| PSR_x
| PSR_f
},
19564 {"cxfs", PSR_c
| PSR_x
| PSR_f
| PSR_s
},
19565 {"cxsf", PSR_c
| PSR_x
| PSR_s
| PSR_f
},
19568 /* Table of V7M psr names. */
19569 static const struct asm_psr v7m_psrs
[] =
19571 {"apsr", 0x0 }, {"APSR", 0x0 },
19572 {"iapsr", 0x1 }, {"IAPSR", 0x1 },
19573 {"eapsr", 0x2 }, {"EAPSR", 0x2 },
19574 {"psr", 0x3 }, {"PSR", 0x3 },
19575 {"xpsr", 0x3 }, {"XPSR", 0x3 }, {"xPSR", 3 },
19576 {"ipsr", 0x5 }, {"IPSR", 0x5 },
19577 {"epsr", 0x6 }, {"EPSR", 0x6 },
19578 {"iepsr", 0x7 }, {"IEPSR", 0x7 },
19579 {"msp", 0x8 }, {"MSP", 0x8 },
19580 {"psp", 0x9 }, {"PSP", 0x9 },
19581 {"msplim", 0xa }, {"MSPLIM", 0xa },
19582 {"psplim", 0xb }, {"PSPLIM", 0xb },
19583 {"primask", 0x10}, {"PRIMASK", 0x10},
19584 {"basepri", 0x11}, {"BASEPRI", 0x11},
19585 {"basepri_max", 0x12}, {"BASEPRI_MAX", 0x12},
19586 {"faultmask", 0x13}, {"FAULTMASK", 0x13},
19587 {"control", 0x14}, {"CONTROL", 0x14},
19588 {"msp_ns", 0x88}, {"MSP_NS", 0x88},
19589 {"psp_ns", 0x89}, {"PSP_NS", 0x89},
19590 {"msplim_ns", 0x8a}, {"MSPLIM_NS", 0x8a},
19591 {"psplim_ns", 0x8b}, {"PSPLIM_NS", 0x8b},
19592 {"primask_ns", 0x90}, {"PRIMASK_NS", 0x90},
19593 {"basepri_ns", 0x91}, {"BASEPRI_NS", 0x91},
19594 {"faultmask_ns", 0x93}, {"FAULTMASK_NS", 0x93},
19595 {"control_ns", 0x94}, {"CONTROL_NS", 0x94},
19596 {"sp_ns", 0x98}, {"SP_NS", 0x98 }
19599 /* Table of all shift-in-operand names. */
19600 static const struct asm_shift_name shift_names
[] =
19602 { "asl", SHIFT_LSL
}, { "ASL", SHIFT_LSL
},
19603 { "lsl", SHIFT_LSL
}, { "LSL", SHIFT_LSL
},
19604 { "lsr", SHIFT_LSR
}, { "LSR", SHIFT_LSR
},
19605 { "asr", SHIFT_ASR
}, { "ASR", SHIFT_ASR
},
19606 { "ror", SHIFT_ROR
}, { "ROR", SHIFT_ROR
},
19607 { "rrx", SHIFT_RRX
}, { "RRX", SHIFT_RRX
}
19610 /* Table of all explicit relocation names. */
19612 static struct reloc_entry reloc_names
[] =
19614 { "got", BFD_RELOC_ARM_GOT32
}, { "GOT", BFD_RELOC_ARM_GOT32
},
19615 { "gotoff", BFD_RELOC_ARM_GOTOFF
}, { "GOTOFF", BFD_RELOC_ARM_GOTOFF
},
19616 { "plt", BFD_RELOC_ARM_PLT32
}, { "PLT", BFD_RELOC_ARM_PLT32
},
19617 { "target1", BFD_RELOC_ARM_TARGET1
}, { "TARGET1", BFD_RELOC_ARM_TARGET1
},
19618 { "target2", BFD_RELOC_ARM_TARGET2
}, { "TARGET2", BFD_RELOC_ARM_TARGET2
},
19619 { "sbrel", BFD_RELOC_ARM_SBREL32
}, { "SBREL", BFD_RELOC_ARM_SBREL32
},
19620 { "tlsgd", BFD_RELOC_ARM_TLS_GD32
}, { "TLSGD", BFD_RELOC_ARM_TLS_GD32
},
19621 { "tlsldm", BFD_RELOC_ARM_TLS_LDM32
}, { "TLSLDM", BFD_RELOC_ARM_TLS_LDM32
},
19622 { "tlsldo", BFD_RELOC_ARM_TLS_LDO32
}, { "TLSLDO", BFD_RELOC_ARM_TLS_LDO32
},
19623 { "gottpoff",BFD_RELOC_ARM_TLS_IE32
}, { "GOTTPOFF",BFD_RELOC_ARM_TLS_IE32
},
19624 { "tpoff", BFD_RELOC_ARM_TLS_LE32
}, { "TPOFF", BFD_RELOC_ARM_TLS_LE32
},
19625 { "got_prel", BFD_RELOC_ARM_GOT_PREL
}, { "GOT_PREL", BFD_RELOC_ARM_GOT_PREL
},
19626 { "tlsdesc", BFD_RELOC_ARM_TLS_GOTDESC
},
19627 { "TLSDESC", BFD_RELOC_ARM_TLS_GOTDESC
},
19628 { "tlscall", BFD_RELOC_ARM_TLS_CALL
},
19629 { "TLSCALL", BFD_RELOC_ARM_TLS_CALL
},
19630 { "tlsdescseq", BFD_RELOC_ARM_TLS_DESCSEQ
},
19631 { "TLSDESCSEQ", BFD_RELOC_ARM_TLS_DESCSEQ
},
19632 { "gotfuncdesc", BFD_RELOC_ARM_GOTFUNCDESC
},
19633 { "GOTFUNCDESC", BFD_RELOC_ARM_GOTFUNCDESC
},
19634 { "gotofffuncdesc", BFD_RELOC_ARM_GOTOFFFUNCDESC
},
19635 { "GOTOFFFUNCDESC", BFD_RELOC_ARM_GOTOFFFUNCDESC
},
19636 { "funcdesc", BFD_RELOC_ARM_FUNCDESC
},
19637 { "FUNCDESC", BFD_RELOC_ARM_FUNCDESC
},
19638 { "tlsgd_fdpic", BFD_RELOC_ARM_TLS_GD32_FDPIC
}, { "TLSGD_FDPIC", BFD_RELOC_ARM_TLS_GD32_FDPIC
},
19639 { "tlsldm_fdpic", BFD_RELOC_ARM_TLS_LDM32_FDPIC
}, { "TLSLDM_FDPIC", BFD_RELOC_ARM_TLS_LDM32_FDPIC
},
19640 { "gottpoff_fdpic", BFD_RELOC_ARM_TLS_IE32_FDPIC
}, { "GOTTPOFF_FDIC", BFD_RELOC_ARM_TLS_IE32_FDPIC
},
19644 /* Table of all conditional affixes. 0xF is not defined as a condition code. */
19645 static const struct asm_cond conds
[] =
19649 {"cs", 0x2}, {"hs", 0x2},
19650 {"cc", 0x3}, {"ul", 0x3}, {"lo", 0x3},
/* Helper for barrier_opt_names: emit both the lower-case (L) and
   upper-case (U) spellings of a barrier option with the same 4-bit
   encoding CODE and required architecture feature FEAT.  */
#define UL_BARRIER(L,U,CODE,FEAT) \
  { L, CODE, ARM_FEATURE_CORE_LOW (FEAT) }, \
  { U, CODE, ARM_FEATURE_CORE_LOW (FEAT) }
19668 static struct asm_barrier_opt barrier_opt_names
[] =
19670 UL_BARRIER ("sy", "SY", 0xf, ARM_EXT_BARRIER
),
19671 UL_BARRIER ("st", "ST", 0xe, ARM_EXT_BARRIER
),
19672 UL_BARRIER ("ld", "LD", 0xd, ARM_EXT_V8
),
19673 UL_BARRIER ("ish", "ISH", 0xb, ARM_EXT_BARRIER
),
19674 UL_BARRIER ("sh", "SH", 0xb, ARM_EXT_BARRIER
),
19675 UL_BARRIER ("ishst", "ISHST", 0xa, ARM_EXT_BARRIER
),
19676 UL_BARRIER ("shst", "SHST", 0xa, ARM_EXT_BARRIER
),
19677 UL_BARRIER ("ishld", "ISHLD", 0x9, ARM_EXT_V8
),
19678 UL_BARRIER ("un", "UN", 0x7, ARM_EXT_BARRIER
),
19679 UL_BARRIER ("nsh", "NSH", 0x7, ARM_EXT_BARRIER
),
19680 UL_BARRIER ("unst", "UNST", 0x6, ARM_EXT_BARRIER
),
19681 UL_BARRIER ("nshst", "NSHST", 0x6, ARM_EXT_BARRIER
),
19682 UL_BARRIER ("nshld", "NSHLD", 0x5, ARM_EXT_V8
),
19683 UL_BARRIER ("osh", "OSH", 0x3, ARM_EXT_BARRIER
),
19684 UL_BARRIER ("oshst", "OSHST", 0x2, ARM_EXT_BARRIER
),
19685 UL_BARRIER ("oshld", "OSHLD", 0x1, ARM_EXT_V8
)
/* Table of ARM-format instructions.	*/

/* Macros for gluing together operand strings.  N.B. In all cases
   other than OPS0, the trailing OP_stop comes from default
   zero-initialization of the unspecified elements of the array.  */
#define OPS0()		  { OP_stop, }
#define OPS1(a)		  { OP_##a, }
#define OPS2(a,b)	  { OP_##a,OP_##b, }
#define OPS3(a,b,c)	  { OP_##a,OP_##b,OP_##c, }
#define OPS4(a,b,c,d)	  { OP_##a,OP_##b,OP_##c,OP_##d, }
#define OPS5(a,b,c,d,e)	  { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e, }
#define OPS6(a,b,c,d,e,f) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e,OP_##f, }

/* These macros are similar to the OPSn, but do not prepend the OP_ prefix.
   This is useful when mixing operands for ARM and THUMB, i.e. using the
   MIX_ARM_THUMB_OPERANDS macro.
   In order to use these macros, prefix the number of operands with _
   e.g. _3.  */
#define OPS_1(a)	   { a, }
#define OPS_2(a,b)	   { a,b, }
#define OPS_3(a,b,c)	   { a,b,c, }
#define OPS_4(a,b,c,d)	   { a,b,c,d, }
#define OPS_5(a,b,c,d,e)   { a,b,c,d,e, }
#define OPS_6(a,b,c,d,e,f) { a,b,c,d,e,f, }
/* These macros abstract out the exact format of the mnemonic table and
   save some repeated characters.  */

/* The normal sort of mnemonic; has a Thumb variant; takes a conditional suffix.  */
#define TxCE(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }

/* Two variants of the above - TCE for a numeric Thumb opcode, tCE for
   a T_MNEM_xyz enumerator.  */
#define TCE(mnem, aop, top, nops, ops, ae, te) \
  TxCE (mnem, aop, 0x##top, nops, ops, ae, te)
#define tCE(mnem, aop, top, nops, ops, ae, te) \
  TxCE (mnem, aop, T_MNEM##top, nops, ops, ae, te)

/* Second most common sort of mnemonic: has a Thumb variant, takes a conditional
   infix after the third character.  */
#define TxC3(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_cinfix3, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }
#define TxC3w(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_cinfix3_deprecated, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }
#define TC3(mnem, aop, top, nops, ops, ae, te) \
  TxC3 (mnem, aop, 0x##top, nops, ops, ae, te)
#define TC3w(mnem, aop, top, nops, ops, ae, te) \
  TxC3w (mnem, aop, 0x##top, nops, ops, ae, te)
#define tC3(mnem, aop, top, nops, ops, ae, te) \
  TxC3 (mnem, aop, T_MNEM##top, nops, ops, ae, te)
#define tC3w(mnem, aop, top, nops, ops, ae, te) \
  TxC3w (mnem, aop, T_MNEM##top, nops, ops, ae, te)

/* Mnemonic that cannot be conditionalized.  The ARM condition-code
   field is still 0xE.  Many of the Thumb variants can be executed
   conditionally, so this is checked separately.  */
#define TUE(mnem, op, top, nops, ops, ae, te)				\
  { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }

/* Same as TUE but the encoding function for ARM and Thumb modes is the same.
   Used by mnemonics that have very minimal differences in the encoding for
   ARM and Thumb variants and can be handled in a common function.  */
#define TUEc(mnem, op, top, nops, ops, en)				\
  { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##en, do_##en }

/* Mnemonic that cannot be conditionalized, and bears 0xF in its ARM
   condition code field.  */
#define TUF(mnem, op, top, nops, ops, ae, te)				\
  { mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }
/* ARM-only variants of all the above.  */
#define CE(mnem,  op, nops, ops, ae)	\
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

#define C3(mnem, op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

/* Thumb-only variants of TCE and TUE.  */
#define ToC(mnem, top, nops, ops, te) \
  { mnem, OPS##nops ops, OT_csuffix, 0x0, 0x##top, 0, THUMB_VARIANT, NULL, \
    do_##te }

#define ToU(mnem, top, nops, ops, te) \
  { mnem, OPS##nops ops, OT_unconditional, 0x0, 0x##top, 0, THUMB_VARIANT, \
    NULL, do_##te }

/* T_MNEM_xyz enumerator variants of ToC.  */
#define toC(mnem, top, nops, ops, te) \
  { mnem, OPS##nops ops, OT_csuffix, 0x0, T_MNEM##top, 0, THUMB_VARIANT, NULL, \
    do_##te }

/* T_MNEM_xyz enumerator variants of ToU.  */
#define toU(mnem, top, nops, ops, te) \
  { mnem, OPS##nops ops, OT_unconditional, 0x0, T_MNEM##top, 0, THUMB_VARIANT, \
    NULL, do_##te }

/* Legacy mnemonics that always have conditional infix after the third
   character.  */
#define CL(mnem, op, nops, ops, ae)	\
  { mnem, OPS##nops ops, OT_cinfix3_legacy, \
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

/* Coprocessor instructions.  Isomorphic between Arm and Thumb-2.  */
#define cCE(mnem,  op, nops, ops, ae)	\
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

/* Legacy coprocessor instructions where conditional infix and conditional
   suffix are ambiguous.  For consistency this includes all FPA instructions,
   not just the potentially ambiguous ones.  */
#define cCL(mnem, op, nops, ops, ae)	\
  { mnem, OPS##nops ops, OT_cinfix3_legacy, \
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

/* Coprocessor, takes either a suffix or a position-3 infix
   (for an FPA corner case).  */
#define C3E(mnem, op, nops, ops, ae) \
  { mnem, OPS##nops ops, OT_csuf_or_in3, \
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
/* Build one entry whose mnemonic is the concatenation m1+cond+m3; the
   OT_odd_infix_0 offset records where the condition sits inside the name.  */
#define xCM_(m1, m2, m3, op, nops, ops, ae)	\
  { m1 #m2 m3, OPS##nops ops, \
    sizeof (#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof (m1) - 1, \
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

/* Expand to one entry per condition code (plus the bare form) for a
   mnemonic with an odd conditional-infix position.  */
#define CM(m1, m2, op, nops, ops, ae)	\
  xCM_ (m1,   , m2, op, nops, ops, ae),	\
  xCM_ (m1, eq, m2, op, nops, ops, ae),	\
  xCM_ (m1, ne, m2, op, nops, ops, ae),	\
  xCM_ (m1, cs, m2, op, nops, ops, ae),	\
  xCM_ (m1, hs, m2, op, nops, ops, ae),	\
  xCM_ (m1, cc, m2, op, nops, ops, ae),	\
  xCM_ (m1, ul, m2, op, nops, ops, ae),	\
  xCM_ (m1, lo, m2, op, nops, ops, ae),	\
  xCM_ (m1, mi, m2, op, nops, ops, ae),	\
  xCM_ (m1, pl, m2, op, nops, ops, ae),	\
  xCM_ (m1, vs, m2, op, nops, ops, ae),	\
  xCM_ (m1, vc, m2, op, nops, ops, ae),	\
  xCM_ (m1, hi, m2, op, nops, ops, ae),	\
  xCM_ (m1, ls, m2, op, nops, ops, ae),	\
  xCM_ (m1, ge, m2, op, nops, ops, ae),	\
  xCM_ (m1, lt, m2, op, nops, ops, ae),	\
  xCM_ (m1, gt, m2, op, nops, ops, ae),	\
  xCM_ (m1, le, m2, op, nops, ops, ae),	\
  xCM_ (m1, al, m2, op, nops, ops, ae)

#define UE(mnem, op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }

#define UF(mnem, op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }

/* Neon data-processing.  ARM versions are unconditional with cond=0xf.
   The Thumb and ARM variants are mostly the same (bits 0-23 and 24/28), so we
   use the same encoding function for each.  */
#define NUF(mnem, op, nops, ops, enc)					\
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op,		\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

/* Neon data processing, version which indirects through neon_enc_tab for
   the various overloaded versions of opcodes.  */
#define nUF(mnem, op, nops, ops, enc)					\
  { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM##op, N_MNEM##op,	\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

/* Neon insn with conditional suffix for the ARM version, non-overloaded
   types.  */
#define NCE_tag(mnem, op, nops, ops, enc, tag)				\
  { #mnem, OPS##nops ops, tag, 0x##op, 0x##op, ARM_VARIANT,		\
    THUMB_VARIANT, do_##enc, do_##enc }

#define NCE(mnem, op, nops, ops, enc)					\
   NCE_tag (mnem, op, nops, ops, enc, OT_csuffix)

#define NCEF(mnem, op, nops, ops, enc)					\
    NCE_tag (mnem, op, nops, ops, enc, OT_csuffixF)

/* Neon insn with conditional suffix for the ARM version, overloaded types.  */
#define nCE_tag(mnem, op, nops, ops, enc, tag)				\
  { #mnem, OPS##nops ops, tag, N_MNEM##op, N_MNEM##op,		\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

#define nCE(mnem, op, nops, ops, enc)					\
   nCE_tag (mnem, op, nops, ops, enc, OT_csuffix)

#define nCEF(mnem, op, nops, ops, enc)					\
    nCE_tag (mnem, op, nops, ops, enc, OT_csuffixF)
19886 static const struct asm_opcode insns
[] =
19888 #define ARM_VARIANT & arm_ext_v1 /* Core ARM Instructions. */
19889 #define THUMB_VARIANT & arm_ext_v4t
19890 tCE("and", 0000000, _and
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
19891 tC3("ands", 0100000, _ands
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
19892 tCE("eor", 0200000, _eor
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
19893 tC3("eors", 0300000, _eors
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
19894 tCE("sub", 0400000, _sub
, 3, (RR
, oRR
, SH
), arit
, t_add_sub
),
19895 tC3("subs", 0500000, _subs
, 3, (RR
, oRR
, SH
), arit
, t_add_sub
),
19896 tCE("add", 0800000, _add
, 3, (RR
, oRR
, SHG
), arit
, t_add_sub
),
19897 tC3("adds", 0900000, _adds
, 3, (RR
, oRR
, SHG
), arit
, t_add_sub
),
19898 tCE("adc", 0a00000
, _adc
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
19899 tC3("adcs", 0b00000, _adcs
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
19900 tCE("sbc", 0c00000
, _sbc
, 3, (RR
, oRR
, SH
), arit
, t_arit3
),
19901 tC3("sbcs", 0d00000
, _sbcs
, 3, (RR
, oRR
, SH
), arit
, t_arit3
),
19902 tCE("orr", 1800000, _orr
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
19903 tC3("orrs", 1900000, _orrs
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
19904 tCE("bic", 1c00000
, _bic
, 3, (RR
, oRR
, SH
), arit
, t_arit3
),
19905 tC3("bics", 1d00000
, _bics
, 3, (RR
, oRR
, SH
), arit
, t_arit3
),
19907 /* The p-variants of tst/cmp/cmn/teq (below) are the pre-V6 mechanism
19908 for setting PSR flag bits. They are obsolete in V6 and do not
19909 have Thumb equivalents. */
19910 tCE("tst", 1100000, _tst
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
19911 tC3w("tsts", 1100000, _tst
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
19912 CL("tstp", 110f000
, 2, (RR
, SH
), cmp
),
19913 tCE("cmp", 1500000, _cmp
, 2, (RR
, SH
), cmp
, t_mov_cmp
),
19914 tC3w("cmps", 1500000, _cmp
, 2, (RR
, SH
), cmp
, t_mov_cmp
),
19915 CL("cmpp", 150f000
, 2, (RR
, SH
), cmp
),
19916 tCE("cmn", 1700000, _cmn
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
19917 tC3w("cmns", 1700000, _cmn
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
19918 CL("cmnp", 170f000
, 2, (RR
, SH
), cmp
),
19920 tCE("mov", 1a00000
, _mov
, 2, (RR
, SH
), mov
, t_mov_cmp
),
19921 tC3("movs", 1b00000
, _movs
, 2, (RR
, SHG
), mov
, t_mov_cmp
),
19922 tCE("mvn", 1e00000
, _mvn
, 2, (RR
, SH
), mov
, t_mvn_tst
),
19923 tC3("mvns", 1f00000
, _mvns
, 2, (RR
, SH
), mov
, t_mvn_tst
),
19925 tCE("ldr", 4100000, _ldr
, 2, (RR
, ADDRGLDR
),ldst
, t_ldst
),
19926 tC3("ldrb", 4500000, _ldrb
, 2, (RRnpc_npcsp
, ADDRGLDR
),ldst
, t_ldst
),
19927 tCE("str", 4000000, _str
, _2
, (MIX_ARM_THUMB_OPERANDS (OP_RR
,
19929 OP_ADDRGLDR
),ldst
, t_ldst
),
19930 tC3("strb", 4400000, _strb
, 2, (RRnpc_npcsp
, ADDRGLDR
),ldst
, t_ldst
),
19932 tCE("stm", 8800000, _stmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
19933 tC3("stmia", 8800000, _stmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
19934 tC3("stmea", 8800000, _stmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
19935 tCE("ldm", 8900000, _ldmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
19936 tC3("ldmia", 8900000, _ldmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
19937 tC3("ldmfd", 8900000, _ldmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
19939 tCE("b", a000000
, _b
, 1, (EXPr
), branch
, t_branch
),
19940 TCE("bl", b000000
, f000f800
, 1, (EXPr
), bl
, t_branch23
),
19943 tCE("adr", 28f0000
, _adr
, 2, (RR
, EXP
), adr
, t_adr
),
19944 C3(adrl
, 28f0000
, 2, (RR
, EXP
), adrl
),
19945 tCE("nop", 1a00000
, _nop
, 1, (oI255c
), nop
, t_nop
),
19946 tCE("udf", 7f000f0
, _udf
, 1, (oIffffb
), bkpt
, t_udf
),
19948 /* Thumb-compatibility pseudo ops. */
19949 tCE("lsl", 1a00000
, _lsl
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
19950 tC3("lsls", 1b00000
, _lsls
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
19951 tCE("lsr", 1a00020
, _lsr
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
19952 tC3("lsrs", 1b00020
, _lsrs
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
19953 tCE("asr", 1a00040
, _asr
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
19954 tC3("asrs", 1b00040
, _asrs
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
19955 tCE("ror", 1a00060
, _ror
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
19956 tC3("rors", 1b00060
, _rors
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
19957 tCE("neg", 2600000, _neg
, 2, (RR
, RR
), rd_rn
, t_neg
),
19958 tC3("negs", 2700000, _negs
, 2, (RR
, RR
), rd_rn
, t_neg
),
19959 tCE("push", 92d0000
, _push
, 1, (REGLST
), push_pop
, t_push_pop
),
19960 tCE("pop", 8bd0000
, _pop
, 1, (REGLST
), push_pop
, t_push_pop
),
19962 /* These may simplify to neg. */
19963 TCE("rsb", 0600000, ebc00000
, 3, (RR
, oRR
, SH
), arit
, t_rsb
),
19964 TC3("rsbs", 0700000, ebd00000
, 3, (RR
, oRR
, SH
), arit
, t_rsb
),
19966 #undef THUMB_VARIANT
19967 #define THUMB_VARIANT & arm_ext_os
19969 TCE("swi", f000000
, df00
, 1, (EXPi
), swi
, t_swi
),
19970 TCE("svc", f000000
, df00
, 1, (EXPi
), swi
, t_swi
),
19972 #undef THUMB_VARIANT
19973 #define THUMB_VARIANT & arm_ext_v6
19975 TCE("cpy", 1a00000
, 4600, 2, (RR
, RR
), rd_rm
, t_cpy
),
19977 /* V1 instructions with no Thumb analogue prior to V6T2. */
19978 #undef THUMB_VARIANT
19979 #define THUMB_VARIANT & arm_ext_v6t2
19981 TCE("teq", 1300000, ea900f00
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
19982 TC3w("teqs", 1300000, ea900f00
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
19983 CL("teqp", 130f000
, 2, (RR
, SH
), cmp
),
19985 TC3("ldrt", 4300000, f8500e00
, 2, (RRnpc_npcsp
, ADDR
),ldstt
, t_ldstt
),
19986 TC3("ldrbt", 4700000, f8100e00
, 2, (RRnpc_npcsp
, ADDR
),ldstt
, t_ldstt
),
19987 TC3("strt", 4200000, f8400e00
, 2, (RR_npcsp
, ADDR
), ldstt
, t_ldstt
),
19988 TC3("strbt", 4600000, f8000e00
, 2, (RRnpc_npcsp
, ADDR
),ldstt
, t_ldstt
),
19990 TC3("stmdb", 9000000, e9000000
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
19991 TC3("stmfd", 9000000, e9000000
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
19993 TC3("ldmdb", 9100000, e9100000
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
19994 TC3("ldmea", 9100000, e9100000
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
19996 /* V1 instructions with no Thumb analogue at all. */
19997 CE("rsc", 0e00000
, 3, (RR
, oRR
, SH
), arit
),
19998 C3(rscs
, 0f00000
, 3, (RR
, oRR
, SH
), arit
),
20000 C3(stmib
, 9800000, 2, (RRw
, REGLST
), ldmstm
),
20001 C3(stmfa
, 9800000, 2, (RRw
, REGLST
), ldmstm
),
20002 C3(stmda
, 8000000, 2, (RRw
, REGLST
), ldmstm
),
20003 C3(stmed
, 8000000, 2, (RRw
, REGLST
), ldmstm
),
20004 C3(ldmib
, 9900000, 2, (RRw
, REGLST
), ldmstm
),
20005 C3(ldmed
, 9900000, 2, (RRw
, REGLST
), ldmstm
),
20006 C3(ldmda
, 8100000, 2, (RRw
, REGLST
), ldmstm
),
20007 C3(ldmfa
, 8100000, 2, (RRw
, REGLST
), ldmstm
),
20010 #define ARM_VARIANT & arm_ext_v2 /* ARM 2 - multiplies. */
20011 #undef THUMB_VARIANT
20012 #define THUMB_VARIANT & arm_ext_v4t
20014 tCE("mul", 0000090, _mul
, 3, (RRnpc
, RRnpc
, oRR
), mul
, t_mul
),
20015 tC3("muls", 0100090, _muls
, 3, (RRnpc
, RRnpc
, oRR
), mul
, t_mul
),
20017 #undef THUMB_VARIANT
20018 #define THUMB_VARIANT & arm_ext_v6t2
20020 TCE("mla", 0200090, fb000000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mlas
, t_mla
),
20021 C3(mlas
, 0300090, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mlas
),
20023 /* Generic coprocessor instructions. */
20024 TCE("cdp", e000000
, ee000000
, 6, (RCP
, I15b
, RCN
, RCN
, RCN
, oI7b
), cdp
, cdp
),
20025 TCE("ldc", c100000
, ec100000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
20026 TC3("ldcl", c500000
, ec500000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
20027 TCE("stc", c000000
, ec000000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
20028 TC3("stcl", c400000
, ec400000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
20029 TCE("mcr", e000010
, ee000010
, 6, (RCP
, I7b
, RR
, RCN
, RCN
, oI7b
), co_reg
, co_reg
),
20030 TCE("mrc", e100010
, ee100010
, 6, (RCP
, I7b
, APSR_RR
, RCN
, RCN
, oI7b
), co_reg
, co_reg
),
20033 #define ARM_VARIANT & arm_ext_v2s /* ARM 3 - swp instructions. */
20035 CE("swp", 1000090, 3, (RRnpc
, RRnpc
, RRnpcb
), rd_rm_rn
),
20036 C3(swpb
, 1400090, 3, (RRnpc
, RRnpc
, RRnpcb
), rd_rm_rn
),
20039 #define ARM_VARIANT & arm_ext_v3 /* ARM 6 Status register instructions. */
20040 #undef THUMB_VARIANT
20041 #define THUMB_VARIANT & arm_ext_msr
20043 TCE("mrs", 1000000, f3e08000
, 2, (RRnpc
, rPSR
), mrs
, t_mrs
),
20044 TCE("msr", 120f000
, f3808000
, 2, (wPSR
, RR_EXi
), msr
, t_msr
),
20047 #define ARM_VARIANT & arm_ext_v3m /* ARM 7M long multiplies. */
20048 #undef THUMB_VARIANT
20049 #define THUMB_VARIANT & arm_ext_v6t2
20051 TCE("smull", 0c00090
, fb800000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
, t_mull
),
20052 CM("smull","s", 0d00090
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
),
20053 TCE("umull", 0800090, fba00000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
, t_mull
),
20054 CM("umull","s", 0900090, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
),
20055 TCE("smlal", 0e00090
, fbc00000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
, t_mull
),
20056 CM("smlal","s", 0f00090
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
),
20057 TCE("umlal", 0a00090
, fbe00000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
, t_mull
),
20058 CM("umlal","s", 0b00090, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
),
20061 #define ARM_VARIANT & arm_ext_v4 /* ARM Architecture 4. */
20062 #undef THUMB_VARIANT
20063 #define THUMB_VARIANT & arm_ext_v4t
20065 tC3("ldrh", 01000b0
, _ldrh
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
20066 tC3("strh", 00000b0
, _strh
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
20067 tC3("ldrsh", 01000f0
, _ldrsh
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
20068 tC3("ldrsb", 01000d0
, _ldrsb
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
20069 tC3("ldsh", 01000f0
, _ldrsh
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
20070 tC3("ldsb", 01000d0
, _ldrsb
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
20073 #define ARM_VARIANT & arm_ext_v4t_5
20075 /* ARM Architecture 4T. */
20076 /* Note: bx (and blx) are required on V5, even if the processor does
20077 not support Thumb. */
20078 TCE("bx", 12fff10
, 4700, 1, (RR
), bx
, t_bx
),
20081 #define ARM_VARIANT & arm_ext_v5 /* ARM Architecture 5T. */
20082 #undef THUMB_VARIANT
20083 #define THUMB_VARIANT & arm_ext_v5t
20085 /* Note: blx has 2 variants; the .value coded here is for
20086 BLX(2). Only this variant has conditional execution. */
20087 TCE("blx", 12fff30
, 4780, 1, (RR_EXr
), blx
, t_blx
),
20088 TUE("bkpt", 1200070, be00
, 1, (oIffffb
), bkpt
, t_bkpt
),
20090 #undef THUMB_VARIANT
20091 #define THUMB_VARIANT & arm_ext_v6t2
20093 TCE("clz", 16f0f10
, fab0f080
, 2, (RRnpc
, RRnpc
), rd_rm
, t_clz
),
20094 TUF("ldc2", c100000
, fc100000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
20095 TUF("ldc2l", c500000
, fc500000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
20096 TUF("stc2", c000000
, fc000000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
20097 TUF("stc2l", c400000
, fc400000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
20098 TUF("cdp2", e000000
, fe000000
, 6, (RCP
, I15b
, RCN
, RCN
, RCN
, oI7b
), cdp
, cdp
),
20099 TUF("mcr2", e000010
, fe000010
, 6, (RCP
, I7b
, RR
, RCN
, RCN
, oI7b
), co_reg
, co_reg
),
20100 TUF("mrc2", e100010
, fe100010
, 6, (RCP
, I7b
, RR
, RCN
, RCN
, oI7b
), co_reg
, co_reg
),
20103 #define ARM_VARIANT & arm_ext_v5exp /* ARM Architecture 5TExP. */
20104 #undef THUMB_VARIANT
20105 #define THUMB_VARIANT & arm_ext_v5exp
20107 TCE("smlabb", 1000080, fb100000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
20108 TCE("smlatb", 10000a0
, fb100020
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
20109 TCE("smlabt", 10000c0
, fb100010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
20110 TCE("smlatt", 10000e0
, fb100030
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
20112 TCE("smlawb", 1200080, fb300000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
20113 TCE("smlawt", 12000c0
, fb300010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
20115 TCE("smlalbb", 1400080, fbc00080
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smlal
, t_mlal
),
20116 TCE("smlaltb", 14000a0
, fbc000a0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smlal
, t_mlal
),
20117 TCE("smlalbt", 14000c0
, fbc00090
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smlal
, t_mlal
),
20118 TCE("smlaltt", 14000e0
, fbc000b0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smlal
, t_mlal
),
20120 TCE("smulbb", 1600080, fb10f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
20121 TCE("smultb", 16000a0
, fb10f020
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
20122 TCE("smulbt", 16000c0
, fb10f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
20123 TCE("smultt", 16000e0
, fb10f030
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
20125 TCE("smulwb", 12000a0
, fb30f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
20126 TCE("smulwt", 12000e0
, fb30f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
20128 TCE("qadd", 1000050, fa80f080
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rm_rn
, t_simd2
),
20129 TCE("qdadd", 1400050, fa80f090
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rm_rn
, t_simd2
),
20130 TCE("qsub", 1200050, fa80f0a0
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rm_rn
, t_simd2
),
20131 TCE("qdsub", 1600050, fa80f0b0
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rm_rn
, t_simd2
),
20134 #define ARM_VARIANT & arm_ext_v5e /* ARM Architecture 5TE. */
20135 #undef THUMB_VARIANT
20136 #define THUMB_VARIANT & arm_ext_v6t2
20138 TUF("pld", 450f000
, f810f000
, 1, (ADDR
), pld
, t_pld
),
20139 TC3("ldrd", 00000d0
, e8500000
, 3, (RRnpc_npcsp
, oRRnpc_npcsp
, ADDRGLDRS
),
20141 TC3("strd", 00000f0
, e8400000
, 3, (RRnpc_npcsp
, oRRnpc_npcsp
,
20142 ADDRGLDRS
), ldrd
, t_ldstd
),
20144 TCE("mcrr", c400000
, ec400000
, 5, (RCP
, I15b
, RRnpc
, RRnpc
, RCN
), co_reg2c
, co_reg2c
),
20145 TCE("mrrc", c500000
, ec500000
, 5, (RCP
, I15b
, RRnpc
, RRnpc
, RCN
), co_reg2c
, co_reg2c
),
20148 #define ARM_VARIANT & arm_ext_v5j /* ARM Architecture 5TEJ. */
20150 TCE("bxj", 12fff20
, f3c08f00
, 1, (RR
), bxj
, t_bxj
),
20153 #define ARM_VARIANT & arm_ext_v6 /* ARM V6. */
20154 #undef THUMB_VARIANT
20155 #define THUMB_VARIANT & arm_ext_v6
20157 TUF("cpsie", 1080000, b660
, 2, (CPSF
, oI31b
), cpsi
, t_cpsi
),
20158 TUF("cpsid", 10c0000
, b670
, 2, (CPSF
, oI31b
), cpsi
, t_cpsi
),
20159 tCE("rev", 6bf0f30
, _rev
, 2, (RRnpc
, RRnpc
), rd_rm
, t_rev
),
20160 tCE("rev16", 6bf0fb0
, _rev16
, 2, (RRnpc
, RRnpc
), rd_rm
, t_rev
),
20161 tCE("revsh", 6ff0fb0
, _revsh
, 2, (RRnpc
, RRnpc
), rd_rm
, t_rev
),
20162 tCE("sxth", 6bf0070
, _sxth
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
20163 tCE("uxth", 6ff0070
, _uxth
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
20164 tCE("sxtb", 6af0070
, _sxtb
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
20165 tCE("uxtb", 6ef0070
, _uxtb
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
20166 TUF("setend", 1010000, b650
, 1, (ENDI
), setend
, t_setend
),
20168 #undef THUMB_VARIANT
20169 #define THUMB_VARIANT & arm_ext_v6t2_v8m
20171 TCE("ldrex", 1900f9f
, e8500f00
, 2, (RRnpc_npcsp
, ADDR
), ldrex
, t_ldrex
),
20172 TCE("strex", 1800f90
, e8400000
, 3, (RRnpc_npcsp
, RRnpc_npcsp
, ADDR
),
20174 #undef THUMB_VARIANT
20175 #define THUMB_VARIANT & arm_ext_v6t2
20177 TUF("mcrr2", c400000
, fc400000
, 5, (RCP
, I15b
, RRnpc
, RRnpc
, RCN
), co_reg2c
, co_reg2c
),
20178 TUF("mrrc2", c500000
, fc500000
, 5, (RCP
, I15b
, RRnpc
, RRnpc
, RCN
), co_reg2c
, co_reg2c
),
20180 TCE("ssat", 6a00010
, f3000000
, 4, (RRnpc
, I32
, RRnpc
, oSHllar
),ssat
, t_ssat
),
20181 TCE("usat", 6e00010
, f3800000
, 4, (RRnpc
, I31
, RRnpc
, oSHllar
),usat
, t_usat
),
20183 /* ARM V6 not included in V7M. */
20184 #undef THUMB_VARIANT
20185 #define THUMB_VARIANT & arm_ext_v6_notm
20186 TUF("rfeia", 8900a00
, e990c000
, 1, (RRw
), rfe
, rfe
),
20187 TUF("rfe", 8900a00
, e990c000
, 1, (RRw
), rfe
, rfe
),
20188 UF(rfeib
, 9900a00
, 1, (RRw
), rfe
),
20189 UF(rfeda
, 8100a00
, 1, (RRw
), rfe
),
20190 TUF("rfedb", 9100a00
, e810c000
, 1, (RRw
), rfe
, rfe
),
20191 TUF("rfefd", 8900a00
, e990c000
, 1, (RRw
), rfe
, rfe
),
20192 UF(rfefa
, 8100a00
, 1, (RRw
), rfe
),
20193 TUF("rfeea", 9100a00
, e810c000
, 1, (RRw
), rfe
, rfe
),
20194 UF(rfeed
, 9900a00
, 1, (RRw
), rfe
),
20195 TUF("srsia", 8c00500
, e980c000
, 2, (oRRw
, I31w
), srs
, srs
),
20196 TUF("srs", 8c00500
, e980c000
, 2, (oRRw
, I31w
), srs
, srs
),
20197 TUF("srsea", 8c00500
, e980c000
, 2, (oRRw
, I31w
), srs
, srs
),
20198 UF(srsib
, 9c00500
, 2, (oRRw
, I31w
), srs
),
20199 UF(srsfa
, 9c00500
, 2, (oRRw
, I31w
), srs
),
20200 UF(srsda
, 8400500, 2, (oRRw
, I31w
), srs
),
20201 UF(srsed
, 8400500, 2, (oRRw
, I31w
), srs
),
20202 TUF("srsdb", 9400500, e800c000
, 2, (oRRw
, I31w
), srs
, srs
),
20203 TUF("srsfd", 9400500, e800c000
, 2, (oRRw
, I31w
), srs
, srs
),
20204 TUF("cps", 1020000, f3af8100
, 1, (I31b
), imm0
, t_cps
),
20206 /* ARM V6 not included in V7M (eg. integer SIMD). */
20207 #undef THUMB_VARIANT
20208 #define THUMB_VARIANT & arm_ext_v6_dsp
20209 TCE("pkhbt", 6800010, eac00000
, 4, (RRnpc
, RRnpc
, RRnpc
, oSHll
), pkhbt
, t_pkhbt
),
20210 TCE("pkhtb", 6800050, eac00020
, 4, (RRnpc
, RRnpc
, RRnpc
, oSHar
), pkhtb
, t_pkhtb
),
20211 TCE("qadd16", 6200f10
, fa90f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20212 TCE("qadd8", 6200f90
, fa80f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20213 TCE("qasx", 6200f30
, faa0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20214 /* Old name for QASX. */
20215 TCE("qaddsubx",6200f30
, faa0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20216 TCE("qsax", 6200f50
, fae0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20217 /* Old name for QSAX. */
20218 TCE("qsubaddx",6200f50
, fae0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20219 TCE("qsub16", 6200f70
, fad0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20220 TCE("qsub8", 6200ff0
, fac0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20221 TCE("sadd16", 6100f10
, fa90f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20222 TCE("sadd8", 6100f90
, fa80f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20223 TCE("sasx", 6100f30
, faa0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20224 /* Old name for SASX. */
20225 TCE("saddsubx",6100f30
, faa0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20226 TCE("shadd16", 6300f10
, fa90f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20227 TCE("shadd8", 6300f90
, fa80f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20228 TCE("shasx", 6300f30
, faa0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20229 /* Old name for SHASX. */
20230 TCE("shaddsubx", 6300f30
, faa0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20231 TCE("shsax", 6300f50
, fae0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20232 /* Old name for SHSAX. */
20233 TCE("shsubaddx", 6300f50
, fae0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20234 TCE("shsub16", 6300f70
, fad0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20235 TCE("shsub8", 6300ff0
, fac0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20236 TCE("ssax", 6100f50
, fae0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20237 /* Old name for SSAX. */
20238 TCE("ssubaddx",6100f50
, fae0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20239 TCE("ssub16", 6100f70
, fad0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20240 TCE("ssub8", 6100ff0
, fac0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20241 TCE("uadd16", 6500f10
, fa90f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20242 TCE("uadd8", 6500f90
, fa80f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20243 TCE("uasx", 6500f30
, faa0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20244 /* Old name for UASX. */
20245 TCE("uaddsubx",6500f30
, faa0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20246 TCE("uhadd16", 6700f10
, fa90f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20247 TCE("uhadd8", 6700f90
, fa80f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20248 TCE("uhasx", 6700f30
, faa0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20249 /* Old name for UHASX. */
20250 TCE("uhaddsubx", 6700f30
, faa0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20251 TCE("uhsax", 6700f50
, fae0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20252 /* Old name for UHSAX. */
20253 TCE("uhsubaddx", 6700f50
, fae0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20254 TCE("uhsub16", 6700f70
, fad0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20255 TCE("uhsub8", 6700ff0
, fac0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20256 TCE("uqadd16", 6600f10
, fa90f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20257 TCE("uqadd8", 6600f90
, fa80f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20258 TCE("uqasx", 6600f30
, faa0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20259 /* Old name for UQASX. */
20260 TCE("uqaddsubx", 6600f30
, faa0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20261 TCE("uqsax", 6600f50
, fae0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20262 /* Old name for UQSAX. */
20263 TCE("uqsubaddx", 6600f50
, fae0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20264 TCE("uqsub16", 6600f70
, fad0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20265 TCE("uqsub8", 6600ff0
, fac0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20266 TCE("usub16", 6500f70
, fad0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20267 TCE("usax", 6500f50
, fae0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20268 /* Old name for USAX. */
20269 TCE("usubaddx",6500f50
, fae0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20270 TCE("usub8", 6500ff0
, fac0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20271 TCE("sxtah", 6b00070
, fa00f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
20272 TCE("sxtab16", 6800070, fa20f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
20273 TCE("sxtab", 6a00070
, fa40f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
20274 TCE("sxtb16", 68f0070
, fa2ff080
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
20275 TCE("uxtah", 6f00070
, fa10f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
20276 TCE("uxtab16", 6c00070
, fa30f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
20277 TCE("uxtab", 6e00070
, fa50f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
20278 TCE("uxtb16", 6cf0070
, fa3ff080
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
20279 TCE("sel", 6800fb0
, faa0f080
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20280 TCE("smlad", 7000010, fb200000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
20281 TCE("smladx", 7000030, fb200010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
20282 TCE("smlald", 7400010, fbc000c0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
,t_mlal
),
20283 TCE("smlaldx", 7400030, fbc000d0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
,t_mlal
),
20284 TCE("smlsd", 7000050, fb400000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
20285 TCE("smlsdx", 7000070, fb400010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
20286 TCE("smlsld", 7400050, fbd000c0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
,t_mlal
),
20287 TCE("smlsldx", 7400070, fbd000d0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
,t_mlal
),
20288 TCE("smmla", 7500010, fb500000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
20289 TCE("smmlar", 7500030, fb500010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
20290 TCE("smmls", 75000d0
, fb600000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
20291 TCE("smmlsr", 75000f0
, fb600010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
20292 TCE("smmul", 750f010
, fb50f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
20293 TCE("smmulr", 750f030
, fb50f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
20294 TCE("smuad", 700f010
, fb20f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
20295 TCE("smuadx", 700f030
, fb20f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
20296 TCE("smusd", 700f050
, fb40f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
20297 TCE("smusdx", 700f070
, fb40f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
20298 TCE("ssat16", 6a00f30
, f3200000
, 3, (RRnpc
, I16
, RRnpc
), ssat16
, t_ssat16
),
20299 TCE("umaal", 0400090, fbe00060
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
, t_mlal
),
20300 TCE("usad8", 780f010
, fb70f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
20301 TCE("usada8", 7800010, fb700000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
20302 TCE("usat16", 6e00f30
, f3a00000
, 3, (RRnpc
, I15
, RRnpc
), usat16
, t_usat16
),
20305 #define ARM_VARIANT & arm_ext_v6k_v6t2
20306 #undef THUMB_VARIANT
20307 #define THUMB_VARIANT & arm_ext_v6k_v6t2
20309 tCE("yield", 320f001
, _yield
, 0, (), noargs
, t_hint
),
20310 tCE("wfe", 320f002
, _wfe
, 0, (), noargs
, t_hint
),
20311 tCE("wfi", 320f003
, _wfi
, 0, (), noargs
, t_hint
),
20312 tCE("sev", 320f004
, _sev
, 0, (), noargs
, t_hint
),
20314 #undef THUMB_VARIANT
20315 #define THUMB_VARIANT & arm_ext_v6_notm
20316 TCE("ldrexd", 1b00f9f
, e8d0007f
, 3, (RRnpc_npcsp
, oRRnpc_npcsp
, RRnpcb
),
20318 TCE("strexd", 1a00f90
, e8c00070
, 4, (RRnpc_npcsp
, RRnpc_npcsp
, oRRnpc_npcsp
,
20319 RRnpcb
), strexd
, t_strexd
),
20321 #undef THUMB_VARIANT
20322 #define THUMB_VARIANT & arm_ext_v6t2_v8m
20323 TCE("ldrexb", 1d00f9f
, e8d00f4f
, 2, (RRnpc_npcsp
,RRnpcb
),
20325 TCE("ldrexh", 1f00f9f
, e8d00f5f
, 2, (RRnpc_npcsp
, RRnpcb
),
20327 TCE("strexb", 1c00f90
, e8c00f40
, 3, (RRnpc_npcsp
, RRnpc_npcsp
, ADDR
),
20329 TCE("strexh", 1e00f90
, e8c00f50
, 3, (RRnpc_npcsp
, RRnpc_npcsp
, ADDR
),
20331 TUF("clrex", 57ff01f
, f3bf8f2f
, 0, (), noargs
, noargs
),
20334 #define ARM_VARIANT & arm_ext_sec
20335 #undef THUMB_VARIANT
20336 #define THUMB_VARIANT & arm_ext_sec
20338 TCE("smc", 1600070, f7f08000
, 1, (EXPi
), smc
, t_smc
),
20341 #define ARM_VARIANT & arm_ext_virt
20342 #undef THUMB_VARIANT
20343 #define THUMB_VARIANT & arm_ext_virt
20345 TCE("hvc", 1400070, f7e08000
, 1, (EXPi
), hvc
, t_hvc
),
20346 TCE("eret", 160006e
, f3de8f00
, 0, (), noargs
, noargs
),
20349 #define ARM_VARIANT & arm_ext_pan
20350 #undef THUMB_VARIANT
20351 #define THUMB_VARIANT & arm_ext_pan
20353 TUF("setpan", 1100000, b610
, 1, (I7
), setpan
, t_setpan
),
20356 #define ARM_VARIANT & arm_ext_v6t2
20357 #undef THUMB_VARIANT
20358 #define THUMB_VARIANT & arm_ext_v6t2
20360 TCE("bfc", 7c0001f
, f36f0000
, 3, (RRnpc
, I31
, I32
), bfc
, t_bfc
),
20361 TCE("bfi", 7c00010
, f3600000
, 4, (RRnpc
, RRnpc_I0
, I31
, I32
), bfi
, t_bfi
),
20362 TCE("sbfx", 7a00050
, f3400000
, 4, (RR
, RR
, I31
, I32
), bfx
, t_bfx
),
20363 TCE("ubfx", 7e00050
, f3c00000
, 4, (RR
, RR
, I31
, I32
), bfx
, t_bfx
),
20365 TCE("mls", 0600090, fb000010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mlas
, t_mla
),
20366 TCE("rbit", 6ff0f30
, fa90f0a0
, 2, (RR
, RR
), rd_rm
, t_rbit
),
20368 TC3("ldrht", 03000b0
, f8300e00
, 2, (RRnpc_npcsp
, ADDR
), ldsttv4
, t_ldstt
),
20369 TC3("ldrsht", 03000f0
, f9300e00
, 2, (RRnpc_npcsp
, ADDR
), ldsttv4
, t_ldstt
),
20370 TC3("ldrsbt", 03000d0
, f9100e00
, 2, (RRnpc_npcsp
, ADDR
), ldsttv4
, t_ldstt
),
20371 TC3("strht", 02000b0
, f8200e00
, 2, (RRnpc_npcsp
, ADDR
), ldsttv4
, t_ldstt
),
20374 #define ARM_VARIANT & arm_ext_v3
20375 #undef THUMB_VARIANT
20376 #define THUMB_VARIANT & arm_ext_v6t2
20378 TUE("csdb", 320f014
, f3af8014
, 0, (), noargs
, t_csdb
),
20379 TUF("ssbb", 57ff040
, f3bf8f40
, 0, (), noargs
, t_csdb
),
20380 TUF("pssbb", 57ff044
, f3bf8f44
, 0, (), noargs
, t_csdb
),
20383 #define ARM_VARIANT & arm_ext_v6t2
20384 #undef THUMB_VARIANT
20385 #define THUMB_VARIANT & arm_ext_v6t2_v8m
20386 TCE("movw", 3000000, f2400000
, 2, (RRnpc
, HALF
), mov16
, t_mov16
),
20387 TCE("movt", 3400000, f2c00000
, 2, (RRnpc
, HALF
), mov16
, t_mov16
),
20389 /* Thumb-only instructions. */
20391 #define ARM_VARIANT NULL
20392 TUE("cbnz", 0, b900
, 2, (RR
, EXP
), 0, t_cbz
),
20393 TUE("cbz", 0, b100
, 2, (RR
, EXP
), 0, t_cbz
),
20395 /* ARM does not really have an IT instruction, so always allow it.
20396 The opcode is copied from Thumb in order to allow warnings in
20397 -mimplicit-it=[never | arm] modes. */
20399 #define ARM_VARIANT & arm_ext_v1
20400 #undef THUMB_VARIANT
20401 #define THUMB_VARIANT & arm_ext_v6t2
20403 TUE("it", bf08
, bf08
, 1, (COND
), it
, t_it
),
20404 TUE("itt", bf0c
, bf0c
, 1, (COND
), it
, t_it
),
20405 TUE("ite", bf04
, bf04
, 1, (COND
), it
, t_it
),
20406 TUE("ittt", bf0e
, bf0e
, 1, (COND
), it
, t_it
),
20407 TUE("itet", bf06
, bf06
, 1, (COND
), it
, t_it
),
20408 TUE("itte", bf0a
, bf0a
, 1, (COND
), it
, t_it
),
20409 TUE("itee", bf02
, bf02
, 1, (COND
), it
, t_it
),
20410 TUE("itttt", bf0f
, bf0f
, 1, (COND
), it
, t_it
),
20411 TUE("itett", bf07
, bf07
, 1, (COND
), it
, t_it
),
20412 TUE("ittet", bf0b
, bf0b
, 1, (COND
), it
, t_it
),
20413 TUE("iteet", bf03
, bf03
, 1, (COND
), it
, t_it
),
20414 TUE("ittte", bf0d
, bf0d
, 1, (COND
), it
, t_it
),
20415 TUE("itete", bf05
, bf05
, 1, (COND
), it
, t_it
),
20416 TUE("ittee", bf09
, bf09
, 1, (COND
), it
, t_it
),
20417 TUE("iteee", bf01
, bf01
, 1, (COND
), it
, t_it
),
20418 /* ARM/Thumb-2 instructions with no Thumb-1 equivalent. */
20419 TC3("rrx", 01a00060
, ea4f0030
, 2, (RR
, RR
), rd_rm
, t_rrx
),
20420 TC3("rrxs", 01b00060
, ea5f0030
, 2, (RR
, RR
), rd_rm
, t_rrx
),
20422 /* Thumb2 only instructions. */
20424 #define ARM_VARIANT NULL
20426 TCE("addw", 0, f2000000
, 3, (RR
, RR
, EXPi
), 0, t_add_sub_w
),
20427 TCE("subw", 0, f2a00000
, 3, (RR
, RR
, EXPi
), 0, t_add_sub_w
),
20428 TCE("orn", 0, ea600000
, 3, (RR
, oRR
, SH
), 0, t_orn
),
20429 TCE("orns", 0, ea700000
, 3, (RR
, oRR
, SH
), 0, t_orn
),
20430 TCE("tbb", 0, e8d0f000
, 1, (TB
), 0, t_tb
),
20431 TCE("tbh", 0, e8d0f010
, 1, (TB
), 0, t_tb
),
20433 /* Hardware division instructions. */
20435 #define ARM_VARIANT & arm_ext_adiv
20436 #undef THUMB_VARIANT
20437 #define THUMB_VARIANT & arm_ext_div
20439 TCE("sdiv", 710f010
, fb90f0f0
, 3, (RR
, oRR
, RR
), div
, t_div
),
20440 TCE("udiv", 730f010
, fbb0f0f0
, 3, (RR
, oRR
, RR
), div
, t_div
),
20442 /* ARM V6M/V7 instructions. */
20444 #define ARM_VARIANT & arm_ext_barrier
20445 #undef THUMB_VARIANT
20446 #define THUMB_VARIANT & arm_ext_barrier
20448 TUF("dmb", 57ff050
, f3bf8f50
, 1, (oBARRIER_I15
), barrier
, barrier
),
20449 TUF("dsb", 57ff040
, f3bf8f40
, 1, (oBARRIER_I15
), barrier
, barrier
),
20450 TUF("isb", 57ff060
, f3bf8f60
, 1, (oBARRIER_I15
), barrier
, barrier
),
20452 /* ARM V7 instructions. */
20454 #define ARM_VARIANT & arm_ext_v7
20455 #undef THUMB_VARIANT
20456 #define THUMB_VARIANT & arm_ext_v7
20458 TUF("pli", 450f000
, f910f000
, 1, (ADDR
), pli
, t_pld
),
20459 TCE("dbg", 320f0f0
, f3af80f0
, 1, (I15
), dbg
, t_dbg
),
20462 #define ARM_VARIANT & arm_ext_mp
20463 #undef THUMB_VARIANT
20464 #define THUMB_VARIANT & arm_ext_mp
20466 TUF("pldw", 410f000
, f830f000
, 1, (ADDR
), pld
, t_pld
),
20468 /* AArchv8 instructions. */
20470 #define ARM_VARIANT & arm_ext_v8
20472 /* Instructions shared between armv8-a and armv8-m. */
20473 #undef THUMB_VARIANT
20474 #define THUMB_VARIANT & arm_ext_atomics
20476 TCE("lda", 1900c9f
, e8d00faf
, 2, (RRnpc
, RRnpcb
), rd_rn
, rd_rn
),
20477 TCE("ldab", 1d00c9f
, e8d00f8f
, 2, (RRnpc
, RRnpcb
), rd_rn
, rd_rn
),
20478 TCE("ldah", 1f00c9f
, e8d00f9f
, 2, (RRnpc
, RRnpcb
), rd_rn
, rd_rn
),
20479 TCE("stl", 180fc90
, e8c00faf
, 2, (RRnpc
, RRnpcb
), rm_rn
, rd_rn
),
20480 TCE("stlb", 1c0fc90
, e8c00f8f
, 2, (RRnpc
, RRnpcb
), rm_rn
, rd_rn
),
20481 TCE("stlh", 1e0fc90
, e8c00f9f
, 2, (RRnpc
, RRnpcb
), rm_rn
, rd_rn
),
20482 TCE("ldaex", 1900e9f
, e8d00fef
, 2, (RRnpc
, RRnpcb
), rd_rn
, rd_rn
),
20483 TCE("ldaexb", 1d00e9f
, e8d00fcf
, 2, (RRnpc
,RRnpcb
), rd_rn
, rd_rn
),
20484 TCE("ldaexh", 1f00e9f
, e8d00fdf
, 2, (RRnpc
, RRnpcb
), rd_rn
, rd_rn
),
20485 TCE("stlex", 1800e90
, e8c00fe0
, 3, (RRnpc
, RRnpc
, RRnpcb
),
20487 TCE("stlexb", 1c00e90
, e8c00fc0
, 3, (RRnpc
, RRnpc
, RRnpcb
),
20489 TCE("stlexh", 1e00e90
, e8c00fd0
, 3, (RRnpc
, RRnpc
, RRnpcb
),
20491 #undef THUMB_VARIANT
20492 #define THUMB_VARIANT & arm_ext_v8
20494 tCE("sevl", 320f005
, _sevl
, 0, (), noargs
, t_hint
),
20495 TCE("ldaexd", 1b00e9f
, e8d000ff
, 3, (RRnpc
, oRRnpc
, RRnpcb
),
20497 TCE("stlexd", 1a00e90
, e8c000f0
, 4, (RRnpc
, RRnpc
, oRRnpc
, RRnpcb
),
20500 /* Defined in V8 but is in undefined encoding space for earlier
20501 architectures. However earlier architectures are required to treat
20502 this instuction as a semihosting trap as well. Hence while not explicitly
20503 defined as such, it is in fact correct to define the instruction for all
20505 #undef THUMB_VARIANT
20506 #define THUMB_VARIANT & arm_ext_v1
20508 #define ARM_VARIANT & arm_ext_v1
20509 TUE("hlt", 1000070, ba80
, 1, (oIffffb
), bkpt
, t_hlt
),
20511 /* ARMv8 T32 only. */
20513 #define ARM_VARIANT NULL
20514 TUF("dcps1", 0, f78f8001
, 0, (), noargs
, noargs
),
20515 TUF("dcps2", 0, f78f8002
, 0, (), noargs
, noargs
),
20516 TUF("dcps3", 0, f78f8003
, 0, (), noargs
, noargs
),
20518 /* FP for ARMv8. */
20520 #define ARM_VARIANT & fpu_vfp_ext_armv8xd
20521 #undef THUMB_VARIANT
20522 #define THUMB_VARIANT & fpu_vfp_ext_armv8xd
20524 nUF(vseleq
, _vseleq
, 3, (RVSD
, RVSD
, RVSD
), vsel
),
20525 nUF(vselvs
, _vselvs
, 3, (RVSD
, RVSD
, RVSD
), vsel
),
20526 nUF(vselge
, _vselge
, 3, (RVSD
, RVSD
, RVSD
), vsel
),
20527 nUF(vselgt
, _vselgt
, 3, (RVSD
, RVSD
, RVSD
), vsel
),
20528 nUF(vmaxnm
, _vmaxnm
, 3, (RNSDQ
, oRNSDQ
, RNSDQ
), vmaxnm
),
20529 nUF(vminnm
, _vminnm
, 3, (RNSDQ
, oRNSDQ
, RNSDQ
), vmaxnm
),
20530 nUF(vcvta
, _vcvta
, 2, (RNSDQ
, oRNSDQ
), neon_cvta
),
20531 nUF(vcvtn
, _vcvta
, 2, (RNSDQ
, oRNSDQ
), neon_cvtn
),
20532 nUF(vcvtp
, _vcvta
, 2, (RNSDQ
, oRNSDQ
), neon_cvtp
),
20533 nUF(vcvtm
, _vcvta
, 2, (RNSDQ
, oRNSDQ
), neon_cvtm
),
20534 nCE(vrintr
, _vrintr
, 2, (RNSDQ
, oRNSDQ
), vrintr
),
20535 nCE(vrintz
, _vrintr
, 2, (RNSDQ
, oRNSDQ
), vrintz
),
20536 nCE(vrintx
, _vrintr
, 2, (RNSDQ
, oRNSDQ
), vrintx
),
20537 nUF(vrinta
, _vrinta
, 2, (RNSDQ
, oRNSDQ
), vrinta
),
20538 nUF(vrintn
, _vrinta
, 2, (RNSDQ
, oRNSDQ
), vrintn
),
20539 nUF(vrintp
, _vrinta
, 2, (RNSDQ
, oRNSDQ
), vrintp
),
20540 nUF(vrintm
, _vrinta
, 2, (RNSDQ
, oRNSDQ
), vrintm
),
20542 /* Crypto v1 extensions. */
20544 #define ARM_VARIANT & fpu_crypto_ext_armv8
20545 #undef THUMB_VARIANT
20546 #define THUMB_VARIANT & fpu_crypto_ext_armv8
20548 nUF(aese
, _aes
, 2, (RNQ
, RNQ
), aese
),
20549 nUF(aesd
, _aes
, 2, (RNQ
, RNQ
), aesd
),
20550 nUF(aesmc
, _aes
, 2, (RNQ
, RNQ
), aesmc
),
20551 nUF(aesimc
, _aes
, 2, (RNQ
, RNQ
), aesimc
),
20552 nUF(sha1c
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha1c
),
20553 nUF(sha1p
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha1p
),
20554 nUF(sha1m
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha1m
),
20555 nUF(sha1su0
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha1su0
),
20556 nUF(sha256h
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha256h
),
20557 nUF(sha256h2
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha256h2
),
20558 nUF(sha256su1
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha256su1
),
20559 nUF(sha1h
, _sha1h
, 2, (RNQ
, RNQ
), sha1h
),
20560 nUF(sha1su1
, _sha2op
, 2, (RNQ
, RNQ
), sha1su1
),
20561 nUF(sha256su0
, _sha2op
, 2, (RNQ
, RNQ
), sha256su0
),
20564 #define ARM_VARIANT & crc_ext_armv8
20565 #undef THUMB_VARIANT
20566 #define THUMB_VARIANT & crc_ext_armv8
20567 TUEc("crc32b", 1000040, fac0f080
, 3, (RR
, oRR
, RR
), crc32b
),
20568 TUEc("crc32h", 1200040, fac0f090
, 3, (RR
, oRR
, RR
), crc32h
),
20569 TUEc("crc32w", 1400040, fac0f0a0
, 3, (RR
, oRR
, RR
), crc32w
),
20570 TUEc("crc32cb",1000240, fad0f080
, 3, (RR
, oRR
, RR
), crc32cb
),
20571 TUEc("crc32ch",1200240, fad0f090
, 3, (RR
, oRR
, RR
), crc32ch
),
20572 TUEc("crc32cw",1400240, fad0f0a0
, 3, (RR
, oRR
, RR
), crc32cw
),
20574 /* ARMv8.2 RAS extension. */
20576 #define ARM_VARIANT & arm_ext_ras
20577 #undef THUMB_VARIANT
20578 #define THUMB_VARIANT & arm_ext_ras
20579 TUE ("esb", 320f010
, f3af8010
, 0, (), noargs
, noargs
),
20582 #define ARM_VARIANT & arm_ext_v8_3
20583 #undef THUMB_VARIANT
20584 #define THUMB_VARIANT & arm_ext_v8_3
20585 NCE (vjcvt
, eb90bc0
, 2, (RVS
, RVD
), vjcvt
),
20586 NUF (vcmla
, 0, 4, (RNDQ
, RNDQ
, RNDQ_RNSC
, EXPi
), vcmla
),
20587 NUF (vcadd
, 0, 4, (RNDQ
, RNDQ
, RNDQ
, EXPi
), vcadd
),
20590 #define ARM_VARIANT & fpu_neon_ext_dotprod
20591 #undef THUMB_VARIANT
20592 #define THUMB_VARIANT & fpu_neon_ext_dotprod
20593 NUF (vsdot
, d00
, 3, (RNDQ
, RNDQ
, RNDQ_RNSC
), neon_dotproduct_s
),
20594 NUF (vudot
, d00
, 3, (RNDQ
, RNDQ
, RNDQ_RNSC
), neon_dotproduct_u
),
20597 #define ARM_VARIANT & fpu_fpa_ext_v1 /* Core FPA instruction set (V1). */
20598 #undef THUMB_VARIANT
20599 #define THUMB_VARIANT NULL
20601 cCE("wfs", e200110
, 1, (RR
), rd
),
20602 cCE("rfs", e300110
, 1, (RR
), rd
),
20603 cCE("wfc", e400110
, 1, (RR
), rd
),
20604 cCE("rfc", e500110
, 1, (RR
), rd
),
20606 cCL("ldfs", c100100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
20607 cCL("ldfd", c108100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
20608 cCL("ldfe", c500100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
20609 cCL("ldfp", c508100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
20611 cCL("stfs", c000100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
20612 cCL("stfd", c008100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
20613 cCL("stfe", c400100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
20614 cCL("stfp", c408100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
20616 cCL("mvfs", e008100
, 2, (RF
, RF_IF
), rd_rm
),
20617 cCL("mvfsp", e008120
, 2, (RF
, RF_IF
), rd_rm
),
20618 cCL("mvfsm", e008140
, 2, (RF
, RF_IF
), rd_rm
),
20619 cCL("mvfsz", e008160
, 2, (RF
, RF_IF
), rd_rm
),
20620 cCL("mvfd", e008180
, 2, (RF
, RF_IF
), rd_rm
),
20621 cCL("mvfdp", e0081a0
, 2, (RF
, RF_IF
), rd_rm
),
20622 cCL("mvfdm", e0081c0
, 2, (RF
, RF_IF
), rd_rm
),
20623 cCL("mvfdz", e0081e0
, 2, (RF
, RF_IF
), rd_rm
),
20624 cCL("mvfe", e088100
, 2, (RF
, RF_IF
), rd_rm
),
20625 cCL("mvfep", e088120
, 2, (RF
, RF_IF
), rd_rm
),
20626 cCL("mvfem", e088140
, 2, (RF
, RF_IF
), rd_rm
),
20627 cCL("mvfez", e088160
, 2, (RF
, RF_IF
), rd_rm
),
20629 cCL("mnfs", e108100
, 2, (RF
, RF_IF
), rd_rm
),
20630 cCL("mnfsp", e108120
, 2, (RF
, RF_IF
), rd_rm
),
20631 cCL("mnfsm", e108140
, 2, (RF
, RF_IF
), rd_rm
),
20632 cCL("mnfsz", e108160
, 2, (RF
, RF_IF
), rd_rm
),
20633 cCL("mnfd", e108180
, 2, (RF
, RF_IF
), rd_rm
),
20634 cCL("mnfdp", e1081a0
, 2, (RF
, RF_IF
), rd_rm
),
20635 cCL("mnfdm", e1081c0
, 2, (RF
, RF_IF
), rd_rm
),
20636 cCL("mnfdz", e1081e0
, 2, (RF
, RF_IF
), rd_rm
),
20637 cCL("mnfe", e188100
, 2, (RF
, RF_IF
), rd_rm
),
20638 cCL("mnfep", e188120
, 2, (RF
, RF_IF
), rd_rm
),
20639 cCL("mnfem", e188140
, 2, (RF
, RF_IF
), rd_rm
),
20640 cCL("mnfez", e188160
, 2, (RF
, RF_IF
), rd_rm
),
20642 cCL("abss", e208100
, 2, (RF
, RF_IF
), rd_rm
),
20643 cCL("abssp", e208120
, 2, (RF
, RF_IF
), rd_rm
),
20644 cCL("abssm", e208140
, 2, (RF
, RF_IF
), rd_rm
),
20645 cCL("abssz", e208160
, 2, (RF
, RF_IF
), rd_rm
),
20646 cCL("absd", e208180
, 2, (RF
, RF_IF
), rd_rm
),
20647 cCL("absdp", e2081a0
, 2, (RF
, RF_IF
), rd_rm
),
20648 cCL("absdm", e2081c0
, 2, (RF
, RF_IF
), rd_rm
),
20649 cCL("absdz", e2081e0
, 2, (RF
, RF_IF
), rd_rm
),
20650 cCL("abse", e288100
, 2, (RF
, RF_IF
), rd_rm
),
20651 cCL("absep", e288120
, 2, (RF
, RF_IF
), rd_rm
),
20652 cCL("absem", e288140
, 2, (RF
, RF_IF
), rd_rm
),
20653 cCL("absez", e288160
, 2, (RF
, RF_IF
), rd_rm
),
20655 cCL("rnds", e308100
, 2, (RF
, RF_IF
), rd_rm
),
20656 cCL("rndsp", e308120
, 2, (RF
, RF_IF
), rd_rm
),
20657 cCL("rndsm", e308140
, 2, (RF
, RF_IF
), rd_rm
),
20658 cCL("rndsz", e308160
, 2, (RF
, RF_IF
), rd_rm
),
20659 cCL("rndd", e308180
, 2, (RF
, RF_IF
), rd_rm
),
20660 cCL("rnddp", e3081a0
, 2, (RF
, RF_IF
), rd_rm
),
20661 cCL("rnddm", e3081c0
, 2, (RF
, RF_IF
), rd_rm
),
20662 cCL("rnddz", e3081e0
, 2, (RF
, RF_IF
), rd_rm
),
20663 cCL("rnde", e388100
, 2, (RF
, RF_IF
), rd_rm
),
20664 cCL("rndep", e388120
, 2, (RF
, RF_IF
), rd_rm
),
20665 cCL("rndem", e388140
, 2, (RF
, RF_IF
), rd_rm
),
20666 cCL("rndez", e388160
, 2, (RF
, RF_IF
), rd_rm
),
20668 cCL("sqts", e408100
, 2, (RF
, RF_IF
), rd_rm
),
20669 cCL("sqtsp", e408120
, 2, (RF
, RF_IF
), rd_rm
),
20670 cCL("sqtsm", e408140
, 2, (RF
, RF_IF
), rd_rm
),
20671 cCL("sqtsz", e408160
, 2, (RF
, RF_IF
), rd_rm
),
20672 cCL("sqtd", e408180
, 2, (RF
, RF_IF
), rd_rm
),
20673 cCL("sqtdp", e4081a0
, 2, (RF
, RF_IF
), rd_rm
),
20674 cCL("sqtdm", e4081c0
, 2, (RF
, RF_IF
), rd_rm
),
20675 cCL("sqtdz", e4081e0
, 2, (RF
, RF_IF
), rd_rm
),
20676 cCL("sqte", e488100
, 2, (RF
, RF_IF
), rd_rm
),
20677 cCL("sqtep", e488120
, 2, (RF
, RF_IF
), rd_rm
),
20678 cCL("sqtem", e488140
, 2, (RF
, RF_IF
), rd_rm
),
20679 cCL("sqtez", e488160
, 2, (RF
, RF_IF
), rd_rm
),
20681 cCL("logs", e508100
, 2, (RF
, RF_IF
), rd_rm
),
20682 cCL("logsp", e508120
, 2, (RF
, RF_IF
), rd_rm
),
20683 cCL("logsm", e508140
, 2, (RF
, RF_IF
), rd_rm
),
20684 cCL("logsz", e508160
, 2, (RF
, RF_IF
), rd_rm
),
20685 cCL("logd", e508180
, 2, (RF
, RF_IF
), rd_rm
),
20686 cCL("logdp", e5081a0
, 2, (RF
, RF_IF
), rd_rm
),
20687 cCL("logdm", e5081c0
, 2, (RF
, RF_IF
), rd_rm
),
20688 cCL("logdz", e5081e0
, 2, (RF
, RF_IF
), rd_rm
),
20689 cCL("loge", e588100
, 2, (RF
, RF_IF
), rd_rm
),
20690 cCL("logep", e588120
, 2, (RF
, RF_IF
), rd_rm
),
20691 cCL("logem", e588140
, 2, (RF
, RF_IF
), rd_rm
),
20692 cCL("logez", e588160
, 2, (RF
, RF_IF
), rd_rm
),
20694 cCL("lgns", e608100
, 2, (RF
, RF_IF
), rd_rm
),
20695 cCL("lgnsp", e608120
, 2, (RF
, RF_IF
), rd_rm
),
20696 cCL("lgnsm", e608140
, 2, (RF
, RF_IF
), rd_rm
),
20697 cCL("lgnsz", e608160
, 2, (RF
, RF_IF
), rd_rm
),
20698 cCL("lgnd", e608180
, 2, (RF
, RF_IF
), rd_rm
),
20699 cCL("lgndp", e6081a0
, 2, (RF
, RF_IF
), rd_rm
),
20700 cCL("lgndm", e6081c0
, 2, (RF
, RF_IF
), rd_rm
),
20701 cCL("lgndz", e6081e0
, 2, (RF
, RF_IF
), rd_rm
),
20702 cCL("lgne", e688100
, 2, (RF
, RF_IF
), rd_rm
),
20703 cCL("lgnep", e688120
, 2, (RF
, RF_IF
), rd_rm
),
20704 cCL("lgnem", e688140
, 2, (RF
, RF_IF
), rd_rm
),
20705 cCL("lgnez", e688160
, 2, (RF
, RF_IF
), rd_rm
),
20707 cCL("exps", e708100
, 2, (RF
, RF_IF
), rd_rm
),
20708 cCL("expsp", e708120
, 2, (RF
, RF_IF
), rd_rm
),
20709 cCL("expsm", e708140
, 2, (RF
, RF_IF
), rd_rm
),
20710 cCL("expsz", e708160
, 2, (RF
, RF_IF
), rd_rm
),
20711 cCL("expd", e708180
, 2, (RF
, RF_IF
), rd_rm
),
20712 cCL("expdp", e7081a0
, 2, (RF
, RF_IF
), rd_rm
),
20713 cCL("expdm", e7081c0
, 2, (RF
, RF_IF
), rd_rm
),
20714 cCL("expdz", e7081e0
, 2, (RF
, RF_IF
), rd_rm
),
20715 cCL("expe", e788100
, 2, (RF
, RF_IF
), rd_rm
),
20716 cCL("expep", e788120
, 2, (RF
, RF_IF
), rd_rm
),
20717 cCL("expem", e788140
, 2, (RF
, RF_IF
), rd_rm
),
20718 cCL("expdz", e788160
, 2, (RF
, RF_IF
), rd_rm
),
20720 cCL("sins", e808100
, 2, (RF
, RF_IF
), rd_rm
),
20721 cCL("sinsp", e808120
, 2, (RF
, RF_IF
), rd_rm
),
20722 cCL("sinsm", e808140
, 2, (RF
, RF_IF
), rd_rm
),
20723 cCL("sinsz", e808160
, 2, (RF
, RF_IF
), rd_rm
),
20724 cCL("sind", e808180
, 2, (RF
, RF_IF
), rd_rm
),
20725 cCL("sindp", e8081a0
, 2, (RF
, RF_IF
), rd_rm
),
20726 cCL("sindm", e8081c0
, 2, (RF
, RF_IF
), rd_rm
),
20727 cCL("sindz", e8081e0
, 2, (RF
, RF_IF
), rd_rm
),
20728 cCL("sine", e888100
, 2, (RF
, RF_IF
), rd_rm
),
20729 cCL("sinep", e888120
, 2, (RF
, RF_IF
), rd_rm
),
20730 cCL("sinem", e888140
, 2, (RF
, RF_IF
), rd_rm
),
20731 cCL("sinez", e888160
, 2, (RF
, RF_IF
), rd_rm
),
20733 cCL("coss", e908100
, 2, (RF
, RF_IF
), rd_rm
),
20734 cCL("cossp", e908120
, 2, (RF
, RF_IF
), rd_rm
),
20735 cCL("cossm", e908140
, 2, (RF
, RF_IF
), rd_rm
),
20736 cCL("cossz", e908160
, 2, (RF
, RF_IF
), rd_rm
),
20737 cCL("cosd", e908180
, 2, (RF
, RF_IF
), rd_rm
),
20738 cCL("cosdp", e9081a0
, 2, (RF
, RF_IF
), rd_rm
),
20739 cCL("cosdm", e9081c0
, 2, (RF
, RF_IF
), rd_rm
),
20740 cCL("cosdz", e9081e0
, 2, (RF
, RF_IF
), rd_rm
),
20741 cCL("cose", e988100
, 2, (RF
, RF_IF
), rd_rm
),
20742 cCL("cosep", e988120
, 2, (RF
, RF_IF
), rd_rm
),
20743 cCL("cosem", e988140
, 2, (RF
, RF_IF
), rd_rm
),
20744 cCL("cosez", e988160
, 2, (RF
, RF_IF
), rd_rm
),
20746 cCL("tans", ea08100
, 2, (RF
, RF_IF
), rd_rm
),
20747 cCL("tansp", ea08120
, 2, (RF
, RF_IF
), rd_rm
),
20748 cCL("tansm", ea08140
, 2, (RF
, RF_IF
), rd_rm
),
20749 cCL("tansz", ea08160
, 2, (RF
, RF_IF
), rd_rm
),
20750 cCL("tand", ea08180
, 2, (RF
, RF_IF
), rd_rm
),
20751 cCL("tandp", ea081a0
, 2, (RF
, RF_IF
), rd_rm
),
20752 cCL("tandm", ea081c0
, 2, (RF
, RF_IF
), rd_rm
),
20753 cCL("tandz", ea081e0
, 2, (RF
, RF_IF
), rd_rm
),
20754 cCL("tane", ea88100
, 2, (RF
, RF_IF
), rd_rm
),
20755 cCL("tanep", ea88120
, 2, (RF
, RF_IF
), rd_rm
),
20756 cCL("tanem", ea88140
, 2, (RF
, RF_IF
), rd_rm
),
20757 cCL("tanez", ea88160
, 2, (RF
, RF_IF
), rd_rm
),
20759 cCL("asns", eb08100
, 2, (RF
, RF_IF
), rd_rm
),
20760 cCL("asnsp", eb08120
, 2, (RF
, RF_IF
), rd_rm
),
20761 cCL("asnsm", eb08140
, 2, (RF
, RF_IF
), rd_rm
),
20762 cCL("asnsz", eb08160
, 2, (RF
, RF_IF
), rd_rm
),
20763 cCL("asnd", eb08180
, 2, (RF
, RF_IF
), rd_rm
),
20764 cCL("asndp", eb081a0
, 2, (RF
, RF_IF
), rd_rm
),
20765 cCL("asndm", eb081c0
, 2, (RF
, RF_IF
), rd_rm
),
20766 cCL("asndz", eb081e0
, 2, (RF
, RF_IF
), rd_rm
),
20767 cCL("asne", eb88100
, 2, (RF
, RF_IF
), rd_rm
),
20768 cCL("asnep", eb88120
, 2, (RF
, RF_IF
), rd_rm
),
20769 cCL("asnem", eb88140
, 2, (RF
, RF_IF
), rd_rm
),
20770 cCL("asnez", eb88160
, 2, (RF
, RF_IF
), rd_rm
),
20772 cCL("acss", ec08100
, 2, (RF
, RF_IF
), rd_rm
),
20773 cCL("acssp", ec08120
, 2, (RF
, RF_IF
), rd_rm
),
20774 cCL("acssm", ec08140
, 2, (RF
, RF_IF
), rd_rm
),
20775 cCL("acssz", ec08160
, 2, (RF
, RF_IF
), rd_rm
),
20776 cCL("acsd", ec08180
, 2, (RF
, RF_IF
), rd_rm
),
20777 cCL("acsdp", ec081a0
, 2, (RF
, RF_IF
), rd_rm
),
20778 cCL("acsdm", ec081c0
, 2, (RF
, RF_IF
), rd_rm
),
20779 cCL("acsdz", ec081e0
, 2, (RF
, RF_IF
), rd_rm
),
20780 cCL("acse", ec88100
, 2, (RF
, RF_IF
), rd_rm
),
20781 cCL("acsep", ec88120
, 2, (RF
, RF_IF
), rd_rm
),
20782 cCL("acsem", ec88140
, 2, (RF
, RF_IF
), rd_rm
),
20783 cCL("acsez", ec88160
, 2, (RF
, RF_IF
), rd_rm
),
20785 cCL("atns", ed08100
, 2, (RF
, RF_IF
), rd_rm
),
20786 cCL("atnsp", ed08120
, 2, (RF
, RF_IF
), rd_rm
),
20787 cCL("atnsm", ed08140
, 2, (RF
, RF_IF
), rd_rm
),
20788 cCL("atnsz", ed08160
, 2, (RF
, RF_IF
), rd_rm
),
20789 cCL("atnd", ed08180
, 2, (RF
, RF_IF
), rd_rm
),
20790 cCL("atndp", ed081a0
, 2, (RF
, RF_IF
), rd_rm
),
20791 cCL("atndm", ed081c0
, 2, (RF
, RF_IF
), rd_rm
),
20792 cCL("atndz", ed081e0
, 2, (RF
, RF_IF
), rd_rm
),
20793 cCL("atne", ed88100
, 2, (RF
, RF_IF
), rd_rm
),
20794 cCL("atnep", ed88120
, 2, (RF
, RF_IF
), rd_rm
),
20795 cCL("atnem", ed88140
, 2, (RF
, RF_IF
), rd_rm
),
20796 cCL("atnez", ed88160
, 2, (RF
, RF_IF
), rd_rm
),
20798 cCL("urds", ee08100
, 2, (RF
, RF_IF
), rd_rm
),
20799 cCL("urdsp", ee08120
, 2, (RF
, RF_IF
), rd_rm
),
20800 cCL("urdsm", ee08140
, 2, (RF
, RF_IF
), rd_rm
),
20801 cCL("urdsz", ee08160
, 2, (RF
, RF_IF
), rd_rm
),
20802 cCL("urdd", ee08180
, 2, (RF
, RF_IF
), rd_rm
),
20803 cCL("urddp", ee081a0
, 2, (RF
, RF_IF
), rd_rm
),
20804 cCL("urddm", ee081c0
, 2, (RF
, RF_IF
), rd_rm
),
20805 cCL("urddz", ee081e0
, 2, (RF
, RF_IF
), rd_rm
),
20806 cCL("urde", ee88100
, 2, (RF
, RF_IF
), rd_rm
),
20807 cCL("urdep", ee88120
, 2, (RF
, RF_IF
), rd_rm
),
20808 cCL("urdem", ee88140
, 2, (RF
, RF_IF
), rd_rm
),
20809 cCL("urdez", ee88160
, 2, (RF
, RF_IF
), rd_rm
),
20811 cCL("nrms", ef08100
, 2, (RF
, RF_IF
), rd_rm
),
20812 cCL("nrmsp", ef08120
, 2, (RF
, RF_IF
), rd_rm
),
20813 cCL("nrmsm", ef08140
, 2, (RF
, RF_IF
), rd_rm
),
20814 cCL("nrmsz", ef08160
, 2, (RF
, RF_IF
), rd_rm
),
20815 cCL("nrmd", ef08180
, 2, (RF
, RF_IF
), rd_rm
),
20816 cCL("nrmdp", ef081a0
, 2, (RF
, RF_IF
), rd_rm
),
20817 cCL("nrmdm", ef081c0
, 2, (RF
, RF_IF
), rd_rm
),
20818 cCL("nrmdz", ef081e0
, 2, (RF
, RF_IF
), rd_rm
),
20819 cCL("nrme", ef88100
, 2, (RF
, RF_IF
), rd_rm
),
20820 cCL("nrmep", ef88120
, 2, (RF
, RF_IF
), rd_rm
),
20821 cCL("nrmem", ef88140
, 2, (RF
, RF_IF
), rd_rm
),
20822 cCL("nrmez", ef88160
, 2, (RF
, RF_IF
), rd_rm
),
20824 cCL("adfs", e000100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20825 cCL("adfsp", e000120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20826 cCL("adfsm", e000140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20827 cCL("adfsz", e000160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20828 cCL("adfd", e000180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20829 cCL("adfdp", e0001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20830 cCL("adfdm", e0001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20831 cCL("adfdz", e0001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20832 cCL("adfe", e080100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20833 cCL("adfep", e080120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20834 cCL("adfem", e080140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20835 cCL("adfez", e080160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20837 cCL("sufs", e200100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20838 cCL("sufsp", e200120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20839 cCL("sufsm", e200140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20840 cCL("sufsz", e200160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20841 cCL("sufd", e200180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20842 cCL("sufdp", e2001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20843 cCL("sufdm", e2001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20844 cCL("sufdz", e2001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20845 cCL("sufe", e280100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20846 cCL("sufep", e280120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20847 cCL("sufem", e280140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20848 cCL("sufez", e280160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20850 cCL("rsfs", e300100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20851 cCL("rsfsp", e300120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20852 cCL("rsfsm", e300140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20853 cCL("rsfsz", e300160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20854 cCL("rsfd", e300180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20855 cCL("rsfdp", e3001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20856 cCL("rsfdm", e3001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20857 cCL("rsfdz", e3001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20858 cCL("rsfe", e380100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20859 cCL("rsfep", e380120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20860 cCL("rsfem", e380140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20861 cCL("rsfez", e380160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20863 cCL("mufs", e100100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20864 cCL("mufsp", e100120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20865 cCL("mufsm", e100140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20866 cCL("mufsz", e100160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20867 cCL("mufd", e100180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20868 cCL("mufdp", e1001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20869 cCL("mufdm", e1001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20870 cCL("mufdz", e1001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20871 cCL("mufe", e180100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20872 cCL("mufep", e180120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20873 cCL("mufem", e180140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20874 cCL("mufez", e180160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20876 cCL("dvfs", e400100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20877 cCL("dvfsp", e400120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20878 cCL("dvfsm", e400140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20879 cCL("dvfsz", e400160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20880 cCL("dvfd", e400180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20881 cCL("dvfdp", e4001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20882 cCL("dvfdm", e4001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20883 cCL("dvfdz", e4001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20884 cCL("dvfe", e480100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20885 cCL("dvfep", e480120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20886 cCL("dvfem", e480140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20887 cCL("dvfez", e480160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20889 cCL("rdfs", e500100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20890 cCL("rdfsp", e500120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20891 cCL("rdfsm", e500140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20892 cCL("rdfsz", e500160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20893 cCL("rdfd", e500180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20894 cCL("rdfdp", e5001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20895 cCL("rdfdm", e5001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20896 cCL("rdfdz", e5001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20897 cCL("rdfe", e580100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20898 cCL("rdfep", e580120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20899 cCL("rdfem", e580140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20900 cCL("rdfez", e580160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20902 cCL("pows", e600100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20903 cCL("powsp", e600120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20904 cCL("powsm", e600140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20905 cCL("powsz", e600160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20906 cCL("powd", e600180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20907 cCL("powdp", e6001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20908 cCL("powdm", e6001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20909 cCL("powdz", e6001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20910 cCL("powe", e680100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20911 cCL("powep", e680120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20912 cCL("powem", e680140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20913 cCL("powez", e680160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20915 cCL("rpws", e700100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20916 cCL("rpwsp", e700120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20917 cCL("rpwsm", e700140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20918 cCL("rpwsz", e700160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20919 cCL("rpwd", e700180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20920 cCL("rpwdp", e7001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20921 cCL("rpwdm", e7001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20922 cCL("rpwdz", e7001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20923 cCL("rpwe", e780100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20924 cCL("rpwep", e780120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20925 cCL("rpwem", e780140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20926 cCL("rpwez", e780160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20928 cCL("rmfs", e800100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20929 cCL("rmfsp", e800120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20930 cCL("rmfsm", e800140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20931 cCL("rmfsz", e800160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20932 cCL("rmfd", e800180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20933 cCL("rmfdp", e8001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20934 cCL("rmfdm", e8001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20935 cCL("rmfdz", e8001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20936 cCL("rmfe", e880100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20937 cCL("rmfep", e880120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20938 cCL("rmfem", e880140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20939 cCL("rmfez", e880160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20941 cCL("fmls", e900100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20942 cCL("fmlsp", e900120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20943 cCL("fmlsm", e900140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20944 cCL("fmlsz", e900160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20945 cCL("fmld", e900180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20946 cCL("fmldp", e9001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20947 cCL("fmldm", e9001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20948 cCL("fmldz", e9001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20949 cCL("fmle", e980100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20950 cCL("fmlep", e980120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20951 cCL("fmlem", e980140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20952 cCL("fmlez", e980160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20954 cCL("fdvs", ea00100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20955 cCL("fdvsp", ea00120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20956 cCL("fdvsm", ea00140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20957 cCL("fdvsz", ea00160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20958 cCL("fdvd", ea00180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20959 cCL("fdvdp", ea001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20960 cCL("fdvdm", ea001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20961 cCL("fdvdz", ea001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20962 cCL("fdve", ea80100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20963 cCL("fdvep", ea80120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20964 cCL("fdvem", ea80140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20965 cCL("fdvez", ea80160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20967 cCL("frds", eb00100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20968 cCL("frdsp", eb00120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20969 cCL("frdsm", eb00140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20970 cCL("frdsz", eb00160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20971 cCL("frdd", eb00180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20972 cCL("frddp", eb001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20973 cCL("frddm", eb001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20974 cCL("frddz", eb001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20975 cCL("frde", eb80100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20976 cCL("frdep", eb80120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20977 cCL("frdem", eb80140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20978 cCL("frdez", eb80160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20980 cCL("pols", ec00100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20981 cCL("polsp", ec00120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20982 cCL("polsm", ec00140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20983 cCL("polsz", ec00160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20984 cCL("pold", ec00180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20985 cCL("poldp", ec001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20986 cCL("poldm", ec001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20987 cCL("poldz", ec001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20988 cCL("pole", ec80100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20989 cCL("polep", ec80120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20990 cCL("polem", ec80140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20991 cCL("polez", ec80160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20993 cCE("cmf", e90f110
, 2, (RF
, RF_IF
), fpa_cmp
),
20994 C3E("cmfe", ed0f110
, 2, (RF
, RF_IF
), fpa_cmp
),
20995 cCE("cnf", eb0f110
, 2, (RF
, RF_IF
), fpa_cmp
),
20996 C3E("cnfe", ef0f110
, 2, (RF
, RF_IF
), fpa_cmp
),
20998 cCL("flts", e000110
, 2, (RF
, RR
), rn_rd
),
20999 cCL("fltsp", e000130
, 2, (RF
, RR
), rn_rd
),
21000 cCL("fltsm", e000150
, 2, (RF
, RR
), rn_rd
),
21001 cCL("fltsz", e000170
, 2, (RF
, RR
), rn_rd
),
21002 cCL("fltd", e000190
, 2, (RF
, RR
), rn_rd
),
21003 cCL("fltdp", e0001b0
, 2, (RF
, RR
), rn_rd
),
21004 cCL("fltdm", e0001d0
, 2, (RF
, RR
), rn_rd
),
21005 cCL("fltdz", e0001f0
, 2, (RF
, RR
), rn_rd
),
21006 cCL("flte", e080110
, 2, (RF
, RR
), rn_rd
),
21007 cCL("fltep", e080130
, 2, (RF
, RR
), rn_rd
),
21008 cCL("fltem", e080150
, 2, (RF
, RR
), rn_rd
),
21009 cCL("fltez", e080170
, 2, (RF
, RR
), rn_rd
),
21011 /* The implementation of the FIX instruction is broken on some
21012 assemblers, in that it accepts a precision specifier as well as a
21013 rounding specifier, despite the fact that this is meaningless.
21014 To be more compatible, we accept it as well, though of course it
21015 does not set any bits. */
21016 cCE("fix", e100110
, 2, (RR
, RF
), rd_rm
),
21017 cCL("fixp", e100130
, 2, (RR
, RF
), rd_rm
),
21018 cCL("fixm", e100150
, 2, (RR
, RF
), rd_rm
),
21019 cCL("fixz", e100170
, 2, (RR
, RF
), rd_rm
),
21020 cCL("fixsp", e100130
, 2, (RR
, RF
), rd_rm
),
21021 cCL("fixsm", e100150
, 2, (RR
, RF
), rd_rm
),
21022 cCL("fixsz", e100170
, 2, (RR
, RF
), rd_rm
),
21023 cCL("fixdp", e100130
, 2, (RR
, RF
), rd_rm
),
21024 cCL("fixdm", e100150
, 2, (RR
, RF
), rd_rm
),
21025 cCL("fixdz", e100170
, 2, (RR
, RF
), rd_rm
),
21026 cCL("fixep", e100130
, 2, (RR
, RF
), rd_rm
),
21027 cCL("fixem", e100150
, 2, (RR
, RF
), rd_rm
),
21028 cCL("fixez", e100170
, 2, (RR
, RF
), rd_rm
),
21030 /* Instructions that were new with the real FPA, call them V2. */
21032 #define ARM_VARIANT & fpu_fpa_ext_v2
21034 cCE("lfm", c100200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
21035 cCL("lfmfd", c900200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
21036 cCL("lfmea", d100200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
21037 cCE("sfm", c000200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
21038 cCL("sfmfd", d000200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
21039 cCL("sfmea", c800200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
21042 #define ARM_VARIANT & fpu_vfp_ext_v1xd /* VFP V1xD (single precision). */
21044 /* Moves and type conversions. */
21045 cCE("fcpys", eb00a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
21046 cCE("fmrs", e100a10
, 2, (RR
, RVS
), vfp_reg_from_sp
),
21047 cCE("fmsr", e000a10
, 2, (RVS
, RR
), vfp_sp_from_reg
),
21048 cCE("fmstat", ef1fa10
, 0, (), noargs
),
21049 cCE("vmrs", ef00a10
, 2, (APSR_RR
, RVC
), vmrs
),
21050 cCE("vmsr", ee00a10
, 2, (RVC
, RR
), vmsr
),
21051 cCE("fsitos", eb80ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
21052 cCE("fuitos", eb80a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
21053 cCE("ftosis", ebd0a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
21054 cCE("ftosizs", ebd0ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
21055 cCE("ftouis", ebc0a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
21056 cCE("ftouizs", ebc0ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
21057 cCE("fmrx", ef00a10
, 2, (RR
, RVC
), rd_rn
),
21058 cCE("fmxr", ee00a10
, 2, (RVC
, RR
), rn_rd
),
21060 /* Memory operations. */
21061 cCE("flds", d100a00
, 2, (RVS
, ADDRGLDC
), vfp_sp_ldst
),
21062 cCE("fsts", d000a00
, 2, (RVS
, ADDRGLDC
), vfp_sp_ldst
),
21063 cCE("fldmias", c900a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmia
),
21064 cCE("fldmfds", c900a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmia
),
21065 cCE("fldmdbs", d300a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmdb
),
21066 cCE("fldmeas", d300a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmdb
),
21067 cCE("fldmiax", c900b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmia
),
21068 cCE("fldmfdx", c900b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmia
),
21069 cCE("fldmdbx", d300b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmdb
),
21070 cCE("fldmeax", d300b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmdb
),
21071 cCE("fstmias", c800a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmia
),
21072 cCE("fstmeas", c800a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmia
),
21073 cCE("fstmdbs", d200a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmdb
),
21074 cCE("fstmfds", d200a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmdb
),
21075 cCE("fstmiax", c800b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmia
),
21076 cCE("fstmeax", c800b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmia
),
21077 cCE("fstmdbx", d200b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmdb
),
21078 cCE("fstmfdx", d200b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmdb
),
21080 /* Monadic operations. */
21081 cCE("fabss", eb00ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
21082 cCE("fnegs", eb10a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
21083 cCE("fsqrts", eb10ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
21085 /* Dyadic operations. */
21086 cCE("fadds", e300a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
21087 cCE("fsubs", e300a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
21088 cCE("fmuls", e200a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
21089 cCE("fdivs", e800a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
21090 cCE("fmacs", e000a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
21091 cCE("fmscs", e100a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
21092 cCE("fnmuls", e200a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
21093 cCE("fnmacs", e000a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
21094 cCE("fnmscs", e100a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
21097 cCE("fcmps", eb40a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
21098 cCE("fcmpzs", eb50a40
, 1, (RVS
), vfp_sp_compare_z
),
21099 cCE("fcmpes", eb40ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
21100 cCE("fcmpezs", eb50ac0
, 1, (RVS
), vfp_sp_compare_z
),
21102 /* Double precision load/store are still present on single precision
21103 implementations. */
21104 cCE("fldd", d100b00
, 2, (RVD
, ADDRGLDC
), vfp_dp_ldst
),
21105 cCE("fstd", d000b00
, 2, (RVD
, ADDRGLDC
), vfp_dp_ldst
),
21106 cCE("fldmiad", c900b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmia
),
21107 cCE("fldmfdd", c900b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmia
),
21108 cCE("fldmdbd", d300b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmdb
),
21109 cCE("fldmead", d300b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmdb
),
21110 cCE("fstmiad", c800b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmia
),
21111 cCE("fstmead", c800b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmia
),
21112 cCE("fstmdbd", d200b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmdb
),
21113 cCE("fstmfdd", d200b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmdb
),
21116 #define ARM_VARIANT & fpu_vfp_ext_v1 /* VFP V1 (Double precision). */
21118 /* Moves and type conversions. */
21119 cCE("fcpyd", eb00b40
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
21120 cCE("fcvtds", eb70ac0
, 2, (RVD
, RVS
), vfp_dp_sp_cvt
),
21121 cCE("fcvtsd", eb70bc0
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
21122 cCE("fmdhr", e200b10
, 2, (RVD
, RR
), vfp_dp_rn_rd
),
21123 cCE("fmdlr", e000b10
, 2, (RVD
, RR
), vfp_dp_rn_rd
),
21124 cCE("fmrdh", e300b10
, 2, (RR
, RVD
), vfp_dp_rd_rn
),
21125 cCE("fmrdl", e100b10
, 2, (RR
, RVD
), vfp_dp_rd_rn
),
21126 cCE("fsitod", eb80bc0
, 2, (RVD
, RVS
), vfp_dp_sp_cvt
),
21127 cCE("fuitod", eb80b40
, 2, (RVD
, RVS
), vfp_dp_sp_cvt
),
21128 cCE("ftosid", ebd0b40
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
21129 cCE("ftosizd", ebd0bc0
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
21130 cCE("ftouid", ebc0b40
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
21131 cCE("ftouizd", ebc0bc0
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
21133 /* Monadic operations. */
21134 cCE("fabsd", eb00bc0
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
21135 cCE("fnegd", eb10b40
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
21136 cCE("fsqrtd", eb10bc0
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
21138 /* Dyadic operations. */
21139 cCE("faddd", e300b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
21140 cCE("fsubd", e300b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
21141 cCE("fmuld", e200b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
21142 cCE("fdivd", e800b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
21143 cCE("fmacd", e000b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
21144 cCE("fmscd", e100b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
21145 cCE("fnmuld", e200b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
21146 cCE("fnmacd", e000b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
21147 cCE("fnmscd", e100b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
21150 cCE("fcmpd", eb40b40
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
21151 cCE("fcmpzd", eb50b40
, 1, (RVD
), vfp_dp_rd
),
21152 cCE("fcmped", eb40bc0
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
21153 cCE("fcmpezd", eb50bc0
, 1, (RVD
), vfp_dp_rd
),
21156 #define ARM_VARIANT & fpu_vfp_ext_v2
21158 cCE("fmsrr", c400a10
, 3, (VRSLST
, RR
, RR
), vfp_sp2_from_reg2
),
21159 cCE("fmrrs", c500a10
, 3, (RR
, RR
, VRSLST
), vfp_reg2_from_sp2
),
21160 cCE("fmdrr", c400b10
, 3, (RVD
, RR
, RR
), vfp_dp_rm_rd_rn
),
21161 cCE("fmrrd", c500b10
, 3, (RR
, RR
, RVD
), vfp_dp_rd_rn_rm
),
21163 /* Instructions which may belong to either the Neon or VFP instruction sets.
21164 Individual encoder functions perform additional architecture checks. */
21166 #define ARM_VARIANT & fpu_vfp_ext_v1xd
21167 #undef THUMB_VARIANT
21168 #define THUMB_VARIANT & fpu_vfp_ext_v1xd
21170 /* These mnemonics are unique to VFP. */
21171 NCE(vsqrt
, 0, 2, (RVSD
, RVSD
), vfp_nsyn_sqrt
),
21172 NCE(vdiv
, 0, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_div
),
21173 nCE(vnmul
, _vnmul
, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_nmul
),
21174 nCE(vnmla
, _vnmla
, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_nmul
),
21175 nCE(vnmls
, _vnmls
, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_nmul
),
21176 nCE(vcmp
, _vcmp
, 2, (RVSD
, RSVD_FI0
), vfp_nsyn_cmp
),
21177 nCE(vcmpe
, _vcmpe
, 2, (RVSD
, RSVD_FI0
), vfp_nsyn_cmp
),
21178 NCE(vpush
, 0, 1, (VRSDLST
), vfp_nsyn_push
),
21179 NCE(vpop
, 0, 1, (VRSDLST
), vfp_nsyn_pop
),
21180 NCE(vcvtz
, 0, 2, (RVSD
, RVSD
), vfp_nsyn_cvtz
),
21182 /* Mnemonics shared by Neon and VFP. */
21183 nCEF(vmul
, _vmul
, 3, (RNSDQ
, oRNSDQ
, RNSDQ_RNSC
), neon_mul
),
21184 nCEF(vmla
, _vmla
, 3, (RNSDQ
, oRNSDQ
, RNSDQ_RNSC
), neon_mac_maybe_scalar
),
21185 nCEF(vmls
, _vmls
, 3, (RNSDQ
, oRNSDQ
, RNSDQ_RNSC
), neon_mac_maybe_scalar
),
21187 nCEF(vadd
, _vadd
, 3, (RNSDQ
, oRNSDQ
, RNSDQ
), neon_addsub_if_i
),
21188 nCEF(vsub
, _vsub
, 3, (RNSDQ
, oRNSDQ
, RNSDQ
), neon_addsub_if_i
),
21190 NCEF(vabs
, 1b10300
, 2, (RNSDQ
, RNSDQ
), neon_abs_neg
),
21191 NCEF(vneg
, 1b10380
, 2, (RNSDQ
, RNSDQ
), neon_abs_neg
),
21193 NCE(vldm
, c900b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
21194 NCE(vldmia
, c900b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
21195 NCE(vldmdb
, d100b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
21196 NCE(vstm
, c800b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
21197 NCE(vstmia
, c800b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
21198 NCE(vstmdb
, d000b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
21199 NCE(vldr
, d100b00
, 2, (RVSD
, ADDRGLDC
), neon_ldr_str
),
21200 NCE(vstr
, d000b00
, 2, (RVSD
, ADDRGLDC
), neon_ldr_str
),
21202 nCEF(vcvt
, _vcvt
, 3, (RNSDQ
, RNSDQ
, oI32z
), neon_cvt
),
21203 nCEF(vcvtr
, _vcvt
, 2, (RNSDQ
, RNSDQ
), neon_cvtr
),
21204 NCEF(vcvtb
, eb20a40
, 2, (RVSD
, RVSD
), neon_cvtb
),
21205 NCEF(vcvtt
, eb20a40
, 2, (RVSD
, RVSD
), neon_cvtt
),
21208 /* NOTE: All VMOV encoding is special-cased! */
21209 NCE(vmov
, 0, 1, (VMOV
), neon_mov
),
21210 NCE(vmovq
, 0, 1, (VMOV
), neon_mov
),
21213 #define ARM_VARIANT & arm_ext_fp16
21214 #undef THUMB_VARIANT
21215 #define THUMB_VARIANT & arm_ext_fp16
21216 /* New instructions added from v8.2, allowing the extraction and insertion of
21217 the upper 16 bits of a 32-bit vector register. */
21218 NCE (vmovx
, eb00a40
, 2, (RVS
, RVS
), neon_movhf
),
21219 NCE (vins
, eb00ac0
, 2, (RVS
, RVS
), neon_movhf
),
21221 /* New backported fma/fms instructions optional in v8.2. */
21222 NCE (vfmal
, 810, 3, (RNDQ
, RNSD
, RNSD_RNSC
), neon_vfmal
),
21223 NCE (vfmsl
, 810, 3, (RNDQ
, RNSD
, RNSD_RNSC
), neon_vfmsl
),
21225 #undef THUMB_VARIANT
21226 #define THUMB_VARIANT & fpu_neon_ext_v1
21228 #define ARM_VARIANT & fpu_neon_ext_v1
21230 /* Data processing with three registers of the same length. */
21231 /* integer ops, valid types S8 S16 S32 U8 U16 U32. */
21232 NUF(vaba
, 0000710, 3, (RNDQ
, RNDQ
, RNDQ
), neon_dyadic_i_su
),
21233 NUF(vabaq
, 0000710, 3, (RNQ
, RNQ
, RNQ
), neon_dyadic_i_su
),
21234 NUF(vhadd
, 0000000, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i_su
),
21235 NUF(vhaddq
, 0000000, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i_su
),
21236 NUF(vrhadd
, 0000100, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i_su
),
21237 NUF(vrhaddq
, 0000100, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i_su
),
21238 NUF(vhsub
, 0000200, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i_su
),
21239 NUF(vhsubq
, 0000200, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i_su
),
21240 /* integer ops, valid types S8 S16 S32 S64 U8 U16 U32 U64. */
21241 NUF(vqadd
, 0000010, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i64_su
),
21242 NUF(vqaddq
, 0000010, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i64_su
),
21243 NUF(vqsub
, 0000210, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i64_su
),
21244 NUF(vqsubq
, 0000210, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i64_su
),
21245 NUF(vrshl
, 0000500, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_rshl
),
21246 NUF(vrshlq
, 0000500, 3, (RNQ
, oRNQ
, RNQ
), neon_rshl
),
21247 NUF(vqrshl
, 0000510, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_rshl
),
21248 NUF(vqrshlq
, 0000510, 3, (RNQ
, oRNQ
, RNQ
), neon_rshl
),
21249 /* If not immediate, fall back to neon_dyadic_i64_su.
21250 shl_imm should accept I8 I16 I32 I64,
21251 qshl_imm should accept S8 S16 S32 S64 U8 U16 U32 U64. */
21252 nUF(vshl
, _vshl
, 3, (RNDQ
, oRNDQ
, RNDQ_I63b
), neon_shl_imm
),
21253 nUF(vshlq
, _vshl
, 3, (RNQ
, oRNQ
, RNDQ_I63b
), neon_shl_imm
),
21254 nUF(vqshl
, _vqshl
, 3, (RNDQ
, oRNDQ
, RNDQ_I63b
), neon_qshl_imm
),
21255 nUF(vqshlq
, _vqshl
, 3, (RNQ
, oRNQ
, RNDQ_I63b
), neon_qshl_imm
),
21256 /* Logic ops, types optional & ignored. */
21257 nUF(vand
, _vand
, 3, (RNDQ
, oRNDQ
, RNDQ_Ibig
), neon_logic
),
21258 nUF(vandq
, _vand
, 3, (RNQ
, oRNQ
, RNDQ_Ibig
), neon_logic
),
21259 nUF(vbic
, _vbic
, 3, (RNDQ
, oRNDQ
, RNDQ_Ibig
), neon_logic
),
21260 nUF(vbicq
, _vbic
, 3, (RNQ
, oRNQ
, RNDQ_Ibig
), neon_logic
),
21261 nUF(vorr
, _vorr
, 3, (RNDQ
, oRNDQ
, RNDQ_Ibig
), neon_logic
),
21262 nUF(vorrq
, _vorr
, 3, (RNQ
, oRNQ
, RNDQ_Ibig
), neon_logic
),
21263 nUF(vorn
, _vorn
, 3, (RNDQ
, oRNDQ
, RNDQ_Ibig
), neon_logic
),
21264 nUF(vornq
, _vorn
, 3, (RNQ
, oRNQ
, RNDQ_Ibig
), neon_logic
),
21265 nUF(veor
, _veor
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_logic
),
21266 nUF(veorq
, _veor
, 3, (RNQ
, oRNQ
, RNQ
), neon_logic
),
21267 /* Bitfield ops, untyped. */
21268 NUF(vbsl
, 1100110, 3, (RNDQ
, RNDQ
, RNDQ
), neon_bitfield
),
21269 NUF(vbslq
, 1100110, 3, (RNQ
, RNQ
, RNQ
), neon_bitfield
),
21270 NUF(vbit
, 1200110, 3, (RNDQ
, RNDQ
, RNDQ
), neon_bitfield
),
21271 NUF(vbitq
, 1200110, 3, (RNQ
, RNQ
, RNQ
), neon_bitfield
),
21272 NUF(vbif
, 1300110, 3, (RNDQ
, RNDQ
, RNDQ
), neon_bitfield
),
21273 NUF(vbifq
, 1300110, 3, (RNQ
, RNQ
, RNQ
), neon_bitfield
),
21274 /* Int and float variants, types S8 S16 S32 U8 U16 U32 F16 F32. */
21275 nUF(vabd
, _vabd
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_if_su
),
21276 nUF(vabdq
, _vabd
, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_if_su
),
21277 nUF(vmax
, _vmax
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_if_su
),
21278 nUF(vmaxq
, _vmax
, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_if_su
),
21279 nUF(vmin
, _vmin
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_if_su
),
21280 nUF(vminq
, _vmin
, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_if_su
),
21281 /* Comparisons. Types S8 S16 S32 U8 U16 U32 F32. Non-immediate versions fall
21282 back to neon_dyadic_if_su. */
21283 nUF(vcge
, _vcge
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_cmp
),
21284 nUF(vcgeq
, _vcge
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_cmp
),
21285 nUF(vcgt
, _vcgt
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_cmp
),
21286 nUF(vcgtq
, _vcgt
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_cmp
),
21287 nUF(vclt
, _vclt
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_cmp_inv
),
21288 nUF(vcltq
, _vclt
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_cmp_inv
),
21289 nUF(vcle
, _vcle
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_cmp_inv
),
21290 nUF(vcleq
, _vcle
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_cmp_inv
),
21291 /* Comparison. Type I8 I16 I32 F32. */
21292 nUF(vceq
, _vceq
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_ceq
),
21293 nUF(vceqq
, _vceq
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_ceq
),
21294 /* As above, D registers only. */
21295 nUF(vpmax
, _vpmax
, 3, (RND
, oRND
, RND
), neon_dyadic_if_su_d
),
21296 nUF(vpmin
, _vpmin
, 3, (RND
, oRND
, RND
), neon_dyadic_if_su_d
),
21297 /* Int and float variants, signedness unimportant. */
21298 nUF(vmlaq
, _vmla
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_mac_maybe_scalar
),
21299 nUF(vmlsq
, _vmls
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_mac_maybe_scalar
),
21300 nUF(vpadd
, _vpadd
, 3, (RND
, oRND
, RND
), neon_dyadic_if_i_d
),
21301 /* Add/sub take types I8 I16 I32 I64 F32. */
21302 nUF(vaddq
, _vadd
, 3, (RNQ
, oRNQ
, RNQ
), neon_addsub_if_i
),
21303 nUF(vsubq
, _vsub
, 3, (RNQ
, oRNQ
, RNQ
), neon_addsub_if_i
),
21304 /* vtst takes sizes 8, 16, 32. */
21305 NUF(vtst
, 0000810, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_tst
),
21306 NUF(vtstq
, 0000810, 3, (RNQ
, oRNQ
, RNQ
), neon_tst
),
21307 /* VMUL takes I8 I16 I32 F32 P8. */
21308 nUF(vmulq
, _vmul
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_mul
),
21309 /* VQD{R}MULH takes S16 S32. */
21310 nUF(vqdmulh
, _vqdmulh
, 3, (RNDQ
, oRNDQ
, RNDQ_RNSC
), neon_qdmulh
),
21311 nUF(vqdmulhq
, _vqdmulh
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_qdmulh
),
21312 nUF(vqrdmulh
, _vqrdmulh
, 3, (RNDQ
, oRNDQ
, RNDQ_RNSC
), neon_qdmulh
),
21313 nUF(vqrdmulhq
, _vqrdmulh
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_qdmulh
),
21314 NUF(vacge
, 0000e10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_fcmp_absolute
),
21315 NUF(vacgeq
, 0000e10
, 3, (RNQ
, oRNQ
, RNQ
), neon_fcmp_absolute
),
21316 NUF(vacgt
, 0200e10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_fcmp_absolute
),
21317 NUF(vacgtq
, 0200e10
, 3, (RNQ
, oRNQ
, RNQ
), neon_fcmp_absolute
),
21318 NUF(vaclt
, 0200e10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_fcmp_absolute_inv
),
21319 NUF(vacltq
, 0200e10
, 3, (RNQ
, oRNQ
, RNQ
), neon_fcmp_absolute_inv
),
21320 NUF(vacle
, 0000e10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_fcmp_absolute_inv
),
21321 NUF(vacleq
, 0000e10
, 3, (RNQ
, oRNQ
, RNQ
), neon_fcmp_absolute_inv
),
21322 NUF(vrecps
, 0000f10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_step
),
21323 NUF(vrecpsq
, 0000f10
, 3, (RNQ
, oRNQ
, RNQ
), neon_step
),
21324 NUF(vrsqrts
, 0200f10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_step
),
21325 NUF(vrsqrtsq
, 0200f10
, 3, (RNQ
, oRNQ
, RNQ
), neon_step
),
21326 /* ARM v8.1 extension. */
21327 nUF (vqrdmlah
, _vqrdmlah
, 3, (RNDQ
, oRNDQ
, RNDQ_RNSC
), neon_qrdmlah
),
21328 nUF (vqrdmlahq
, _vqrdmlah
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_qrdmlah
),
21329 nUF (vqrdmlsh
, _vqrdmlsh
, 3, (RNDQ
, oRNDQ
, RNDQ_RNSC
), neon_qrdmlah
),
21330 nUF (vqrdmlshq
, _vqrdmlsh
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_qrdmlah
),
21332 /* Two address, int/float. Types S8 S16 S32 F32. */
21333 NUF(vabsq
, 1b10300
, 2, (RNQ
, RNQ
), neon_abs_neg
),
21334 NUF(vnegq
, 1b10380
, 2, (RNQ
, RNQ
), neon_abs_neg
),
21336 /* Data processing with two registers and a shift amount. */
21337 /* Right shifts, and variants with rounding.
21338 Types accepted S8 S16 S32 S64 U8 U16 U32 U64. */
21339 NUF(vshr
, 0800010, 3, (RNDQ
, oRNDQ
, I64z
), neon_rshift_round_imm
),
21340 NUF(vshrq
, 0800010, 3, (RNQ
, oRNQ
, I64z
), neon_rshift_round_imm
),
21341 NUF(vrshr
, 0800210, 3, (RNDQ
, oRNDQ
, I64z
), neon_rshift_round_imm
),
21342 NUF(vrshrq
, 0800210, 3, (RNQ
, oRNQ
, I64z
), neon_rshift_round_imm
),
21343 NUF(vsra
, 0800110, 3, (RNDQ
, oRNDQ
, I64
), neon_rshift_round_imm
),
21344 NUF(vsraq
, 0800110, 3, (RNQ
, oRNQ
, I64
), neon_rshift_round_imm
),
21345 NUF(vrsra
, 0800310, 3, (RNDQ
, oRNDQ
, I64
), neon_rshift_round_imm
),
21346 NUF(vrsraq
, 0800310, 3, (RNQ
, oRNQ
, I64
), neon_rshift_round_imm
),
21347 /* Shift and insert. Sizes accepted 8 16 32 64. */
21348 NUF(vsli
, 1800510, 3, (RNDQ
, oRNDQ
, I63
), neon_sli
),
21349 NUF(vsliq
, 1800510, 3, (RNQ
, oRNQ
, I63
), neon_sli
),
21350 NUF(vsri
, 1800410, 3, (RNDQ
, oRNDQ
, I64
), neon_sri
),
21351 NUF(vsriq
, 1800410, 3, (RNQ
, oRNQ
, I64
), neon_sri
),
21352 /* QSHL{U} immediate accepts S8 S16 S32 S64 U8 U16 U32 U64. */
21353 NUF(vqshlu
, 1800610, 3, (RNDQ
, oRNDQ
, I63
), neon_qshlu_imm
),
21354 NUF(vqshluq
, 1800610, 3, (RNQ
, oRNQ
, I63
), neon_qshlu_imm
),
21355 /* Right shift immediate, saturating & narrowing, with rounding variants.
21356 Types accepted S16 S32 S64 U16 U32 U64. */
21357 NUF(vqshrn
, 0800910, 3, (RND
, RNQ
, I32z
), neon_rshift_sat_narrow
),
21358 NUF(vqrshrn
, 0800950, 3, (RND
, RNQ
, I32z
), neon_rshift_sat_narrow
),
21359 /* As above, unsigned. Types accepted S16 S32 S64. */
21360 NUF(vqshrun
, 0800810, 3, (RND
, RNQ
, I32z
), neon_rshift_sat_narrow_u
),
21361 NUF(vqrshrun
, 0800850, 3, (RND
, RNQ
, I32z
), neon_rshift_sat_narrow_u
),
21362 /* Right shift narrowing. Types accepted I16 I32 I64. */
21363 NUF(vshrn
, 0800810, 3, (RND
, RNQ
, I32z
), neon_rshift_narrow
),
21364 NUF(vrshrn
, 0800850, 3, (RND
, RNQ
, I32z
), neon_rshift_narrow
),
21365 /* Special case. Types S8 S16 S32 U8 U16 U32. Handles max shift variant. */
21366 nUF(vshll
, _vshll
, 3, (RNQ
, RND
, I32
), neon_shll
),
21367 /* CVT with optional immediate for fixed-point variant. */
21368 nUF(vcvtq
, _vcvt
, 3, (RNQ
, RNQ
, oI32b
), neon_cvt
),
21370 nUF(vmvn
, _vmvn
, 2, (RNDQ
, RNDQ_Ibig
), neon_mvn
),
21371 nUF(vmvnq
, _vmvn
, 2, (RNQ
, RNDQ_Ibig
), neon_mvn
),
21373 /* Data processing, three registers of different lengths. */
21374 /* Dyadic, long insns. Types S8 S16 S32 U8 U16 U32. */
21375 NUF(vabal
, 0800500, 3, (RNQ
, RND
, RND
), neon_abal
),
21376 NUF(vabdl
, 0800700, 3, (RNQ
, RND
, RND
), neon_dyadic_long
),
21377 NUF(vaddl
, 0800000, 3, (RNQ
, RND
, RND
), neon_dyadic_long
),
21378 NUF(vsubl
, 0800200, 3, (RNQ
, RND
, RND
), neon_dyadic_long
),
21379 /* If not scalar, fall back to neon_dyadic_long.
21380 Vector types as above, scalar types S16 S32 U16 U32. */
21381 nUF(vmlal
, _vmlal
, 3, (RNQ
, RND
, RND_RNSC
), neon_mac_maybe_scalar_long
),
21382 nUF(vmlsl
, _vmlsl
, 3, (RNQ
, RND
, RND_RNSC
), neon_mac_maybe_scalar_long
),
21383 /* Dyadic, widening insns. Types S8 S16 S32 U8 U16 U32. */
21384 NUF(vaddw
, 0800100, 3, (RNQ
, oRNQ
, RND
), neon_dyadic_wide
),
21385 NUF(vsubw
, 0800300, 3, (RNQ
, oRNQ
, RND
), neon_dyadic_wide
),
21386 /* Dyadic, narrowing insns. Types I16 I32 I64. */
21387 NUF(vaddhn
, 0800400, 3, (RND
, RNQ
, RNQ
), neon_dyadic_narrow
),
21388 NUF(vraddhn
, 1800400, 3, (RND
, RNQ
, RNQ
), neon_dyadic_narrow
),
21389 NUF(vsubhn
, 0800600, 3, (RND
, RNQ
, RNQ
), neon_dyadic_narrow
),
21390 NUF(vrsubhn
, 1800600, 3, (RND
, RNQ
, RNQ
), neon_dyadic_narrow
),
21391 /* Saturating doubling multiplies. Types S16 S32. */
21392 nUF(vqdmlal
, _vqdmlal
, 3, (RNQ
, RND
, RND_RNSC
), neon_mul_sat_scalar_long
),
21393 nUF(vqdmlsl
, _vqdmlsl
, 3, (RNQ
, RND
, RND_RNSC
), neon_mul_sat_scalar_long
),
21394 nUF(vqdmull
, _vqdmull
, 3, (RNQ
, RND
, RND_RNSC
), neon_mul_sat_scalar_long
),
21395 /* VMULL. Vector types S8 S16 S32 U8 U16 U32 P8, scalar types
21396 S16 S32 U16 U32. */
21397 nUF(vmull
, _vmull
, 3, (RNQ
, RND
, RND_RNSC
), neon_vmull
),
21399 /* Extract. Size 8. */
21400 NUF(vext
, 0b00000, 4, (RNDQ
, oRNDQ
, RNDQ
, I15
), neon_ext
),
21401 NUF(vextq
, 0b00000, 4, (RNQ
, oRNQ
, RNQ
, I15
), neon_ext
),
21403 /* Two registers, miscellaneous. */
21404 /* Reverse. Sizes 8 16 32 (must be < size in opcode). */
21405 NUF(vrev64
, 1b00000
, 2, (RNDQ
, RNDQ
), neon_rev
),
21406 NUF(vrev64q
, 1b00000
, 2, (RNQ
, RNQ
), neon_rev
),
21407 NUF(vrev32
, 1b00080
, 2, (RNDQ
, RNDQ
), neon_rev
),
21408 NUF(vrev32q
, 1b00080
, 2, (RNQ
, RNQ
), neon_rev
),
21409 NUF(vrev16
, 1b00100
, 2, (RNDQ
, RNDQ
), neon_rev
),
21410 NUF(vrev16q
, 1b00100
, 2, (RNQ
, RNQ
), neon_rev
),
21411 /* Vector replicate. Sizes 8 16 32. */
21412 nCE(vdup
, _vdup
, 2, (RNDQ
, RR_RNSC
), neon_dup
),
21413 nCE(vdupq
, _vdup
, 2, (RNQ
, RR_RNSC
), neon_dup
),
21414 /* VMOVL. Types S8 S16 S32 U8 U16 U32. */
21415 NUF(vmovl
, 0800a10
, 2, (RNQ
, RND
), neon_movl
),
21416 /* VMOVN. Types I16 I32 I64. */
21417 nUF(vmovn
, _vmovn
, 2, (RND
, RNQ
), neon_movn
),
21418 /* VQMOVN. Types S16 S32 S64 U16 U32 U64. */
21419 nUF(vqmovn
, _vqmovn
, 2, (RND
, RNQ
), neon_qmovn
),
21420 /* VQMOVUN. Types S16 S32 S64. */
21421 nUF(vqmovun
, _vqmovun
, 2, (RND
, RNQ
), neon_qmovun
),
21422 /* VZIP / VUZP. Sizes 8 16 32. */
21423 NUF(vzip
, 1b20180
, 2, (RNDQ
, RNDQ
), neon_zip_uzp
),
21424 NUF(vzipq
, 1b20180
, 2, (RNQ
, RNQ
), neon_zip_uzp
),
21425 NUF(vuzp
, 1b20100
, 2, (RNDQ
, RNDQ
), neon_zip_uzp
),
21426 NUF(vuzpq
, 1b20100
, 2, (RNQ
, RNQ
), neon_zip_uzp
),
21427 /* VQABS / VQNEG. Types S8 S16 S32. */
21428 NUF(vqabs
, 1b00700
, 2, (RNDQ
, RNDQ
), neon_sat_abs_neg
),
21429 NUF(vqabsq
, 1b00700
, 2, (RNQ
, RNQ
), neon_sat_abs_neg
),
21430 NUF(vqneg
, 1b00780
, 2, (RNDQ
, RNDQ
), neon_sat_abs_neg
),
21431 NUF(vqnegq
, 1b00780
, 2, (RNQ
, RNQ
), neon_sat_abs_neg
),
21432 /* Pairwise, lengthening. Types S8 S16 S32 U8 U16 U32. */
21433 NUF(vpadal
, 1b00600
, 2, (RNDQ
, RNDQ
), neon_pair_long
),
21434 NUF(vpadalq
, 1b00600
, 2, (RNQ
, RNQ
), neon_pair_long
),
21435 NUF(vpaddl
, 1b00200
, 2, (RNDQ
, RNDQ
), neon_pair_long
),
21436 NUF(vpaddlq
, 1b00200
, 2, (RNQ
, RNQ
), neon_pair_long
),
21437 /* Reciprocal estimates. Types U32 F16 F32. */
21438 NUF(vrecpe
, 1b30400
, 2, (RNDQ
, RNDQ
), neon_recip_est
),
21439 NUF(vrecpeq
, 1b30400
, 2, (RNQ
, RNQ
), neon_recip_est
),
21440 NUF(vrsqrte
, 1b30480
, 2, (RNDQ
, RNDQ
), neon_recip_est
),
21441 NUF(vrsqrteq
, 1b30480
, 2, (RNQ
, RNQ
), neon_recip_est
),
21442 /* VCLS. Types S8 S16 S32. */
21443 NUF(vcls
, 1b00400
, 2, (RNDQ
, RNDQ
), neon_cls
),
21444 NUF(vclsq
, 1b00400
, 2, (RNQ
, RNQ
), neon_cls
),
21445 /* VCLZ. Types I8 I16 I32. */
21446 NUF(vclz
, 1b00480
, 2, (RNDQ
, RNDQ
), neon_clz
),
21447 NUF(vclzq
, 1b00480
, 2, (RNQ
, RNQ
), neon_clz
),
21448 /* VCNT. Size 8. */
21449 NUF(vcnt
, 1b00500
, 2, (RNDQ
, RNDQ
), neon_cnt
),
21450 NUF(vcntq
, 1b00500
, 2, (RNQ
, RNQ
), neon_cnt
),
21451 /* Two address, untyped. */
21452 NUF(vswp
, 1b20000
, 2, (RNDQ
, RNDQ
), neon_swp
),
21453 NUF(vswpq
, 1b20000
, 2, (RNQ
, RNQ
), neon_swp
),
21454 /* VTRN. Sizes 8 16 32. */
21455 nUF(vtrn
, _vtrn
, 2, (RNDQ
, RNDQ
), neon_trn
),
21456 nUF(vtrnq
, _vtrn
, 2, (RNQ
, RNQ
), neon_trn
),
21458 /* Table lookup. Size 8. */
21459 NUF(vtbl
, 1b00800
, 3, (RND
, NRDLST
, RND
), neon_tbl_tbx
),
21460 NUF(vtbx
, 1b00840
, 3, (RND
, NRDLST
, RND
), neon_tbl_tbx
),
21462 #undef THUMB_VARIANT
21463 #define THUMB_VARIANT & fpu_vfp_v3_or_neon_ext
21465 #define ARM_VARIANT & fpu_vfp_v3_or_neon_ext
21467 /* Neon element/structure load/store. */
21468 nUF(vld1
, _vld1
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
21469 nUF(vst1
, _vst1
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
21470 nUF(vld2
, _vld2
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
21471 nUF(vst2
, _vst2
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
21472 nUF(vld3
, _vld3
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
21473 nUF(vst3
, _vst3
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
21474 nUF(vld4
, _vld4
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
21475 nUF(vst4
, _vst4
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
21477 #undef THUMB_VARIANT
21478 #define THUMB_VARIANT & fpu_vfp_ext_v3xd
21480 #define ARM_VARIANT & fpu_vfp_ext_v3xd
21481 cCE("fconsts", eb00a00
, 2, (RVS
, I255
), vfp_sp_const
),
21482 cCE("fshtos", eba0a40
, 2, (RVS
, I16z
), vfp_sp_conv_16
),
21483 cCE("fsltos", eba0ac0
, 2, (RVS
, I32
), vfp_sp_conv_32
),
21484 cCE("fuhtos", ebb0a40
, 2, (RVS
, I16z
), vfp_sp_conv_16
),
21485 cCE("fultos", ebb0ac0
, 2, (RVS
, I32
), vfp_sp_conv_32
),
21486 cCE("ftoshs", ebe0a40
, 2, (RVS
, I16z
), vfp_sp_conv_16
),
21487 cCE("ftosls", ebe0ac0
, 2, (RVS
, I32
), vfp_sp_conv_32
),
21488 cCE("ftouhs", ebf0a40
, 2, (RVS
, I16z
), vfp_sp_conv_16
),
21489 cCE("ftouls", ebf0ac0
, 2, (RVS
, I32
), vfp_sp_conv_32
),
21491 #undef THUMB_VARIANT
21492 #define THUMB_VARIANT & fpu_vfp_ext_v3
21494 #define ARM_VARIANT & fpu_vfp_ext_v3
21496 cCE("fconstd", eb00b00
, 2, (RVD
, I255
), vfp_dp_const
),
21497 cCE("fshtod", eba0b40
, 2, (RVD
, I16z
), vfp_dp_conv_16
),
21498 cCE("fsltod", eba0bc0
, 2, (RVD
, I32
), vfp_dp_conv_32
),
21499 cCE("fuhtod", ebb0b40
, 2, (RVD
, I16z
), vfp_dp_conv_16
),
21500 cCE("fultod", ebb0bc0
, 2, (RVD
, I32
), vfp_dp_conv_32
),
21501 cCE("ftoshd", ebe0b40
, 2, (RVD
, I16z
), vfp_dp_conv_16
),
21502 cCE("ftosld", ebe0bc0
, 2, (RVD
, I32
), vfp_dp_conv_32
),
21503 cCE("ftouhd", ebf0b40
, 2, (RVD
, I16z
), vfp_dp_conv_16
),
21504 cCE("ftould", ebf0bc0
, 2, (RVD
, I32
), vfp_dp_conv_32
),
21507 #define ARM_VARIANT & fpu_vfp_ext_fma
21508 #undef THUMB_VARIANT
21509 #define THUMB_VARIANT & fpu_vfp_ext_fma
21510 /* Mnemonics shared by Neon and VFP. These are included in the
21511 VFP FMA variant; NEON and VFP FMA always includes the NEON
21512 FMA instructions. */
21513 nCEF(vfma
, _vfma
, 3, (RNSDQ
, oRNSDQ
, RNSDQ
), neon_fmac
),
21514 nCEF(vfms
, _vfms
, 3, (RNSDQ
, oRNSDQ
, RNSDQ
), neon_fmac
),
21515 /* ffmas/ffmad/ffmss/ffmsd are dummy mnemonics to satisfy gas;
21516 the v form should always be used. */
21517 cCE("ffmas", ea00a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
21518 cCE("ffnmas", ea00a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
21519 cCE("ffmad", ea00b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
21520 cCE("ffnmad", ea00b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
21521 nCE(vfnma
, _vfnma
, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_nmul
),
21522 nCE(vfnms
, _vfnms
, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_nmul
),
21524 #undef THUMB_VARIANT
21526 #define ARM_VARIANT & arm_cext_xscale /* Intel XScale extensions. */
21528 cCE("mia", e200010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
21529 cCE("miaph", e280010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
21530 cCE("miabb", e2c0010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
21531 cCE("miabt", e2d0010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
21532 cCE("miatb", e2e0010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
21533 cCE("miatt", e2f0010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
21534 cCE("mar", c400000
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mar
),
21535 cCE("mra", c500000
, 3, (RRnpc
, RRnpc
, RXA
), xsc_mra
),
21538 #define ARM_VARIANT & arm_cext_iwmmxt /* Intel Wireless MMX technology. */
21540 cCE("tandcb", e13f130
, 1, (RR
), iwmmxt_tandorc
),
21541 cCE("tandch", e53f130
, 1, (RR
), iwmmxt_tandorc
),
21542 cCE("tandcw", e93f130
, 1, (RR
), iwmmxt_tandorc
),
21543 cCE("tbcstb", e400010
, 2, (RIWR
, RR
), rn_rd
),
21544 cCE("tbcsth", e400050
, 2, (RIWR
, RR
), rn_rd
),
21545 cCE("tbcstw", e400090
, 2, (RIWR
, RR
), rn_rd
),
21546 cCE("textrcb", e130170
, 2, (RR
, I7
), iwmmxt_textrc
),
21547 cCE("textrch", e530170
, 2, (RR
, I7
), iwmmxt_textrc
),
21548 cCE("textrcw", e930170
, 2, (RR
, I7
), iwmmxt_textrc
),
21549 cCE("textrmub",e100070
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
21550 cCE("textrmuh",e500070
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
21551 cCE("textrmuw",e900070
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
21552 cCE("textrmsb",e100078
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
21553 cCE("textrmsh",e500078
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
21554 cCE("textrmsw",e900078
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
21555 cCE("tinsrb", e600010
, 3, (RIWR
, RR
, I7
), iwmmxt_tinsr
),
21556 cCE("tinsrh", e600050
, 3, (RIWR
, RR
, I7
), iwmmxt_tinsr
),
21557 cCE("tinsrw", e600090
, 3, (RIWR
, RR
, I7
), iwmmxt_tinsr
),
21558 cCE("tmcr", e000110
, 2, (RIWC_RIWG
, RR
), rn_rd
),
21559 cCE("tmcrr", c400000
, 3, (RIWR
, RR
, RR
), rm_rd_rn
),
21560 cCE("tmia", e200010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
21561 cCE("tmiaph", e280010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
21562 cCE("tmiabb", e2c0010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
21563 cCE("tmiabt", e2d0010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
21564 cCE("tmiatb", e2e0010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
21565 cCE("tmiatt", e2f0010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
21566 cCE("tmovmskb",e100030
, 2, (RR
, RIWR
), rd_rn
),
21567 cCE("tmovmskh",e500030
, 2, (RR
, RIWR
), rd_rn
),
21568 cCE("tmovmskw",e900030
, 2, (RR
, RIWR
), rd_rn
),
21569 cCE("tmrc", e100110
, 2, (RR
, RIWC_RIWG
), rd_rn
),
21570 cCE("tmrrc", c500000
, 3, (RR
, RR
, RIWR
), rd_rn_rm
),
21571 cCE("torcb", e13f150
, 1, (RR
), iwmmxt_tandorc
),
21572 cCE("torch", e53f150
, 1, (RR
), iwmmxt_tandorc
),
21573 cCE("torcw", e93f150
, 1, (RR
), iwmmxt_tandorc
),
21574 cCE("waccb", e0001c0
, 2, (RIWR
, RIWR
), rd_rn
),
21575 cCE("wacch", e4001c0
, 2, (RIWR
, RIWR
), rd_rn
),
21576 cCE("waccw", e8001c0
, 2, (RIWR
, RIWR
), rd_rn
),
21577 cCE("waddbss", e300180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21578 cCE("waddb", e000180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21579 cCE("waddbus", e100180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21580 cCE("waddhss", e700180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21581 cCE("waddh", e400180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21582 cCE("waddhus", e500180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21583 cCE("waddwss", eb00180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21584 cCE("waddw", e800180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21585 cCE("waddwus", e900180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21586 cCE("waligni", e000020
, 4, (RIWR
, RIWR
, RIWR
, I7
), iwmmxt_waligni
),
21587 cCE("walignr0",e800020
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21588 cCE("walignr1",e900020
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21589 cCE("walignr2",ea00020
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21590 cCE("walignr3",eb00020
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21591 cCE("wand", e200000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21592 cCE("wandn", e300000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21593 cCE("wavg2b", e800000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21594 cCE("wavg2br", e900000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21595 cCE("wavg2h", ec00000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21596 cCE("wavg2hr", ed00000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21597 cCE("wcmpeqb", e000060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21598 cCE("wcmpeqh", e400060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21599 cCE("wcmpeqw", e800060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21600 cCE("wcmpgtub",e100060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21601 cCE("wcmpgtuh",e500060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21602 cCE("wcmpgtuw",e900060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21603 cCE("wcmpgtsb",e300060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21604 cCE("wcmpgtsh",e700060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21605 cCE("wcmpgtsw",eb00060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21606 cCE("wldrb", c100000
, 2, (RIWR
, ADDR
), iwmmxt_wldstbh
),
21607 cCE("wldrh", c500000
, 2, (RIWR
, ADDR
), iwmmxt_wldstbh
),
21608 cCE("wldrw", c100100
, 2, (RIWR_RIWC
, ADDR
), iwmmxt_wldstw
),
21609 cCE("wldrd", c500100
, 2, (RIWR
, ADDR
), iwmmxt_wldstd
),
21610 cCE("wmacs", e600100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21611 cCE("wmacsz", e700100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21612 cCE("wmacu", e400100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21613 cCE("wmacuz", e500100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21614 cCE("wmadds", ea00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21615 cCE("wmaddu", e800100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21616 cCE("wmaxsb", e200160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21617 cCE("wmaxsh", e600160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21618 cCE("wmaxsw", ea00160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21619 cCE("wmaxub", e000160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21620 cCE("wmaxuh", e400160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21621 cCE("wmaxuw", e800160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21622 cCE("wminsb", e300160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21623 cCE("wminsh", e700160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21624 cCE("wminsw", eb00160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21625 cCE("wminub", e100160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21626 cCE("wminuh", e500160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21627 cCE("wminuw", e900160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21628 cCE("wmov", e000000
, 2, (RIWR
, RIWR
), iwmmxt_wmov
),
21629 cCE("wmulsm", e300100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21630 cCE("wmulsl", e200100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21631 cCE("wmulum", e100100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21632 cCE("wmulul", e000100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21633 cCE("wor", e000000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21634 cCE("wpackhss",e700080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21635 cCE("wpackhus",e500080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21636 cCE("wpackwss",eb00080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21637 cCE("wpackwus",e900080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21638 cCE("wpackdss",ef00080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21639 cCE("wpackdus",ed00080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21640 cCE("wrorh", e700040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
21641 cCE("wrorhg", e700148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
21642 cCE("wrorw", eb00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
21643 cCE("wrorwg", eb00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
21644 cCE("wrord", ef00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
21645 cCE("wrordg", ef00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
21646 cCE("wsadb", e000120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21647 cCE("wsadbz", e100120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21648 cCE("wsadh", e400120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21649 cCE("wsadhz", e500120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21650 cCE("wshufh", e0001e0
, 3, (RIWR
, RIWR
, I255
), iwmmxt_wshufh
),
21651 cCE("wsllh", e500040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
21652 cCE("wsllhg", e500148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
21653 cCE("wsllw", e900040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
21654 cCE("wsllwg", e900148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
21655 cCE("wslld", ed00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
21656 cCE("wslldg", ed00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
21657 cCE("wsrah", e400040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
21658 cCE("wsrahg", e400148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
21659 cCE("wsraw", e800040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
21660 cCE("wsrawg", e800148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
21661 cCE("wsrad", ec00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
21662 cCE("wsradg", ec00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
21663 cCE("wsrlh", e600040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
21664 cCE("wsrlhg", e600148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
21665 cCE("wsrlw", ea00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
21666 cCE("wsrlwg", ea00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
21667 cCE("wsrld", ee00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
21668 cCE("wsrldg", ee00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
21669 cCE("wstrb", c000000
, 2, (RIWR
, ADDR
), iwmmxt_wldstbh
),
21670 cCE("wstrh", c400000
, 2, (RIWR
, ADDR
), iwmmxt_wldstbh
),
21671 cCE("wstrw", c000100
, 2, (RIWR_RIWC
, ADDR
), iwmmxt_wldstw
),
21672 cCE("wstrd", c400100
, 2, (RIWR
, ADDR
), iwmmxt_wldstd
),
21673 cCE("wsubbss", e3001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21674 cCE("wsubb", e0001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21675 cCE("wsubbus", e1001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21676 cCE("wsubhss", e7001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21677 cCE("wsubh", e4001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21678 cCE("wsubhus", e5001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21679 cCE("wsubwss", eb001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21680 cCE("wsubw", e8001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21681 cCE("wsubwus", e9001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21682 cCE("wunpckehub",e0000c0
, 2, (RIWR
, RIWR
), rd_rn
),
21683 cCE("wunpckehuh",e4000c0
, 2, (RIWR
, RIWR
), rd_rn
),
21684 cCE("wunpckehuw",e8000c0
, 2, (RIWR
, RIWR
), rd_rn
),
21685 cCE("wunpckehsb",e2000c0
, 2, (RIWR
, RIWR
), rd_rn
),
21686 cCE("wunpckehsh",e6000c0
, 2, (RIWR
, RIWR
), rd_rn
),
21687 cCE("wunpckehsw",ea000c0
, 2, (RIWR
, RIWR
), rd_rn
),
21688 cCE("wunpckihb", e1000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21689 cCE("wunpckihh", e5000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21690 cCE("wunpckihw", e9000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21691 cCE("wunpckelub",e0000e0
, 2, (RIWR
, RIWR
), rd_rn
),
21692 cCE("wunpckeluh",e4000e0
, 2, (RIWR
, RIWR
), rd_rn
),
21693 cCE("wunpckeluw",e8000e0
, 2, (RIWR
, RIWR
), rd_rn
),
21694 cCE("wunpckelsb",e2000e0
, 2, (RIWR
, RIWR
), rd_rn
),
21695 cCE("wunpckelsh",e6000e0
, 2, (RIWR
, RIWR
), rd_rn
),
21696 cCE("wunpckelsw",ea000e0
, 2, (RIWR
, RIWR
), rd_rn
),
21697 cCE("wunpckilb", e1000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21698 cCE("wunpckilh", e5000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21699 cCE("wunpckilw", e9000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21700 cCE("wxor", e100000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21701 cCE("wzero", e300000
, 1, (RIWR
), iwmmxt_wzero
),
21704 #define ARM_VARIANT & arm_cext_iwmmxt2 /* Intel Wireless MMX technology, version 2. */
21706 cCE("torvscb", e12f190
, 1, (RR
), iwmmxt_tandorc
),
21707 cCE("torvsch", e52f190
, 1, (RR
), iwmmxt_tandorc
),
21708 cCE("torvscw", e92f190
, 1, (RR
), iwmmxt_tandorc
),
21709 cCE("wabsb", e2001c0
, 2, (RIWR
, RIWR
), rd_rn
),
21710 cCE("wabsh", e6001c0
, 2, (RIWR
, RIWR
), rd_rn
),
21711 cCE("wabsw", ea001c0
, 2, (RIWR
, RIWR
), rd_rn
),
21712 cCE("wabsdiffb", e1001c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21713 cCE("wabsdiffh", e5001c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21714 cCE("wabsdiffw", e9001c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21715 cCE("waddbhusl", e2001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21716 cCE("waddbhusm", e6001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21717 cCE("waddhc", e600180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21718 cCE("waddwc", ea00180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21719 cCE("waddsubhx", ea001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21720 cCE("wavg4", e400000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21721 cCE("wavg4r", e500000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21722 cCE("wmaddsn", ee00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21723 cCE("wmaddsx", eb00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21724 cCE("wmaddun", ec00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21725 cCE("wmaddux", e900100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21726 cCE("wmerge", e000080
, 4, (RIWR
, RIWR
, RIWR
, I7
), iwmmxt_wmerge
),
21727 cCE("wmiabb", e0000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21728 cCE("wmiabt", e1000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21729 cCE("wmiatb", e2000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21730 cCE("wmiatt", e3000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21731 cCE("wmiabbn", e4000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21732 cCE("wmiabtn", e5000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21733 cCE("wmiatbn", e6000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21734 cCE("wmiattn", e7000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21735 cCE("wmiawbb", e800120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21736 cCE("wmiawbt", e900120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21737 cCE("wmiawtb", ea00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21738 cCE("wmiawtt", eb00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21739 cCE("wmiawbbn", ec00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21740 cCE("wmiawbtn", ed00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21741 cCE("wmiawtbn", ee00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21742 cCE("wmiawttn", ef00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21743 cCE("wmulsmr", ef00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21744 cCE("wmulumr", ed00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21745 cCE("wmulwumr", ec000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21746 cCE("wmulwsmr", ee000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21747 cCE("wmulwum", ed000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21748 cCE("wmulwsm", ef000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21749 cCE("wmulwl", eb000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21750 cCE("wqmiabb", e8000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21751 cCE("wqmiabt", e9000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21752 cCE("wqmiatb", ea000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21753 cCE("wqmiatt", eb000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21754 cCE("wqmiabbn", ec000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21755 cCE("wqmiabtn", ed000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21756 cCE("wqmiatbn", ee000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21757 cCE("wqmiattn", ef000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21758 cCE("wqmulm", e100080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21759 cCE("wqmulmr", e300080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21760 cCE("wqmulwm", ec000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21761 cCE("wqmulwmr", ee000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21762 cCE("wsubaddhx", ed001c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21765 #define ARM_VARIANT & arm_cext_maverick /* Cirrus Maverick instructions. */
21767 cCE("cfldrs", c100400
, 2, (RMF
, ADDRGLDC
), rd_cpaddr
),
21768 cCE("cfldrd", c500400
, 2, (RMD
, ADDRGLDC
), rd_cpaddr
),
21769 cCE("cfldr32", c100500
, 2, (RMFX
, ADDRGLDC
), rd_cpaddr
),
21770 cCE("cfldr64", c500500
, 2, (RMDX
, ADDRGLDC
), rd_cpaddr
),
21771 cCE("cfstrs", c000400
, 2, (RMF
, ADDRGLDC
), rd_cpaddr
),
21772 cCE("cfstrd", c400400
, 2, (RMD
, ADDRGLDC
), rd_cpaddr
),
21773 cCE("cfstr32", c000500
, 2, (RMFX
, ADDRGLDC
), rd_cpaddr
),
21774 cCE("cfstr64", c400500
, 2, (RMDX
, ADDRGLDC
), rd_cpaddr
),
21775 cCE("cfmvsr", e000450
, 2, (RMF
, RR
), rn_rd
),
21776 cCE("cfmvrs", e100450
, 2, (RR
, RMF
), rd_rn
),
21777 cCE("cfmvdlr", e000410
, 2, (RMD
, RR
), rn_rd
),
21778 cCE("cfmvrdl", e100410
, 2, (RR
, RMD
), rd_rn
),
21779 cCE("cfmvdhr", e000430
, 2, (RMD
, RR
), rn_rd
),
21780 cCE("cfmvrdh", e100430
, 2, (RR
, RMD
), rd_rn
),
21781 cCE("cfmv64lr",e000510
, 2, (RMDX
, RR
), rn_rd
),
21782 cCE("cfmvr64l",e100510
, 2, (RR
, RMDX
), rd_rn
),
21783 cCE("cfmv64hr",e000530
, 2, (RMDX
, RR
), rn_rd
),
21784 cCE("cfmvr64h",e100530
, 2, (RR
, RMDX
), rd_rn
),
21785 cCE("cfmval32",e200440
, 2, (RMAX
, RMFX
), rd_rn
),
21786 cCE("cfmv32al",e100440
, 2, (RMFX
, RMAX
), rd_rn
),
21787 cCE("cfmvam32",e200460
, 2, (RMAX
, RMFX
), rd_rn
),
21788 cCE("cfmv32am",e100460
, 2, (RMFX
, RMAX
), rd_rn
),
21789 cCE("cfmvah32",e200480
, 2, (RMAX
, RMFX
), rd_rn
),
21790 cCE("cfmv32ah",e100480
, 2, (RMFX
, RMAX
), rd_rn
),
21791 cCE("cfmva32", e2004a0
, 2, (RMAX
, RMFX
), rd_rn
),
21792 cCE("cfmv32a", e1004a0
, 2, (RMFX
, RMAX
), rd_rn
),
21793 cCE("cfmva64", e2004c0
, 2, (RMAX
, RMDX
), rd_rn
),
21794 cCE("cfmv64a", e1004c0
, 2, (RMDX
, RMAX
), rd_rn
),
21795 cCE("cfmvsc32",e2004e0
, 2, (RMDS
, RMDX
), mav_dspsc
),
21796 cCE("cfmv32sc",e1004e0
, 2, (RMDX
, RMDS
), rd
),
21797 cCE("cfcpys", e000400
, 2, (RMF
, RMF
), rd_rn
),
21798 cCE("cfcpyd", e000420
, 2, (RMD
, RMD
), rd_rn
),
21799 cCE("cfcvtsd", e000460
, 2, (RMD
, RMF
), rd_rn
),
21800 cCE("cfcvtds", e000440
, 2, (RMF
, RMD
), rd_rn
),
21801 cCE("cfcvt32s",e000480
, 2, (RMF
, RMFX
), rd_rn
),
21802 cCE("cfcvt32d",e0004a0
, 2, (RMD
, RMFX
), rd_rn
),
21803 cCE("cfcvt64s",e0004c0
, 2, (RMF
, RMDX
), rd_rn
),
21804 cCE("cfcvt64d",e0004e0
, 2, (RMD
, RMDX
), rd_rn
),
21805 cCE("cfcvts32",e100580
, 2, (RMFX
, RMF
), rd_rn
),
21806 cCE("cfcvtd32",e1005a0
, 2, (RMFX
, RMD
), rd_rn
),
21807 cCE("cftruncs32",e1005c0
, 2, (RMFX
, RMF
), rd_rn
),
21808 cCE("cftruncd32",e1005e0
, 2, (RMFX
, RMD
), rd_rn
),
21809 cCE("cfrshl32",e000550
, 3, (RMFX
, RMFX
, RR
), mav_triple
),
21810 cCE("cfrshl64",e000570
, 3, (RMDX
, RMDX
, RR
), mav_triple
),
21811 cCE("cfsh32", e000500
, 3, (RMFX
, RMFX
, I63s
), mav_shift
),
21812 cCE("cfsh64", e200500
, 3, (RMDX
, RMDX
, I63s
), mav_shift
),
21813 cCE("cfcmps", e100490
, 3, (RR
, RMF
, RMF
), rd_rn_rm
),
21814 cCE("cfcmpd", e1004b0
, 3, (RR
, RMD
, RMD
), rd_rn_rm
),
21815 cCE("cfcmp32", e100590
, 3, (RR
, RMFX
, RMFX
), rd_rn_rm
),
21816 cCE("cfcmp64", e1005b0
, 3, (RR
, RMDX
, RMDX
), rd_rn_rm
),
21817 cCE("cfabss", e300400
, 2, (RMF
, RMF
), rd_rn
),
21818 cCE("cfabsd", e300420
, 2, (RMD
, RMD
), rd_rn
),
21819 cCE("cfnegs", e300440
, 2, (RMF
, RMF
), rd_rn
),
21820 cCE("cfnegd", e300460
, 2, (RMD
, RMD
), rd_rn
),
21821 cCE("cfadds", e300480
, 3, (RMF
, RMF
, RMF
), rd_rn_rm
),
21822 cCE("cfaddd", e3004a0
, 3, (RMD
, RMD
, RMD
), rd_rn_rm
),
21823 cCE("cfsubs", e3004c0
, 3, (RMF
, RMF
, RMF
), rd_rn_rm
),
21824 cCE("cfsubd", e3004e0
, 3, (RMD
, RMD
, RMD
), rd_rn_rm
),
21825 cCE("cfmuls", e100400
, 3, (RMF
, RMF
, RMF
), rd_rn_rm
),
21826 cCE("cfmuld", e100420
, 3, (RMD
, RMD
, RMD
), rd_rn_rm
),
21827 cCE("cfabs32", e300500
, 2, (RMFX
, RMFX
), rd_rn
),
21828 cCE("cfabs64", e300520
, 2, (RMDX
, RMDX
), rd_rn
),
21829 cCE("cfneg32", e300540
, 2, (RMFX
, RMFX
), rd_rn
),
21830 cCE("cfneg64", e300560
, 2, (RMDX
, RMDX
), rd_rn
),
21831 cCE("cfadd32", e300580
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
21832 cCE("cfadd64", e3005a0
, 3, (RMDX
, RMDX
, RMDX
), rd_rn_rm
),
21833 cCE("cfsub32", e3005c0
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
21834 cCE("cfsub64", e3005e0
, 3, (RMDX
, RMDX
, RMDX
), rd_rn_rm
),
21835 cCE("cfmul32", e100500
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
21836 cCE("cfmul64", e100520
, 3, (RMDX
, RMDX
, RMDX
), rd_rn_rm
),
21837 cCE("cfmac32", e100540
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
21838 cCE("cfmsc32", e100560
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
21839 cCE("cfmadd32",e000600
, 4, (RMAX
, RMFX
, RMFX
, RMFX
), mav_quad
),
21840 cCE("cfmsub32",e100600
, 4, (RMAX
, RMFX
, RMFX
, RMFX
), mav_quad
),
21841 cCE("cfmadda32", e200600
, 4, (RMAX
, RMAX
, RMFX
, RMFX
), mav_quad
),
21842 cCE("cfmsuba32", e300600
, 4, (RMAX
, RMAX
, RMFX
, RMFX
), mav_quad
),
21844 /* ARMv8.5-A instructions. */
21846 #define ARM_VARIANT & arm_ext_sb
21847 #undef THUMB_VARIANT
21848 #define THUMB_VARIANT & arm_ext_sb
21849 TUF("sb", 57ff070
, f3bf8f70
, 0, (), noargs
, noargs
),
21852 #define ARM_VARIANT & arm_ext_predres
21853 #undef THUMB_VARIANT
21854 #define THUMB_VARIANT & arm_ext_predres
21855 CE("cfprctx", e070f93
, 1, (RRnpc
), rd
),
21856 CE("dvprctx", e070fb3
, 1, (RRnpc
), rd
),
21857 CE("cpprctx", e070ff3
, 1, (RRnpc
), rd
),
21859 /* ARMv8-M instructions. */
21861 #define ARM_VARIANT NULL
21862 #undef THUMB_VARIANT
21863 #define THUMB_VARIANT & arm_ext_v8m
21864 ToU("sg", e97fe97f
, 0, (), noargs
),
21865 ToC("blxns", 4784, 1, (RRnpc
), t_blx
),
21866 ToC("bxns", 4704, 1, (RRnpc
), t_bx
),
21867 ToC("tt", e840f000
, 2, (RRnpc
, RRnpc
), tt
),
21868 ToC("ttt", e840f040
, 2, (RRnpc
, RRnpc
), tt
),
21869 ToC("tta", e840f080
, 2, (RRnpc
, RRnpc
), tt
),
21870 ToC("ttat", e840f0c0
, 2, (RRnpc
, RRnpc
), tt
),
21872 /* FP for ARMv8-M Mainline. Enabled for ARMv8-M Mainline because the
21873 instructions behave as nop if no VFP is present. */
21874 #undef THUMB_VARIANT
21875 #define THUMB_VARIANT & arm_ext_v8m_main
21876 ToC("vlldm", ec300a00
, 1, (RRnpc
), rn
),
21877 ToC("vlstm", ec200a00
, 1, (RRnpc
), rn
),
21879 /* Armv8.1-M Mainline instructions. */
21880 #undef THUMB_VARIANT
21881 #define THUMB_VARIANT & arm_ext_v8_1m_main
21882 toC("bf", _bf
, 2, (EXPs
, EXPs
), t_branch_future
),
21883 toU("bfcsel", _bfcsel
, 4, (EXPs
, EXPs
, EXPs
, COND
), t_branch_future
),
21884 toC("bfx", _bfx
, 2, (EXPs
, RRnpcsp
), t_branch_future
),
21885 toC("bfl", _bfl
, 2, (EXPs
, EXPs
), t_branch_future
),
21886 toC("bflx", _bflx
, 2, (EXPs
, RRnpcsp
), t_branch_future
),
21888 toU("dls", _dls
, 2, (LR
, RRnpcsp
), t_loloop
),
21889 toU("wls", _wls
, 3, (LR
, RRnpcsp
, EXP
), t_loloop
),
21890 toU("le", _le
, 2, (oLR
, EXP
), t_loloop
),
21892 ToC("clrm", e89f0000
, 1, (CLRMLST
), t_clrm
)
21895 #undef THUMB_VARIANT
21927 /* MD interface: bits in the object file. */
21929 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
21930 for use in the a.out file, and stores them in the array pointed to by buf.
21931 This knows about the endian-ness of the target machine and does
21932 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
21933 2 (short) and 4 (long) Floating numbers are put out as a series of
21934 LITTLENUMS (shorts, here at least). */
21937 md_number_to_chars (char * buf
, valueT val
, int n
)
21939 if (target_big_endian
)
21940 number_to_chars_bigendian (buf
, val
, n
);
21942 number_to_chars_littleendian (buf
, val
, n
);
21946 md_chars_to_number (char * buf
, int n
)
21949 unsigned char * where
= (unsigned char *) buf
;
21951 if (target_big_endian
)
21956 result
|= (*where
++ & 255);
21964 result
|= (where
[n
] & 255);
21971 /* MD interface: Sections. */
21973 /* Calculate the maximum variable size (i.e., excluding fr_fix)
21974 that an rs_machine_dependent frag may reach. */
21977 arm_frag_max_var (fragS
*fragp
)
21979 /* We only use rs_machine_dependent for variable-size Thumb instructions,
21980 which are either THUMB_SIZE (2) or INSN_SIZE (4).
21982 Note that we generate relaxable instructions even for cases that don't
21983 really need it, like an immediate that's a trivial constant. So we're
21984 overestimating the instruction size for some of those cases. Rather
21985 than putting more intelligence here, it would probably be better to
21986 avoid generating a relaxation frag in the first place when it can be
21987 determined up front that a short instruction will suffice. */
21989 gas_assert (fragp
->fr_type
== rs_machine_dependent
);
21993 /* Estimate the size of a frag before relaxing. Assume everything fits in
21997 md_estimate_size_before_relax (fragS
* fragp
,
21998 segT segtype ATTRIBUTE_UNUSED
)
22004 /* Convert a machine dependent frag. */
22007 md_convert_frag (bfd
*abfd
, segT asec ATTRIBUTE_UNUSED
, fragS
*fragp
)
22009 unsigned long insn
;
22010 unsigned long old_op
;
22018 buf
= fragp
->fr_literal
+ fragp
->fr_fix
;
22020 old_op
= bfd_get_16(abfd
, buf
);
22021 if (fragp
->fr_symbol
)
22023 exp
.X_op
= O_symbol
;
22024 exp
.X_add_symbol
= fragp
->fr_symbol
;
22028 exp
.X_op
= O_constant
;
22030 exp
.X_add_number
= fragp
->fr_offset
;
22031 opcode
= fragp
->fr_subtype
;
22034 case T_MNEM_ldr_pc
:
22035 case T_MNEM_ldr_pc2
:
22036 case T_MNEM_ldr_sp
:
22037 case T_MNEM_str_sp
:
22044 if (fragp
->fr_var
== 4)
22046 insn
= THUMB_OP32 (opcode
);
22047 if ((old_op
>> 12) == 4 || (old_op
>> 12) == 9)
22049 insn
|= (old_op
& 0x700) << 4;
22053 insn
|= (old_op
& 7) << 12;
22054 insn
|= (old_op
& 0x38) << 13;
22056 insn
|= 0x00000c00;
22057 put_thumb32_insn (buf
, insn
);
22058 reloc_type
= BFD_RELOC_ARM_T32_OFFSET_IMM
;
22062 reloc_type
= BFD_RELOC_ARM_THUMB_OFFSET
;
22064 pc_rel
= (opcode
== T_MNEM_ldr_pc2
);
22067 if (fragp
->fr_var
== 4)
22069 insn
= THUMB_OP32 (opcode
);
22070 insn
|= (old_op
& 0xf0) << 4;
22071 put_thumb32_insn (buf
, insn
);
22072 reloc_type
= BFD_RELOC_ARM_T32_ADD_PC12
;
22076 reloc_type
= BFD_RELOC_ARM_THUMB_ADD
;
22077 exp
.X_add_number
-= 4;
22085 if (fragp
->fr_var
== 4)
22087 int r0off
= (opcode
== T_MNEM_mov
22088 || opcode
== T_MNEM_movs
) ? 0 : 8;
22089 insn
= THUMB_OP32 (opcode
);
22090 insn
= (insn
& 0xe1ffffff) | 0x10000000;
22091 insn
|= (old_op
& 0x700) << r0off
;
22092 put_thumb32_insn (buf
, insn
);
22093 reloc_type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
22097 reloc_type
= BFD_RELOC_ARM_THUMB_IMM
;
22102 if (fragp
->fr_var
== 4)
22104 insn
= THUMB_OP32(opcode
);
22105 put_thumb32_insn (buf
, insn
);
22106 reloc_type
= BFD_RELOC_THUMB_PCREL_BRANCH25
;
22109 reloc_type
= BFD_RELOC_THUMB_PCREL_BRANCH12
;
22113 if (fragp
->fr_var
== 4)
22115 insn
= THUMB_OP32(opcode
);
22116 insn
|= (old_op
& 0xf00) << 14;
22117 put_thumb32_insn (buf
, insn
);
22118 reloc_type
= BFD_RELOC_THUMB_PCREL_BRANCH20
;
22121 reloc_type
= BFD_RELOC_THUMB_PCREL_BRANCH9
;
22124 case T_MNEM_add_sp
:
22125 case T_MNEM_add_pc
:
22126 case T_MNEM_inc_sp
:
22127 case T_MNEM_dec_sp
:
22128 if (fragp
->fr_var
== 4)
22130 /* ??? Choose between add and addw. */
22131 insn
= THUMB_OP32 (opcode
);
22132 insn
|= (old_op
& 0xf0) << 4;
22133 put_thumb32_insn (buf
, insn
);
22134 if (opcode
== T_MNEM_add_pc
)
22135 reloc_type
= BFD_RELOC_ARM_T32_IMM12
;
22137 reloc_type
= BFD_RELOC_ARM_T32_ADD_IMM
;
22140 reloc_type
= BFD_RELOC_ARM_THUMB_ADD
;
22148 if (fragp
->fr_var
== 4)
22150 insn
= THUMB_OP32 (opcode
);
22151 insn
|= (old_op
& 0xf0) << 4;
22152 insn
|= (old_op
& 0xf) << 16;
22153 put_thumb32_insn (buf
, insn
);
22154 if (insn
& (1 << 20))
22155 reloc_type
= BFD_RELOC_ARM_T32_ADD_IMM
;
22157 reloc_type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
22160 reloc_type
= BFD_RELOC_ARM_THUMB_ADD
;
22166 fixp
= fix_new_exp (fragp
, fragp
->fr_fix
, fragp
->fr_var
, &exp
, pc_rel
,
22167 (enum bfd_reloc_code_real
) reloc_type
);
22168 fixp
->fx_file
= fragp
->fr_file
;
22169 fixp
->fx_line
= fragp
->fr_line
;
22170 fragp
->fr_fix
+= fragp
->fr_var
;
22172 /* Set whether we use thumb-2 ISA based on final relaxation results. */
22173 if (thumb_mode
&& fragp
->fr_var
== 4 && no_cpu_selected ()
22174 && !ARM_CPU_HAS_FEATURE (thumb_arch_used
, arm_arch_t2
))
22175 ARM_MERGE_FEATURE_SETS (arm_arch_used
, thumb_arch_used
, arm_ext_v6t2
);
22178 /* Return the size of a relaxable immediate operand instruction.
22179 SHIFT and SIZE specify the form of the allowable immediate. */
22181 relax_immediate (fragS
*fragp
, int size
, int shift
)
22187 /* ??? Should be able to do better than this. */
22188 if (fragp
->fr_symbol
)
22191 low
= (1 << shift
) - 1;
22192 mask
= (1 << (shift
+ size
)) - (1 << shift
);
22193 offset
= fragp
->fr_offset
;
22194 /* Force misaligned offsets to 32-bit variant. */
22197 if (offset
& ~mask
)
22202 /* Get the address of a symbol during relaxation. */
22204 relaxed_symbol_addr (fragS
*fragp
, long stretch
)
22210 sym
= fragp
->fr_symbol
;
22211 sym_frag
= symbol_get_frag (sym
);
22212 know (S_GET_SEGMENT (sym
) != absolute_section
22213 || sym_frag
== &zero_address_frag
);
22214 addr
= S_GET_VALUE (sym
) + fragp
->fr_offset
;
22216 /* If frag has yet to be reached on this pass, assume it will
22217 move by STRETCH just as we did. If this is not so, it will
22218 be because some frag between grows, and that will force
22222 && sym_frag
->relax_marker
!= fragp
->relax_marker
)
22226 /* Adjust stretch for any alignment frag. Note that if have
22227 been expanding the earlier code, the symbol may be
22228 defined in what appears to be an earlier frag. FIXME:
22229 This doesn't handle the fr_subtype field, which specifies
22230 a maximum number of bytes to skip when doing an
22232 for (f
= fragp
; f
!= NULL
&& f
!= sym_frag
; f
= f
->fr_next
)
22234 if (f
->fr_type
== rs_align
|| f
->fr_type
== rs_align_code
)
22237 stretch
= - ((- stretch
)
22238 & ~ ((1 << (int) f
->fr_offset
) - 1));
22240 stretch
&= ~ ((1 << (int) f
->fr_offset
) - 1);
22252 /* Return the size of a relaxable adr pseudo-instruction or PC-relative
22255 relax_adr (fragS
*fragp
, asection
*sec
, long stretch
)
22260 /* Assume worst case for symbols not known to be in the same section. */
22261 if (fragp
->fr_symbol
== NULL
22262 || !S_IS_DEFINED (fragp
->fr_symbol
)
22263 || sec
!= S_GET_SEGMENT (fragp
->fr_symbol
)
22264 || S_IS_WEAK (fragp
->fr_symbol
))
22267 val
= relaxed_symbol_addr (fragp
, stretch
);
22268 addr
= fragp
->fr_address
+ fragp
->fr_fix
;
22269 addr
= (addr
+ 4) & ~3;
22270 /* Force misaligned targets to 32-bit variant. */
22274 if (val
< 0 || val
> 1020)
22279 /* Return the size of a relaxable add/sub immediate instruction. */
22281 relax_addsub (fragS
*fragp
, asection
*sec
)
22286 buf
= fragp
->fr_literal
+ fragp
->fr_fix
;
22287 op
= bfd_get_16(sec
->owner
, buf
);
22288 if ((op
& 0xf) == ((op
>> 4) & 0xf))
22289 return relax_immediate (fragp
, 8, 0);
22291 return relax_immediate (fragp
, 3, 0);
22294 /* Return TRUE iff the definition of symbol S could be pre-empted
22295 (overridden) at link or load time. */
22297 symbol_preemptible (symbolS
*s
)
22299 /* Weak symbols can always be pre-empted. */
22303 /* Non-global symbols cannot be pre-empted. */
22304 if (! S_IS_EXTERNAL (s
))
22308 /* In ELF, a global symbol can be marked protected, or private. In that
22309 case it can't be pre-empted (other definitions in the same link unit
22310 would violate the ODR). */
22311 if (ELF_ST_VISIBILITY (S_GET_OTHER (s
)) > STV_DEFAULT
)
22315 /* Other global symbols might be pre-empted. */
22319 /* Return the size of a relaxable branch instruction. BITS is the
22320 size of the offset field in the narrow instruction. */
22323 relax_branch (fragS
*fragp
, asection
*sec
, int bits
, long stretch
)
22329 /* Assume worst case for symbols not known to be in the same section. */
22330 if (!S_IS_DEFINED (fragp
->fr_symbol
)
22331 || sec
!= S_GET_SEGMENT (fragp
->fr_symbol
)
22332 || S_IS_WEAK (fragp
->fr_symbol
))
22336 /* A branch to a function in ARM state will require interworking. */
22337 if (S_IS_DEFINED (fragp
->fr_symbol
)
22338 && ARM_IS_FUNC (fragp
->fr_symbol
))
22342 if (symbol_preemptible (fragp
->fr_symbol
))
22345 val
= relaxed_symbol_addr (fragp
, stretch
);
22346 addr
= fragp
->fr_address
+ fragp
->fr_fix
+ 4;
22349 /* Offset is a signed value *2 */
22351 if (val
>= limit
|| val
< -limit
)
22357 /* Relax a machine dependent frag. This returns the amount by which
22358 the current size of the frag should change. */
22361 arm_relax_frag (asection
*sec
, fragS
*fragp
, long stretch
)
22366 oldsize
= fragp
->fr_var
;
22367 switch (fragp
->fr_subtype
)
22369 case T_MNEM_ldr_pc2
:
22370 newsize
= relax_adr (fragp
, sec
, stretch
);
22372 case T_MNEM_ldr_pc
:
22373 case T_MNEM_ldr_sp
:
22374 case T_MNEM_str_sp
:
22375 newsize
= relax_immediate (fragp
, 8, 2);
22379 newsize
= relax_immediate (fragp
, 5, 2);
22383 newsize
= relax_immediate (fragp
, 5, 1);
22387 newsize
= relax_immediate (fragp
, 5, 0);
22390 newsize
= relax_adr (fragp
, sec
, stretch
);
22396 newsize
= relax_immediate (fragp
, 8, 0);
22399 newsize
= relax_branch (fragp
, sec
, 11, stretch
);
22402 newsize
= relax_branch (fragp
, sec
, 8, stretch
);
22404 case T_MNEM_add_sp
:
22405 case T_MNEM_add_pc
:
22406 newsize
= relax_immediate (fragp
, 8, 2);
22408 case T_MNEM_inc_sp
:
22409 case T_MNEM_dec_sp
:
22410 newsize
= relax_immediate (fragp
, 7, 2);
22416 newsize
= relax_addsub (fragp
, sec
);
22422 fragp
->fr_var
= newsize
;
22423 /* Freeze wide instructions that are at or before the same location as
22424 in the previous pass. This avoids infinite loops.
22425 Don't freeze them unconditionally because targets may be artificially
22426 misaligned by the expansion of preceding frags. */
22427 if (stretch
<= 0 && newsize
> 2)
22429 md_convert_frag (sec
->owner
, sec
, fragp
);
22433 return newsize
- oldsize
;
22436 /* Round up a section size to the appropriate boundary. */
22439 md_section_align (segT segment ATTRIBUTE_UNUSED
,
22445 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
22446 of an rs_align_code fragment. */
22449 arm_handle_align (fragS
* fragP
)
22451 static unsigned char const arm_noop
[2][2][4] =
22454 {0x00, 0x00, 0xa0, 0xe1}, /* LE */
22455 {0xe1, 0xa0, 0x00, 0x00}, /* BE */
22458 {0x00, 0xf0, 0x20, 0xe3}, /* LE */
22459 {0xe3, 0x20, 0xf0, 0x00}, /* BE */
22462 static unsigned char const thumb_noop
[2][2][2] =
22465 {0xc0, 0x46}, /* LE */
22466 {0x46, 0xc0}, /* BE */
22469 {0x00, 0xbf}, /* LE */
22470 {0xbf, 0x00} /* BE */
22473 static unsigned char const wide_thumb_noop
[2][4] =
22474 { /* Wide Thumb-2 */
22475 {0xaf, 0xf3, 0x00, 0x80}, /* LE */
22476 {0xf3, 0xaf, 0x80, 0x00}, /* BE */
22479 unsigned bytes
, fix
, noop_size
;
22481 const unsigned char * noop
;
22482 const unsigned char *narrow_noop
= NULL
;
22487 if (fragP
->fr_type
!= rs_align_code
)
22490 bytes
= fragP
->fr_next
->fr_address
- fragP
->fr_address
- fragP
->fr_fix
;
22491 p
= fragP
->fr_literal
+ fragP
->fr_fix
;
22494 if (bytes
> MAX_MEM_FOR_RS_ALIGN_CODE
)
22495 bytes
&= MAX_MEM_FOR_RS_ALIGN_CODE
;
22497 gas_assert ((fragP
->tc_frag_data
.thumb_mode
& MODE_RECORDED
) != 0);
22499 if (fragP
->tc_frag_data
.thumb_mode
& (~ MODE_RECORDED
))
22501 if (ARM_CPU_HAS_FEATURE (selected_cpu_name
[0]
22502 ? selected_cpu
: arm_arch_none
, arm_ext_v6t2
))
22504 narrow_noop
= thumb_noop
[1][target_big_endian
];
22505 noop
= wide_thumb_noop
[target_big_endian
];
22508 noop
= thumb_noop
[0][target_big_endian
];
22516 noop
= arm_noop
[ARM_CPU_HAS_FEATURE (selected_cpu_name
[0]
22517 ? selected_cpu
: arm_arch_none
,
22519 [target_big_endian
];
22526 fragP
->fr_var
= noop_size
;
22528 if (bytes
& (noop_size
- 1))
22530 fix
= bytes
& (noop_size
- 1);
22532 insert_data_mapping_symbol (state
, fragP
->fr_fix
, fragP
, fix
);
22534 memset (p
, 0, fix
);
22541 if (bytes
& noop_size
)
22543 /* Insert a narrow noop. */
22544 memcpy (p
, narrow_noop
, noop_size
);
22546 bytes
-= noop_size
;
22550 /* Use wide noops for the remainder */
22554 while (bytes
>= noop_size
)
22556 memcpy (p
, noop
, noop_size
);
22558 bytes
-= noop_size
;
22562 fragP
->fr_fix
+= fix
;
22565 /* Called from md_do_align. Used to create an alignment
22566 frag in a code section. */
22569 arm_frag_align_code (int n
, int max
)
22573 /* We assume that there will never be a requirement
22574 to support alignments greater than MAX_MEM_FOR_RS_ALIGN_CODE bytes. */
22575 if (max
> MAX_MEM_FOR_RS_ALIGN_CODE
)
22580 _("alignments greater than %d bytes not supported in .text sections."),
22581 MAX_MEM_FOR_RS_ALIGN_CODE
+ 1);
22582 as_fatal ("%s", err_msg
);
22585 p
= frag_var (rs_align_code
,
22586 MAX_MEM_FOR_RS_ALIGN_CODE
,
22588 (relax_substateT
) max
,
22595 /* Perform target specific initialisation of a frag.
22596 Note - despite the name this initialisation is not done when the frag
22597 is created, but only when its type is assigned. A frag can be created
22598 and used a long time before its type is set, so beware of assuming that
22599 this initialisation is performed first. */
22603 arm_init_frag (fragS
* fragP
, int max_chars ATTRIBUTE_UNUSED
)
22605 /* Record whether this frag is in an ARM or a THUMB area. */
22606 fragP
->tc_frag_data
.thumb_mode
= thumb_mode
| MODE_RECORDED
;
22609 #else /* OBJ_ELF is defined. */
22611 arm_init_frag (fragS
* fragP
, int max_chars
)
22613 bfd_boolean frag_thumb_mode
;
22615 /* If the current ARM vs THUMB mode has not already
22616 been recorded into this frag then do so now. */
22617 if ((fragP
->tc_frag_data
.thumb_mode
& MODE_RECORDED
) == 0)
22618 fragP
->tc_frag_data
.thumb_mode
= thumb_mode
| MODE_RECORDED
;
22620 /* PR 21809: Do not set a mapping state for debug sections
22621 - it just confuses other tools. */
22622 if (bfd_get_section_flags (NULL
, now_seg
) & SEC_DEBUGGING
)
22625 frag_thumb_mode
= fragP
->tc_frag_data
.thumb_mode
^ MODE_RECORDED
;
22627 /* Record a mapping symbol for alignment frags. We will delete this
22628 later if the alignment ends up empty. */
22629 switch (fragP
->fr_type
)
22632 case rs_align_test
:
22634 mapping_state_2 (MAP_DATA
, max_chars
);
22636 case rs_align_code
:
22637 mapping_state_2 (frag_thumb_mode
? MAP_THUMB
: MAP_ARM
, max_chars
);
22644 /* When we change sections we need to issue a new mapping symbol. */
22647 arm_elf_change_section (void)
22649 /* Link an unlinked unwind index table section to the .text section. */
22650 if (elf_section_type (now_seg
) == SHT_ARM_EXIDX
22651 && elf_linked_to_section (now_seg
) == NULL
)
22652 elf_linked_to_section (now_seg
) = text_section
;
22656 arm_elf_section_type (const char * str
, size_t len
)
22658 if (len
== 5 && strncmp (str
, "exidx", 5) == 0)
22659 return SHT_ARM_EXIDX
;
22664 /* Code to deal with unwinding tables. */
22666 static void add_unwind_adjustsp (offsetT
);
22668 /* Generate any deferred unwind frame offset. */
22671 flush_pending_unwind (void)
22675 offset
= unwind
.pending_offset
;
22676 unwind
.pending_offset
= 0;
22678 add_unwind_adjustsp (offset
);
22681 /* Add an opcode to this list for this function. Two-byte opcodes should
22682 be passed as op[0] << 8 | op[1]. The list of opcodes is built in reverse
22686 add_unwind_opcode (valueT op
, int length
)
22688 /* Add any deferred stack adjustment. */
22689 if (unwind
.pending_offset
)
22690 flush_pending_unwind ();
22692 unwind
.sp_restored
= 0;
22694 if (unwind
.opcode_count
+ length
> unwind
.opcode_alloc
)
22696 unwind
.opcode_alloc
+= ARM_OPCODE_CHUNK_SIZE
;
22697 if (unwind
.opcodes
)
22698 unwind
.opcodes
= XRESIZEVEC (unsigned char, unwind
.opcodes
,
22699 unwind
.opcode_alloc
);
22701 unwind
.opcodes
= XNEWVEC (unsigned char, unwind
.opcode_alloc
);
22706 unwind
.opcodes
[unwind
.opcode_count
] = op
& 0xff;
22708 unwind
.opcode_count
++;
22712 /* Add unwind opcodes to adjust the stack pointer. */
22715 add_unwind_adjustsp (offsetT offset
)
22719 if (offset
> 0x200)
22721 /* We need at most 5 bytes to hold a 32-bit value in a uleb128. */
22726 /* Long form: 0xb2, uleb128. */
22727 /* This might not fit in a word so add the individual bytes,
22728 remembering the list is built in reverse order. */
22729 o
= (valueT
) ((offset
- 0x204) >> 2);
22731 add_unwind_opcode (0, 1);
22733 /* Calculate the uleb128 encoding of the offset. */
22737 bytes
[n
] = o
& 0x7f;
22743 /* Add the insn. */
22745 add_unwind_opcode (bytes
[n
- 1], 1);
22746 add_unwind_opcode (0xb2, 1);
22748 else if (offset
> 0x100)
22750 /* Two short opcodes. */
22751 add_unwind_opcode (0x3f, 1);
22752 op
= (offset
- 0x104) >> 2;
22753 add_unwind_opcode (op
, 1);
22755 else if (offset
> 0)
22757 /* Short opcode. */
22758 op
= (offset
- 4) >> 2;
22759 add_unwind_opcode (op
, 1);
22761 else if (offset
< 0)
22764 while (offset
> 0x100)
22766 add_unwind_opcode (0x7f, 1);
22769 op
= ((offset
- 4) >> 2) | 0x40;
22770 add_unwind_opcode (op
, 1);
22774 /* Finish the list of unwind opcodes for this function. */
22777 finish_unwind_opcodes (void)
22781 if (unwind
.fp_used
)
22783 /* Adjust sp as necessary. */
22784 unwind
.pending_offset
+= unwind
.fp_offset
- unwind
.frame_size
;
22785 flush_pending_unwind ();
22787 /* After restoring sp from the frame pointer. */
22788 op
= 0x90 | unwind
.fp_reg
;
22789 add_unwind_opcode (op
, 1);
22792 flush_pending_unwind ();
22796 /* Start an exception table entry. If idx is nonzero this is an index table
22800 start_unwind_section (const segT text_seg
, int idx
)
22802 const char * text_name
;
22803 const char * prefix
;
22804 const char * prefix_once
;
22805 const char * group_name
;
22813 prefix
= ELF_STRING_ARM_unwind
;
22814 prefix_once
= ELF_STRING_ARM_unwind_once
;
22815 type
= SHT_ARM_EXIDX
;
22819 prefix
= ELF_STRING_ARM_unwind_info
;
22820 prefix_once
= ELF_STRING_ARM_unwind_info_once
;
22821 type
= SHT_PROGBITS
;
22824 text_name
= segment_name (text_seg
);
22825 if (streq (text_name
, ".text"))
22828 if (strncmp (text_name
, ".gnu.linkonce.t.",
22829 strlen (".gnu.linkonce.t.")) == 0)
22831 prefix
= prefix_once
;
22832 text_name
+= strlen (".gnu.linkonce.t.");
22835 sec_name
= concat (prefix
, text_name
, (char *) NULL
);
22841 /* Handle COMDAT group. */
22842 if (prefix
!= prefix_once
&& (text_seg
->flags
& SEC_LINK_ONCE
) != 0)
22844 group_name
= elf_group_name (text_seg
);
22845 if (group_name
== NULL
)
22847 as_bad (_("Group section `%s' has no group signature"),
22848 segment_name (text_seg
));
22849 ignore_rest_of_line ();
22852 flags
|= SHF_GROUP
;
22856 obj_elf_change_section (sec_name
, type
, 0, flags
, 0, group_name
,
22859 /* Set the section link for index tables. */
22861 elf_linked_to_section (now_seg
) = text_seg
;
22865 /* Start an unwind table entry. HAVE_DATA is nonzero if we have additional
22866 personality routine data. Returns zero, or the index table value for
22867 an inline entry. */
22870 create_unwind_entry (int have_data
)
22875 /* The current word of data. */
22877 /* The number of bytes left in this word. */
22880 finish_unwind_opcodes ();
22882 /* Remember the current text section. */
22883 unwind
.saved_seg
= now_seg
;
22884 unwind
.saved_subseg
= now_subseg
;
22886 start_unwind_section (now_seg
, 0);
22888 if (unwind
.personality_routine
== NULL
)
22890 if (unwind
.personality_index
== -2)
22893 as_bad (_("handlerdata in cantunwind frame"));
22894 return 1; /* EXIDX_CANTUNWIND. */
22897 /* Use a default personality routine if none is specified. */
22898 if (unwind
.personality_index
== -1)
22900 if (unwind
.opcode_count
> 3)
22901 unwind
.personality_index
= 1;
22903 unwind
.personality_index
= 0;
22906 /* Space for the personality routine entry. */
22907 if (unwind
.personality_index
== 0)
22909 if (unwind
.opcode_count
> 3)
22910 as_bad (_("too many unwind opcodes for personality routine 0"));
22914 /* All the data is inline in the index table. */
22917 while (unwind
.opcode_count
> 0)
22919 unwind
.opcode_count
--;
22920 data
= (data
<< 8) | unwind
.opcodes
[unwind
.opcode_count
];
22924 /* Pad with "finish" opcodes. */
22926 data
= (data
<< 8) | 0xb0;
22933 /* We get two opcodes "free" in the first word. */
22934 size
= unwind
.opcode_count
- 2;
22938 /* PR 16765: Missing or misplaced unwind directives can trigger this. */
22939 if (unwind
.personality_index
!= -1)
22941 as_bad (_("attempt to recreate an unwind entry"));
22945 /* An extra byte is required for the opcode count. */
22946 size
= unwind
.opcode_count
+ 1;
22949 size
= (size
+ 3) >> 2;
22951 as_bad (_("too many unwind opcodes"));
22953 frag_align (2, 0, 0);
22954 record_alignment (now_seg
, 2);
22955 unwind
.table_entry
= expr_build_dot ();
22957 /* Allocate the table entry. */
22958 ptr
= frag_more ((size
<< 2) + 4);
22959 /* PR 13449: Zero the table entries in case some of them are not used. */
22960 memset (ptr
, 0, (size
<< 2) + 4);
22961 where
= frag_now_fix () - ((size
<< 2) + 4);
22963 switch (unwind
.personality_index
)
22966 /* ??? Should this be a PLT generating relocation? */
22967 /* Custom personality routine. */
22968 fix_new (frag_now
, where
, 4, unwind
.personality_routine
, 0, 1,
22969 BFD_RELOC_ARM_PREL31
);
22974 /* Set the first byte to the number of additional words. */
22975 data
= size
> 0 ? size
- 1 : 0;
22979 /* ABI defined personality routines. */
22981 /* Three opcodes bytes are packed into the first word. */
22988 /* The size and first two opcode bytes go in the first word. */
22989 data
= ((0x80 + unwind
.personality_index
) << 8) | size
;
22994 /* Should never happen. */
22998 /* Pack the opcodes into words (MSB first), reversing the list at the same
23000 while (unwind
.opcode_count
> 0)
23004 md_number_to_chars (ptr
, data
, 4);
23009 unwind
.opcode_count
--;
23011 data
= (data
<< 8) | unwind
.opcodes
[unwind
.opcode_count
];
23014 /* Finish off the last word. */
23017 /* Pad with "finish" opcodes. */
23019 data
= (data
<< 8) | 0xb0;
23021 md_number_to_chars (ptr
, data
, 4);
23026 /* Add an empty descriptor if there is no user-specified data. */
23027 ptr
= frag_more (4);
23028 md_number_to_chars (ptr
, 0, 4);
23035 /* Initialize the DWARF-2 unwind information for this procedure. */
23038 tc_arm_frame_initial_instructions (void)
23040 cfi_add_CFA_def_cfa (REG_SP
, 0);
23042 #endif /* OBJ_ELF */
23044 /* Convert REGNAME to a DWARF-2 register number. */
23047 tc_arm_regname_to_dw2regnum (char *regname
)
23049 int reg
= arm_reg_parse (®name
, REG_TYPE_RN
);
23053 /* PR 16694: Allow VFP registers as well. */
23054 reg
= arm_reg_parse (®name
, REG_TYPE_VFS
);
23058 reg
= arm_reg_parse (®name
, REG_TYPE_VFD
);
23067 tc_pe_dwarf2_emit_offset (symbolS
*symbol
, unsigned int size
)
23071 exp
.X_op
= O_secrel
;
23072 exp
.X_add_symbol
= symbol
;
23073 exp
.X_add_number
= 0;
23074 emit_expr (&exp
, size
);
23078 /* MD interface: Symbol and relocation handling. */
23080 /* Return the address within the segment that a PC-relative fixup is
23081 relative to. For ARM, PC-relative fixups applied to instructions
23082 are generally relative to the location of the fixup plus 8 bytes.
23083 Thumb branches are offset by 4, and Thumb loads relative to PC
23084 require special handling. */
23087 md_pcrel_from_section (fixS
* fixP
, segT seg
)
23089 offsetT base
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
23091 /* If this is pc-relative and we are going to emit a relocation
23092 then we just want to put out any pipeline compensation that the linker
23093 will need. Otherwise we want to use the calculated base.
23094 For WinCE we skip the bias for externals as well, since this
23095 is how the MS ARM-CE assembler behaves and we want to be compatible. */
23097 && ((fixP
->fx_addsy
&& S_GET_SEGMENT (fixP
->fx_addsy
) != seg
)
23098 || (arm_force_relocation (fixP
)
23100 && !S_IS_EXTERNAL (fixP
->fx_addsy
)
23106 switch (fixP
->fx_r_type
)
23108 /* PC relative addressing on the Thumb is slightly odd as the
23109 bottom two bits of the PC are forced to zero for the
23110 calculation. This happens *after* application of the
23111 pipeline offset. However, Thumb adrl already adjusts for
23112 this, so we need not do it again. */
23113 case BFD_RELOC_ARM_THUMB_ADD
:
23116 case BFD_RELOC_ARM_THUMB_OFFSET
:
23117 case BFD_RELOC_ARM_T32_OFFSET_IMM
:
23118 case BFD_RELOC_ARM_T32_ADD_PC12
:
23119 case BFD_RELOC_ARM_T32_CP_OFF_IMM
:
23120 return (base
+ 4) & ~3;
23122 /* Thumb branches are simply offset by +4. */
23123 case BFD_RELOC_THUMB_PCREL_BRANCH5
:
23124 case BFD_RELOC_THUMB_PCREL_BRANCH7
:
23125 case BFD_RELOC_THUMB_PCREL_BRANCH9
:
23126 case BFD_RELOC_THUMB_PCREL_BRANCH12
:
23127 case BFD_RELOC_THUMB_PCREL_BRANCH20
:
23128 case BFD_RELOC_THUMB_PCREL_BRANCH25
:
23129 case BFD_RELOC_THUMB_PCREL_BFCSEL
:
23130 case BFD_RELOC_ARM_THUMB_BF17
:
23131 case BFD_RELOC_ARM_THUMB_BF19
:
23132 case BFD_RELOC_ARM_THUMB_BF13
:
23133 case BFD_RELOC_ARM_THUMB_LOOP12
:
23136 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
23138 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
23139 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
23140 && ARM_IS_FUNC (fixP
->fx_addsy
)
23141 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
23142 base
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
23145 /* BLX is like branches above, but forces the low two bits of PC to
23147 case BFD_RELOC_THUMB_PCREL_BLX
:
23149 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
23150 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
23151 && THUMB_IS_FUNC (fixP
->fx_addsy
)
23152 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
23153 base
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
23154 return (base
+ 4) & ~3;
23156 /* ARM mode branches are offset by +8. However, the Windows CE
23157 loader expects the relocation not to take this into account. */
23158 case BFD_RELOC_ARM_PCREL_BLX
:
23160 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
23161 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
23162 && ARM_IS_FUNC (fixP
->fx_addsy
)
23163 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
23164 base
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
23167 case BFD_RELOC_ARM_PCREL_CALL
:
23169 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
23170 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
23171 && THUMB_IS_FUNC (fixP
->fx_addsy
)
23172 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
23173 base
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
23176 case BFD_RELOC_ARM_PCREL_BRANCH
:
23177 case BFD_RELOC_ARM_PCREL_JUMP
:
23178 case BFD_RELOC_ARM_PLT32
:
23180 /* When handling fixups immediately, because we have already
23181 discovered the value of a symbol, or the address of the frag involved
23182 we must account for the offset by +8, as the OS loader will never see the reloc.
23183 see fixup_segment() in write.c
23184 The S_IS_EXTERNAL test handles the case of global symbols.
23185 Those need the calculated base, not just the pipe compensation the linker will need. */
23187 && fixP
->fx_addsy
!= NULL
23188 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
23189 && (S_IS_EXTERNAL (fixP
->fx_addsy
) || !arm_force_relocation (fixP
)))
23197 /* ARM mode loads relative to PC are also offset by +8. Unlike
23198 branches, the Windows CE loader *does* expect the relocation
23199 to take this into account. */
23200 case BFD_RELOC_ARM_OFFSET_IMM
:
23201 case BFD_RELOC_ARM_OFFSET_IMM8
:
23202 case BFD_RELOC_ARM_HWLITERAL
:
23203 case BFD_RELOC_ARM_LITERAL
:
23204 case BFD_RELOC_ARM_CP_OFF_IMM
:
23208 /* Other PC-relative relocations are un-offset. */
23214 static bfd_boolean flag_warn_syms
= TRUE
;
23217 arm_tc_equal_in_insn (int c ATTRIBUTE_UNUSED
, char * name
)
23219 /* PR 18347 - Warn if the user attempts to create a symbol with the same
23220 name as an ARM instruction. Whilst strictly speaking it is allowed, it
23221 does mean that the resulting code might be very confusing to the reader.
23222 Also this warning can be triggered if the user omits an operand before
23223 an immediate address, eg:
23227 GAS treats this as an assignment of the value of the symbol foo to a
23228 symbol LDR, and so (without this code) it will not issue any kind of
23229 warning or error message.
23231 Note - ARM instructions are case-insensitive but the strings in the hash
23232 table are all stored in lower case, so we must first ensure that name is
23234 if (flag_warn_syms
&& arm_ops_hsh
)
23236 char * nbuf
= strdup (name
);
23239 for (p
= nbuf
; *p
; p
++)
23241 if (hash_find (arm_ops_hsh
, nbuf
) != NULL
)
23243 static struct hash_control
* already_warned
= NULL
;
23245 if (already_warned
== NULL
)
23246 already_warned
= hash_new ();
23247 /* Only warn about the symbol once. To keep the code
23248 simple we let hash_insert do the lookup for us. */
23249 if (hash_insert (already_warned
, name
, NULL
) == NULL
)
23250 as_warn (_("[-mwarn-syms]: Assignment makes a symbol match an ARM instruction: %s"), name
);
23259 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
23260 Otherwise we have no need to default values of symbols. */
23263 md_undefined_symbol (char * name ATTRIBUTE_UNUSED
)
23266 if (name
[0] == '_' && name
[1] == 'G'
23267 && streq (name
, GLOBAL_OFFSET_TABLE_NAME
))
23271 if (symbol_find (name
))
23272 as_bad (_("GOT already in the symbol table"));
23274 GOT_symbol
= symbol_new (name
, undefined_section
,
23275 (valueT
) 0, & zero_address_frag
);
23285 /* Subroutine of md_apply_fix. Check to see if an immediate can be
23286 computed as two separate immediate values, added together. We
23287 already know that this value cannot be computed by just one ARM
23290 static unsigned int
23291 validate_immediate_twopart (unsigned int val
,
23292 unsigned int * highpart
)
23297 for (i
= 0; i
< 32; i
+= 2)
23298 if (((a
= rotate_left (val
, i
)) & 0xff) != 0)
23304 * highpart
= (a
>> 8) | ((i
+ 24) << 7);
23306 else if (a
& 0xff0000)
23308 if (a
& 0xff000000)
23310 * highpart
= (a
>> 16) | ((i
+ 16) << 7);
23314 gas_assert (a
& 0xff000000);
23315 * highpart
= (a
>> 24) | ((i
+ 8) << 7);
23318 return (a
& 0xff) | (i
<< 7);
23325 validate_offset_imm (unsigned int val
, int hwse
)
23327 if ((hwse
&& val
> 255) || val
> 4095)
23332 /* Subroutine of md_apply_fix. Do those data_ops which can take a
23333 negative immediate constant by altering the instruction. A bit of
23338 by inverting the second operand, and
23341 by negating the second operand. */
23344 negate_data_op (unsigned long * instruction
,
23345 unsigned long value
)
23348 unsigned long negated
, inverted
;
23350 negated
= encode_arm_immediate (-value
);
23351 inverted
= encode_arm_immediate (~value
);
23353 op
= (*instruction
>> DATA_OP_SHIFT
) & 0xf;
23356 /* First negates. */
23357 case OPCODE_SUB
: /* ADD <-> SUB */
23358 new_inst
= OPCODE_ADD
;
23363 new_inst
= OPCODE_SUB
;
23367 case OPCODE_CMP
: /* CMP <-> CMN */
23368 new_inst
= OPCODE_CMN
;
23373 new_inst
= OPCODE_CMP
;
23377 /* Now Inverted ops. */
23378 case OPCODE_MOV
: /* MOV <-> MVN */
23379 new_inst
= OPCODE_MVN
;
23384 new_inst
= OPCODE_MOV
;
23388 case OPCODE_AND
: /* AND <-> BIC */
23389 new_inst
= OPCODE_BIC
;
23394 new_inst
= OPCODE_AND
;
23398 case OPCODE_ADC
: /* ADC <-> SBC */
23399 new_inst
= OPCODE_SBC
;
23404 new_inst
= OPCODE_ADC
;
23408 /* We cannot do anything. */
23413 if (value
== (unsigned) FAIL
)
23416 *instruction
&= OPCODE_MASK
;
23417 *instruction
|= new_inst
<< DATA_OP_SHIFT
;
23421 /* Like negate_data_op, but for Thumb-2. */
23423 static unsigned int
23424 thumb32_negate_data_op (offsetT
*instruction
, unsigned int value
)
23428 unsigned int negated
, inverted
;
23430 negated
= encode_thumb32_immediate (-value
);
23431 inverted
= encode_thumb32_immediate (~value
);
23433 rd
= (*instruction
>> 8) & 0xf;
23434 op
= (*instruction
>> T2_DATA_OP_SHIFT
) & 0xf;
23437 /* ADD <-> SUB. Includes CMP <-> CMN. */
23438 case T2_OPCODE_SUB
:
23439 new_inst
= T2_OPCODE_ADD
;
23443 case T2_OPCODE_ADD
:
23444 new_inst
= T2_OPCODE_SUB
;
23448 /* ORR <-> ORN. Includes MOV <-> MVN. */
23449 case T2_OPCODE_ORR
:
23450 new_inst
= T2_OPCODE_ORN
;
23454 case T2_OPCODE_ORN
:
23455 new_inst
= T2_OPCODE_ORR
;
23459 /* AND <-> BIC. TST has no inverted equivalent. */
23460 case T2_OPCODE_AND
:
23461 new_inst
= T2_OPCODE_BIC
;
23468 case T2_OPCODE_BIC
:
23469 new_inst
= T2_OPCODE_AND
;
23474 case T2_OPCODE_ADC
:
23475 new_inst
= T2_OPCODE_SBC
;
23479 case T2_OPCODE_SBC
:
23480 new_inst
= T2_OPCODE_ADC
;
23484 /* We cannot do anything. */
23489 if (value
== (unsigned int)FAIL
)
23492 *instruction
&= T2_OPCODE_MASK
;
23493 *instruction
|= new_inst
<< T2_DATA_OP_SHIFT
;
23497 /* Read a 32-bit thumb instruction from buf. */
23499 static unsigned long
23500 get_thumb32_insn (char * buf
)
23502 unsigned long insn
;
23503 insn
= md_chars_to_number (buf
, THUMB_SIZE
) << 16;
23504 insn
|= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
23509 /* We usually want to set the low bit on the address of thumb function
23510 symbols. In particular .word foo - . should have the low bit set.
23511 Generic code tries to fold the difference of two symbols to
23512 a constant. Prevent this and force a relocation when the first symbols
23513 is a thumb function. */
23516 arm_optimize_expr (expressionS
*l
, operatorT op
, expressionS
*r
)
23518 if (op
== O_subtract
23519 && l
->X_op
== O_symbol
23520 && r
->X_op
== O_symbol
23521 && THUMB_IS_FUNC (l
->X_add_symbol
))
23523 l
->X_op
= O_subtract
;
23524 l
->X_op_symbol
= r
->X_add_symbol
;
23525 l
->X_add_number
-= r
->X_add_number
;
23529 /* Process as normal. */
23533 /* Encode Thumb2 unconditional branches and calls. The encoding
23534 for the 2 are identical for the immediate values. */
23537 encode_thumb2_b_bl_offset (char * buf
, offsetT value
)
23539 #define T2I1I2MASK ((1 << 13) | (1 << 11))
23542 addressT S
, I1
, I2
, lo
, hi
;
23544 S
= (value
>> 24) & 0x01;
23545 I1
= (value
>> 23) & 0x01;
23546 I2
= (value
>> 22) & 0x01;
23547 hi
= (value
>> 12) & 0x3ff;
23548 lo
= (value
>> 1) & 0x7ff;
23549 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
23550 newval2
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
23551 newval
|= (S
<< 10) | hi
;
23552 newval2
&= ~T2I1I2MASK
;
23553 newval2
|= (((I1
^ S
) << 13) | ((I2
^ S
) << 11) | lo
) ^ T2I1I2MASK
;
23554 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
23555 md_number_to_chars (buf
+ THUMB_SIZE
, newval2
, THUMB_SIZE
);
23559 md_apply_fix (fixS
* fixP
,
23563 offsetT value
= * valP
;
23565 unsigned int newimm
;
23566 unsigned long temp
;
23568 char * buf
= fixP
->fx_where
+ fixP
->fx_frag
->fr_literal
;
23570 gas_assert (fixP
->fx_r_type
<= BFD_RELOC_UNUSED
);
23572 /* Note whether this will delete the relocation. */
23574 if (fixP
->fx_addsy
== 0 && !fixP
->fx_pcrel
)
23577 /* On a 64-bit host, silently truncate 'value' to 32 bits for
23578 consistency with the behaviour on 32-bit hosts. Remember value
23580 value
&= 0xffffffff;
23581 value
^= 0x80000000;
23582 value
-= 0x80000000;
23585 fixP
->fx_addnumber
= value
;
23587 /* Same treatment for fixP->fx_offset. */
23588 fixP
->fx_offset
&= 0xffffffff;
23589 fixP
->fx_offset
^= 0x80000000;
23590 fixP
->fx_offset
-= 0x80000000;
23592 switch (fixP
->fx_r_type
)
23594 case BFD_RELOC_NONE
:
23595 /* This will need to go in the object file. */
23599 case BFD_RELOC_ARM_IMMEDIATE
:
23600 /* We claim that this fixup has been processed here,
23601 even if in fact we generate an error because we do
23602 not have a reloc for it, so tc_gen_reloc will reject it. */
23605 if (fixP
->fx_addsy
)
23607 const char *msg
= 0;
23609 if (! S_IS_DEFINED (fixP
->fx_addsy
))
23610 msg
= _("undefined symbol %s used as an immediate value");
23611 else if (S_GET_SEGMENT (fixP
->fx_addsy
) != seg
)
23612 msg
= _("symbol %s is in a different section");
23613 else if (S_IS_WEAK (fixP
->fx_addsy
))
23614 msg
= _("symbol %s is weak and may be overridden later");
23618 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23619 msg
, S_GET_NAME (fixP
->fx_addsy
));
23624 temp
= md_chars_to_number (buf
, INSN_SIZE
);
23626 /* If the offset is negative, we should use encoding A2 for ADR. */
23627 if ((temp
& 0xfff0000) == 0x28f0000 && value
< 0)
23628 newimm
= negate_data_op (&temp
, value
);
23631 newimm
= encode_arm_immediate (value
);
23633 /* If the instruction will fail, see if we can fix things up by
23634 changing the opcode. */
23635 if (newimm
== (unsigned int) FAIL
)
23636 newimm
= negate_data_op (&temp
, value
);
23637 /* MOV accepts both ARM modified immediate (A1 encoding) and
23638 UINT16 (A2 encoding) when possible, MOVW only accepts UINT16.
23639 When disassembling, MOV is preferred when there is no encoding
23641 if (newimm
== (unsigned int) FAIL
23642 && ((temp
>> DATA_OP_SHIFT
) & 0xf) == OPCODE_MOV
23643 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2
)
23644 && !((temp
>> SBIT_SHIFT
) & 0x1)
23645 && value
>= 0 && value
<= 0xffff)
23647 /* Clear bits[23:20] to change encoding from A1 to A2. */
23648 temp
&= 0xff0fffff;
23649 /* Encoding high 4bits imm. Code below will encode the remaining
23651 temp
|= (value
& 0x0000f000) << 4;
23652 newimm
= value
& 0x00000fff;
23656 if (newimm
== (unsigned int) FAIL
)
23658 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23659 _("invalid constant (%lx) after fixup"),
23660 (unsigned long) value
);
23664 newimm
|= (temp
& 0xfffff000);
23665 md_number_to_chars (buf
, (valueT
) newimm
, INSN_SIZE
);
23668 case BFD_RELOC_ARM_ADRL_IMMEDIATE
:
23670 unsigned int highpart
= 0;
23671 unsigned int newinsn
= 0xe1a00000; /* nop. */
23673 if (fixP
->fx_addsy
)
23675 const char *msg
= 0;
23677 if (! S_IS_DEFINED (fixP
->fx_addsy
))
23678 msg
= _("undefined symbol %s used as an immediate value");
23679 else if (S_GET_SEGMENT (fixP
->fx_addsy
) != seg
)
23680 msg
= _("symbol %s is in a different section");
23681 else if (S_IS_WEAK (fixP
->fx_addsy
))
23682 msg
= _("symbol %s is weak and may be overridden later");
23686 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23687 msg
, S_GET_NAME (fixP
->fx_addsy
));
23692 newimm
= encode_arm_immediate (value
);
23693 temp
= md_chars_to_number (buf
, INSN_SIZE
);
23695 /* If the instruction will fail, see if we can fix things up by
23696 changing the opcode. */
23697 if (newimm
== (unsigned int) FAIL
23698 && (newimm
= negate_data_op (& temp
, value
)) == (unsigned int) FAIL
)
23700 /* No ? OK - try using two ADD instructions to generate
23702 newimm
= validate_immediate_twopart (value
, & highpart
);
23704 /* Yes - then make sure that the second instruction is
23706 if (newimm
!= (unsigned int) FAIL
)
23708 /* Still No ? Try using a negated value. */
23709 else if ((newimm
= validate_immediate_twopart (- value
, & highpart
)) != (unsigned int) FAIL
)
23710 temp
= newinsn
= (temp
& OPCODE_MASK
) | OPCODE_SUB
<< DATA_OP_SHIFT
;
23711 /* Otherwise - give up. */
23714 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23715 _("unable to compute ADRL instructions for PC offset of 0x%lx"),
23720 /* Replace the first operand in the 2nd instruction (which
23721 is the PC) with the destination register. We have
23722 already added in the PC in the first instruction and we
23723 do not want to do it again. */
23724 newinsn
&= ~ 0xf0000;
23725 newinsn
|= ((newinsn
& 0x0f000) << 4);
23728 newimm
|= (temp
& 0xfffff000);
23729 md_number_to_chars (buf
, (valueT
) newimm
, INSN_SIZE
);
23731 highpart
|= (newinsn
& 0xfffff000);
23732 md_number_to_chars (buf
+ INSN_SIZE
, (valueT
) highpart
, INSN_SIZE
);
23736 case BFD_RELOC_ARM_OFFSET_IMM
:
23737 if (!fixP
->fx_done
&& seg
->use_rela_p
)
23739 /* Fall through. */
23741 case BFD_RELOC_ARM_LITERAL
:
23747 if (validate_offset_imm (value
, 0) == FAIL
)
23749 if (fixP
->fx_r_type
== BFD_RELOC_ARM_LITERAL
)
23750 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23751 _("invalid literal constant: pool needs to be closer"));
23753 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23754 _("bad immediate value for offset (%ld)"),
23759 newval
= md_chars_to_number (buf
, INSN_SIZE
);
23761 newval
&= 0xfffff000;
23764 newval
&= 0xff7ff000;
23765 newval
|= value
| (sign
? INDEX_UP
: 0);
23767 md_number_to_chars (buf
, newval
, INSN_SIZE
);
23770 case BFD_RELOC_ARM_OFFSET_IMM8
:
23771 case BFD_RELOC_ARM_HWLITERAL
:
23777 if (validate_offset_imm (value
, 1) == FAIL
)
23779 if (fixP
->fx_r_type
== BFD_RELOC_ARM_HWLITERAL
)
23780 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23781 _("invalid literal constant: pool needs to be closer"));
23783 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23784 _("bad immediate value for 8-bit offset (%ld)"),
23789 newval
= md_chars_to_number (buf
, INSN_SIZE
);
23791 newval
&= 0xfffff0f0;
23794 newval
&= 0xff7ff0f0;
23795 newval
|= ((value
>> 4) << 8) | (value
& 0xf) | (sign
? INDEX_UP
: 0);
23797 md_number_to_chars (buf
, newval
, INSN_SIZE
);
23800 case BFD_RELOC_ARM_T32_OFFSET_U8
:
23801 if (value
< 0 || value
> 1020 || value
% 4 != 0)
23802 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23803 _("bad immediate value for offset (%ld)"), (long) value
);
23806 newval
= md_chars_to_number (buf
+2, THUMB_SIZE
);
23808 md_number_to_chars (buf
+2, newval
, THUMB_SIZE
);
23811 case BFD_RELOC_ARM_T32_OFFSET_IMM
:
23812 /* This is a complicated relocation used for all varieties of Thumb32
23813 load/store instruction with immediate offset:
23815 1110 100P u1WL NNNN XXXX YYYY iiii iiii - +/-(U) pre/post(P) 8-bit,
23816 *4, optional writeback(W)
23817 (doubleword load/store)
23819 1111 100S uTTL 1111 XXXX iiii iiii iiii - +/-(U) 12-bit PC-rel
23820 1111 100S 0TTL NNNN XXXX 1Pu1 iiii iiii - +/-(U) pre/post(P) 8-bit
23821 1111 100S 0TTL NNNN XXXX 1110 iiii iiii - positive 8-bit (T instruction)
23822 1111 100S 1TTL NNNN XXXX iiii iiii iiii - positive 12-bit
23823 1111 100S 0TTL NNNN XXXX 1100 iiii iiii - negative 8-bit
23825 Uppercase letters indicate bits that are already encoded at
23826 this point. Lowercase letters are our problem. For the
23827 second block of instructions, the secondary opcode nybble
23828 (bits 8..11) is present, and bit 23 is zero, even if this is
23829 a PC-relative operation. */
23830 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
23832 newval
|= md_chars_to_number (buf
+THUMB_SIZE
, THUMB_SIZE
);
23834 if ((newval
& 0xf0000000) == 0xe0000000)
23836 /* Doubleword load/store: 8-bit offset, scaled by 4. */
23838 newval
|= (1 << 23);
23841 if (value
% 4 != 0)
23843 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23844 _("offset not a multiple of 4"));
23850 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23851 _("offset out of range"));
23856 else if ((newval
& 0x000f0000) == 0x000f0000)
23858 /* PC-relative, 12-bit offset. */
23860 newval
|= (1 << 23);
23865 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23866 _("offset out of range"));
23871 else if ((newval
& 0x00000100) == 0x00000100)
23873 /* Writeback: 8-bit, +/- offset. */
23875 newval
|= (1 << 9);
23880 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23881 _("offset out of range"));
23886 else if ((newval
& 0x00000f00) == 0x00000e00)
23888 /* T-instruction: positive 8-bit offset. */
23889 if (value
< 0 || value
> 0xff)
23891 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23892 _("offset out of range"));
23900 /* Positive 12-bit or negative 8-bit offset. */
23904 newval
|= (1 << 23);
23914 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23915 _("offset out of range"));
23922 md_number_to_chars (buf
, (newval
>> 16) & 0xffff, THUMB_SIZE
);
23923 md_number_to_chars (buf
+ THUMB_SIZE
, newval
& 0xffff, THUMB_SIZE
);
23926 case BFD_RELOC_ARM_SHIFT_IMM
:
23927 newval
= md_chars_to_number (buf
, INSN_SIZE
);
23928 if (((unsigned long) value
) > 32
23930 && (((newval
& 0x60) == 0) || (newval
& 0x60) == 0x60)))
23932 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23933 _("shift expression is too large"));
23938 /* Shifts of zero must be done as lsl. */
23940 else if (value
== 32)
23942 newval
&= 0xfffff07f;
23943 newval
|= (value
& 0x1f) << 7;
23944 md_number_to_chars (buf
, newval
, INSN_SIZE
);
23947 case BFD_RELOC_ARM_T32_IMMEDIATE
:
23948 case BFD_RELOC_ARM_T32_ADD_IMM
:
23949 case BFD_RELOC_ARM_T32_IMM12
:
23950 case BFD_RELOC_ARM_T32_ADD_PC12
:
23951 /* We claim that this fixup has been processed here,
23952 even if in fact we generate an error because we do
23953 not have a reloc for it, so tc_gen_reloc will reject it. */
23957 && ! S_IS_DEFINED (fixP
->fx_addsy
))
23959 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23960 _("undefined symbol %s used as an immediate value"),
23961 S_GET_NAME (fixP
->fx_addsy
));
23965 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
23967 newval
|= md_chars_to_number (buf
+2, THUMB_SIZE
);
23970 if ((fixP
->fx_r_type
== BFD_RELOC_ARM_T32_IMMEDIATE
23971 /* ARMv8-M Baseline MOV will reach here, but it doesn't support
23972 Thumb2 modified immediate encoding (T2). */
23973 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2
))
23974 || fixP
->fx_r_type
== BFD_RELOC_ARM_T32_ADD_IMM
)
23976 newimm
= encode_thumb32_immediate (value
);
23977 if (newimm
== (unsigned int) FAIL
)
23978 newimm
= thumb32_negate_data_op (&newval
, value
);
23980 if (newimm
== (unsigned int) FAIL
)
23982 if (fixP
->fx_r_type
!= BFD_RELOC_ARM_T32_IMMEDIATE
)
23984 /* Turn add/sum into addw/subw. */
23985 if (fixP
->fx_r_type
== BFD_RELOC_ARM_T32_ADD_IMM
)
23986 newval
= (newval
& 0xfeffffff) | 0x02000000;
23987 /* No flat 12-bit imm encoding for addsw/subsw. */
23988 if ((newval
& 0x00100000) == 0)
23990 /* 12 bit immediate for addw/subw. */
23994 newval
^= 0x00a00000;
23997 newimm
= (unsigned int) FAIL
;
24004 /* MOV accepts both Thumb2 modified immediate (T2 encoding) and
24005 UINT16 (T3 encoding), MOVW only accepts UINT16. When
24006 disassembling, MOV is preferred when there is no encoding
24008 if (((newval
>> T2_DATA_OP_SHIFT
) & 0xf) == T2_OPCODE_ORR
24009 /* NOTE: MOV uses the ORR opcode in Thumb 2 mode
24010 but with the Rn field [19:16] set to 1111. */
24011 && (((newval
>> 16) & 0xf) == 0xf)
24012 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2_v8m
)
24013 && !((newval
>> T2_SBIT_SHIFT
) & 0x1)
24014 && value
>= 0 && value
<= 0xffff)
24016 /* Toggle bit[25] to change encoding from T2 to T3. */
24018 /* Clear bits[19:16]. */
24019 newval
&= 0xfff0ffff;
24020 /* Encoding high 4bits imm. Code below will encode the
24021 remaining low 12bits. */
24022 newval
|= (value
& 0x0000f000) << 4;
24023 newimm
= value
& 0x00000fff;
24028 if (newimm
== (unsigned int)FAIL
)
24030 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24031 _("invalid constant (%lx) after fixup"),
24032 (unsigned long) value
);
24036 newval
|= (newimm
& 0x800) << 15;
24037 newval
|= (newimm
& 0x700) << 4;
24038 newval
|= (newimm
& 0x0ff);
24040 md_number_to_chars (buf
, (valueT
) ((newval
>> 16) & 0xffff), THUMB_SIZE
);
24041 md_number_to_chars (buf
+2, (valueT
) (newval
& 0xffff), THUMB_SIZE
);
24044 case BFD_RELOC_ARM_SMC
:
24045 if (((unsigned long) value
) > 0xffff)
24046 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24047 _("invalid smc expression"));
24048 newval
= md_chars_to_number (buf
, INSN_SIZE
);
24049 newval
|= (value
& 0xf) | ((value
& 0xfff0) << 4);
24050 md_number_to_chars (buf
, newval
, INSN_SIZE
);
24053 case BFD_RELOC_ARM_HVC
:
24054 if (((unsigned long) value
) > 0xffff)
24055 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24056 _("invalid hvc expression"));
24057 newval
= md_chars_to_number (buf
, INSN_SIZE
);
24058 newval
|= (value
& 0xf) | ((value
& 0xfff0) << 4);
24059 md_number_to_chars (buf
, newval
, INSN_SIZE
);
24062 case BFD_RELOC_ARM_SWI
:
24063 if (fixP
->tc_fix_data
!= 0)
24065 if (((unsigned long) value
) > 0xff)
24066 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24067 _("invalid swi expression"));
24068 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
24070 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
24074 if (((unsigned long) value
) > 0x00ffffff)
24075 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24076 _("invalid swi expression"));
24077 newval
= md_chars_to_number (buf
, INSN_SIZE
);
24079 md_number_to_chars (buf
, newval
, INSN_SIZE
);
24083 case BFD_RELOC_ARM_MULTI
:
24084 if (((unsigned long) value
) > 0xffff)
24085 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24086 _("invalid expression in load/store multiple"));
24087 newval
= value
| md_chars_to_number (buf
, INSN_SIZE
);
24088 md_number_to_chars (buf
, newval
, INSN_SIZE
);
24092 case BFD_RELOC_ARM_PCREL_CALL
:
24094 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
)
24096 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
24097 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
24098 && THUMB_IS_FUNC (fixP
->fx_addsy
))
24099 /* Flip the bl to blx. This is a simple flip
24100 bit here because we generate PCREL_CALL for
24101 unconditional bls. */
24103 newval
= md_chars_to_number (buf
, INSN_SIZE
);
24104 newval
= newval
| 0x10000000;
24105 md_number_to_chars (buf
, newval
, INSN_SIZE
);
24111 goto arm_branch_common
;
24113 case BFD_RELOC_ARM_PCREL_JUMP
:
24114 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
)
24116 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
24117 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
24118 && THUMB_IS_FUNC (fixP
->fx_addsy
))
24120 /* This would map to a bl<cond>, b<cond>,
24121 b<always> to a Thumb function. We
24122 need to force a relocation for this particular
24124 newval
= md_chars_to_number (buf
, INSN_SIZE
);
24127 /* Fall through. */
24129 case BFD_RELOC_ARM_PLT32
:
24131 case BFD_RELOC_ARM_PCREL_BRANCH
:
24133 goto arm_branch_common
;
24135 case BFD_RELOC_ARM_PCREL_BLX
:
24138 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
)
24140 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
24141 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
24142 && ARM_IS_FUNC (fixP
->fx_addsy
))
24144 /* Flip the blx to a bl and warn. */
24145 const char *name
= S_GET_NAME (fixP
->fx_addsy
);
24146 newval
= 0xeb000000;
24147 as_warn_where (fixP
->fx_file
, fixP
->fx_line
,
24148 _("blx to '%s' an ARM ISA state function changed to bl"),
24150 md_number_to_chars (buf
, newval
, INSN_SIZE
);
24156 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
)
24157 fixP
->fx_r_type
= BFD_RELOC_ARM_PCREL_CALL
;
24161 /* We are going to store value (shifted right by two) in the
24162 instruction, in a 24 bit, signed field. Bits 26 through 32 either
24163 all clear or all set and bit 0 must be clear. For B/BL bit 1 must
24166 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24167 _("misaligned branch destination"));
24168 if ((value
& (offsetT
)0xfe000000) != (offsetT
)0
24169 && (value
& (offsetT
)0xfe000000) != (offsetT
)0xfe000000)
24170 as_bad_where (fixP
->fx_file
, fixP
->fx_line
, BAD_RANGE
);
24172 if (fixP
->fx_done
|| !seg
->use_rela_p
)
24174 newval
= md_chars_to_number (buf
, INSN_SIZE
);
24175 newval
|= (value
>> 2) & 0x00ffffff;
24176 /* Set the H bit on BLX instructions. */
24180 newval
|= 0x01000000;
24182 newval
&= ~0x01000000;
24184 md_number_to_chars (buf
, newval
, INSN_SIZE
);
24188 case BFD_RELOC_THUMB_PCREL_BRANCH7
: /* CBZ */
24189 /* CBZ can only branch forward. */
24191 /* Attempts to use CBZ to branch to the next instruction
24192 (which, strictly speaking, are prohibited) will be turned into
24195 FIXME: It may be better to remove the instruction completely and
24196 perform relaxation. */
24199 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
24200 newval
= 0xbf00; /* NOP encoding T1 */
24201 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
24206 as_bad_where (fixP
->fx_file
, fixP
->fx_line
, BAD_RANGE
);
24208 if (fixP
->fx_done
|| !seg
->use_rela_p
)
24210 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
24211 newval
|= ((value
& 0x3e) << 2) | ((value
& 0x40) << 3);
24212 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
24217 case BFD_RELOC_THUMB_PCREL_BRANCH9
: /* Conditional branch. */
24218 if ((value
& ~0xff) && ((value
& ~0xff) != ~0xff))
24219 as_bad_where (fixP
->fx_file
, fixP
->fx_line
, BAD_RANGE
);
24221 if (fixP
->fx_done
|| !seg
->use_rela_p
)
24223 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
24224 newval
|= (value
& 0x1ff) >> 1;
24225 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
24229 case BFD_RELOC_THUMB_PCREL_BRANCH12
: /* Unconditional branch. */
24230 if ((value
& ~0x7ff) && ((value
& ~0x7ff) != ~0x7ff))
24231 as_bad_where (fixP
->fx_file
, fixP
->fx_line
, BAD_RANGE
);
24233 if (fixP
->fx_done
|| !seg
->use_rela_p
)
24235 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
24236 newval
|= (value
& 0xfff) >> 1;
24237 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
24241 case BFD_RELOC_THUMB_PCREL_BRANCH20
:
24243 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
24244 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
24245 && ARM_IS_FUNC (fixP
->fx_addsy
)
24246 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
24248 /* Force a relocation for a branch 20 bits wide. */
24251 if ((value
& ~0x1fffff) && ((value
& ~0x0fffff) != ~0x0fffff))
24252 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24253 _("conditional branch out of range"));
24255 if (fixP
->fx_done
|| !seg
->use_rela_p
)
24258 addressT S
, J1
, J2
, lo
, hi
;
24260 S
= (value
& 0x00100000) >> 20;
24261 J2
= (value
& 0x00080000) >> 19;
24262 J1
= (value
& 0x00040000) >> 18;
24263 hi
= (value
& 0x0003f000) >> 12;
24264 lo
= (value
& 0x00000ffe) >> 1;
24266 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
24267 newval2
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
24268 newval
|= (S
<< 10) | hi
;
24269 newval2
|= (J1
<< 13) | (J2
<< 11) | lo
;
24270 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
24271 md_number_to_chars (buf
+ THUMB_SIZE
, newval2
, THUMB_SIZE
);
24275 case BFD_RELOC_THUMB_PCREL_BLX
:
24276 /* If there is a blx from a thumb state function to
24277 another thumb function flip this to a bl and warn
24281 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
24282 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
24283 && THUMB_IS_FUNC (fixP
->fx_addsy
))
24285 const char *name
= S_GET_NAME (fixP
->fx_addsy
);
24286 as_warn_where (fixP
->fx_file
, fixP
->fx_line
,
24287 _("blx to Thumb func '%s' from Thumb ISA state changed to bl"),
24289 newval
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
24290 newval
= newval
| 0x1000;
24291 md_number_to_chars (buf
+THUMB_SIZE
, newval
, THUMB_SIZE
);
24292 fixP
->fx_r_type
= BFD_RELOC_THUMB_PCREL_BRANCH23
;
24297 goto thumb_bl_common
;
24299 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
24300 /* A bl from Thumb state ISA to an internal ARM state function
24301 is converted to a blx. */
24303 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
24304 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
24305 && ARM_IS_FUNC (fixP
->fx_addsy
)
24306 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
24308 newval
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
24309 newval
= newval
& ~0x1000;
24310 md_number_to_chars (buf
+THUMB_SIZE
, newval
, THUMB_SIZE
);
24311 fixP
->fx_r_type
= BFD_RELOC_THUMB_PCREL_BLX
;
24317 if (fixP
->fx_r_type
== BFD_RELOC_THUMB_PCREL_BLX
)
24318 /* For a BLX instruction, make sure that the relocation is rounded up
24319 to a word boundary. This follows the semantics of the instruction
24320 which specifies that bit 1 of the target address will come from bit
24321 1 of the base address. */
24322 value
= (value
+ 3) & ~ 3;
24325 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
24326 && fixP
->fx_r_type
== BFD_RELOC_THUMB_PCREL_BLX
)
24327 fixP
->fx_r_type
= BFD_RELOC_THUMB_PCREL_BRANCH23
;
24330 if ((value
& ~0x3fffff) && ((value
& ~0x3fffff) != ~0x3fffff))
24332 if (!(ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2
)))
24333 as_bad_where (fixP
->fx_file
, fixP
->fx_line
, BAD_RANGE
);
24334 else if ((value
& ~0x1ffffff)
24335 && ((value
& ~0x1ffffff) != ~0x1ffffff))
24336 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24337 _("Thumb2 branch out of range"));
24340 if (fixP
->fx_done
|| !seg
->use_rela_p
)
24341 encode_thumb2_b_bl_offset (buf
, value
);
24345 case BFD_RELOC_THUMB_PCREL_BRANCH25
:
24346 if ((value
& ~0x0ffffff) && ((value
& ~0x0ffffff) != ~0x0ffffff))
24347 as_bad_where (fixP
->fx_file
, fixP
->fx_line
, BAD_RANGE
);
24349 if (fixP
->fx_done
|| !seg
->use_rela_p
)
24350 encode_thumb2_b_bl_offset (buf
, value
);
24355 if (fixP
->fx_done
|| !seg
->use_rela_p
)
24360 if (fixP
->fx_done
|| !seg
->use_rela_p
)
24361 md_number_to_chars (buf
, value
, 2);
24365 case BFD_RELOC_ARM_TLS_CALL
:
24366 case BFD_RELOC_ARM_THM_TLS_CALL
:
24367 case BFD_RELOC_ARM_TLS_DESCSEQ
:
24368 case BFD_RELOC_ARM_THM_TLS_DESCSEQ
:
24369 case BFD_RELOC_ARM_TLS_GOTDESC
:
24370 case BFD_RELOC_ARM_TLS_GD32
:
24371 case BFD_RELOC_ARM_TLS_LE32
:
24372 case BFD_RELOC_ARM_TLS_IE32
:
24373 case BFD_RELOC_ARM_TLS_LDM32
:
24374 case BFD_RELOC_ARM_TLS_LDO32
:
24375 S_SET_THREAD_LOCAL (fixP
->fx_addsy
);
24378 /* Same handling as above, but with the arm_fdpic guard. */
24379 case BFD_RELOC_ARM_TLS_GD32_FDPIC
:
24380 case BFD_RELOC_ARM_TLS_IE32_FDPIC
:
24381 case BFD_RELOC_ARM_TLS_LDM32_FDPIC
:
24384 S_SET_THREAD_LOCAL (fixP
->fx_addsy
);
24388 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24389 _("Relocation supported only in FDPIC mode"));
24393 case BFD_RELOC_ARM_GOT32
:
24394 case BFD_RELOC_ARM_GOTOFF
:
24397 case BFD_RELOC_ARM_GOT_PREL
:
24398 if (fixP
->fx_done
|| !seg
->use_rela_p
)
24399 md_number_to_chars (buf
, value
, 4);
24402 case BFD_RELOC_ARM_TARGET2
:
24403 /* TARGET2 is not partial-inplace, so we need to write the
24404 addend here for REL targets, because it won't be written out
24405 during reloc processing later. */
24406 if (fixP
->fx_done
|| !seg
->use_rela_p
)
24407 md_number_to_chars (buf
, fixP
->fx_offset
, 4);
24410 /* Relocations for FDPIC. */
24411 case BFD_RELOC_ARM_GOTFUNCDESC
:
24412 case BFD_RELOC_ARM_GOTOFFFUNCDESC
:
24413 case BFD_RELOC_ARM_FUNCDESC
:
24416 if (fixP
->fx_done
|| !seg
->use_rela_p
)
24417 md_number_to_chars (buf
, 0, 4);
24421 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24422 _("Relocation supported only in FDPIC mode"));
24427 case BFD_RELOC_RVA
:
24429 case BFD_RELOC_ARM_TARGET1
:
24430 case BFD_RELOC_ARM_ROSEGREL32
:
24431 case BFD_RELOC_ARM_SBREL32
:
24432 case BFD_RELOC_32_PCREL
:
24434 case BFD_RELOC_32_SECREL
:
24436 if (fixP
->fx_done
|| !seg
->use_rela_p
)
24438 /* For WinCE we only do this for pcrel fixups. */
24439 if (fixP
->fx_done
|| fixP
->fx_pcrel
)
24441 md_number_to_chars (buf
, value
, 4);
24445 case BFD_RELOC_ARM_PREL31
:
24446 if (fixP
->fx_done
|| !seg
->use_rela_p
)
24448 newval
= md_chars_to_number (buf
, 4) & 0x80000000;
24449 if ((value
^ (value
>> 1)) & 0x40000000)
24451 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24452 _("rel31 relocation overflow"));
24454 newval
|= value
& 0x7fffffff;
24455 md_number_to_chars (buf
, newval
, 4);
24460 case BFD_RELOC_ARM_CP_OFF_IMM
:
24461 case BFD_RELOC_ARM_T32_CP_OFF_IMM
:
24462 if (fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM
)
24463 newval
= md_chars_to_number (buf
, INSN_SIZE
);
24465 newval
= get_thumb32_insn (buf
);
24466 if ((newval
& 0x0f200f00) == 0x0d000900)
24468 /* This is a fp16 vstr/vldr. The immediate offset in the mnemonic
24469 has permitted values that are multiples of 2, in the range 0
24471 if (value
< -510 || value
> 510 || (value
& 1))
24472 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24473 _("co-processor offset out of range"));
24475 else if (value
< -1023 || value
> 1023 || (value
& 3))
24476 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24477 _("co-processor offset out of range"));
24482 if (fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM
24483 || fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM_S2
)
24484 newval
= md_chars_to_number (buf
, INSN_SIZE
);
24486 newval
= get_thumb32_insn (buf
);
24488 newval
&= 0xffffff00;
24491 newval
&= 0xff7fff00;
24492 if ((newval
& 0x0f200f00) == 0x0d000900)
24494 /* This is a fp16 vstr/vldr.
24496 It requires the immediate offset in the instruction is shifted
24497 left by 1 to be a half-word offset.
24499 Here, left shift by 1 first, and later right shift by 2
24500 should get the right offset. */
24503 newval
|= (value
>> 2) | (sign
? INDEX_UP
: 0);
24505 if (fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM
24506 || fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM_S2
)
24507 md_number_to_chars (buf
, newval
, INSN_SIZE
);
24509 put_thumb32_insn (buf
, newval
);
24512 case BFD_RELOC_ARM_CP_OFF_IMM_S2
:
24513 case BFD_RELOC_ARM_T32_CP_OFF_IMM_S2
:
24514 if (value
< -255 || value
> 255)
24515 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24516 _("co-processor offset out of range"));
24518 goto cp_off_common
;
24520 case BFD_RELOC_ARM_THUMB_OFFSET
:
24521 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
24522 /* Exactly what ranges, and where the offset is inserted depends
24523 on the type of instruction, we can establish this from the
24525 switch (newval
>> 12)
24527 case 4: /* PC load. */
24528 /* Thumb PC loads are somewhat odd, bit 1 of the PC is
24529 forced to zero for these loads; md_pcrel_from has already
24530 compensated for this. */
24532 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24533 _("invalid offset, target not word aligned (0x%08lX)"),
24534 (((unsigned long) fixP
->fx_frag
->fr_address
24535 + (unsigned long) fixP
->fx_where
) & ~3)
24536 + (unsigned long) value
);
24538 if (value
& ~0x3fc)
24539 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24540 _("invalid offset, value too big (0x%08lX)"),
24543 newval
|= value
>> 2;
24546 case 9: /* SP load/store. */
24547 if (value
& ~0x3fc)
24548 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24549 _("invalid offset, value too big (0x%08lX)"),
24551 newval
|= value
>> 2;
24554 case 6: /* Word load/store. */
24556 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24557 _("invalid offset, value too big (0x%08lX)"),
24559 newval
|= value
<< 4; /* 6 - 2. */
24562 case 7: /* Byte load/store. */
24564 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24565 _("invalid offset, value too big (0x%08lX)"),
24567 newval
|= value
<< 6;
24570 case 8: /* Halfword load/store. */
24572 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24573 _("invalid offset, value too big (0x%08lX)"),
24575 newval
|= value
<< 5; /* 6 - 1. */
24579 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24580 "Unable to process relocation for thumb opcode: %lx",
24581 (unsigned long) newval
);
24584 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
24587 case BFD_RELOC_ARM_THUMB_ADD
:
24588 /* This is a complicated relocation, since we use it for all of
24589 the following immediate relocations:
24593 9bit ADD/SUB SP word-aligned
24594 10bit ADD PC/SP word-aligned
24596 The type of instruction being processed is encoded in the
24603 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
24605 int rd
= (newval
>> 4) & 0xf;
24606 int rs
= newval
& 0xf;
24607 int subtract
= !!(newval
& 0x8000);
24609 /* Check for HI regs, only very restricted cases allowed:
24610 Adjusting SP, and using PC or SP to get an address. */
24611 if ((rd
> 7 && (rd
!= REG_SP
|| rs
!= REG_SP
))
24612 || (rs
> 7 && rs
!= REG_SP
&& rs
!= REG_PC
))
24613 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24614 _("invalid Hi register with immediate"));
24616 /* If value is negative, choose the opposite instruction. */
24620 subtract
= !subtract
;
24622 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24623 _("immediate value out of range"));
24628 if (value
& ~0x1fc)
24629 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24630 _("invalid immediate for stack address calculation"));
24631 newval
= subtract
? T_OPCODE_SUB_ST
: T_OPCODE_ADD_ST
;
24632 newval
|= value
>> 2;
24634 else if (rs
== REG_PC
|| rs
== REG_SP
)
24636 /* PR gas/18541. If the addition is for a defined symbol
24637 within range of an ADR instruction then accept it. */
24640 && fixP
->fx_addsy
!= NULL
)
24644 if (! S_IS_DEFINED (fixP
->fx_addsy
)
24645 || S_GET_SEGMENT (fixP
->fx_addsy
) != seg
24646 || S_IS_WEAK (fixP
->fx_addsy
))
24648 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24649 _("address calculation needs a strongly defined nearby symbol"));
24653 offsetT v
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
24655 /* Round up to the next 4-byte boundary. */
24660 v
= S_GET_VALUE (fixP
->fx_addsy
) - v
;
24664 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24665 _("symbol too far away"));
24675 if (subtract
|| value
& ~0x3fc)
24676 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24677 _("invalid immediate for address calculation (value = 0x%08lX)"),
24678 (unsigned long) (subtract
? - value
: value
));
24679 newval
= (rs
== REG_PC
? T_OPCODE_ADD_PC
: T_OPCODE_ADD_SP
);
24681 newval
|= value
>> 2;
24686 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24687 _("immediate value out of range"));
24688 newval
= subtract
? T_OPCODE_SUB_I8
: T_OPCODE_ADD_I8
;
24689 newval
|= (rd
<< 8) | value
;
24694 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24695 _("immediate value out of range"));
24696 newval
= subtract
? T_OPCODE_SUB_I3
: T_OPCODE_ADD_I3
;
24697 newval
|= rd
| (rs
<< 3) | (value
<< 6);
24700 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
24703 case BFD_RELOC_ARM_THUMB_IMM
:
24704 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
24705 if (value
< 0 || value
> 255)
24706 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24707 _("invalid immediate: %ld is out of range"),
24710 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
24713 case BFD_RELOC_ARM_THUMB_SHIFT
:
24714 /* 5bit shift value (0..32). LSL cannot take 32. */
24715 newval
= md_chars_to_number (buf
, THUMB_SIZE
) & 0xf83f;
24716 temp
= newval
& 0xf800;
24717 if (value
< 0 || value
> 32 || (value
== 32 && temp
== T_OPCODE_LSL_I
))
24718 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24719 _("invalid shift value: %ld"), (long) value
);
24720 /* Shifts of zero must be encoded as LSL. */
24722 newval
= (newval
& 0x003f) | T_OPCODE_LSL_I
;
24723 /* Shifts of 32 are encoded as zero. */
24724 else if (value
== 32)
24726 newval
|= value
<< 6;
24727 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
24730 case BFD_RELOC_VTABLE_INHERIT
:
24731 case BFD_RELOC_VTABLE_ENTRY
:
24735 case BFD_RELOC_ARM_MOVW
:
24736 case BFD_RELOC_ARM_MOVT
:
24737 case BFD_RELOC_ARM_THUMB_MOVW
:
24738 case BFD_RELOC_ARM_THUMB_MOVT
:
24739 if (fixP
->fx_done
|| !seg
->use_rela_p
)
24741 /* REL format relocations are limited to a 16-bit addend. */
24742 if (!fixP
->fx_done
)
24744 if (value
< -0x8000 || value
> 0x7fff)
24745 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24746 _("offset out of range"));
24748 else if (fixP
->fx_r_type
== BFD_RELOC_ARM_MOVT
24749 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVT
)
24754 if (fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVW
24755 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVT
)
24757 newval
= get_thumb32_insn (buf
);
24758 newval
&= 0xfbf08f00;
24759 newval
|= (value
& 0xf000) << 4;
24760 newval
|= (value
& 0x0800) << 15;
24761 newval
|= (value
& 0x0700) << 4;
24762 newval
|= (value
& 0x00ff);
24763 put_thumb32_insn (buf
, newval
);
24767 newval
= md_chars_to_number (buf
, 4);
24768 newval
&= 0xfff0f000;
24769 newval
|= value
& 0x0fff;
24770 newval
|= (value
& 0xf000) << 4;
24771 md_number_to_chars (buf
, newval
, 4);
24776 case BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
:
24777 case BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC
:
24778 case BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC
:
24779 case BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
:
24780 gas_assert (!fixP
->fx_done
);
24783 bfd_boolean is_mov
;
24784 bfd_vma encoded_addend
= value
;
24786 /* Check that addend can be encoded in instruction. */
24787 if (!seg
->use_rela_p
&& (value
< 0 || value
> 255))
24788 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24789 _("the offset 0x%08lX is not representable"),
24790 (unsigned long) encoded_addend
);
24792 /* Extract the instruction. */
24793 insn
= md_chars_to_number (buf
, THUMB_SIZE
);
24794 is_mov
= (insn
& 0xf800) == 0x2000;
24799 if (!seg
->use_rela_p
)
24800 insn
|= encoded_addend
;
24806 /* Extract the instruction. */
24807 /* Encoding is the following
24812 /* The following conditions must be true :
24817 rd
= (insn
>> 4) & 0xf;
24819 if ((insn
& 0x8000) || (rd
!= rs
) || rd
> 7)
24820 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24821 _("Unable to process relocation for thumb opcode: %lx"),
24822 (unsigned long) insn
);
24824 /* Encode as ADD immediate8 thumb 1 code. */
24825 insn
= 0x3000 | (rd
<< 8);
24827 /* Place the encoded addend into the first 8 bits of the
24829 if (!seg
->use_rela_p
)
24830 insn
|= encoded_addend
;
24833 /* Update the instruction. */
24834 md_number_to_chars (buf
, insn
, THUMB_SIZE
);
24838 case BFD_RELOC_ARM_ALU_PC_G0_NC
:
24839 case BFD_RELOC_ARM_ALU_PC_G0
:
24840 case BFD_RELOC_ARM_ALU_PC_G1_NC
:
24841 case BFD_RELOC_ARM_ALU_PC_G1
:
24842 case BFD_RELOC_ARM_ALU_PC_G2
:
24843 case BFD_RELOC_ARM_ALU_SB_G0_NC
:
24844 case BFD_RELOC_ARM_ALU_SB_G0
:
24845 case BFD_RELOC_ARM_ALU_SB_G1_NC
:
24846 case BFD_RELOC_ARM_ALU_SB_G1
:
24847 case BFD_RELOC_ARM_ALU_SB_G2
:
24848 gas_assert (!fixP
->fx_done
);
24849 if (!seg
->use_rela_p
)
24852 bfd_vma encoded_addend
;
24853 bfd_vma addend_abs
= llabs (value
);
24855 /* Check that the absolute value of the addend can be
24856 expressed as an 8-bit constant plus a rotation. */
24857 encoded_addend
= encode_arm_immediate (addend_abs
);
24858 if (encoded_addend
== (unsigned int) FAIL
)
24859 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24860 _("the offset 0x%08lX is not representable"),
24861 (unsigned long) addend_abs
);
24863 /* Extract the instruction. */
24864 insn
= md_chars_to_number (buf
, INSN_SIZE
);
24866 /* If the addend is positive, use an ADD instruction.
24867 Otherwise use a SUB. Take care not to destroy the S bit. */
24868 insn
&= 0xff1fffff;
24874 /* Place the encoded addend into the first 12 bits of the
24876 insn
&= 0xfffff000;
24877 insn
|= encoded_addend
;
24879 /* Update the instruction. */
24880 md_number_to_chars (buf
, insn
, INSN_SIZE
);
24884 case BFD_RELOC_ARM_LDR_PC_G0
:
24885 case BFD_RELOC_ARM_LDR_PC_G1
:
24886 case BFD_RELOC_ARM_LDR_PC_G2
:
24887 case BFD_RELOC_ARM_LDR_SB_G0
:
24888 case BFD_RELOC_ARM_LDR_SB_G1
:
24889 case BFD_RELOC_ARM_LDR_SB_G2
:
24890 gas_assert (!fixP
->fx_done
);
24891 if (!seg
->use_rela_p
)
24894 bfd_vma addend_abs
= llabs (value
);
24896 /* Check that the absolute value of the addend can be
24897 encoded in 12 bits. */
24898 if (addend_abs
>= 0x1000)
24899 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24900 _("bad offset 0x%08lX (only 12 bits available for the magnitude)"),
24901 (unsigned long) addend_abs
);
24903 /* Extract the instruction. */
24904 insn
= md_chars_to_number (buf
, INSN_SIZE
);
24906 /* If the addend is negative, clear bit 23 of the instruction.
24907 Otherwise set it. */
24909 insn
&= ~(1 << 23);
24913 /* Place the absolute value of the addend into the first 12 bits
24914 of the instruction. */
24915 insn
&= 0xfffff000;
24916 insn
|= addend_abs
;
24918 /* Update the instruction. */
24919 md_number_to_chars (buf
, insn
, INSN_SIZE
);
24923 case BFD_RELOC_ARM_LDRS_PC_G0
:
24924 case BFD_RELOC_ARM_LDRS_PC_G1
:
24925 case BFD_RELOC_ARM_LDRS_PC_G2
:
24926 case BFD_RELOC_ARM_LDRS_SB_G0
:
24927 case BFD_RELOC_ARM_LDRS_SB_G1
:
24928 case BFD_RELOC_ARM_LDRS_SB_G2
:
24929 gas_assert (!fixP
->fx_done
);
24930 if (!seg
->use_rela_p
)
24933 bfd_vma addend_abs
= llabs (value
);
24935 /* Check that the absolute value of the addend can be
24936 encoded in 8 bits. */
24937 if (addend_abs
>= 0x100)
24938 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24939 _("bad offset 0x%08lX (only 8 bits available for the magnitude)"),
24940 (unsigned long) addend_abs
);
24942 /* Extract the instruction. */
24943 insn
= md_chars_to_number (buf
, INSN_SIZE
);
24945 /* If the addend is negative, clear bit 23 of the instruction.
24946 Otherwise set it. */
24948 insn
&= ~(1 << 23);
24952 /* Place the first four bits of the absolute value of the addend
24953 into the first 4 bits of the instruction, and the remaining
24954 four into bits 8 .. 11. */
24955 insn
&= 0xfffff0f0;
24956 insn
|= (addend_abs
& 0xf) | ((addend_abs
& 0xf0) << 4);
24958 /* Update the instruction. */
24959 md_number_to_chars (buf
, insn
, INSN_SIZE
);
24963 case BFD_RELOC_ARM_LDC_PC_G0
:
24964 case BFD_RELOC_ARM_LDC_PC_G1
:
24965 case BFD_RELOC_ARM_LDC_PC_G2
:
24966 case BFD_RELOC_ARM_LDC_SB_G0
:
24967 case BFD_RELOC_ARM_LDC_SB_G1
:
24968 case BFD_RELOC_ARM_LDC_SB_G2
:
24969 gas_assert (!fixP
->fx_done
);
24970 if (!seg
->use_rela_p
)
24973 bfd_vma addend_abs
= llabs (value
);
24975 /* Check that the absolute value of the addend is a multiple of
24976 four and, when divided by four, fits in 8 bits. */
24977 if (addend_abs
& 0x3)
24978 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24979 _("bad offset 0x%08lX (must be word-aligned)"),
24980 (unsigned long) addend_abs
);
24982 if ((addend_abs
>> 2) > 0xff)
24983 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24984 _("bad offset 0x%08lX (must be an 8-bit number of words)"),
24985 (unsigned long) addend_abs
);
24987 /* Extract the instruction. */
24988 insn
= md_chars_to_number (buf
, INSN_SIZE
);
24990 /* If the addend is negative, clear bit 23 of the instruction.
24991 Otherwise set it. */
24993 insn
&= ~(1 << 23);
24997 /* Place the addend (divided by four) into the first eight
24998 bits of the instruction. */
24999 insn
&= 0xfffffff0;
25000 insn
|= addend_abs
>> 2;
25002 /* Update the instruction. */
25003 md_number_to_chars (buf
, insn
, INSN_SIZE
);
25007 case BFD_RELOC_THUMB_PCREL_BRANCH5
:
25009 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
25010 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
25011 && ARM_IS_FUNC (fixP
->fx_addsy
)
25012 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v8_1m_main
))
25014 /* Force a relocation for a branch 5 bits wide. */
25017 if (v8_1_branch_value_check (value
, 5, FALSE
) == FAIL
)
25018 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25021 if (fixP
->fx_done
|| !seg
->use_rela_p
)
25023 addressT boff
= value
>> 1;
25025 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
25026 newval
|= (boff
<< 7);
25027 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
25031 case BFD_RELOC_THUMB_PCREL_BFCSEL
:
25033 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
25034 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
25035 && ARM_IS_FUNC (fixP
->fx_addsy
)
25036 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v8_1m_main
))
25040 if ((value
& ~0x7f) && ((value
& ~0x3f) != ~0x3f))
25041 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25042 _("branch out of range"));
25044 if (fixP
->fx_done
|| !seg
->use_rela_p
)
25046 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
25048 addressT boff
= ((newval
& 0x0780) >> 7) << 1;
25049 addressT diff
= value
- boff
;
25053 newval
|= 1 << 1; /* T bit. */
25055 else if (diff
!= 2)
25057 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25058 _("out of range label-relative fixup value"));
25060 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
25064 case BFD_RELOC_ARM_THUMB_BF17
:
25066 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
25067 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
25068 && ARM_IS_FUNC (fixP
->fx_addsy
)
25069 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v8_1m_main
))
25071 /* Force a relocation for a branch 17 bits wide. */
25075 if (v8_1_branch_value_check (value
, 17, TRUE
) == FAIL
)
25076 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25079 if (fixP
->fx_done
|| !seg
->use_rela_p
)
25082 addressT immA
, immB
, immC
;
25084 immA
= (value
& 0x0001f000) >> 12;
25085 immB
= (value
& 0x00000ffc) >> 2;
25086 immC
= (value
& 0x00000002) >> 1;
25088 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
25089 newval2
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
25091 newval2
|= (immC
<< 11) | (immB
<< 1);
25092 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
25093 md_number_to_chars (buf
+ THUMB_SIZE
, newval2
, THUMB_SIZE
);
25097 case BFD_RELOC_ARM_THUMB_BF19
:
25099 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
25100 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
25101 && ARM_IS_FUNC (fixP
->fx_addsy
)
25102 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v8_1m_main
))
25104 /* Force a relocation for a branch 19 bits wide. */
25108 if (v8_1_branch_value_check (value
, 19, TRUE
) == FAIL
)
25109 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25112 if (fixP
->fx_done
|| !seg
->use_rela_p
)
25115 addressT immA
, immB
, immC
;
25117 immA
= (value
& 0x0007f000) >> 12;
25118 immB
= (value
& 0x00000ffc) >> 2;
25119 immC
= (value
& 0x00000002) >> 1;
25121 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
25122 newval2
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
25124 newval2
|= (immC
<< 11) | (immB
<< 1);
25125 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
25126 md_number_to_chars (buf
+ THUMB_SIZE
, newval2
, THUMB_SIZE
);
25130 case BFD_RELOC_ARM_THUMB_BF13
:
25132 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
25133 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
25134 && ARM_IS_FUNC (fixP
->fx_addsy
)
25135 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v8_1m_main
))
25137 /* Force a relocation for a branch 13 bits wide. */
25141 if (v8_1_branch_value_check (value
, 13, TRUE
) == FAIL
)
25142 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25145 if (fixP
->fx_done
|| !seg
->use_rela_p
)
25148 addressT immA
, immB
, immC
;
25150 immA
= (value
& 0x00001000) >> 12;
25151 immB
= (value
& 0x00000ffc) >> 2;
25152 immC
= (value
& 0x00000002) >> 1;
25154 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
25155 newval2
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
25157 newval2
|= (immC
<< 11) | (immB
<< 1);
25158 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
25159 md_number_to_chars (buf
+ THUMB_SIZE
, newval2
, THUMB_SIZE
);
25163 case BFD_RELOC_ARM_THUMB_LOOP12
:
25165 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
25166 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
25167 && ARM_IS_FUNC (fixP
->fx_addsy
)
25168 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v8_1m_main
))
25170 /* Force a relocation for a branch 12 bits wide. */
25174 bfd_vma insn
= get_thumb32_insn (buf
);
25175 /* le lr, <label> or le <label> */
25176 if (((insn
& 0xffffffff) == 0xf00fc001)
25177 || ((insn
& 0xffffffff) == 0xf02fc001))
25180 if (v8_1_branch_value_check (value
, 12, FALSE
) == FAIL
)
25181 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25183 if (fixP
->fx_done
|| !seg
->use_rela_p
)
25185 addressT imml
, immh
;
25187 immh
= (value
& 0x00000ffc) >> 2;
25188 imml
= (value
& 0x00000002) >> 1;
25190 newval
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
25191 newval
|= (imml
<< 11) | (immh
<< 1);
25192 md_number_to_chars (buf
+ THUMB_SIZE
, newval
, THUMB_SIZE
);
25196 case BFD_RELOC_ARM_V4BX
:
25197 /* This will need to go in the object file. */
25201 case BFD_RELOC_UNUSED
:
25203 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25204 _("bad relocation fixup type (%d)"), fixP
->fx_r_type
);
25208 /* Translate internal representation of relocation info to BFD target
25212 tc_gen_reloc (asection
*section
, fixS
*fixp
)
25215 bfd_reloc_code_real_type code
;
25217 reloc
= XNEW (arelent
);
25219 reloc
->sym_ptr_ptr
= XNEW (asymbol
*);
25220 *reloc
->sym_ptr_ptr
= symbol_get_bfdsym (fixp
->fx_addsy
);
25221 reloc
->address
= fixp
->fx_frag
->fr_address
+ fixp
->fx_where
;
25223 if (fixp
->fx_pcrel
)
25225 if (section
->use_rela_p
)
25226 fixp
->fx_offset
-= md_pcrel_from_section (fixp
, section
);
25228 fixp
->fx_offset
= reloc
->address
;
25230 reloc
->addend
= fixp
->fx_offset
;
25232 switch (fixp
->fx_r_type
)
25235 if (fixp
->fx_pcrel
)
25237 code
= BFD_RELOC_8_PCREL
;
25240 /* Fall through. */
25243 if (fixp
->fx_pcrel
)
25245 code
= BFD_RELOC_16_PCREL
;
25248 /* Fall through. */
25251 if (fixp
->fx_pcrel
)
25253 code
= BFD_RELOC_32_PCREL
;
25256 /* Fall through. */
25258 case BFD_RELOC_ARM_MOVW
:
25259 if (fixp
->fx_pcrel
)
25261 code
= BFD_RELOC_ARM_MOVW_PCREL
;
25264 /* Fall through. */
25266 case BFD_RELOC_ARM_MOVT
:
25267 if (fixp
->fx_pcrel
)
25269 code
= BFD_RELOC_ARM_MOVT_PCREL
;
25272 /* Fall through. */
25274 case BFD_RELOC_ARM_THUMB_MOVW
:
25275 if (fixp
->fx_pcrel
)
25277 code
= BFD_RELOC_ARM_THUMB_MOVW_PCREL
;
25280 /* Fall through. */
25282 case BFD_RELOC_ARM_THUMB_MOVT
:
25283 if (fixp
->fx_pcrel
)
25285 code
= BFD_RELOC_ARM_THUMB_MOVT_PCREL
;
25288 /* Fall through. */
25290 case BFD_RELOC_NONE
:
25291 case BFD_RELOC_ARM_PCREL_BRANCH
:
25292 case BFD_RELOC_ARM_PCREL_BLX
:
25293 case BFD_RELOC_RVA
:
25294 case BFD_RELOC_THUMB_PCREL_BRANCH7
:
25295 case BFD_RELOC_THUMB_PCREL_BRANCH9
:
25296 case BFD_RELOC_THUMB_PCREL_BRANCH12
:
25297 case BFD_RELOC_THUMB_PCREL_BRANCH20
:
25298 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
25299 case BFD_RELOC_THUMB_PCREL_BRANCH25
:
25300 case BFD_RELOC_VTABLE_ENTRY
:
25301 case BFD_RELOC_VTABLE_INHERIT
:
25303 case BFD_RELOC_32_SECREL
:
25305 code
= fixp
->fx_r_type
;
25308 case BFD_RELOC_THUMB_PCREL_BLX
:
25310 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
)
25311 code
= BFD_RELOC_THUMB_PCREL_BRANCH23
;
25314 code
= BFD_RELOC_THUMB_PCREL_BLX
;
25317 case BFD_RELOC_ARM_LITERAL
:
25318 case BFD_RELOC_ARM_HWLITERAL
:
25319 /* If this is called then the a literal has
25320 been referenced across a section boundary. */
25321 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
25322 _("literal referenced across section boundary"));
25326 case BFD_RELOC_ARM_TLS_CALL
:
25327 case BFD_RELOC_ARM_THM_TLS_CALL
:
25328 case BFD_RELOC_ARM_TLS_DESCSEQ
:
25329 case BFD_RELOC_ARM_THM_TLS_DESCSEQ
:
25330 case BFD_RELOC_ARM_GOT32
:
25331 case BFD_RELOC_ARM_GOTOFF
:
25332 case BFD_RELOC_ARM_GOT_PREL
:
25333 case BFD_RELOC_ARM_PLT32
:
25334 case BFD_RELOC_ARM_TARGET1
:
25335 case BFD_RELOC_ARM_ROSEGREL32
:
25336 case BFD_RELOC_ARM_SBREL32
:
25337 case BFD_RELOC_ARM_PREL31
:
25338 case BFD_RELOC_ARM_TARGET2
:
25339 case BFD_RELOC_ARM_TLS_LDO32
:
25340 case BFD_RELOC_ARM_PCREL_CALL
:
25341 case BFD_RELOC_ARM_PCREL_JUMP
:
25342 case BFD_RELOC_ARM_ALU_PC_G0_NC
:
25343 case BFD_RELOC_ARM_ALU_PC_G0
:
25344 case BFD_RELOC_ARM_ALU_PC_G1_NC
:
25345 case BFD_RELOC_ARM_ALU_PC_G1
:
25346 case BFD_RELOC_ARM_ALU_PC_G2
:
25347 case BFD_RELOC_ARM_LDR_PC_G0
:
25348 case BFD_RELOC_ARM_LDR_PC_G1
:
25349 case BFD_RELOC_ARM_LDR_PC_G2
:
25350 case BFD_RELOC_ARM_LDRS_PC_G0
:
25351 case BFD_RELOC_ARM_LDRS_PC_G1
:
25352 case BFD_RELOC_ARM_LDRS_PC_G2
:
25353 case BFD_RELOC_ARM_LDC_PC_G0
:
25354 case BFD_RELOC_ARM_LDC_PC_G1
:
25355 case BFD_RELOC_ARM_LDC_PC_G2
:
25356 case BFD_RELOC_ARM_ALU_SB_G0_NC
:
25357 case BFD_RELOC_ARM_ALU_SB_G0
:
25358 case BFD_RELOC_ARM_ALU_SB_G1_NC
:
25359 case BFD_RELOC_ARM_ALU_SB_G1
:
25360 case BFD_RELOC_ARM_ALU_SB_G2
:
25361 case BFD_RELOC_ARM_LDR_SB_G0
:
25362 case BFD_RELOC_ARM_LDR_SB_G1
:
25363 case BFD_RELOC_ARM_LDR_SB_G2
:
25364 case BFD_RELOC_ARM_LDRS_SB_G0
:
25365 case BFD_RELOC_ARM_LDRS_SB_G1
:
25366 case BFD_RELOC_ARM_LDRS_SB_G2
:
25367 case BFD_RELOC_ARM_LDC_SB_G0
:
25368 case BFD_RELOC_ARM_LDC_SB_G1
:
25369 case BFD_RELOC_ARM_LDC_SB_G2
:
25370 case BFD_RELOC_ARM_V4BX
:
25371 case BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
:
25372 case BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC
:
25373 case BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC
:
25374 case BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
:
25375 case BFD_RELOC_ARM_GOTFUNCDESC
:
25376 case BFD_RELOC_ARM_GOTOFFFUNCDESC
:
25377 case BFD_RELOC_ARM_FUNCDESC
:
25378 case BFD_RELOC_ARM_THUMB_BF17
:
25379 case BFD_RELOC_ARM_THUMB_BF19
:
25380 case BFD_RELOC_ARM_THUMB_BF13
:
25381 code
= fixp
->fx_r_type
;
25384 case BFD_RELOC_ARM_TLS_GOTDESC
:
25385 case BFD_RELOC_ARM_TLS_GD32
:
25386 case BFD_RELOC_ARM_TLS_GD32_FDPIC
:
25387 case BFD_RELOC_ARM_TLS_LE32
:
25388 case BFD_RELOC_ARM_TLS_IE32
:
25389 case BFD_RELOC_ARM_TLS_IE32_FDPIC
:
25390 case BFD_RELOC_ARM_TLS_LDM32
:
25391 case BFD_RELOC_ARM_TLS_LDM32_FDPIC
:
25392 /* BFD will include the symbol's address in the addend.
25393 But we don't want that, so subtract it out again here. */
25394 if (!S_IS_COMMON (fixp
->fx_addsy
))
25395 reloc
->addend
-= (*reloc
->sym_ptr_ptr
)->value
;
25396 code
= fixp
->fx_r_type
;
25400 case BFD_RELOC_ARM_IMMEDIATE
:
25401 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
25402 _("internal relocation (type: IMMEDIATE) not fixed up"));
25405 case BFD_RELOC_ARM_ADRL_IMMEDIATE
:
25406 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
25407 _("ADRL used for a symbol not defined in the same file"));
25410 case BFD_RELOC_THUMB_PCREL_BRANCH5
:
25411 case BFD_RELOC_THUMB_PCREL_BFCSEL
:
25412 case BFD_RELOC_ARM_THUMB_LOOP12
:
25413 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
25414 _("%s used for a symbol not defined in the same file"),
25415 bfd_get_reloc_code_name (fixp
->fx_r_type
));
25418 case BFD_RELOC_ARM_OFFSET_IMM
:
25419 if (section
->use_rela_p
)
25421 code
= fixp
->fx_r_type
;
25425 if (fixp
->fx_addsy
!= NULL
25426 && !S_IS_DEFINED (fixp
->fx_addsy
)
25427 && S_IS_LOCAL (fixp
->fx_addsy
))
25429 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
25430 _("undefined local label `%s'"),
25431 S_GET_NAME (fixp
->fx_addsy
));
25435 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
25436 _("internal_relocation (type: OFFSET_IMM) not fixed up"));
25443 switch (fixp
->fx_r_type
)
25445 case BFD_RELOC_NONE
: type
= "NONE"; break;
25446 case BFD_RELOC_ARM_OFFSET_IMM8
: type
= "OFFSET_IMM8"; break;
25447 case BFD_RELOC_ARM_SHIFT_IMM
: type
= "SHIFT_IMM"; break;
25448 case BFD_RELOC_ARM_SMC
: type
= "SMC"; break;
25449 case BFD_RELOC_ARM_SWI
: type
= "SWI"; break;
25450 case BFD_RELOC_ARM_MULTI
: type
= "MULTI"; break;
25451 case BFD_RELOC_ARM_CP_OFF_IMM
: type
= "CP_OFF_IMM"; break;
25452 case BFD_RELOC_ARM_T32_OFFSET_IMM
: type
= "T32_OFFSET_IMM"; break;
25453 case BFD_RELOC_ARM_T32_CP_OFF_IMM
: type
= "T32_CP_OFF_IMM"; break;
25454 case BFD_RELOC_ARM_THUMB_ADD
: type
= "THUMB_ADD"; break;
25455 case BFD_RELOC_ARM_THUMB_SHIFT
: type
= "THUMB_SHIFT"; break;
25456 case BFD_RELOC_ARM_THUMB_IMM
: type
= "THUMB_IMM"; break;
25457 case BFD_RELOC_ARM_THUMB_OFFSET
: type
= "THUMB_OFFSET"; break;
25458 default: type
= _("<unknown>"); break;
25460 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
25461 _("cannot represent %s relocation in this object file format"),
25468 if ((code
== BFD_RELOC_32_PCREL
|| code
== BFD_RELOC_32
)
25470 && fixp
->fx_addsy
== GOT_symbol
)
25472 code
= BFD_RELOC_ARM_GOTPC
;
25473 reloc
->addend
= fixp
->fx_offset
= reloc
->address
;
25477 reloc
->howto
= bfd_reloc_type_lookup (stdoutput
, code
);
25479 if (reloc
->howto
== NULL
)
25481 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
25482 _("cannot represent %s relocation in this object file format"),
25483 bfd_get_reloc_code_name (code
));
25487 /* HACK: Since arm ELF uses Rel instead of Rela, encode the
25488 vtable entry to be used in the relocation's section offset. */
25489 if (fixp
->fx_r_type
== BFD_RELOC_VTABLE_ENTRY
)
25490 reloc
->address
= fixp
->fx_offset
;
25495 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
25498 cons_fix_new_arm (fragS
* frag
,
25502 bfd_reloc_code_real_type reloc
)
25507 FIXME: @@ Should look at CPU word size. */
25511 reloc
= BFD_RELOC_8
;
25514 reloc
= BFD_RELOC_16
;
25518 reloc
= BFD_RELOC_32
;
25521 reloc
= BFD_RELOC_64
;
25526 if (exp
->X_op
== O_secrel
)
25528 exp
->X_op
= O_symbol
;
25529 reloc
= BFD_RELOC_32_SECREL
;
25533 fix_new_exp (frag
, where
, size
, exp
, pcrel
, reloc
);
25536 #if defined (OBJ_COFF)
25538 arm_validate_fix (fixS
* fixP
)
25540 /* If the destination of the branch is a defined symbol which does not have
25541 the THUMB_FUNC attribute, then we must be calling a function which has
25542 the (interfacearm) attribute. We look for the Thumb entry point to that
25543 function and change the branch to refer to that function instead. */
25544 if (fixP
->fx_r_type
== BFD_RELOC_THUMB_PCREL_BRANCH23
25545 && fixP
->fx_addsy
!= NULL
25546 && S_IS_DEFINED (fixP
->fx_addsy
)
25547 && ! THUMB_IS_FUNC (fixP
->fx_addsy
))
25549 fixP
->fx_addsy
= find_real_start (fixP
->fx_addsy
);
25556 arm_force_relocation (struct fix
* fixp
)
25558 #if defined (OBJ_COFF) && defined (TE_PE)
25559 if (fixp
->fx_r_type
== BFD_RELOC_RVA
)
25563 /* In case we have a call or a branch to a function in ARM ISA mode from
25564 a thumb function or vice-versa force the relocation. These relocations
25565 are cleared off for some cores that might have blx and simple transformations
25569 switch (fixp
->fx_r_type
)
25571 case BFD_RELOC_ARM_PCREL_JUMP
:
25572 case BFD_RELOC_ARM_PCREL_CALL
:
25573 case BFD_RELOC_THUMB_PCREL_BLX
:
25574 if (THUMB_IS_FUNC (fixp
->fx_addsy
))
25578 case BFD_RELOC_ARM_PCREL_BLX
:
25579 case BFD_RELOC_THUMB_PCREL_BRANCH25
:
25580 case BFD_RELOC_THUMB_PCREL_BRANCH20
:
25581 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
25582 if (ARM_IS_FUNC (fixp
->fx_addsy
))
25591 /* Resolve these relocations even if the symbol is extern or weak.
25592 Technically this is probably wrong due to symbol preemption.
25593 In practice these relocations do not have enough range to be useful
25594 at dynamic link time, and some code (e.g. in the Linux kernel)
25595 expects these references to be resolved. */
25596 if (fixp
->fx_r_type
== BFD_RELOC_ARM_IMMEDIATE
25597 || fixp
->fx_r_type
== BFD_RELOC_ARM_OFFSET_IMM
25598 || fixp
->fx_r_type
== BFD_RELOC_ARM_OFFSET_IMM8
25599 || fixp
->fx_r_type
== BFD_RELOC_ARM_ADRL_IMMEDIATE
25600 || fixp
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM
25601 || fixp
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM_S2
25602 || fixp
->fx_r_type
== BFD_RELOC_ARM_THUMB_OFFSET
25603 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_ADD_IMM
25604 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_IMMEDIATE
25605 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_IMM12
25606 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_OFFSET_IMM
25607 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_ADD_PC12
25608 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_CP_OFF_IMM
25609 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_CP_OFF_IMM_S2
)
25612 /* Always leave these relocations for the linker. */
25613 if ((fixp
->fx_r_type
>= BFD_RELOC_ARM_ALU_PC_G0_NC
25614 && fixp
->fx_r_type
<= BFD_RELOC_ARM_LDC_SB_G2
)
25615 || fixp
->fx_r_type
== BFD_RELOC_ARM_LDR_PC_G0
)
25618 /* Always generate relocations against function symbols. */
25619 if (fixp
->fx_r_type
== BFD_RELOC_32
25621 && (symbol_get_bfdsym (fixp
->fx_addsy
)->flags
& BSF_FUNCTION
))
25624 return generic_force_reloc (fixp
);
25627 #if defined (OBJ_ELF) || defined (OBJ_COFF)
25628 /* Relocations against function names must be left unadjusted,
25629 so that the linker can use this information to generate interworking
25630 stubs. The MIPS version of this function
25631 also prevents relocations that are mips-16 specific, but I do not
25632 know why it does this.
25635 There is one other problem that ought to be addressed here, but
25636 which currently is not: Taking the address of a label (rather
25637 than a function) and then later jumping to that address. Such
25638 addresses also ought to have their bottom bit set (assuming that
25639 they reside in Thumb code), but at the moment they will not. */
25642 arm_fix_adjustable (fixS
* fixP
)
25644 if (fixP
->fx_addsy
== NULL
)
25647 /* Preserve relocations against symbols with function type. */
25648 if (symbol_get_bfdsym (fixP
->fx_addsy
)->flags
& BSF_FUNCTION
)
25651 if (THUMB_IS_FUNC (fixP
->fx_addsy
)
25652 && fixP
->fx_subsy
== NULL
)
25655 /* We need the symbol name for the VTABLE entries. */
25656 if ( fixP
->fx_r_type
== BFD_RELOC_VTABLE_INHERIT
25657 || fixP
->fx_r_type
== BFD_RELOC_VTABLE_ENTRY
)
25660 /* Don't allow symbols to be discarded on GOT related relocs. */
25661 if (fixP
->fx_r_type
== BFD_RELOC_ARM_PLT32
25662 || fixP
->fx_r_type
== BFD_RELOC_ARM_GOT32
25663 || fixP
->fx_r_type
== BFD_RELOC_ARM_GOTOFF
25664 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_GD32
25665 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_GD32_FDPIC
25666 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_LE32
25667 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_IE32
25668 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_IE32_FDPIC
25669 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_LDM32
25670 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_LDM32_FDPIC
25671 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_LDO32
25672 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_GOTDESC
25673 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_CALL
25674 || fixP
->fx_r_type
== BFD_RELOC_ARM_THM_TLS_CALL
25675 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_DESCSEQ
25676 || fixP
->fx_r_type
== BFD_RELOC_ARM_THM_TLS_DESCSEQ
25677 || fixP
->fx_r_type
== BFD_RELOC_ARM_TARGET2
)
25680 /* Similarly for group relocations. */
25681 if ((fixP
->fx_r_type
>= BFD_RELOC_ARM_ALU_PC_G0_NC
25682 && fixP
->fx_r_type
<= BFD_RELOC_ARM_LDC_SB_G2
)
25683 || fixP
->fx_r_type
== BFD_RELOC_ARM_LDR_PC_G0
)
25686 /* MOVW/MOVT REL relocations have limited offsets, so keep the symbols. */
25687 if (fixP
->fx_r_type
== BFD_RELOC_ARM_MOVW
25688 || fixP
->fx_r_type
== BFD_RELOC_ARM_MOVT
25689 || fixP
->fx_r_type
== BFD_RELOC_ARM_MOVW_PCREL
25690 || fixP
->fx_r_type
== BFD_RELOC_ARM_MOVT_PCREL
25691 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVW
25692 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVT
25693 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVW_PCREL
25694 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVT_PCREL
)
25697 /* BFD_RELOC_ARM_THUMB_ALU_ABS_Gx_NC relocations have VERY limited
25698 offsets, so keep these symbols. */
25699 if (fixP
->fx_r_type
>= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
25700 && fixP
->fx_r_type
<= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
)
25705 #endif /* defined (OBJ_ELF) || defined (OBJ_COFF) */
25709 elf32_arm_target_format (void)
25712 return (target_big_endian
25713 ? "elf32-bigarm-symbian"
25714 : "elf32-littlearm-symbian");
25715 #elif defined (TE_VXWORKS)
25716 return (target_big_endian
25717 ? "elf32-bigarm-vxworks"
25718 : "elf32-littlearm-vxworks");
25719 #elif defined (TE_NACL)
25720 return (target_big_endian
25721 ? "elf32-bigarm-nacl"
25722 : "elf32-littlearm-nacl");
25726 if (target_big_endian
)
25727 return "elf32-bigarm-fdpic";
25729 return "elf32-littlearm-fdpic";
25733 if (target_big_endian
)
25734 return "elf32-bigarm";
25736 return "elf32-littlearm";
25742 armelf_frob_symbol (symbolS
* symp
,
25745 elf_frob_symbol (symp
, puntp
);
25749 /* MD interface: Finalization. */
25754 literal_pool
* pool
;
25756 /* Ensure that all the IT blocks are properly closed. */
25757 check_it_blocks_finished ();
25759 for (pool
= list_of_pools
; pool
; pool
= pool
->next
)
25761 /* Put it at the end of the relevant section. */
25762 subseg_set (pool
->section
, pool
->sub_section
);
25764 arm_elf_change_section ();
25771 /* Remove any excess mapping symbols generated for alignment frags in
25772 SEC. We may have created a mapping symbol before a zero byte
25773 alignment; remove it if there's a mapping symbol after the
25776 check_mapping_symbols (bfd
*abfd ATTRIBUTE_UNUSED
, asection
*sec
,
25777 void *dummy ATTRIBUTE_UNUSED
)
25779 segment_info_type
*seginfo
= seg_info (sec
);
25782 if (seginfo
== NULL
|| seginfo
->frchainP
== NULL
)
25785 for (fragp
= seginfo
->frchainP
->frch_root
;
25787 fragp
= fragp
->fr_next
)
25789 symbolS
*sym
= fragp
->tc_frag_data
.last_map
;
25790 fragS
*next
= fragp
->fr_next
;
25792 /* Variable-sized frags have been converted to fixed size by
25793 this point. But if this was variable-sized to start with,
25794 there will be a fixed-size frag after it. So don't handle
25796 if (sym
== NULL
|| next
== NULL
)
25799 if (S_GET_VALUE (sym
) < next
->fr_address
)
25800 /* Not at the end of this frag. */
25802 know (S_GET_VALUE (sym
) == next
->fr_address
);
25806 if (next
->tc_frag_data
.first_map
!= NULL
)
25808 /* Next frag starts with a mapping symbol. Discard this
25810 symbol_remove (sym
, &symbol_rootP
, &symbol_lastP
);
25814 if (next
->fr_next
== NULL
)
25816 /* This mapping symbol is at the end of the section. Discard
25818 know (next
->fr_fix
== 0 && next
->fr_var
== 0);
25819 symbol_remove (sym
, &symbol_rootP
, &symbol_lastP
);
25823 /* As long as we have empty frags without any mapping symbols,
25825 /* If the next frag is non-empty and does not start with a
25826 mapping symbol, then this mapping symbol is required. */
25827 if (next
->fr_address
!= next
->fr_next
->fr_address
)
25830 next
= next
->fr_next
;
25832 while (next
!= NULL
);
25837 /* Adjust the symbol table. This marks Thumb symbols as distinct from
25841 arm_adjust_symtab (void)
25846 for (sym
= symbol_rootP
; sym
!= NULL
; sym
= symbol_next (sym
))
25848 if (ARM_IS_THUMB (sym
))
25850 if (THUMB_IS_FUNC (sym
))
25852 /* Mark the symbol as a Thumb function. */
25853 if ( S_GET_STORAGE_CLASS (sym
) == C_STAT
25854 || S_GET_STORAGE_CLASS (sym
) == C_LABEL
) /* This can happen! */
25855 S_SET_STORAGE_CLASS (sym
, C_THUMBSTATFUNC
);
25857 else if (S_GET_STORAGE_CLASS (sym
) == C_EXT
)
25858 S_SET_STORAGE_CLASS (sym
, C_THUMBEXTFUNC
);
25860 as_bad (_("%s: unexpected function type: %d"),
25861 S_GET_NAME (sym
), S_GET_STORAGE_CLASS (sym
));
25863 else switch (S_GET_STORAGE_CLASS (sym
))
25866 S_SET_STORAGE_CLASS (sym
, C_THUMBEXT
);
25869 S_SET_STORAGE_CLASS (sym
, C_THUMBSTAT
);
25872 S_SET_STORAGE_CLASS (sym
, C_THUMBLABEL
);
25880 if (ARM_IS_INTERWORK (sym
))
25881 coffsymbol (symbol_get_bfdsym (sym
))->native
->u
.syment
.n_flags
= 0xFF;
25888 for (sym
= symbol_rootP
; sym
!= NULL
; sym
= symbol_next (sym
))
25890 if (ARM_IS_THUMB (sym
))
25892 elf_symbol_type
* elf_sym
;
25894 elf_sym
= elf_symbol (symbol_get_bfdsym (sym
));
25895 bind
= ELF_ST_BIND (elf_sym
->internal_elf_sym
.st_info
);
25897 if (! bfd_is_arm_special_symbol_name (elf_sym
->symbol
.name
,
25898 BFD_ARM_SPECIAL_SYM_TYPE_ANY
))
25900 /* If it's a .thumb_func, declare it as so,
25901 otherwise tag label as .code 16. */
25902 if (THUMB_IS_FUNC (sym
))
25903 ARM_SET_SYM_BRANCH_TYPE (elf_sym
->internal_elf_sym
.st_target_internal
,
25904 ST_BRANCH_TO_THUMB
);
25905 else if (EF_ARM_EABI_VERSION (meabi_flags
) < EF_ARM_EABI_VER4
)
25906 elf_sym
->internal_elf_sym
.st_info
=
25907 ELF_ST_INFO (bind
, STT_ARM_16BIT
);
25912 /* Remove any overlapping mapping symbols generated by alignment frags. */
25913 bfd_map_over_sections (stdoutput
, check_mapping_symbols
, (char *) 0);
25914 /* Now do generic ELF adjustments. */
25915 elf_adjust_symtab ();
25919 /* MD interface: Initialization. */
25922 set_constant_flonums (void)
25926 for (i
= 0; i
< NUM_FLOAT_VALS
; i
++)
25927 if (atof_ieee ((char *) fp_const
[i
], 'x', fp_values
[i
]) == NULL
)
25931 /* Auto-select Thumb mode if it's the only available instruction set for the
25932 given architecture. */
25935 autoselect_thumb_from_cpu_variant (void)
25937 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
))
25938 opcode_select (16);
25947 if ( (arm_ops_hsh
= hash_new ()) == NULL
25948 || (arm_cond_hsh
= hash_new ()) == NULL
25949 || (arm_shift_hsh
= hash_new ()) == NULL
25950 || (arm_psr_hsh
= hash_new ()) == NULL
25951 || (arm_v7m_psr_hsh
= hash_new ()) == NULL
25952 || (arm_reg_hsh
= hash_new ()) == NULL
25953 || (arm_reloc_hsh
= hash_new ()) == NULL
25954 || (arm_barrier_opt_hsh
= hash_new ()) == NULL
)
25955 as_fatal (_("virtual memory exhausted"));
25957 for (i
= 0; i
< sizeof (insns
) / sizeof (struct asm_opcode
); i
++)
25958 hash_insert (arm_ops_hsh
, insns
[i
].template_name
, (void *) (insns
+ i
));
25959 for (i
= 0; i
< sizeof (conds
) / sizeof (struct asm_cond
); i
++)
25960 hash_insert (arm_cond_hsh
, conds
[i
].template_name
, (void *) (conds
+ i
));
25961 for (i
= 0; i
< sizeof (shift_names
) / sizeof (struct asm_shift_name
); i
++)
25962 hash_insert (arm_shift_hsh
, shift_names
[i
].name
, (void *) (shift_names
+ i
));
25963 for (i
= 0; i
< sizeof (psrs
) / sizeof (struct asm_psr
); i
++)
25964 hash_insert (arm_psr_hsh
, psrs
[i
].template_name
, (void *) (psrs
+ i
));
25965 for (i
= 0; i
< sizeof (v7m_psrs
) / sizeof (struct asm_psr
); i
++)
25966 hash_insert (arm_v7m_psr_hsh
, v7m_psrs
[i
].template_name
,
25967 (void *) (v7m_psrs
+ i
));
25968 for (i
= 0; i
< sizeof (reg_names
) / sizeof (struct reg_entry
); i
++)
25969 hash_insert (arm_reg_hsh
, reg_names
[i
].name
, (void *) (reg_names
+ i
));
25971 i
< sizeof (barrier_opt_names
) / sizeof (struct asm_barrier_opt
);
25973 hash_insert (arm_barrier_opt_hsh
, barrier_opt_names
[i
].template_name
,
25974 (void *) (barrier_opt_names
+ i
));
25976 for (i
= 0; i
< ARRAY_SIZE (reloc_names
); i
++)
25978 struct reloc_entry
* entry
= reloc_names
+ i
;
25980 if (arm_is_eabi() && entry
->reloc
== BFD_RELOC_ARM_PLT32
)
25981 /* This makes encode_branch() use the EABI versions of this relocation. */
25982 entry
->reloc
= BFD_RELOC_UNUSED
;
25984 hash_insert (arm_reloc_hsh
, entry
->name
, (void *) entry
);
25988 set_constant_flonums ();
25990 /* Set the cpu variant based on the command-line options. We prefer
25991 -mcpu= over -march= if both are set (as for GCC); and we prefer
25992 -mfpu= over any other way of setting the floating point unit.
25993 Use of legacy options with new options are faulted. */
25996 if (mcpu_cpu_opt
|| march_cpu_opt
)
25997 as_bad (_("use of old and new-style options to set CPU type"));
25999 selected_arch
= *legacy_cpu
;
26001 else if (mcpu_cpu_opt
)
26003 selected_arch
= *mcpu_cpu_opt
;
26004 selected_ext
= *mcpu_ext_opt
;
26006 else if (march_cpu_opt
)
26008 selected_arch
= *march_cpu_opt
;
26009 selected_ext
= *march_ext_opt
;
26011 ARM_MERGE_FEATURE_SETS (selected_cpu
, selected_arch
, selected_ext
);
26016 as_bad (_("use of old and new-style options to set FPU type"));
26018 selected_fpu
= *legacy_fpu
;
26021 selected_fpu
= *mfpu_opt
;
26024 #if !(defined (EABI_DEFAULT) || defined (TE_LINUX) \
26025 || defined (TE_NetBSD) || defined (TE_VXWORKS))
26026 /* Some environments specify a default FPU. If they don't, infer it
26027 from the processor. */
26029 selected_fpu
= *mcpu_fpu_opt
;
26030 else if (march_fpu_opt
)
26031 selected_fpu
= *march_fpu_opt
;
26033 selected_fpu
= fpu_default
;
26037 if (ARM_FEATURE_ZERO (selected_fpu
))
26039 if (!no_cpu_selected ())
26040 selected_fpu
= fpu_default
;
26042 selected_fpu
= fpu_arch_fpa
;
26046 if (ARM_FEATURE_ZERO (selected_arch
))
26048 selected_arch
= cpu_default
;
26049 selected_cpu
= selected_arch
;
26051 ARM_MERGE_FEATURE_SETS (cpu_variant
, selected_cpu
, selected_fpu
);
26053 /* Autodection of feature mode: allow all features in cpu_variant but leave
26054 selected_cpu unset. It will be set in aeabi_set_public_attributes ()
26055 after all instruction have been processed and we can decide what CPU
26056 should be selected. */
26057 if (ARM_FEATURE_ZERO (selected_arch
))
26058 ARM_MERGE_FEATURE_SETS (cpu_variant
, arm_arch_any
, selected_fpu
);
26060 ARM_MERGE_FEATURE_SETS (cpu_variant
, selected_cpu
, selected_fpu
);
26063 autoselect_thumb_from_cpu_variant ();
26065 arm_arch_used
= thumb_arch_used
= arm_arch_none
;
26067 #if defined OBJ_COFF || defined OBJ_ELF
26069 unsigned int flags
= 0;
26071 #if defined OBJ_ELF
26072 flags
= meabi_flags
;
26074 switch (meabi_flags
)
26076 case EF_ARM_EABI_UNKNOWN
:
26078 /* Set the flags in the private structure. */
26079 if (uses_apcs_26
) flags
|= F_APCS26
;
26080 if (support_interwork
) flags
|= F_INTERWORK
;
26081 if (uses_apcs_float
) flags
|= F_APCS_FLOAT
;
26082 if (pic_code
) flags
|= F_PIC
;
26083 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_any_hard
))
26084 flags
|= F_SOFT_FLOAT
;
26086 switch (mfloat_abi_opt
)
26088 case ARM_FLOAT_ABI_SOFT
:
26089 case ARM_FLOAT_ABI_SOFTFP
:
26090 flags
|= F_SOFT_FLOAT
;
26093 case ARM_FLOAT_ABI_HARD
:
26094 if (flags
& F_SOFT_FLOAT
)
26095 as_bad (_("hard-float conflicts with specified fpu"));
26099 /* Using pure-endian doubles (even if soft-float). */
26100 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_endian_pure
))
26101 flags
|= F_VFP_FLOAT
;
26103 #if defined OBJ_ELF
26104 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_arch_maverick
))
26105 flags
|= EF_ARM_MAVERICK_FLOAT
;
26108 case EF_ARM_EABI_VER4
:
26109 case EF_ARM_EABI_VER5
:
26110 /* No additional flags to set. */
26117 bfd_set_private_flags (stdoutput
, flags
);
26119 /* We have run out flags in the COFF header to encode the
26120 status of ATPCS support, so instead we create a dummy,
26121 empty, debug section called .arm.atpcs. */
26126 sec
= bfd_make_section (stdoutput
, ".arm.atpcs");
26130 bfd_set_section_flags
26131 (stdoutput
, sec
, SEC_READONLY
| SEC_DEBUGGING
/* | SEC_HAS_CONTENTS */);
26132 bfd_set_section_size (stdoutput
, sec
, 0);
26133 bfd_set_section_contents (stdoutput
, sec
, NULL
, 0, 0);
26139 /* Record the CPU type as well. */
26140 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_iwmmxt2
))
26141 mach
= bfd_mach_arm_iWMMXt2
;
26142 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_iwmmxt
))
26143 mach
= bfd_mach_arm_iWMMXt
;
26144 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_xscale
))
26145 mach
= bfd_mach_arm_XScale
;
26146 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_maverick
))
26147 mach
= bfd_mach_arm_ep9312
;
26148 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v5e
))
26149 mach
= bfd_mach_arm_5TE
;
26150 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v5
))
26152 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v4t
))
26153 mach
= bfd_mach_arm_5T
;
26155 mach
= bfd_mach_arm_5
;
26157 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v4
))
26159 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v4t
))
26160 mach
= bfd_mach_arm_4T
;
26162 mach
= bfd_mach_arm_4
;
26164 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v3m
))
26165 mach
= bfd_mach_arm_3M
;
26166 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v3
))
26167 mach
= bfd_mach_arm_3
;
26168 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v2s
))
26169 mach
= bfd_mach_arm_2a
;
26170 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v2
))
26171 mach
= bfd_mach_arm_2
;
26173 mach
= bfd_mach_arm_unknown
;
26175 bfd_set_arch_mach (stdoutput
, TARGET_ARCH
, mach
);
26178 /* Command line processing. */
26181 Invocation line includes a switch not recognized by the base assembler.
26182 See if it's a processor-specific option.
26184 This routine is somewhat complicated by the need for backwards
26185 compatibility (since older releases of gcc can't be changed).
26186 The new options try to make the interface as compatible as
26189 New options (supported) are:
26191 -mcpu=<cpu name> Assemble for selected processor
26192 -march=<architecture name> Assemble for selected architecture
26193 -mfpu=<fpu architecture> Assemble for selected FPU.
26194 -EB/-mbig-endian Big-endian
26195 -EL/-mlittle-endian Little-endian
26196 -k Generate PIC code
26197 -mthumb Start in Thumb mode
26198 -mthumb-interwork Code supports ARM/Thumb interworking
26200 -m[no-]warn-deprecated Warn about deprecated features
26201 -m[no-]warn-syms Warn when symbols match instructions
26203 For now we will also provide support for:
26205 -mapcs-32 32-bit Program counter
26206 -mapcs-26 26-bit Program counter
   -mapcs-float		   Floats passed in FP registers
26208 -mapcs-reentrant Reentrant code
26210 (sometime these will probably be replaced with -mapcs=<list of options>
26211 and -matpcs=<list of options>)
   The remaining options are only supported for backwards compatibility.
26214 Cpu variants, the arm part is optional:
26215 -m[arm]1 Currently not supported.
26216 -m[arm]2, -m[arm]250 Arm 2 and Arm 250 processor
26217 -m[arm]3 Arm 3 processor
26218 -m[arm]6[xx], Arm 6 processors
26219 -m[arm]7[xx][t][[d]m] Arm 7 processors
26220 -m[arm]8[10] Arm 8 processors
26221 -m[arm]9[20][tdmi] Arm 9 processors
26222 -mstrongarm[110[0]] StrongARM processors
26223 -mxscale XScale processors
26224 -m[arm]v[2345[t[e]]] Arm architectures
26225 -mall All (except the ARM1)
26227 -mfpa10, -mfpa11 FPA10 and 11 co-processor instructions
26228 -mfpe-old (No float load/store multiples)
26229 -mvfpxd VFP Single precision
26231 -mno-fpu Disable all floating point instructions
26233 The following CPU names are recognized:
26234 arm1, arm2, arm250, arm3, arm6, arm600, arm610, arm620,
26235 arm7, arm7m, arm7d, arm7dm, arm7di, arm7dmi, arm70, arm700,
26236 arm700i, arm710 arm710t, arm720, arm720t, arm740t, arm710c,
26237 arm7100, arm7500, arm7500fe, arm7tdmi, arm8, arm810, arm9,
26238 arm920, arm920t, arm940t, arm946, arm966, arm9tdmi, arm9e,
26239 arm10t arm10e, arm1020t, arm1020e, arm10200e,
26240 strongarm, strongarm110, strongarm1100, strongarm1110, xscale.
/* Single-letter options recognized by this backend: "-m" takes an
   argument, "-k" is a bare flag (generate PIC).  */
const char * md_shortopts = "m:k";
26246 #ifdef ARM_BI_ENDIAN
26247 #define OPTION_EB (OPTION_MD_BASE + 0)
26248 #define OPTION_EL (OPTION_MD_BASE + 1)
26250 #if TARGET_BYTES_BIG_ENDIAN
26251 #define OPTION_EB (OPTION_MD_BASE + 0)
26253 #define OPTION_EL (OPTION_MD_BASE + 1)
26256 #define OPTION_FIX_V4BX (OPTION_MD_BASE + 2)
26257 #define OPTION_FDPIC (OPTION_MD_BASE + 3)
26259 struct option md_longopts
[] =
26262 {"EB", no_argument
, NULL
, OPTION_EB
},
26265 {"EL", no_argument
, NULL
, OPTION_EL
},
26267 {"fix-v4bx", no_argument
, NULL
, OPTION_FIX_V4BX
},
26269 {"fdpic", no_argument
, NULL
, OPTION_FDPIC
},
26271 {NULL
, no_argument
, NULL
, 0}
26274 size_t md_longopts_size
= sizeof (md_longopts
);
/* Describes one simple command-line option: when OPTION is matched,
   *VAR is set to VALUE; DEPRECATED, if non-null, is a warning to print.  */
struct arm_option_table
{
  const char * option;		/* Option name to match.  */
  const char * help;		/* Help information.  */
  int *	       var;		/* Variable to change.	*/
  int	       value;		/* What to change it to.  */
  const char * deprecated;	/* If non-null, print this message.  */
};
26285 struct arm_option_table arm_opts
[] =
26287 {"k", N_("generate PIC code"), &pic_code
, 1, NULL
},
26288 {"mthumb", N_("assemble Thumb code"), &thumb_mode
, 1, NULL
},
26289 {"mthumb-interwork", N_("support ARM/Thumb interworking"),
26290 &support_interwork
, 1, NULL
},
26291 {"mapcs-32", N_("code uses 32-bit program counter"), &uses_apcs_26
, 0, NULL
},
26292 {"mapcs-26", N_("code uses 26-bit program counter"), &uses_apcs_26
, 1, NULL
},
26293 {"mapcs-float", N_("floating point args are in fp regs"), &uses_apcs_float
,
26295 {"mapcs-reentrant", N_("re-entrant code"), &pic_code
, 1, NULL
},
26296 {"matpcs", N_("code is ATPCS conformant"), &atpcs
, 1, NULL
},
26297 {"mbig-endian", N_("assemble for big-endian"), &target_big_endian
, 1, NULL
},
26298 {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian
, 0,
26301 /* These are recognized by the assembler, but have no affect on code. */
26302 {"mapcs-frame", N_("use frame pointer"), NULL
, 0, NULL
},
26303 {"mapcs-stack-check", N_("use stack size checking"), NULL
, 0, NULL
},
26305 {"mwarn-deprecated", NULL
, &warn_on_deprecated
, 1, NULL
},
26306 {"mno-warn-deprecated", N_("do not warn on use of deprecated feature"),
26307 &warn_on_deprecated
, 0, NULL
},
26308 {"mwarn-syms", N_("warn about symbols that match instruction names [default]"), (int *) (& flag_warn_syms
), TRUE
, NULL
},
26309 {"mno-warn-syms", N_("disable warnings about symobls that match instructions"), (int *) (& flag_warn_syms
), FALSE
, NULL
},
26310 {NULL
, NULL
, NULL
, 0, NULL
}
26313 struct arm_legacy_option_table
26315 const char * option
; /* Option name to match. */
26316 const arm_feature_set
** var
; /* Variable to change. */
26317 const arm_feature_set value
; /* What to change it to. */
26318 const char * deprecated
; /* If non-null, print this message. */
26321 const struct arm_legacy_option_table arm_legacy_opts
[] =
26323 /* DON'T add any new processors to this list -- we want the whole list
26324 to go away... Add them to the processors table instead. */
26325 {"marm1", &legacy_cpu
, ARM_ARCH_V1
, N_("use -mcpu=arm1")},
26326 {"m1", &legacy_cpu
, ARM_ARCH_V1
, N_("use -mcpu=arm1")},
26327 {"marm2", &legacy_cpu
, ARM_ARCH_V2
, N_("use -mcpu=arm2")},
26328 {"m2", &legacy_cpu
, ARM_ARCH_V2
, N_("use -mcpu=arm2")},
26329 {"marm250", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -mcpu=arm250")},
26330 {"m250", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -mcpu=arm250")},
26331 {"marm3", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -mcpu=arm3")},
26332 {"m3", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -mcpu=arm3")},
26333 {"marm6", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm6")},
26334 {"m6", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm6")},
26335 {"marm600", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm600")},
26336 {"m600", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm600")},
26337 {"marm610", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm610")},
26338 {"m610", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm610")},
26339 {"marm620", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm620")},
26340 {"m620", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm620")},
26341 {"marm7", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7")},
26342 {"m7", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7")},
26343 {"marm70", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm70")},
26344 {"m70", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm70")},
26345 {"marm700", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm700")},
26346 {"m700", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm700")},
26347 {"marm700i", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm700i")},
26348 {"m700i", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm700i")},
26349 {"marm710", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm710")},
26350 {"m710", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm710")},
26351 {"marm710c", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm710c")},
26352 {"m710c", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm710c")},
26353 {"marm720", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm720")},
26354 {"m720", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm720")},
26355 {"marm7d", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7d")},
26356 {"m7d", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7d")},
26357 {"marm7di", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7di")},
26358 {"m7di", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7di")},
26359 {"marm7m", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7m")},
26360 {"m7m", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7m")},
26361 {"marm7dm", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7dm")},
26362 {"m7dm", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7dm")},
26363 {"marm7dmi", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7dmi")},
26364 {"m7dmi", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7dmi")},
26365 {"marm7100", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7100")},
26366 {"m7100", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7100")},
26367 {"marm7500", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7500")},
26368 {"m7500", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7500")},
26369 {"marm7500fe", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7500fe")},
26370 {"m7500fe", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7500fe")},
26371 {"marm7t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm7tdmi")},
26372 {"m7t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm7tdmi")},
26373 {"marm7tdmi", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm7tdmi")},
26374 {"m7tdmi", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm7tdmi")},
26375 {"marm710t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm710t")},
26376 {"m710t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm710t")},
26377 {"marm720t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm720t")},
26378 {"m720t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm720t")},
26379 {"marm740t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm740t")},
26380 {"m740t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm740t")},
26381 {"marm8", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=arm8")},
26382 {"m8", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=arm8")},
26383 {"marm810", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=arm810")},
26384 {"m810", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=arm810")},
26385 {"marm9", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm9")},
26386 {"m9", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm9")},
26387 {"marm9tdmi", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm9tdmi")},
26388 {"m9tdmi", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm9tdmi")},
26389 {"marm920", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm920")},
26390 {"m920", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm920")},
26391 {"marm940", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm940")},
26392 {"m940", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm940")},
26393 {"mstrongarm", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=strongarm")},
26394 {"mstrongarm110", &legacy_cpu
, ARM_ARCH_V4
,
26395 N_("use -mcpu=strongarm110")},
26396 {"mstrongarm1100", &legacy_cpu
, ARM_ARCH_V4
,
26397 N_("use -mcpu=strongarm1100")},
26398 {"mstrongarm1110", &legacy_cpu
, ARM_ARCH_V4
,
26399 N_("use -mcpu=strongarm1110")},
26400 {"mxscale", &legacy_cpu
, ARM_ARCH_XSCALE
, N_("use -mcpu=xscale")},
26401 {"miwmmxt", &legacy_cpu
, ARM_ARCH_IWMMXT
, N_("use -mcpu=iwmmxt")},
26402 {"mall", &legacy_cpu
, ARM_ANY
, N_("use -mcpu=all")},
26404 /* Architecture variants -- don't add any more to this list either. */
26405 {"mv2", &legacy_cpu
, ARM_ARCH_V2
, N_("use -march=armv2")},
26406 {"marmv2", &legacy_cpu
, ARM_ARCH_V2
, N_("use -march=armv2")},
26407 {"mv2a", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -march=armv2a")},
26408 {"marmv2a", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -march=armv2a")},
26409 {"mv3", &legacy_cpu
, ARM_ARCH_V3
, N_("use -march=armv3")},
26410 {"marmv3", &legacy_cpu
, ARM_ARCH_V3
, N_("use -march=armv3")},
26411 {"mv3m", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -march=armv3m")},
26412 {"marmv3m", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -march=armv3m")},
26413 {"mv4", &legacy_cpu
, ARM_ARCH_V4
, N_("use -march=armv4")},
26414 {"marmv4", &legacy_cpu
, ARM_ARCH_V4
, N_("use -march=armv4")},
26415 {"mv4t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -march=armv4t")},
26416 {"marmv4t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -march=armv4t")},
26417 {"mv5", &legacy_cpu
, ARM_ARCH_V5
, N_("use -march=armv5")},
26418 {"marmv5", &legacy_cpu
, ARM_ARCH_V5
, N_("use -march=armv5")},
26419 {"mv5t", &legacy_cpu
, ARM_ARCH_V5T
, N_("use -march=armv5t")},
26420 {"marmv5t", &legacy_cpu
, ARM_ARCH_V5T
, N_("use -march=armv5t")},
26421 {"mv5e", &legacy_cpu
, ARM_ARCH_V5TE
, N_("use -march=armv5te")},
26422 {"marmv5e", &legacy_cpu
, ARM_ARCH_V5TE
, N_("use -march=armv5te")},
26424 /* Floating point variants -- don't add any more to this list either. */
26425 {"mfpe-old", &legacy_fpu
, FPU_ARCH_FPE
, N_("use -mfpu=fpe")},
26426 {"mfpa10", &legacy_fpu
, FPU_ARCH_FPA
, N_("use -mfpu=fpa10")},
26427 {"mfpa11", &legacy_fpu
, FPU_ARCH_FPA
, N_("use -mfpu=fpa11")},
26428 {"mno-fpu", &legacy_fpu
, ARM_ARCH_NONE
,
26429 N_("use either -mfpu=softfpa or -mfpu=softvfp")},
26431 {NULL
, NULL
, ARM_ARCH_NONE
, NULL
}
26434 struct arm_cpu_option_table
26438 const arm_feature_set value
;
26439 const arm_feature_set ext
;
26440 /* For some CPUs we assume an FPU unless the user explicitly sets
26442 const arm_feature_set default_fpu
;
26443 /* The canonical name of the CPU, or NULL to use NAME converted to upper
26445 const char * canonical_name
;
26448 /* This list should, at a minimum, contain all the cpu names
26449 recognized by GCC. */
26450 #define ARM_CPU_OPT(N, CN, V, E, DF) { N, sizeof (N) - 1, V, E, DF, CN }
26452 static const struct arm_cpu_option_table arm_cpus
[] =
26454 ARM_CPU_OPT ("all", NULL
, ARM_ANY
,
26457 ARM_CPU_OPT ("arm1", NULL
, ARM_ARCH_V1
,
26460 ARM_CPU_OPT ("arm2", NULL
, ARM_ARCH_V2
,
26463 ARM_CPU_OPT ("arm250", NULL
, ARM_ARCH_V2S
,
26466 ARM_CPU_OPT ("arm3", NULL
, ARM_ARCH_V2S
,
26469 ARM_CPU_OPT ("arm6", NULL
, ARM_ARCH_V3
,
26472 ARM_CPU_OPT ("arm60", NULL
, ARM_ARCH_V3
,
26475 ARM_CPU_OPT ("arm600", NULL
, ARM_ARCH_V3
,
26478 ARM_CPU_OPT ("arm610", NULL
, ARM_ARCH_V3
,
26481 ARM_CPU_OPT ("arm620", NULL
, ARM_ARCH_V3
,
26484 ARM_CPU_OPT ("arm7", NULL
, ARM_ARCH_V3
,
26487 ARM_CPU_OPT ("arm7m", NULL
, ARM_ARCH_V3M
,
26490 ARM_CPU_OPT ("arm7d", NULL
, ARM_ARCH_V3
,
26493 ARM_CPU_OPT ("arm7dm", NULL
, ARM_ARCH_V3M
,
26496 ARM_CPU_OPT ("arm7di", NULL
, ARM_ARCH_V3
,
26499 ARM_CPU_OPT ("arm7dmi", NULL
, ARM_ARCH_V3M
,
26502 ARM_CPU_OPT ("arm70", NULL
, ARM_ARCH_V3
,
26505 ARM_CPU_OPT ("arm700", NULL
, ARM_ARCH_V3
,
26508 ARM_CPU_OPT ("arm700i", NULL
, ARM_ARCH_V3
,
26511 ARM_CPU_OPT ("arm710", NULL
, ARM_ARCH_V3
,
26514 ARM_CPU_OPT ("arm710t", NULL
, ARM_ARCH_V4T
,
26517 ARM_CPU_OPT ("arm720", NULL
, ARM_ARCH_V3
,
26520 ARM_CPU_OPT ("arm720t", NULL
, ARM_ARCH_V4T
,
26523 ARM_CPU_OPT ("arm740t", NULL
, ARM_ARCH_V4T
,
26526 ARM_CPU_OPT ("arm710c", NULL
, ARM_ARCH_V3
,
26529 ARM_CPU_OPT ("arm7100", NULL
, ARM_ARCH_V3
,
26532 ARM_CPU_OPT ("arm7500", NULL
, ARM_ARCH_V3
,
26535 ARM_CPU_OPT ("arm7500fe", NULL
, ARM_ARCH_V3
,
26538 ARM_CPU_OPT ("arm7t", NULL
, ARM_ARCH_V4T
,
26541 ARM_CPU_OPT ("arm7tdmi", NULL
, ARM_ARCH_V4T
,
26544 ARM_CPU_OPT ("arm7tdmi-s", NULL
, ARM_ARCH_V4T
,
26547 ARM_CPU_OPT ("arm8", NULL
, ARM_ARCH_V4
,
26550 ARM_CPU_OPT ("arm810", NULL
, ARM_ARCH_V4
,
26553 ARM_CPU_OPT ("strongarm", NULL
, ARM_ARCH_V4
,
26556 ARM_CPU_OPT ("strongarm1", NULL
, ARM_ARCH_V4
,
26559 ARM_CPU_OPT ("strongarm110", NULL
, ARM_ARCH_V4
,
26562 ARM_CPU_OPT ("strongarm1100", NULL
, ARM_ARCH_V4
,
26565 ARM_CPU_OPT ("strongarm1110", NULL
, ARM_ARCH_V4
,
26568 ARM_CPU_OPT ("arm9", NULL
, ARM_ARCH_V4T
,
26571 ARM_CPU_OPT ("arm920", "ARM920T", ARM_ARCH_V4T
,
26574 ARM_CPU_OPT ("arm920t", NULL
, ARM_ARCH_V4T
,
26577 ARM_CPU_OPT ("arm922t", NULL
, ARM_ARCH_V4T
,
26580 ARM_CPU_OPT ("arm940t", NULL
, ARM_ARCH_V4T
,
26583 ARM_CPU_OPT ("arm9tdmi", NULL
, ARM_ARCH_V4T
,
26586 ARM_CPU_OPT ("fa526", NULL
, ARM_ARCH_V4
,
26589 ARM_CPU_OPT ("fa626", NULL
, ARM_ARCH_V4
,
26593 /* For V5 or later processors we default to using VFP; but the user
26594 should really set the FPU type explicitly. */
26595 ARM_CPU_OPT ("arm9e-r0", NULL
, ARM_ARCH_V5TExP
,
26598 ARM_CPU_OPT ("arm9e", NULL
, ARM_ARCH_V5TE
,
26601 ARM_CPU_OPT ("arm926ej", "ARM926EJ-S", ARM_ARCH_V5TEJ
,
26604 ARM_CPU_OPT ("arm926ejs", "ARM926EJ-S", ARM_ARCH_V5TEJ
,
26607 ARM_CPU_OPT ("arm926ej-s", NULL
, ARM_ARCH_V5TEJ
,
26610 ARM_CPU_OPT ("arm946e-r0", NULL
, ARM_ARCH_V5TExP
,
26613 ARM_CPU_OPT ("arm946e", "ARM946E-S", ARM_ARCH_V5TE
,
26616 ARM_CPU_OPT ("arm946e-s", NULL
, ARM_ARCH_V5TE
,
26619 ARM_CPU_OPT ("arm966e-r0", NULL
, ARM_ARCH_V5TExP
,
26622 ARM_CPU_OPT ("arm966e", "ARM966E-S", ARM_ARCH_V5TE
,
26625 ARM_CPU_OPT ("arm966e-s", NULL
, ARM_ARCH_V5TE
,
26628 ARM_CPU_OPT ("arm968e-s", NULL
, ARM_ARCH_V5TE
,
26631 ARM_CPU_OPT ("arm10t", NULL
, ARM_ARCH_V5T
,
26634 ARM_CPU_OPT ("arm10tdmi", NULL
, ARM_ARCH_V5T
,
26637 ARM_CPU_OPT ("arm10e", NULL
, ARM_ARCH_V5TE
,
26640 ARM_CPU_OPT ("arm1020", "ARM1020E", ARM_ARCH_V5TE
,
26643 ARM_CPU_OPT ("arm1020t", NULL
, ARM_ARCH_V5T
,
26646 ARM_CPU_OPT ("arm1020e", NULL
, ARM_ARCH_V5TE
,
26649 ARM_CPU_OPT ("arm1022e", NULL
, ARM_ARCH_V5TE
,
26652 ARM_CPU_OPT ("arm1026ejs", "ARM1026EJ-S", ARM_ARCH_V5TEJ
,
26655 ARM_CPU_OPT ("arm1026ej-s", NULL
, ARM_ARCH_V5TEJ
,
26658 ARM_CPU_OPT ("fa606te", NULL
, ARM_ARCH_V5TE
,
26661 ARM_CPU_OPT ("fa616te", NULL
, ARM_ARCH_V5TE
,
26664 ARM_CPU_OPT ("fa626te", NULL
, ARM_ARCH_V5TE
,
26667 ARM_CPU_OPT ("fmp626", NULL
, ARM_ARCH_V5TE
,
26670 ARM_CPU_OPT ("fa726te", NULL
, ARM_ARCH_V5TE
,
26673 ARM_CPU_OPT ("arm1136js", "ARM1136J-S", ARM_ARCH_V6
,
26676 ARM_CPU_OPT ("arm1136j-s", NULL
, ARM_ARCH_V6
,
26679 ARM_CPU_OPT ("arm1136jfs", "ARM1136JF-S", ARM_ARCH_V6
,
26682 ARM_CPU_OPT ("arm1136jf-s", NULL
, ARM_ARCH_V6
,
26685 ARM_CPU_OPT ("mpcore", "MPCore", ARM_ARCH_V6K
,
26688 ARM_CPU_OPT ("mpcorenovfp", "MPCore", ARM_ARCH_V6K
,
26691 ARM_CPU_OPT ("arm1156t2-s", NULL
, ARM_ARCH_V6T2
,
26694 ARM_CPU_OPT ("arm1156t2f-s", NULL
, ARM_ARCH_V6T2
,
26697 ARM_CPU_OPT ("arm1176jz-s", NULL
, ARM_ARCH_V6KZ
,
26700 ARM_CPU_OPT ("arm1176jzf-s", NULL
, ARM_ARCH_V6KZ
,
26703 ARM_CPU_OPT ("cortex-a5", "Cortex-A5", ARM_ARCH_V7A
,
26704 ARM_FEATURE_CORE_LOW (ARM_EXT_MP
| ARM_EXT_SEC
),
26706 ARM_CPU_OPT ("cortex-a7", "Cortex-A7", ARM_ARCH_V7VE
,
26708 FPU_ARCH_NEON_VFP_V4
),
26709 ARM_CPU_OPT ("cortex-a8", "Cortex-A8", ARM_ARCH_V7A
,
26710 ARM_FEATURE_CORE_LOW (ARM_EXT_SEC
),
26711 ARM_FEATURE_COPROC (FPU_VFP_V3
| FPU_NEON_EXT_V1
)),
26712 ARM_CPU_OPT ("cortex-a9", "Cortex-A9", ARM_ARCH_V7A
,
26713 ARM_FEATURE_CORE_LOW (ARM_EXT_MP
| ARM_EXT_SEC
),
26714 ARM_FEATURE_COPROC (FPU_VFP_V3
| FPU_NEON_EXT_V1
)),
26715 ARM_CPU_OPT ("cortex-a12", "Cortex-A12", ARM_ARCH_V7VE
,
26717 FPU_ARCH_NEON_VFP_V4
),
26718 ARM_CPU_OPT ("cortex-a15", "Cortex-A15", ARM_ARCH_V7VE
,
26720 FPU_ARCH_NEON_VFP_V4
),
26721 ARM_CPU_OPT ("cortex-a17", "Cortex-A17", ARM_ARCH_V7VE
,
26723 FPU_ARCH_NEON_VFP_V4
),
26724 ARM_CPU_OPT ("cortex-a32", "Cortex-A32", ARM_ARCH_V8A
,
26725 ARM_FEATURE_COPROC (CRC_EXT_ARMV8
),
26726 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
),
26727 ARM_CPU_OPT ("cortex-a35", "Cortex-A35", ARM_ARCH_V8A
,
26728 ARM_FEATURE_COPROC (CRC_EXT_ARMV8
),
26729 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
),
26730 ARM_CPU_OPT ("cortex-a53", "Cortex-A53", ARM_ARCH_V8A
,
26731 ARM_FEATURE_COPROC (CRC_EXT_ARMV8
),
26732 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
),
26733 ARM_CPU_OPT ("cortex-a55", "Cortex-A55", ARM_ARCH_V8_2A
,
26734 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
),
26735 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD
),
26736 ARM_CPU_OPT ("cortex-a57", "Cortex-A57", ARM_ARCH_V8A
,
26737 ARM_FEATURE_COPROC (CRC_EXT_ARMV8
),
26738 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
),
26739 ARM_CPU_OPT ("cortex-a72", "Cortex-A72", ARM_ARCH_V8A
,
26740 ARM_FEATURE_COPROC (CRC_EXT_ARMV8
),
26741 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
),
26742 ARM_CPU_OPT ("cortex-a73", "Cortex-A73", ARM_ARCH_V8A
,
26743 ARM_FEATURE_COPROC (CRC_EXT_ARMV8
),
26744 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
),
26745 ARM_CPU_OPT ("cortex-a75", "Cortex-A75", ARM_ARCH_V8_2A
,
26746 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
),
26747 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD
),
26748 ARM_CPU_OPT ("cortex-a76", "Cortex-A76", ARM_ARCH_V8_2A
,
26749 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
),
26750 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD
),
26751 ARM_CPU_OPT ("ares", "Ares", ARM_ARCH_V8_2A
,
26752 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
),
26753 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD
),
26754 ARM_CPU_OPT ("cortex-r4", "Cortex-R4", ARM_ARCH_V7R
,
26757 ARM_CPU_OPT ("cortex-r4f", "Cortex-R4F", ARM_ARCH_V7R
,
26759 FPU_ARCH_VFP_V3D16
),
26760 ARM_CPU_OPT ("cortex-r5", "Cortex-R5", ARM_ARCH_V7R
,
26761 ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV
),
26763 ARM_CPU_OPT ("cortex-r7", "Cortex-R7", ARM_ARCH_V7R
,
26764 ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV
),
26765 FPU_ARCH_VFP_V3D16
),
26766 ARM_CPU_OPT ("cortex-r8", "Cortex-R8", ARM_ARCH_V7R
,
26767 ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV
),
26768 FPU_ARCH_VFP_V3D16
),
26769 ARM_CPU_OPT ("cortex-r52", "Cortex-R52", ARM_ARCH_V8R
,
26770 ARM_FEATURE_COPROC (CRC_EXT_ARMV8
),
26771 FPU_ARCH_NEON_VFP_ARMV8
),
26772 ARM_CPU_OPT ("cortex-m33", "Cortex-M33", ARM_ARCH_V8M_MAIN
,
26773 ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP
| ARM_EXT_V6_DSP
),
26775 ARM_CPU_OPT ("cortex-m23", "Cortex-M23", ARM_ARCH_V8M_BASE
,
26778 ARM_CPU_OPT ("cortex-m7", "Cortex-M7", ARM_ARCH_V7EM
,
26781 ARM_CPU_OPT ("cortex-m4", "Cortex-M4", ARM_ARCH_V7EM
,
26784 ARM_CPU_OPT ("cortex-m3", "Cortex-M3", ARM_ARCH_V7M
,
26787 ARM_CPU_OPT ("cortex-m1", "Cortex-M1", ARM_ARCH_V6SM
,
26790 ARM_CPU_OPT ("cortex-m0", "Cortex-M0", ARM_ARCH_V6SM
,
26793 ARM_CPU_OPT ("cortex-m0plus", "Cortex-M0+", ARM_ARCH_V6SM
,
26796 ARM_CPU_OPT ("exynos-m1", "Samsung Exynos M1", ARM_ARCH_V8A
,
26797 ARM_FEATURE_COPROC (CRC_EXT_ARMV8
),
26798 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
),
26799 ARM_CPU_OPT ("neoverse-n1", "Neoverse N1", ARM_ARCH_V8_2A
,
26800 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
),
26801 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD
),
26802 /* ??? XSCALE is really an architecture. */
26803 ARM_CPU_OPT ("xscale", NULL
, ARM_ARCH_XSCALE
,
26807 /* ??? iwmmxt is not a processor. */
26808 ARM_CPU_OPT ("iwmmxt", NULL
, ARM_ARCH_IWMMXT
,
26811 ARM_CPU_OPT ("iwmmxt2", NULL
, ARM_ARCH_IWMMXT2
,
26814 ARM_CPU_OPT ("i80200", NULL
, ARM_ARCH_XSCALE
,
26819 ARM_CPU_OPT ("ep9312", "ARM920T",
26820 ARM_FEATURE_LOW (ARM_AEXT_V4T
, ARM_CEXT_MAVERICK
),
26821 ARM_ARCH_NONE
, FPU_ARCH_MAVERICK
),
26823 /* Marvell processors. */
26824 ARM_CPU_OPT ("marvell-pj4", NULL
, ARM_ARCH_V7A
,
26825 ARM_FEATURE_CORE_LOW (ARM_EXT_MP
| ARM_EXT_SEC
),
26826 FPU_ARCH_VFP_V3D16
),
26827 ARM_CPU_OPT ("marvell-whitney", NULL
, ARM_ARCH_V7A
,
26828 ARM_FEATURE_CORE_LOW (ARM_EXT_MP
| ARM_EXT_SEC
),
26829 FPU_ARCH_NEON_VFP_V4
),
26831 /* APM X-Gene family. */
26832 ARM_CPU_OPT ("xgene1", "APM X-Gene 1", ARM_ARCH_V8A
,
26834 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
),
26835 ARM_CPU_OPT ("xgene2", "APM X-Gene 2", ARM_ARCH_V8A
,
26836 ARM_FEATURE_COPROC (CRC_EXT_ARMV8
),
26837 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
),
26839 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
, ARM_ARCH_NONE
, NULL
}
26843 struct arm_ext_table
26847 const arm_feature_set merge
;
26848 const arm_feature_set clear
;
26851 struct arm_arch_option_table
26855 const arm_feature_set value
;
26856 const arm_feature_set default_fpu
;
26857 const struct arm_ext_table
* ext_table
;
26860 /* Used to add support for +E and +noE extension. */
26861 #define ARM_EXT(E, M, C) { E, sizeof (E) - 1, M, C }
26862 /* Used to add support for a +E extension. */
26863 #define ARM_ADD(E, M) { E, sizeof(E) - 1, M, ARM_ARCH_NONE }
26864 /* Used to add support for a +noE extension. */
26865 #define ARM_REMOVE(E, C) { E, sizeof(E) -1, ARM_ARCH_NONE, C }
26867 #define ALL_FP ARM_FEATURE (0, ARM_EXT2_FP16_INST | ARM_EXT2_FP16_FML, \
26868 ~0 & ~FPU_ENDIAN_PURE)
26870 static const struct arm_ext_table armv5te_ext_table
[] =
26872 ARM_EXT ("fp", FPU_ARCH_VFP_V2
, ALL_FP
),
26873 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
26876 static const struct arm_ext_table armv7_ext_table
[] =
26878 ARM_EXT ("fp", FPU_ARCH_VFP_V3D16
, ALL_FP
),
26879 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
26882 static const struct arm_ext_table armv7ve_ext_table
[] =
26884 ARM_EXT ("fp", FPU_ARCH_VFP_V4D16
, ALL_FP
),
26885 ARM_ADD ("vfpv3-d16", FPU_ARCH_VFP_V3D16
),
26886 ARM_ADD ("vfpv3", FPU_ARCH_VFP_V3
),
26887 ARM_ADD ("vfpv3-d16-fp16", FPU_ARCH_VFP_V3D16_FP16
),
26888 ARM_ADD ("vfpv3-fp16", FPU_ARCH_VFP_V3_FP16
),
26889 ARM_ADD ("vfpv4-d16", FPU_ARCH_VFP_V4D16
), /* Alias for +fp. */
26890 ARM_ADD ("vfpv4", FPU_ARCH_VFP_V4
),
26892 ARM_EXT ("simd", FPU_ARCH_NEON_VFP_V4
,
26893 ARM_FEATURE_COPROC (FPU_NEON_EXT_V1
| FPU_NEON_EXT_FMA
)),
26895 /* Aliases for +simd. */
26896 ARM_ADD ("neon-vfpv4", FPU_ARCH_NEON_VFP_V4
),
26898 ARM_ADD ("neon", FPU_ARCH_VFP_V3_PLUS_NEON_V1
),
26899 ARM_ADD ("neon-vfpv3", FPU_ARCH_VFP_V3_PLUS_NEON_V1
),
26900 ARM_ADD ("neon-fp16", FPU_ARCH_NEON_FP16
),
26902 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
26905 static const struct arm_ext_table armv7a_ext_table
[] =
26907 ARM_EXT ("fp", FPU_ARCH_VFP_V3D16
, ALL_FP
),
26908 ARM_ADD ("vfpv3-d16", FPU_ARCH_VFP_V3D16
), /* Alias for +fp. */
26909 ARM_ADD ("vfpv3", FPU_ARCH_VFP_V3
),
26910 ARM_ADD ("vfpv3-d16-fp16", FPU_ARCH_VFP_V3D16_FP16
),
26911 ARM_ADD ("vfpv3-fp16", FPU_ARCH_VFP_V3_FP16
),
26912 ARM_ADD ("vfpv4-d16", FPU_ARCH_VFP_V4D16
),
26913 ARM_ADD ("vfpv4", FPU_ARCH_VFP_V4
),
26915 ARM_EXT ("simd", FPU_ARCH_VFP_V3_PLUS_NEON_V1
,
26916 ARM_FEATURE_COPROC (FPU_NEON_EXT_V1
| FPU_NEON_EXT_FMA
)),
26918 /* Aliases for +simd. */
26919 ARM_ADD ("neon", FPU_ARCH_VFP_V3_PLUS_NEON_V1
),
26920 ARM_ADD ("neon-vfpv3", FPU_ARCH_VFP_V3_PLUS_NEON_V1
),
26922 ARM_ADD ("neon-fp16", FPU_ARCH_NEON_FP16
),
26923 ARM_ADD ("neon-vfpv4", FPU_ARCH_NEON_VFP_V4
),
26925 ARM_ADD ("mp", ARM_FEATURE_CORE_LOW (ARM_EXT_MP
)),
26926 ARM_ADD ("sec", ARM_FEATURE_CORE_LOW (ARM_EXT_SEC
)),
26927 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
26930 static const struct arm_ext_table armv7r_ext_table
[] =
26932 ARM_ADD ("fp.sp", FPU_ARCH_VFP_V3xD
),
26933 ARM_ADD ("vfpv3xd", FPU_ARCH_VFP_V3xD
), /* Alias for +fp.sp. */
26934 ARM_EXT ("fp", FPU_ARCH_VFP_V3D16
, ALL_FP
),
26935 ARM_ADD ("vfpv3-d16", FPU_ARCH_VFP_V3D16
), /* Alias for +fp. */
26936 ARM_ADD ("vfpv3xd-fp16", FPU_ARCH_VFP_V3xD_FP16
),
26937 ARM_ADD ("vfpv3-d16-fp16", FPU_ARCH_VFP_V3D16_FP16
),
26938 ARM_EXT ("idiv", ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV
| ARM_EXT_DIV
),
26939 ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV
| ARM_EXT_DIV
)),
26940 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
26943 static const struct arm_ext_table armv7em_ext_table
[] =
26945 ARM_EXT ("fp", FPU_ARCH_VFP_V4_SP_D16
, ALL_FP
),
26946 /* Alias for +fp, used to be known as fpv4-sp-d16. */
26947 ARM_ADD ("vfpv4-sp-d16", FPU_ARCH_VFP_V4_SP_D16
),
26948 ARM_ADD ("fpv5", FPU_ARCH_VFP_V5_SP_D16
),
26949 ARM_ADD ("fp.dp", FPU_ARCH_VFP_V5D16
),
26950 ARM_ADD ("fpv5-d16", FPU_ARCH_VFP_V5D16
),
26951 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
26954 static const struct arm_ext_table armv8a_ext_table
[] =
26956 ARM_ADD ("crc", ARCH_CRC_ARMV8
),
26957 ARM_ADD ("simd", FPU_ARCH_NEON_VFP_ARMV8
),
26958 ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
,
26959 ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8
)),
26961 /* Armv8-a does not allow an FP implementation without SIMD, so the user
26962 should use the +simd option to turn on FP. */
26963 ARM_REMOVE ("fp", ALL_FP
),
26964 ARM_ADD ("sb", ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB
)),
26965 ARM_ADD ("predres", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES
)),
26966 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
26970 static const struct arm_ext_table armv81a_ext_table
[] =
26972 ARM_ADD ("simd", FPU_ARCH_NEON_VFP_ARMV8_1
),
26973 ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_1
,
26974 ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8
)),
26976 /* Armv8-a does not allow an FP implementation without SIMD, so the user
26977 should use the +simd option to turn on FP. */
26978 ARM_REMOVE ("fp", ALL_FP
),
26979 ARM_ADD ("sb", ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB
)),
26980 ARM_ADD ("predres", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES
)),
26981 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
26984 static const struct arm_ext_table armv82a_ext_table
[] =
26986 ARM_ADD ("simd", FPU_ARCH_NEON_VFP_ARMV8_1
),
26987 ARM_ADD ("fp16", FPU_ARCH_NEON_VFP_ARMV8_2_FP16
),
26988 ARM_ADD ("fp16fml", FPU_ARCH_NEON_VFP_ARMV8_2_FP16FML
),
26989 ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_1
,
26990 ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8
)),
26991 ARM_ADD ("dotprod", FPU_ARCH_DOTPROD_NEON_VFP_ARMV8
),
26993 /* Armv8-a does not allow an FP implementation without SIMD, so the user
26994 should use the +simd option to turn on FP. */
26995 ARM_REMOVE ("fp", ALL_FP
),
26996 ARM_ADD ("sb", ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB
)),
26997 ARM_ADD ("predres", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES
)),
26998 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
27001 static const struct arm_ext_table armv84a_ext_table
[] =
27003 ARM_ADD ("simd", FPU_ARCH_DOTPROD_NEON_VFP_ARMV8
),
27004 ARM_ADD ("fp16", FPU_ARCH_NEON_VFP_ARMV8_4_FP16FML
),
27005 ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_4
,
27006 ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8
)),
27008 /* Armv8-a does not allow an FP implementation without SIMD, so the user
27009 should use the +simd option to turn on FP. */
27010 ARM_REMOVE ("fp", ALL_FP
),
27011 ARM_ADD ("sb", ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB
)),
27012 ARM_ADD ("predres", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES
)),
27013 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
27016 static const struct arm_ext_table armv85a_ext_table
[] =
27018 ARM_ADD ("simd", FPU_ARCH_DOTPROD_NEON_VFP_ARMV8
),
27019 ARM_ADD ("fp16", FPU_ARCH_NEON_VFP_ARMV8_4_FP16FML
),
27020 ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_4
,
27021 ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8
)),
27023 /* Armv8-a does not allow an FP implementation without SIMD, so the user
27024 should use the +simd option to turn on FP. */
27025 ARM_REMOVE ("fp", ALL_FP
),
27026 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
27029 static const struct arm_ext_table armv8m_main_ext_table
[] =
27031 ARM_EXT ("dsp", ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP
| ARM_EXT_V6_DSP
),
27032 ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP
| ARM_EXT_V6_DSP
)),
27033 ARM_EXT ("fp", FPU_ARCH_VFP_V5_SP_D16
, ALL_FP
),
27034 ARM_ADD ("fp.dp", FPU_ARCH_VFP_V5D16
),
27035 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
27038 static const struct arm_ext_table armv8_1m_main_ext_table
[] =
27040 ARM_EXT ("dsp", ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP
| ARM_EXT_V6_DSP
),
27041 ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP
| ARM_EXT_V6_DSP
)),
27043 ARM_FEATURE (0, ARM_EXT2_FP16_INST
,
27044 FPU_VFP_V5_SP_D16
| FPU_VFP_EXT_FP16
| FPU_VFP_EXT_FMA
),
27047 ARM_FEATURE (0, ARM_EXT2_FP16_INST
,
27048 FPU_VFP_V5D16
| FPU_VFP_EXT_FP16
| FPU_VFP_EXT_FMA
)),
27049 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
27052 static const struct arm_ext_table armv8r_ext_table
[] =
27054 ARM_ADD ("crc", ARCH_CRC_ARMV8
),
27055 ARM_ADD ("simd", FPU_ARCH_NEON_VFP_ARMV8
),
27056 ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
,
27057 ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8
)),
27058 ARM_REMOVE ("fp", ALL_FP
),
27059 ARM_ADD ("fp.sp", FPU_ARCH_VFP_V5_SP_D16
),
27060 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
27063 /* This list should, at a minimum, contain all the architecture names
27064 recognized by GCC. */
27065 #define ARM_ARCH_OPT(N, V, DF) { N, sizeof (N) - 1, V, DF, NULL }
27066 #define ARM_ARCH_OPT2(N, V, DF, ext) \
27067 { N, sizeof (N) - 1, V, DF, ext##_ext_table }
27069 static const struct arm_arch_option_table arm_archs
[] =
27071 ARM_ARCH_OPT ("all", ARM_ANY
, FPU_ARCH_FPA
),
27072 ARM_ARCH_OPT ("armv1", ARM_ARCH_V1
, FPU_ARCH_FPA
),
27073 ARM_ARCH_OPT ("armv2", ARM_ARCH_V2
, FPU_ARCH_FPA
),
27074 ARM_ARCH_OPT ("armv2a", ARM_ARCH_V2S
, FPU_ARCH_FPA
),
27075 ARM_ARCH_OPT ("armv2s", ARM_ARCH_V2S
, FPU_ARCH_FPA
),
27076 ARM_ARCH_OPT ("armv3", ARM_ARCH_V3
, FPU_ARCH_FPA
),
27077 ARM_ARCH_OPT ("armv3m", ARM_ARCH_V3M
, FPU_ARCH_FPA
),
27078 ARM_ARCH_OPT ("armv4", ARM_ARCH_V4
, FPU_ARCH_FPA
),
27079 ARM_ARCH_OPT ("armv4xm", ARM_ARCH_V4xM
, FPU_ARCH_FPA
),
27080 ARM_ARCH_OPT ("armv4t", ARM_ARCH_V4T
, FPU_ARCH_FPA
),
27081 ARM_ARCH_OPT ("armv4txm", ARM_ARCH_V4TxM
, FPU_ARCH_FPA
),
27082 ARM_ARCH_OPT ("armv5", ARM_ARCH_V5
, FPU_ARCH_VFP
),
27083 ARM_ARCH_OPT ("armv5t", ARM_ARCH_V5T
, FPU_ARCH_VFP
),
27084 ARM_ARCH_OPT ("armv5txm", ARM_ARCH_V5TxM
, FPU_ARCH_VFP
),
27085 ARM_ARCH_OPT2 ("armv5te", ARM_ARCH_V5TE
, FPU_ARCH_VFP
, armv5te
),
27086 ARM_ARCH_OPT2 ("armv5texp", ARM_ARCH_V5TExP
, FPU_ARCH_VFP
, armv5te
),
27087 ARM_ARCH_OPT2 ("armv5tej", ARM_ARCH_V5TEJ
, FPU_ARCH_VFP
, armv5te
),
27088 ARM_ARCH_OPT2 ("armv6", ARM_ARCH_V6
, FPU_ARCH_VFP
, armv5te
),
27089 ARM_ARCH_OPT2 ("armv6j", ARM_ARCH_V6
, FPU_ARCH_VFP
, armv5te
),
27090 ARM_ARCH_OPT2 ("armv6k", ARM_ARCH_V6K
, FPU_ARCH_VFP
, armv5te
),
27091 ARM_ARCH_OPT2 ("armv6z", ARM_ARCH_V6Z
, FPU_ARCH_VFP
, armv5te
),
27092 /* The official spelling of this variant is ARMv6KZ, the name "armv6zk" is
27093 kept to preserve existing behaviour. */
27094 ARM_ARCH_OPT2 ("armv6kz", ARM_ARCH_V6KZ
, FPU_ARCH_VFP
, armv5te
),
27095 ARM_ARCH_OPT2 ("armv6zk", ARM_ARCH_V6KZ
, FPU_ARCH_VFP
, armv5te
),
27096 ARM_ARCH_OPT2 ("armv6t2", ARM_ARCH_V6T2
, FPU_ARCH_VFP
, armv5te
),
27097 ARM_ARCH_OPT2 ("armv6kt2", ARM_ARCH_V6KT2
, FPU_ARCH_VFP
, armv5te
),
27098 ARM_ARCH_OPT2 ("armv6zt2", ARM_ARCH_V6ZT2
, FPU_ARCH_VFP
, armv5te
),
27099 /* The official spelling of this variant is ARMv6KZ, the name "armv6zkt2" is
27100 kept to preserve existing behaviour. */
27101 ARM_ARCH_OPT2 ("armv6kzt2", ARM_ARCH_V6KZT2
, FPU_ARCH_VFP
, armv5te
),
27102 ARM_ARCH_OPT2 ("armv6zkt2", ARM_ARCH_V6KZT2
, FPU_ARCH_VFP
, armv5te
),
27103 ARM_ARCH_OPT ("armv6-m", ARM_ARCH_V6M
, FPU_ARCH_VFP
),
27104 ARM_ARCH_OPT ("armv6s-m", ARM_ARCH_V6SM
, FPU_ARCH_VFP
),
27105 ARM_ARCH_OPT2 ("armv7", ARM_ARCH_V7
, FPU_ARCH_VFP
, armv7
),
27106 /* The official spelling of the ARMv7 profile variants is the dashed form.
27107 Accept the non-dashed form for compatibility with old toolchains. */
27108 ARM_ARCH_OPT2 ("armv7a", ARM_ARCH_V7A
, FPU_ARCH_VFP
, armv7a
),
27109 ARM_ARCH_OPT2 ("armv7ve", ARM_ARCH_V7VE
, FPU_ARCH_VFP
, armv7ve
),
27110 ARM_ARCH_OPT2 ("armv7r", ARM_ARCH_V7R
, FPU_ARCH_VFP
, armv7r
),
27111 ARM_ARCH_OPT ("armv7m", ARM_ARCH_V7M
, FPU_ARCH_VFP
),
27112 ARM_ARCH_OPT2 ("armv7-a", ARM_ARCH_V7A
, FPU_ARCH_VFP
, armv7a
),
27113 ARM_ARCH_OPT2 ("armv7-r", ARM_ARCH_V7R
, FPU_ARCH_VFP
, armv7r
),
27114 ARM_ARCH_OPT ("armv7-m", ARM_ARCH_V7M
, FPU_ARCH_VFP
),
27115 ARM_ARCH_OPT2 ("armv7e-m", ARM_ARCH_V7EM
, FPU_ARCH_VFP
, armv7em
),
27116 ARM_ARCH_OPT ("armv8-m.base", ARM_ARCH_V8M_BASE
, FPU_ARCH_VFP
),
27117 ARM_ARCH_OPT2 ("armv8-m.main", ARM_ARCH_V8M_MAIN
, FPU_ARCH_VFP
,
27119 ARM_ARCH_OPT2 ("armv8.1-m.main", ARM_ARCH_V8_1M_MAIN
, FPU_ARCH_VFP
,
27121 ARM_ARCH_OPT2 ("armv8-a", ARM_ARCH_V8A
, FPU_ARCH_VFP
, armv8a
),
27122 ARM_ARCH_OPT2 ("armv8.1-a", ARM_ARCH_V8_1A
, FPU_ARCH_VFP
, armv81a
),
27123 ARM_ARCH_OPT2 ("armv8.2-a", ARM_ARCH_V8_2A
, FPU_ARCH_VFP
, armv82a
),
27124 ARM_ARCH_OPT2 ("armv8.3-a", ARM_ARCH_V8_3A
, FPU_ARCH_VFP
, armv82a
),
27125 ARM_ARCH_OPT2 ("armv8-r", ARM_ARCH_V8R
, FPU_ARCH_VFP
, armv8r
),
27126 ARM_ARCH_OPT2 ("armv8.4-a", ARM_ARCH_V8_4A
, FPU_ARCH_VFP
, armv84a
),
27127 ARM_ARCH_OPT2 ("armv8.5-a", ARM_ARCH_V8_5A
, FPU_ARCH_VFP
, armv85a
),
27128 ARM_ARCH_OPT ("xscale", ARM_ARCH_XSCALE
, FPU_ARCH_VFP
),
27129 ARM_ARCH_OPT ("iwmmxt", ARM_ARCH_IWMMXT
, FPU_ARCH_VFP
),
27130 ARM_ARCH_OPT ("iwmmxt2", ARM_ARCH_IWMMXT2
, FPU_ARCH_VFP
),
27131 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
, NULL
}
27133 #undef ARM_ARCH_OPT
27135 /* ISA extensions in the co-processor and main instruction set space. */
27137 struct arm_option_extension_value_table
27141 const arm_feature_set merge_value
;
27142 const arm_feature_set clear_value
;
27143 /* List of architectures for which an extension is available. ARM_ARCH_NONE
27144 indicates that an extension is available for all architectures while
27145 ARM_ANY marks an empty entry. */
27146 const arm_feature_set allowed_archs
[2];
27149 /* The following table must be in alphabetical order with a NULL last entry. */
27151 #define ARM_EXT_OPT(N, M, C, AA) { N, sizeof (N) - 1, M, C, { AA, ARM_ANY } }
27152 #define ARM_EXT_OPT2(N, M, C, AA1, AA2) { N, sizeof (N) - 1, M, C, {AA1, AA2} }
27154 /* DEPRECATED: Refrain from using this table to add any new extensions, instead
27155 use the context sensitive approach using arm_ext_table's. */
27156 static const struct arm_option_extension_value_table arm_extensions
[] =
27158 ARM_EXT_OPT ("crc", ARCH_CRC_ARMV8
, ARM_FEATURE_COPROC (CRC_EXT_ARMV8
),
27159 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
)),
27160 ARM_EXT_OPT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
,
27161 ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8
),
27162 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
)),
27163 ARM_EXT_OPT ("dotprod", FPU_ARCH_DOTPROD_NEON_VFP_ARMV8
,
27164 ARM_FEATURE_COPROC (FPU_NEON_EXT_DOTPROD
),
27166 ARM_EXT_OPT ("dsp", ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP
| ARM_EXT_V6_DSP
),
27167 ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP
| ARM_EXT_V6_DSP
),
27168 ARM_FEATURE_CORE (ARM_EXT_V7M
, ARM_EXT2_V8M
)),
27169 ARM_EXT_OPT ("fp", FPU_ARCH_VFP_ARMV8
, ARM_FEATURE_COPROC (FPU_VFP_ARMV8
),
27170 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
)),
27171 ARM_EXT_OPT ("fp16", ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
),
27172 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
),
27174 ARM_EXT_OPT ("fp16fml", ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
27175 | ARM_EXT2_FP16_FML
),
27176 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
27177 | ARM_EXT2_FP16_FML
),
27179 ARM_EXT_OPT2 ("idiv", ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV
| ARM_EXT_DIV
),
27180 ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV
| ARM_EXT_DIV
),
27181 ARM_FEATURE_CORE_LOW (ARM_EXT_V7A
),
27182 ARM_FEATURE_CORE_LOW (ARM_EXT_V7R
)),
27183 /* Duplicate entry for the purpose of allowing ARMv7 to match in presence of
27184 Thumb divide instruction. Due to this having the same name as the
27185 previous entry, this will be ignored when doing command-line parsing and
27186 only considered by build attribute selection code. */
27187 ARM_EXT_OPT ("idiv", ARM_FEATURE_CORE_LOW (ARM_EXT_DIV
),
27188 ARM_FEATURE_CORE_LOW (ARM_EXT_DIV
),
27189 ARM_FEATURE_CORE_LOW (ARM_EXT_V7
)),
27190 ARM_EXT_OPT ("iwmmxt",ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT
),
27191 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT
), ARM_ARCH_NONE
),
27192 ARM_EXT_OPT ("iwmmxt2", ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2
),
27193 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2
), ARM_ARCH_NONE
),
27194 ARM_EXT_OPT ("maverick", ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK
),
27195 ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK
), ARM_ARCH_NONE
),
27196 ARM_EXT_OPT2 ("mp", ARM_FEATURE_CORE_LOW (ARM_EXT_MP
),
27197 ARM_FEATURE_CORE_LOW (ARM_EXT_MP
),
27198 ARM_FEATURE_CORE_LOW (ARM_EXT_V7A
),
27199 ARM_FEATURE_CORE_LOW (ARM_EXT_V7R
)),
27200 ARM_EXT_OPT ("os", ARM_FEATURE_CORE_LOW (ARM_EXT_OS
),
27201 ARM_FEATURE_CORE_LOW (ARM_EXT_OS
),
27202 ARM_FEATURE_CORE_LOW (ARM_EXT_V6M
)),
27203 ARM_EXT_OPT ("pan", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PAN
),
27204 ARM_FEATURE (ARM_EXT_V8
, ARM_EXT2_PAN
, 0),
27205 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8A
)),
27206 ARM_EXT_OPT ("predres", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES
),
27207 ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES
),
27209 ARM_EXT_OPT ("ras", ARM_FEATURE_CORE_HIGH (ARM_EXT2_RAS
),
27210 ARM_FEATURE (ARM_EXT_V8
, ARM_EXT2_RAS
, 0),
27211 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8A
)),
27212 ARM_EXT_OPT ("rdma", FPU_ARCH_NEON_VFP_ARMV8_1
,
27213 ARM_FEATURE_COPROC (FPU_NEON_ARMV8
| FPU_NEON_EXT_RDMA
),
27214 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8A
)),
27215 ARM_EXT_OPT ("sb", ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB
),
27216 ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB
),
27218 ARM_EXT_OPT2 ("sec", ARM_FEATURE_CORE_LOW (ARM_EXT_SEC
),
27219 ARM_FEATURE_CORE_LOW (ARM_EXT_SEC
),
27220 ARM_FEATURE_CORE_LOW (ARM_EXT_V6K
),
27221 ARM_FEATURE_CORE_LOW (ARM_EXT_V7A
)),
27222 ARM_EXT_OPT ("simd", FPU_ARCH_NEON_VFP_ARMV8
,
27223 ARM_FEATURE_COPROC (FPU_NEON_ARMV8
),
27224 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
)),
27225 ARM_EXT_OPT ("virt", ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT
| ARM_EXT_ADIV
27227 ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT
),
27228 ARM_FEATURE_CORE_LOW (ARM_EXT_V7A
)),
27229 ARM_EXT_OPT ("xscale",ARM_FEATURE_COPROC (ARM_CEXT_XSCALE
),
27230 ARM_FEATURE_COPROC (ARM_CEXT_XSCALE
), ARM_ARCH_NONE
),
27231 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
, { ARM_ARCH_NONE
, ARM_ARCH_NONE
} }
27235 /* ISA floating-point and Advanced SIMD extensions. */
27236 struct arm_option_fpu_value_table
27239 const arm_feature_set value
;
27242 /* This list should, at a minimum, contain all the fpu names
27243 recognized by GCC. */
27244 static const struct arm_option_fpu_value_table arm_fpus
[] =
27246 {"softfpa", FPU_NONE
},
27247 {"fpe", FPU_ARCH_FPE
},
27248 {"fpe2", FPU_ARCH_FPE
},
27249 {"fpe3", FPU_ARCH_FPA
}, /* Third release supports LFM/SFM. */
27250 {"fpa", FPU_ARCH_FPA
},
27251 {"fpa10", FPU_ARCH_FPA
},
27252 {"fpa11", FPU_ARCH_FPA
},
27253 {"arm7500fe", FPU_ARCH_FPA
},
27254 {"softvfp", FPU_ARCH_VFP
},
27255 {"softvfp+vfp", FPU_ARCH_VFP_V2
},
27256 {"vfp", FPU_ARCH_VFP_V2
},
27257 {"vfp9", FPU_ARCH_VFP_V2
},
27258 {"vfp3", FPU_ARCH_VFP_V3
}, /* Undocumented, use vfpv3. */
27259 {"vfp10", FPU_ARCH_VFP_V2
},
27260 {"vfp10-r0", FPU_ARCH_VFP_V1
},
27261 {"vfpxd", FPU_ARCH_VFP_V1xD
},
27262 {"vfpv2", FPU_ARCH_VFP_V2
},
27263 {"vfpv3", FPU_ARCH_VFP_V3
},
27264 {"vfpv3-fp16", FPU_ARCH_VFP_V3_FP16
},
27265 {"vfpv3-d16", FPU_ARCH_VFP_V3D16
},
27266 {"vfpv3-d16-fp16", FPU_ARCH_VFP_V3D16_FP16
},
27267 {"vfpv3xd", FPU_ARCH_VFP_V3xD
},
27268 {"vfpv3xd-fp16", FPU_ARCH_VFP_V3xD_FP16
},
27269 {"arm1020t", FPU_ARCH_VFP_V1
},
27270 {"arm1020e", FPU_ARCH_VFP_V2
},
27271 {"arm1136jfs", FPU_ARCH_VFP_V2
}, /* Undocumented, use arm1136jf-s. */
27272 {"arm1136jf-s", FPU_ARCH_VFP_V2
},
27273 {"maverick", FPU_ARCH_MAVERICK
},
27274 {"neon", FPU_ARCH_VFP_V3_PLUS_NEON_V1
},
27275 {"neon-vfpv3", FPU_ARCH_VFP_V3_PLUS_NEON_V1
},
27276 {"neon-fp16", FPU_ARCH_NEON_FP16
},
27277 {"vfpv4", FPU_ARCH_VFP_V4
},
27278 {"vfpv4-d16", FPU_ARCH_VFP_V4D16
},
27279 {"fpv4-sp-d16", FPU_ARCH_VFP_V4_SP_D16
},
27280 {"fpv5-d16", FPU_ARCH_VFP_V5D16
},
27281 {"fpv5-sp-d16", FPU_ARCH_VFP_V5_SP_D16
},
27282 {"neon-vfpv4", FPU_ARCH_NEON_VFP_V4
},
27283 {"fp-armv8", FPU_ARCH_VFP_ARMV8
},
27284 {"neon-fp-armv8", FPU_ARCH_NEON_VFP_ARMV8
},
27285 {"crypto-neon-fp-armv8",
27286 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
},
27287 {"neon-fp-armv8.1", FPU_ARCH_NEON_VFP_ARMV8_1
},
27288 {"crypto-neon-fp-armv8.1",
27289 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_1
},
27290 {NULL
, ARM_ARCH_NONE
}
/* Simple name -> value option table.
   NOTE(review): members reconstructed from the table initializers and the
   opt->name / opt->value uses below — verify against upstream.  */
struct arm_option_value_table
{
  const char *name;		/* Option name to match.  */
  long value;			/* Value to return.  */
};
27299 static const struct arm_option_value_table arm_float_abis
[] =
27301 {"hard", ARM_FLOAT_ABI_HARD
},
27302 {"softfp", ARM_FLOAT_ABI_SOFTFP
},
27303 {"soft", ARM_FLOAT_ABI_SOFT
},
27308 /* We only know how to output GNU and ver 4/5 (AAELF) formats. */
27309 static const struct arm_option_value_table arm_eabis
[] =
27311 {"gnu", EF_ARM_EABI_UNKNOWN
},
27312 {"4", EF_ARM_EABI_VER4
},
27313 {"5", EF_ARM_EABI_VER5
},
/* Descriptor for a long (multi-character, possibly argument-taking)
   command-line option.  */
struct arm_long_option_table
{
  const char *option;			/* Substring to match.  */
  const char *help;			/* Help information.  */
  int (*func) (const char *subopt);	/* Function to decode sub-option.  */
  const char *deprecated;		/* If non-null, print this message.  */
};
27327 arm_parse_extension (const char *str
, const arm_feature_set
*opt_set
,
27328 arm_feature_set
*ext_set
,
27329 const struct arm_ext_table
*ext_table
)
27331 /* We insist on extensions being specified in alphabetical order, and with
27332 extensions being added before being removed. We achieve this by having
27333 the global ARM_EXTENSIONS table in alphabetical order, and using the
27334 ADDING_VALUE variable to indicate whether we are adding an extension (1)
27335 or removing it (0) and only allowing it to change in the order
27337 const struct arm_option_extension_value_table
* opt
= NULL
;
27338 const arm_feature_set arm_any
= ARM_ANY
;
27339 int adding_value
= -1;
27341 while (str
!= NULL
&& *str
!= 0)
27348 as_bad (_("invalid architectural extension"));
27353 ext
= strchr (str
, '+');
27358 len
= strlen (str
);
27360 if (len
>= 2 && strncmp (str
, "no", 2) == 0)
27362 if (adding_value
!= 0)
27365 opt
= arm_extensions
;
27373 if (adding_value
== -1)
27376 opt
= arm_extensions
;
27378 else if (adding_value
!= 1)
27380 as_bad (_("must specify extensions to add before specifying "
27381 "those to remove"));
27388 as_bad (_("missing architectural extension"));
27392 gas_assert (adding_value
!= -1);
27393 gas_assert (opt
!= NULL
);
27395 if (ext_table
!= NULL
)
27397 const struct arm_ext_table
* ext_opt
= ext_table
;
27398 bfd_boolean found
= FALSE
;
27399 for (; ext_opt
->name
!= NULL
; ext_opt
++)
27400 if (ext_opt
->name_len
== len
27401 && strncmp (ext_opt
->name
, str
, len
) == 0)
27405 if (ARM_FEATURE_ZERO (ext_opt
->merge
))
27406 /* TODO: Option not supported. When we remove the
27407 legacy table this case should error out. */
27410 ARM_MERGE_FEATURE_SETS (*ext_set
, *ext_set
, ext_opt
->merge
);
27414 if (ARM_FEATURE_ZERO (ext_opt
->clear
))
27415 /* TODO: Option not supported. When we remove the
27416 legacy table this case should error out. */
27418 ARM_CLEAR_FEATURE (*ext_set
, *ext_set
, ext_opt
->clear
);
27430 /* Scan over the options table trying to find an exact match. */
27431 for (; opt
->name
!= NULL
; opt
++)
27432 if (opt
->name_len
== len
&& strncmp (opt
->name
, str
, len
) == 0)
27434 int i
, nb_allowed_archs
=
27435 sizeof (opt
->allowed_archs
) / sizeof (opt
->allowed_archs
[0]);
27436 /* Check we can apply the extension to this architecture. */
27437 for (i
= 0; i
< nb_allowed_archs
; i
++)
27440 if (ARM_FEATURE_EQUAL (opt
->allowed_archs
[i
], arm_any
))
27442 if (ARM_FSET_CPU_SUBSET (opt
->allowed_archs
[i
], *opt_set
))
27445 if (i
== nb_allowed_archs
)
27447 as_bad (_("extension does not apply to the base architecture"));
27451 /* Add or remove the extension. */
27453 ARM_MERGE_FEATURE_SETS (*ext_set
, *ext_set
, opt
->merge_value
);
27455 ARM_CLEAR_FEATURE (*ext_set
, *ext_set
, opt
->clear_value
);
27457 /* Allowing Thumb division instructions for ARMv7 in autodetection
27458 rely on this break so that duplicate extensions (extensions
27459 with the same name as a previous extension in the list) are not
27460 considered for command-line parsing. */
27464 if (opt
->name
== NULL
)
27466 /* Did we fail to find an extension because it wasn't specified in
27467 alphabetical order, or because it does not exist? */
27469 for (opt
= arm_extensions
; opt
->name
!= NULL
; opt
++)
27470 if (opt
->name_len
== len
&& strncmp (opt
->name
, str
, len
) == 0)
27473 if (opt
->name
== NULL
)
27474 as_bad (_("unknown architectural extension `%s'"), str
);
27476 as_bad (_("architectural extensions must be specified in "
27477 "alphabetical order"));
27483 /* We should skip the extension we've just matched the next time
27495 arm_parse_cpu (const char *str
)
27497 const struct arm_cpu_option_table
*opt
;
27498 const char *ext
= strchr (str
, '+');
27504 len
= strlen (str
);
27508 as_bad (_("missing cpu name `%s'"), str
);
27512 for (opt
= arm_cpus
; opt
->name
!= NULL
; opt
++)
27513 if (opt
->name_len
== len
&& strncmp (opt
->name
, str
, len
) == 0)
27515 mcpu_cpu_opt
= &opt
->value
;
27516 if (mcpu_ext_opt
== NULL
)
27517 mcpu_ext_opt
= XNEW (arm_feature_set
);
27518 *mcpu_ext_opt
= opt
->ext
;
27519 mcpu_fpu_opt
= &opt
->default_fpu
;
27520 if (opt
->canonical_name
)
27522 gas_assert (sizeof selected_cpu_name
> strlen (opt
->canonical_name
));
27523 strcpy (selected_cpu_name
, opt
->canonical_name
);
27529 if (len
>= sizeof selected_cpu_name
)
27530 len
= (sizeof selected_cpu_name
) - 1;
27532 for (i
= 0; i
< len
; i
++)
27533 selected_cpu_name
[i
] = TOUPPER (opt
->name
[i
]);
27534 selected_cpu_name
[i
] = 0;
27538 return arm_parse_extension (ext
, mcpu_cpu_opt
, mcpu_ext_opt
, NULL
);
27543 as_bad (_("unknown cpu `%s'"), str
);
27548 arm_parse_arch (const char *str
)
27550 const struct arm_arch_option_table
*opt
;
27551 const char *ext
= strchr (str
, '+');
27557 len
= strlen (str
);
27561 as_bad (_("missing architecture name `%s'"), str
);
27565 for (opt
= arm_archs
; opt
->name
!= NULL
; opt
++)
27566 if (opt
->name_len
== len
&& strncmp (opt
->name
, str
, len
) == 0)
27568 march_cpu_opt
= &opt
->value
;
27569 if (march_ext_opt
== NULL
)
27570 march_ext_opt
= XNEW (arm_feature_set
);
27571 *march_ext_opt
= arm_arch_none
;
27572 march_fpu_opt
= &opt
->default_fpu
;
27573 strcpy (selected_cpu_name
, opt
->name
);
27576 return arm_parse_extension (ext
, march_cpu_opt
, march_ext_opt
,
27582 as_bad (_("unknown architecture `%s'\n"), str
);
27587 arm_parse_fpu (const char * str
)
27589 const struct arm_option_fpu_value_table
* opt
;
27591 for (opt
= arm_fpus
; opt
->name
!= NULL
; opt
++)
27592 if (streq (opt
->name
, str
))
27594 mfpu_opt
= &opt
->value
;
27598 as_bad (_("unknown floating point format `%s'\n"), str
);
27603 arm_parse_float_abi (const char * str
)
27605 const struct arm_option_value_table
* opt
;
27607 for (opt
= arm_float_abis
; opt
->name
!= NULL
; opt
++)
27608 if (streq (opt
->name
, str
))
27610 mfloat_abi_opt
= opt
->value
;
27614 as_bad (_("unknown floating point abi `%s'\n"), str
);
27620 arm_parse_eabi (const char * str
)
27622 const struct arm_option_value_table
*opt
;
27624 for (opt
= arm_eabis
; opt
->name
!= NULL
; opt
++)
27625 if (streq (opt
->name
, str
))
27627 meabi_flags
= opt
->value
;
27630 as_bad (_("unknown EABI `%s'\n"), str
);
27636 arm_parse_it_mode (const char * str
)
27638 bfd_boolean ret
= TRUE
;
27640 if (streq ("arm", str
))
27641 implicit_it_mode
= IMPLICIT_IT_MODE_ARM
;
27642 else if (streq ("thumb", str
))
27643 implicit_it_mode
= IMPLICIT_IT_MODE_THUMB
;
27644 else if (streq ("always", str
))
27645 implicit_it_mode
= IMPLICIT_IT_MODE_ALWAYS
;
27646 else if (streq ("never", str
))
27647 implicit_it_mode
= IMPLICIT_IT_MODE_NEVER
;
27650 as_bad (_("unknown implicit IT mode `%s', should be "\
27651 "arm, thumb, always, or never."), str
);
27659 arm_ccs_mode (const char * unused ATTRIBUTE_UNUSED
)
27661 codecomposer_syntax
= TRUE
;
27662 arm_comment_chars
[0] = ';';
27663 arm_line_separator_chars
[0] = 0;
27667 struct arm_long_option_table arm_long_opts
[] =
27669 {"mcpu=", N_("<cpu name>\t assemble for CPU <cpu name>"),
27670 arm_parse_cpu
, NULL
},
27671 {"march=", N_("<arch name>\t assemble for architecture <arch name>"),
27672 arm_parse_arch
, NULL
},
27673 {"mfpu=", N_("<fpu name>\t assemble for FPU architecture <fpu name>"),
27674 arm_parse_fpu
, NULL
},
27675 {"mfloat-abi=", N_("<abi>\t assemble for floating point ABI <abi>"),
27676 arm_parse_float_abi
, NULL
},
27678 {"meabi=", N_("<ver>\t\t assemble for eabi version <ver>"),
27679 arm_parse_eabi
, NULL
},
27681 {"mimplicit-it=", N_("<mode>\t controls implicit insertion of IT instructions"),
27682 arm_parse_it_mode
, NULL
},
27683 {"mccs", N_("\t\t\t TI CodeComposer Studio syntax compatibility mode"),
27684 arm_ccs_mode
, NULL
},
27685 {NULL
, NULL
, 0, NULL
}
27689 md_parse_option (int c
, const char * arg
)
27691 struct arm_option_table
*opt
;
27692 const struct arm_legacy_option_table
*fopt
;
27693 struct arm_long_option_table
*lopt
;
27699 target_big_endian
= 1;
27705 target_big_endian
= 0;
27709 case OPTION_FIX_V4BX
:
27717 #endif /* OBJ_ELF */
27720 /* Listing option. Just ignore these, we don't support additional
27725 for (opt
= arm_opts
; opt
->option
!= NULL
; opt
++)
27727 if (c
== opt
->option
[0]
27728 && ((arg
== NULL
&& opt
->option
[1] == 0)
27729 || streq (arg
, opt
->option
+ 1)))
27731 /* If the option is deprecated, tell the user. */
27732 if (warn_on_deprecated
&& opt
->deprecated
!= NULL
)
27733 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c
,
27734 arg
? arg
: "", _(opt
->deprecated
));
27736 if (opt
->var
!= NULL
)
27737 *opt
->var
= opt
->value
;
27743 for (fopt
= arm_legacy_opts
; fopt
->option
!= NULL
; fopt
++)
27745 if (c
== fopt
->option
[0]
27746 && ((arg
== NULL
&& fopt
->option
[1] == 0)
27747 || streq (arg
, fopt
->option
+ 1)))
27749 /* If the option is deprecated, tell the user. */
27750 if (warn_on_deprecated
&& fopt
->deprecated
!= NULL
)
27751 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c
,
27752 arg
? arg
: "", _(fopt
->deprecated
));
27754 if (fopt
->var
!= NULL
)
27755 *fopt
->var
= &fopt
->value
;
27761 for (lopt
= arm_long_opts
; lopt
->option
!= NULL
; lopt
++)
27763 /* These options are expected to have an argument. */
27764 if (c
== lopt
->option
[0]
27766 && strncmp (arg
, lopt
->option
+ 1,
27767 strlen (lopt
->option
+ 1)) == 0)
27769 /* If the option is deprecated, tell the user. */
27770 if (warn_on_deprecated
&& lopt
->deprecated
!= NULL
)
27771 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c
, arg
,
27772 _(lopt
->deprecated
));
27774 /* Call the sup-option parser. */
27775 return lopt
->func (arg
+ strlen (lopt
->option
) - 1);
27786 md_show_usage (FILE * fp
)
27788 struct arm_option_table
*opt
;
27789 struct arm_long_option_table
*lopt
;
27791 fprintf (fp
, _(" ARM-specific assembler options:\n"));
27793 for (opt
= arm_opts
; opt
->option
!= NULL
; opt
++)
27794 if (opt
->help
!= NULL
)
27795 fprintf (fp
, " -%-23s%s\n", opt
->option
, _(opt
->help
));
27797 for (lopt
= arm_long_opts
; lopt
->option
!= NULL
; lopt
++)
27798 if (lopt
->help
!= NULL
)
27799 fprintf (fp
, " -%s%s\n", lopt
->option
, _(lopt
->help
));
27803 -EB assemble code for a big-endian cpu\n"));
27808 -EL assemble code for a little-endian cpu\n"));
27812 --fix-v4bx Allow BX in ARMv4 code\n"));
27816 --fdpic generate an FDPIC object file\n"));
27817 #endif /* OBJ_ELF */
27825 arm_feature_set flags
;
27826 } cpu_arch_ver_table
;
27828 /* Mapping from CPU features to EABI CPU arch values. Table must be sorted
27829 chronologically for architectures, with an exception for ARMv6-M and
27830 ARMv6S-M due to legacy reasons. No new architecture should have a
27831 special case. This allows for build attribute selection results to be
27832 stable when new architectures are added. */
27833 static const cpu_arch_ver_table cpu_arch_ver
[] =
27835 {TAG_CPU_ARCH_PRE_V4
, ARM_ARCH_V1
},
27836 {TAG_CPU_ARCH_PRE_V4
, ARM_ARCH_V2
},
27837 {TAG_CPU_ARCH_PRE_V4
, ARM_ARCH_V2S
},
27838 {TAG_CPU_ARCH_PRE_V4
, ARM_ARCH_V3
},
27839 {TAG_CPU_ARCH_PRE_V4
, ARM_ARCH_V3M
},
27840 {TAG_CPU_ARCH_V4
, ARM_ARCH_V4xM
},
27841 {TAG_CPU_ARCH_V4
, ARM_ARCH_V4
},
27842 {TAG_CPU_ARCH_V4T
, ARM_ARCH_V4TxM
},
27843 {TAG_CPU_ARCH_V4T
, ARM_ARCH_V4T
},
27844 {TAG_CPU_ARCH_V5T
, ARM_ARCH_V5xM
},
27845 {TAG_CPU_ARCH_V5T
, ARM_ARCH_V5
},
27846 {TAG_CPU_ARCH_V5T
, ARM_ARCH_V5TxM
},
27847 {TAG_CPU_ARCH_V5T
, ARM_ARCH_V5T
},
27848 {TAG_CPU_ARCH_V5TE
, ARM_ARCH_V5TExP
},
27849 {TAG_CPU_ARCH_V5TE
, ARM_ARCH_V5TE
},
27850 {TAG_CPU_ARCH_V5TEJ
, ARM_ARCH_V5TEJ
},
27851 {TAG_CPU_ARCH_V6
, ARM_ARCH_V6
},
27852 {TAG_CPU_ARCH_V6KZ
, ARM_ARCH_V6Z
},
27853 {TAG_CPU_ARCH_V6KZ
, ARM_ARCH_V6KZ
},
27854 {TAG_CPU_ARCH_V6K
, ARM_ARCH_V6K
},
27855 {TAG_CPU_ARCH_V6T2
, ARM_ARCH_V6T2
},
27856 {TAG_CPU_ARCH_V6T2
, ARM_ARCH_V6KT2
},
27857 {TAG_CPU_ARCH_V6T2
, ARM_ARCH_V6ZT2
},
27858 {TAG_CPU_ARCH_V6T2
, ARM_ARCH_V6KZT2
},
27860 /* When assembling a file with only ARMv6-M or ARMv6S-M instruction, GNU as
27861 always selected build attributes to match those of ARMv6-M
27862 (resp. ARMv6S-M). However, due to these architectures being a strict
27863 subset of ARMv7-M in terms of instructions available, ARMv7-M attributes
27864 would be selected when fully respecting chronology of architectures.
27865 It is thus necessary to make a special case of ARMv6-M and ARMv6S-M and
27866 move them before ARMv7 architectures. */
27867 {TAG_CPU_ARCH_V6_M
, ARM_ARCH_V6M
},
27868 {TAG_CPU_ARCH_V6S_M
, ARM_ARCH_V6SM
},
27870 {TAG_CPU_ARCH_V7
, ARM_ARCH_V7
},
27871 {TAG_CPU_ARCH_V7
, ARM_ARCH_V7A
},
27872 {TAG_CPU_ARCH_V7
, ARM_ARCH_V7R
},
27873 {TAG_CPU_ARCH_V7
, ARM_ARCH_V7M
},
27874 {TAG_CPU_ARCH_V7
, ARM_ARCH_V7VE
},
27875 {TAG_CPU_ARCH_V7E_M
, ARM_ARCH_V7EM
},
27876 {TAG_CPU_ARCH_V8
, ARM_ARCH_V8A
},
27877 {TAG_CPU_ARCH_V8
, ARM_ARCH_V8_1A
},
27878 {TAG_CPU_ARCH_V8
, ARM_ARCH_V8_2A
},
27879 {TAG_CPU_ARCH_V8
, ARM_ARCH_V8_3A
},
27880 {TAG_CPU_ARCH_V8M_BASE
, ARM_ARCH_V8M_BASE
},
27881 {TAG_CPU_ARCH_V8M_MAIN
, ARM_ARCH_V8M_MAIN
},
27882 {TAG_CPU_ARCH_V8R
, ARM_ARCH_V8R
},
27883 {TAG_CPU_ARCH_V8
, ARM_ARCH_V8_4A
},
27884 {TAG_CPU_ARCH_V8
, ARM_ARCH_V8_5A
},
27885 {TAG_CPU_ARCH_V8_1M_MAIN
, ARM_ARCH_V8_1M_MAIN
},
27886 {-1, ARM_ARCH_NONE
}
27889 /* Set an attribute if it has not already been set by the user. */
27892 aeabi_set_attribute_int (int tag
, int value
)
27895 || tag
>= NUM_KNOWN_OBJ_ATTRIBUTES
27896 || !attributes_set_explicitly
[tag
])
27897 bfd_elf_add_proc_attr_int (stdoutput
, tag
, value
);
27901 aeabi_set_attribute_string (int tag
, const char *value
)
27904 || tag
>= NUM_KNOWN_OBJ_ATTRIBUTES
27905 || !attributes_set_explicitly
[tag
])
27906 bfd_elf_add_proc_attr_string (stdoutput
, tag
, value
);
27909 /* Return whether features in the *NEEDED feature set are available via
27910 extensions for the architecture whose feature set is *ARCH_FSET. */
27913 have_ext_for_needed_feat_p (const arm_feature_set
*arch_fset
,
27914 const arm_feature_set
*needed
)
27916 int i
, nb_allowed_archs
;
27917 arm_feature_set ext_fset
;
27918 const struct arm_option_extension_value_table
*opt
;
27920 ext_fset
= arm_arch_none
;
27921 for (opt
= arm_extensions
; opt
->name
!= NULL
; opt
++)
27923 /* Extension does not provide any feature we need. */
27924 if (!ARM_CPU_HAS_FEATURE (*needed
, opt
->merge_value
))
27928 sizeof (opt
->allowed_archs
) / sizeof (opt
->allowed_archs
[0]);
27929 for (i
= 0; i
< nb_allowed_archs
; i
++)
27932 if (ARM_FEATURE_EQUAL (opt
->allowed_archs
[i
], arm_arch_any
))
27935 /* Extension is available, add it. */
27936 if (ARM_FSET_CPU_SUBSET (opt
->allowed_archs
[i
], *arch_fset
))
27937 ARM_MERGE_FEATURE_SETS (ext_fset
, ext_fset
, opt
->merge_value
);
27941 /* Can we enable all features in *needed? */
27942 return ARM_FSET_CPU_SUBSET (*needed
, ext_fset
);
27945 /* Select value for Tag_CPU_arch and Tag_CPU_arch_profile build attributes for
27946 a given architecture feature set *ARCH_EXT_FSET including extension feature
27947 set *EXT_FSET. Selection logic used depend on EXACT_MATCH:
27948 - if true, check for an exact match of the architecture modulo extensions;
27949 - otherwise, select build attribute value of the first superset
27950 architecture released so that results remains stable when new architectures
27952 For -march/-mcpu=all the build attribute value of the most featureful
27953 architecture is returned. Tag_CPU_arch_profile result is returned in
27957 get_aeabi_cpu_arch_from_fset (const arm_feature_set
*arch_ext_fset
,
27958 const arm_feature_set
*ext_fset
,
27959 char *profile
, int exact_match
)
27961 arm_feature_set arch_fset
;
27962 const cpu_arch_ver_table
*p_ver
, *p_ver_ret
= NULL
;
27964 /* Select most featureful architecture with all its extensions if building
27965 for -march=all as the feature sets used to set build attributes. */
27966 if (ARM_FEATURE_EQUAL (*arch_ext_fset
, arm_arch_any
))
27968 /* Force revisiting of decision for each new architecture. */
27969 gas_assert (MAX_TAG_CPU_ARCH
<= TAG_CPU_ARCH_V8_1M_MAIN
);
27971 return TAG_CPU_ARCH_V8
;
27974 ARM_CLEAR_FEATURE (arch_fset
, *arch_ext_fset
, *ext_fset
);
27976 for (p_ver
= cpu_arch_ver
; p_ver
->val
!= -1; p_ver
++)
27978 arm_feature_set known_arch_fset
;
27980 ARM_CLEAR_FEATURE (known_arch_fset
, p_ver
->flags
, fpu_any
);
27983 /* Base architecture match user-specified architecture and
27984 extensions, eg. ARMv6S-M matching -march=armv6-m+os. */
27985 if (ARM_FEATURE_EQUAL (*arch_ext_fset
, known_arch_fset
))
27990 /* Base architecture match user-specified architecture only
27991 (eg. ARMv6-M in the same case as above). Record it in case we
27992 find a match with above condition. */
27993 else if (p_ver_ret
== NULL
27994 && ARM_FEATURE_EQUAL (arch_fset
, known_arch_fset
))
28000 /* Architecture has all features wanted. */
28001 if (ARM_FSET_CPU_SUBSET (arch_fset
, known_arch_fset
))
28003 arm_feature_set added_fset
;
28005 /* Compute features added by this architecture over the one
28006 recorded in p_ver_ret. */
28007 if (p_ver_ret
!= NULL
)
28008 ARM_CLEAR_FEATURE (added_fset
, known_arch_fset
,
28010 /* First architecture that match incl. with extensions, or the
28011 only difference in features over the recorded match is
28012 features that were optional and are now mandatory. */
28013 if (p_ver_ret
== NULL
28014 || ARM_FSET_CPU_SUBSET (added_fset
, arch_fset
))
28020 else if (p_ver_ret
== NULL
)
28022 arm_feature_set needed_ext_fset
;
28024 ARM_CLEAR_FEATURE (needed_ext_fset
, arch_fset
, known_arch_fset
);
28026 /* Architecture has all features needed when using some
28027 extensions. Record it and continue searching in case there
28028 exist an architecture providing all needed features without
28029 the need for extensions (eg. ARMv6S-M Vs ARMv6-M with
28031 if (have_ext_for_needed_feat_p (&known_arch_fset
,
28038 if (p_ver_ret
== NULL
)
28042 /* Tag_CPU_arch_profile. */
28043 if (ARM_CPU_HAS_FEATURE (p_ver_ret
->flags
, arm_ext_v7a
)
28044 || ARM_CPU_HAS_FEATURE (p_ver_ret
->flags
, arm_ext_v8
)
28045 || (ARM_CPU_HAS_FEATURE (p_ver_ret
->flags
, arm_ext_atomics
)
28046 && !ARM_CPU_HAS_FEATURE (p_ver_ret
->flags
, arm_ext_v8m_m_only
)))
28048 else if (ARM_CPU_HAS_FEATURE (p_ver_ret
->flags
, arm_ext_v7r
))
28050 else if (ARM_CPU_HAS_FEATURE (p_ver_ret
->flags
, arm_ext_m
))
28054 return p_ver_ret
->val
;
28057 /* Set the public EABI object attributes. */
28060 aeabi_set_public_attributes (void)
28062 char profile
= '\0';
28065 int fp16_optional
= 0;
28066 int skip_exact_match
= 0;
28067 arm_feature_set flags
, flags_arch
, flags_ext
;
28069 /* Autodetection mode, choose the architecture based the instructions
28071 if (no_cpu_selected ())
28073 ARM_MERGE_FEATURE_SETS (flags
, arm_arch_used
, thumb_arch_used
);
28075 if (ARM_CPU_HAS_FEATURE (arm_arch_used
, arm_arch_any
))
28076 ARM_MERGE_FEATURE_SETS (flags
, flags
, arm_ext_v1
);
28078 if (ARM_CPU_HAS_FEATURE (thumb_arch_used
, arm_arch_any
))
28079 ARM_MERGE_FEATURE_SETS (flags
, flags
, arm_ext_v4t
);
28081 /* Code run during relaxation relies on selected_cpu being set. */
28082 ARM_CLEAR_FEATURE (flags_arch
, flags
, fpu_any
);
28083 flags_ext
= arm_arch_none
;
28084 ARM_CLEAR_FEATURE (selected_arch
, flags_arch
, flags_ext
);
28085 selected_ext
= flags_ext
;
28086 selected_cpu
= flags
;
28088 /* Otherwise, choose the architecture based on the capabilities of the
28092 ARM_MERGE_FEATURE_SETS (flags_arch
, selected_arch
, selected_ext
);
28093 ARM_CLEAR_FEATURE (flags_arch
, flags_arch
, fpu_any
);
28094 flags_ext
= selected_ext
;
28095 flags
= selected_cpu
;
28097 ARM_MERGE_FEATURE_SETS (flags
, flags
, selected_fpu
);
28099 /* Allow the user to override the reported architecture. */
28100 if (!ARM_FEATURE_ZERO (selected_object_arch
))
28102 ARM_CLEAR_FEATURE (flags_arch
, selected_object_arch
, fpu_any
);
28103 flags_ext
= arm_arch_none
;
28106 skip_exact_match
= ARM_FEATURE_EQUAL (selected_cpu
, arm_arch_any
);
28108 /* When this function is run again after relaxation has happened there is no
28109 way to determine whether an architecture or CPU was specified by the user:
28110 - selected_cpu is set above for relaxation to work;
28111 - march_cpu_opt is not set if only -mcpu or .cpu is used;
28112 - mcpu_cpu_opt is set to arm_arch_any for autodetection.
28113 Therefore, if not in -march=all case we first try an exact match and fall
28114 back to autodetection. */
28115 if (!skip_exact_match
)
28116 arch
= get_aeabi_cpu_arch_from_fset (&flags_arch
, &flags_ext
, &profile
, 1);
28118 arch
= get_aeabi_cpu_arch_from_fset (&flags_arch
, &flags_ext
, &profile
, 0);
28120 as_bad (_("no architecture contains all the instructions used\n"));
28122 /* Tag_CPU_name. */
28123 if (selected_cpu_name
[0])
28127 q
= selected_cpu_name
;
28128 if (strncmp (q
, "armv", 4) == 0)
28133 for (i
= 0; q
[i
]; i
++)
28134 q
[i
] = TOUPPER (q
[i
]);
28136 aeabi_set_attribute_string (Tag_CPU_name
, q
);
28139 /* Tag_CPU_arch. */
28140 aeabi_set_attribute_int (Tag_CPU_arch
, arch
);
28142 /* Tag_CPU_arch_profile. */
28143 if (profile
!= '\0')
28144 aeabi_set_attribute_int (Tag_CPU_arch_profile
, profile
);
28146 /* Tag_DSP_extension. */
28147 if (ARM_CPU_HAS_FEATURE (selected_ext
, arm_ext_dsp
))
28148 aeabi_set_attribute_int (Tag_DSP_extension
, 1);
28150 ARM_CLEAR_FEATURE (flags_arch
, flags
, fpu_any
);
28151 /* Tag_ARM_ISA_use. */
28152 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_v1
)
28153 || ARM_FEATURE_ZERO (flags_arch
))
28154 aeabi_set_attribute_int (Tag_ARM_ISA_use
, 1);
28156 /* Tag_THUMB_ISA_use. */
28157 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_v4t
)
28158 || ARM_FEATURE_ZERO (flags_arch
))
28162 if (!ARM_CPU_HAS_FEATURE (flags
, arm_ext_v8
)
28163 && ARM_CPU_HAS_FEATURE (flags
, arm_ext_v8m_m_only
))
28165 else if (ARM_CPU_HAS_FEATURE (flags
, arm_arch_t2
))
28169 aeabi_set_attribute_int (Tag_THUMB_ISA_use
, thumb_isa_use
);
28172 /* Tag_VFP_arch. */
28173 if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_armv8xd
))
28174 aeabi_set_attribute_int (Tag_VFP_arch
,
28175 ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_d32
)
28177 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_fma
))
28178 aeabi_set_attribute_int (Tag_VFP_arch
,
28179 ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_d32
)
28181 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_d32
))
28184 aeabi_set_attribute_int (Tag_VFP_arch
, 3);
28186 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v3xd
))
28188 aeabi_set_attribute_int (Tag_VFP_arch
, 4);
28191 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v2
))
28192 aeabi_set_attribute_int (Tag_VFP_arch
, 2);
28193 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v1
)
28194 || ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v1xd
))
28195 aeabi_set_attribute_int (Tag_VFP_arch
, 1);
28197 /* Tag_ABI_HardFP_use. */
28198 if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v1xd
)
28199 && !ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v1
))
28200 aeabi_set_attribute_int (Tag_ABI_HardFP_use
, 1);
28202 /* Tag_WMMX_arch. */
28203 if (ARM_CPU_HAS_FEATURE (flags
, arm_cext_iwmmxt2
))
28204 aeabi_set_attribute_int (Tag_WMMX_arch
, 2);
28205 else if (ARM_CPU_HAS_FEATURE (flags
, arm_cext_iwmmxt
))
28206 aeabi_set_attribute_int (Tag_WMMX_arch
, 1);
28208 /* Tag_Advanced_SIMD_arch (formerly Tag_NEON_arch). */
28209 if (ARM_CPU_HAS_FEATURE (flags
, fpu_neon_ext_v8_1
))
28210 aeabi_set_attribute_int (Tag_Advanced_SIMD_arch
, 4);
28211 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_neon_ext_armv8
))
28212 aeabi_set_attribute_int (Tag_Advanced_SIMD_arch
, 3);
28213 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_neon_ext_v1
))
28215 if (ARM_CPU_HAS_FEATURE (flags
, fpu_neon_ext_fma
))
28217 aeabi_set_attribute_int (Tag_Advanced_SIMD_arch
, 2);
28221 aeabi_set_attribute_int (Tag_Advanced_SIMD_arch
, 1);
28226 /* Tag_VFP_HP_extension (formerly Tag_NEON_FP16_arch). */
28227 if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_fp16
) && fp16_optional
)
28228 aeabi_set_attribute_int (Tag_VFP_HP_extension
, 1);
28232 We set Tag_DIV_use to two when integer divide instructions have been used
28233 in ARM state, or when Thumb integer divide instructions have been used,
28234 but we have no architecture profile set, nor have we any ARM instructions.
28236 For ARMv8-A and ARMv8-M we set the tag to 0 as integer divide is implied
28237 by the base architecture.
28239 For new architectures we will have to check these tests. */
28240 gas_assert (arch
<= TAG_CPU_ARCH_V8_1M_MAIN
);
28241 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_v8
)
28242 || ARM_CPU_HAS_FEATURE (flags
, arm_ext_v8m
))
28243 aeabi_set_attribute_int (Tag_DIV_use
, 0);
28244 else if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_adiv
)
28245 || (profile
== '\0'
28246 && ARM_CPU_HAS_FEATURE (flags
, arm_ext_div
)
28247 && !ARM_CPU_HAS_FEATURE (arm_arch_used
, arm_arch_any
)))
28248 aeabi_set_attribute_int (Tag_DIV_use
, 2);
28250 /* Tag_MP_extension_use. */
28251 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_mp
))
28252 aeabi_set_attribute_int (Tag_MPextension_use
, 1);
28254 /* Tag Virtualization_use. */
28255 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_sec
))
28257 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_virt
))
28260 aeabi_set_attribute_int (Tag_Virtualization_use
, virt_sec
);
28263 /* Post relaxation hook. Recompute ARM attributes now that relaxation is
28264 finished and free extension feature bits which will not be used anymore. */
28267 arm_md_post_relax (void)
28269 aeabi_set_public_attributes ();
28270 XDELETE (mcpu_ext_opt
);
28271 mcpu_ext_opt
= NULL
;
28272 XDELETE (march_ext_opt
);
28273 march_ext_opt
= NULL
;
28276 /* Add the default contents for the .ARM.attributes section. */
28281 if (EF_ARM_EABI_VERSION (meabi_flags
) < EF_ARM_EABI_VER4
)
28284 aeabi_set_public_attributes ();
28286 #endif /* OBJ_ELF */
28288 /* Parse a .cpu directive. */
28291 s_arm_cpu (int ignored ATTRIBUTE_UNUSED
)
28293 const struct arm_cpu_option_table
*opt
;
28297 name
= input_line_pointer
;
28298 while (*input_line_pointer
&& !ISSPACE (*input_line_pointer
))
28299 input_line_pointer
++;
28300 saved_char
= *input_line_pointer
;
28301 *input_line_pointer
= 0;
28303 /* Skip the first "all" entry. */
28304 for (opt
= arm_cpus
+ 1; opt
->name
!= NULL
; opt
++)
28305 if (streq (opt
->name
, name
))
28307 selected_arch
= opt
->value
;
28308 selected_ext
= opt
->ext
;
28309 ARM_MERGE_FEATURE_SETS (selected_cpu
, selected_arch
, selected_ext
);
28310 if (opt
->canonical_name
)
28311 strcpy (selected_cpu_name
, opt
->canonical_name
);
28315 for (i
= 0; opt
->name
[i
]; i
++)
28316 selected_cpu_name
[i
] = TOUPPER (opt
->name
[i
]);
28318 selected_cpu_name
[i
] = 0;
28320 ARM_MERGE_FEATURE_SETS (cpu_variant
, selected_cpu
, selected_fpu
);
28322 *input_line_pointer
= saved_char
;
28323 demand_empty_rest_of_line ();
28326 as_bad (_("unknown cpu `%s'"), name
);
28327 *input_line_pointer
= saved_char
;
28328 ignore_rest_of_line ();
28331 /* Parse a .arch directive. */
28334 s_arm_arch (int ignored ATTRIBUTE_UNUSED
)
28336 const struct arm_arch_option_table
*opt
;
28340 name
= input_line_pointer
;
28341 while (*input_line_pointer
&& !ISSPACE (*input_line_pointer
))
28342 input_line_pointer
++;
28343 saved_char
= *input_line_pointer
;
28344 *input_line_pointer
= 0;
28346 /* Skip the first "all" entry. */
28347 for (opt
= arm_archs
+ 1; opt
->name
!= NULL
; opt
++)
28348 if (streq (opt
->name
, name
))
28350 selected_arch
= opt
->value
;
28351 selected_ext
= arm_arch_none
;
28352 selected_cpu
= selected_arch
;
28353 strcpy (selected_cpu_name
, opt
->name
);
28354 ARM_MERGE_FEATURE_SETS (cpu_variant
, selected_cpu
, selected_fpu
);
28355 *input_line_pointer
= saved_char
;
28356 demand_empty_rest_of_line ();
28360 as_bad (_("unknown architecture `%s'\n"), name
);
28361 *input_line_pointer
= saved_char
;
28362 ignore_rest_of_line ();
28365 /* Parse a .object_arch directive. */
28368 s_arm_object_arch (int ignored ATTRIBUTE_UNUSED
)
28370 const struct arm_arch_option_table
*opt
;
28374 name
= input_line_pointer
;
28375 while (*input_line_pointer
&& !ISSPACE (*input_line_pointer
))
28376 input_line_pointer
++;
28377 saved_char
= *input_line_pointer
;
28378 *input_line_pointer
= 0;
28380 /* Skip the first "all" entry. */
28381 for (opt
= arm_archs
+ 1; opt
->name
!= NULL
; opt
++)
28382 if (streq (opt
->name
, name
))
28384 selected_object_arch
= opt
->value
;
28385 *input_line_pointer
= saved_char
;
28386 demand_empty_rest_of_line ();
28390 as_bad (_("unknown architecture `%s'\n"), name
);
28391 *input_line_pointer
= saved_char
;
28392 ignore_rest_of_line ();
28395 /* Parse a .arch_extension directive. */
28398 s_arm_arch_extension (int ignored ATTRIBUTE_UNUSED
)
28400 const struct arm_option_extension_value_table
*opt
;
28403 int adding_value
= 1;
28405 name
= input_line_pointer
;
28406 while (*input_line_pointer
&& !ISSPACE (*input_line_pointer
))
28407 input_line_pointer
++;
28408 saved_char
= *input_line_pointer
;
28409 *input_line_pointer
= 0;
28411 if (strlen (name
) >= 2
28412 && strncmp (name
, "no", 2) == 0)
28418 for (opt
= arm_extensions
; opt
->name
!= NULL
; opt
++)
28419 if (streq (opt
->name
, name
))
28421 int i
, nb_allowed_archs
=
28422 sizeof (opt
->allowed_archs
) / sizeof (opt
->allowed_archs
[i
]);
28423 for (i
= 0; i
< nb_allowed_archs
; i
++)
28426 if (ARM_CPU_IS_ANY (opt
->allowed_archs
[i
]))
28428 if (ARM_FSET_CPU_SUBSET (opt
->allowed_archs
[i
], selected_arch
))
28432 if (i
== nb_allowed_archs
)
28434 as_bad (_("architectural extension `%s' is not allowed for the "
28435 "current base architecture"), name
);
28440 ARM_MERGE_FEATURE_SETS (selected_ext
, selected_ext
,
28443 ARM_CLEAR_FEATURE (selected_ext
, selected_ext
, opt
->clear_value
);
28445 ARM_MERGE_FEATURE_SETS (selected_cpu
, selected_arch
, selected_ext
);
28446 ARM_MERGE_FEATURE_SETS (cpu_variant
, selected_cpu
, selected_fpu
);
28447 *input_line_pointer
= saved_char
;
28448 demand_empty_rest_of_line ();
28449 /* Allowing Thumb division instructions for ARMv7 in autodetection rely
28450 on this return so that duplicate extensions (extensions with the
28451 same name as a previous extension in the list) are not considered
28452 for command-line parsing. */
28456 if (opt
->name
== NULL
)
28457 as_bad (_("unknown architecture extension `%s'\n"), name
);
28459 *input_line_pointer
= saved_char
;
28460 ignore_rest_of_line ();
28463 /* Parse a .fpu directive. */
28466 s_arm_fpu (int ignored ATTRIBUTE_UNUSED
)
28468 const struct arm_option_fpu_value_table
*opt
;
28472 name
= input_line_pointer
;
28473 while (*input_line_pointer
&& !ISSPACE (*input_line_pointer
))
28474 input_line_pointer
++;
28475 saved_char
= *input_line_pointer
;
28476 *input_line_pointer
= 0;
28478 for (opt
= arm_fpus
; opt
->name
!= NULL
; opt
++)
28479 if (streq (opt
->name
, name
))
28481 selected_fpu
= opt
->value
;
28482 #ifndef CPU_DEFAULT
28483 if (no_cpu_selected ())
28484 ARM_MERGE_FEATURE_SETS (cpu_variant
, arm_arch_any
, selected_fpu
);
28487 ARM_MERGE_FEATURE_SETS (cpu_variant
, selected_cpu
, selected_fpu
);
28488 *input_line_pointer
= saved_char
;
28489 demand_empty_rest_of_line ();
28493 as_bad (_("unknown floating point format `%s'\n"), name
);
28494 *input_line_pointer
= saved_char
;
28495 ignore_rest_of_line ();
28498 /* Copy symbol information. */
28501 arm_copy_symbol_attributes (symbolS
*dest
, symbolS
*src
)
28503 ARM_GET_FLAG (dest
) = ARM_GET_FLAG (src
);
28507 /* Given a symbolic attribute NAME, return the proper integer value.
28508 Returns -1 if the attribute is not known. */
28511 arm_convert_symbolic_attribute (const char *name
)
28513 static const struct
28518 attribute_table
[] =
28520 /* When you modify this table you should
28521 also modify the list in doc/c-arm.texi. */
28522 #define T(tag) {#tag, tag}
28523 T (Tag_CPU_raw_name
),
28526 T (Tag_CPU_arch_profile
),
28527 T (Tag_ARM_ISA_use
),
28528 T (Tag_THUMB_ISA_use
),
28532 T (Tag_Advanced_SIMD_arch
),
28533 T (Tag_PCS_config
),
28534 T (Tag_ABI_PCS_R9_use
),
28535 T (Tag_ABI_PCS_RW_data
),
28536 T (Tag_ABI_PCS_RO_data
),
28537 T (Tag_ABI_PCS_GOT_use
),
28538 T (Tag_ABI_PCS_wchar_t
),
28539 T (Tag_ABI_FP_rounding
),
28540 T (Tag_ABI_FP_denormal
),
28541 T (Tag_ABI_FP_exceptions
),
28542 T (Tag_ABI_FP_user_exceptions
),
28543 T (Tag_ABI_FP_number_model
),
28544 T (Tag_ABI_align_needed
),
28545 T (Tag_ABI_align8_needed
),
28546 T (Tag_ABI_align_preserved
),
28547 T (Tag_ABI_align8_preserved
),
28548 T (Tag_ABI_enum_size
),
28549 T (Tag_ABI_HardFP_use
),
28550 T (Tag_ABI_VFP_args
),
28551 T (Tag_ABI_WMMX_args
),
28552 T (Tag_ABI_optimization_goals
),
28553 T (Tag_ABI_FP_optimization_goals
),
28554 T (Tag_compatibility
),
28555 T (Tag_CPU_unaligned_access
),
28556 T (Tag_FP_HP_extension
),
28557 T (Tag_VFP_HP_extension
),
28558 T (Tag_ABI_FP_16bit_format
),
28559 T (Tag_MPextension_use
),
28561 T (Tag_nodefaults
),
28562 T (Tag_also_compatible_with
),
28563 T (Tag_conformance
),
28565 T (Tag_Virtualization_use
),
28566 T (Tag_DSP_extension
),
28567 /* We deliberately do not include Tag_MPextension_use_legacy. */
28575 for (i
= 0; i
< ARRAY_SIZE (attribute_table
); i
++)
28576 if (streq (name
, attribute_table
[i
].name
))
28577 return attribute_table
[i
].tag
;
28582 /* Apply sym value for relocations only in the case that they are for
28583 local symbols in the same segment as the fixup and you have the
28584 respective architectural feature for blx and simple switches. */
28587 arm_apply_sym_value (struct fix
* fixP
, segT this_seg
)
28590 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
)
28591 /* PR 17444: If the local symbol is in a different section then a reloc
28592 will always be generated for it, so applying the symbol value now
28593 will result in a double offset being stored in the relocation. */
28594 && (S_GET_SEGMENT (fixP
->fx_addsy
) == this_seg
)
28595 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
))
28597 switch (fixP
->fx_r_type
)
28599 case BFD_RELOC_ARM_PCREL_BLX
:
28600 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
28601 if (ARM_IS_FUNC (fixP
->fx_addsy
))
28605 case BFD_RELOC_ARM_PCREL_CALL
:
28606 case BFD_RELOC_THUMB_PCREL_BLX
:
28607 if (THUMB_IS_FUNC (fixP
->fx_addsy
))
28618 #endif /* OBJ_ELF */