1 /* tc-arm.c -- Assemble for the ARM
2 Copyright (C) 1994-2019 Free Software Foundation, Inc.
3 Contributed by Richard Earnshaw (rwe@pegasus.esprit.ec.org)
4 Modified by David Taylor (dtaylor@armltd.co.uk)
5 Cirrus coprocessor mods by Aldy Hernandez (aldyh@redhat.com)
6 Cirrus coprocessor fixes by Petko Manolov (petkan@nucleusys.com)
7 Cirrus coprocessor fixes by Vladimir Ivanov (vladitx@nucleusys.com)
9 This file is part of GAS, the GNU Assembler.
11 GAS is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License as published by
13 the Free Software Foundation; either version 3, or (at your option)
16 GAS is distributed in the hope that it will be useful,
17 but WITHOUT ANY WARRANTY; without even the implied warranty of
18 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 GNU General Public License for more details.
21 You should have received a copy of the GNU General Public License
22 along with GAS; see the file COPYING. If not, write to the Free
23 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
30 #include "safe-ctype.h"
33 #include "libiberty.h"
34 #include "opcode/arm.h"
38 #include "dw2gencfi.h"
41 #include "dwarf2dbg.h"
44 /* Must be at least the size of the largest unwind opcode (currently two). */
45 #define ARM_OPCODE_CHUNK_SIZE 8
47 /* This structure holds the unwinding state. */
52 symbolS
* table_entry
;
53 symbolS
* personality_routine
;
54 int personality_index
;
55 /* The segment containing the function. */
58 /* Opcodes generated from this function. */
59 unsigned char * opcodes
;
62 /* The number of bytes pushed to the stack. */
64 /* We don't add stack adjustment opcodes immediately so that we can merge
65 multiple adjustments. We can also omit the final adjustment
66 when using a frame pointer. */
67 offsetT pending_offset
;
68 /* These two fields are set by both unwind_movsp and unwind_setfp. They
69 hold the reg+offset to use when restoring sp from a frame pointer. */
72 /* Nonzero if an unwind_setfp directive has been seen. */
74 /* Nonzero if the last opcode restores sp from fp_reg. */
75 unsigned sp_restored
:1;
78 /* Whether --fdpic was given. */
83 /* Results from operand parsing worker functions. */
87 PARSE_OPERAND_SUCCESS
,
89 PARSE_OPERAND_FAIL_NO_BACKTRACK
90 } parse_operand_result
;
99 /* Types of processor to assemble for. */
101 /* The code that was here used to select a default CPU depending on compiler
102 pre-defines which were only present when doing native builds, thus
103 changing gas' default behaviour depending upon the build host.
105 If you have a target that requires a default CPU option then you
106 should define CPU_DEFAULT here. */
111 # define FPU_DEFAULT FPU_ARCH_FPA
112 # elif defined (TE_NetBSD)
114 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, but VFP order. */
116 /* Legacy a.out format. */
117 # define FPU_DEFAULT FPU_ARCH_FPA /* Soft-float, but FPA order. */
119 # elif defined (TE_VXWORKS)
120 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, VFP order. */
122 /* For backwards compatibility, default to FPA. */
123 # define FPU_DEFAULT FPU_ARCH_FPA
125 #endif /* ifndef FPU_DEFAULT */
127 #define streq(a, b) (strcmp (a, b) == 0)
129 /* Current set of feature bits available (CPU+FPU). Different from
130 selected_cpu + selected_fpu in case of autodetection since the CPU
131 feature bits are then all set. */
132 static arm_feature_set cpu_variant
;
133 /* Feature bits used in each execution state. Used to set build attribute
134 (in particular Tag_*_ISA_use) in CPU autodetection mode. */
135 static arm_feature_set arm_arch_used
;
136 static arm_feature_set thumb_arch_used
;
138 /* Flags stored in private area of BFD structure. */
139 static int uses_apcs_26
= FALSE
;
140 static int atpcs
= FALSE
;
141 static int support_interwork
= FALSE
;
142 static int uses_apcs_float
= FALSE
;
143 static int pic_code
= FALSE
;
144 static int fix_v4bx
= FALSE
;
145 /* Warn on using deprecated features. */
146 static int warn_on_deprecated
= TRUE
;
148 /* Understand CodeComposer Studio assembly syntax. */
149 bfd_boolean codecomposer_syntax
= FALSE
;
151 /* Variables that we set while parsing command-line options. Once all
152 options have been read we re-process these values to set the real
155 /* CPU and FPU feature bits set for legacy CPU and FPU options (eg. -marm1
156 instead of -mcpu=arm1). */
157 static const arm_feature_set
*legacy_cpu
= NULL
;
158 static const arm_feature_set
*legacy_fpu
= NULL
;
160 /* CPU, extension and FPU feature bits selected by -mcpu. */
161 static const arm_feature_set
*mcpu_cpu_opt
= NULL
;
162 static arm_feature_set
*mcpu_ext_opt
= NULL
;
163 static const arm_feature_set
*mcpu_fpu_opt
= NULL
;
165 /* CPU, extension and FPU feature bits selected by -march. */
166 static const arm_feature_set
*march_cpu_opt
= NULL
;
167 static arm_feature_set
*march_ext_opt
= NULL
;
168 static const arm_feature_set
*march_fpu_opt
= NULL
;
170 /* Feature bits selected by -mfpu. */
171 static const arm_feature_set
*mfpu_opt
= NULL
;
173 /* Constants for known architecture features. */
174 static const arm_feature_set fpu_default
= FPU_DEFAULT
;
175 static const arm_feature_set fpu_arch_vfp_v1 ATTRIBUTE_UNUSED
= FPU_ARCH_VFP_V1
;
176 static const arm_feature_set fpu_arch_vfp_v2
= FPU_ARCH_VFP_V2
;
177 static const arm_feature_set fpu_arch_vfp_v3 ATTRIBUTE_UNUSED
= FPU_ARCH_VFP_V3
;
178 static const arm_feature_set fpu_arch_neon_v1 ATTRIBUTE_UNUSED
= FPU_ARCH_NEON_V1
;
179 static const arm_feature_set fpu_arch_fpa
= FPU_ARCH_FPA
;
180 static const arm_feature_set fpu_any_hard
= FPU_ANY_HARD
;
182 static const arm_feature_set fpu_arch_maverick
= FPU_ARCH_MAVERICK
;
184 static const arm_feature_set fpu_endian_pure
= FPU_ARCH_ENDIAN_PURE
;
187 static const arm_feature_set cpu_default
= CPU_DEFAULT
;
190 static const arm_feature_set arm_ext_v1
= ARM_FEATURE_CORE_LOW (ARM_EXT_V1
);
191 static const arm_feature_set arm_ext_v2
= ARM_FEATURE_CORE_LOW (ARM_EXT_V2
);
192 static const arm_feature_set arm_ext_v2s
= ARM_FEATURE_CORE_LOW (ARM_EXT_V2S
);
193 static const arm_feature_set arm_ext_v3
= ARM_FEATURE_CORE_LOW (ARM_EXT_V3
);
194 static const arm_feature_set arm_ext_v3m
= ARM_FEATURE_CORE_LOW (ARM_EXT_V3M
);
195 static const arm_feature_set arm_ext_v4
= ARM_FEATURE_CORE_LOW (ARM_EXT_V4
);
196 static const arm_feature_set arm_ext_v4t
= ARM_FEATURE_CORE_LOW (ARM_EXT_V4T
);
197 static const arm_feature_set arm_ext_v5
= ARM_FEATURE_CORE_LOW (ARM_EXT_V5
);
198 static const arm_feature_set arm_ext_v4t_5
=
199 ARM_FEATURE_CORE_LOW (ARM_EXT_V4T
| ARM_EXT_V5
);
200 static const arm_feature_set arm_ext_v5t
= ARM_FEATURE_CORE_LOW (ARM_EXT_V5T
);
201 static const arm_feature_set arm_ext_v5e
= ARM_FEATURE_CORE_LOW (ARM_EXT_V5E
);
202 static const arm_feature_set arm_ext_v5exp
= ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP
);
203 static const arm_feature_set arm_ext_v5j
= ARM_FEATURE_CORE_LOW (ARM_EXT_V5J
);
204 static const arm_feature_set arm_ext_v6
= ARM_FEATURE_CORE_LOW (ARM_EXT_V6
);
205 static const arm_feature_set arm_ext_v6k
= ARM_FEATURE_CORE_LOW (ARM_EXT_V6K
);
206 static const arm_feature_set arm_ext_v6t2
= ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2
);
207 /* Only for compatibility of hint instructions. */
208 static const arm_feature_set arm_ext_v6k_v6t2
=
209 ARM_FEATURE_CORE_LOW (ARM_EXT_V6K
| ARM_EXT_V6T2
);
210 static const arm_feature_set arm_ext_v6_notm
=
211 ARM_FEATURE_CORE_LOW (ARM_EXT_V6_NOTM
);
212 static const arm_feature_set arm_ext_v6_dsp
=
213 ARM_FEATURE_CORE_LOW (ARM_EXT_V6_DSP
);
214 static const arm_feature_set arm_ext_barrier
=
215 ARM_FEATURE_CORE_LOW (ARM_EXT_BARRIER
);
216 static const arm_feature_set arm_ext_msr
=
217 ARM_FEATURE_CORE_LOW (ARM_EXT_THUMB_MSR
);
218 static const arm_feature_set arm_ext_div
= ARM_FEATURE_CORE_LOW (ARM_EXT_DIV
);
219 static const arm_feature_set arm_ext_v7
= ARM_FEATURE_CORE_LOW (ARM_EXT_V7
);
220 static const arm_feature_set arm_ext_v7a
= ARM_FEATURE_CORE_LOW (ARM_EXT_V7A
);
221 static const arm_feature_set arm_ext_v7r
= ARM_FEATURE_CORE_LOW (ARM_EXT_V7R
);
223 static const arm_feature_set ATTRIBUTE_UNUSED arm_ext_v7m
= ARM_FEATURE_CORE_LOW (ARM_EXT_V7M
);
225 static const arm_feature_set arm_ext_v8
= ARM_FEATURE_CORE_LOW (ARM_EXT_V8
);
226 static const arm_feature_set arm_ext_m
=
227 ARM_FEATURE_CORE (ARM_EXT_V6M
| ARM_EXT_V7M
,
228 ARM_EXT2_V8M
| ARM_EXT2_V8M_MAIN
);
229 static const arm_feature_set arm_ext_mp
= ARM_FEATURE_CORE_LOW (ARM_EXT_MP
);
230 static const arm_feature_set arm_ext_sec
= ARM_FEATURE_CORE_LOW (ARM_EXT_SEC
);
231 static const arm_feature_set arm_ext_os
= ARM_FEATURE_CORE_LOW (ARM_EXT_OS
);
232 static const arm_feature_set arm_ext_adiv
= ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV
);
233 static const arm_feature_set arm_ext_virt
= ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT
);
234 static const arm_feature_set arm_ext_pan
= ARM_FEATURE_CORE_HIGH (ARM_EXT2_PAN
);
235 static const arm_feature_set arm_ext_v8m
= ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M
);
236 static const arm_feature_set arm_ext_v8m_main
=
237 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M_MAIN
);
238 static const arm_feature_set arm_ext_v8_1m_main
=
239 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_1M_MAIN
);
240 /* Instructions in ARMv8-M only found in M profile architectures. */
241 static const arm_feature_set arm_ext_v8m_m_only
=
242 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M
| ARM_EXT2_V8M_MAIN
);
243 static const arm_feature_set arm_ext_v6t2_v8m
=
244 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V6T2_V8M
);
245 /* Instructions shared between ARMv8-A and ARMv8-M. */
246 static const arm_feature_set arm_ext_atomics
=
247 ARM_FEATURE_CORE_HIGH (ARM_EXT2_ATOMICS
);
249 /* DSP instructions Tag_DSP_extension refers to. */
250 static const arm_feature_set arm_ext_dsp
=
251 ARM_FEATURE_CORE_LOW (ARM_EXT_V5E
| ARM_EXT_V5ExP
| ARM_EXT_V6_DSP
);
253 static const arm_feature_set arm_ext_ras
=
254 ARM_FEATURE_CORE_HIGH (ARM_EXT2_RAS
);
255 /* FP16 instructions. */
256 static const arm_feature_set arm_ext_fp16
=
257 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
);
258 static const arm_feature_set arm_ext_fp16_fml
=
259 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_FML
);
260 static const arm_feature_set arm_ext_v8_2
=
261 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_2A
);
262 static const arm_feature_set arm_ext_v8_3
=
263 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_3A
);
264 static const arm_feature_set arm_ext_sb
=
265 ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB
);
266 static const arm_feature_set arm_ext_predres
=
267 ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES
);
269 static const arm_feature_set arm_arch_any
= ARM_ANY
;
271 static const arm_feature_set fpu_any
= FPU_ANY
;
273 static const arm_feature_set arm_arch_full ATTRIBUTE_UNUSED
= ARM_FEATURE (-1, -1, -1);
274 static const arm_feature_set arm_arch_t2
= ARM_ARCH_THUMB2
;
275 static const arm_feature_set arm_arch_none
= ARM_ARCH_NONE
;
277 static const arm_feature_set arm_cext_iwmmxt2
=
278 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2
);
279 static const arm_feature_set arm_cext_iwmmxt
=
280 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT
);
281 static const arm_feature_set arm_cext_xscale
=
282 ARM_FEATURE_COPROC (ARM_CEXT_XSCALE
);
283 static const arm_feature_set arm_cext_maverick
=
284 ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK
);
285 static const arm_feature_set fpu_fpa_ext_v1
=
286 ARM_FEATURE_COPROC (FPU_FPA_EXT_V1
);
287 static const arm_feature_set fpu_fpa_ext_v2
=
288 ARM_FEATURE_COPROC (FPU_FPA_EXT_V2
);
289 static const arm_feature_set fpu_vfp_ext_v1xd
=
290 ARM_FEATURE_COPROC (FPU_VFP_EXT_V1xD
);
291 static const arm_feature_set fpu_vfp_ext_v1
=
292 ARM_FEATURE_COPROC (FPU_VFP_EXT_V1
);
293 static const arm_feature_set fpu_vfp_ext_v2
=
294 ARM_FEATURE_COPROC (FPU_VFP_EXT_V2
);
295 static const arm_feature_set fpu_vfp_ext_v3xd
=
296 ARM_FEATURE_COPROC (FPU_VFP_EXT_V3xD
);
297 static const arm_feature_set fpu_vfp_ext_v3
=
298 ARM_FEATURE_COPROC (FPU_VFP_EXT_V3
);
299 static const arm_feature_set fpu_vfp_ext_d32
=
300 ARM_FEATURE_COPROC (FPU_VFP_EXT_D32
);
301 static const arm_feature_set fpu_neon_ext_v1
=
302 ARM_FEATURE_COPROC (FPU_NEON_EXT_V1
);
303 static const arm_feature_set fpu_vfp_v3_or_neon_ext
=
304 ARM_FEATURE_COPROC (FPU_NEON_EXT_V1
| FPU_VFP_EXT_V3
);
306 static const arm_feature_set fpu_vfp_fp16
=
307 ARM_FEATURE_COPROC (FPU_VFP_EXT_FP16
);
308 static const arm_feature_set fpu_neon_ext_fma
=
309 ARM_FEATURE_COPROC (FPU_NEON_EXT_FMA
);
311 static const arm_feature_set fpu_vfp_ext_fma
=
312 ARM_FEATURE_COPROC (FPU_VFP_EXT_FMA
);
313 static const arm_feature_set fpu_vfp_ext_armv8
=
314 ARM_FEATURE_COPROC (FPU_VFP_EXT_ARMV8
);
315 static const arm_feature_set fpu_vfp_ext_armv8xd
=
316 ARM_FEATURE_COPROC (FPU_VFP_EXT_ARMV8xD
);
317 static const arm_feature_set fpu_neon_ext_armv8
=
318 ARM_FEATURE_COPROC (FPU_NEON_EXT_ARMV8
);
319 static const arm_feature_set fpu_crypto_ext_armv8
=
320 ARM_FEATURE_COPROC (FPU_CRYPTO_EXT_ARMV8
);
321 static const arm_feature_set crc_ext_armv8
=
322 ARM_FEATURE_COPROC (CRC_EXT_ARMV8
);
323 static const arm_feature_set fpu_neon_ext_v8_1
=
324 ARM_FEATURE_COPROC (FPU_NEON_EXT_RDMA
);
325 static const arm_feature_set fpu_neon_ext_dotprod
=
326 ARM_FEATURE_COPROC (FPU_NEON_EXT_DOTPROD
);
328 static int mfloat_abi_opt
= -1;
329 /* Architecture feature bits selected by the last -mcpu/-march or .cpu/.arch
331 static arm_feature_set selected_arch
= ARM_ARCH_NONE
;
332 /* Extension feature bits selected by the last -mcpu/-march or .arch_extension
334 static arm_feature_set selected_ext
= ARM_ARCH_NONE
;
335 /* Feature bits selected by the last -mcpu/-march or by the combination of the
336 last .cpu/.arch directive .arch_extension directives since that
338 static arm_feature_set selected_cpu
= ARM_ARCH_NONE
;
339 /* FPU feature bits selected by the last -mfpu or .fpu directive. */
340 static arm_feature_set selected_fpu
= FPU_NONE
;
341 /* Feature bits selected by the last .object_arch directive. */
342 static arm_feature_set selected_object_arch
= ARM_ARCH_NONE
;
343 /* Must be long enough to hold any of the names in arm_cpus. */
344 static char selected_cpu_name
[20];
346 extern FLONUM_TYPE generic_floating_point_number
;
348 /* Return if no cpu was selected on command-line. */
350 no_cpu_selected (void)
352 return ARM_FEATURE_EQUAL (selected_cpu
, arm_arch_none
);
357 static int meabi_flags
= EABI_DEFAULT
;
359 static int meabi_flags
= EF_ARM_EABI_UNKNOWN
;
362 static int attributes_set_explicitly
[NUM_KNOWN_OBJ_ATTRIBUTES
];
367 return (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
);
372 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
373 symbolS
* GOT_symbol
;
376 /* 0: assemble for ARM,
377 1: assemble for Thumb,
378 2: assemble for Thumb even though target CPU does not support thumb
380 static int thumb_mode
= 0;
381 /* A value distinct from the possible values for thumb_mode that we
382 can use to record whether thumb_mode has been copied into the
383 tc_frag_data field of a frag. */
384 #define MODE_RECORDED (1 << 4)
386 /* Specifies the intrinsic IT insn behavior mode. */
387 enum implicit_it_mode
389 IMPLICIT_IT_MODE_NEVER
= 0x00,
390 IMPLICIT_IT_MODE_ARM
= 0x01,
391 IMPLICIT_IT_MODE_THUMB
= 0x02,
392 IMPLICIT_IT_MODE_ALWAYS
= (IMPLICIT_IT_MODE_ARM
| IMPLICIT_IT_MODE_THUMB
)
394 static int implicit_it_mode
= IMPLICIT_IT_MODE_ARM
;
396 /* If unified_syntax is true, we are processing the new unified
397 ARM/Thumb syntax. Important differences from the old ARM mode:
399 - Immediate operands do not require a # prefix.
400 - Conditional affixes always appear at the end of the
401 instruction. (For backward compatibility, those instructions
402 that formerly had them in the middle, continue to accept them
404 - The IT instruction may appear, and if it does is validated
405 against subsequent conditional affixes. It does not generate
408 Important differences from the old Thumb mode:
410 - Immediate operands do not require a # prefix.
411 - Most of the V6T2 instructions are only available in unified mode.
412 - The .N and .W suffixes are recognized and honored (it is an error
413 if they cannot be honored).
414 - All instructions set the flags if and only if they have an 's' affix.
415 - Conditional affixes may be used. They are validated against
416 preceding IT instructions. Unlike ARM mode, you cannot use a
417 conditional affix except in the scope of an IT instruction. */
419 static bfd_boolean unified_syntax
= FALSE
;
421 /* An immediate operand can start with #, and ld*, st*, pld operands
422 can contain [ and ]. We need to tell APP not to elide whitespace
423 before a [, which can appear as the first operand for pld.
424 Likewise, a { can appear as the first operand for push, pop, vld*, etc. */
425 const char arm_symbol_chars
[] = "#[]{}";
440 enum neon_el_type type
;
444 #define NEON_MAX_TYPE_ELS 4
448 struct neon_type_el el
[NEON_MAX_TYPE_ELS
];
452 enum it_instruction_type
457 IF_INSIDE_IT_LAST_INSN
, /* Either outside or inside;
458 if inside, should be the last one. */
459 NEUTRAL_IT_INSN
, /* This could be either inside or outside,
460 i.e. BKPT and NOP. */
461 IT_INSN
/* The IT insn has been parsed. */
464 /* The maximum number of operands we need. */
465 #define ARM_IT_MAX_OPERANDS 6
466 #define ARM_IT_MAX_RELOCS 3
471 unsigned long instruction
;
475 /* "uncond_value" is set to the value in place of the conditional field in
476 unconditional versions of the instruction, or -1 if nothing is
479 struct neon_type vectype
;
480 /* This does not indicate an actual NEON instruction, only that
481 the mnemonic accepts neon-style type suffixes. */
483 /* Set to the opcode if the instruction needs relaxation.
484 Zero if the instruction is not relaxed. */
488 bfd_reloc_code_real_type type
;
491 } relocs
[ARM_IT_MAX_RELOCS
];
493 enum it_instruction_type it_insn_type
;
499 struct neon_type_el vectype
;
500 unsigned present
: 1; /* Operand present. */
501 unsigned isreg
: 1; /* Operand was a register. */
502 unsigned immisreg
: 1; /* .imm field is a second register. */
503 unsigned isscalar
: 1; /* Operand is a (Neon) scalar. */
504 unsigned immisalign
: 1; /* Immediate is an alignment specifier. */
505 unsigned immisfloat
: 1; /* Immediate was parsed as a float. */
506 /* Note: we abuse "regisimm" to mean "is Neon register" in VMOV
507 instructions. This allows us to disambiguate ARM <-> vector insns. */
508 unsigned regisimm
: 1; /* 64-bit immediate, reg forms high 32 bits. */
509 unsigned isvec
: 1; /* Is a single, double or quad VFP/Neon reg. */
510 unsigned isquad
: 1; /* Operand is Neon quad-precision register. */
511 unsigned issingle
: 1; /* Operand is VFP single-precision register. */
512 unsigned hasreloc
: 1; /* Operand has relocation suffix. */
513 unsigned writeback
: 1; /* Operand has trailing ! */
514 unsigned preind
: 1; /* Preindexed address. */
515 unsigned postind
: 1; /* Postindexed address. */
516 unsigned negative
: 1; /* Index register was negated. */
517 unsigned shifted
: 1; /* Shift applied to operation. */
518 unsigned shift_kind
: 3; /* Shift operation (enum shift_kind). */
519 } operands
[ARM_IT_MAX_OPERANDS
];
522 static struct arm_it inst
;
524 #define NUM_FLOAT_VALS 8
526 const char * fp_const
[] =
528 "0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "0.5", "10.0", 0
531 /* Number of littlenums required to hold an extended precision number. */
532 #define MAX_LITTLENUMS 6
534 LITTLENUM_TYPE fp_values
[NUM_FLOAT_VALS
][MAX_LITTLENUMS
];
544 #define CP_T_X 0x00008000
545 #define CP_T_Y 0x00400000
547 #define CONDS_BIT 0x00100000
548 #define LOAD_BIT 0x00100000
550 #define DOUBLE_LOAD_FLAG 0x00000001
554 const char * template_name
;
558 #define COND_ALWAYS 0xE
562 const char * template_name
;
566 struct asm_barrier_opt
568 const char * template_name
;
570 const arm_feature_set arch
;
573 /* The bit that distinguishes CPSR and SPSR. */
574 #define SPSR_BIT (1 << 22)
576 /* The individual PSR flag bits. */
577 #define PSR_c (1 << 16)
578 #define PSR_x (1 << 17)
579 #define PSR_s (1 << 18)
580 #define PSR_f (1 << 19)
585 bfd_reloc_code_real_type reloc
;
590 VFP_REG_Sd
, VFP_REG_Sm
, VFP_REG_Sn
,
591 VFP_REG_Dd
, VFP_REG_Dm
, VFP_REG_Dn
596 VFP_LDSTMIA
, VFP_LDSTMDB
, VFP_LDSTMIAX
, VFP_LDSTMDBX
599 /* Bits for DEFINED field in neon_typed_alias. */
600 #define NTA_HASTYPE 1
601 #define NTA_HASINDEX 2
603 struct neon_typed_alias
605 unsigned char defined
;
607 struct neon_type_el eltype
;
610 /* ARM register categories. This includes coprocessor numbers and various
611 architecture extensions' registers. Each entry should have an error message
612 in reg_expected_msgs below. */
640 /* Structure for a hash table entry for a register.
641 If TYPE is REG_TYPE_VFD or REG_TYPE_NQ, the NEON field can point to extra
642 information which states whether a vector type or index is specified (for a
643 register alias created with .dn or .qn). Otherwise NEON should be NULL. */
649 unsigned char builtin
;
650 struct neon_typed_alias
* neon
;
653 /* Diagnostics used when we don't get a register of the expected type. */
654 const char * const reg_expected_msgs
[] =
656 [REG_TYPE_RN
] = N_("ARM register expected"),
657 [REG_TYPE_CP
] = N_("bad or missing co-processor number"),
658 [REG_TYPE_CN
] = N_("co-processor register expected"),
659 [REG_TYPE_FN
] = N_("FPA register expected"),
660 [REG_TYPE_VFS
] = N_("VFP single precision register expected"),
661 [REG_TYPE_VFD
] = N_("VFP/Neon double precision register expected"),
662 [REG_TYPE_NQ
] = N_("Neon quad precision register expected"),
663 [REG_TYPE_VFSD
] = N_("VFP single or double precision register expected"),
664 [REG_TYPE_NDQ
] = N_("Neon double or quad precision register expected"),
665 [REG_TYPE_NSD
] = N_("Neon single or double precision register expected"),
666 [REG_TYPE_NSDQ
] = N_("VFP single, double or Neon quad precision register"
668 [REG_TYPE_VFC
] = N_("VFP system register expected"),
669 [REG_TYPE_MVF
] = N_("Maverick MVF register expected"),
670 [REG_TYPE_MVD
] = N_("Maverick MVD register expected"),
671 [REG_TYPE_MVFX
] = N_("Maverick MVFX register expected"),
672 [REG_TYPE_MVDX
] = N_("Maverick MVDX register expected"),
673 [REG_TYPE_MVAX
] = N_("Maverick MVAX register expected"),
674 [REG_TYPE_DSPSC
] = N_("Maverick DSPSC register expected"),
675 [REG_TYPE_MMXWR
] = N_("iWMMXt data register expected"),
676 [REG_TYPE_MMXWC
] = N_("iWMMXt control register expected"),
677 [REG_TYPE_MMXWCG
] = N_("iWMMXt scalar register expected"),
678 [REG_TYPE_XSCALE
] = N_("XScale accumulator register expected"),
679 [REG_TYPE_RNB
] = N_("")
682 /* Some well known registers that we refer to directly elsewhere. */
688 /* ARM instructions take 4bytes in the object file, Thumb instructions
694 /* Basic string to match. */
695 const char * template_name
;
697 /* Parameters to instruction. */
698 unsigned int operands
[8];
700 /* Conditional tag - see opcode_lookup. */
701 unsigned int tag
: 4;
703 /* Basic instruction code. */
704 unsigned int avalue
: 28;
706 /* Thumb-format instruction code. */
709 /* Which architecture variant provides this instruction. */
710 const arm_feature_set
* avariant
;
711 const arm_feature_set
* tvariant
;
713 /* Function to call to encode instruction in ARM format. */
714 void (* aencode
) (void);
716 /* Function to call to encode instruction in Thumb format. */
717 void (* tencode
) (void);
720 /* Defines for various bits that we will want to toggle. */
721 #define INST_IMMEDIATE 0x02000000
722 #define OFFSET_REG 0x02000000
723 #define HWOFFSET_IMM 0x00400000
724 #define SHIFT_BY_REG 0x00000010
725 #define PRE_INDEX 0x01000000
726 #define INDEX_UP 0x00800000
727 #define WRITE_BACK 0x00200000
728 #define LDM_TYPE_2_OR_3 0x00400000
729 #define CPSI_MMOD 0x00020000
731 #define LITERAL_MASK 0xf000f000
732 #define OPCODE_MASK 0xfe1fffff
733 #define V4_STR_BIT 0x00000020
734 #define VLDR_VMOV_SAME 0x0040f000
736 #define T2_SUBS_PC_LR 0xf3de8f00
738 #define DATA_OP_SHIFT 21
739 #define SBIT_SHIFT 20
741 #define T2_OPCODE_MASK 0xfe1fffff
742 #define T2_DATA_OP_SHIFT 21
743 #define T2_SBIT_SHIFT 20
745 #define A_COND_MASK 0xf0000000
746 #define A_PUSH_POP_OP_MASK 0x0fff0000
748 /* Opcodes for pushing/popping registers to/from the stack. */
749 #define A1_OPCODE_PUSH 0x092d0000
750 #define A2_OPCODE_PUSH 0x052d0004
751 #define A2_OPCODE_POP 0x049d0004
753 /* Codes to distinguish the arithmetic instructions. */
764 #define OPCODE_CMP 10
765 #define OPCODE_CMN 11
766 #define OPCODE_ORR 12
767 #define OPCODE_MOV 13
768 #define OPCODE_BIC 14
769 #define OPCODE_MVN 15
771 #define T2_OPCODE_AND 0
772 #define T2_OPCODE_BIC 1
773 #define T2_OPCODE_ORR 2
774 #define T2_OPCODE_ORN 3
775 #define T2_OPCODE_EOR 4
776 #define T2_OPCODE_ADD 8
777 #define T2_OPCODE_ADC 10
778 #define T2_OPCODE_SBC 11
779 #define T2_OPCODE_SUB 13
780 #define T2_OPCODE_RSB 14
782 #define T_OPCODE_MUL 0x4340
783 #define T_OPCODE_TST 0x4200
784 #define T_OPCODE_CMN 0x42c0
785 #define T_OPCODE_NEG 0x4240
786 #define T_OPCODE_MVN 0x43c0
788 #define T_OPCODE_ADD_R3 0x1800
789 #define T_OPCODE_SUB_R3 0x1a00
790 #define T_OPCODE_ADD_HI 0x4400
791 #define T_OPCODE_ADD_ST 0xb000
792 #define T_OPCODE_SUB_ST 0xb080
793 #define T_OPCODE_ADD_SP 0xa800
794 #define T_OPCODE_ADD_PC 0xa000
795 #define T_OPCODE_ADD_I8 0x3000
796 #define T_OPCODE_SUB_I8 0x3800
797 #define T_OPCODE_ADD_I3 0x1c00
798 #define T_OPCODE_SUB_I3 0x1e00
800 #define T_OPCODE_ASR_R 0x4100
801 #define T_OPCODE_LSL_R 0x4080
802 #define T_OPCODE_LSR_R 0x40c0
803 #define T_OPCODE_ROR_R 0x41c0
804 #define T_OPCODE_ASR_I 0x1000
805 #define T_OPCODE_LSL_I 0x0000
806 #define T_OPCODE_LSR_I 0x0800
808 #define T_OPCODE_MOV_I8 0x2000
809 #define T_OPCODE_CMP_I8 0x2800
810 #define T_OPCODE_CMP_LR 0x4280
811 #define T_OPCODE_MOV_HR 0x4600
812 #define T_OPCODE_CMP_HR 0x4500
814 #define T_OPCODE_LDR_PC 0x4800
815 #define T_OPCODE_LDR_SP 0x9800
816 #define T_OPCODE_STR_SP 0x9000
817 #define T_OPCODE_LDR_IW 0x6800
818 #define T_OPCODE_STR_IW 0x6000
819 #define T_OPCODE_LDR_IH 0x8800
820 #define T_OPCODE_STR_IH 0x8000
821 #define T_OPCODE_LDR_IB 0x7800
822 #define T_OPCODE_STR_IB 0x7000
823 #define T_OPCODE_LDR_RW 0x5800
824 #define T_OPCODE_STR_RW 0x5000
825 #define T_OPCODE_LDR_RH 0x5a00
826 #define T_OPCODE_STR_RH 0x5200
827 #define T_OPCODE_LDR_RB 0x5c00
828 #define T_OPCODE_STR_RB 0x5400
830 #define T_OPCODE_PUSH 0xb400
831 #define T_OPCODE_POP 0xbc00
833 #define T_OPCODE_BRANCH 0xe000
835 #define THUMB_SIZE 2 /* Size of thumb instruction. */
836 #define THUMB_PP_PC_LR 0x0100
837 #define THUMB_LOAD_BIT 0x0800
838 #define THUMB2_LOAD_BIT 0x00100000
840 #define BAD_ARGS _("bad arguments to instruction")
841 #define BAD_SP _("r13 not allowed here")
842 #define BAD_PC _("r15 not allowed here")
843 #define BAD_COND _("instruction cannot be conditional")
844 #define BAD_OVERLAP _("registers may not be the same")
845 #define BAD_HIREG _("lo register required")
846 #define BAD_THUMB32 _("instruction not supported in Thumb16 mode")
847 #define BAD_ADDR_MODE _("instruction does not accept this addressing mode")
848 #define BAD_BRANCH _("branch must be last instruction in IT block")
849 #define BAD_BRANCH_OFF _("branch out of range or not a multiple of 2")
850 #define BAD_NOT_IT _("instruction not allowed in IT block")
851 #define BAD_FPU _("selected FPU does not support instruction")
852 #define BAD_OUT_IT _("thumb conditional instruction should be in IT block")
853 #define BAD_IT_COND _("incorrect condition in IT block")
854 #define BAD_IT_IT _("IT falling in the range of a previous IT block")
855 #define MISSING_FNSTART _("missing .fnstart before unwinding directive")
856 #define BAD_PC_ADDRESSING \
857 _("cannot use register index with PC-relative addressing")
858 #define BAD_PC_WRITEBACK \
859 _("cannot use writeback with PC-relative addressing")
860 #define BAD_RANGE _("branch out of range")
861 #define BAD_FP16 _("selected processor does not support fp16 instruction")
862 #define UNPRED_REG(R) _("using " R " results in unpredictable behaviour")
863 #define THUMB1_RELOC_ONLY _("relocation valid in thumb1 code only")
865 static struct hash_control
* arm_ops_hsh
;
866 static struct hash_control
* arm_cond_hsh
;
867 static struct hash_control
* arm_shift_hsh
;
868 static struct hash_control
* arm_psr_hsh
;
869 static struct hash_control
* arm_v7m_psr_hsh
;
870 static struct hash_control
* arm_reg_hsh
;
871 static struct hash_control
* arm_reloc_hsh
;
872 static struct hash_control
* arm_barrier_opt_hsh
;
874 /* Stuff needed to resolve the label ambiguity
883 symbolS
* last_label_seen
;
884 static int label_is_thumb_function_name
= FALSE
;
886 /* Literal pool structure. Held on a per-section
887 and per-sub-section basis. */
889 #define MAX_LITERAL_POOL_SIZE 1024
890 typedef struct literal_pool
892 expressionS literals
[MAX_LITERAL_POOL_SIZE
];
893 unsigned int next_free_entry
;
899 struct dwarf2_line_info locs
[MAX_LITERAL_POOL_SIZE
];
901 struct literal_pool
* next
;
902 unsigned int alignment
;
905 /* Pointer to a linked list of literal pools. */
906 literal_pool
* list_of_pools
= NULL
;
908 typedef enum asmfunc_states
911 WAITING_ASMFUNC_NAME
,
915 static asmfunc_states asmfunc_state
= OUTSIDE_ASMFUNC
;
918 # define now_it seg_info (now_seg)->tc_segment_info_data.current_it
920 static struct current_it now_it
;
924 now_it_compatible (int cond
)
926 return (cond
& ~1) == (now_it
.cc
& ~1);
930 conditional_insn (void)
932 return inst
.cond
!= COND_ALWAYS
;
935 static int in_it_block (void);
937 static int handle_it_state (void);
939 static void force_automatic_it_block_close (void);
941 static void it_fsm_post_encode (void);
943 #define set_it_insn_type(type) \
946 inst.it_insn_type = type; \
947 if (handle_it_state () == FAIL) \
952 #define set_it_insn_type_nonvoid(type, failret) \
955 inst.it_insn_type = type; \
956 if (handle_it_state () == FAIL) \
961 #define set_it_insn_type_last() \
964 if (inst.cond == COND_ALWAYS) \
965 set_it_insn_type (IF_INSIDE_IT_LAST_INSN); \
967 set_it_insn_type (INSIDE_IT_LAST_INSN); \
973 /* This array holds the chars that always start a comment. If the
974 pre-processor is disabled, these aren't very useful. */
975 char arm_comment_chars
[] = "@";
977 /* This array holds the chars that only start a comment at the beginning of
978 a line. If the line seems to have the form '# 123 filename'
979 .line and .file directives will appear in the pre-processed output. */
980 /* Note that input_file.c hand checks for '#' at the beginning of the
981 first line of the input file. This is because the compiler outputs
982 #NO_APP at the beginning of its output. */
983 /* Also note that comments like this one will always work. */
984 const char line_comment_chars
[] = "#";
986 char arm_line_separator_chars
[] = ";";
988 /* Chars that can be used to separate mant
989 from exp in floating point numbers. */
990 const char EXP_CHARS
[] = "eE";
992 /* Chars that mean this number is a floating point constant. */
996 const char FLT_CHARS
[] = "rRsSfFdDxXeEpP";
998 /* Prefix characters that indicate the start of an immediate
1000 #define is_immediate_prefix(C) ((C) == '#' || (C) == '$')
1002 /* Separator character handling. */
1004 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
1007 skip_past_char (char ** str
, char c
)
1009 /* PR gas/14987: Allow for whitespace before the expected character. */
1010 skip_whitespace (*str
);
1021 #define skip_past_comma(str) skip_past_char (str, ',')
1023 /* Arithmetic expressions (possibly involving symbols). */
1025 /* Return TRUE if anything in the expression is a bignum. */
1028 walk_no_bignums (symbolS
* sp
)
1030 if (symbol_get_value_expression (sp
)->X_op
== O_big
)
1033 if (symbol_get_value_expression (sp
)->X_add_symbol
)
1035 return (walk_no_bignums (symbol_get_value_expression (sp
)->X_add_symbol
)
1036 || (symbol_get_value_expression (sp
)->X_op_symbol
1037 && walk_no_bignums (symbol_get_value_expression (sp
)->X_op_symbol
)));
1043 static bfd_boolean in_my_get_expression
= FALSE
;
1045 /* Third argument to my_get_expression. */
1046 #define GE_NO_PREFIX 0
1047 #define GE_IMM_PREFIX 1
1048 #define GE_OPT_PREFIX 2
1049 /* This is a bit of a hack. Use an optional prefix, and also allow big (64-bit)
1050 immediates, as can be used in Neon VMVN and VMOV immediate instructions. */
1051 #define GE_OPT_PREFIX_BIG 3
1054 my_get_expression (expressionS
* ep
, char ** str
, int prefix_mode
)
1058 /* In unified syntax, all prefixes are optional. */
1060 prefix_mode
= (prefix_mode
== GE_OPT_PREFIX_BIG
) ? prefix_mode
1063 switch (prefix_mode
)
1065 case GE_NO_PREFIX
: break;
1067 if (!is_immediate_prefix (**str
))
1069 inst
.error
= _("immediate expression requires a # prefix");
1075 case GE_OPT_PREFIX_BIG
:
1076 if (is_immediate_prefix (**str
))
1083 memset (ep
, 0, sizeof (expressionS
));
1085 save_in
= input_line_pointer
;
1086 input_line_pointer
= *str
;
1087 in_my_get_expression
= TRUE
;
1089 in_my_get_expression
= FALSE
;
1091 if (ep
->X_op
== O_illegal
|| ep
->X_op
== O_absent
)
1093 /* We found a bad or missing expression in md_operand(). */
1094 *str
= input_line_pointer
;
1095 input_line_pointer
= save_in
;
1096 if (inst
.error
== NULL
)
1097 inst
.error
= (ep
->X_op
== O_absent
1098 ? _("missing expression") :_("bad expression"));
1102 /* Get rid of any bignums now, so that we don't generate an error for which
1103 we can't establish a line number later on. Big numbers are never valid
1104 in instructions, which is where this routine is always called. */
1105 if (prefix_mode
!= GE_OPT_PREFIX_BIG
1106 && (ep
->X_op
== O_big
1107 || (ep
->X_add_symbol
1108 && (walk_no_bignums (ep
->X_add_symbol
)
1110 && walk_no_bignums (ep
->X_op_symbol
))))))
1112 inst
.error
= _("invalid constant");
1113 *str
= input_line_pointer
;
1114 input_line_pointer
= save_in
;
1118 *str
= input_line_pointer
;
1119 input_line_pointer
= save_in
;
1123 /* Turn a string in input_line_pointer into a floating point constant
1124 of type TYPE, and store the appropriate bytes in *LITP. The number
1125 of LITTLENUMS emitted is stored in *SIZEP. An error message is
1126 returned, or NULL on OK.
1128 Note that fp constants aren't represent in the normal way on the ARM.
1129 In big endian mode, things are as expected. However, in little endian
1130 mode fp constants are big-endian word-wise, and little-endian byte-wise
1131 within the words. For example, (double) 1.1 in big endian mode is
1132 the byte sequence 3f f1 99 99 99 99 99 9a, and in little endian mode is
1133 the byte sequence 99 99 f1 3f 9a 99 99 99.
1135 ??? The format of 12 byte floats is uncertain according to gcc's arm.h. */
1138 md_atof (int type
, char * litP
, int * sizeP
)
1141 LITTLENUM_TYPE words
[MAX_LITTLENUMS
];
1173 return _("Unrecognized or unsupported floating point constant");
1176 t
= atof_ieee (input_line_pointer
, type
, words
);
1178 input_line_pointer
= t
;
1179 *sizeP
= prec
* sizeof (LITTLENUM_TYPE
);
1181 if (target_big_endian
)
1183 for (i
= 0; i
< prec
; i
++)
1185 md_number_to_chars (litP
, (valueT
) words
[i
], sizeof (LITTLENUM_TYPE
));
1186 litP
+= sizeof (LITTLENUM_TYPE
);
1191 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_endian_pure
))
1192 for (i
= prec
- 1; i
>= 0; i
--)
1194 md_number_to_chars (litP
, (valueT
) words
[i
], sizeof (LITTLENUM_TYPE
));
1195 litP
+= sizeof (LITTLENUM_TYPE
);
1198 /* For a 4 byte float the order of elements in `words' is 1 0.
1199 For an 8 byte float the order is 1 0 3 2. */
1200 for (i
= 0; i
< prec
; i
+= 2)
1202 md_number_to_chars (litP
, (valueT
) words
[i
+ 1],
1203 sizeof (LITTLENUM_TYPE
));
1204 md_number_to_chars (litP
+ sizeof (LITTLENUM_TYPE
),
1205 (valueT
) words
[i
], sizeof (LITTLENUM_TYPE
));
1206 litP
+= 2 * sizeof (LITTLENUM_TYPE
);
1213 /* We handle all bad expressions here, so that we can report the faulty
1214 instruction in the error message. */
1217 md_operand (expressionS
* exp
)
1219 if (in_my_get_expression
)
1220 exp
->X_op
= O_illegal
;
1223 /* Immediate values. */
1226 /* Generic immediate-value read function for use in directives.
1227 Accepts anything that 'expression' can fold to a constant.
1228 *val receives the number. */
1231 immediate_for_directive (int *val
)
1234 exp
.X_op
= O_illegal
;
1236 if (is_immediate_prefix (*input_line_pointer
))
1238 input_line_pointer
++;
1242 if (exp
.X_op
!= O_constant
)
1244 as_bad (_("expected #constant"));
1245 ignore_rest_of_line ();
1248 *val
= exp
.X_add_number
;
1253 /* Register parsing. */
1255 /* Generic register parser. CCP points to what should be the
1256 beginning of a register name. If it is indeed a valid register
1257 name, advance CCP over it and return the reg_entry structure;
1258 otherwise return NULL. Does not issue diagnostics. */
1260 static struct reg_entry
*
1261 arm_reg_parse_multi (char **ccp
)
1265 struct reg_entry
*reg
;
1267 skip_whitespace (start
);
1269 #ifdef REGISTER_PREFIX
1270 if (*start
!= REGISTER_PREFIX
)
1274 #ifdef OPTIONAL_REGISTER_PREFIX
1275 if (*start
== OPTIONAL_REGISTER_PREFIX
)
1280 if (!ISALPHA (*p
) || !is_name_beginner (*p
))
1285 while (ISALPHA (*p
) || ISDIGIT (*p
) || *p
== '_');
1287 reg
= (struct reg_entry
*) hash_find_n (arm_reg_hsh
, start
, p
- start
);
1297 arm_reg_alt_syntax (char **ccp
, char *start
, struct reg_entry
*reg
,
1298 enum arm_reg_type type
)
1300 /* Alternative syntaxes are accepted for a few register classes. */
1307 /* Generic coprocessor register names are allowed for these. */
1308 if (reg
&& reg
->type
== REG_TYPE_CN
)
1313 /* For backward compatibility, a bare number is valid here. */
1315 unsigned long processor
= strtoul (start
, ccp
, 10);
1316 if (*ccp
!= start
&& processor
<= 15)
1321 case REG_TYPE_MMXWC
:
1322 /* WC includes WCG. ??? I'm not sure this is true for all
1323 instructions that take WC registers. */
1324 if (reg
&& reg
->type
== REG_TYPE_MMXWCG
)
1335 /* As arm_reg_parse_multi, but the register must be of type TYPE, and the
1336 return value is the register number or FAIL. */
1339 arm_reg_parse (char **ccp
, enum arm_reg_type type
)
1342 struct reg_entry
*reg
= arm_reg_parse_multi (ccp
);
1345 /* Do not allow a scalar (reg+index) to parse as a register. */
1346 if (reg
&& reg
->neon
&& (reg
->neon
->defined
& NTA_HASINDEX
))
1349 if (reg
&& reg
->type
== type
)
1352 if ((ret
= arm_reg_alt_syntax (ccp
, start
, reg
, type
)) != FAIL
)
1359 /* Parse a Neon type specifier. *STR should point at the leading '.'
1360 character. Does no verification at this stage that the type fits the opcode
1367 Can all be legally parsed by this function.
1369 Fills in neon_type struct pointer with parsed information, and updates STR
1370 to point after the parsed type specifier. Returns SUCCESS if this was a legal
1371 type, FAIL if not. */
1374 parse_neon_type (struct neon_type
*type
, char **str
)
1381 while (type
->elems
< NEON_MAX_TYPE_ELS
)
1383 enum neon_el_type thistype
= NT_untyped
;
1384 unsigned thissize
= -1u;
1391 /* Just a size without an explicit type. */
1395 switch (TOLOWER (*ptr
))
1397 case 'i': thistype
= NT_integer
; break;
1398 case 'f': thistype
= NT_float
; break;
1399 case 'p': thistype
= NT_poly
; break;
1400 case 's': thistype
= NT_signed
; break;
1401 case 'u': thistype
= NT_unsigned
; break;
1403 thistype
= NT_float
;
1408 as_bad (_("unexpected character `%c' in type specifier"), *ptr
);
1414 /* .f is an abbreviation for .f32. */
1415 if (thistype
== NT_float
&& !ISDIGIT (*ptr
))
1420 thissize
= strtoul (ptr
, &ptr
, 10);
1422 if (thissize
!= 8 && thissize
!= 16 && thissize
!= 32
1425 as_bad (_("bad size %d in type specifier"), thissize
);
1433 type
->el
[type
->elems
].type
= thistype
;
1434 type
->el
[type
->elems
].size
= thissize
;
1439 /* Empty/missing type is not a successful parse. */
1440 if (type
->elems
== 0)
1448 /* Errors may be set multiple times during parsing or bit encoding
1449 (particularly in the Neon bits), but usually the earliest error which is set
1450 will be the most meaningful. Avoid overwriting it with later (cascading)
1451 errors by calling this function. */
1454 first_error (const char *err
)
1460 /* Parse a single type, e.g. ".s32", leading period included. */
1462 parse_neon_operand_type (struct neon_type_el
*vectype
, char **ccp
)
1465 struct neon_type optype
;
1469 if (parse_neon_type (&optype
, &str
) == SUCCESS
)
1471 if (optype
.elems
== 1)
1472 *vectype
= optype
.el
[0];
1475 first_error (_("only one type should be specified for operand"));
1481 first_error (_("vector type expected"));
1493 /* Special meanings for indices (which have a range of 0-7), which will fit into
1496 #define NEON_ALL_LANES 15
1497 #define NEON_INTERLEAVE_LANES 14
1499 /* Parse either a register or a scalar, with an optional type. Return the
1500 register number, and optionally fill in the actual type of the register
1501 when multiple alternatives were given (NEON_TYPE_NDQ) in *RTYPE, and
1502 type/index information in *TYPEINFO. */
1505 parse_typed_reg_or_scalar (char **ccp
, enum arm_reg_type type
,
1506 enum arm_reg_type
*rtype
,
1507 struct neon_typed_alias
*typeinfo
)
1510 struct reg_entry
*reg
= arm_reg_parse_multi (&str
);
1511 struct neon_typed_alias atype
;
1512 struct neon_type_el parsetype
;
1516 atype
.eltype
.type
= NT_invtype
;
1517 atype
.eltype
.size
= -1;
1519 /* Try alternate syntax for some types of register. Note these are mutually
1520 exclusive with the Neon syntax extensions. */
1523 int altreg
= arm_reg_alt_syntax (&str
, *ccp
, reg
, type
);
1531 /* Undo polymorphism when a set of register types may be accepted. */
1532 if ((type
== REG_TYPE_NDQ
1533 && (reg
->type
== REG_TYPE_NQ
|| reg
->type
== REG_TYPE_VFD
))
1534 || (type
== REG_TYPE_VFSD
1535 && (reg
->type
== REG_TYPE_VFS
|| reg
->type
== REG_TYPE_VFD
))
1536 || (type
== REG_TYPE_NSDQ
1537 && (reg
->type
== REG_TYPE_VFS
|| reg
->type
== REG_TYPE_VFD
1538 || reg
->type
== REG_TYPE_NQ
))
1539 || (type
== REG_TYPE_NSD
1540 && (reg
->type
== REG_TYPE_VFS
|| reg
->type
== REG_TYPE_VFD
))
1541 || (type
== REG_TYPE_MMXWC
1542 && (reg
->type
== REG_TYPE_MMXWCG
)))
1543 type
= (enum arm_reg_type
) reg
->type
;
1545 if (type
!= reg
->type
)
1551 if (parse_neon_operand_type (&parsetype
, &str
) == SUCCESS
)
1553 if ((atype
.defined
& NTA_HASTYPE
) != 0)
1555 first_error (_("can't redefine type for operand"));
1558 atype
.defined
|= NTA_HASTYPE
;
1559 atype
.eltype
= parsetype
;
1562 if (skip_past_char (&str
, '[') == SUCCESS
)
1564 if (type
!= REG_TYPE_VFD
1565 && !(type
== REG_TYPE_VFS
1566 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8_2
)))
1568 first_error (_("only D registers may be indexed"));
1572 if ((atype
.defined
& NTA_HASINDEX
) != 0)
1574 first_error (_("can't change index for operand"));
1578 atype
.defined
|= NTA_HASINDEX
;
1580 if (skip_past_char (&str
, ']') == SUCCESS
)
1581 atype
.index
= NEON_ALL_LANES
;
1586 my_get_expression (&exp
, &str
, GE_NO_PREFIX
);
1588 if (exp
.X_op
!= O_constant
)
1590 first_error (_("constant expression required"));
1594 if (skip_past_char (&str
, ']') == FAIL
)
1597 atype
.index
= exp
.X_add_number
;
1612 /* Like arm_reg_parse, but also allow the following extra features:
1613 - If RTYPE is non-zero, return the (possibly restricted) type of the
1614 register (e.g. Neon double or quad reg when either has been requested).
1615 - If this is a Neon vector type with additional type information, fill
1616 in the struct pointed to by VECTYPE (if non-NULL).
1617 This function will fault on encountering a scalar. */
1620 arm_typed_reg_parse (char **ccp
, enum arm_reg_type type
,
1621 enum arm_reg_type
*rtype
, struct neon_type_el
*vectype
)
1623 struct neon_typed_alias atype
;
1625 int reg
= parse_typed_reg_or_scalar (&str
, type
, rtype
, &atype
);
1630 /* Do not allow regname(... to parse as a register. */
1634 /* Do not allow a scalar (reg+index) to parse as a register. */
1635 if ((atype
.defined
& NTA_HASINDEX
) != 0)
1637 first_error (_("register operand expected, but got scalar"));
1642 *vectype
= atype
.eltype
;
1649 #define NEON_SCALAR_REG(X) ((X) >> 4)
1650 #define NEON_SCALAR_INDEX(X) ((X) & 15)
1652 /* Parse a Neon scalar. Most of the time when we're parsing a scalar, we don't
1653 have enough information to be able to do a good job bounds-checking. So, we
1654 just do easy checks here, and do further checks later. */
1657 parse_scalar (char **ccp
, int elsize
, struct neon_type_el
*type
)
1661 struct neon_typed_alias atype
;
1662 enum arm_reg_type reg_type
= REG_TYPE_VFD
;
1665 reg_type
= REG_TYPE_VFS
;
1667 reg
= parse_typed_reg_or_scalar (&str
, reg_type
, NULL
, &atype
);
1669 if (reg
== FAIL
|| (atype
.defined
& NTA_HASINDEX
) == 0)
1672 if (atype
.index
== NEON_ALL_LANES
)
1674 first_error (_("scalar must have an index"));
1677 else if (atype
.index
>= 64 / elsize
)
1679 first_error (_("scalar index out of range"));
1684 *type
= atype
.eltype
;
1688 return reg
* 16 + atype
.index
;
1691 /* Types of registers in a list. */
1704 /* Parse an ARM register list. Returns the bitmask, or FAIL. */
1707 parse_reg_list (char ** strp
, enum reg_list_els etype
)
1713 gas_assert (etype
== REGLIST_RN
|| etype
== REGLIST_CLRM
);
1715 /* We come back here if we get ranges concatenated by '+' or '|'. */
1718 skip_whitespace (str
);
1731 const char apsr_str
[] = "apsr";
1732 int apsr_str_len
= strlen (apsr_str
);
1734 reg
= arm_reg_parse (&str
, REGLIST_RN
);
1735 if (etype
== REGLIST_CLRM
)
1737 if (reg
== REG_SP
|| reg
== REG_PC
)
1739 else if (reg
== FAIL
1740 && !strncasecmp (str
, apsr_str
, apsr_str_len
)
1741 && !ISALPHA (*(str
+ apsr_str_len
)))
1744 str
+= apsr_str_len
;
1749 first_error (_("r0-r12, lr or APSR expected"));
1753 else /* etype == REGLIST_RN. */
1757 first_error (_(reg_expected_msgs
[REGLIST_RN
]));
1768 first_error (_("bad range in register list"));
1772 for (i
= cur_reg
+ 1; i
< reg
; i
++)
1774 if (range
& (1 << i
))
1776 (_("Warning: duplicated register (r%d) in register list"),
1784 if (range
& (1 << reg
))
1785 as_tsktsk (_("Warning: duplicated register (r%d) in register list"),
1787 else if (reg
<= cur_reg
)
1788 as_tsktsk (_("Warning: register range not in ascending order"));
1793 while (skip_past_comma (&str
) != FAIL
1794 || (in_range
= 1, *str
++ == '-'));
1797 if (skip_past_char (&str
, '}') == FAIL
)
1799 first_error (_("missing `}'"));
1803 else if (etype
== REGLIST_RN
)
1807 if (my_get_expression (&exp
, &str
, GE_NO_PREFIX
))
1810 if (exp
.X_op
== O_constant
)
1812 if (exp
.X_add_number
1813 != (exp
.X_add_number
& 0x0000ffff))
1815 inst
.error
= _("invalid register mask");
1819 if ((range
& exp
.X_add_number
) != 0)
1821 int regno
= range
& exp
.X_add_number
;
1824 regno
= (1 << regno
) - 1;
1826 (_("Warning: duplicated register (r%d) in register list"),
1830 range
|= exp
.X_add_number
;
1834 if (inst
.relocs
[0].type
!= 0)
1836 inst
.error
= _("expression too complex");
1840 memcpy (&inst
.relocs
[0].exp
, &exp
, sizeof (expressionS
));
1841 inst
.relocs
[0].type
= BFD_RELOC_ARM_MULTI
;
1842 inst
.relocs
[0].pc_rel
= 0;
1846 if (*str
== '|' || *str
== '+')
1852 while (another_range
);
1858 /* Parse a VFP register list. If the string is invalid return FAIL.
1859 Otherwise return the number of registers, and set PBASE to the first
1860 register. Parses registers of type ETYPE.
1861 If REGLIST_NEON_D is used, several syntax enhancements are enabled:
1862 - Q registers can be used to specify pairs of D registers
1863 - { } can be omitted from around a singleton register list
1864 FIXME: This is not implemented, as it would require backtracking in
1867 This could be done (the meaning isn't really ambiguous), but doesn't
1868 fit in well with the current parsing framework.
1869 - 32 D registers may be used (also true for VFPv3).
1870 FIXME: Types are ignored in these register lists, which is probably a
1874 parse_vfp_reg_list (char **ccp
, unsigned int *pbase
, enum reg_list_els etype
,
1875 bfd_boolean
*partial_match
)
1880 enum arm_reg_type regtype
= (enum arm_reg_type
) 0;
1884 unsigned long mask
= 0;
1886 bfd_boolean vpr_seen
= FALSE
;
1887 bfd_boolean expect_vpr
=
1888 (etype
== REGLIST_VFP_S_VPR
) || (etype
== REGLIST_VFP_D_VPR
);
1890 if (skip_past_char (&str
, '{') == FAIL
)
1892 inst
.error
= _("expecting {");
1899 case REGLIST_VFP_S_VPR
:
1900 regtype
= REG_TYPE_VFS
;
1905 case REGLIST_VFP_D_VPR
:
1906 regtype
= REG_TYPE_VFD
;
1909 case REGLIST_NEON_D
:
1910 regtype
= REG_TYPE_NDQ
;
1917 if (etype
!= REGLIST_VFP_S
&& etype
!= REGLIST_VFP_S_VPR
)
1919 /* VFPv3 allows 32 D registers, except for the VFPv3-D16 variant. */
1920 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_d32
))
1924 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
1927 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
,
1934 base_reg
= max_regs
;
1935 *partial_match
= FALSE
;
1939 int setmask
= 1, addregs
= 1;
1940 const char vpr_str
[] = "vpr";
1941 int vpr_str_len
= strlen (vpr_str
);
1943 new_base
= arm_typed_reg_parse (&str
, regtype
, ®type
, NULL
);
1947 if (new_base
== FAIL
1948 && !strncasecmp (str
, vpr_str
, vpr_str_len
)
1949 && !ISALPHA (*(str
+ vpr_str_len
))
1955 base_reg
= 0; /* Canonicalize VPR only on d0 with 0 regs. */
1959 first_error (_("VPR expected last"));
1962 else if (new_base
== FAIL
)
1964 if (regtype
== REG_TYPE_VFS
)
1965 first_error (_("VFP single precision register or VPR "
1967 else /* regtype == REG_TYPE_VFD. */
1968 first_error (_("VFP/Neon double precision register or VPR "
1973 else if (new_base
== FAIL
)
1975 first_error (_(reg_expected_msgs
[regtype
]));
1979 *partial_match
= TRUE
;
1983 if (new_base
>= max_regs
)
1985 first_error (_("register out of range in list"));
1989 /* Note: a value of 2 * n is returned for the register Q<n>. */
1990 if (regtype
== REG_TYPE_NQ
)
1996 if (new_base
< base_reg
)
1997 base_reg
= new_base
;
1999 if (mask
& (setmask
<< new_base
))
2001 first_error (_("invalid register list"));
2005 if ((mask
>> new_base
) != 0 && ! warned
&& !vpr_seen
)
2007 as_tsktsk (_("register list not in ascending order"));
2011 mask
|= setmask
<< new_base
;
2014 if (*str
== '-') /* We have the start of a range expression */
2020 if ((high_range
= arm_typed_reg_parse (&str
, regtype
, NULL
, NULL
))
2023 inst
.error
= gettext (reg_expected_msgs
[regtype
]);
2027 if (high_range
>= max_regs
)
2029 first_error (_("register out of range in list"));
2033 if (regtype
== REG_TYPE_NQ
)
2034 high_range
= high_range
+ 1;
2036 if (high_range
<= new_base
)
2038 inst
.error
= _("register range not in ascending order");
2042 for (new_base
+= addregs
; new_base
<= high_range
; new_base
+= addregs
)
2044 if (mask
& (setmask
<< new_base
))
2046 inst
.error
= _("invalid register list");
2050 mask
|= setmask
<< new_base
;
2055 while (skip_past_comma (&str
) != FAIL
);
2059 /* Sanity check -- should have raised a parse error above. */
2060 if ((!vpr_seen
&& count
== 0) || count
> max_regs
)
2065 if (expect_vpr
&& !vpr_seen
)
2067 first_error (_("VPR expected last"));
2071 /* Final test -- the registers must be consecutive. */
2073 for (i
= 0; i
< count
; i
++)
2075 if ((mask
& (1u << i
)) == 0)
2077 inst
.error
= _("non-contiguous register range");
2087 /* True if two alias types are the same. */
2090 neon_alias_types_same (struct neon_typed_alias
*a
, struct neon_typed_alias
*b
)
2098 if (a
->defined
!= b
->defined
)
2101 if ((a
->defined
& NTA_HASTYPE
) != 0
2102 && (a
->eltype
.type
!= b
->eltype
.type
2103 || a
->eltype
.size
!= b
->eltype
.size
))
2106 if ((a
->defined
& NTA_HASINDEX
) != 0
2107 && (a
->index
!= b
->index
))
2113 /* Parse element/structure lists for Neon VLD<n> and VST<n> instructions.
2114 The base register is put in *PBASE.
2115 The lane (or one of the NEON_*_LANES constants) is placed in bits [3:0] of
2117 The register stride (minus one) is put in bit 4 of the return value.
2118 Bits [6:5] encode the list length (minus one).
2119 The type of the list elements is put in *ELTYPE, if non-NULL. */
2121 #define NEON_LANE(X) ((X) & 0xf)
2122 #define NEON_REG_STRIDE(X) ((((X) >> 4) & 1) + 1)
2123 #define NEON_REGLIST_LENGTH(X) ((((X) >> 5) & 3) + 1)
2126 parse_neon_el_struct_list (char **str
, unsigned *pbase
,
2127 struct neon_type_el
*eltype
)
2134 int leading_brace
= 0;
2135 enum arm_reg_type rtype
= REG_TYPE_NDQ
;
2136 const char *const incr_error
= _("register stride must be 1 or 2");
2137 const char *const type_error
= _("mismatched element/structure types in list");
2138 struct neon_typed_alias firsttype
;
2139 firsttype
.defined
= 0;
2140 firsttype
.eltype
.type
= NT_invtype
;
2141 firsttype
.eltype
.size
= -1;
2142 firsttype
.index
= -1;
2144 if (skip_past_char (&ptr
, '{') == SUCCESS
)
2149 struct neon_typed_alias atype
;
2150 int getreg
= parse_typed_reg_or_scalar (&ptr
, rtype
, &rtype
, &atype
);
2154 first_error (_(reg_expected_msgs
[rtype
]));
2161 if (rtype
== REG_TYPE_NQ
)
2167 else if (reg_incr
== -1)
2169 reg_incr
= getreg
- base_reg
;
2170 if (reg_incr
< 1 || reg_incr
> 2)
2172 first_error (_(incr_error
));
2176 else if (getreg
!= base_reg
+ reg_incr
* count
)
2178 first_error (_(incr_error
));
2182 if (! neon_alias_types_same (&atype
, &firsttype
))
2184 first_error (_(type_error
));
2188 /* Handle Dn-Dm or Qn-Qm syntax. Can only be used with non-indexed list
2192 struct neon_typed_alias htype
;
2193 int hireg
, dregs
= (rtype
== REG_TYPE_NQ
) ? 2 : 1;
2195 lane
= NEON_INTERLEAVE_LANES
;
2196 else if (lane
!= NEON_INTERLEAVE_LANES
)
2198 first_error (_(type_error
));
2203 else if (reg_incr
!= 1)
2205 first_error (_("don't use Rn-Rm syntax with non-unit stride"));
2209 hireg
= parse_typed_reg_or_scalar (&ptr
, rtype
, NULL
, &htype
);
2212 first_error (_(reg_expected_msgs
[rtype
]));
2215 if (! neon_alias_types_same (&htype
, &firsttype
))
2217 first_error (_(type_error
));
2220 count
+= hireg
+ dregs
- getreg
;
2224 /* If we're using Q registers, we can't use [] or [n] syntax. */
2225 if (rtype
== REG_TYPE_NQ
)
2231 if ((atype
.defined
& NTA_HASINDEX
) != 0)
2235 else if (lane
!= atype
.index
)
2237 first_error (_(type_error
));
2241 else if (lane
== -1)
2242 lane
= NEON_INTERLEAVE_LANES
;
2243 else if (lane
!= NEON_INTERLEAVE_LANES
)
2245 first_error (_(type_error
));
2250 while ((count
!= 1 || leading_brace
) && skip_past_comma (&ptr
) != FAIL
);
2252 /* No lane set by [x]. We must be interleaving structures. */
2254 lane
= NEON_INTERLEAVE_LANES
;
2257 if (lane
== -1 || base_reg
== -1 || count
< 1 || count
> 4
2258 || (count
> 1 && reg_incr
== -1))
2260 first_error (_("error parsing element/structure list"));
2264 if ((count
> 1 || leading_brace
) && skip_past_char (&ptr
, '}') == FAIL
)
2266 first_error (_("expected }"));
2274 *eltype
= firsttype
.eltype
;
2279 return lane
| ((reg_incr
- 1) << 4) | ((count
- 1) << 5);
2282 /* Parse an explicit relocation suffix on an expression. This is
2283 either nothing, or a word in parentheses. Note that if !OBJ_ELF,
2284 arm_reloc_hsh contains no entries, so this function can only
2285 succeed if there is no () after the word. Returns -1 on error,
2286 BFD_RELOC_UNUSED if there wasn't any suffix. */
2289 parse_reloc (char **str
)
2291 struct reloc_entry
*r
;
2295 return BFD_RELOC_UNUSED
;
2300 while (*q
&& *q
!= ')' && *q
!= ',')
2305 if ((r
= (struct reloc_entry
*)
2306 hash_find_n (arm_reloc_hsh
, p
, q
- p
)) == NULL
)
2313 /* Directives: register aliases. */
2315 static struct reg_entry
*
2316 insert_reg_alias (char *str
, unsigned number
, int type
)
2318 struct reg_entry
*new_reg
;
2321 if ((new_reg
= (struct reg_entry
*) hash_find (arm_reg_hsh
, str
)) != 0)
2323 if (new_reg
->builtin
)
2324 as_warn (_("ignoring attempt to redefine built-in register '%s'"), str
);
2326 /* Only warn about a redefinition if it's not defined as the
2328 else if (new_reg
->number
!= number
|| new_reg
->type
!= type
)
2329 as_warn (_("ignoring redefinition of register alias '%s'"), str
);
2334 name
= xstrdup (str
);
2335 new_reg
= XNEW (struct reg_entry
);
2337 new_reg
->name
= name
;
2338 new_reg
->number
= number
;
2339 new_reg
->type
= type
;
2340 new_reg
->builtin
= FALSE
;
2341 new_reg
->neon
= NULL
;
2343 if (hash_insert (arm_reg_hsh
, name
, (void *) new_reg
))
2350 insert_neon_reg_alias (char *str
, int number
, int type
,
2351 struct neon_typed_alias
*atype
)
2353 struct reg_entry
*reg
= insert_reg_alias (str
, number
, type
);
2357 first_error (_("attempt to redefine typed alias"));
2363 reg
->neon
= XNEW (struct neon_typed_alias
);
2364 *reg
->neon
= *atype
;
2368 /* Look for the .req directive. This is of the form:
2370 new_register_name .req existing_register_name
2372 If we find one, or if it looks sufficiently like one that we want to
2373 handle any error here, return TRUE. Otherwise return FALSE. */
2376 create_register_alias (char * newname
, char *p
)
2378 struct reg_entry
*old
;
2379 char *oldname
, *nbuf
;
2382 /* The input scrubber ensures that whitespace after the mnemonic is
2383 collapsed to single spaces. */
2385 if (strncmp (oldname
, " .req ", 6) != 0)
2389 if (*oldname
== '\0')
2392 old
= (struct reg_entry
*) hash_find (arm_reg_hsh
, oldname
);
2395 as_warn (_("unknown register '%s' -- .req ignored"), oldname
);
2399 /* If TC_CASE_SENSITIVE is defined, then newname already points to
2400 the desired alias name, and p points to its end. If not, then
2401 the desired alias name is in the global original_case_string. */
2402 #ifdef TC_CASE_SENSITIVE
2405 newname
= original_case_string
;
2406 nlen
= strlen (newname
);
2409 nbuf
= xmemdup0 (newname
, nlen
);
2411 /* Create aliases under the new name as stated; an all-lowercase
2412 version of the new name; and an all-uppercase version of the new
2414 if (insert_reg_alias (nbuf
, old
->number
, old
->type
) != NULL
)
2416 for (p
= nbuf
; *p
; p
++)
2419 if (strncmp (nbuf
, newname
, nlen
))
2421 /* If this attempt to create an additional alias fails, do not bother
2422 trying to create the all-lower case alias. We will fail and issue
2423 a second, duplicate error message. This situation arises when the
2424 programmer does something like:
2427 The second .req creates the "Foo" alias but then fails to create
2428 the artificial FOO alias because it has already been created by the
2430 if (insert_reg_alias (nbuf
, old
->number
, old
->type
) == NULL
)
2437 for (p
= nbuf
; *p
; p
++)
2440 if (strncmp (nbuf
, newname
, nlen
))
2441 insert_reg_alias (nbuf
, old
->number
, old
->type
);
2448 /* Create a Neon typed/indexed register alias using directives, e.g.:
2453 These typed registers can be used instead of the types specified after the
2454 Neon mnemonic, so long as all operands given have types. Types can also be
2455 specified directly, e.g.:
2456 vadd d0.s32, d1.s32, d2.s32 */
2459 create_neon_reg_alias (char *newname
, char *p
)
2461 enum arm_reg_type basetype
;
2462 struct reg_entry
*basereg
;
2463 struct reg_entry mybasereg
;
2464 struct neon_type ntype
;
2465 struct neon_typed_alias typeinfo
;
2466 char *namebuf
, *nameend ATTRIBUTE_UNUSED
;
2469 typeinfo
.defined
= 0;
2470 typeinfo
.eltype
.type
= NT_invtype
;
2471 typeinfo
.eltype
.size
= -1;
2472 typeinfo
.index
= -1;
2476 if (strncmp (p
, " .dn ", 5) == 0)
2477 basetype
= REG_TYPE_VFD
;
2478 else if (strncmp (p
, " .qn ", 5) == 0)
2479 basetype
= REG_TYPE_NQ
;
2488 basereg
= arm_reg_parse_multi (&p
);
2490 if (basereg
&& basereg
->type
!= basetype
)
2492 as_bad (_("bad type for register"));
2496 if (basereg
== NULL
)
2499 /* Try parsing as an integer. */
2500 my_get_expression (&exp
, &p
, GE_NO_PREFIX
);
2501 if (exp
.X_op
!= O_constant
)
2503 as_bad (_("expression must be constant"));
2506 basereg
= &mybasereg
;
2507 basereg
->number
= (basetype
== REG_TYPE_NQ
) ? exp
.X_add_number
* 2
2513 typeinfo
= *basereg
->neon
;
2515 if (parse_neon_type (&ntype
, &p
) == SUCCESS
)
2517 /* We got a type. */
2518 if (typeinfo
.defined
& NTA_HASTYPE
)
2520 as_bad (_("can't redefine the type of a register alias"));
2524 typeinfo
.defined
|= NTA_HASTYPE
;
2525 if (ntype
.elems
!= 1)
2527 as_bad (_("you must specify a single type only"));
2530 typeinfo
.eltype
= ntype
.el
[0];
2533 if (skip_past_char (&p
, '[') == SUCCESS
)
2536 /* We got a scalar index. */
2538 if (typeinfo
.defined
& NTA_HASINDEX
)
2540 as_bad (_("can't redefine the index of a scalar alias"));
2544 my_get_expression (&exp
, &p
, GE_NO_PREFIX
);
2546 if (exp
.X_op
!= O_constant
)
2548 as_bad (_("scalar index must be constant"));
2552 typeinfo
.defined
|= NTA_HASINDEX
;
2553 typeinfo
.index
= exp
.X_add_number
;
2555 if (skip_past_char (&p
, ']') == FAIL
)
2557 as_bad (_("expecting ]"));
2562 /* If TC_CASE_SENSITIVE is defined, then newname already points to
2563 the desired alias name, and p points to its end. If not, then
2564 the desired alias name is in the global original_case_string. */
2565 #ifdef TC_CASE_SENSITIVE
2566 namelen
= nameend
- newname
;
2568 newname
= original_case_string
;
2569 namelen
= strlen (newname
);
2572 namebuf
= xmemdup0 (newname
, namelen
);
2574 insert_neon_reg_alias (namebuf
, basereg
->number
, basetype
,
2575 typeinfo
.defined
!= 0 ? &typeinfo
: NULL
);
2577 /* Insert name in all uppercase. */
2578 for (p
= namebuf
; *p
; p
++)
2581 if (strncmp (namebuf
, newname
, namelen
))
2582 insert_neon_reg_alias (namebuf
, basereg
->number
, basetype
,
2583 typeinfo
.defined
!= 0 ? &typeinfo
: NULL
);
2585 /* Insert name in all lowercase. */
2586 for (p
= namebuf
; *p
; p
++)
2589 if (strncmp (namebuf
, newname
, namelen
))
2590 insert_neon_reg_alias (namebuf
, basereg
->number
, basetype
,
2591 typeinfo
.defined
!= 0 ? &typeinfo
: NULL
);
2597 /* Should never be called, as .req goes between the alias and the
2598 register name, not at the beginning of the line. */
2601 s_req (int a ATTRIBUTE_UNUSED
)
2603 as_bad (_("invalid syntax for .req directive"));
2607 s_dn (int a ATTRIBUTE_UNUSED
)
2609 as_bad (_("invalid syntax for .dn directive"));
2613 s_qn (int a ATTRIBUTE_UNUSED
)
2615 as_bad (_("invalid syntax for .qn directive"));
2618 /* The .unreq directive deletes an alias which was previously defined
2619 by .req. For example:
2625 s_unreq (int a ATTRIBUTE_UNUSED
)
2630 name
= input_line_pointer
;
2632 while (*input_line_pointer
!= 0
2633 && *input_line_pointer
!= ' '
2634 && *input_line_pointer
!= '\n')
2635 ++input_line_pointer
;
2637 saved_char
= *input_line_pointer
;
2638 *input_line_pointer
= 0;
2641 as_bad (_("invalid syntax for .unreq directive"));
2644 struct reg_entry
*reg
= (struct reg_entry
*) hash_find (arm_reg_hsh
,
2648 as_bad (_("unknown register alias '%s'"), name
);
2649 else if (reg
->builtin
)
2650 as_warn (_("ignoring attempt to use .unreq on fixed register name: '%s'"),
2657 hash_delete (arm_reg_hsh
, name
, FALSE
);
2658 free ((char *) reg
->name
);
2663 /* Also locate the all upper case and all lower case versions.
2664 Do not complain if we cannot find one or the other as it
2665 was probably deleted above. */
2667 nbuf
= strdup (name
);
2668 for (p
= nbuf
; *p
; p
++)
2670 reg
= (struct reg_entry
*) hash_find (arm_reg_hsh
, nbuf
);
2673 hash_delete (arm_reg_hsh
, nbuf
, FALSE
);
2674 free ((char *) reg
->name
);
2680 for (p
= nbuf
; *p
; p
++)
2682 reg
= (struct reg_entry
*) hash_find (arm_reg_hsh
, nbuf
);
2685 hash_delete (arm_reg_hsh
, nbuf
, FALSE
);
2686 free ((char *) reg
->name
);
2696 *input_line_pointer
= saved_char
;
2697 demand_empty_rest_of_line ();
2700 /* Directives: Instruction set selection. */
2703 /* This code is to handle mapping symbols as defined in the ARM ELF spec.
2704 (See "Mapping symbols", section 4.5.5, ARM AAELF version 1.0).
2705 Note that previously, $a and $t has type STT_FUNC (BSF_OBJECT flag),
2706 and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
2708 /* Create a new mapping symbol for the transition to STATE. */
2711 make_mapping_symbol (enum mstate state
, valueT value
, fragS
*frag
)
2714 const char * symname
;
2721 type
= BSF_NO_FLAGS
;
2725 type
= BSF_NO_FLAGS
;
2729 type
= BSF_NO_FLAGS
;
2735 symbolP
= symbol_new (symname
, now_seg
, value
, frag
);
2736 symbol_get_bfdsym (symbolP
)->flags
|= type
| BSF_LOCAL
;
2741 THUMB_SET_FUNC (symbolP
, 0);
2742 ARM_SET_THUMB (symbolP
, 0);
2743 ARM_SET_INTERWORK (symbolP
, support_interwork
);
2747 THUMB_SET_FUNC (symbolP
, 1);
2748 ARM_SET_THUMB (symbolP
, 1);
2749 ARM_SET_INTERWORK (symbolP
, support_interwork
);
2757 /* Save the mapping symbols for future reference. Also check that
2758 we do not place two mapping symbols at the same offset within a
2759 frag. We'll handle overlap between frags in
2760 check_mapping_symbols.
2762 If .fill or other data filling directive generates zero sized data,
2763 the mapping symbol for the following code will have the same value
2764 as the one generated for the data filling directive. In this case,
2765 we replace the old symbol with the new one at the same address. */
2768 if (frag
->tc_frag_data
.first_map
!= NULL
)
2770 know (S_GET_VALUE (frag
->tc_frag_data
.first_map
) == 0);
2771 symbol_remove (frag
->tc_frag_data
.first_map
, &symbol_rootP
, &symbol_lastP
);
2773 frag
->tc_frag_data
.first_map
= symbolP
;
2775 if (frag
->tc_frag_data
.last_map
!= NULL
)
2777 know (S_GET_VALUE (frag
->tc_frag_data
.last_map
) <= S_GET_VALUE (symbolP
));
2778 if (S_GET_VALUE (frag
->tc_frag_data
.last_map
) == S_GET_VALUE (symbolP
))
2779 symbol_remove (frag
->tc_frag_data
.last_map
, &symbol_rootP
, &symbol_lastP
);
2781 frag
->tc_frag_data
.last_map
= symbolP
;
2784 /* We must sometimes convert a region marked as code to data during
2785 code alignment, if an odd number of bytes have to be padded. The
2786 code mapping symbol is pushed to an aligned address. */
2789 insert_data_mapping_symbol (enum mstate state
,
2790 valueT value
, fragS
*frag
, offsetT bytes
)
2792 /* If there was already a mapping symbol, remove it. */
2793 if (frag
->tc_frag_data
.last_map
!= NULL
2794 && S_GET_VALUE (frag
->tc_frag_data
.last_map
) == frag
->fr_address
+ value
)
2796 symbolS
*symp
= frag
->tc_frag_data
.last_map
;
2800 know (frag
->tc_frag_data
.first_map
== symp
);
2801 frag
->tc_frag_data
.first_map
= NULL
;
2803 frag
->tc_frag_data
.last_map
= NULL
;
2804 symbol_remove (symp
, &symbol_rootP
, &symbol_lastP
);
2807 make_mapping_symbol (MAP_DATA
, value
, frag
);
2808 make_mapping_symbol (state
, value
+ bytes
, frag
);
2811 static void mapping_state_2 (enum mstate state
, int max_chars
);
2813 /* Set the mapping state to STATE. Only call this when about to
2814 emit some STATE bytes to the file. */
2816 #define TRANSITION(from, to) (mapstate == (from) && state == (to))
2818 mapping_state (enum mstate state
)
2820 enum mstate mapstate
= seg_info (now_seg
)->tc_segment_info_data
.mapstate
;
2822 if (mapstate
== state
)
2823 /* The mapping symbol has already been emitted.
2824 There is nothing else to do. */
2827 if (state
== MAP_ARM
|| state
== MAP_THUMB
)
2829 All ARM instructions require 4-byte alignment.
2830 (Almost) all Thumb instructions require 2-byte alignment.
2832 When emitting instructions into any section, mark the section
2835 Some Thumb instructions are alignment-sensitive modulo 4 bytes,
2836 but themselves require 2-byte alignment; this applies to some
2837 PC- relative forms. However, these cases will involve implicit
2838 literal pool generation or an explicit .align >=2, both of
2839 which will cause the section to me marked with sufficient
2840 alignment. Thus, we don't handle those cases here. */
2841 record_alignment (now_seg
, state
== MAP_ARM
? 2 : 1);
2843 if (TRANSITION (MAP_UNDEFINED
, MAP_DATA
))
2844 /* This case will be evaluated later. */
2847 mapping_state_2 (state
, 0);
2850 /* Same as mapping_state, but MAX_CHARS bytes have already been
2851 allocated. Put the mapping symbol that far back. */
2854 mapping_state_2 (enum mstate state
, int max_chars
)
2856 enum mstate mapstate
= seg_info (now_seg
)->tc_segment_info_data
.mapstate
;
2858 if (!SEG_NORMAL (now_seg
))
2861 if (mapstate
== state
)
2862 /* The mapping symbol has already been emitted.
2863 There is nothing else to do. */
2866 if (TRANSITION (MAP_UNDEFINED
, MAP_ARM
)
2867 || TRANSITION (MAP_UNDEFINED
, MAP_THUMB
))
2869 struct frag
* const frag_first
= seg_info (now_seg
)->frchainP
->frch_root
;
2870 const int add_symbol
= (frag_now
!= frag_first
) || (frag_now_fix () > 0);
2873 make_mapping_symbol (MAP_DATA
, (valueT
) 0, frag_first
);
2876 seg_info (now_seg
)->tc_segment_info_data
.mapstate
= state
;
2877 make_mapping_symbol (state
, (valueT
) frag_now_fix () - max_chars
, frag_now
);
2881 #define mapping_state(x) ((void)0)
2882 #define mapping_state_2(x, y) ((void)0)
2885 /* Find the real, Thumb encoded start of a Thumb function. */
2889 find_real_start (symbolS
* symbolP
)
2892 const char * name
= S_GET_NAME (symbolP
);
2893 symbolS
* new_target
;
2895 /* This definition must agree with the one in gcc/config/arm/thumb.c. */
2896 #define STUB_NAME ".real_start_of"
2901 /* The compiler may generate BL instructions to local labels because
2902 it needs to perform a branch to a far away location. These labels
2903 do not have a corresponding ".real_start_of" label. We check
2904 both for S_IS_LOCAL and for a leading dot, to give a way to bypass
2905 the ".real_start_of" convention for nonlocal branches. */
2906 if (S_IS_LOCAL (symbolP
) || name
[0] == '.')
2909 real_start
= concat (STUB_NAME
, name
, NULL
);
2910 new_target
= symbol_find (real_start
);
2913 if (new_target
== NULL
)
2915 as_warn (_("Failed to find real start of function: %s\n"), name
);
2916 new_target
= symbolP
;
2924 opcode_select (int width
)
2931 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v4t
))
2932 as_bad (_("selected processor does not support THUMB opcodes"));
2935 /* No need to force the alignment, since we will have been
2936 coming from ARM mode, which is word-aligned. */
2937 record_alignment (now_seg
, 1);
2944 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
))
2945 as_bad (_("selected processor does not support ARM opcodes"));
2950 frag_align (2, 0, 0);
2952 record_alignment (now_seg
, 1);
2957 as_bad (_("invalid instruction size selected (%d)"), width
);
2962 s_arm (int ignore ATTRIBUTE_UNUSED
)
2965 demand_empty_rest_of_line ();
2969 s_thumb (int ignore ATTRIBUTE_UNUSED
)
2972 demand_empty_rest_of_line ();
2976 s_code (int unused ATTRIBUTE_UNUSED
)
2980 temp
= get_absolute_expression ();
2985 opcode_select (temp
);
2989 as_bad (_("invalid operand to .code directive (%d) (expecting 16 or 32)"), temp
);
2994 s_force_thumb (int ignore ATTRIBUTE_UNUSED
)
2996 /* If we are not already in thumb mode go into it, EVEN if
2997 the target processor does not support thumb instructions.
2998 This is used by gcc/config/arm/lib1funcs.asm for example
2999 to compile interworking support functions even if the
3000 target processor should not support interworking. */
3004 record_alignment (now_seg
, 1);
3007 demand_empty_rest_of_line ();
3011 s_thumb_func (int ignore ATTRIBUTE_UNUSED
)
3015 /* The following label is the name/address of the start of a Thumb function.
3016 We need to know this for the interworking support. */
3017 label_is_thumb_function_name
= TRUE
;
3020 /* Perform a .set directive, but also mark the alias as
3021 being a thumb function. */
3024 s_thumb_set (int equiv
)
3026 /* XXX the following is a duplicate of the code for s_set() in read.c
3027 We cannot just call that code as we need to get at the symbol that
3034 /* Especial apologies for the random logic:
3035 This just grew, and could be parsed much more simply!
3037 delim
= get_symbol_name (& name
);
3038 end_name
= input_line_pointer
;
3039 (void) restore_line_pointer (delim
);
3041 if (*input_line_pointer
!= ',')
3044 as_bad (_("expected comma after name \"%s\""), name
);
3046 ignore_rest_of_line ();
3050 input_line_pointer
++;
3053 if (name
[0] == '.' && name
[1] == '\0')
3055 /* XXX - this should not happen to .thumb_set. */
3059 if ((symbolP
= symbol_find (name
)) == NULL
3060 && (symbolP
= md_undefined_symbol (name
)) == NULL
)
3063 /* When doing symbol listings, play games with dummy fragments living
3064 outside the normal fragment chain to record the file and line info
3066 if (listing
& LISTING_SYMBOLS
)
3068 extern struct list_info_struct
* listing_tail
;
3069 fragS
* dummy_frag
= (fragS
* ) xmalloc (sizeof (fragS
));
3071 memset (dummy_frag
, 0, sizeof (fragS
));
3072 dummy_frag
->fr_type
= rs_fill
;
3073 dummy_frag
->line
= listing_tail
;
3074 symbolP
= symbol_new (name
, undefined_section
, 0, dummy_frag
);
3075 dummy_frag
->fr_symbol
= symbolP
;
3079 symbolP
= symbol_new (name
, undefined_section
, 0, &zero_address_frag
);
3082 /* "set" symbols are local unless otherwise specified. */
3083 SF_SET_LOCAL (symbolP
);
3084 #endif /* OBJ_COFF */
3085 } /* Make a new symbol. */
3087 symbol_table_insert (symbolP
);
3092 && S_IS_DEFINED (symbolP
)
3093 && S_GET_SEGMENT (symbolP
) != reg_section
)
3094 as_bad (_("symbol `%s' already defined"), S_GET_NAME (symbolP
));
3096 pseudo_set (symbolP
);
3098 demand_empty_rest_of_line ();
3100 /* XXX Now we come to the Thumb specific bit of code. */
3102 THUMB_SET_FUNC (symbolP
, 1);
3103 ARM_SET_THUMB (symbolP
, 1);
3104 #if defined OBJ_ELF || defined OBJ_COFF
3105 ARM_SET_INTERWORK (symbolP
, support_interwork
);
3109 /* Directives: Mode selection. */
3111 /* .syntax [unified|divided] - choose the new unified syntax
3112 (same for Arm and Thumb encoding, modulo slight differences in what
3113 can be represented) or the old divergent syntax for each mode. */
3115 s_syntax (int unused ATTRIBUTE_UNUSED
)
3119 delim
= get_symbol_name (& name
);
3121 if (!strcasecmp (name
, "unified"))
3122 unified_syntax
= TRUE
;
3123 else if (!strcasecmp (name
, "divided"))
3124 unified_syntax
= FALSE
;
3127 as_bad (_("unrecognized syntax mode \"%s\""), name
);
3130 (void) restore_line_pointer (delim
);
3131 demand_empty_rest_of_line ();
3134 /* Directives: sectioning and alignment. */
3137 s_bss (int ignore ATTRIBUTE_UNUSED
)
3139 /* We don't support putting frags in the BSS segment, we fake it by
3140 marking in_bss, then looking at s_skip for clues. */
3141 subseg_set (bss_section
, 0);
3142 demand_empty_rest_of_line ();
3144 #ifdef md_elf_section_change_hook
3145 md_elf_section_change_hook ();
3150 s_even (int ignore ATTRIBUTE_UNUSED
)
3152 /* Never make frag if expect extra pass. */
3154 frag_align (1, 0, 0);
3156 record_alignment (now_seg
, 1);
3158 demand_empty_rest_of_line ();
3161 /* Directives: CodeComposer Studio. */
3163 /* .ref (for CodeComposer Studio syntax only). */
3165 s_ccs_ref (int unused ATTRIBUTE_UNUSED
)
3167 if (codecomposer_syntax
)
3168 ignore_rest_of_line ();
3170 as_bad (_(".ref pseudo-op only available with -mccs flag."));
3173 /* If name is not NULL, then it is used for marking the beginning of a
3174 function, whereas if it is NULL then it means the function end. */
3176 asmfunc_debug (const char * name
)
3178 static const char * last_name
= NULL
;
3182 gas_assert (last_name
== NULL
);
3185 if (debug_type
== DEBUG_STABS
)
3186 stabs_generate_asm_func (name
, name
);
3190 gas_assert (last_name
!= NULL
);
3192 if (debug_type
== DEBUG_STABS
)
3193 stabs_generate_asm_endfunc (last_name
, last_name
);
3200 s_ccs_asmfunc (int unused ATTRIBUTE_UNUSED
)
3202 if (codecomposer_syntax
)
3204 switch (asmfunc_state
)
3206 case OUTSIDE_ASMFUNC
:
3207 asmfunc_state
= WAITING_ASMFUNC_NAME
;
3210 case WAITING_ASMFUNC_NAME
:
3211 as_bad (_(".asmfunc repeated."));
3214 case WAITING_ENDASMFUNC
:
3215 as_bad (_(".asmfunc without function."));
3218 demand_empty_rest_of_line ();
3221 as_bad (_(".asmfunc pseudo-op only available with -mccs flag."));
3225 s_ccs_endasmfunc (int unused ATTRIBUTE_UNUSED
)
3227 if (codecomposer_syntax
)
3229 switch (asmfunc_state
)
3231 case OUTSIDE_ASMFUNC
:
3232 as_bad (_(".endasmfunc without a .asmfunc."));
3235 case WAITING_ASMFUNC_NAME
:
3236 as_bad (_(".endasmfunc without function."));
3239 case WAITING_ENDASMFUNC
:
3240 asmfunc_state
= OUTSIDE_ASMFUNC
;
3241 asmfunc_debug (NULL
);
3244 demand_empty_rest_of_line ();
3247 as_bad (_(".endasmfunc pseudo-op only available with -mccs flag."));
3251 s_ccs_def (int name
)
3253 if (codecomposer_syntax
)
3256 as_bad (_(".def pseudo-op only available with -mccs flag."));
3259 /* Directives: Literal pools. */
3261 static literal_pool
*
3262 find_literal_pool (void)
3264 literal_pool
* pool
;
3266 for (pool
= list_of_pools
; pool
!= NULL
; pool
= pool
->next
)
3268 if (pool
->section
== now_seg
3269 && pool
->sub_section
== now_subseg
)
3276 static literal_pool
*
3277 find_or_make_literal_pool (void)
3279 /* Next literal pool ID number. */
3280 static unsigned int latest_pool_num
= 1;
3281 literal_pool
* pool
;
3283 pool
= find_literal_pool ();
3287 /* Create a new pool. */
3288 pool
= XNEW (literal_pool
);
3292 pool
->next_free_entry
= 0;
3293 pool
->section
= now_seg
;
3294 pool
->sub_section
= now_subseg
;
3295 pool
->next
= list_of_pools
;
3296 pool
->symbol
= NULL
;
3297 pool
->alignment
= 2;
3299 /* Add it to the list. */
3300 list_of_pools
= pool
;
3303 /* New pools, and emptied pools, will have a NULL symbol. */
3304 if (pool
->symbol
== NULL
)
3306 pool
->symbol
= symbol_create (FAKE_LABEL_NAME
, undefined_section
,
3307 (valueT
) 0, &zero_address_frag
);
3308 pool
->id
= latest_pool_num
++;
3315 /* Add the literal in the global 'inst'
3316 structure to the relevant literal pool. */
3319 add_to_lit_pool (unsigned int nbytes
)
3321 #define PADDING_SLOT 0x1
3322 #define LIT_ENTRY_SIZE_MASK 0xFF
3323 literal_pool
* pool
;
3324 unsigned int entry
, pool_size
= 0;
3325 bfd_boolean padding_slot_p
= FALSE
;
3331 imm1
= inst
.operands
[1].imm
;
3332 imm2
= (inst
.operands
[1].regisimm
? inst
.operands
[1].reg
3333 : inst
.relocs
[0].exp
.X_unsigned
? 0
3334 : ((bfd_int64_t
) inst
.operands
[1].imm
) >> 32);
3335 if (target_big_endian
)
3338 imm2
= inst
.operands
[1].imm
;
3342 pool
= find_or_make_literal_pool ();
3344 /* Check if this literal value is already in the pool. */
3345 for (entry
= 0; entry
< pool
->next_free_entry
; entry
++)
3349 if ((pool
->literals
[entry
].X_op
== inst
.relocs
[0].exp
.X_op
)
3350 && (inst
.relocs
[0].exp
.X_op
== O_constant
)
3351 && (pool
->literals
[entry
].X_add_number
3352 == inst
.relocs
[0].exp
.X_add_number
)
3353 && (pool
->literals
[entry
].X_md
== nbytes
)
3354 && (pool
->literals
[entry
].X_unsigned
3355 == inst
.relocs
[0].exp
.X_unsigned
))
3358 if ((pool
->literals
[entry
].X_op
== inst
.relocs
[0].exp
.X_op
)
3359 && (inst
.relocs
[0].exp
.X_op
== O_symbol
)
3360 && (pool
->literals
[entry
].X_add_number
3361 == inst
.relocs
[0].exp
.X_add_number
)
3362 && (pool
->literals
[entry
].X_add_symbol
3363 == inst
.relocs
[0].exp
.X_add_symbol
)
3364 && (pool
->literals
[entry
].X_op_symbol
3365 == inst
.relocs
[0].exp
.X_op_symbol
)
3366 && (pool
->literals
[entry
].X_md
== nbytes
))
3369 else if ((nbytes
== 8)
3370 && !(pool_size
& 0x7)
3371 && ((entry
+ 1) != pool
->next_free_entry
)
3372 && (pool
->literals
[entry
].X_op
== O_constant
)
3373 && (pool
->literals
[entry
].X_add_number
== (offsetT
) imm1
)
3374 && (pool
->literals
[entry
].X_unsigned
3375 == inst
.relocs
[0].exp
.X_unsigned
)
3376 && (pool
->literals
[entry
+ 1].X_op
== O_constant
)
3377 && (pool
->literals
[entry
+ 1].X_add_number
== (offsetT
) imm2
)
3378 && (pool
->literals
[entry
+ 1].X_unsigned
3379 == inst
.relocs
[0].exp
.X_unsigned
))
3382 padding_slot_p
= ((pool
->literals
[entry
].X_md
>> 8) == PADDING_SLOT
);
3383 if (padding_slot_p
&& (nbytes
== 4))
3389 /* Do we need to create a new entry? */
3390 if (entry
== pool
->next_free_entry
)
3392 if (entry
>= MAX_LITERAL_POOL_SIZE
)
3394 inst
.error
= _("literal pool overflow");
3400 /* For 8-byte entries, we align to an 8-byte boundary,
3401 and split it into two 4-byte entries, because on 32-bit
3402 host, 8-byte constants are treated as big num, thus
3403 saved in "generic_bignum" which will be overwritten
3404 by later assignments.
3406 We also need to make sure there is enough space for
3409 We also check to make sure the literal operand is a
3411 if (!(inst
.relocs
[0].exp
.X_op
== O_constant
3412 || inst
.relocs
[0].exp
.X_op
== O_big
))
3414 inst
.error
= _("invalid type for literal pool");
3417 else if (pool_size
& 0x7)
3419 if ((entry
+ 2) >= MAX_LITERAL_POOL_SIZE
)
3421 inst
.error
= _("literal pool overflow");
3425 pool
->literals
[entry
] = inst
.relocs
[0].exp
;
3426 pool
->literals
[entry
].X_op
= O_constant
;
3427 pool
->literals
[entry
].X_add_number
= 0;
3428 pool
->literals
[entry
++].X_md
= (PADDING_SLOT
<< 8) | 4;
3429 pool
->next_free_entry
+= 1;
3432 else if ((entry
+ 1) >= MAX_LITERAL_POOL_SIZE
)
3434 inst
.error
= _("literal pool overflow");
3438 pool
->literals
[entry
] = inst
.relocs
[0].exp
;
3439 pool
->literals
[entry
].X_op
= O_constant
;
3440 pool
->literals
[entry
].X_add_number
= imm1
;
3441 pool
->literals
[entry
].X_unsigned
= inst
.relocs
[0].exp
.X_unsigned
;
3442 pool
->literals
[entry
++].X_md
= 4;
3443 pool
->literals
[entry
] = inst
.relocs
[0].exp
;
3444 pool
->literals
[entry
].X_op
= O_constant
;
3445 pool
->literals
[entry
].X_add_number
= imm2
;
3446 pool
->literals
[entry
].X_unsigned
= inst
.relocs
[0].exp
.X_unsigned
;
3447 pool
->literals
[entry
].X_md
= 4;
3448 pool
->alignment
= 3;
3449 pool
->next_free_entry
+= 1;
3453 pool
->literals
[entry
] = inst
.relocs
[0].exp
;
3454 pool
->literals
[entry
].X_md
= 4;
3458 /* PR ld/12974: Record the location of the first source line to reference
3459 this entry in the literal pool. If it turns out during linking that the
3460 symbol does not exist we will be able to give an accurate line number for
3461 the (first use of the) missing reference. */
3462 if (debug_type
== DEBUG_DWARF2
)
3463 dwarf2_where (pool
->locs
+ entry
);
3465 pool
->next_free_entry
+= 1;
3467 else if (padding_slot_p
)
3469 pool
->literals
[entry
] = inst
.relocs
[0].exp
;
3470 pool
->literals
[entry
].X_md
= nbytes
;
3473 inst
.relocs
[0].exp
.X_op
= O_symbol
;
3474 inst
.relocs
[0].exp
.X_add_number
= pool_size
;
3475 inst
.relocs
[0].exp
.X_add_symbol
= pool
->symbol
;
3481 tc_start_label_without_colon (void)
3483 bfd_boolean ret
= TRUE
;
3485 if (codecomposer_syntax
&& asmfunc_state
== WAITING_ASMFUNC_NAME
)
3487 const char *label
= input_line_pointer
;
3489 while (!is_end_of_line
[(int) label
[-1]])
3494 as_bad (_("Invalid label '%s'"), label
);
3498 asmfunc_debug (label
);
3500 asmfunc_state
= WAITING_ENDASMFUNC
;
3506 /* Can't use symbol_new here, so have to create a symbol and then at
3507 a later date assign it a value. That's what these functions do. */
3510 symbol_locate (symbolS
* symbolP
,
3511 const char * name
, /* It is copied, the caller can modify. */
3512 segT segment
, /* Segment identifier (SEG_<something>). */
3513 valueT valu
, /* Symbol value. */
3514 fragS
* frag
) /* Associated fragment. */
3517 char * preserved_copy_of_name
;
3519 name_length
= strlen (name
) + 1; /* +1 for \0. */
3520 obstack_grow (¬es
, name
, name_length
);
3521 preserved_copy_of_name
= (char *) obstack_finish (¬es
);
3523 #ifdef tc_canonicalize_symbol_name
3524 preserved_copy_of_name
=
3525 tc_canonicalize_symbol_name (preserved_copy_of_name
);
3528 S_SET_NAME (symbolP
, preserved_copy_of_name
);
3530 S_SET_SEGMENT (symbolP
, segment
);
3531 S_SET_VALUE (symbolP
, valu
);
3532 symbol_clear_list_pointers (symbolP
);
3534 symbol_set_frag (symbolP
, frag
);
3536 /* Link to end of symbol chain. */
3538 extern int symbol_table_frozen
;
3540 if (symbol_table_frozen
)
3544 symbol_append (symbolP
, symbol_lastP
, & symbol_rootP
, & symbol_lastP
);
3546 obj_symbol_new_hook (symbolP
);
3548 #ifdef tc_symbol_new_hook
3549 tc_symbol_new_hook (symbolP
);
3553 verify_symbol_chain (symbol_rootP
, symbol_lastP
);
3554 #endif /* DEBUG_SYMS */
3558 s_ltorg (int ignored ATTRIBUTE_UNUSED
)
3561 literal_pool
* pool
;
3564 pool
= find_literal_pool ();
3566 || pool
->symbol
== NULL
3567 || pool
->next_free_entry
== 0)
3570 /* Align pool as you have word accesses.
3571 Only make a frag if we have to. */
3573 frag_align (pool
->alignment
, 0, 0);
3575 record_alignment (now_seg
, 2);
3578 seg_info (now_seg
)->tc_segment_info_data
.mapstate
= MAP_DATA
;
3579 make_mapping_symbol (MAP_DATA
, (valueT
) frag_now_fix (), frag_now
);
3581 sprintf (sym_name
, "$$lit_\002%x", pool
->id
);
3583 symbol_locate (pool
->symbol
, sym_name
, now_seg
,
3584 (valueT
) frag_now_fix (), frag_now
);
3585 symbol_table_insert (pool
->symbol
);
3587 ARM_SET_THUMB (pool
->symbol
, thumb_mode
);
3589 #if defined OBJ_COFF || defined OBJ_ELF
3590 ARM_SET_INTERWORK (pool
->symbol
, support_interwork
);
3593 for (entry
= 0; entry
< pool
->next_free_entry
; entry
++)
3596 if (debug_type
== DEBUG_DWARF2
)
3597 dwarf2_gen_line_info (frag_now_fix (), pool
->locs
+ entry
);
3599 /* First output the expression in the instruction to the pool. */
3600 emit_expr (&(pool
->literals
[entry
]),
3601 pool
->literals
[entry
].X_md
& LIT_ENTRY_SIZE_MASK
);
3604 /* Mark the pool as empty. */
3605 pool
->next_free_entry
= 0;
3606 pool
->symbol
= NULL
;
3610 /* Forward declarations for functions below, in the MD interface
3612 static void fix_new_arm (fragS
*, int, short, expressionS
*, int, int);
3613 static valueT
create_unwind_entry (int);
3614 static void start_unwind_section (const segT
, int);
3615 static void add_unwind_opcode (valueT
, int);
3616 static void flush_pending_unwind (void);
3618 /* Directives: Data. */
3621 s_arm_elf_cons (int nbytes
)
3625 #ifdef md_flush_pending_output
3626 md_flush_pending_output ();
3629 if (is_it_end_of_statement ())
3631 demand_empty_rest_of_line ();
3635 #ifdef md_cons_align
3636 md_cons_align (nbytes
);
3639 mapping_state (MAP_DATA
);
3643 char *base
= input_line_pointer
;
3647 if (exp
.X_op
!= O_symbol
)
3648 emit_expr (&exp
, (unsigned int) nbytes
);
3651 char *before_reloc
= input_line_pointer
;
3652 reloc
= parse_reloc (&input_line_pointer
);
3655 as_bad (_("unrecognized relocation suffix"));
3656 ignore_rest_of_line ();
3659 else if (reloc
== BFD_RELOC_UNUSED
)
3660 emit_expr (&exp
, (unsigned int) nbytes
);
3663 reloc_howto_type
*howto
= (reloc_howto_type
*)
3664 bfd_reloc_type_lookup (stdoutput
,
3665 (bfd_reloc_code_real_type
) reloc
);
3666 int size
= bfd_get_reloc_size (howto
);
3668 if (reloc
== BFD_RELOC_ARM_PLT32
)
3670 as_bad (_("(plt) is only valid on branch targets"));
3671 reloc
= BFD_RELOC_UNUSED
;
3676 as_bad (ngettext ("%s relocations do not fit in %d byte",
3677 "%s relocations do not fit in %d bytes",
3679 howto
->name
, nbytes
);
3682 /* We've parsed an expression stopping at O_symbol.
3683 But there may be more expression left now that we
3684 have parsed the relocation marker. Parse it again.
3685 XXX Surely there is a cleaner way to do this. */
3686 char *p
= input_line_pointer
;
3688 char *save_buf
= XNEWVEC (char, input_line_pointer
- base
);
3690 memcpy (save_buf
, base
, input_line_pointer
- base
);
3691 memmove (base
+ (input_line_pointer
- before_reloc
),
3692 base
, before_reloc
- base
);
3694 input_line_pointer
= base
+ (input_line_pointer
-before_reloc
);
3696 memcpy (base
, save_buf
, p
- base
);
3698 offset
= nbytes
- size
;
3699 p
= frag_more (nbytes
);
3700 memset (p
, 0, nbytes
);
3701 fix_new_exp (frag_now
, p
- frag_now
->fr_literal
+ offset
,
3702 size
, &exp
, 0, (enum bfd_reloc_code_real
) reloc
);
3708 while (*input_line_pointer
++ == ',');
3710 /* Put terminator back into stream. */
3711 input_line_pointer
--;
3712 demand_empty_rest_of_line ();
3715 /* Emit an expression containing a 32-bit thumb instruction.
3716 Implementation based on put_thumb32_insn. */
3719 emit_thumb32_expr (expressionS
* exp
)
3721 expressionS exp_high
= *exp
;
3723 exp_high
.X_add_number
= (unsigned long)exp_high
.X_add_number
>> 16;
3724 emit_expr (& exp_high
, (unsigned int) THUMB_SIZE
);
3725 exp
->X_add_number
&= 0xffff;
3726 emit_expr (exp
, (unsigned int) THUMB_SIZE
);
3729 /* Guess the instruction size based on the opcode. */
3732 thumb_insn_size (int opcode
)
3734 if ((unsigned int) opcode
< 0xe800u
)
3736 else if ((unsigned int) opcode
>= 0xe8000000u
)
3743 emit_insn (expressionS
*exp
, int nbytes
)
3747 if (exp
->X_op
== O_constant
)
3752 size
= thumb_insn_size (exp
->X_add_number
);
3756 if (size
== 2 && (unsigned int)exp
->X_add_number
> 0xffffu
)
3758 as_bad (_(".inst.n operand too big. "\
3759 "Use .inst.w instead"));
3764 if (now_it
.state
== AUTOMATIC_IT_BLOCK
)
3765 set_it_insn_type_nonvoid (OUTSIDE_IT_INSN
, 0);
3767 set_it_insn_type_nonvoid (NEUTRAL_IT_INSN
, 0);
3769 if (thumb_mode
&& (size
> THUMB_SIZE
) && !target_big_endian
)
3770 emit_thumb32_expr (exp
);
3772 emit_expr (exp
, (unsigned int) size
);
3774 it_fsm_post_encode ();
3778 as_bad (_("cannot determine Thumb instruction size. " \
3779 "Use .inst.n/.inst.w instead"));
3782 as_bad (_("constant expression required"));
3787 /* Like s_arm_elf_cons but do not use md_cons_align and
3788 set the mapping state to MAP_ARM/MAP_THUMB. */
3791 s_arm_elf_inst (int nbytes
)
3793 if (is_it_end_of_statement ())
3795 demand_empty_rest_of_line ();
3799 /* Calling mapping_state () here will not change ARM/THUMB,
3800 but will ensure not to be in DATA state. */
3803 mapping_state (MAP_THUMB
);
3808 as_bad (_("width suffixes are invalid in ARM mode"));
3809 ignore_rest_of_line ();
3815 mapping_state (MAP_ARM
);
3824 if (! emit_insn (& exp
, nbytes
))
3826 ignore_rest_of_line ();
3830 while (*input_line_pointer
++ == ',');
3832 /* Put terminator back into stream. */
3833 input_line_pointer
--;
3834 demand_empty_rest_of_line ();
3837 /* Parse a .rel31 directive. */
3840 s_arm_rel31 (int ignored ATTRIBUTE_UNUSED
)
3847 if (*input_line_pointer
== '1')
3848 highbit
= 0x80000000;
3849 else if (*input_line_pointer
!= '0')
3850 as_bad (_("expected 0 or 1"));
3852 input_line_pointer
++;
3853 if (*input_line_pointer
!= ',')
3854 as_bad (_("missing comma"));
3855 input_line_pointer
++;
3857 #ifdef md_flush_pending_output
3858 md_flush_pending_output ();
3861 #ifdef md_cons_align
3865 mapping_state (MAP_DATA
);
3870 md_number_to_chars (p
, highbit
, 4);
3871 fix_new_arm (frag_now
, p
- frag_now
->fr_literal
, 4, &exp
, 1,
3872 BFD_RELOC_ARM_PREL31
);
3874 demand_empty_rest_of_line ();
3877 /* Directives: AEABI stack-unwind tables. */
3879 /* Parse an unwind_fnstart directive. Simply records the current location. */
3882 s_arm_unwind_fnstart (int ignored ATTRIBUTE_UNUSED
)
3884 demand_empty_rest_of_line ();
3885 if (unwind
.proc_start
)
3887 as_bad (_("duplicate .fnstart directive"));
3891 /* Mark the start of the function. */
3892 unwind
.proc_start
= expr_build_dot ();
3894 /* Reset the rest of the unwind info. */
3895 unwind
.opcode_count
= 0;
3896 unwind
.table_entry
= NULL
;
3897 unwind
.personality_routine
= NULL
;
3898 unwind
.personality_index
= -1;
3899 unwind
.frame_size
= 0;
3900 unwind
.fp_offset
= 0;
3901 unwind
.fp_reg
= REG_SP
;
3903 unwind
.sp_restored
= 0;
3907 /* Parse a handlerdata directive. Creates the exception handling table entry
3908 for the function. */
3911 s_arm_unwind_handlerdata (int ignored ATTRIBUTE_UNUSED
)
3913 demand_empty_rest_of_line ();
3914 if (!unwind
.proc_start
)
3915 as_bad (MISSING_FNSTART
);
3917 if (unwind
.table_entry
)
3918 as_bad (_("duplicate .handlerdata directive"));
3920 create_unwind_entry (1);
3923 /* Parse an unwind_fnend directive. Generates the index table entry. */
3926 s_arm_unwind_fnend (int ignored ATTRIBUTE_UNUSED
)
3931 unsigned int marked_pr_dependency
;
3933 demand_empty_rest_of_line ();
3935 if (!unwind
.proc_start
)
3937 as_bad (_(".fnend directive without .fnstart"));
3941 /* Add eh table entry. */
3942 if (unwind
.table_entry
== NULL
)
3943 val
= create_unwind_entry (0);
3947 /* Add index table entry. This is two words. */
3948 start_unwind_section (unwind
.saved_seg
, 1);
3949 frag_align (2, 0, 0);
3950 record_alignment (now_seg
, 2);
3952 ptr
= frag_more (8);
3954 where
= frag_now_fix () - 8;
3956 /* Self relative offset of the function start. */
3957 fix_new (frag_now
, where
, 4, unwind
.proc_start
, 0, 1,
3958 BFD_RELOC_ARM_PREL31
);
3960 /* Indicate dependency on EHABI-defined personality routines to the
3961 linker, if it hasn't been done already. */
3962 marked_pr_dependency
3963 = seg_info (now_seg
)->tc_segment_info_data
.marked_pr_dependency
;
3964 if (unwind
.personality_index
>= 0 && unwind
.personality_index
< 3
3965 && !(marked_pr_dependency
& (1 << unwind
.personality_index
)))
3967 static const char *const name
[] =
3969 "__aeabi_unwind_cpp_pr0",
3970 "__aeabi_unwind_cpp_pr1",
3971 "__aeabi_unwind_cpp_pr2"
3973 symbolS
*pr
= symbol_find_or_make (name
[unwind
.personality_index
]);
3974 fix_new (frag_now
, where
, 0, pr
, 0, 1, BFD_RELOC_NONE
);
3975 seg_info (now_seg
)->tc_segment_info_data
.marked_pr_dependency
3976 |= 1 << unwind
.personality_index
;
3980 /* Inline exception table entry. */
3981 md_number_to_chars (ptr
+ 4, val
, 4);
3983 /* Self relative offset of the table entry. */
3984 fix_new (frag_now
, where
+ 4, 4, unwind
.table_entry
, 0, 1,
3985 BFD_RELOC_ARM_PREL31
);
3987 /* Restore the original section. */
3988 subseg_set (unwind
.saved_seg
, unwind
.saved_subseg
);
3990 unwind
.proc_start
= NULL
;
3994 /* Parse an unwind_cantunwind directive. */
3997 s_arm_unwind_cantunwind (int ignored ATTRIBUTE_UNUSED
)
3999 demand_empty_rest_of_line ();
4000 if (!unwind
.proc_start
)
4001 as_bad (MISSING_FNSTART
);
4003 if (unwind
.personality_routine
|| unwind
.personality_index
!= -1)
4004 as_bad (_("personality routine specified for cantunwind frame"));
4006 unwind
.personality_index
= -2;
4010 /* Parse a personalityindex directive. */
4013 s_arm_unwind_personalityindex (int ignored ATTRIBUTE_UNUSED
)
4017 if (!unwind
.proc_start
)
4018 as_bad (MISSING_FNSTART
);
4020 if (unwind
.personality_routine
|| unwind
.personality_index
!= -1)
4021 as_bad (_("duplicate .personalityindex directive"));
4025 if (exp
.X_op
!= O_constant
4026 || exp
.X_add_number
< 0 || exp
.X_add_number
> 15)
4028 as_bad (_("bad personality routine number"));
4029 ignore_rest_of_line ();
4033 unwind
.personality_index
= exp
.X_add_number
;
4035 demand_empty_rest_of_line ();
4039 /* Parse a personality directive. */
4042 s_arm_unwind_personality (int ignored ATTRIBUTE_UNUSED
)
4046 if (!unwind
.proc_start
)
4047 as_bad (MISSING_FNSTART
);
4049 if (unwind
.personality_routine
|| unwind
.personality_index
!= -1)
4050 as_bad (_("duplicate .personality directive"));
4052 c
= get_symbol_name (& name
);
4053 p
= input_line_pointer
;
4055 ++ input_line_pointer
;
4056 unwind
.personality_routine
= symbol_find_or_make (name
);
4058 demand_empty_rest_of_line ();
4062 /* Parse a directive saving core registers. */
4065 s_arm_unwind_save_core (void)
4071 range
= parse_reg_list (&input_line_pointer
, REGLIST_RN
);
4074 as_bad (_("expected register list"));
4075 ignore_rest_of_line ();
4079 demand_empty_rest_of_line ();
4081 /* Turn .unwind_movsp ip followed by .unwind_save {..., ip, ...}
4082 into .unwind_save {..., sp...}. We aren't bothered about the value of
4083 ip because it is clobbered by calls. */
4084 if (unwind
.sp_restored
&& unwind
.fp_reg
== 12
4085 && (range
& 0x3000) == 0x1000)
4087 unwind
.opcode_count
--;
4088 unwind
.sp_restored
= 0;
4089 range
= (range
| 0x2000) & ~0x1000;
4090 unwind
.pending_offset
= 0;
4096 /* See if we can use the short opcodes. These pop a block of up to 8
4097 registers starting with r4, plus maybe r14. */
4098 for (n
= 0; n
< 8; n
++)
4100 /* Break at the first non-saved register. */
4101 if ((range
& (1 << (n
+ 4))) == 0)
4104 /* See if there are any other bits set. */
4105 if (n
== 0 || (range
& (0xfff0 << n
) & 0xbff0) != 0)
4107 /* Use the long form. */
4108 op
= 0x8000 | ((range
>> 4) & 0xfff);
4109 add_unwind_opcode (op
, 2);
4113 /* Use the short form. */
4115 op
= 0xa8; /* Pop r14. */
4117 op
= 0xa0; /* Do not pop r14. */
4119 add_unwind_opcode (op
, 1);
4126 op
= 0xb100 | (range
& 0xf);
4127 add_unwind_opcode (op
, 2);
4130 /* Record the number of bytes pushed. */
4131 for (n
= 0; n
< 16; n
++)
4133 if (range
& (1 << n
))
4134 unwind
.frame_size
+= 4;
4139 /* Parse a directive saving FPA registers. */
4142 s_arm_unwind_save_fpa (int reg
)
4148 /* Get Number of registers to transfer. */
4149 if (skip_past_comma (&input_line_pointer
) != FAIL
)
4152 exp
.X_op
= O_illegal
;
4154 if (exp
.X_op
!= O_constant
)
4156 as_bad (_("expected , <constant>"));
4157 ignore_rest_of_line ();
4161 num_regs
= exp
.X_add_number
;
4163 if (num_regs
< 1 || num_regs
> 4)
4165 as_bad (_("number of registers must be in the range [1:4]"));
4166 ignore_rest_of_line ();
4170 demand_empty_rest_of_line ();
4175 op
= 0xb4 | (num_regs
- 1);
4176 add_unwind_opcode (op
, 1);
4181 op
= 0xc800 | (reg
<< 4) | (num_regs
- 1);
4182 add_unwind_opcode (op
, 2);
4184 unwind
.frame_size
+= num_regs
* 12;
4188 /* Parse a directive saving VFP registers for ARMv6 and above. */
4191 s_arm_unwind_save_vfp_armv6 (void)
4196 int num_vfpv3_regs
= 0;
4197 int num_regs_below_16
;
4198 bfd_boolean partial_match
;
4200 count
= parse_vfp_reg_list (&input_line_pointer
, &start
, REGLIST_VFP_D
,
4204 as_bad (_("expected register list"));
4205 ignore_rest_of_line ();
4209 demand_empty_rest_of_line ();
4211 /* We always generate FSTMD/FLDMD-style unwinding opcodes (rather
4212 than FSTMX/FLDMX-style ones). */
4214 /* Generate opcode for (VFPv3) registers numbered in the range 16 .. 31. */
4216 num_vfpv3_regs
= count
;
4217 else if (start
+ count
> 16)
4218 num_vfpv3_regs
= start
+ count
- 16;
4220 if (num_vfpv3_regs
> 0)
4222 int start_offset
= start
> 16 ? start
- 16 : 0;
4223 op
= 0xc800 | (start_offset
<< 4) | (num_vfpv3_regs
- 1);
4224 add_unwind_opcode (op
, 2);
4227 /* Generate opcode for registers numbered in the range 0 .. 15. */
4228 num_regs_below_16
= num_vfpv3_regs
> 0 ? 16 - (int) start
: count
;
4229 gas_assert (num_regs_below_16
+ num_vfpv3_regs
== count
);
4230 if (num_regs_below_16
> 0)
4232 op
= 0xc900 | (start
<< 4) | (num_regs_below_16
- 1);
4233 add_unwind_opcode (op
, 2);
4236 unwind
.frame_size
+= count
* 8;
4240 /* Parse a directive saving VFP registers for pre-ARMv6. */
4243 s_arm_unwind_save_vfp (void)
4248 bfd_boolean partial_match
;
4250 count
= parse_vfp_reg_list (&input_line_pointer
, ®
, REGLIST_VFP_D
,
4254 as_bad (_("expected register list"));
4255 ignore_rest_of_line ();
4259 demand_empty_rest_of_line ();
4264 op
= 0xb8 | (count
- 1);
4265 add_unwind_opcode (op
, 1);
4270 op
= 0xb300 | (reg
<< 4) | (count
- 1);
4271 add_unwind_opcode (op
, 2);
4273 unwind
.frame_size
+= count
* 8 + 4;
4277 /* Parse a directive saving iWMMXt data registers. */
4280 s_arm_unwind_save_mmxwr (void)
4288 if (*input_line_pointer
== '{')
4289 input_line_pointer
++;
4293 reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWR
);
4297 as_bad ("%s", _(reg_expected_msgs
[REG_TYPE_MMXWR
]));
4302 as_tsktsk (_("register list not in ascending order"));
4305 if (*input_line_pointer
== '-')
4307 input_line_pointer
++;
4308 hi_reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWR
);
4311 as_bad ("%s", _(reg_expected_msgs
[REG_TYPE_MMXWR
]));
4314 else if (reg
>= hi_reg
)
4316 as_bad (_("bad register range"));
4319 for (; reg
< hi_reg
; reg
++)
4323 while (skip_past_comma (&input_line_pointer
) != FAIL
);
4325 skip_past_char (&input_line_pointer
, '}');
4327 demand_empty_rest_of_line ();
4329 /* Generate any deferred opcodes because we're going to be looking at
4331 flush_pending_unwind ();
4333 for (i
= 0; i
< 16; i
++)
4335 if (mask
& (1 << i
))
4336 unwind
.frame_size
+= 8;
4339 /* Attempt to combine with a previous opcode. We do this because gcc
4340 likes to output separate unwind directives for a single block of
4342 if (unwind
.opcode_count
> 0)
4344 i
= unwind
.opcodes
[unwind
.opcode_count
- 1];
4345 if ((i
& 0xf8) == 0xc0)
4348 /* Only merge if the blocks are contiguous. */
4351 if ((mask
& 0xfe00) == (1 << 9))
4353 mask
|= ((1 << (i
+ 11)) - 1) & 0xfc00;
4354 unwind
.opcode_count
--;
4357 else if (i
== 6 && unwind
.opcode_count
>= 2)
4359 i
= unwind
.opcodes
[unwind
.opcode_count
- 2];
4363 op
= 0xffff << (reg
- 1);
4365 && ((mask
& op
) == (1u << (reg
- 1))))
4367 op
= (1 << (reg
+ i
+ 1)) - 1;
4368 op
&= ~((1 << reg
) - 1);
4370 unwind
.opcode_count
-= 2;
4377 /* We want to generate opcodes in the order the registers have been
4378 saved, ie. descending order. */
4379 for (reg
= 15; reg
>= -1; reg
--)
4381 /* Save registers in blocks. */
4383 || !(mask
& (1 << reg
)))
4385 /* We found an unsaved reg. Generate opcodes to save the
4392 op
= 0xc0 | (hi_reg
- 10);
4393 add_unwind_opcode (op
, 1);
4398 op
= 0xc600 | ((reg
+ 1) << 4) | ((hi_reg
- reg
) - 1);
4399 add_unwind_opcode (op
, 2);
4408 ignore_rest_of_line ();
4412 s_arm_unwind_save_mmxwcg (void)
4419 if (*input_line_pointer
== '{')
4420 input_line_pointer
++;
4422 skip_whitespace (input_line_pointer
);
4426 reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWCG
);
4430 as_bad ("%s", _(reg_expected_msgs
[REG_TYPE_MMXWCG
]));
4436 as_tsktsk (_("register list not in ascending order"));
4439 if (*input_line_pointer
== '-')
4441 input_line_pointer
++;
4442 hi_reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWCG
);
4445 as_bad ("%s", _(reg_expected_msgs
[REG_TYPE_MMXWCG
]));
4448 else if (reg
>= hi_reg
)
4450 as_bad (_("bad register range"));
4453 for (; reg
< hi_reg
; reg
++)
4457 while (skip_past_comma (&input_line_pointer
) != FAIL
);
4459 skip_past_char (&input_line_pointer
, '}');
4461 demand_empty_rest_of_line ();
4463 /* Generate any deferred opcodes because we're going to be looking at
4465 flush_pending_unwind ();
4467 for (reg
= 0; reg
< 16; reg
++)
4469 if (mask
& (1 << reg
))
4470 unwind
.frame_size
+= 4;
4473 add_unwind_opcode (op
, 2);
4476 ignore_rest_of_line ();
4480 /* Parse an unwind_save directive.
4481 If the argument is non-zero, this is a .vsave directive. */
4484 s_arm_unwind_save (int arch_v6
)
4487 struct reg_entry
*reg
;
4488 bfd_boolean had_brace
= FALSE
;
4490 if (!unwind
.proc_start
)
4491 as_bad (MISSING_FNSTART
);
4493 /* Figure out what sort of save we have. */
4494 peek
= input_line_pointer
;
4502 reg
= arm_reg_parse_multi (&peek
);
4506 as_bad (_("register expected"));
4507 ignore_rest_of_line ();
4516 as_bad (_("FPA .unwind_save does not take a register list"));
4517 ignore_rest_of_line ();
4520 input_line_pointer
= peek
;
4521 s_arm_unwind_save_fpa (reg
->number
);
4525 s_arm_unwind_save_core ();
4530 s_arm_unwind_save_vfp_armv6 ();
4532 s_arm_unwind_save_vfp ();
4535 case REG_TYPE_MMXWR
:
4536 s_arm_unwind_save_mmxwr ();
4539 case REG_TYPE_MMXWCG
:
4540 s_arm_unwind_save_mmxwcg ();
4544 as_bad (_(".unwind_save does not support this kind of register"));
4545 ignore_rest_of_line ();
4550 /* Parse an unwind_movsp directive. */
4553 s_arm_unwind_movsp (int ignored ATTRIBUTE_UNUSED
)
4559 if (!unwind
.proc_start
)
4560 as_bad (MISSING_FNSTART
);
4562 reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_RN
);
4565 as_bad ("%s", _(reg_expected_msgs
[REG_TYPE_RN
]));
4566 ignore_rest_of_line ();
4570 /* Optional constant. */
4571 if (skip_past_comma (&input_line_pointer
) != FAIL
)
4573 if (immediate_for_directive (&offset
) == FAIL
)
4579 demand_empty_rest_of_line ();
4581 if (reg
== REG_SP
|| reg
== REG_PC
)
4583 as_bad (_("SP and PC not permitted in .unwind_movsp directive"));
4587 if (unwind
.fp_reg
!= REG_SP
)
4588 as_bad (_("unexpected .unwind_movsp directive"));
4590 /* Generate opcode to restore the value. */
4592 add_unwind_opcode (op
, 1);
4594 /* Record the information for later. */
4595 unwind
.fp_reg
= reg
;
4596 unwind
.fp_offset
= unwind
.frame_size
- offset
;
4597 unwind
.sp_restored
= 1;
4600 /* Parse an unwind_pad directive. */
4603 s_arm_unwind_pad (int ignored ATTRIBUTE_UNUSED
)
4607 if (!unwind
.proc_start
)
4608 as_bad (MISSING_FNSTART
);
4610 if (immediate_for_directive (&offset
) == FAIL
)
4615 as_bad (_("stack increment must be multiple of 4"));
4616 ignore_rest_of_line ();
4620 /* Don't generate any opcodes, just record the details for later. */
4621 unwind
.frame_size
+= offset
;
4622 unwind
.pending_offset
+= offset
;
4624 demand_empty_rest_of_line ();
4627 /* Parse an unwind_setfp directive. */
4630 s_arm_unwind_setfp (int ignored ATTRIBUTE_UNUSED
)
4636 if (!unwind
.proc_start
)
4637 as_bad (MISSING_FNSTART
);
4639 fp_reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_RN
);
4640 if (skip_past_comma (&input_line_pointer
) == FAIL
)
4643 sp_reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_RN
);
4645 if (fp_reg
== FAIL
|| sp_reg
== FAIL
)
4647 as_bad (_("expected <reg>, <reg>"));
4648 ignore_rest_of_line ();
4652 /* Optional constant. */
4653 if (skip_past_comma (&input_line_pointer
) != FAIL
)
4655 if (immediate_for_directive (&offset
) == FAIL
)
4661 demand_empty_rest_of_line ();
4663 if (sp_reg
!= REG_SP
&& sp_reg
!= unwind
.fp_reg
)
4665 as_bad (_("register must be either sp or set by a previous"
4666 "unwind_movsp directive"));
4670 /* Don't generate any opcodes, just record the information for later. */
4671 unwind
.fp_reg
= fp_reg
;
4673 if (sp_reg
== REG_SP
)
4674 unwind
.fp_offset
= unwind
.frame_size
- offset
;
4676 unwind
.fp_offset
-= offset
;
4679 /* Parse an unwind_raw directive. */
4682 s_arm_unwind_raw (int ignored ATTRIBUTE_UNUSED
)
4685 /* This is an arbitrary limit. */
4686 unsigned char op
[16];
4689 if (!unwind
.proc_start
)
4690 as_bad (MISSING_FNSTART
);
4693 if (exp
.X_op
== O_constant
4694 && skip_past_comma (&input_line_pointer
) != FAIL
)
4696 unwind
.frame_size
+= exp
.X_add_number
;
4700 exp
.X_op
= O_illegal
;
4702 if (exp
.X_op
!= O_constant
)
4704 as_bad (_("expected <offset>, <opcode>"));
4705 ignore_rest_of_line ();
4711 /* Parse the opcode. */
4716 as_bad (_("unwind opcode too long"));
4717 ignore_rest_of_line ();
4719 if (exp
.X_op
!= O_constant
|| exp
.X_add_number
& ~0xff)
4721 as_bad (_("invalid unwind opcode"));
4722 ignore_rest_of_line ();
4725 op
[count
++] = exp
.X_add_number
;
4727 /* Parse the next byte. */
4728 if (skip_past_comma (&input_line_pointer
) == FAIL
)
4734 /* Add the opcode bytes in reverse order. */
4736 add_unwind_opcode (op
[count
], 1);
4738 demand_empty_rest_of_line ();
4742 /* Parse a .eabi_attribute directive. */
4745 s_arm_eabi_attribute (int ignored ATTRIBUTE_UNUSED
)
4747 int tag
= obj_elf_vendor_attribute (OBJ_ATTR_PROC
);
4749 if (tag
< NUM_KNOWN_OBJ_ATTRIBUTES
)
4750 attributes_set_explicitly
[tag
] = 1;
4753 /* Emit a tls fix for the symbol. */
4756 s_arm_tls_descseq (int ignored ATTRIBUTE_UNUSED
)
4760 #ifdef md_flush_pending_output
4761 md_flush_pending_output ();
4764 #ifdef md_cons_align
4768 /* Since we're just labelling the code, there's no need to define a
4771 p
= obstack_next_free (&frchain_now
->frch_obstack
);
4772 fix_new_arm (frag_now
, p
- frag_now
->fr_literal
, 4, &exp
, 0,
4773 thumb_mode
? BFD_RELOC_ARM_THM_TLS_DESCSEQ
4774 : BFD_RELOC_ARM_TLS_DESCSEQ
);
4776 #endif /* OBJ_ELF */
4778 static void s_arm_arch (int);
4779 static void s_arm_object_arch (int);
4780 static void s_arm_cpu (int);
4781 static void s_arm_fpu (int);
4782 static void s_arm_arch_extension (int);
4787 pe_directive_secrel (int dummy ATTRIBUTE_UNUSED
)
4794 if (exp
.X_op
== O_symbol
)
4795 exp
.X_op
= O_secrel
;
4797 emit_expr (&exp
, 4);
4799 while (*input_line_pointer
++ == ',');
4801 input_line_pointer
--;
4802 demand_empty_rest_of_line ();
4806 /* This table describes all the machine specific pseudo-ops the assembler
4807 has to support. The fields are:
4808 pseudo-op name without dot
4809 function to call to execute this pseudo-op
4810 Integer arg to pass to the function. */
4812 const pseudo_typeS md_pseudo_table
[] =
4814 /* Never called because '.req' does not start a line. */
4815 { "req", s_req
, 0 },
4816 /* Following two are likewise never called. */
4819 { "unreq", s_unreq
, 0 },
4820 { "bss", s_bss
, 0 },
4821 { "align", s_align_ptwo
, 2 },
4822 { "arm", s_arm
, 0 },
4823 { "thumb", s_thumb
, 0 },
4824 { "code", s_code
, 0 },
4825 { "force_thumb", s_force_thumb
, 0 },
4826 { "thumb_func", s_thumb_func
, 0 },
4827 { "thumb_set", s_thumb_set
, 0 },
4828 { "even", s_even
, 0 },
4829 { "ltorg", s_ltorg
, 0 },
4830 { "pool", s_ltorg
, 0 },
4831 { "syntax", s_syntax
, 0 },
4832 { "cpu", s_arm_cpu
, 0 },
4833 { "arch", s_arm_arch
, 0 },
4834 { "object_arch", s_arm_object_arch
, 0 },
4835 { "fpu", s_arm_fpu
, 0 },
4836 { "arch_extension", s_arm_arch_extension
, 0 },
4838 { "word", s_arm_elf_cons
, 4 },
4839 { "long", s_arm_elf_cons
, 4 },
4840 { "inst.n", s_arm_elf_inst
, 2 },
4841 { "inst.w", s_arm_elf_inst
, 4 },
4842 { "inst", s_arm_elf_inst
, 0 },
4843 { "rel31", s_arm_rel31
, 0 },
4844 { "fnstart", s_arm_unwind_fnstart
, 0 },
4845 { "fnend", s_arm_unwind_fnend
, 0 },
4846 { "cantunwind", s_arm_unwind_cantunwind
, 0 },
4847 { "personality", s_arm_unwind_personality
, 0 },
4848 { "personalityindex", s_arm_unwind_personalityindex
, 0 },
4849 { "handlerdata", s_arm_unwind_handlerdata
, 0 },
4850 { "save", s_arm_unwind_save
, 0 },
4851 { "vsave", s_arm_unwind_save
, 1 },
4852 { "movsp", s_arm_unwind_movsp
, 0 },
4853 { "pad", s_arm_unwind_pad
, 0 },
4854 { "setfp", s_arm_unwind_setfp
, 0 },
4855 { "unwind_raw", s_arm_unwind_raw
, 0 },
4856 { "eabi_attribute", s_arm_eabi_attribute
, 0 },
4857 { "tlsdescseq", s_arm_tls_descseq
, 0 },
4861 /* These are used for dwarf. */
4865 /* These are used for dwarf2. */
4866 { "file", dwarf2_directive_file
, 0 },
4867 { "loc", dwarf2_directive_loc
, 0 },
4868 { "loc_mark_labels", dwarf2_directive_loc_mark_labels
, 0 },
4870 { "extend", float_cons
, 'x' },
4871 { "ldouble", float_cons
, 'x' },
4872 { "packed", float_cons
, 'p' },
4874 {"secrel32", pe_directive_secrel
, 0},
4877 /* These are for compatibility with CodeComposer Studio. */
4878 {"ref", s_ccs_ref
, 0},
4879 {"def", s_ccs_def
, 0},
4880 {"asmfunc", s_ccs_asmfunc
, 0},
4881 {"endasmfunc", s_ccs_endasmfunc
, 0},
4886 /* Parser functions used exclusively in instruction operands. */
4888 /* Generic immediate-value read function for use in insn parsing.
4889 STR points to the beginning of the immediate (the leading #);
4890 VAL receives the value; if the value is outside [MIN, MAX]
4891 issue an error. PREFIX_OPT is true if the immediate prefix is
4895 parse_immediate (char **str
, int *val
, int min
, int max
,
4896 bfd_boolean prefix_opt
)
4900 my_get_expression (&exp
, str
, prefix_opt
? GE_OPT_PREFIX
: GE_IMM_PREFIX
);
4901 if (exp
.X_op
!= O_constant
)
4903 inst
.error
= _("constant expression required");
4907 if (exp
.X_add_number
< min
|| exp
.X_add_number
> max
)
4909 inst
.error
= _("immediate value out of range");
4913 *val
= exp
.X_add_number
;
4917 /* Less-generic immediate-value read function with the possibility of loading a
4918 big (64-bit) immediate, as required by Neon VMOV, VMVN and logic immediate
4919 instructions. Puts the result directly in inst.operands[i]. */
4922 parse_big_immediate (char **str
, int i
, expressionS
*in_exp
,
4923 bfd_boolean allow_symbol_p
)
4926 expressionS
*exp_p
= in_exp
? in_exp
: &exp
;
4929 my_get_expression (exp_p
, &ptr
, GE_OPT_PREFIX_BIG
);
4931 if (exp_p
->X_op
== O_constant
)
4933 inst
.operands
[i
].imm
= exp_p
->X_add_number
& 0xffffffff;
4934 /* If we're on a 64-bit host, then a 64-bit number can be returned using
4935 O_constant. We have to be careful not to break compilation for
4936 32-bit X_add_number, though. */
4937 if ((exp_p
->X_add_number
& ~(offsetT
)(0xffffffffU
)) != 0)
4939 /* X >> 32 is illegal if sizeof (exp_p->X_add_number) == 4. */
4940 inst
.operands
[i
].reg
= (((exp_p
->X_add_number
>> 16) >> 16)
4942 inst
.operands
[i
].regisimm
= 1;
4945 else if (exp_p
->X_op
== O_big
4946 && LITTLENUM_NUMBER_OF_BITS
* exp_p
->X_add_number
> 32)
4948 unsigned parts
= 32 / LITTLENUM_NUMBER_OF_BITS
, j
, idx
= 0;
4950 /* Bignums have their least significant bits in
4951 generic_bignum[0]. Make sure we put 32 bits in imm and
4952 32 bits in reg, in a (hopefully) portable way. */
4953 gas_assert (parts
!= 0);
4955 /* Make sure that the number is not too big.
4956 PR 11972: Bignums can now be sign-extended to the
4957 size of a .octa so check that the out of range bits
4958 are all zero or all one. */
4959 if (LITTLENUM_NUMBER_OF_BITS
* exp_p
->X_add_number
> 64)
4961 LITTLENUM_TYPE m
= -1;
4963 if (generic_bignum
[parts
* 2] != 0
4964 && generic_bignum
[parts
* 2] != m
)
4967 for (j
= parts
* 2 + 1; j
< (unsigned) exp_p
->X_add_number
; j
++)
4968 if (generic_bignum
[j
] != generic_bignum
[j
-1])
4972 inst
.operands
[i
].imm
= 0;
4973 for (j
= 0; j
< parts
; j
++, idx
++)
4974 inst
.operands
[i
].imm
|= generic_bignum
[idx
]
4975 << (LITTLENUM_NUMBER_OF_BITS
* j
);
4976 inst
.operands
[i
].reg
= 0;
4977 for (j
= 0; j
< parts
; j
++, idx
++)
4978 inst
.operands
[i
].reg
|= generic_bignum
[idx
]
4979 << (LITTLENUM_NUMBER_OF_BITS
* j
);
4980 inst
.operands
[i
].regisimm
= 1;
4982 else if (!(exp_p
->X_op
== O_symbol
&& allow_symbol_p
))
4990 /* Returns the pseudo-register number of an FPA immediate constant,
4991 or FAIL if there isn't a valid constant here. */
4994 parse_fpa_immediate (char ** str
)
4996 LITTLENUM_TYPE words
[MAX_LITTLENUMS
];
5002 /* First try and match exact strings, this is to guarantee
5003 that some formats will work even for cross assembly. */
5005 for (i
= 0; fp_const
[i
]; i
++)
5007 if (strncmp (*str
, fp_const
[i
], strlen (fp_const
[i
])) == 0)
5011 *str
+= strlen (fp_const
[i
]);
5012 if (is_end_of_line
[(unsigned char) **str
])
5018 /* Just because we didn't get a match doesn't mean that the constant
5019 isn't valid, just that it is in a format that we don't
5020 automatically recognize. Try parsing it with the standard
5021 expression routines. */
5023 memset (words
, 0, MAX_LITTLENUMS
* sizeof (LITTLENUM_TYPE
));
5025 /* Look for a raw floating point number. */
5026 if ((save_in
= atof_ieee (*str
, 'x', words
)) != NULL
5027 && is_end_of_line
[(unsigned char) *save_in
])
5029 for (i
= 0; i
< NUM_FLOAT_VALS
; i
++)
5031 for (j
= 0; j
< MAX_LITTLENUMS
; j
++)
5033 if (words
[j
] != fp_values
[i
][j
])
5037 if (j
== MAX_LITTLENUMS
)
5045 /* Try and parse a more complex expression, this will probably fail
5046 unless the code uses a floating point prefix (eg "0f"). */
5047 save_in
= input_line_pointer
;
5048 input_line_pointer
= *str
;
5049 if (expression (&exp
) == absolute_section
5050 && exp
.X_op
== O_big
5051 && exp
.X_add_number
< 0)
5053 /* FIXME: 5 = X_PRECISION, should be #define'd where we can use it.
5055 #define X_PRECISION 5
5056 #define E_PRECISION 15L
5057 if (gen_to_words (words
, X_PRECISION
, E_PRECISION
) == 0)
5059 for (i
= 0; i
< NUM_FLOAT_VALS
; i
++)
5061 for (j
= 0; j
< MAX_LITTLENUMS
; j
++)
5063 if (words
[j
] != fp_values
[i
][j
])
5067 if (j
== MAX_LITTLENUMS
)
5069 *str
= input_line_pointer
;
5070 input_line_pointer
= save_in
;
5077 *str
= input_line_pointer
;
5078 input_line_pointer
= save_in
;
5079 inst
.error
= _("invalid FPA immediate expression");
/* Returns 1 if a number has "quarter-precision" float format
   0baBbbbbbc defgh000 00000000 00000000.  */

static int
is_quarter_float (unsigned imm)
{
  /* The expected exponent-field pattern depends on bit 29 (the 'B' bit
     of the aBbbbbbc encoding).  */
  int expected;

  if (imm & 0x20000000)
    expected = 0x3e000000;
  else
    expected = 0x40000000;

  /* The low 19 mantissa bits must be zero...  */
  if ((imm & 0x7ffff) != 0)
    return 0;

  /* ...and bits 25-30 must match the expected pattern exactly.  */
  return ((imm & 0x7e000000) ^ expected) == 0;
}
5094 /* Detect the presence of a floating point or integer zero constant,
5098 parse_ifimm_zero (char **in
)
5102 if (!is_immediate_prefix (**in
))
5104 /* In unified syntax, all prefixes are optional. */
5105 if (!unified_syntax
)
5111 /* Accept #0x0 as a synonym for #0. */
5112 if (strncmp (*in
, "0x", 2) == 0)
5115 if (parse_immediate (in
, &val
, 0, 0, TRUE
) == FAIL
)
5120 error_code
= atof_generic (in
, ".", EXP_CHARS
,
5121 &generic_floating_point_number
);
5124 && generic_floating_point_number
.sign
== '+'
5125 && (generic_floating_point_number
.low
5126 > generic_floating_point_number
.leader
))
5132 /* Parse an 8-bit "quarter-precision" floating point number of the form:
5133 0baBbbbbbc defgh000 00000000 00000000.
5134 The zero and minus-zero cases need special handling, since they can't be
5135 encoded in the "quarter-precision" float format, but can nonetheless be
5136 loaded as integer constants. */
5139 parse_qfloat_immediate (char **ccp
, int *immed
)
5143 LITTLENUM_TYPE words
[MAX_LITTLENUMS
];
5144 int found_fpchar
= 0;
5146 skip_past_char (&str
, '#');
5148 /* We must not accidentally parse an integer as a floating-point number. Make
5149 sure that the value we parse is not an integer by checking for special
5150 characters '.' or 'e'.
5151 FIXME: This is a horrible hack, but doing better is tricky because type
5152 information isn't in a very usable state at parse time. */
5154 skip_whitespace (fpnum
);
5156 if (strncmp (fpnum
, "0x", 2) == 0)
5160 for (; *fpnum
!= '\0' && *fpnum
!= ' ' && *fpnum
!= '\n'; fpnum
++)
5161 if (*fpnum
== '.' || *fpnum
== 'e' || *fpnum
== 'E')
5171 if ((str
= atof_ieee (str
, 's', words
)) != NULL
)
5173 unsigned fpword
= 0;
5176 /* Our FP word must be 32 bits (single-precision FP). */
5177 for (i
= 0; i
< 32 / LITTLENUM_NUMBER_OF_BITS
; i
++)
5179 fpword
<<= LITTLENUM_NUMBER_OF_BITS
;
5183 if (is_quarter_float (fpword
) || (fpword
& 0x7fffffff) == 0)
/* Shift operands.  */
enum shift_kind
{
  SHIFT_LSL, SHIFT_LSR, SHIFT_ASR, SHIFT_ROR, SHIFT_RRX
};

struct asm_shift_name
{
  const char	  *name;
  enum shift_kind kind;
};

/* Third argument to parse_shift.  */
enum parse_shift_mode
{
  NO_SHIFT_RESTRICT,		/* Any kind of shift is accepted.  */
  SHIFT_IMMEDIATE,		/* Shift operand must be an immediate.  */
  SHIFT_LSL_OR_ASR_IMMEDIATE,	/* Shift must be LSL or ASR immediate.  */
  SHIFT_ASR_IMMEDIATE,		/* Shift must be ASR immediate.  */
  SHIFT_LSL_IMMEDIATE,		/* Shift must be LSL immediate.  */
};
5218 /* Parse a <shift> specifier on an ARM data processing instruction.
5219 This has three forms:
5221 (LSL|LSR|ASL|ASR|ROR) Rs
5222 (LSL|LSR|ASL|ASR|ROR) #imm
5225 Note that ASL is assimilated to LSL in the instruction encoding, and
5226 RRX to ROR #0 (which cannot be written as such). */
5229 parse_shift (char **str
, int i
, enum parse_shift_mode mode
)
5231 const struct asm_shift_name
*shift_name
;
5232 enum shift_kind shift
;
5237 for (p
= *str
; ISALPHA (*p
); p
++)
5242 inst
.error
= _("shift expression expected");
5246 shift_name
= (const struct asm_shift_name
*) hash_find_n (arm_shift_hsh
, *str
,
5249 if (shift_name
== NULL
)
5251 inst
.error
= _("shift expression expected");
5255 shift
= shift_name
->kind
;
5259 case NO_SHIFT_RESTRICT
:
5260 case SHIFT_IMMEDIATE
: break;
5262 case SHIFT_LSL_OR_ASR_IMMEDIATE
:
5263 if (shift
!= SHIFT_LSL
&& shift
!= SHIFT_ASR
)
5265 inst
.error
= _("'LSL' or 'ASR' required");
5270 case SHIFT_LSL_IMMEDIATE
:
5271 if (shift
!= SHIFT_LSL
)
5273 inst
.error
= _("'LSL' required");
5278 case SHIFT_ASR_IMMEDIATE
:
5279 if (shift
!= SHIFT_ASR
)
5281 inst
.error
= _("'ASR' required");
5289 if (shift
!= SHIFT_RRX
)
5291 /* Whitespace can appear here if the next thing is a bare digit. */
5292 skip_whitespace (p
);
5294 if (mode
== NO_SHIFT_RESTRICT
5295 && (reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) != FAIL
)
5297 inst
.operands
[i
].imm
= reg
;
5298 inst
.operands
[i
].immisreg
= 1;
5300 else if (my_get_expression (&inst
.relocs
[0].exp
, &p
, GE_IMM_PREFIX
))
5303 inst
.operands
[i
].shift_kind
= shift
;
5304 inst
.operands
[i
].shifted
= 1;
5309 /* Parse a <shifter_operand> for an ARM data processing instruction:
5312 #<immediate>, <rotate>
5316 where <shift> is defined by parse_shift above, and <rotate> is a
5317 multiple of 2 between 0 and 30. Validation of immediate operands
5318 is deferred to md_apply_fix. */
5321 parse_shifter_operand (char **str
, int i
)
5326 if ((value
= arm_reg_parse (str
, REG_TYPE_RN
)) != FAIL
)
5328 inst
.operands
[i
].reg
= value
;
5329 inst
.operands
[i
].isreg
= 1;
5331 /* parse_shift will override this if appropriate */
5332 inst
.relocs
[0].exp
.X_op
= O_constant
;
5333 inst
.relocs
[0].exp
.X_add_number
= 0;
5335 if (skip_past_comma (str
) == FAIL
)
5338 /* Shift operation on register. */
5339 return parse_shift (str
, i
, NO_SHIFT_RESTRICT
);
5342 if (my_get_expression (&inst
.relocs
[0].exp
, str
, GE_IMM_PREFIX
))
5345 if (skip_past_comma (str
) == SUCCESS
)
5347 /* #x, y -- ie explicit rotation by Y. */
5348 if (my_get_expression (&exp
, str
, GE_NO_PREFIX
))
5351 if (exp
.X_op
!= O_constant
|| inst
.relocs
[0].exp
.X_op
!= O_constant
)
5353 inst
.error
= _("constant expression expected");
5357 value
= exp
.X_add_number
;
5358 if (value
< 0 || value
> 30 || value
% 2 != 0)
5360 inst
.error
= _("invalid rotation");
5363 if (inst
.relocs
[0].exp
.X_add_number
< 0
5364 || inst
.relocs
[0].exp
.X_add_number
> 255)
5366 inst
.error
= _("invalid constant");
5370 /* Encode as specified. */
5371 inst
.operands
[i
].imm
= inst
.relocs
[0].exp
.X_add_number
| value
<< 7;
5375 inst
.relocs
[0].type
= BFD_RELOC_ARM_IMMEDIATE
;
5376 inst
.relocs
[0].pc_rel
= 0;
5380 /* Group relocation information. Each entry in the table contains the
5381 textual name of the relocation as may appear in assembler source
5382 and must end with a colon.
5383 Along with this textual name are the relocation codes to be used if
5384 the corresponding instruction is an ALU instruction (ADD or SUB only),
5385 an LDR, an LDRS, or an LDC. */
5387 struct group_reloc_table_entry
5398 /* Varieties of non-ALU group relocation. */
5405 static struct group_reloc_table_entry group_reloc_table
[] =
5406 { /* Program counter relative: */
5408 BFD_RELOC_ARM_ALU_PC_G0_NC
, /* ALU */
5413 BFD_RELOC_ARM_ALU_PC_G0
, /* ALU */
5414 BFD_RELOC_ARM_LDR_PC_G0
, /* LDR */
5415 BFD_RELOC_ARM_LDRS_PC_G0
, /* LDRS */
5416 BFD_RELOC_ARM_LDC_PC_G0
}, /* LDC */
5418 BFD_RELOC_ARM_ALU_PC_G1_NC
, /* ALU */
5423 BFD_RELOC_ARM_ALU_PC_G1
, /* ALU */
5424 BFD_RELOC_ARM_LDR_PC_G1
, /* LDR */
5425 BFD_RELOC_ARM_LDRS_PC_G1
, /* LDRS */
5426 BFD_RELOC_ARM_LDC_PC_G1
}, /* LDC */
5428 BFD_RELOC_ARM_ALU_PC_G2
, /* ALU */
5429 BFD_RELOC_ARM_LDR_PC_G2
, /* LDR */
5430 BFD_RELOC_ARM_LDRS_PC_G2
, /* LDRS */
5431 BFD_RELOC_ARM_LDC_PC_G2
}, /* LDC */
5432 /* Section base relative */
5434 BFD_RELOC_ARM_ALU_SB_G0_NC
, /* ALU */
5439 BFD_RELOC_ARM_ALU_SB_G0
, /* ALU */
5440 BFD_RELOC_ARM_LDR_SB_G0
, /* LDR */
5441 BFD_RELOC_ARM_LDRS_SB_G0
, /* LDRS */
5442 BFD_RELOC_ARM_LDC_SB_G0
}, /* LDC */
5444 BFD_RELOC_ARM_ALU_SB_G1_NC
, /* ALU */
5449 BFD_RELOC_ARM_ALU_SB_G1
, /* ALU */
5450 BFD_RELOC_ARM_LDR_SB_G1
, /* LDR */
5451 BFD_RELOC_ARM_LDRS_SB_G1
, /* LDRS */
5452 BFD_RELOC_ARM_LDC_SB_G1
}, /* LDC */
5454 BFD_RELOC_ARM_ALU_SB_G2
, /* ALU */
5455 BFD_RELOC_ARM_LDR_SB_G2
, /* LDR */
5456 BFD_RELOC_ARM_LDRS_SB_G2
, /* LDRS */
5457 BFD_RELOC_ARM_LDC_SB_G2
}, /* LDC */
5458 /* Absolute thumb alu relocations. */
5460 BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
,/* ALU. */
5465 BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC
,/* ALU. */
5470 BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC
,/* ALU. */
5475 BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
,/* ALU. */
5480 /* Given the address of a pointer pointing to the textual name of a group
5481 relocation as may appear in assembler source, attempt to find its details
5482 in group_reloc_table. The pointer will be updated to the character after
5483 the trailing colon. On failure, FAIL will be returned; SUCCESS
5484 otherwise. On success, *entry will be updated to point at the relevant
5485 group_reloc_table entry. */
5488 find_group_reloc_table_entry (char **str
, struct group_reloc_table_entry
**out
)
5491 for (i
= 0; i
< ARRAY_SIZE (group_reloc_table
); i
++)
5493 int length
= strlen (group_reloc_table
[i
].name
);
5495 if (strncasecmp (group_reloc_table
[i
].name
, *str
, length
) == 0
5496 && (*str
)[length
] == ':')
5498 *out
= &group_reloc_table
[i
];
5499 *str
+= (length
+ 1);
5507 /* Parse a <shifter_operand> for an ARM data processing instruction
5508 (as for parse_shifter_operand) where group relocations are allowed:
5511 #<immediate>, <rotate>
5512 #:<group_reloc>:<expression>
5516 where <group_reloc> is one of the strings defined in group_reloc_table.
5517 The hashes are optional.
5519 Everything else is as for parse_shifter_operand. */
5521 static parse_operand_result
5522 parse_shifter_operand_group_reloc (char **str
, int i
)
5524 /* Determine if we have the sequence of characters #: or just :
5525 coming next. If we do, then we check for a group relocation.
5526 If we don't, punt the whole lot to parse_shifter_operand. */
5528 if (((*str
)[0] == '#' && (*str
)[1] == ':')
5529 || (*str
)[0] == ':')
5531 struct group_reloc_table_entry
*entry
;
5533 if ((*str
)[0] == '#')
5538 /* Try to parse a group relocation. Anything else is an error. */
5539 if (find_group_reloc_table_entry (str
, &entry
) == FAIL
)
5541 inst
.error
= _("unknown group relocation");
5542 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
5545 /* We now have the group relocation table entry corresponding to
5546 the name in the assembler source. Next, we parse the expression. */
5547 if (my_get_expression (&inst
.relocs
[0].exp
, str
, GE_NO_PREFIX
))
5548 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
5550 /* Record the relocation type (always the ALU variant here). */
5551 inst
.relocs
[0].type
= (bfd_reloc_code_real_type
) entry
->alu_code
;
5552 gas_assert (inst
.relocs
[0].type
!= 0);
5554 return PARSE_OPERAND_SUCCESS
;
5557 return parse_shifter_operand (str
, i
) == SUCCESS
5558 ? PARSE_OPERAND_SUCCESS
: PARSE_OPERAND_FAIL
;
5560 /* Never reached. */
5563 /* Parse a Neon alignment expression. Information is written to
5564 inst.operands[i]. We assume the initial ':' has been skipped.
5566 align .imm = align << 8, .immisalign=1, .preind=0 */
5567 static parse_operand_result
5568 parse_neon_alignment (char **str
, int i
)
5573 my_get_expression (&exp
, &p
, GE_NO_PREFIX
);
5575 if (exp
.X_op
!= O_constant
)
5577 inst
.error
= _("alignment must be constant");
5578 return PARSE_OPERAND_FAIL
;
5581 inst
.operands
[i
].imm
= exp
.X_add_number
<< 8;
5582 inst
.operands
[i
].immisalign
= 1;
5583 /* Alignments are not pre-indexes. */
5584 inst
.operands
[i
].preind
= 0;
5587 return PARSE_OPERAND_SUCCESS
;
5590 /* Parse all forms of an ARM address expression. Information is written
5591 to inst.operands[i] and/or inst.relocs[0].
5593 Preindexed addressing (.preind=1):
5595 [Rn, #offset] .reg=Rn .relocs[0].exp=offset
5596 [Rn, +/-Rm] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5597 [Rn, +/-Rm, shift] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5598 .shift_kind=shift .relocs[0].exp=shift_imm
5600 These three may have a trailing ! which causes .writeback to be set also.
5602 Postindexed addressing (.postind=1, .writeback=1):
5604 [Rn], #offset .reg=Rn .relocs[0].exp=offset
5605 [Rn], +/-Rm .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5606 [Rn], +/-Rm, shift .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5607 .shift_kind=shift .relocs[0].exp=shift_imm
5609 Unindexed addressing (.preind=0, .postind=0):
5611 [Rn], {option} .reg=Rn .imm=option .immisreg=0
5615 [Rn]{!} shorthand for [Rn,#0]{!}
5616 =immediate .isreg=0 .relocs[0].exp=immediate
5617 label .reg=PC .relocs[0].pc_rel=1 .relocs[0].exp=label
5619 It is the caller's responsibility to check for addressing modes not
5620 supported by the instruction, and to set inst.relocs[0].type. */
5622 static parse_operand_result
5623 parse_address_main (char **str
, int i
, int group_relocations
,
5624 group_reloc_type group_type
)
5629 if (skip_past_char (&p
, '[') == FAIL
)
5631 if (skip_past_char (&p
, '=') == FAIL
)
5633 /* Bare address - translate to PC-relative offset. */
5634 inst
.relocs
[0].pc_rel
= 1;
5635 inst
.operands
[i
].reg
= REG_PC
;
5636 inst
.operands
[i
].isreg
= 1;
5637 inst
.operands
[i
].preind
= 1;
5639 if (my_get_expression (&inst
.relocs
[0].exp
, &p
, GE_OPT_PREFIX_BIG
))
5640 return PARSE_OPERAND_FAIL
;
5642 else if (parse_big_immediate (&p
, i
, &inst
.relocs
[0].exp
,
5643 /*allow_symbol_p=*/TRUE
))
5644 return PARSE_OPERAND_FAIL
;
5647 return PARSE_OPERAND_SUCCESS
;
5650 /* PR gas/14887: Allow for whitespace after the opening bracket. */
5651 skip_whitespace (p
);
5653 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) == FAIL
)
5655 inst
.error
= _(reg_expected_msgs
[REG_TYPE_RN
]);
5656 return PARSE_OPERAND_FAIL
;
5658 inst
.operands
[i
].reg
= reg
;
5659 inst
.operands
[i
].isreg
= 1;
5661 if (skip_past_comma (&p
) == SUCCESS
)
5663 inst
.operands
[i
].preind
= 1;
5666 else if (*p
== '-') p
++, inst
.operands
[i
].negative
= 1;
5668 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) != FAIL
)
5670 inst
.operands
[i
].imm
= reg
;
5671 inst
.operands
[i
].immisreg
= 1;
5673 if (skip_past_comma (&p
) == SUCCESS
)
5674 if (parse_shift (&p
, i
, SHIFT_IMMEDIATE
) == FAIL
)
5675 return PARSE_OPERAND_FAIL
;
5677 else if (skip_past_char (&p
, ':') == SUCCESS
)
5679 /* FIXME: '@' should be used here, but it's filtered out by generic
5680 code before we get to see it here. This may be subject to
5682 parse_operand_result result
= parse_neon_alignment (&p
, i
);
5684 if (result
!= PARSE_OPERAND_SUCCESS
)
5689 if (inst
.operands
[i
].negative
)
5691 inst
.operands
[i
].negative
= 0;
5695 if (group_relocations
5696 && ((*p
== '#' && *(p
+ 1) == ':') || *p
== ':'))
5698 struct group_reloc_table_entry
*entry
;
5700 /* Skip over the #: or : sequence. */
5706 /* Try to parse a group relocation. Anything else is an
5708 if (find_group_reloc_table_entry (&p
, &entry
) == FAIL
)
5710 inst
.error
= _("unknown group relocation");
5711 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
5714 /* We now have the group relocation table entry corresponding to
5715 the name in the assembler source. Next, we parse the
5717 if (my_get_expression (&inst
.relocs
[0].exp
, &p
, GE_NO_PREFIX
))
5718 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
5720 /* Record the relocation type. */
5725 = (bfd_reloc_code_real_type
) entry
->ldr_code
;
5730 = (bfd_reloc_code_real_type
) entry
->ldrs_code
;
5735 = (bfd_reloc_code_real_type
) entry
->ldc_code
;
5742 if (inst
.relocs
[0].type
== 0)
5744 inst
.error
= _("this group relocation is not allowed on this instruction");
5745 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
5752 if (my_get_expression (&inst
.relocs
[0].exp
, &p
, GE_IMM_PREFIX
))
5753 return PARSE_OPERAND_FAIL
;
5754 /* If the offset is 0, find out if it's a +0 or -0. */
5755 if (inst
.relocs
[0].exp
.X_op
== O_constant
5756 && inst
.relocs
[0].exp
.X_add_number
== 0)
5758 skip_whitespace (q
);
5762 skip_whitespace (q
);
5765 inst
.operands
[i
].negative
= 1;
5770 else if (skip_past_char (&p
, ':') == SUCCESS
)
5772 /* FIXME: '@' should be used here, but it's filtered out by generic code
5773 before we get to see it here. This may be subject to change. */
5774 parse_operand_result result
= parse_neon_alignment (&p
, i
);
5776 if (result
!= PARSE_OPERAND_SUCCESS
)
5780 if (skip_past_char (&p
, ']') == FAIL
)
5782 inst
.error
= _("']' expected");
5783 return PARSE_OPERAND_FAIL
;
5786 if (skip_past_char (&p
, '!') == SUCCESS
)
5787 inst
.operands
[i
].writeback
= 1;
5789 else if (skip_past_comma (&p
) == SUCCESS
)
5791 if (skip_past_char (&p
, '{') == SUCCESS
)
5793 /* [Rn], {expr} - unindexed, with option */
5794 if (parse_immediate (&p
, &inst
.operands
[i
].imm
,
5795 0, 255, TRUE
) == FAIL
)
5796 return PARSE_OPERAND_FAIL
;
5798 if (skip_past_char (&p
, '}') == FAIL
)
5800 inst
.error
= _("'}' expected at end of 'option' field");
5801 return PARSE_OPERAND_FAIL
;
5803 if (inst
.operands
[i
].preind
)
5805 inst
.error
= _("cannot combine index with option");
5806 return PARSE_OPERAND_FAIL
;
5809 return PARSE_OPERAND_SUCCESS
;
5813 inst
.operands
[i
].postind
= 1;
5814 inst
.operands
[i
].writeback
= 1;
5816 if (inst
.operands
[i
].preind
)
5818 inst
.error
= _("cannot combine pre- and post-indexing");
5819 return PARSE_OPERAND_FAIL
;
5823 else if (*p
== '-') p
++, inst
.operands
[i
].negative
= 1;
5825 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) != FAIL
)
5827 /* We might be using the immediate for alignment already. If we
5828 are, OR the register number into the low-order bits. */
5829 if (inst
.operands
[i
].immisalign
)
5830 inst
.operands
[i
].imm
|= reg
;
5832 inst
.operands
[i
].imm
= reg
;
5833 inst
.operands
[i
].immisreg
= 1;
5835 if (skip_past_comma (&p
) == SUCCESS
)
5836 if (parse_shift (&p
, i
, SHIFT_IMMEDIATE
) == FAIL
)
5837 return PARSE_OPERAND_FAIL
;
5843 if (inst
.operands
[i
].negative
)
5845 inst
.operands
[i
].negative
= 0;
5848 if (my_get_expression (&inst
.relocs
[0].exp
, &p
, GE_IMM_PREFIX
))
5849 return PARSE_OPERAND_FAIL
;
5850 /* If the offset is 0, find out if it's a +0 or -0. */
5851 if (inst
.relocs
[0].exp
.X_op
== O_constant
5852 && inst
.relocs
[0].exp
.X_add_number
== 0)
5854 skip_whitespace (q
);
5858 skip_whitespace (q
);
5861 inst
.operands
[i
].negative
= 1;
5867 /* If at this point neither .preind nor .postind is set, we have a
5868 bare [Rn]{!}, which is shorthand for [Rn,#0]{!}. */
5869 if (inst
.operands
[i
].preind
== 0 && inst
.operands
[i
].postind
== 0)
5871 inst
.operands
[i
].preind
= 1;
5872 inst
.relocs
[0].exp
.X_op
= O_constant
;
5873 inst
.relocs
[0].exp
.X_add_number
= 0;
5876 return PARSE_OPERAND_SUCCESS
;
5880 parse_address (char **str
, int i
)
5882 return parse_address_main (str
, i
, 0, GROUP_LDR
) == PARSE_OPERAND_SUCCESS
5886 static parse_operand_result
5887 parse_address_group_reloc (char **str
, int i
, group_reloc_type type
)
5889 return parse_address_main (str
, i
, 1, type
);
5892 /* Parse an operand for a MOVW or MOVT instruction. */
5894 parse_half (char **str
)
5899 skip_past_char (&p
, '#');
5900 if (strncasecmp (p
, ":lower16:", 9) == 0)
5901 inst
.relocs
[0].type
= BFD_RELOC_ARM_MOVW
;
5902 else if (strncasecmp (p
, ":upper16:", 9) == 0)
5903 inst
.relocs
[0].type
= BFD_RELOC_ARM_MOVT
;
5905 if (inst
.relocs
[0].type
!= BFD_RELOC_UNUSED
)
5908 skip_whitespace (p
);
5911 if (my_get_expression (&inst
.relocs
[0].exp
, &p
, GE_NO_PREFIX
))
5914 if (inst
.relocs
[0].type
== BFD_RELOC_UNUSED
)
5916 if (inst
.relocs
[0].exp
.X_op
!= O_constant
)
5918 inst
.error
= _("constant expression expected");
5921 if (inst
.relocs
[0].exp
.X_add_number
< 0
5922 || inst
.relocs
[0].exp
.X_add_number
> 0xffff)
5924 inst
.error
= _("immediate value out of range");
5932 /* Miscellaneous. */
5934 /* Parse a PSR flag operand. The value returned is FAIL on syntax error,
5935 or a bitmask suitable to be or-ed into the ARM msr instruction. */
5937 parse_psr (char **str
, bfd_boolean lhs
)
5940 unsigned long psr_field
;
5941 const struct asm_psr
*psr
;
5943 bfd_boolean is_apsr
= FALSE
;
5944 bfd_boolean m_profile
= ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_m
);
5946 /* PR gas/12698: If the user has specified -march=all then m_profile will
5947 be TRUE, but we want to ignore it in this case as we are building for any
5948 CPU type, including non-m variants. */
5949 if (ARM_FEATURE_CORE_EQUAL (selected_cpu
, arm_arch_any
))
5952 /* CPSR's and SPSR's can now be lowercase. This is just a convenience
5953 feature for ease of use and backwards compatibility. */
5955 if (strncasecmp (p
, "SPSR", 4) == 0)
5958 goto unsupported_psr
;
5960 psr_field
= SPSR_BIT
;
5962 else if (strncasecmp (p
, "CPSR", 4) == 0)
5965 goto unsupported_psr
;
5969 else if (strncasecmp (p
, "APSR", 4) == 0)
5971 /* APSR[_<bits>] can be used as a synonym for CPSR[_<flags>] on ARMv7-A
5972 and ARMv7-R architecture CPUs. */
5981 while (ISALNUM (*p
) || *p
== '_');
5983 if (strncasecmp (start
, "iapsr", 5) == 0
5984 || strncasecmp (start
, "eapsr", 5) == 0
5985 || strncasecmp (start
, "xpsr", 4) == 0
5986 || strncasecmp (start
, "psr", 3) == 0)
5987 p
= start
+ strcspn (start
, "rR") + 1;
5989 psr
= (const struct asm_psr
*) hash_find_n (arm_v7m_psr_hsh
, start
,
5995 /* If APSR is being written, a bitfield may be specified. Note that
5996 APSR itself is handled above. */
5997 if (psr
->field
<= 3)
5999 psr_field
= psr
->field
;
6005 /* M-profile MSR instructions have the mask field set to "10", except
6006 *PSR variants which modify APSR, which may use a different mask (and
6007 have been handled already). Do that by setting the PSR_f field
6009 return psr
->field
| (lhs
? PSR_f
: 0);
6012 goto unsupported_psr
;
6018 /* A suffix follows. */
6024 while (ISALNUM (*p
) || *p
== '_');
6028 /* APSR uses a notation for bits, rather than fields. */
6029 unsigned int nzcvq_bits
= 0;
6030 unsigned int g_bit
= 0;
6033 for (bit
= start
; bit
!= p
; bit
++)
6035 switch (TOLOWER (*bit
))
6038 nzcvq_bits
|= (nzcvq_bits
& 0x01) ? 0x20 : 0x01;
6042 nzcvq_bits
|= (nzcvq_bits
& 0x02) ? 0x20 : 0x02;
6046 nzcvq_bits
|= (nzcvq_bits
& 0x04) ? 0x20 : 0x04;
6050 nzcvq_bits
|= (nzcvq_bits
& 0x08) ? 0x20 : 0x08;
6054 nzcvq_bits
|= (nzcvq_bits
& 0x10) ? 0x20 : 0x10;
6058 g_bit
|= (g_bit
& 0x1) ? 0x2 : 0x1;
6062 inst
.error
= _("unexpected bit specified after APSR");
6067 if (nzcvq_bits
== 0x1f)
6072 if (!ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6_dsp
))
6074 inst
.error
= _("selected processor does not "
6075 "support DSP extension");
6082 if ((nzcvq_bits
& 0x20) != 0
6083 || (nzcvq_bits
!= 0x1f && nzcvq_bits
!= 0)
6084 || (g_bit
& 0x2) != 0)
6086 inst
.error
= _("bad bitmask specified after APSR");
6092 psr
= (const struct asm_psr
*) hash_find_n (arm_psr_hsh
, start
,
6097 psr_field
|= psr
->field
;
6103 goto error
; /* Garbage after "[CS]PSR". */
6105 /* Unadorned APSR is equivalent to APSR_nzcvq/CPSR_f (for writes). This
6106 is deprecated, but allow it anyway. */
6110 as_tsktsk (_("writing to APSR without specifying a bitmask is "
6113 else if (!m_profile
)
6114 /* These bits are never right for M-profile devices: don't set them
6115 (only code paths which read/write APSR reach here). */
6116 psr_field
|= (PSR_c
| PSR_f
);
6122 inst
.error
= _("selected processor does not support requested special "
6123 "purpose register");
6127 inst
.error
= _("flag for {c}psr instruction expected");
6132 parse_sys_vldr_vstr (char **str
)
6141 {"FPSCR", 0x1, 0x0},
6142 {"FPSCR_nzcvqc", 0x2, 0x0},
6145 {"FPCXTNS", 0x6, 0x1},
6146 {"FPCXTS", 0x7, 0x1}
6148 char *op_end
= strchr (*str
, ',');
6149 size_t op_strlen
= op_end
- *str
;
6151 for (i
= 0; i
< sizeof (sysregs
) / sizeof (sysregs
[0]); i
++)
6153 if (!strncmp (*str
, sysregs
[i
].name
, op_strlen
))
6155 val
= sysregs
[i
].regl
| (sysregs
[i
].regh
<< 3);
6164 /* Parse the flags argument to CPSI[ED]. Returns FAIL on error, or a
6165 value suitable for splatting into the AIF field of the instruction. */
6168 parse_cps_flags (char **str
)
6177 case '\0': case ',':
6180 case 'a': case 'A': saw_a_flag
= 1; val
|= 0x4; break;
6181 case 'i': case 'I': saw_a_flag
= 1; val
|= 0x2; break;
6182 case 'f': case 'F': saw_a_flag
= 1; val
|= 0x1; break;
6185 inst
.error
= _("unrecognized CPS flag");
6190 if (saw_a_flag
== 0)
6192 inst
.error
= _("missing CPS flags");
6200 /* Parse an endian specifier ("BE" or "LE", case insensitive);
6201 returns 0 for big-endian, 1 for little-endian, FAIL for an error. */
6204 parse_endian_specifier (char **str
)
6209 if (strncasecmp (s
, "BE", 2))
6211 else if (strncasecmp (s
, "LE", 2))
6215 inst
.error
= _("valid endian specifiers are be or le");
6219 if (ISALNUM (s
[2]) || s
[2] == '_')
6221 inst
.error
= _("valid endian specifiers are be or le");
6226 return little_endian
;
6229 /* Parse a rotation specifier: ROR #0, #8, #16, #24. *val receives a
6230 value suitable for poking into the rotate field of an sxt or sxta
6231 instruction, or FAIL on error. */
6234 parse_ror (char **str
)
6239 if (strncasecmp (s
, "ROR", 3) == 0)
6243 inst
.error
= _("missing rotation field after comma");
6247 if (parse_immediate (&s
, &rot
, 0, 24, FALSE
) == FAIL
)
6252 case 0: *str
= s
; return 0x0;
6253 case 8: *str
= s
; return 0x1;
6254 case 16: *str
= s
; return 0x2;
6255 case 24: *str
= s
; return 0x3;
6258 inst
.error
= _("rotation can only be 0, 8, 16, or 24");
6263 /* Parse a conditional code (from conds[] below). The value returned is in the
6264 range 0 .. 14, or FAIL. */
6266 parse_cond (char **str
)
6269 const struct asm_cond
*c
;
6271 /* Condition codes are always 2 characters, so matching up to
6272 3 characters is sufficient. */
6277 while (ISALPHA (*q
) && n
< 3)
6279 cond
[n
] = TOLOWER (*q
);
6284 c
= (const struct asm_cond
*) hash_find_n (arm_cond_hsh
, cond
, n
);
6287 inst
.error
= _("condition required");
6295 /* Record a use of the given feature. */
6297 record_feature_use (const arm_feature_set
*feature
)
6300 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
, *feature
);
6302 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
, *feature
);
6305 /* If the given feature is currently allowed, mark it as used and return TRUE.
6306 Return FALSE otherwise. */
6308 mark_feature_used (const arm_feature_set
*feature
)
6310 /* Ensure the option is currently allowed. */
6311 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, *feature
))
6314 /* Add the appropriate architecture feature for the barrier option used. */
6315 record_feature_use (feature
);
6320 /* Parse an option for a barrier instruction. Returns the encoding for the
6323 parse_barrier (char **str
)
6326 const struct asm_barrier_opt
*o
;
6329 while (ISALPHA (*q
))
6332 o
= (const struct asm_barrier_opt
*) hash_find_n (arm_barrier_opt_hsh
, p
,
6337 if (!mark_feature_used (&o
->arch
))
6344 /* Parse the operands of a table branch instruction. Similar to a memory
6347 parse_tb (char **str
)
6352 if (skip_past_char (&p
, '[') == FAIL
)
6354 inst
.error
= _("'[' expected");
6358 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) == FAIL
)
6360 inst
.error
= _(reg_expected_msgs
[REG_TYPE_RN
]);
6363 inst
.operands
[0].reg
= reg
;
6365 if (skip_past_comma (&p
) == FAIL
)
6367 inst
.error
= _("',' expected");
6371 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) == FAIL
)
6373 inst
.error
= _(reg_expected_msgs
[REG_TYPE_RN
]);
6376 inst
.operands
[0].imm
= reg
;
6378 if (skip_past_comma (&p
) == SUCCESS
)
6380 if (parse_shift (&p
, 0, SHIFT_LSL_IMMEDIATE
) == FAIL
)
6382 if (inst
.relocs
[0].exp
.X_add_number
!= 1)
6384 inst
.error
= _("invalid shift");
6387 inst
.operands
[0].shifted
= 1;
6390 if (skip_past_char (&p
, ']') == FAIL
)
6392 inst
.error
= _("']' expected");
6399 /* Parse the operands of a Neon VMOV instruction. See do_neon_mov for more
6400 information on the types the operands can take and how they are encoded.
6401 Up to four operands may be read; this function handles setting the
6402 ".present" field for each read operand itself.
6403 Updates STR and WHICH_OPERAND if parsing is successful and returns SUCCESS,
6404 else returns FAIL. */
6407 parse_neon_mov (char **str
, int *which_operand
)
6409 int i
= *which_operand
, val
;
6410 enum arm_reg_type rtype
;
6412 struct neon_type_el optype
;
6414 if ((val
= parse_scalar (&ptr
, 8, &optype
)) != FAIL
)
6416 /* Case 4: VMOV<c><q>.<size> <Dn[x]>, <Rd>. */
6417 inst
.operands
[i
].reg
= val
;
6418 inst
.operands
[i
].isscalar
= 1;
6419 inst
.operands
[i
].vectype
= optype
;
6420 inst
.operands
[i
++].present
= 1;
6422 if (skip_past_comma (&ptr
) == FAIL
)
6425 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) == FAIL
)
6428 inst
.operands
[i
].reg
= val
;
6429 inst
.operands
[i
].isreg
= 1;
6430 inst
.operands
[i
].present
= 1;
6432 else if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_NSDQ
, &rtype
, &optype
))
6435 /* Cases 0, 1, 2, 3, 5 (D only). */
6436 if (skip_past_comma (&ptr
) == FAIL
)
6439 inst
.operands
[i
].reg
= val
;
6440 inst
.operands
[i
].isreg
= 1;
6441 inst
.operands
[i
].isquad
= (rtype
== REG_TYPE_NQ
);
6442 inst
.operands
[i
].issingle
= (rtype
== REG_TYPE_VFS
);
6443 inst
.operands
[i
].isvec
= 1;
6444 inst
.operands
[i
].vectype
= optype
;
6445 inst
.operands
[i
++].present
= 1;
6447 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) != FAIL
)
6449 /* Case 5: VMOV<c><q> <Dm>, <Rd>, <Rn>.
6450 Case 13: VMOV <Sd>, <Rm> */
6451 inst
.operands
[i
].reg
= val
;
6452 inst
.operands
[i
].isreg
= 1;
6453 inst
.operands
[i
].present
= 1;
6455 if (rtype
== REG_TYPE_NQ
)
6457 first_error (_("can't use Neon quad register here"));
6460 else if (rtype
!= REG_TYPE_VFS
)
6463 if (skip_past_comma (&ptr
) == FAIL
)
6465 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) == FAIL
)
6467 inst
.operands
[i
].reg
= val
;
6468 inst
.operands
[i
].isreg
= 1;
6469 inst
.operands
[i
].present
= 1;
6472 else if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_NSDQ
, &rtype
,
6475 /* Case 0: VMOV<c><q> <Qd>, <Qm>
6476 Case 1: VMOV<c><q> <Dd>, <Dm>
6477 Case 8: VMOV.F32 <Sd>, <Sm>
6478 Case 15: VMOV <Sd>, <Se>, <Rn>, <Rm> */
6480 inst
.operands
[i
].reg
= val
;
6481 inst
.operands
[i
].isreg
= 1;
6482 inst
.operands
[i
].isquad
= (rtype
== REG_TYPE_NQ
);
6483 inst
.operands
[i
].issingle
= (rtype
== REG_TYPE_VFS
);
6484 inst
.operands
[i
].isvec
= 1;
6485 inst
.operands
[i
].vectype
= optype
;
6486 inst
.operands
[i
].present
= 1;
6488 if (skip_past_comma (&ptr
) == SUCCESS
)
6493 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) == FAIL
)
6496 inst
.operands
[i
].reg
= val
;
6497 inst
.operands
[i
].isreg
= 1;
6498 inst
.operands
[i
++].present
= 1;
6500 if (skip_past_comma (&ptr
) == FAIL
)
6503 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) == FAIL
)
6506 inst
.operands
[i
].reg
= val
;
6507 inst
.operands
[i
].isreg
= 1;
6508 inst
.operands
[i
].present
= 1;
6511 else if (parse_qfloat_immediate (&ptr
, &inst
.operands
[i
].imm
) == SUCCESS
)
6512 /* Case 2: VMOV<c><q>.<dt> <Qd>, #<float-imm>
6513 Case 3: VMOV<c><q>.<dt> <Dd>, #<float-imm>
6514 Case 10: VMOV.F32 <Sd>, #<imm>
6515 Case 11: VMOV.F64 <Dd>, #<imm> */
6516 inst
.operands
[i
].immisfloat
= 1;
6517 else if (parse_big_immediate (&ptr
, i
, NULL
, /*allow_symbol_p=*/FALSE
)
6519 /* Case 2: VMOV<c><q>.<dt> <Qd>, #<imm>
6520 Case 3: VMOV<c><q>.<dt> <Dd>, #<imm> */
6524 first_error (_("expected <Rm> or <Dm> or <Qm> operand"));
6528 else if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) != FAIL
)
6531 inst
.operands
[i
].reg
= val
;
6532 inst
.operands
[i
].isreg
= 1;
6533 inst
.operands
[i
++].present
= 1;
6535 if (skip_past_comma (&ptr
) == FAIL
)
6538 if ((val
= parse_scalar (&ptr
, 8, &optype
)) != FAIL
)
6540 /* Case 6: VMOV<c><q>.<dt> <Rd>, <Dn[x]> */
6541 inst
.operands
[i
].reg
= val
;
6542 inst
.operands
[i
].isscalar
= 1;
6543 inst
.operands
[i
].present
= 1;
6544 inst
.operands
[i
].vectype
= optype
;
6546 else if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) != FAIL
)
6548 /* Case 7: VMOV<c><q> <Rd>, <Rn>, <Dm> */
6549 inst
.operands
[i
].reg
= val
;
6550 inst
.operands
[i
].isreg
= 1;
6551 inst
.operands
[i
++].present
= 1;
6553 if (skip_past_comma (&ptr
) == FAIL
)
6556 if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_VFSD
, &rtype
, &optype
))
6559 first_error (_(reg_expected_msgs
[REG_TYPE_VFSD
]));
6563 inst
.operands
[i
].reg
= val
;
6564 inst
.operands
[i
].isreg
= 1;
6565 inst
.operands
[i
].isvec
= 1;
6566 inst
.operands
[i
].issingle
= (rtype
== REG_TYPE_VFS
);
6567 inst
.operands
[i
].vectype
= optype
;
6568 inst
.operands
[i
].present
= 1;
6570 if (rtype
== REG_TYPE_VFS
)
6574 if (skip_past_comma (&ptr
) == FAIL
)
6576 if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_VFS
, NULL
,
6579 first_error (_(reg_expected_msgs
[REG_TYPE_VFS
]));
6582 inst
.operands
[i
].reg
= val
;
6583 inst
.operands
[i
].isreg
= 1;
6584 inst
.operands
[i
].isvec
= 1;
6585 inst
.operands
[i
].issingle
= 1;
6586 inst
.operands
[i
].vectype
= optype
;
6587 inst
.operands
[i
].present
= 1;
6590 else if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_VFS
, NULL
, &optype
))
6594 inst
.operands
[i
].reg
= val
;
6595 inst
.operands
[i
].isreg
= 1;
6596 inst
.operands
[i
].isvec
= 1;
6597 inst
.operands
[i
].issingle
= 1;
6598 inst
.operands
[i
].vectype
= optype
;
6599 inst
.operands
[i
].present
= 1;
6604 first_error (_("parse error"));
6608 /* Successfully parsed the operands. Update args. */
6614 first_error (_("expected comma"));
6618 first_error (_(reg_expected_msgs
[REG_TYPE_RN
]));
6622 /* Use this macro when the operand constraints are different
6623 for ARM and THUMB (e.g. ldrd). */
6624 #define MIX_ARM_THUMB_OPERANDS(arm_operand, thumb_operand) \
6625 ((arm_operand) | ((thumb_operand) << 16))
6627 /* Matcher codes for parse_operands. */
6628 enum operand_parse_code
6630 OP_stop
, /* end of line */
6632 OP_RR
, /* ARM register */
6633 OP_RRnpc
, /* ARM register, not r15 */
6634 OP_RRnpcsp
, /* ARM register, neither r15 nor r13 (a.k.a. 'BadReg') */
6635 OP_RRnpcb
, /* ARM register, not r15, in square brackets */
6636 OP_RRnpctw
, /* ARM register, not r15 in Thumb-state or with writeback,
6637 optional trailing ! */
6638 OP_RRw
, /* ARM register, not r15, optional trailing ! */
6639 OP_RCP
, /* Coprocessor number */
6640 OP_RCN
, /* Coprocessor register */
6641 OP_RF
, /* FPA register */
6642 OP_RVS
, /* VFP single precision register */
6643 OP_RVD
, /* VFP double precision register (0..15) */
6644 OP_RND
, /* Neon double precision register (0..31) */
6645 OP_RNQ
, /* Neon quad precision register */
6646 OP_RVSD
, /* VFP single or double precision register */
6647 OP_RNSD
, /* Neon single or double precision register */
6648 OP_RNDQ
, /* Neon double or quad precision register */
6649 OP_RNSDQ
, /* Neon single, double or quad precision register */
6650 OP_RNSC
, /* Neon scalar D[X] */
6651 OP_RVC
, /* VFP control register */
6652 OP_RMF
, /* Maverick F register */
6653 OP_RMD
, /* Maverick D register */
6654 OP_RMFX
, /* Maverick FX register */
6655 OP_RMDX
, /* Maverick DX register */
6656 OP_RMAX
, /* Maverick AX register */
6657 OP_RMDS
, /* Maverick DSPSC register */
6658 OP_RIWR
, /* iWMMXt wR register */
6659 OP_RIWC
, /* iWMMXt wC register */
6660 OP_RIWG
, /* iWMMXt wCG register */
6661 OP_RXA
, /* XScale accumulator register */
6663 /* New operands for Armv8.1-M Mainline. */
6664 OP_LR
, /* ARM LR register */
6665 OP_RRnpcsp_I32
, /* ARM register (no BadReg) or literal 1 .. 32 */
6667 OP_REGLST
, /* ARM register list */
6668 OP_CLRMLST
, /* CLRM register list */
6669 OP_VRSLST
, /* VFP single-precision register list */
6670 OP_VRDLST
, /* VFP double-precision register list */
6671 OP_VRSDLST
, /* VFP single or double-precision register list (& quad) */
6672 OP_NRDLST
, /* Neon double-precision register list (d0-d31, qN aliases) */
6673 OP_NSTRLST
, /* Neon element/structure list */
6674 OP_VRSDVLST
, /* VFP single or double-precision register list and VPR */
6676 OP_RNDQ_I0
, /* Neon D or Q reg, or immediate zero. */
6677 OP_RVSD_I0
, /* VFP S or D reg, or immediate zero. */
6678 OP_RSVD_FI0
, /* VFP S or D reg, or floating point immediate zero. */
6679 OP_RR_RNSC
, /* ARM reg or Neon scalar. */
6680 OP_RNSD_RNSC
, /* Neon S or D reg, or Neon scalar. */
6681 OP_RNSDQ_RNSC
, /* Vector S, D or Q reg, or Neon scalar. */
6682 OP_RNDQ_RNSC
, /* Neon D or Q reg, or Neon scalar. */
6683 OP_RND_RNSC
, /* Neon D reg, or Neon scalar. */
6684 OP_VMOV
, /* Neon VMOV operands. */
6685 OP_RNDQ_Ibig
, /* Neon D or Q reg, or big immediate for logic and VMVN. */
6686 OP_RNDQ_I63b
, /* Neon D or Q reg, or immediate for shift. */
6687 OP_RIWR_I32z
, /* iWMMXt wR register, or immediate 0 .. 32 for iWMMXt2. */
6688 OP_VLDR
, /* VLDR operand. */
6690 OP_I0
, /* immediate zero */
6691 OP_I7
, /* immediate value 0 .. 7 */
6692 OP_I15
, /* 0 .. 15 */
6693 OP_I16
, /* 1 .. 16 */
6694 OP_I16z
, /* 0 .. 16 */
6695 OP_I31
, /* 0 .. 31 */
6696 OP_I31w
, /* 0 .. 31, optional trailing ! */
6697 OP_I32
, /* 1 .. 32 */
6698 OP_I32z
, /* 0 .. 32 */
6699 OP_I63
, /* 0 .. 63 */
6700 OP_I63s
, /* -64 .. 63 */
6701 OP_I64
, /* 1 .. 64 */
6702 OP_I64z
, /* 0 .. 64 */
6703 OP_I255
, /* 0 .. 255 */
6705 OP_I4b
, /* immediate, prefix optional, 1 .. 4 */
6706 OP_I7b
, /* 0 .. 7 */
6707 OP_I15b
, /* 0 .. 15 */
6708 OP_I31b
, /* 0 .. 31 */
6710 OP_SH
, /* shifter operand */
6711 OP_SHG
, /* shifter operand with possible group relocation */
6712 OP_ADDR
, /* Memory address expression (any mode) */
6713 OP_ADDRGLDR
, /* Mem addr expr (any mode) with possible LDR group reloc */
6714 OP_ADDRGLDRS
, /* Mem addr expr (any mode) with possible LDRS group reloc */
6715 OP_ADDRGLDC
, /* Mem addr expr (any mode) with possible LDC group reloc */
6716 OP_EXP
, /* arbitrary expression */
6717 OP_EXPi
, /* same, with optional immediate prefix */
6718 OP_EXPr
, /* same, with optional relocation suffix */
6719 OP_EXPs
, /* same, with optional non-first operand relocation suffix */
6720 OP_HALF
, /* 0 .. 65535 or low/high reloc. */
6721 OP_IROT1
, /* VCADD rotate immediate: 90, 270. */
6722 OP_IROT2
, /* VCMLA rotate immediate: 0, 90, 180, 270. */
6724 OP_CPSF
, /* CPS flags */
6725 OP_ENDI
, /* Endianness specifier */
6726 OP_wPSR
, /* CPSR/SPSR/APSR mask for msr (writing). */
6727 OP_rPSR
, /* CPSR/SPSR/APSR mask for msr (reading). */
6728 OP_COND
, /* conditional code */
6729 OP_TB
, /* Table branch. */
6731 OP_APSR_RR
, /* ARM register or "APSR_nzcv". */
6733 OP_RRnpc_I0
, /* ARM register or literal 0 */
6734 OP_RR_EXr
, /* ARM register or expression with opt. reloc stuff. */
6735 OP_RR_EXi
, /* ARM register or expression with imm prefix */
6736 OP_RF_IF
, /* FPA register or immediate */
6737 OP_RIWR_RIWC
, /* iWMMXt R or C reg */
6738 OP_RIWC_RIWG
, /* iWMMXt wC or wCG reg */
6740 /* Optional operands. */
6741 OP_oI7b
, /* immediate, prefix optional, 0 .. 7 */
6742 OP_oI31b
, /* 0 .. 31 */
6743 OP_oI32b
, /* 1 .. 32 */
6744 OP_oI32z
, /* 0 .. 32 */
6745 OP_oIffffb
, /* 0 .. 65535 */
6746 OP_oI255c
, /* curly-brace enclosed, 0 .. 255 */
6748 OP_oRR
, /* ARM register */
6749 OP_oLR
, /* ARM LR register */
6750 OP_oRRnpc
, /* ARM register, not the PC */
6751 OP_oRRnpcsp
, /* ARM register, neither the PC nor the SP (a.k.a. BadReg) */
6752 OP_oRRw
, /* ARM register, not r15, optional trailing ! */
6753 OP_oRND
, /* Optional Neon double precision register */
6754 OP_oRNQ
, /* Optional Neon quad precision register */
6755 OP_oRNDQ
, /* Optional Neon double or quad precision register */
6756 OP_oRNSDQ
, /* Optional single, double or quad precision vector register */
6757 OP_oSHll
, /* LSL immediate */
6758 OP_oSHar
, /* ASR immediate */
6759 OP_oSHllar
, /* LSL or ASR immediate */
6760 OP_oROR
, /* ROR 0/8/16/24 */
6761 OP_oBARRIER_I15
, /* Option argument for a barrier instruction. */
6763 /* Some pre-defined mixed (ARM/THUMB) operands. */
6764 OP_RR_npcsp
= MIX_ARM_THUMB_OPERANDS (OP_RR
, OP_RRnpcsp
),
6765 OP_RRnpc_npcsp
= MIX_ARM_THUMB_OPERANDS (OP_RRnpc
, OP_RRnpcsp
),
6766 OP_oRRnpc_npcsp
= MIX_ARM_THUMB_OPERANDS (OP_oRRnpc
, OP_oRRnpcsp
),
6768 OP_FIRST_OPTIONAL
= OP_oI7b
6771 /* Generic instruction operand parser. This does no encoding and no
6772 semantic validation; it merely squirrels values away in the inst
6773 structure. Returns SUCCESS or FAIL depending on whether the
6774 specified grammar matched. */
6776 parse_operands (char *str
, const unsigned int *pattern
, bfd_boolean thumb
)
6778 unsigned const int *upat
= pattern
;
6779 char *backtrack_pos
= 0;
6780 const char *backtrack_error
= 0;
6781 int i
, val
= 0, backtrack_index
= 0;
6782 enum arm_reg_type rtype
;
6783 parse_operand_result result
;
6784 unsigned int op_parse_code
;
6785 bfd_boolean partial_match
;
6787 #define po_char_or_fail(chr) \
6790 if (skip_past_char (&str, chr) == FAIL) \
6795 #define po_reg_or_fail(regtype) \
6798 val = arm_typed_reg_parse (& str, regtype, & rtype, \
6799 & inst.operands[i].vectype); \
6802 first_error (_(reg_expected_msgs[regtype])); \
6805 inst.operands[i].reg = val; \
6806 inst.operands[i].isreg = 1; \
6807 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
6808 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
6809 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
6810 || rtype == REG_TYPE_VFD \
6811 || rtype == REG_TYPE_NQ); \
6815 #define po_reg_or_goto(regtype, label) \
6818 val = arm_typed_reg_parse (& str, regtype, & rtype, \
6819 & inst.operands[i].vectype); \
6823 inst.operands[i].reg = val; \
6824 inst.operands[i].isreg = 1; \
6825 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
6826 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
6827 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
6828 || rtype == REG_TYPE_VFD \
6829 || rtype == REG_TYPE_NQ); \
6833 #define po_imm_or_fail(min, max, popt) \
6836 if (parse_immediate (&str, &val, min, max, popt) == FAIL) \
6838 inst.operands[i].imm = val; \
6842 #define po_scalar_or_goto(elsz, label) \
6845 val = parse_scalar (& str, elsz, & inst.operands[i].vectype); \
6848 inst.operands[i].reg = val; \
6849 inst.operands[i].isscalar = 1; \
6853 #define po_misc_or_fail(expr) \
6861 #define po_misc_or_fail_no_backtrack(expr) \
6865 if (result == PARSE_OPERAND_FAIL_NO_BACKTRACK) \
6866 backtrack_pos = 0; \
6867 if (result != PARSE_OPERAND_SUCCESS) \
6872 #define po_barrier_or_imm(str) \
6875 val = parse_barrier (&str); \
6876 if (val == FAIL && ! ISALPHA (*str)) \
6879 /* ISB can only take SY as an option. */ \
6880 || ((inst.instruction & 0xf0) == 0x60 \
6883 inst.error = _("invalid barrier type"); \
6884 backtrack_pos = 0; \
6890 skip_whitespace (str
);
6892 for (i
= 0; upat
[i
] != OP_stop
; i
++)
6894 op_parse_code
= upat
[i
];
6895 if (op_parse_code
>= 1<<16)
6896 op_parse_code
= thumb
? (op_parse_code
>> 16)
6897 : (op_parse_code
& ((1<<16)-1));
6899 if (op_parse_code
>= OP_FIRST_OPTIONAL
)
6901 /* Remember where we are in case we need to backtrack. */
6902 gas_assert (!backtrack_pos
);
6903 backtrack_pos
= str
;
6904 backtrack_error
= inst
.error
;
6905 backtrack_index
= i
;
6908 if (i
> 0 && (i
> 1 || inst
.operands
[0].present
))
6909 po_char_or_fail (',');
6911 switch (op_parse_code
)
6921 case OP_RR
: po_reg_or_fail (REG_TYPE_RN
); break;
6922 case OP_RCP
: po_reg_or_fail (REG_TYPE_CP
); break;
6923 case OP_RCN
: po_reg_or_fail (REG_TYPE_CN
); break;
6924 case OP_RF
: po_reg_or_fail (REG_TYPE_FN
); break;
6925 case OP_RVS
: po_reg_or_fail (REG_TYPE_VFS
); break;
6926 case OP_RVD
: po_reg_or_fail (REG_TYPE_VFD
); break;
6928 case OP_RND
: po_reg_or_fail (REG_TYPE_VFD
); break;
6930 po_reg_or_goto (REG_TYPE_VFC
, coproc_reg
);
6932 /* Also accept generic coprocessor regs for unknown registers. */
6934 po_reg_or_fail (REG_TYPE_CN
);
6936 case OP_RMF
: po_reg_or_fail (REG_TYPE_MVF
); break;
6937 case OP_RMD
: po_reg_or_fail (REG_TYPE_MVD
); break;
6938 case OP_RMFX
: po_reg_or_fail (REG_TYPE_MVFX
); break;
6939 case OP_RMDX
: po_reg_or_fail (REG_TYPE_MVDX
); break;
6940 case OP_RMAX
: po_reg_or_fail (REG_TYPE_MVAX
); break;
6941 case OP_RMDS
: po_reg_or_fail (REG_TYPE_DSPSC
); break;
6942 case OP_RIWR
: po_reg_or_fail (REG_TYPE_MMXWR
); break;
6943 case OP_RIWC
: po_reg_or_fail (REG_TYPE_MMXWC
); break;
6944 case OP_RIWG
: po_reg_or_fail (REG_TYPE_MMXWCG
); break;
6945 case OP_RXA
: po_reg_or_fail (REG_TYPE_XSCALE
); break;
6947 case OP_RNQ
: po_reg_or_fail (REG_TYPE_NQ
); break;
6948 case OP_RNSD
: po_reg_or_fail (REG_TYPE_NSD
); break;
6950 case OP_RNDQ
: po_reg_or_fail (REG_TYPE_NDQ
); break;
6951 case OP_RVSD
: po_reg_or_fail (REG_TYPE_VFSD
); break;
6953 case OP_RNSDQ
: po_reg_or_fail (REG_TYPE_NSDQ
); break;
6955 /* Neon scalar. Using an element size of 8 means that some invalid
6956 scalars are accepted here, so deal with those in later code. */
6957 case OP_RNSC
: po_scalar_or_goto (8, failure
); break;
6961 po_reg_or_goto (REG_TYPE_NDQ
, try_imm0
);
6964 po_imm_or_fail (0, 0, TRUE
);
6969 po_reg_or_goto (REG_TYPE_VFSD
, try_imm0
);
6974 po_reg_or_goto (REG_TYPE_VFSD
, try_ifimm0
);
6977 if (parse_ifimm_zero (&str
))
6978 inst
.operands
[i
].imm
= 0;
6982 = _("only floating point zero is allowed as immediate value");
6990 po_scalar_or_goto (8, try_rr
);
6993 po_reg_or_fail (REG_TYPE_RN
);
6999 po_scalar_or_goto (8, try_nsdq
);
7002 po_reg_or_fail (REG_TYPE_NSDQ
);
7008 po_scalar_or_goto (8, try_s_scalar
);
7011 po_scalar_or_goto (4, try_nsd
);
7014 po_reg_or_fail (REG_TYPE_NSD
);
7020 po_scalar_or_goto (8, try_ndq
);
7023 po_reg_or_fail (REG_TYPE_NDQ
);
7029 po_scalar_or_goto (8, try_vfd
);
7032 po_reg_or_fail (REG_TYPE_VFD
);
7037 /* WARNING: parse_neon_mov can move the operand counter, i. If we're
7038 not careful then bad things might happen. */
7039 po_misc_or_fail (parse_neon_mov (&str
, &i
) == FAIL
);
7044 po_reg_or_goto (REG_TYPE_NDQ
, try_immbig
);
7047 /* There's a possibility of getting a 64-bit immediate here, so
7048 we need special handling. */
7049 if (parse_big_immediate (&str
, i
, NULL
, /*allow_symbol_p=*/FALSE
)
7052 inst
.error
= _("immediate value is out of range");
7060 po_reg_or_goto (REG_TYPE_NDQ
, try_shimm
);
7063 po_imm_or_fail (0, 63, TRUE
);
7068 po_char_or_fail ('[');
7069 po_reg_or_fail (REG_TYPE_RN
);
7070 po_char_or_fail (']');
7076 po_reg_or_fail (REG_TYPE_RN
);
7077 if (skip_past_char (&str
, '!') == SUCCESS
)
7078 inst
.operands
[i
].writeback
= 1;
7082 case OP_I7
: po_imm_or_fail ( 0, 7, FALSE
); break;
7083 case OP_I15
: po_imm_or_fail ( 0, 15, FALSE
); break;
7084 case OP_I16
: po_imm_or_fail ( 1, 16, FALSE
); break;
7085 case OP_I16z
: po_imm_or_fail ( 0, 16, FALSE
); break;
7086 case OP_I31
: po_imm_or_fail ( 0, 31, FALSE
); break;
7087 case OP_I32
: po_imm_or_fail ( 1, 32, FALSE
); break;
7088 case OP_I32z
: po_imm_or_fail ( 0, 32, FALSE
); break;
7089 case OP_I63s
: po_imm_or_fail (-64, 63, FALSE
); break;
7090 case OP_I63
: po_imm_or_fail ( 0, 63, FALSE
); break;
7091 case OP_I64
: po_imm_or_fail ( 1, 64, FALSE
); break;
7092 case OP_I64z
: po_imm_or_fail ( 0, 64, FALSE
); break;
7093 case OP_I255
: po_imm_or_fail ( 0, 255, FALSE
); break;
7095 case OP_I4b
: po_imm_or_fail ( 1, 4, TRUE
); break;
7097 case OP_I7b
: po_imm_or_fail ( 0, 7, TRUE
); break;
7098 case OP_I15b
: po_imm_or_fail ( 0, 15, TRUE
); break;
7100 case OP_I31b
: po_imm_or_fail ( 0, 31, TRUE
); break;
7101 case OP_oI32b
: po_imm_or_fail ( 1, 32, TRUE
); break;
7102 case OP_oI32z
: po_imm_or_fail ( 0, 32, TRUE
); break;
7103 case OP_oIffffb
: po_imm_or_fail ( 0, 0xffff, TRUE
); break;
7105 /* Immediate variants */
7107 po_char_or_fail ('{');
7108 po_imm_or_fail (0, 255, TRUE
);
7109 po_char_or_fail ('}');
7113 /* The expression parser chokes on a trailing !, so we have
7114 to find it first and zap it. */
7117 while (*s
&& *s
!= ',')
7122 inst
.operands
[i
].writeback
= 1;
7124 po_imm_or_fail (0, 31, TRUE
);
7132 po_misc_or_fail (my_get_expression (&inst
.relocs
[0].exp
, &str
,
7137 po_misc_or_fail (my_get_expression (&inst
.relocs
[0].exp
, &str
,
7142 po_misc_or_fail (my_get_expression (&inst
.relocs
[0].exp
, &str
,
7144 if (inst
.relocs
[0].exp
.X_op
== O_symbol
)
7146 val
= parse_reloc (&str
);
7149 inst
.error
= _("unrecognized relocation suffix");
7152 else if (val
!= BFD_RELOC_UNUSED
)
7154 inst
.operands
[i
].imm
= val
;
7155 inst
.operands
[i
].hasreloc
= 1;
7161 po_misc_or_fail (my_get_expression (&inst
.relocs
[i
].exp
, &str
,
7163 if (inst
.relocs
[i
].exp
.X_op
== O_symbol
)
7165 inst
.operands
[i
].hasreloc
= 1;
7167 else if (inst
.relocs
[i
].exp
.X_op
== O_constant
)
7169 inst
.operands
[i
].imm
= inst
.relocs
[i
].exp
.X_add_number
;
7170 inst
.operands
[i
].hasreloc
= 0;
7174 /* Operand for MOVW or MOVT. */
7176 po_misc_or_fail (parse_half (&str
));
7179 /* Register or expression. */
7180 case OP_RR_EXr
: po_reg_or_goto (REG_TYPE_RN
, EXPr
); break;
7181 case OP_RR_EXi
: po_reg_or_goto (REG_TYPE_RN
, EXPi
); break;
7183 /* Register or immediate. */
7184 case OP_RRnpc_I0
: po_reg_or_goto (REG_TYPE_RN
, I0
); break;
7185 I0
: po_imm_or_fail (0, 0, FALSE
); break;
7187 case OP_RF_IF
: po_reg_or_goto (REG_TYPE_FN
, IF
); break;
7189 if (!is_immediate_prefix (*str
))
7192 val
= parse_fpa_immediate (&str
);
7195 /* FPA immediates are encoded as registers 8-15.
7196 parse_fpa_immediate has already applied the offset. */
7197 inst
.operands
[i
].reg
= val
;
7198 inst
.operands
[i
].isreg
= 1;
7201 case OP_RIWR_I32z
: po_reg_or_goto (REG_TYPE_MMXWR
, I32z
); break;
7202 I32z
: po_imm_or_fail (0, 32, FALSE
); break;
7204 /* Two kinds of register. */
7207 struct reg_entry
*rege
= arm_reg_parse_multi (&str
);
7209 || (rege
->type
!= REG_TYPE_MMXWR
7210 && rege
->type
!= REG_TYPE_MMXWC
7211 && rege
->type
!= REG_TYPE_MMXWCG
))
7213 inst
.error
= _("iWMMXt data or control register expected");
7216 inst
.operands
[i
].reg
= rege
->number
;
7217 inst
.operands
[i
].isreg
= (rege
->type
== REG_TYPE_MMXWR
);
7223 struct reg_entry
*rege
= arm_reg_parse_multi (&str
);
7225 || (rege
->type
!= REG_TYPE_MMXWC
7226 && rege
->type
!= REG_TYPE_MMXWCG
))
7228 inst
.error
= _("iWMMXt control register expected");
7231 inst
.operands
[i
].reg
= rege
->number
;
7232 inst
.operands
[i
].isreg
= 1;
7237 case OP_CPSF
: val
= parse_cps_flags (&str
); break;
7238 case OP_ENDI
: val
= parse_endian_specifier (&str
); break;
7239 case OP_oROR
: val
= parse_ror (&str
); break;
7240 case OP_COND
: val
= parse_cond (&str
); break;
7241 case OP_oBARRIER_I15
:
7242 po_barrier_or_imm (str
); break;
7244 if (parse_immediate (&str
, &val
, 0, 15, TRUE
) == FAIL
)
7250 po_reg_or_goto (REG_TYPE_RNB
, try_psr
);
7251 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_virt
))
7253 inst
.error
= _("Banked registers are not available with this "
7259 val
= parse_psr (&str
, op_parse_code
== OP_wPSR
);
7263 po_reg_or_goto (REG_TYPE_VFSD
, try_sysreg
);
7266 val
= parse_sys_vldr_vstr (&str
);
7270 po_reg_or_goto (REG_TYPE_RN
, try_apsr
);
7273 /* Parse "APSR_nvzc" operand (for FMSTAT-equivalent MRS
7275 if (strncasecmp (str
, "APSR_", 5) == 0)
7282 case 'c': found
= (found
& 1) ? 16 : found
| 1; break;
7283 case 'n': found
= (found
& 2) ? 16 : found
| 2; break;
7284 case 'z': found
= (found
& 4) ? 16 : found
| 4; break;
7285 case 'v': found
= (found
& 8) ? 16 : found
| 8; break;
7286 default: found
= 16;
7290 inst
.operands
[i
].isvec
= 1;
7291 /* APSR_nzcv is encoded in instructions as if it were the REG_PC. */
7292 inst
.operands
[i
].reg
= REG_PC
;
7299 po_misc_or_fail (parse_tb (&str
));
7302 /* Register lists. */
7304 val
= parse_reg_list (&str
, REGLIST_RN
);
7307 inst
.operands
[i
].writeback
= 1;
7313 val
= parse_reg_list (&str
, REGLIST_CLRM
);
7317 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
, REGLIST_VFP_S
,
7322 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
, REGLIST_VFP_D
,
7327 /* Allow Q registers too. */
7328 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
,
7329 REGLIST_NEON_D
, &partial_match
);
7333 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
,
7334 REGLIST_VFP_S
, &partial_match
);
7335 inst
.operands
[i
].issingle
= 1;
7340 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
,
7341 REGLIST_VFP_D_VPR
, &partial_match
);
7342 if (val
== FAIL
&& !partial_match
)
7345 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
,
7346 REGLIST_VFP_S_VPR
, &partial_match
);
7347 inst
.operands
[i
].issingle
= 1;
7352 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
,
7353 REGLIST_NEON_D
, &partial_match
);
7357 val
= parse_neon_el_struct_list (&str
, &inst
.operands
[i
].reg
,
7358 &inst
.operands
[i
].vectype
);
7361 /* Addressing modes */
7363 po_misc_or_fail (parse_address (&str
, i
));
7367 po_misc_or_fail_no_backtrack (
7368 parse_address_group_reloc (&str
, i
, GROUP_LDR
));
7372 po_misc_or_fail_no_backtrack (
7373 parse_address_group_reloc (&str
, i
, GROUP_LDRS
));
7377 po_misc_or_fail_no_backtrack (
7378 parse_address_group_reloc (&str
, i
, GROUP_LDC
));
7382 po_misc_or_fail (parse_shifter_operand (&str
, i
));
7386 po_misc_or_fail_no_backtrack (
7387 parse_shifter_operand_group_reloc (&str
, i
));
7391 po_misc_or_fail (parse_shift (&str
, i
, SHIFT_LSL_IMMEDIATE
));
7395 po_misc_or_fail (parse_shift (&str
, i
, SHIFT_ASR_IMMEDIATE
));
7399 po_misc_or_fail (parse_shift (&str
, i
, SHIFT_LSL_OR_ASR_IMMEDIATE
));
7403 as_fatal (_("unhandled operand code %d"), op_parse_code
);
7406 /* Various value-based sanity checks and shared operations. We
7407 do not signal immediate failures for the register constraints;
7408 this allows a syntax error to take precedence. */
7409 switch (op_parse_code
)
7417 if (inst
.operands
[i
].isreg
&& inst
.operands
[i
].reg
== REG_PC
)
7418 inst
.error
= BAD_PC
;
7423 if (inst
.operands
[i
].isreg
)
7425 if (inst
.operands
[i
].reg
== REG_PC
)
7426 inst
.error
= BAD_PC
;
7427 else if (inst
.operands
[i
].reg
== REG_SP
7428 /* The restriction on Rd/Rt/Rt2 on Thumb mode has been
7429 relaxed since ARMv8-A. */
7430 && !ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8
))
7433 inst
.error
= BAD_SP
;
7439 if (inst
.operands
[i
].isreg
7440 && inst
.operands
[i
].reg
== REG_PC
7441 && (inst
.operands
[i
].writeback
|| thumb
))
7442 inst
.error
= BAD_PC
;
7446 if (inst
.operands
[i
].isreg
)
7455 case OP_oBARRIER_I15
:
7466 inst
.operands
[i
].imm
= val
;
7471 if (inst
.operands
[i
].reg
!= REG_LR
)
7472 inst
.error
= _("operand must be LR register");
7479 /* If we get here, this operand was successfully parsed. */
7480 inst
.operands
[i
].present
= 1;
7484 inst
.error
= BAD_ARGS
;
7489 /* The parse routine should already have set inst.error, but set a
7490 default here just in case. */
7492 inst
.error
= _("syntax error");
7496 /* Do not backtrack over a trailing optional argument that
7497 absorbed some text. We will only fail again, with the
7498 'garbage following instruction' error message, which is
7499 probably less helpful than the current one. */
7500 if (backtrack_index
== i
&& backtrack_pos
!= str
7501 && upat
[i
+1] == OP_stop
)
7504 inst
.error
= _("syntax error");
7508 /* Try again, skipping the optional argument at backtrack_pos. */
7509 str
= backtrack_pos
;
7510 inst
.error
= backtrack_error
;
7511 inst
.operands
[backtrack_index
].present
= 0;
7512 i
= backtrack_index
;
7516 /* Check that we have parsed all the arguments. */
7517 if (*str
!= '\0' && !inst
.error
)
7518 inst
.error
= _("garbage following instruction");
7520 return inst
.error
? FAIL
: SUCCESS
;
7523 #undef po_char_or_fail
7524 #undef po_reg_or_fail
7525 #undef po_reg_or_goto
7526 #undef po_imm_or_fail
7527 #undef po_scalar_or_fail
7528 #undef po_barrier_or_imm
7530 /* Shorthand macro for instruction encoding functions issuing errors. */
7531 #define constraint(expr, err) \
7542 /* Reject "bad registers" for Thumb-2 instructions. Many Thumb-2
7543 instructions are unpredictable if these registers are used. This
7544 is the BadReg predicate in ARM's Thumb-2 documentation.
7546 Before ARMv8-A, REG_PC and REG_SP were not allowed in quite a few
7547 places, while the restriction on REG_SP was relaxed since ARMv8-A. */
7548 #define reject_bad_reg(reg) \
7550 if (reg == REG_PC) \
7552 inst.error = BAD_PC; \
7555 else if (reg == REG_SP \
7556 && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8)) \
7558 inst.error = BAD_SP; \
7563 /* If REG is R13 (the stack pointer), warn that its use is
7565 #define warn_deprecated_sp(reg) \
7567 if (warn_on_deprecated && reg == REG_SP) \
7568 as_tsktsk (_("use of r13 is deprecated")); \
7571 /* Functions for operand encoding. ARM, then Thumb. */
7573 #define rotate_left(v, n) (v << (n & 31) | v >> ((32 - n) & 31))
7575 /* If the current inst is scalar ARMv8.2 fp16 instruction, do special encoding.
7577 The only binary encoding difference is the Coprocessor number. Coprocessor
7578 9 is used for half-precision calculations or conversions. The format of the
7579 instruction is the same as the equivalent Coprocessor 10 instruction that
7580 exists for Single-Precision operation. */
7583 do_scalar_fp16_v82_encode (void)
7585 if (inst
.cond
!= COND_ALWAYS
)
7586 as_warn (_("ARMv8.2 scalar fp16 instruction cannot be conditional,"
7587 " the behaviour is UNPREDICTABLE"));
7588 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_fp16
),
7591 inst
.instruction
= (inst
.instruction
& 0xfffff0ff) | 0x900;
7592 mark_feature_used (&arm_ext_fp16
);
7595 /* If VAL can be encoded in the immediate field of an ARM instruction,
7596 return the encoded form. Otherwise, return FAIL. */
7599 encode_arm_immediate (unsigned int val
)
7606 for (i
= 2; i
< 32; i
+= 2)
7607 if ((a
= rotate_left (val
, i
)) <= 0xff)
7608 return a
| (i
<< 7); /* 12-bit pack: [shift-cnt,const]. */
7613 /* If VAL can be encoded in the immediate field of a Thumb32 instruction,
7614 return the encoded form. Otherwise, return FAIL. */
7616 encode_thumb32_immediate (unsigned int val
)
7623 for (i
= 1; i
<= 24; i
++)
7626 if ((val
& ~(0xff << i
)) == 0)
7627 return ((val
>> i
) & 0x7f) | ((32 - i
) << 7);
7631 if (val
== ((a
<< 16) | a
))
7633 if (val
== ((a
<< 24) | (a
<< 16) | (a
<< 8) | a
))
7637 if (val
== ((a
<< 16) | a
))
7638 return 0x200 | (a
>> 8);
7642 /* Encode a VFP SP or DP register number into inst.instruction. */
7645 encode_arm_vfp_reg (int reg
, enum vfp_reg_pos pos
)
7647 if ((pos
== VFP_REG_Dd
|| pos
== VFP_REG_Dn
|| pos
== VFP_REG_Dm
)
7650 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_d32
))
7653 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
7656 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
,
7661 first_error (_("D register out of range for selected VFP version"));
7669 inst
.instruction
|= ((reg
>> 1) << 12) | ((reg
& 1) << 22);
7673 inst
.instruction
|= ((reg
>> 1) << 16) | ((reg
& 1) << 7);
7677 inst
.instruction
|= ((reg
>> 1) << 0) | ((reg
& 1) << 5);
7681 inst
.instruction
|= ((reg
& 15) << 12) | ((reg
>> 4) << 22);
7685 inst
.instruction
|= ((reg
& 15) << 16) | ((reg
>> 4) << 7);
7689 inst
.instruction
|= (reg
& 15) | ((reg
>> 4) << 5);
7697 /* Encode a <shift> in an ARM-format instruction. The immediate,
7698 if any, is handled by md_apply_fix. */
7700 encode_arm_shift (int i
)
7702 /* register-shifted register. */
7703 if (inst
.operands
[i
].immisreg
)
7706 for (op_index
= 0; op_index
<= i
; ++op_index
)
7708 /* Check the operand only when it's presented. In pre-UAL syntax,
7709 if the destination register is the same as the first operand, two
7710 register form of the instruction can be used. */
7711 if (inst
.operands
[op_index
].present
&& inst
.operands
[op_index
].isreg
7712 && inst
.operands
[op_index
].reg
== REG_PC
)
7713 as_warn (UNPRED_REG ("r15"));
7716 if (inst
.operands
[i
].imm
== REG_PC
)
7717 as_warn (UNPRED_REG ("r15"));
7720 if (inst
.operands
[i
].shift_kind
== SHIFT_RRX
)
7721 inst
.instruction
|= SHIFT_ROR
<< 5;
7724 inst
.instruction
|= inst
.operands
[i
].shift_kind
<< 5;
7725 if (inst
.operands
[i
].immisreg
)
7727 inst
.instruction
|= SHIFT_BY_REG
;
7728 inst
.instruction
|= inst
.operands
[i
].imm
<< 8;
7731 inst
.relocs
[0].type
= BFD_RELOC_ARM_SHIFT_IMM
;
7736 encode_arm_shifter_operand (int i
)
7738 if (inst
.operands
[i
].isreg
)
7740 inst
.instruction
|= inst
.operands
[i
].reg
;
7741 encode_arm_shift (i
);
7745 inst
.instruction
|= INST_IMMEDIATE
;
7746 if (inst
.relocs
[0].type
!= BFD_RELOC_ARM_IMMEDIATE
)
7747 inst
.instruction
|= inst
.operands
[i
].imm
;
7751 /* Subroutine of encode_arm_addr_mode_2 and encode_arm_addr_mode_3. */
7753 encode_arm_addr_mode_common (int i
, bfd_boolean is_t
)
7756 Generate an error if the operand is not a register. */
7757 constraint (!inst
.operands
[i
].isreg
,
7758 _("Instruction does not support =N addresses"));
7760 inst
.instruction
|= inst
.operands
[i
].reg
<< 16;
7762 if (inst
.operands
[i
].preind
)
7766 inst
.error
= _("instruction does not accept preindexed addressing");
7769 inst
.instruction
|= PRE_INDEX
;
7770 if (inst
.operands
[i
].writeback
)
7771 inst
.instruction
|= WRITE_BACK
;
7774 else if (inst
.operands
[i
].postind
)
7776 gas_assert (inst
.operands
[i
].writeback
);
7778 inst
.instruction
|= WRITE_BACK
;
7780 else /* unindexed - only for coprocessor */
7782 inst
.error
= _("instruction does not accept unindexed addressing");
7786 if (((inst
.instruction
& WRITE_BACK
) || !(inst
.instruction
& PRE_INDEX
))
7787 && (((inst
.instruction
& 0x000f0000) >> 16)
7788 == ((inst
.instruction
& 0x0000f000) >> 12)))
7789 as_warn ((inst
.instruction
& LOAD_BIT
)
7790 ? _("destination register same as write-back base")
7791 : _("source register same as write-back base"));
7794 /* inst.operands[i] was set up by parse_address. Encode it into an
7795 ARM-format mode 2 load or store instruction. If is_t is true,
7796 reject forms that cannot be used with a T instruction (i.e. not
7799 encode_arm_addr_mode_2 (int i
, bfd_boolean is_t
)
7801 const bfd_boolean is_pc
= (inst
.operands
[i
].reg
== REG_PC
);
7803 encode_arm_addr_mode_common (i
, is_t
);
7805 if (inst
.operands
[i
].immisreg
)
7807 constraint ((inst
.operands
[i
].imm
== REG_PC
7808 || (is_pc
&& inst
.operands
[i
].writeback
)),
7810 inst
.instruction
|= INST_IMMEDIATE
; /* yes, this is backwards */
7811 inst
.instruction
|= inst
.operands
[i
].imm
;
7812 if (!inst
.operands
[i
].negative
)
7813 inst
.instruction
|= INDEX_UP
;
7814 if (inst
.operands
[i
].shifted
)
7816 if (inst
.operands
[i
].shift_kind
== SHIFT_RRX
)
7817 inst
.instruction
|= SHIFT_ROR
<< 5;
7820 inst
.instruction
|= inst
.operands
[i
].shift_kind
<< 5;
7821 inst
.relocs
[0].type
= BFD_RELOC_ARM_SHIFT_IMM
;
7825 else /* immediate offset in inst.relocs[0] */
7827 if (is_pc
&& !inst
.relocs
[0].pc_rel
)
7829 const bfd_boolean is_load
= ((inst
.instruction
& LOAD_BIT
) != 0);
7831 /* If is_t is TRUE, it's called from do_ldstt. ldrt/strt
7832 cannot use PC in addressing.
7833 PC cannot be used in writeback addressing, either. */
7834 constraint ((is_t
|| inst
.operands
[i
].writeback
),
7837 /* Use of PC in str is deprecated for ARMv7. */
7838 if (warn_on_deprecated
7840 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v7
))
7841 as_tsktsk (_("use of PC in this instruction is deprecated"));
7844 if (inst
.relocs
[0].type
== BFD_RELOC_UNUSED
)
7846 /* Prefer + for zero encoded value. */
7847 if (!inst
.operands
[i
].negative
)
7848 inst
.instruction
|= INDEX_UP
;
7849 inst
.relocs
[0].type
= BFD_RELOC_ARM_OFFSET_IMM
;
7854 /* inst.operands[i] was set up by parse_address. Encode it into an
7855 ARM-format mode 3 load or store instruction. Reject forms that
7856 cannot be used with such instructions. If is_t is true, reject
7857 forms that cannot be used with a T instruction (i.e. not
7860 encode_arm_addr_mode_3 (int i
, bfd_boolean is_t
)
7862 if (inst
.operands
[i
].immisreg
&& inst
.operands
[i
].shifted
)
7864 inst
.error
= _("instruction does not accept scaled register index");
7868 encode_arm_addr_mode_common (i
, is_t
);
7870 if (inst
.operands
[i
].immisreg
)
7872 constraint ((inst
.operands
[i
].imm
== REG_PC
7873 || (is_t
&& inst
.operands
[i
].reg
== REG_PC
)),
7875 constraint (inst
.operands
[i
].reg
== REG_PC
&& inst
.operands
[i
].writeback
,
7877 inst
.instruction
|= inst
.operands
[i
].imm
;
7878 if (!inst
.operands
[i
].negative
)
7879 inst
.instruction
|= INDEX_UP
;
7881 else /* immediate offset in inst.relocs[0] */
7883 constraint ((inst
.operands
[i
].reg
== REG_PC
&& !inst
.relocs
[0].pc_rel
7884 && inst
.operands
[i
].writeback
),
7886 inst
.instruction
|= HWOFFSET_IMM
;
7887 if (inst
.relocs
[0].type
== BFD_RELOC_UNUSED
)
7889 /* Prefer + for zero encoded value. */
7890 if (!inst
.operands
[i
].negative
)
7891 inst
.instruction
|= INDEX_UP
;
7893 inst
.relocs
[0].type
= BFD_RELOC_ARM_OFFSET_IMM8
;
7898 /* Write immediate bits [7:0] to the following locations:
7900 |28/24|23 19|18 16|15 4|3 0|
7901 | a |x x x x x|b c d|x x x x x x x x x x x x|e f g h|
7903 This function is used by VMOV/VMVN/VORR/VBIC. */
7906 neon_write_immbits (unsigned immbits
)
7908 inst
.instruction
|= immbits
& 0xf;
7909 inst
.instruction
|= ((immbits
>> 4) & 0x7) << 16;
7910 inst
.instruction
|= ((immbits
>> 7) & 0x1) << (thumb_mode
? 28 : 24);
/* Invert low-order SIZE bits of XHI:XLO.  Either pointer may be NULL, in
   which case that half is treated as zero and not written back.  */

static void
neon_invert_size (unsigned *xlo, unsigned *xhi, int size)
{
  unsigned immlo = xlo ? *xlo : 0;
  unsigned immhi = xhi ? *xhi : 0;

  switch (size)
    {
    case 8:
      immlo = (~immlo) & 0xff;
      break;

    case 16:
      immlo = (~immlo) & 0xffff;
      break;

    case 64:
      immhi = (~immhi) & 0xffffffff;
      /* fall through.  */

    case 32:
      immlo = (~immlo) & 0xffffffff;
      break;

    default:
      abort ();
    }

  if (xlo)
    *xlo = immlo;

  if (xhi)
    *xhi = immhi;
}
/* True if IMM has form 0bAAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD for bits
   A, B, C, D: i.e. each byte of IMM is either all-zeros or all-ones.  */

static int
neon_bits_same_in_bytes (unsigned imm)
{
  return ((imm & 0x000000ff) == 0 || (imm & 0x000000ff) == 0x000000ff)
	 && ((imm & 0x0000ff00) == 0 || (imm & 0x0000ff00) == 0x0000ff00)
	 && ((imm & 0x00ff0000) == 0 || (imm & 0x00ff0000) == 0x00ff0000)
	 && ((imm & 0xff000000) == 0 || (imm & 0xff000000) == 0xff000000);
}
/* For immediate of above form, return 0bABCD: take bit 0 of each byte
   as a representative for that byte.  */

static unsigned
neon_squash_bits (unsigned imm)
{
  return (imm & 0x01) | ((imm & 0x0100) >> 7) | ((imm & 0x010000) >> 14)
	 | ((imm & 0x01000000) >> 21);
}
/* Compress quarter-float representation to 0b...000 abcdefgh:
   sign bit plus the top exponent bit and the six bits below it.  */

static unsigned
neon_qfloat_bits (unsigned imm)
{
  return ((imm >> 19) & 0x7f) | ((imm >> 24) & 0x80);
}
7979 /* Returns CMODE. IMMBITS [7:0] is set to bits suitable for inserting into
7980 the instruction. *OP is passed as the initial value of the op field, and
7981 may be set to a different value depending on the constant (i.e.
7982 "MOV I64, 0bAAAAAAAABBBB..." which uses OP = 1 despite being MOV not
7983 MVN). If the immediate looks like a repeated pattern then also
7984 try smaller element sizes. */
7987 neon_cmode_for_move_imm (unsigned immlo
, unsigned immhi
, int float_p
,
7988 unsigned *immbits
, int *op
, int size
,
7989 enum neon_el_type type
)
7991 /* Only permit float immediates (including 0.0/-0.0) if the operand type is
7993 if (type
== NT_float
&& !float_p
)
7996 if (type
== NT_float
&& is_quarter_float (immlo
) && immhi
== 0)
7998 if (size
!= 32 || *op
== 1)
8000 *immbits
= neon_qfloat_bits (immlo
);
8006 if (neon_bits_same_in_bytes (immhi
)
8007 && neon_bits_same_in_bytes (immlo
))
8011 *immbits
= (neon_squash_bits (immhi
) << 4)
8012 | neon_squash_bits (immlo
);
8023 if (immlo
== (immlo
& 0x000000ff))
8028 else if (immlo
== (immlo
& 0x0000ff00))
8030 *immbits
= immlo
>> 8;
8033 else if (immlo
== (immlo
& 0x00ff0000))
8035 *immbits
= immlo
>> 16;
8038 else if (immlo
== (immlo
& 0xff000000))
8040 *immbits
= immlo
>> 24;
8043 else if (immlo
== ((immlo
& 0x0000ff00) | 0x000000ff))
8045 *immbits
= (immlo
>> 8) & 0xff;
8048 else if (immlo
== ((immlo
& 0x00ff0000) | 0x0000ffff))
8050 *immbits
= (immlo
>> 16) & 0xff;
8054 if ((immlo
& 0xffff) != (immlo
>> 16))
8061 if (immlo
== (immlo
& 0x000000ff))
8066 else if (immlo
== (immlo
& 0x0000ff00))
8068 *immbits
= immlo
>> 8;
8072 if ((immlo
& 0xff) != (immlo
>> 8))
8077 if (immlo
== (immlo
& 0x000000ff))
8079 /* Don't allow MVN with 8-bit immediate. */
8089 #if defined BFD_HOST_64_BIT
8090 /* Returns TRUE if double precision value V may be cast
8091 to single precision without loss of accuracy. */
8094 is_double_a_single (bfd_int64_t v
)
8096 int exp
= (int)((v
>> 52) & 0x7FF);
8097 bfd_int64_t mantissa
= (v
& (bfd_int64_t
)0xFFFFFFFFFFFFFULL
);
8099 return (exp
== 0 || exp
== 0x7FF
8100 || (exp
>= 1023 - 126 && exp
<= 1023 + 127))
8101 && (mantissa
& 0x1FFFFFFFl
) == 0;
8104 /* Returns a double precision value casted to single precision
8105 (ignoring the least significant bits in exponent and mantissa). */
8108 double_to_single (bfd_int64_t v
)
8110 int sign
= (int) ((v
>> 63) & 1l);
8111 int exp
= (int) ((v
>> 52) & 0x7FF);
8112 bfd_int64_t mantissa
= (v
& (bfd_int64_t
)0xFFFFFFFFFFFFFULL
);
8118 exp
= exp
- 1023 + 127;
8127 /* No denormalized numbers. */
8133 return (sign
<< 31) | (exp
<< 23) | mantissa
;
8135 #endif /* BFD_HOST_64_BIT */
8144 static void do_vfp_nsyn_opcode (const char *);
8146 /* inst.relocs[0].exp describes an "=expr" load pseudo-operation.
8147 Determine whether it can be performed with a move instruction; if
8148 it can, convert inst.instruction to that move instruction and
8149 return TRUE; if it can't, convert inst.instruction to a literal-pool
8150 load and return FALSE. If this is not a valid thing to do in the
8151 current context, set inst.error and return TRUE.
8153 inst.operands[i] describes the destination register. */
8156 move_or_literal_pool (int i
, enum lit_type t
, bfd_boolean mode_3
)
8159 bfd_boolean thumb_p
= (t
== CONST_THUMB
);
8160 bfd_boolean arm_p
= (t
== CONST_ARM
);
8163 tbit
= (inst
.instruction
> 0xffff) ? THUMB2_LOAD_BIT
: THUMB_LOAD_BIT
;
8167 if ((inst
.instruction
& tbit
) == 0)
8169 inst
.error
= _("invalid pseudo operation");
8173 if (inst
.relocs
[0].exp
.X_op
!= O_constant
8174 && inst
.relocs
[0].exp
.X_op
!= O_symbol
8175 && inst
.relocs
[0].exp
.X_op
!= O_big
)
8177 inst
.error
= _("constant expression expected");
8181 if (inst
.relocs
[0].exp
.X_op
== O_constant
8182 || inst
.relocs
[0].exp
.X_op
== O_big
)
8184 #if defined BFD_HOST_64_BIT
8189 if (inst
.relocs
[0].exp
.X_op
== O_big
)
8191 LITTLENUM_TYPE w
[X_PRECISION
];
8194 if (inst
.relocs
[0].exp
.X_add_number
== -1)
8196 gen_to_words (w
, X_PRECISION
, E_PRECISION
);
8198 /* FIXME: Should we check words w[2..5] ? */
8203 #if defined BFD_HOST_64_BIT
8205 ((((((((bfd_int64_t
) l
[3] & LITTLENUM_MASK
)
8206 << LITTLENUM_NUMBER_OF_BITS
)
8207 | ((bfd_int64_t
) l
[2] & LITTLENUM_MASK
))
8208 << LITTLENUM_NUMBER_OF_BITS
)
8209 | ((bfd_int64_t
) l
[1] & LITTLENUM_MASK
))
8210 << LITTLENUM_NUMBER_OF_BITS
)
8211 | ((bfd_int64_t
) l
[0] & LITTLENUM_MASK
));
8213 v
= ((l
[1] & LITTLENUM_MASK
) << LITTLENUM_NUMBER_OF_BITS
)
8214 | (l
[0] & LITTLENUM_MASK
);
8218 v
= inst
.relocs
[0].exp
.X_add_number
;
8220 if (!inst
.operands
[i
].issingle
)
8224 /* LDR should not use lead in a flag-setting instruction being
8225 chosen so we do not check whether movs can be used. */
8227 if ((ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2
)
8228 || ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2_v8m
))
8229 && inst
.operands
[i
].reg
!= 13
8230 && inst
.operands
[i
].reg
!= 15)
8232 /* Check if on thumb2 it can be done with a mov.w, mvn or
8233 movw instruction. */
8234 unsigned int newimm
;
8235 bfd_boolean isNegated
;
8237 newimm
= encode_thumb32_immediate (v
);
8238 if (newimm
!= (unsigned int) FAIL
)
8242 newimm
= encode_thumb32_immediate (~v
);
8243 if (newimm
!= (unsigned int) FAIL
)
8247 /* The number can be loaded with a mov.w or mvn
8249 if (newimm
!= (unsigned int) FAIL
8250 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2
))
8252 inst
.instruction
= (0xf04f0000 /* MOV.W. */
8253 | (inst
.operands
[i
].reg
<< 8));
8254 /* Change to MOVN. */
8255 inst
.instruction
|= (isNegated
? 0x200000 : 0);
8256 inst
.instruction
|= (newimm
& 0x800) << 15;
8257 inst
.instruction
|= (newimm
& 0x700) << 4;
8258 inst
.instruction
|= (newimm
& 0x0ff);
8261 /* The number can be loaded with a movw instruction. */
8262 else if ((v
& ~0xFFFF) == 0
8263 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2_v8m
))
8265 int imm
= v
& 0xFFFF;
8267 inst
.instruction
= 0xf2400000; /* MOVW. */
8268 inst
.instruction
|= (inst
.operands
[i
].reg
<< 8);
8269 inst
.instruction
|= (imm
& 0xf000) << 4;
8270 inst
.instruction
|= (imm
& 0x0800) << 15;
8271 inst
.instruction
|= (imm
& 0x0700) << 4;
8272 inst
.instruction
|= (imm
& 0x00ff);
8279 int value
= encode_arm_immediate (v
);
8283 /* This can be done with a mov instruction. */
8284 inst
.instruction
&= LITERAL_MASK
;
8285 inst
.instruction
|= INST_IMMEDIATE
| (OPCODE_MOV
<< DATA_OP_SHIFT
);
8286 inst
.instruction
|= value
& 0xfff;
8290 value
= encode_arm_immediate (~ v
);
8293 /* This can be done with a mvn instruction. */
8294 inst
.instruction
&= LITERAL_MASK
;
8295 inst
.instruction
|= INST_IMMEDIATE
| (OPCODE_MVN
<< DATA_OP_SHIFT
);
8296 inst
.instruction
|= value
& 0xfff;
8300 else if (t
== CONST_VEC
&& ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_v1
))
8303 unsigned immbits
= 0;
8304 unsigned immlo
= inst
.operands
[1].imm
;
8305 unsigned immhi
= inst
.operands
[1].regisimm
8306 ? inst
.operands
[1].reg
8307 : inst
.relocs
[0].exp
.X_unsigned
8309 : ((bfd_int64_t
)((int) immlo
)) >> 32;
8310 int cmode
= neon_cmode_for_move_imm (immlo
, immhi
, FALSE
, &immbits
,
8311 &op
, 64, NT_invtype
);
8315 neon_invert_size (&immlo
, &immhi
, 64);
8317 cmode
= neon_cmode_for_move_imm (immlo
, immhi
, FALSE
, &immbits
,
8318 &op
, 64, NT_invtype
);
8323 inst
.instruction
= (inst
.instruction
& VLDR_VMOV_SAME
)
8329 /* Fill other bits in vmov encoding for both thumb and arm. */
8331 inst
.instruction
|= (0x7U
<< 29) | (0xF << 24);
8333 inst
.instruction
|= (0xFU
<< 28) | (0x1 << 25);
8334 neon_write_immbits (immbits
);
8342 /* Check if vldr Rx, =constant could be optimized to vmov Rx, #constant. */
8343 if (inst
.operands
[i
].issingle
8344 && is_quarter_float (inst
.operands
[1].imm
)
8345 && ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v3xd
))
8347 inst
.operands
[1].imm
=
8348 neon_qfloat_bits (v
);
8349 do_vfp_nsyn_opcode ("fconsts");
8353 /* If our host does not support a 64-bit type then we cannot perform
8354 the following optimization. This mean that there will be a
8355 discrepancy between the output produced by an assembler built for
8356 a 32-bit-only host and the output produced from a 64-bit host, but
8357 this cannot be helped. */
8358 #if defined BFD_HOST_64_BIT
8359 else if (!inst
.operands
[1].issingle
8360 && ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v3
))
8362 if (is_double_a_single (v
)
8363 && is_quarter_float (double_to_single (v
)))
8365 inst
.operands
[1].imm
=
8366 neon_qfloat_bits (double_to_single (v
));
8367 do_vfp_nsyn_opcode ("fconstd");
8375 if (add_to_lit_pool ((!inst
.operands
[i
].isvec
8376 || inst
.operands
[i
].issingle
) ? 4 : 8) == FAIL
)
8379 inst
.operands
[1].reg
= REG_PC
;
8380 inst
.operands
[1].isreg
= 1;
8381 inst
.operands
[1].preind
= 1;
8382 inst
.relocs
[0].pc_rel
= 1;
8383 inst
.relocs
[0].type
= (thumb_p
8384 ? BFD_RELOC_ARM_THUMB_OFFSET
8386 ? BFD_RELOC_ARM_HWLITERAL
8387 : BFD_RELOC_ARM_LITERAL
));
8391 /* inst.operands[i] was set up by parse_address. Encode it into an
8392 ARM-format instruction. Reject all forms which cannot be encoded
8393 into a coprocessor load/store instruction. If wb_ok is false,
8394 reject use of writeback; if unind_ok is false, reject use of
8395 unindexed addressing. If reloc_override is not 0, use it instead
8396 of BFD_ARM_CP_OFF_IMM, unless the initial relocation is a group one
8397 (in which case it is preserved). */
8400 encode_arm_cp_address (int i
, int wb_ok
, int unind_ok
, int reloc_override
)
8402 if (!inst
.operands
[i
].isreg
)
8405 if (! inst
.operands
[0].isvec
)
8407 inst
.error
= _("invalid co-processor operand");
8410 if (move_or_literal_pool (0, CONST_VEC
, /*mode_3=*/FALSE
))
8414 inst
.instruction
|= inst
.operands
[i
].reg
<< 16;
8416 gas_assert (!(inst
.operands
[i
].preind
&& inst
.operands
[i
].postind
));
8418 if (!inst
.operands
[i
].preind
&& !inst
.operands
[i
].postind
) /* unindexed */
8420 gas_assert (!inst
.operands
[i
].writeback
);
8423 inst
.error
= _("instruction does not support unindexed addressing");
8426 inst
.instruction
|= inst
.operands
[i
].imm
;
8427 inst
.instruction
|= INDEX_UP
;
8431 if (inst
.operands
[i
].preind
)
8432 inst
.instruction
|= PRE_INDEX
;
8434 if (inst
.operands
[i
].writeback
)
8436 if (inst
.operands
[i
].reg
== REG_PC
)
8438 inst
.error
= _("pc may not be used with write-back");
8443 inst
.error
= _("instruction does not support writeback");
8446 inst
.instruction
|= WRITE_BACK
;
8450 inst
.relocs
[0].type
= (bfd_reloc_code_real_type
) reloc_override
;
8451 else if ((inst
.relocs
[0].type
< BFD_RELOC_ARM_ALU_PC_G0_NC
8452 || inst
.relocs
[0].type
> BFD_RELOC_ARM_LDC_SB_G2
)
8453 && inst
.relocs
[0].type
!= BFD_RELOC_ARM_LDR_PC_G0
)
8456 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_CP_OFF_IMM
;
8458 inst
.relocs
[0].type
= BFD_RELOC_ARM_CP_OFF_IMM
;
8461 /* Prefer + for zero encoded value. */
8462 if (!inst
.operands
[i
].negative
)
8463 inst
.instruction
|= INDEX_UP
;
8468 /* Functions for instruction encoding, sorted by sub-architecture.
8469 First some generics; their names are taken from the conventional
8470 bit positions for register arguments in ARM format instructions. */
8480 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8486 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
8492 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8493 inst
.instruction
|= inst
.operands
[1].reg
;
8499 inst
.instruction
|= inst
.operands
[0].reg
;
8500 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8506 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8507 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8513 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
8514 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
8520 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8521 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8525 check_obsolete (const arm_feature_set
*feature
, const char *msg
)
8527 if (ARM_CPU_IS_ANY (cpu_variant
))
8529 as_tsktsk ("%s", msg
);
8532 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, *feature
))
8544 unsigned Rn
= inst
.operands
[2].reg
;
8545 /* Enforce restrictions on SWP instruction. */
8546 if ((inst
.instruction
& 0x0fbfffff) == 0x01000090)
8548 constraint (Rn
== inst
.operands
[0].reg
|| Rn
== inst
.operands
[1].reg
,
8549 _("Rn must not overlap other operands"));
8551 /* SWP{b} is obsolete for ARMv8-A, and deprecated for ARMv6* and ARMv7.
8553 if (!check_obsolete (&arm_ext_v8
,
8554 _("swp{b} use is obsoleted for ARMv8 and later"))
8555 && warn_on_deprecated
8556 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6
))
8557 as_tsktsk (_("swp{b} use is deprecated for ARMv6 and ARMv7"));
8560 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8561 inst
.instruction
|= inst
.operands
[1].reg
;
8562 inst
.instruction
|= Rn
<< 16;
8568 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8569 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8570 inst
.instruction
|= inst
.operands
[2].reg
;
8576 constraint ((inst
.operands
[2].reg
== REG_PC
), BAD_PC
);
8577 constraint (((inst
.relocs
[0].exp
.X_op
!= O_constant
8578 && inst
.relocs
[0].exp
.X_op
!= O_illegal
)
8579 || inst
.relocs
[0].exp
.X_add_number
!= 0),
8581 inst
.instruction
|= inst
.operands
[0].reg
;
8582 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
8583 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
8589 inst
.instruction
|= inst
.operands
[0].imm
;
8595 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8596 encode_arm_cp_address (1, TRUE
, TRUE
, 0);
8599 /* ARM instructions, in alphabetical order by function name (except
8600 that wrapper functions appear immediately after the function they
8603 /* This is a pseudo-op of the form "adr rd, label" to be converted
8604 into a relative address of the form "add rd, pc, #label-.-8". */
8609 inst
.instruction
|= (inst
.operands
[0].reg
<< 12); /* Rd */
8611 /* Frag hacking will turn this into a sub instruction if the offset turns
8612 out to be negative. */
8613 inst
.relocs
[0].type
= BFD_RELOC_ARM_IMMEDIATE
;
8614 inst
.relocs
[0].pc_rel
= 1;
8615 inst
.relocs
[0].exp
.X_add_number
-= 8;
8617 if (support_interwork
8618 && inst
.relocs
[0].exp
.X_op
== O_symbol
8619 && inst
.relocs
[0].exp
.X_add_symbol
!= NULL
8620 && S_IS_DEFINED (inst
.relocs
[0].exp
.X_add_symbol
)
8621 && THUMB_IS_FUNC (inst
.relocs
[0].exp
.X_add_symbol
))
8622 inst
.relocs
[0].exp
.X_add_number
|= 1;
8625 /* This is a pseudo-op of the form "adrl rd, label" to be converted
8626 into a relative address of the form:
8627 add rd, pc, #low(label-.-8)"
8628 add rd, rd, #high(label-.-8)" */
8633 inst
.instruction
|= (inst
.operands
[0].reg
<< 12); /* Rd */
8635 /* Frag hacking will turn this into a sub instruction if the offset turns
8636 out to be negative. */
8637 inst
.relocs
[0].type
= BFD_RELOC_ARM_ADRL_IMMEDIATE
;
8638 inst
.relocs
[0].pc_rel
= 1;
8639 inst
.size
= INSN_SIZE
* 2;
8640 inst
.relocs
[0].exp
.X_add_number
-= 8;
8642 if (support_interwork
8643 && inst
.relocs
[0].exp
.X_op
== O_symbol
8644 && inst
.relocs
[0].exp
.X_add_symbol
!= NULL
8645 && S_IS_DEFINED (inst
.relocs
[0].exp
.X_add_symbol
)
8646 && THUMB_IS_FUNC (inst
.relocs
[0].exp
.X_add_symbol
))
8647 inst
.relocs
[0].exp
.X_add_number
|= 1;
8653 constraint (inst
.relocs
[0].type
>= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
8654 && inst
.relocs
[0].type
<= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
,
8656 if (!inst
.operands
[1].present
)
8657 inst
.operands
[1].reg
= inst
.operands
[0].reg
;
8658 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8659 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8660 encode_arm_shifter_operand (2);
8666 if (inst
.operands
[0].present
)
8667 inst
.instruction
|= inst
.operands
[0].imm
;
8669 inst
.instruction
|= 0xf;
8675 unsigned int msb
= inst
.operands
[1].imm
+ inst
.operands
[2].imm
;
8676 constraint (msb
> 32, _("bit-field extends past end of register"));
8677 /* The instruction encoding stores the LSB and MSB,
8678 not the LSB and width. */
8679 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8680 inst
.instruction
|= inst
.operands
[1].imm
<< 7;
8681 inst
.instruction
|= (msb
- 1) << 16;
8689 /* #0 in second position is alternative syntax for bfc, which is
8690 the same instruction but with REG_PC in the Rm field. */
8691 if (!inst
.operands
[1].isreg
)
8692 inst
.operands
[1].reg
= REG_PC
;
8694 msb
= inst
.operands
[2].imm
+ inst
.operands
[3].imm
;
8695 constraint (msb
> 32, _("bit-field extends past end of register"));
8696 /* The instruction encoding stores the LSB and MSB,
8697 not the LSB and width. */
8698 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8699 inst
.instruction
|= inst
.operands
[1].reg
;
8700 inst
.instruction
|= inst
.operands
[2].imm
<< 7;
8701 inst
.instruction
|= (msb
- 1) << 16;
8707 constraint (inst
.operands
[2].imm
+ inst
.operands
[3].imm
> 32,
8708 _("bit-field extends past end of register"));
8709 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8710 inst
.instruction
|= inst
.operands
[1].reg
;
8711 inst
.instruction
|= inst
.operands
[2].imm
<< 7;
8712 inst
.instruction
|= (inst
.operands
[3].imm
- 1) << 16;
8715 /* ARM V5 breakpoint instruction (argument parse)
8716 BKPT <16 bit unsigned immediate>
8717 Instruction is not conditional.
8718 The bit pattern given in insns[] has the COND_ALWAYS condition,
8719 and it is an error if the caller tried to override that. */
8724 /* Top 12 of 16 bits to bits 19:8. */
8725 inst
.instruction
|= (inst
.operands
[0].imm
& 0xfff0) << 4;
8727 /* Bottom 4 of 16 bits to bits 3:0. */
8728 inst
.instruction
|= inst
.operands
[0].imm
& 0xf;
8732 encode_branch (int default_reloc
)
8734 if (inst
.operands
[0].hasreloc
)
8736 constraint (inst
.operands
[0].imm
!= BFD_RELOC_ARM_PLT32
8737 && inst
.operands
[0].imm
!= BFD_RELOC_ARM_TLS_CALL
,
8738 _("the only valid suffixes here are '(plt)' and '(tlscall)'"));
8739 inst
.relocs
[0].type
= inst
.operands
[0].imm
== BFD_RELOC_ARM_PLT32
8740 ? BFD_RELOC_ARM_PLT32
8741 : thumb_mode
? BFD_RELOC_ARM_THM_TLS_CALL
: BFD_RELOC_ARM_TLS_CALL
;
8744 inst
.relocs
[0].type
= (bfd_reloc_code_real_type
) default_reloc
;
8745 inst
.relocs
[0].pc_rel
= 1;
8752 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
)
8753 encode_branch (BFD_RELOC_ARM_PCREL_JUMP
);
8756 encode_branch (BFD_RELOC_ARM_PCREL_BRANCH
);
8763 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
)
8765 if (inst
.cond
== COND_ALWAYS
)
8766 encode_branch (BFD_RELOC_ARM_PCREL_CALL
);
8768 encode_branch (BFD_RELOC_ARM_PCREL_JUMP
);
8772 encode_branch (BFD_RELOC_ARM_PCREL_BRANCH
);
8775 /* ARM V5 branch-link-exchange instruction (argument parse)
8776 BLX <target_addr> ie BLX(1)
8777 BLX{<condition>} <Rm> ie BLX(2)
8778 Unfortunately, there are two different opcodes for this mnemonic.
8779 So, the insns[].value is not used, and the code here zaps values
8780 into inst.instruction.
8781 Also, the <target_addr> can be 25 bits, hence has its own reloc. */
8786 if (inst
.operands
[0].isreg
)
8788 /* Arg is a register; the opcode provided by insns[] is correct.
8789 It is not illegal to do "blx pc", just useless. */
8790 if (inst
.operands
[0].reg
== REG_PC
)
8791 as_tsktsk (_("use of r15 in blx in ARM mode is not really useful"));
8793 inst
.instruction
|= inst
.operands
[0].reg
;
8797 /* Arg is an address; this instruction cannot be executed
8798 conditionally, and the opcode must be adjusted.
8799 We retain the BFD_RELOC_ARM_PCREL_BLX till the very end
8800 where we generate out a BFD_RELOC_ARM_PCREL_CALL instead. */
8801 constraint (inst
.cond
!= COND_ALWAYS
, BAD_COND
);
8802 inst
.instruction
= 0xfa000000;
8803 encode_branch (BFD_RELOC_ARM_PCREL_BLX
);
8810 bfd_boolean want_reloc
;
8812 if (inst
.operands
[0].reg
== REG_PC
)
8813 as_tsktsk (_("use of r15 in bx in ARM mode is not really useful"));
8815 inst
.instruction
|= inst
.operands
[0].reg
;
8816 /* Output R_ARM_V4BX relocations if is an EABI object that looks like
8817 it is for ARMv4t or earlier. */
8818 want_reloc
= !ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5
);
8819 if (!ARM_FEATURE_ZERO (selected_object_arch
)
8820 && !ARM_CPU_HAS_FEATURE (selected_object_arch
, arm_ext_v5
))
8824 if (EF_ARM_EABI_VERSION (meabi_flags
) < EF_ARM_EABI_VER4
)
8829 inst
.relocs
[0].type
= BFD_RELOC_ARM_V4BX
;
8833 /* ARM v5TEJ. Jump to Jazelle code. */
8838 if (inst
.operands
[0].reg
== REG_PC
)
8839 as_tsktsk (_("use of r15 in bxj is not really useful"));
8841 inst
.instruction
|= inst
.operands
[0].reg
;
8844 /* Co-processor data operation:
8845 CDP{cond} <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}
8846 CDP2 <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>} */
8850 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8851 inst
.instruction
|= inst
.operands
[1].imm
<< 20;
8852 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
8853 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
8854 inst
.instruction
|= inst
.operands
[4].reg
;
8855 inst
.instruction
|= inst
.operands
[5].imm
<< 5;
8861 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
8862 encode_arm_shifter_operand (1);
8865 /* Transfer between coprocessor and ARM registers.
8866 MRC{cond} <coproc>, <opcode_1>, <Rd>, <CRn>, <CRm>{, <opcode_2>}
8871 No special properties. */
8873 struct deprecated_coproc_regs_s
8880 arm_feature_set deprecated
;
8881 arm_feature_set obsoleted
;
8882 const char *dep_msg
;
8883 const char *obs_msg
;
8886 #define DEPR_ACCESS_V8 \
8887 N_("This coprocessor register access is deprecated in ARMv8")
8889 /* Table of all deprecated coprocessor registers. */
8890 static struct deprecated_coproc_regs_s deprecated_coproc_regs
[] =
8892 {15, 0, 7, 10, 5, /* CP15DMB. */
8893 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
), ARM_ARCH_NONE
,
8894 DEPR_ACCESS_V8
, NULL
},
8895 {15, 0, 7, 10, 4, /* CP15DSB. */
8896 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
), ARM_ARCH_NONE
,
8897 DEPR_ACCESS_V8
, NULL
},
8898 {15, 0, 7, 5, 4, /* CP15ISB. */
8899 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
), ARM_ARCH_NONE
,
8900 DEPR_ACCESS_V8
, NULL
},
8901 {14, 6, 1, 0, 0, /* TEEHBR. */
8902 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
), ARM_ARCH_NONE
,
8903 DEPR_ACCESS_V8
, NULL
},
8904 {14, 6, 0, 0, 0, /* TEECR. */
8905 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
), ARM_ARCH_NONE
,
8906 DEPR_ACCESS_V8
, NULL
},
8909 #undef DEPR_ACCESS_V8
8911 static const size_t deprecated_coproc_reg_count
=
8912 sizeof (deprecated_coproc_regs
) / sizeof (deprecated_coproc_regs
[0]);
8920 Rd
= inst
.operands
[2].reg
;
8923 if (inst
.instruction
== 0xee000010
8924 || inst
.instruction
== 0xfe000010)
8926 reject_bad_reg (Rd
);
8927 else if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8
))
8929 constraint (Rd
== REG_SP
, BAD_SP
);
8934 if (inst
.instruction
== 0xe000010)
8935 constraint (Rd
== REG_PC
, BAD_PC
);
8938 for (i
= 0; i
< deprecated_coproc_reg_count
; ++i
)
8940 const struct deprecated_coproc_regs_s
*r
=
8941 deprecated_coproc_regs
+ i
;
8943 if (inst
.operands
[0].reg
== r
->cp
8944 && inst
.operands
[1].imm
== r
->opc1
8945 && inst
.operands
[3].reg
== r
->crn
8946 && inst
.operands
[4].reg
== r
->crm
8947 && inst
.operands
[5].imm
== r
->opc2
)
8949 if (! ARM_CPU_IS_ANY (cpu_variant
)
8950 && warn_on_deprecated
8951 && ARM_CPU_HAS_FEATURE (cpu_variant
, r
->deprecated
))
8952 as_tsktsk ("%s", r
->dep_msg
);
8956 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8957 inst
.instruction
|= inst
.operands
[1].imm
<< 21;
8958 inst
.instruction
|= Rd
<< 12;
8959 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
8960 inst
.instruction
|= inst
.operands
[4].reg
;
8961 inst
.instruction
|= inst
.operands
[5].imm
<< 5;
8964 /* Transfer between coprocessor register and pair of ARM registers.
8965 MCRR{cond} <coproc>, <opcode>, <Rd>, <Rn>, <CRm>.
8970 Two XScale instructions are special cases of these:
8972 MAR{cond} acc0, <RdLo>, <RdHi> == MCRR{cond} p0, #0, <RdLo>, <RdHi>, c0
8973 MRA{cond} acc0, <RdLo>, <RdHi> == MRRC{cond} p0, #0, <RdLo>, <RdHi>, c0
8975 Result unpredictable if Rd or Rn is R15. */
8982 Rd
= inst
.operands
[2].reg
;
8983 Rn
= inst
.operands
[3].reg
;
8987 reject_bad_reg (Rd
);
8988 reject_bad_reg (Rn
);
8992 constraint (Rd
== REG_PC
, BAD_PC
);
8993 constraint (Rn
== REG_PC
, BAD_PC
);
8996 /* Only check the MRRC{2} variants. */
8997 if ((inst
.instruction
& 0x0FF00000) == 0x0C500000)
8999 /* If Rd == Rn, error that the operation is
9000 unpredictable (example MRRC p3,#1,r1,r1,c4). */
9001 constraint (Rd
== Rn
, BAD_OVERLAP
);
9004 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9005 inst
.instruction
|= inst
.operands
[1].imm
<< 4;
9006 inst
.instruction
|= Rd
<< 12;
9007 inst
.instruction
|= Rn
<< 16;
9008 inst
.instruction
|= inst
.operands
[4].reg
;
9014 inst
.instruction
|= inst
.operands
[0].imm
<< 6;
9015 if (inst
.operands
[1].present
)
9017 inst
.instruction
|= CPSI_MMOD
;
9018 inst
.instruction
|= inst
.operands
[1].imm
;
9025 inst
.instruction
|= inst
.operands
[0].imm
;
9031 unsigned Rd
, Rn
, Rm
;
9033 Rd
= inst
.operands
[0].reg
;
9034 Rn
= (inst
.operands
[1].present
9035 ? inst
.operands
[1].reg
: Rd
);
9036 Rm
= inst
.operands
[2].reg
;
9038 constraint ((Rd
== REG_PC
), BAD_PC
);
9039 constraint ((Rn
== REG_PC
), BAD_PC
);
9040 constraint ((Rm
== REG_PC
), BAD_PC
);
9042 inst
.instruction
|= Rd
<< 16;
9043 inst
.instruction
|= Rn
<< 0;
9044 inst
.instruction
|= Rm
<< 8;
9050 /* There is no IT instruction in ARM mode. We
9051 process it to do the validation as if in
9052 thumb mode, just in case the code gets
9053 assembled for thumb using the unified syntax. */
9058 set_it_insn_type (IT_INSN
);
9059 now_it
.mask
= (inst
.instruction
& 0xf) | 0x10;
9060 now_it
.cc
= inst
.operands
[0].imm
;
9064 /* If there is only one register in the register list,
9065 then return its register number. Otherwise return -1. */
9067 only_one_reg_in_list (int range
)
9069 int i
= ffs (range
) - 1;
9070 return (i
> 15 || range
!= (1 << i
)) ? -1 : i
;
9074 encode_ldmstm(int from_push_pop_mnem
)
9076 int base_reg
= inst
.operands
[0].reg
;
9077 int range
= inst
.operands
[1].imm
;
9080 inst
.instruction
|= base_reg
<< 16;
9081 inst
.instruction
|= range
;
9083 if (inst
.operands
[1].writeback
)
9084 inst
.instruction
|= LDM_TYPE_2_OR_3
;
9086 if (inst
.operands
[0].writeback
)
9088 inst
.instruction
|= WRITE_BACK
;
9089 /* Check for unpredictable uses of writeback. */
9090 if (inst
.instruction
& LOAD_BIT
)
9092 /* Not allowed in LDM type 2. */
9093 if ((inst
.instruction
& LDM_TYPE_2_OR_3
)
9094 && ((range
& (1 << REG_PC
)) == 0))
9095 as_warn (_("writeback of base register is UNPREDICTABLE"));
9096 /* Only allowed if base reg not in list for other types. */
9097 else if (range
& (1 << base_reg
))
9098 as_warn (_("writeback of base register when in register list is UNPREDICTABLE"));
9102 /* Not allowed for type 2. */
9103 if (inst
.instruction
& LDM_TYPE_2_OR_3
)
9104 as_warn (_("writeback of base register is UNPREDICTABLE"));
9105 /* Only allowed if base reg not in list, or first in list. */
9106 else if ((range
& (1 << base_reg
))
9107 && (range
& ((1 << base_reg
) - 1)))
9108 as_warn (_("if writeback register is in list, it must be the lowest reg in the list"));
9112 /* If PUSH/POP has only one register, then use the A2 encoding. */
9113 one_reg
= only_one_reg_in_list (range
);
9114 if (from_push_pop_mnem
&& one_reg
>= 0)
9116 int is_push
= (inst
.instruction
& A_PUSH_POP_OP_MASK
) == A1_OPCODE_PUSH
;
9118 if (is_push
&& one_reg
== 13 /* SP */)
9119 /* PR 22483: The A2 encoding cannot be used when
9120 pushing the stack pointer as this is UNPREDICTABLE. */
9123 inst
.instruction
&= A_COND_MASK
;
9124 inst
.instruction
|= is_push
? A2_OPCODE_PUSH
: A2_OPCODE_POP
;
9125 inst
.instruction
|= one_reg
<< 12;
9132 encode_ldmstm (/*from_push_pop_mnem=*/FALSE
);
9135 /* ARMv5TE load-consecutive (argument parse)
9144 constraint (inst
.operands
[0].reg
% 2 != 0,
9145 _("first transfer register must be even"));
9146 constraint (inst
.operands
[1].present
9147 && inst
.operands
[1].reg
!= inst
.operands
[0].reg
+ 1,
9148 _("can only transfer two consecutive registers"));
9149 constraint (inst
.operands
[0].reg
== REG_LR
, _("r14 not allowed here"));
9150 constraint (!inst
.operands
[2].isreg
, _("'[' expected"));
9152 if (!inst
.operands
[1].present
)
9153 inst
.operands
[1].reg
= inst
.operands
[0].reg
+ 1;
9155 /* encode_arm_addr_mode_3 will diagnose overlap between the base
9156 register and the first register written; we have to diagnose
9157 overlap between the base and the second register written here. */
9159 if (inst
.operands
[2].reg
== inst
.operands
[1].reg
9160 && (inst
.operands
[2].writeback
|| inst
.operands
[2].postind
))
9161 as_warn (_("base register written back, and overlaps "
9162 "second transfer register"));
9164 if (!(inst
.instruction
& V4_STR_BIT
))
9166 /* For an index-register load, the index register must not overlap the
9167 destination (even if not write-back). */
9168 if (inst
.operands
[2].immisreg
9169 && ((unsigned) inst
.operands
[2].imm
== inst
.operands
[0].reg
9170 || (unsigned) inst
.operands
[2].imm
== inst
.operands
[1].reg
))
9171 as_warn (_("index register overlaps transfer register"));
9173 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9174 encode_arm_addr_mode_3 (2, /*is_t=*/FALSE
);
9180 constraint (!inst
.operands
[1].isreg
|| !inst
.operands
[1].preind
9181 || inst
.operands
[1].postind
|| inst
.operands
[1].writeback
9182 || inst
.operands
[1].immisreg
|| inst
.operands
[1].shifted
9183 || inst
.operands
[1].negative
9184 /* This can arise if the programmer has written
9186 or if they have mistakenly used a register name as the last
9189 It is very difficult to distinguish between these two cases
9190 because "rX" might actually be a label. ie the register
9191 name has been occluded by a symbol of the same name. So we
9192 just generate a general 'bad addressing mode' type error
9193 message and leave it up to the programmer to discover the
9194 true cause and fix their mistake. */
9195 || (inst
.operands
[1].reg
== REG_PC
),
9198 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
9199 || inst
.relocs
[0].exp
.X_add_number
!= 0,
9200 _("offset must be zero in ARM encoding"));
9202 constraint ((inst
.operands
[1].reg
== REG_PC
), BAD_PC
);
9204 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9205 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9206 inst
.relocs
[0].type
= BFD_RELOC_UNUSED
;
9212 constraint (inst
.operands
[0].reg
% 2 != 0,
9213 _("even register required"));
9214 constraint (inst
.operands
[1].present
9215 && inst
.operands
[1].reg
!= inst
.operands
[0].reg
+ 1,
9216 _("can only load two consecutive registers"));
9217 /* If op 1 were present and equal to PC, this function wouldn't
9218 have been called in the first place. */
9219 constraint (inst
.operands
[0].reg
== REG_LR
, _("r14 not allowed here"));
9221 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9222 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
9225 /* In both ARM and thumb state 'ldr pc, #imm' with an immediate
9226 which is not a multiple of four is UNPREDICTABLE. */
9228 check_ldr_r15_aligned (void)
9230 constraint (!(inst
.operands
[1].immisreg
)
9231 && (inst
.operands
[0].reg
== REG_PC
9232 && inst
.operands
[1].reg
== REG_PC
9233 && (inst
.relocs
[0].exp
.X_add_number
& 0x3)),
9234 _("ldr to register 15 must be 4-byte aligned"));
9240 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9241 if (!inst
.operands
[1].isreg
)
9242 if (move_or_literal_pool (0, CONST_ARM
, /*mode_3=*/FALSE
))
9244 encode_arm_addr_mode_2 (1, /*is_t=*/FALSE
);
9245 check_ldr_r15_aligned ();
9251 /* ldrt/strt always use post-indexed addressing. Turn [Rn] into [Rn]! and
9253 if (inst
.operands
[1].preind
)
9255 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
9256 || inst
.relocs
[0].exp
.X_add_number
!= 0,
9257 _("this instruction requires a post-indexed address"));
9259 inst
.operands
[1].preind
= 0;
9260 inst
.operands
[1].postind
= 1;
9261 inst
.operands
[1].writeback
= 1;
9263 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9264 encode_arm_addr_mode_2 (1, /*is_t=*/TRUE
);
9267 /* Halfword and signed-byte load/store operations. */
9272 constraint (inst
.operands
[0].reg
== REG_PC
, BAD_PC
);
9273 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9274 if (!inst
.operands
[1].isreg
)
9275 if (move_or_literal_pool (0, CONST_ARM
, /*mode_3=*/TRUE
))
9277 encode_arm_addr_mode_3 (1, /*is_t=*/FALSE
);
9283 /* ldrt/strt always use post-indexed addressing. Turn [Rn] into [Rn]! and
9285 if (inst
.operands
[1].preind
)
9287 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
9288 || inst
.relocs
[0].exp
.X_add_number
!= 0,
9289 _("this instruction requires a post-indexed address"));
9291 inst
.operands
[1].preind
= 0;
9292 inst
.operands
[1].postind
= 1;
9293 inst
.operands
[1].writeback
= 1;
9295 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9296 encode_arm_addr_mode_3 (1, /*is_t=*/TRUE
);
9299 /* Co-processor register load/store.
9300 Format: <LDC|STC>{cond}[L] CP#,CRd,<address> */
9304 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9305 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
9306 encode_arm_cp_address (2, TRUE
, TRUE
, 0);
9312 /* This restriction does not apply to mls (nor to mla in v6 or later). */
9313 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
9314 && !ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6
)
9315 && !(inst
.instruction
& 0x00400000))
9316 as_tsktsk (_("Rd and Rm should be different in mla"));
9318 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9319 inst
.instruction
|= inst
.operands
[1].reg
;
9320 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
9321 inst
.instruction
|= inst
.operands
[3].reg
<< 12;
9327 constraint (inst
.relocs
[0].type
>= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
9328 && inst
.relocs
[0].type
<= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
,
9330 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9331 encode_arm_shifter_operand (1);
9334 /* ARM V6T2 16-bit immediate register load: MOV[WT]{cond} Rd, #<imm16>. */
9341 top
= (inst
.instruction
& 0x00400000) != 0;
9342 constraint (top
&& inst
.relocs
[0].type
== BFD_RELOC_ARM_MOVW
,
9343 _(":lower16: not allowed in this instruction"));
9344 constraint (!top
&& inst
.relocs
[0].type
== BFD_RELOC_ARM_MOVT
,
9345 _(":upper16: not allowed in this instruction"));
9346 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9347 if (inst
.relocs
[0].type
== BFD_RELOC_UNUSED
)
9349 imm
= inst
.relocs
[0].exp
.X_add_number
;
9350 /* The value is in two pieces: 0:11, 16:19. */
9351 inst
.instruction
|= (imm
& 0x00000fff);
9352 inst
.instruction
|= (imm
& 0x0000f000) << 4;
9357 do_vfp_nsyn_mrs (void)
9359 if (inst
.operands
[0].isvec
)
9361 if (inst
.operands
[1].reg
!= 1)
9362 first_error (_("operand 1 must be FPSCR"));
9363 memset (&inst
.operands
[0], '\0', sizeof (inst
.operands
[0]));
9364 memset (&inst
.operands
[1], '\0', sizeof (inst
.operands
[1]));
9365 do_vfp_nsyn_opcode ("fmstat");
9367 else if (inst
.operands
[1].isvec
)
9368 do_vfp_nsyn_opcode ("fmrx");
9376 do_vfp_nsyn_msr (void)
9378 if (inst
.operands
[0].isvec
)
9379 do_vfp_nsyn_opcode ("fmxr");
9389 unsigned Rt
= inst
.operands
[0].reg
;
9391 if (thumb_mode
&& Rt
== REG_SP
)
9393 inst
.error
= BAD_SP
;
9397 /* MVFR2 is only valid at ARMv8-A. */
9398 if (inst
.operands
[1].reg
== 5)
9399 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
9402 /* APSR_ sets isvec. All other refs to PC are illegal. */
9403 if (!inst
.operands
[0].isvec
&& Rt
== REG_PC
)
9405 inst
.error
= BAD_PC
;
9409 /* If we get through parsing the register name, we just insert the number
9410 generated into the instruction without further validation. */
9411 inst
.instruction
|= (inst
.operands
[1].reg
<< 16);
9412 inst
.instruction
|= (Rt
<< 12);
9418 unsigned Rt
= inst
.operands
[1].reg
;
9421 reject_bad_reg (Rt
);
9422 else if (Rt
== REG_PC
)
9424 inst
.error
= BAD_PC
;
9428 /* MVFR2 is only valid for ARMv8-A. */
9429 if (inst
.operands
[0].reg
== 5)
9430 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
9433 /* If we get through parsing the register name, we just insert the number
9434 generated into the instruction without further validation. */
9435 inst
.instruction
|= (inst
.operands
[0].reg
<< 16);
9436 inst
.instruction
|= (Rt
<< 12);
9444 if (do_vfp_nsyn_mrs () == SUCCESS
)
9447 constraint (inst
.operands
[0].reg
== REG_PC
, BAD_PC
);
9448 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9450 if (inst
.operands
[1].isreg
)
9452 br
= inst
.operands
[1].reg
;
9453 if (((br
& 0x200) == 0) && ((br
& 0xf0000) != 0xf0000))
9454 as_bad (_("bad register for mrs"));
9458 /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all. */
9459 constraint ((inst
.operands
[1].imm
& (PSR_c
|PSR_x
|PSR_s
|PSR_f
))
9461 _("'APSR', 'CPSR' or 'SPSR' expected"));
9462 br
= (15<<16) | (inst
.operands
[1].imm
& SPSR_BIT
);
9465 inst
.instruction
|= br
;
9468 /* Two possible forms:
9469 "{C|S}PSR_<field>, Rm",
9470 "{C|S}PSR_f, #expression". */
9475 if (do_vfp_nsyn_msr () == SUCCESS
)
9478 inst
.instruction
|= inst
.operands
[0].imm
;
9479 if (inst
.operands
[1].isreg
)
9480 inst
.instruction
|= inst
.operands
[1].reg
;
9483 inst
.instruction
|= INST_IMMEDIATE
;
9484 inst
.relocs
[0].type
= BFD_RELOC_ARM_IMMEDIATE
;
9485 inst
.relocs
[0].pc_rel
= 0;
9492 constraint (inst
.operands
[2].reg
== REG_PC
, BAD_PC
);
9494 if (!inst
.operands
[2].present
)
9495 inst
.operands
[2].reg
= inst
.operands
[0].reg
;
9496 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9497 inst
.instruction
|= inst
.operands
[1].reg
;
9498 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
9500 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
9501 && !ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6
))
9502 as_tsktsk (_("Rd and Rm should be different in mul"));
9505 /* Long Multiply Parser
9506 UMULL RdLo, RdHi, Rm, Rs
9507 SMULL RdLo, RdHi, Rm, Rs
9508 UMLAL RdLo, RdHi, Rm, Rs
9509 SMLAL RdLo, RdHi, Rm, Rs. */
9514 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9515 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9516 inst
.instruction
|= inst
.operands
[2].reg
;
9517 inst
.instruction
|= inst
.operands
[3].reg
<< 8;
9519 /* rdhi and rdlo must be different. */
9520 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
)
9521 as_tsktsk (_("rdhi and rdlo must be different"));
9523 /* rdhi, rdlo and rm must all be different before armv6. */
9524 if ((inst
.operands
[0].reg
== inst
.operands
[2].reg
9525 || inst
.operands
[1].reg
== inst
.operands
[2].reg
)
9526 && !ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6
))
9527 as_tsktsk (_("rdhi, rdlo and rm must all be different"));
9533 if (inst
.operands
[0].present
9534 || ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6k
))
9536 /* Architectural NOP hints are CPSR sets with no bits selected. */
9537 inst
.instruction
&= 0xf0000000;
9538 inst
.instruction
|= 0x0320f000;
9539 if (inst
.operands
[0].present
)
9540 inst
.instruction
|= inst
.operands
[0].imm
;
9544 /* ARM V6 Pack Halfword Bottom Top instruction (argument parse).
9545 PKHBT {<cond>} <Rd>, <Rn>, <Rm> {, LSL #<shift_imm>}
9546 Condition defaults to COND_ALWAYS.
9547 Error if Rd, Rn or Rm are R15. */
9552 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9553 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9554 inst
.instruction
|= inst
.operands
[2].reg
;
9555 if (inst
.operands
[3].present
)
9556 encode_arm_shift (3);
9559 /* ARM V6 PKHTB (Argument Parse). */
9564 if (!inst
.operands
[3].present
)
9566 /* If the shift specifier is omitted, turn the instruction
9567 into pkhbt rd, rm, rn. */
9568 inst
.instruction
&= 0xfff00010;
9569 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9570 inst
.instruction
|= inst
.operands
[1].reg
;
9571 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
9575 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9576 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9577 inst
.instruction
|= inst
.operands
[2].reg
;
9578 encode_arm_shift (3);
9582 /* ARMv5TE: Preload-Cache
9583 MP Extensions: Preload for write
9587 Syntactically, like LDR with B=1, W=0, L=1. */
9592 constraint (!inst
.operands
[0].isreg
,
9593 _("'[' expected after PLD mnemonic"));
9594 constraint (inst
.operands
[0].postind
,
9595 _("post-indexed expression used in preload instruction"));
9596 constraint (inst
.operands
[0].writeback
,
9597 _("writeback used in preload instruction"));
9598 constraint (!inst
.operands
[0].preind
,
9599 _("unindexed addressing used in preload instruction"));
9600 encode_arm_addr_mode_2 (0, /*is_t=*/FALSE
);
9603 /* ARMv7: PLI <addr_mode> */
9607 constraint (!inst
.operands
[0].isreg
,
9608 _("'[' expected after PLI mnemonic"));
9609 constraint (inst
.operands
[0].postind
,
9610 _("post-indexed expression used in preload instruction"));
9611 constraint (inst
.operands
[0].writeback
,
9612 _("writeback used in preload instruction"));
9613 constraint (!inst
.operands
[0].preind
,
9614 _("unindexed addressing used in preload instruction"));
9615 encode_arm_addr_mode_2 (0, /*is_t=*/FALSE
);
9616 inst
.instruction
&= ~PRE_INDEX
;
9622 constraint (inst
.operands
[0].writeback
,
9623 _("push/pop do not support {reglist}^"));
9624 inst
.operands
[1] = inst
.operands
[0];
9625 memset (&inst
.operands
[0], 0, sizeof inst
.operands
[0]);
9626 inst
.operands
[0].isreg
= 1;
9627 inst
.operands
[0].writeback
= 1;
9628 inst
.operands
[0].reg
= REG_SP
;
9629 encode_ldmstm (/*from_push_pop_mnem=*/TRUE
);
9632 /* ARM V6 RFE (Return from Exception) loads the PC and CPSR from the
9633 word at the specified address and the following word
9635 Unconditionally executed.
9636 Error if Rn is R15. */
9641 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9642 if (inst
.operands
[0].writeback
)
9643 inst
.instruction
|= WRITE_BACK
;
9646 /* ARM V6 ssat (argument parse). */
9651 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9652 inst
.instruction
|= (inst
.operands
[1].imm
- 1) << 16;
9653 inst
.instruction
|= inst
.operands
[2].reg
;
9655 if (inst
.operands
[3].present
)
9656 encode_arm_shift (3);
9659 /* ARM V6 usat (argument parse). */
9664 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9665 inst
.instruction
|= inst
.operands
[1].imm
<< 16;
9666 inst
.instruction
|= inst
.operands
[2].reg
;
9668 if (inst
.operands
[3].present
)
9669 encode_arm_shift (3);
9672 /* ARM V6 ssat16 (argument parse). */
9677 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9678 inst
.instruction
|= ((inst
.operands
[1].imm
- 1) << 16);
9679 inst
.instruction
|= inst
.operands
[2].reg
;
9685 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9686 inst
.instruction
|= inst
.operands
[1].imm
<< 16;
9687 inst
.instruction
|= inst
.operands
[2].reg
;
9690 /* ARM V6 SETEND (argument parse). Sets the E bit in the CPSR while
9691 preserving the other bits.
9693 setend <endian_specifier>, where <endian_specifier> is either
9699 if (warn_on_deprecated
9700 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8
))
9701 as_tsktsk (_("setend use is deprecated for ARMv8"));
9703 if (inst
.operands
[0].imm
)
9704 inst
.instruction
|= 0x200;
9710 unsigned int Rm
= (inst
.operands
[1].present
9711 ? inst
.operands
[1].reg
9712 : inst
.operands
[0].reg
);
9714 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9715 inst
.instruction
|= Rm
;
9716 if (inst
.operands
[2].isreg
) /* Rd, {Rm,} Rs */
9718 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
9719 inst
.instruction
|= SHIFT_BY_REG
;
9720 /* PR 12854: Error on extraneous shifts. */
9721 constraint (inst
.operands
[2].shifted
,
9722 _("extraneous shift as part of operand to shift insn"));
9725 inst
.relocs
[0].type
= BFD_RELOC_ARM_SHIFT_IMM
;
9731 inst
.relocs
[0].type
= BFD_RELOC_ARM_SMC
;
9732 inst
.relocs
[0].pc_rel
= 0;
9738 inst
.relocs
[0].type
= BFD_RELOC_ARM_HVC
;
9739 inst
.relocs
[0].pc_rel
= 0;
9745 inst
.relocs
[0].type
= BFD_RELOC_ARM_SWI
;
9746 inst
.relocs
[0].pc_rel
= 0;
9752 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_pan
),
9753 _("selected processor does not support SETPAN instruction"));
9755 inst
.instruction
|= ((inst
.operands
[0].imm
& 1) << 9);
9761 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_pan
),
9762 _("selected processor does not support SETPAN instruction"));
9764 inst
.instruction
|= (inst
.operands
[0].imm
<< 3);
9767 /* ARM V5E (El Segundo) signed-multiply-accumulate (argument parse)
9768 SMLAxy{cond} Rd,Rm,Rs,Rn
9769 SMLAWy{cond} Rd,Rm,Rs,Rn
9770 Error if any register is R15. */
9775 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9776 inst
.instruction
|= inst
.operands
[1].reg
;
9777 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
9778 inst
.instruction
|= inst
.operands
[3].reg
<< 12;
9781 /* ARM V5E (El Segundo) signed-multiply-accumulate-long (argument parse)
9782 SMLALxy{cond} Rdlo,Rdhi,Rm,Rs
9783 Error if any register is R15.
9784 Warning if Rdlo == Rdhi. */
9789 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9790 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9791 inst
.instruction
|= inst
.operands
[2].reg
;
9792 inst
.instruction
|= inst
.operands
[3].reg
<< 8;
9794 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
)
9795 as_tsktsk (_("rdhi and rdlo must be different"));
9798 /* ARM V5E (El Segundo) signed-multiply (argument parse)
9799 SMULxy{cond} Rd,Rm,Rs
9800 Error if any register is R15. */
9805 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9806 inst
.instruction
|= inst
.operands
[1].reg
;
9807 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
9810 /* ARM V6 srs (argument parse). The variable fields in the encoding are
9811 the same for both ARM and Thumb-2. */
9818 if (inst
.operands
[0].present
)
9820 reg
= inst
.operands
[0].reg
;
9821 constraint (reg
!= REG_SP
, _("SRS base register must be r13"));
9826 inst
.instruction
|= reg
<< 16;
9827 inst
.instruction
|= inst
.operands
[1].imm
;
9828 if (inst
.operands
[0].writeback
|| inst
.operands
[1].writeback
)
9829 inst
.instruction
|= WRITE_BACK
;
9832 /* ARM V6 strex (argument parse). */
9837 constraint (!inst
.operands
[2].isreg
|| !inst
.operands
[2].preind
9838 || inst
.operands
[2].postind
|| inst
.operands
[2].writeback
9839 || inst
.operands
[2].immisreg
|| inst
.operands
[2].shifted
9840 || inst
.operands
[2].negative
9841 /* See comment in do_ldrex(). */
9842 || (inst
.operands
[2].reg
== REG_PC
),
9845 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
9846 || inst
.operands
[0].reg
== inst
.operands
[2].reg
, BAD_OVERLAP
);
9848 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
9849 || inst
.relocs
[0].exp
.X_add_number
!= 0,
9850 _("offset must be zero in ARM encoding"));
9852 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9853 inst
.instruction
|= inst
.operands
[1].reg
;
9854 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
9855 inst
.relocs
[0].type
= BFD_RELOC_UNUSED
;
9861 constraint (!inst
.operands
[2].isreg
|| !inst
.operands
[2].preind
9862 || inst
.operands
[2].postind
|| inst
.operands
[2].writeback
9863 || inst
.operands
[2].immisreg
|| inst
.operands
[2].shifted
9864 || inst
.operands
[2].negative
,
9867 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
9868 || inst
.operands
[0].reg
== inst
.operands
[2].reg
, BAD_OVERLAP
);
9876 constraint (inst
.operands
[1].reg
% 2 != 0,
9877 _("even register required"));
9878 constraint (inst
.operands
[2].present
9879 && inst
.operands
[2].reg
!= inst
.operands
[1].reg
+ 1,
9880 _("can only store two consecutive registers"));
9881 /* If op 2 were present and equal to PC, this function wouldn't
9882 have been called in the first place. */
9883 constraint (inst
.operands
[1].reg
== REG_LR
, _("r14 not allowed here"));
9885 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
9886 || inst
.operands
[0].reg
== inst
.operands
[1].reg
+ 1
9887 || inst
.operands
[0].reg
== inst
.operands
[3].reg
,
9890 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9891 inst
.instruction
|= inst
.operands
[1].reg
;
9892 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
9899 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
9900 || inst
.operands
[0].reg
== inst
.operands
[2].reg
, BAD_OVERLAP
);
9908 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
9909 || inst
.operands
[0].reg
== inst
.operands
[2].reg
, BAD_OVERLAP
);
9914 /* ARM V6 SXTAH extracts a 16-bit value from a register, sign
9915 extends it to 32-bits, and adds the result to a value in another
9916 register. You can specify a rotation by 0, 8, 16, or 24 bits
9917 before extracting the 16-bit value.
9918 SXTAH{<cond>} <Rd>, <Rn>, <Rm>{, <rotation>}
9919 Condition defaults to COND_ALWAYS.
9920 Error if any register uses R15. */
9925 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9926 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9927 inst
.instruction
|= inst
.operands
[2].reg
;
9928 inst
.instruction
|= inst
.operands
[3].imm
<< 10;
9933 SXTH {<cond>} <Rd>, <Rm>{, <rotation>}
9934 Condition defaults to COND_ALWAYS.
9935 Error if any register uses R15. */
9940 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9941 inst
.instruction
|= inst
.operands
[1].reg
;
9942 inst
.instruction
|= inst
.operands
[2].imm
<< 10;
9945 /* VFP instructions. In a logical order: SP variant first, monad
9946 before dyad, arithmetic then move then load/store. */
9949 do_vfp_sp_monadic (void)
9951 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
9952 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sm
);
9956 do_vfp_sp_dyadic (void)
9958 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
9959 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sn
);
9960 encode_arm_vfp_reg (inst
.operands
[2].reg
, VFP_REG_Sm
);
9964 do_vfp_sp_compare_z (void)
9966 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
9970 do_vfp_dp_sp_cvt (void)
9972 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
9973 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sm
);
9977 do_vfp_sp_dp_cvt (void)
9979 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
9980 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dm
);
9984 do_vfp_reg_from_sp (void)
9986 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9987 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sn
);
9991 do_vfp_reg2_from_sp2 (void)
9993 constraint (inst
.operands
[2].imm
!= 2,
9994 _("only two consecutive VFP SP registers allowed here"));
9995 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9996 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9997 encode_arm_vfp_reg (inst
.operands
[2].reg
, VFP_REG_Sm
);
10001 do_vfp_sp_from_reg (void)
10003 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sn
);
10004 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
10008 do_vfp_sp2_from_reg2 (void)
10010 constraint (inst
.operands
[0].imm
!= 2,
10011 _("only two consecutive VFP SP registers allowed here"));
10012 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sm
);
10013 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
10014 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
10018 do_vfp_sp_ldst (void)
10020 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
10021 encode_arm_cp_address (1, FALSE
, TRUE
, 0);
10025 do_vfp_dp_ldst (void)
10027 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
10028 encode_arm_cp_address (1, FALSE
, TRUE
, 0);
10033 vfp_sp_ldstm (enum vfp_ldstm_type ldstm_type
)
10035 if (inst
.operands
[0].writeback
)
10036 inst
.instruction
|= WRITE_BACK
;
10038 constraint (ldstm_type
!= VFP_LDSTMIA
,
10039 _("this addressing mode requires base-register writeback"));
10040 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
10041 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sd
);
10042 inst
.instruction
|= inst
.operands
[1].imm
;
10046 vfp_dp_ldstm (enum vfp_ldstm_type ldstm_type
)
10050 if (inst
.operands
[0].writeback
)
10051 inst
.instruction
|= WRITE_BACK
;
10053 constraint (ldstm_type
!= VFP_LDSTMIA
&& ldstm_type
!= VFP_LDSTMIAX
,
10054 _("this addressing mode requires base-register writeback"));
10056 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
10057 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dd
);
10059 count
= inst
.operands
[1].imm
<< 1;
10060 if (ldstm_type
== VFP_LDSTMIAX
|| ldstm_type
== VFP_LDSTMDBX
)
10063 inst
.instruction
|= count
;
10067 do_vfp_sp_ldstmia (void)
10069 vfp_sp_ldstm (VFP_LDSTMIA
);
10073 do_vfp_sp_ldstmdb (void)
10075 vfp_sp_ldstm (VFP_LDSTMDB
);
10079 do_vfp_dp_ldstmia (void)
10081 vfp_dp_ldstm (VFP_LDSTMIA
);
10085 do_vfp_dp_ldstmdb (void)
10087 vfp_dp_ldstm (VFP_LDSTMDB
);
10091 do_vfp_xp_ldstmia (void)
10093 vfp_dp_ldstm (VFP_LDSTMIAX
);
10097 do_vfp_xp_ldstmdb (void)
10099 vfp_dp_ldstm (VFP_LDSTMDBX
);
10103 do_vfp_dp_rd_rm (void)
10105 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
10106 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dm
);
10110 do_vfp_dp_rn_rd (void)
10112 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dn
);
10113 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dd
);
10117 do_vfp_dp_rd_rn (void)
10119 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
10120 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dn
);
10124 do_vfp_dp_rd_rn_rm (void)
10126 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
10127 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dn
);
10128 encode_arm_vfp_reg (inst
.operands
[2].reg
, VFP_REG_Dm
);
10132 do_vfp_dp_rd (void)
10134 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
10138 do_vfp_dp_rm_rd_rn (void)
10140 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dm
);
10141 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dd
);
10142 encode_arm_vfp_reg (inst
.operands
[2].reg
, VFP_REG_Dn
);
10145 /* VFPv3 instructions. */
10147 do_vfp_sp_const (void)
10149 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
10150 inst
.instruction
|= (inst
.operands
[1].imm
& 0xf0) << 12;
10151 inst
.instruction
|= (inst
.operands
[1].imm
& 0x0f);
10155 do_vfp_dp_const (void)
10157 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
10158 inst
.instruction
|= (inst
.operands
[1].imm
& 0xf0) << 12;
10159 inst
.instruction
|= (inst
.operands
[1].imm
& 0x0f);
10163 vfp_conv (int srcsize
)
10165 int immbits
= srcsize
- inst
.operands
[1].imm
;
10167 if (srcsize
== 16 && !(immbits
>= 0 && immbits
<= srcsize
))
10169 /* If srcsize is 16, inst.operands[1].imm must be in the range 0-16.
10170 i.e. immbits must be in range 0 - 16. */
10171 inst
.error
= _("immediate value out of range, expected range [0, 16]");
10174 else if (srcsize
== 32 && !(immbits
>= 0 && immbits
< srcsize
))
10176 /* If srcsize is 32, inst.operands[1].imm must be in the range 1-32.
10177 i.e. immbits must be in range 0 - 31. */
10178 inst
.error
= _("immediate value out of range, expected range [1, 32]");
10182 inst
.instruction
|= (immbits
& 1) << 5;
10183 inst
.instruction
|= (immbits
>> 1);
10187 do_vfp_sp_conv_16 (void)
10189 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
10194 do_vfp_dp_conv_16 (void)
10196 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
10201 do_vfp_sp_conv_32 (void)
10203 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
10208 do_vfp_dp_conv_32 (void)
10210 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
10214 /* FPA instructions. Also in a logical order. */
10219 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
10220 inst
.instruction
|= inst
.operands
[1].reg
;
10224 do_fpa_ldmstm (void)
10226 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10227 switch (inst
.operands
[1].imm
)
10229 case 1: inst
.instruction
|= CP_T_X
; break;
10230 case 2: inst
.instruction
|= CP_T_Y
; break;
10231 case 3: inst
.instruction
|= CP_T_Y
| CP_T_X
; break;
10236 if (inst
.instruction
& (PRE_INDEX
| INDEX_UP
))
10238 /* The instruction specified "ea" or "fd", so we can only accept
10239 [Rn]{!}. The instruction does not really support stacking or
10240 unstacking, so we have to emulate these by setting appropriate
10241 bits and offsets. */
10242 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
10243 || inst
.relocs
[0].exp
.X_add_number
!= 0,
10244 _("this instruction does not support indexing"));
10246 if ((inst
.instruction
& PRE_INDEX
) || inst
.operands
[2].writeback
)
10247 inst
.relocs
[0].exp
.X_add_number
= 12 * inst
.operands
[1].imm
;
10249 if (!(inst
.instruction
& INDEX_UP
))
10250 inst
.relocs
[0].exp
.X_add_number
= -inst
.relocs
[0].exp
.X_add_number
;
10252 if (!(inst
.instruction
& PRE_INDEX
) && inst
.operands
[2].writeback
)
10254 inst
.operands
[2].preind
= 0;
10255 inst
.operands
[2].postind
= 1;
10259 encode_arm_cp_address (2, TRUE
, TRUE
, 0);
10262 /* iWMMXt instructions: strictly in alphabetical order. */
10265 do_iwmmxt_tandorc (void)
10267 constraint (inst
.operands
[0].reg
!= REG_PC
, _("only r15 allowed here"));
10271 do_iwmmxt_textrc (void)
10273 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10274 inst
.instruction
|= inst
.operands
[1].imm
;
10278 do_iwmmxt_textrm (void)
10280 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10281 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
10282 inst
.instruction
|= inst
.operands
[2].imm
;
10286 do_iwmmxt_tinsr (void)
10288 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
10289 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
10290 inst
.instruction
|= inst
.operands
[2].imm
;
10294 do_iwmmxt_tmia (void)
10296 inst
.instruction
|= inst
.operands
[0].reg
<< 5;
10297 inst
.instruction
|= inst
.operands
[1].reg
;
10298 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
10302 do_iwmmxt_waligni (void)
10304 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10305 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
10306 inst
.instruction
|= inst
.operands
[2].reg
;
10307 inst
.instruction
|= inst
.operands
[3].imm
<< 20;
10311 do_iwmmxt_wmerge (void)
10313 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10314 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
10315 inst
.instruction
|= inst
.operands
[2].reg
;
10316 inst
.instruction
|= inst
.operands
[3].imm
<< 21;
10320 do_iwmmxt_wmov (void)
10322 /* WMOV rD, rN is an alias for WOR rD, rN, rN. */
10323 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10324 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
10325 inst
.instruction
|= inst
.operands
[1].reg
;
10329 do_iwmmxt_wldstbh (void)
10332 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10334 reloc
= BFD_RELOC_ARM_T32_CP_OFF_IMM_S2
;
10336 reloc
= BFD_RELOC_ARM_CP_OFF_IMM_S2
;
10337 encode_arm_cp_address (1, TRUE
, FALSE
, reloc
);
10341 do_iwmmxt_wldstw (void)
10343 /* RIWR_RIWC clears .isreg for a control register. */
10344 if (!inst
.operands
[0].isreg
)
10346 constraint (inst
.cond
!= COND_ALWAYS
, BAD_COND
);
10347 inst
.instruction
|= 0xf0000000;
10350 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10351 encode_arm_cp_address (1, TRUE
, TRUE
, 0);
10355 do_iwmmxt_wldstd (void)
10357 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10358 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_iwmmxt2
)
10359 && inst
.operands
[1].immisreg
)
10361 inst
.instruction
&= ~0x1a000ff;
10362 inst
.instruction
|= (0xfU
<< 28);
10363 if (inst
.operands
[1].preind
)
10364 inst
.instruction
|= PRE_INDEX
;
10365 if (!inst
.operands
[1].negative
)
10366 inst
.instruction
|= INDEX_UP
;
10367 if (inst
.operands
[1].writeback
)
10368 inst
.instruction
|= WRITE_BACK
;
10369 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
10370 inst
.instruction
|= inst
.relocs
[0].exp
.X_add_number
<< 4;
10371 inst
.instruction
|= inst
.operands
[1].imm
;
10374 encode_arm_cp_address (1, TRUE
, FALSE
, 0);
10378 do_iwmmxt_wshufh (void)
10380 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10381 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
10382 inst
.instruction
|= ((inst
.operands
[2].imm
& 0xf0) << 16);
10383 inst
.instruction
|= (inst
.operands
[2].imm
& 0x0f);
10387 do_iwmmxt_wzero (void)
10389 /* WZERO reg is an alias for WANDN reg, reg, reg. */
10390 inst
.instruction
|= inst
.operands
[0].reg
;
10391 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10392 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
10396 do_iwmmxt_wrwrwr_or_imm5 (void)
10398 if (inst
.operands
[2].isreg
)
10401 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_iwmmxt2
),
10402 _("immediate operand requires iWMMXt2"));
10404 if (inst
.operands
[2].imm
== 0)
10406 switch ((inst
.instruction
>> 20) & 0xf)
10412 /* w...h wrd, wrn, #0 -> wrorh wrd, wrn, #16. */
10413 inst
.operands
[2].imm
= 16;
10414 inst
.instruction
= (inst
.instruction
& 0xff0fffff) | (0x7 << 20);
10420 /* w...w wrd, wrn, #0 -> wrorw wrd, wrn, #32. */
10421 inst
.operands
[2].imm
= 32;
10422 inst
.instruction
= (inst
.instruction
& 0xff0fffff) | (0xb << 20);
10429 /* w...d wrd, wrn, #0 -> wor wrd, wrn, wrn. */
10431 wrn
= (inst
.instruction
>> 16) & 0xf;
10432 inst
.instruction
&= 0xff0fff0f;
10433 inst
.instruction
|= wrn
;
10434 /* Bail out here; the instruction is now assembled. */
10439 /* Map 32 -> 0, etc. */
10440 inst
.operands
[2].imm
&= 0x1f;
10441 inst
.instruction
|= (0xfU
<< 28) | ((inst
.operands
[2].imm
& 0x10) << 4) | (inst
.operands
[2].imm
& 0xf);
10445 /* Cirrus Maverick instructions. Simple 2-, 3-, and 4-register
10446 operations first, then control, shift, and load/store. */
10448 /* Insns like "foo X,Y,Z". */
10451 do_mav_triple (void)
10453 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
10454 inst
.instruction
|= inst
.operands
[1].reg
;
10455 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
10458 /* Insns like "foo W,X,Y,Z".
10459 where W=MVAX[0:3] and X,Y,Z=MVFX[0:15]. */
10464 inst
.instruction
|= inst
.operands
[0].reg
<< 5;
10465 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
10466 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
10467 inst
.instruction
|= inst
.operands
[3].reg
;
10470 /* cfmvsc32<cond> DSPSC,MVDX[15:0]. */
10472 do_mav_dspsc (void)
10474 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
10477 /* Maverick shift immediate instructions.
10478 cfsh32<cond> MVFX[15:0],MVFX[15:0],Shift[6:0].
10479 cfsh64<cond> MVDX[15:0],MVDX[15:0],Shift[6:0]. */
10482 do_mav_shift (void)
10484 int imm
= inst
.operands
[2].imm
;
10486 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10487 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
10489 /* Bits 0-3 of the insn should have bits 0-3 of the immediate.
10490 Bits 5-7 of the insn should have bits 4-6 of the immediate.
10491 Bit 4 should be 0. */
10492 imm
= (imm
& 0xf) | ((imm
& 0x70) << 1);
10494 inst
.instruction
|= imm
;
10497 /* XScale instructions. Also sorted arithmetic before move. */
10499 /* Xscale multiply-accumulate (argument parse)
10502 MIAxycc acc0,Rm,Rs. */
10507 inst
.instruction
|= inst
.operands
[1].reg
;
10508 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
10511 /* Xscale move-accumulator-register (argument parse)
10513 MARcc acc0,RdLo,RdHi. */
10518 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
10519 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
10522 /* Xscale move-register-accumulator (argument parse)
10524 MRAcc RdLo,RdHi,acc0. */
10529 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
, BAD_OVERLAP
);
10530 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10531 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
10534 /* Encoding functions relevant only to Thumb. */
10536 /* inst.operands[i] is a shifted-register operand; encode
10537 it into inst.instruction in the format used by Thumb32. */
10540 encode_thumb32_shifted_operand (int i
)
10542 unsigned int value
= inst
.relocs
[0].exp
.X_add_number
;
10543 unsigned int shift
= inst
.operands
[i
].shift_kind
;
10545 constraint (inst
.operands
[i
].immisreg
,
10546 _("shift by register not allowed in thumb mode"));
10547 inst
.instruction
|= inst
.operands
[i
].reg
;
10548 if (shift
== SHIFT_RRX
)
10549 inst
.instruction
|= SHIFT_ROR
<< 4;
10552 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
,
10553 _("expression too complex"));
10555 constraint (value
> 32
10556 || (value
== 32 && (shift
== SHIFT_LSL
10557 || shift
== SHIFT_ROR
)),
10558 _("shift expression is too large"));
10562 else if (value
== 32)
10565 inst
.instruction
|= shift
<< 4;
10566 inst
.instruction
|= (value
& 0x1c) << 10;
10567 inst
.instruction
|= (value
& 0x03) << 6;
10572 /* inst.operands[i] was set up by parse_address. Encode it into a
10573 Thumb32 format load or store instruction. Reject forms that cannot
10574 be used with such instructions. If is_t is true, reject forms that
10575 cannot be used with a T instruction; if is_d is true, reject forms
10576 that cannot be used with a D instruction. If it is a store insn,
10577 reject PC in Rn. */
10580 encode_thumb32_addr_mode (int i
, bfd_boolean is_t
, bfd_boolean is_d
)
10582 const bfd_boolean is_pc
= (inst
.operands
[i
].reg
== REG_PC
);
10584 constraint (!inst
.operands
[i
].isreg
,
10585 _("Instruction does not support =N addresses"));
10587 inst
.instruction
|= inst
.operands
[i
].reg
<< 16;
10588 if (inst
.operands
[i
].immisreg
)
10590 constraint (is_pc
, BAD_PC_ADDRESSING
);
10591 constraint (is_t
|| is_d
, _("cannot use register index with this instruction"));
10592 constraint (inst
.operands
[i
].negative
,
10593 _("Thumb does not support negative register indexing"));
10594 constraint (inst
.operands
[i
].postind
,
10595 _("Thumb does not support register post-indexing"));
10596 constraint (inst
.operands
[i
].writeback
,
10597 _("Thumb does not support register indexing with writeback"));
10598 constraint (inst
.operands
[i
].shifted
&& inst
.operands
[i
].shift_kind
!= SHIFT_LSL
,
10599 _("Thumb supports only LSL in shifted register indexing"));
10601 inst
.instruction
|= inst
.operands
[i
].imm
;
10602 if (inst
.operands
[i
].shifted
)
10604 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
,
10605 _("expression too complex"));
10606 constraint (inst
.relocs
[0].exp
.X_add_number
< 0
10607 || inst
.relocs
[0].exp
.X_add_number
> 3,
10608 _("shift out of range"));
10609 inst
.instruction
|= inst
.relocs
[0].exp
.X_add_number
<< 4;
10611 inst
.relocs
[0].type
= BFD_RELOC_UNUSED
;
10613 else if (inst
.operands
[i
].preind
)
10615 constraint (is_pc
&& inst
.operands
[i
].writeback
, BAD_PC_WRITEBACK
);
10616 constraint (is_t
&& inst
.operands
[i
].writeback
,
10617 _("cannot use writeback with this instruction"));
10618 constraint (is_pc
&& ((inst
.instruction
& THUMB2_LOAD_BIT
) == 0),
10619 BAD_PC_ADDRESSING
);
10623 inst
.instruction
|= 0x01000000;
10624 if (inst
.operands
[i
].writeback
)
10625 inst
.instruction
|= 0x00200000;
10629 inst
.instruction
|= 0x00000c00;
10630 if (inst
.operands
[i
].writeback
)
10631 inst
.instruction
|= 0x00000100;
10633 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_OFFSET_IMM
;
10635 else if (inst
.operands
[i
].postind
)
10637 gas_assert (inst
.operands
[i
].writeback
);
10638 constraint (is_pc
, _("cannot use post-indexing with PC-relative addressing"));
10639 constraint (is_t
, _("cannot use post-indexing with this instruction"));
10642 inst
.instruction
|= 0x00200000;
10644 inst
.instruction
|= 0x00000900;
10645 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_OFFSET_IMM
;
10647 else /* unindexed - only for coprocessor */
10648 inst
.error
= _("instruction does not accept unindexed addressing");
/* Table of Thumb instructions which exist in both 16- and 32-bit
   encodings (the latter only in post-V6T2 cores).  The index is the
   value used in the insns table below.  When there is more than one
   possible 16-bit encoding for the instruction, this table always
   holds variant (1).
   Also contains several pseudo-instructions used during relaxation.  */
#define T16_32_TAB				\
  X(_adc,   4140, eb400000),			\
  X(_adcs,  4140, eb500000),			\
  X(_add,   1c00, eb000000),			\
  X(_adds,  1c00, eb100000),			\
  X(_addi,  0000, f1000000),			\
  X(_addis, 0000, f1100000),			\
  X(_add_pc,000f, f20f0000),			\
  X(_add_sp,000d, f10d0000),			\
  X(_adr,   000f, f20f0000),			\
  X(_and,   4000, ea000000),			\
  X(_ands,  4000, ea100000),			\
  X(_asr,   1000, fa40f000),			\
  X(_asrs,  1000, fa50f000),			\
  X(_b,     e000, f000b000),			\
  X(_bcond, d000, f0008000),			\
  X(_bf,    0000, f040e001),			\
  X(_bfcsel,0000, f000e001),			\
  X(_bfx,   0000, f060e001),			\
  X(_bfl,   0000, f000c001),			\
  X(_bflx,  0000, f070e001),			\
  X(_bic,   4380, ea200000),			\
  X(_bics,  4380, ea300000),			\
  X(_cmn,   42c0, eb100f00),			\
  X(_cmp,   2800, ebb00f00),			\
  X(_cpsie, b660, f3af8400),			\
  X(_cpsid, b670, f3af8600),			\
  X(_cpy,   4600, ea4f0000),			\
  X(_dec_sp,80dd, f1ad0d00),			\
  X(_dls,   0000, f040e001),			\
  X(_eor,   4040, ea800000),			\
  X(_eors,  4040, ea900000),			\
  X(_inc_sp,00dd, f10d0d00),			\
  X(_ldmia, c800, e8900000),			\
  X(_ldr,   6800, f8500000),			\
  X(_ldrb,  7800, f8100000),			\
  X(_ldrh,  8800, f8300000),			\
  X(_ldrsb, 5600, f9100000),			\
  X(_ldrsh, 5e00, f9300000),			\
  X(_ldr_pc,4800, f85f0000),			\
  X(_ldr_pc2,4800, f85f0000),			\
  X(_ldr_sp,9800, f85d0000),			\
  X(_le,    0000, f00fc001),			\
  X(_lsl,   0000, fa00f000),			\
  X(_lsls,  0000, fa10f000),			\
  X(_lsr,   0800, fa20f000),			\
  X(_lsrs,  0800, fa30f000),			\
  X(_mov,   2000, ea4f0000),			\
  X(_movs,  2000, ea5f0000),			\
  X(_mul,   4340, fb00f000),			\
  X(_muls,  4340, ffffffff), /* no 32b muls */	\
  X(_mvn,   43c0, ea6f0000),			\
  X(_mvns,  43c0, ea7f0000),			\
  X(_neg,   4240, f1c00000), /* rsb #0 */	\
  X(_negs,  4240, f1d00000), /* rsbs #0 */	\
  X(_orr,   4300, ea400000),			\
  X(_orrs,  4300, ea500000),			\
  X(_pop,   bc00, e8bd0000), /* ldmia sp!,... */	\
  X(_push,  b400, e92d0000), /* stmdb sp!,... */	\
  X(_rev,   ba00, fa90f080),			\
  X(_rev16, ba40, fa90f090),			\
  X(_revsh, bac0, fa90f0b0),			\
  X(_ror,   41c0, fa60f000),			\
  X(_rors,  41c0, fa70f000),			\
  X(_sbc,   4180, eb600000),			\
  X(_sbcs,  4180, eb700000),			\
  X(_stmia, c000, e8800000),			\
  X(_str,   6000, f8400000),			\
  X(_strb,  7000, f8000000),			\
  X(_strh,  8000, f8200000),			\
  X(_str_sp,9000, f84d0000),			\
  X(_sub,   1e00, eba00000),			\
  X(_subs,  1e00, ebb00000),			\
  X(_subi,  8000, f1a00000),			\
  X(_subis, 8000, f1b00000),			\
  X(_sxtb,  b240, fa4ff080),			\
  X(_sxth,  b200, fa0ff080),			\
  X(_tst,   4200, ea100f00),			\
  X(_uxtb,  b2c0, fa5ff080),			\
  X(_uxth,  b280, fa1ff080),			\
  X(_nop,   bf00, f3af8000),			\
  X(_yield, bf10, f3af8001),			\
  X(_wfe,   bf20, f3af8002),			\
  X(_wfi,   bf30, f3af8003),			\
  X(_wls,   0000, f040c001),			\
  X(_sev,   bf40, f3af8004),			\
  X(_sevl,  bf50, f3af8005),			\
  X(_udf,   de00, f7f0a000)

/* To catch errors in encoding functions, the codes are all offset by
   0xF800, putting them in one of the 32-bit prefix ranges, ergo undefined
   as 16-bit instructions.  */
#define X(a,b,c) T_MNEM##a
enum t16_32_codes { T16_32_OFFSET = 0xF7FF, T16_32_TAB };
#undef X

#define X(a,b,c) 0x##b
static const unsigned short thumb_op16[] = { T16_32_TAB };
#define THUMB_OP16(n) (thumb_op16[(n) - (T16_32_OFFSET + 1)])
#undef X

#define X(a,b,c) 0x##c
static const unsigned int thumb_op32[] = { T16_32_TAB };
#define THUMB_OP32(n)        (thumb_op32[(n) - (T16_32_OFFSET + 1)])
#define THUMB_SETS_FLAGS(n) (THUMB_OP32 (n) & 0x00100000)
#undef X
10765 /* Thumb instruction encoders, in alphabetical order. */
10767 /* ADDW or SUBW. */
10770 do_t_add_sub_w (void)
10774 Rd
= inst
.operands
[0].reg
;
10775 Rn
= inst
.operands
[1].reg
;
10777 /* If Rn is REG_PC, this is ADR; if Rn is REG_SP, then this
10778 is the SP-{plus,minus}-immediate form of the instruction. */
10780 constraint (Rd
== REG_PC
, BAD_PC
);
10782 reject_bad_reg (Rd
);
10784 inst
.instruction
|= (Rn
<< 16) | (Rd
<< 8);
10785 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_IMM12
;
10788 /* Parse an add or subtract instruction. We get here with inst.instruction
10789 equaling any of THUMB_OPCODE_add, adds, sub, or subs. */
10792 do_t_add_sub (void)
10796 Rd
= inst
.operands
[0].reg
;
10797 Rs
= (inst
.operands
[1].present
10798 ? inst
.operands
[1].reg
/* Rd, Rs, foo */
10799 : inst
.operands
[0].reg
); /* Rd, foo -> Rd, Rd, foo */
10802 set_it_insn_type_last ();
10804 if (unified_syntax
)
10807 bfd_boolean narrow
;
10810 flags
= (inst
.instruction
== T_MNEM_adds
10811 || inst
.instruction
== T_MNEM_subs
);
10813 narrow
= !in_it_block ();
10815 narrow
= in_it_block ();
10816 if (!inst
.operands
[2].isreg
)
10820 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8
))
10821 constraint (Rd
== REG_SP
&& Rs
!= REG_SP
, BAD_SP
);
10823 add
= (inst
.instruction
== T_MNEM_add
10824 || inst
.instruction
== T_MNEM_adds
);
10826 if (inst
.size_req
!= 4)
10828 /* Attempt to use a narrow opcode, with relaxation if
10830 if (Rd
== REG_SP
&& Rs
== REG_SP
&& !flags
)
10831 opcode
= add
? T_MNEM_inc_sp
: T_MNEM_dec_sp
;
10832 else if (Rd
<= 7 && Rs
== REG_SP
&& add
&& !flags
)
10833 opcode
= T_MNEM_add_sp
;
10834 else if (Rd
<= 7 && Rs
== REG_PC
&& add
&& !flags
)
10835 opcode
= T_MNEM_add_pc
;
10836 else if (Rd
<= 7 && Rs
<= 7 && narrow
)
10839 opcode
= add
? T_MNEM_addis
: T_MNEM_subis
;
10841 opcode
= add
? T_MNEM_addi
: T_MNEM_subi
;
10845 inst
.instruction
= THUMB_OP16(opcode
);
10846 inst
.instruction
|= (Rd
<< 4) | Rs
;
10847 if (inst
.relocs
[0].type
< BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
10848 || (inst
.relocs
[0].type
10849 > BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
))
10851 if (inst
.size_req
== 2)
10852 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_ADD
;
10854 inst
.relax
= opcode
;
10858 constraint (inst
.size_req
== 2, BAD_HIREG
);
10860 if (inst
.size_req
== 4
10861 || (inst
.size_req
!= 2 && !opcode
))
10863 constraint ((inst
.relocs
[0].type
10864 >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
)
10865 && (inst
.relocs
[0].type
10866 <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
) ,
10867 THUMB1_RELOC_ONLY
);
10870 constraint (add
, BAD_PC
);
10871 constraint (Rs
!= REG_LR
|| inst
.instruction
!= T_MNEM_subs
,
10872 _("only SUBS PC, LR, #const allowed"));
10873 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
,
10874 _("expression too complex"));
10875 constraint (inst
.relocs
[0].exp
.X_add_number
< 0
10876 || inst
.relocs
[0].exp
.X_add_number
> 0xff,
10877 _("immediate value out of range"));
10878 inst
.instruction
= T2_SUBS_PC_LR
10879 | inst
.relocs
[0].exp
.X_add_number
;
10880 inst
.relocs
[0].type
= BFD_RELOC_UNUSED
;
10883 else if (Rs
== REG_PC
)
10885 /* Always use addw/subw. */
10886 inst
.instruction
= add
? 0xf20f0000 : 0xf2af0000;
10887 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_IMM12
;
10891 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
10892 inst
.instruction
= (inst
.instruction
& 0xe1ffffff)
10895 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
10897 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_ADD_IMM
;
10899 inst
.instruction
|= Rd
<< 8;
10900 inst
.instruction
|= Rs
<< 16;
10905 unsigned int value
= inst
.relocs
[0].exp
.X_add_number
;
10906 unsigned int shift
= inst
.operands
[2].shift_kind
;
10908 Rn
= inst
.operands
[2].reg
;
10909 /* See if we can do this with a 16-bit instruction. */
10910 if (!inst
.operands
[2].shifted
&& inst
.size_req
!= 4)
10912 if (Rd
> 7 || Rs
> 7 || Rn
> 7)
10917 inst
.instruction
= ((inst
.instruction
== T_MNEM_adds
10918 || inst
.instruction
== T_MNEM_add
)
10920 : T_OPCODE_SUB_R3
);
10921 inst
.instruction
|= Rd
| (Rs
<< 3) | (Rn
<< 6);
10925 if (inst
.instruction
== T_MNEM_add
&& (Rd
== Rs
|| Rd
== Rn
))
10927 /* Thumb-1 cores (except v6-M) require at least one high
10928 register in a narrow non flag setting add. */
10929 if (Rd
> 7 || Rn
> 7
10930 || ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6t2
)
10931 || ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_msr
))
10938 inst
.instruction
= T_OPCODE_ADD_HI
;
10939 inst
.instruction
|= (Rd
& 8) << 4;
10940 inst
.instruction
|= (Rd
& 7);
10941 inst
.instruction
|= Rn
<< 3;
10947 constraint (Rd
== REG_PC
, BAD_PC
);
10948 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8
))
10949 constraint (Rd
== REG_SP
&& Rs
!= REG_SP
, BAD_SP
);
10950 constraint (Rs
== REG_PC
, BAD_PC
);
10951 reject_bad_reg (Rn
);
10953 /* If we get here, it can't be done in 16 bits. */
10954 constraint (inst
.operands
[2].shifted
&& inst
.operands
[2].immisreg
,
10955 _("shift must be constant"));
10956 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
10957 inst
.instruction
|= Rd
<< 8;
10958 inst
.instruction
|= Rs
<< 16;
10959 constraint (Rd
== REG_SP
&& Rs
== REG_SP
&& value
> 3,
10960 _("shift value over 3 not allowed in thumb mode"));
10961 constraint (Rd
== REG_SP
&& Rs
== REG_SP
&& shift
!= SHIFT_LSL
,
10962 _("only LSL shift allowed in thumb mode"));
10963 encode_thumb32_shifted_operand (2);
10968 constraint (inst
.instruction
== T_MNEM_adds
10969 || inst
.instruction
== T_MNEM_subs
,
10972 if (!inst
.operands
[2].isreg
) /* Rd, Rs, #imm */
10974 constraint ((Rd
> 7 && (Rd
!= REG_SP
|| Rs
!= REG_SP
))
10975 || (Rs
> 7 && Rs
!= REG_SP
&& Rs
!= REG_PC
),
10978 inst
.instruction
= (inst
.instruction
== T_MNEM_add
10979 ? 0x0000 : 0x8000);
10980 inst
.instruction
|= (Rd
<< 4) | Rs
;
10981 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_ADD
;
10985 Rn
= inst
.operands
[2].reg
;
10986 constraint (inst
.operands
[2].shifted
, _("unshifted register required"));
10988 /* We now have Rd, Rs, and Rn set to registers. */
10989 if (Rd
> 7 || Rs
> 7 || Rn
> 7)
10991 /* Can't do this for SUB. */
10992 constraint (inst
.instruction
== T_MNEM_sub
, BAD_HIREG
);
10993 inst
.instruction
= T_OPCODE_ADD_HI
;
10994 inst
.instruction
|= (Rd
& 8) << 4;
10995 inst
.instruction
|= (Rd
& 7);
10997 inst
.instruction
|= Rn
<< 3;
10999 inst
.instruction
|= Rs
<< 3;
11001 constraint (1, _("dest must overlap one source register"));
11005 inst
.instruction
= (inst
.instruction
== T_MNEM_add
11006 ? T_OPCODE_ADD_R3
: T_OPCODE_SUB_R3
);
11007 inst
.instruction
|= Rd
| (Rs
<< 3) | (Rn
<< 6);
11017 Rd
= inst
.operands
[0].reg
;
11018 reject_bad_reg (Rd
);
11020 if (unified_syntax
&& inst
.size_req
== 0 && Rd
<= 7)
11022 /* Defer to section relaxation. */
11023 inst
.relax
= inst
.instruction
;
11024 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11025 inst
.instruction
|= Rd
<< 4;
11027 else if (unified_syntax
&& inst
.size_req
!= 2)
11029 /* Generate a 32-bit opcode. */
11030 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
11031 inst
.instruction
|= Rd
<< 8;
11032 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_ADD_PC12
;
11033 inst
.relocs
[0].pc_rel
= 1;
11037 /* Generate a 16-bit opcode. */
11038 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11039 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_ADD
;
11040 inst
.relocs
[0].exp
.X_add_number
-= 4; /* PC relative adjust. */
11041 inst
.relocs
[0].pc_rel
= 1;
11042 inst
.instruction
|= Rd
<< 4;
11045 if (inst
.relocs
[0].exp
.X_op
== O_symbol
11046 && inst
.relocs
[0].exp
.X_add_symbol
!= NULL
11047 && S_IS_DEFINED (inst
.relocs
[0].exp
.X_add_symbol
)
11048 && THUMB_IS_FUNC (inst
.relocs
[0].exp
.X_add_symbol
))
11049 inst
.relocs
[0].exp
.X_add_number
+= 1;
11052 /* Arithmetic instructions for which there is just one 16-bit
11053 instruction encoding, and it allows only two low registers.
11054 For maximal compatibility with ARM syntax, we allow three register
11055 operands even when Thumb-32 instructions are not available, as long
11056 as the first two are identical. For instance, both "sbc r0,r1" and
11057 "sbc r0,r0,r1" are allowed. */
11063 Rd
= inst
.operands
[0].reg
;
11064 Rs
= (inst
.operands
[1].present
11065 ? inst
.operands
[1].reg
/* Rd, Rs, foo */
11066 : inst
.operands
[0].reg
); /* Rd, foo -> Rd, Rd, foo */
11067 Rn
= inst
.operands
[2].reg
;
11069 reject_bad_reg (Rd
);
11070 reject_bad_reg (Rs
);
11071 if (inst
.operands
[2].isreg
)
11072 reject_bad_reg (Rn
);
11074 if (unified_syntax
)
11076 if (!inst
.operands
[2].isreg
)
11078 /* For an immediate, we always generate a 32-bit opcode;
11079 section relaxation will shrink it later if possible. */
11080 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
11081 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
11082 inst
.instruction
|= Rd
<< 8;
11083 inst
.instruction
|= Rs
<< 16;
11084 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
11088 bfd_boolean narrow
;
11090 /* See if we can do this with a 16-bit instruction. */
11091 if (THUMB_SETS_FLAGS (inst
.instruction
))
11092 narrow
= !in_it_block ();
11094 narrow
= in_it_block ();
11096 if (Rd
> 7 || Rn
> 7 || Rs
> 7)
11098 if (inst
.operands
[2].shifted
)
11100 if (inst
.size_req
== 4)
11106 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11107 inst
.instruction
|= Rd
;
11108 inst
.instruction
|= Rn
<< 3;
11112 /* If we get here, it can't be done in 16 bits. */
11113 constraint (inst
.operands
[2].shifted
11114 && inst
.operands
[2].immisreg
,
11115 _("shift must be constant"));
11116 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
11117 inst
.instruction
|= Rd
<< 8;
11118 inst
.instruction
|= Rs
<< 16;
11119 encode_thumb32_shifted_operand (2);
11124 /* On its face this is a lie - the instruction does set the
11125 flags. However, the only supported mnemonic in this mode
11126 says it doesn't. */
11127 constraint (THUMB_SETS_FLAGS (inst
.instruction
), BAD_THUMB32
);
11129 constraint (!inst
.operands
[2].isreg
|| inst
.operands
[2].shifted
,
11130 _("unshifted register required"));
11131 constraint (Rd
> 7 || Rs
> 7 || Rn
> 7, BAD_HIREG
);
11132 constraint (Rd
!= Rs
,
11133 _("dest and source1 must be the same register"));
11135 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11136 inst
.instruction
|= Rd
;
11137 inst
.instruction
|= Rn
<< 3;
11141 /* Similarly, but for instructions where the arithmetic operation is
11142 commutative, so we can allow either of them to be different from
11143 the destination operand in a 16-bit instruction. For instance, all
11144 three of "adc r0,r1", "adc r0,r0,r1", and "adc r0,r1,r0" are
11151 Rd
= inst
.operands
[0].reg
;
11152 Rs
= (inst
.operands
[1].present
11153 ? inst
.operands
[1].reg
/* Rd, Rs, foo */
11154 : inst
.operands
[0].reg
); /* Rd, foo -> Rd, Rd, foo */
11155 Rn
= inst
.operands
[2].reg
;
11157 reject_bad_reg (Rd
);
11158 reject_bad_reg (Rs
);
11159 if (inst
.operands
[2].isreg
)
11160 reject_bad_reg (Rn
);
11162 if (unified_syntax
)
11164 if (!inst
.operands
[2].isreg
)
11166 /* For an immediate, we always generate a 32-bit opcode;
11167 section relaxation will shrink it later if possible. */
11168 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
11169 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
11170 inst
.instruction
|= Rd
<< 8;
11171 inst
.instruction
|= Rs
<< 16;
11172 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
11176 bfd_boolean narrow
;
11178 /* See if we can do this with a 16-bit instruction. */
11179 if (THUMB_SETS_FLAGS (inst
.instruction
))
11180 narrow
= !in_it_block ();
11182 narrow
= in_it_block ();
11184 if (Rd
> 7 || Rn
> 7 || Rs
> 7)
11186 if (inst
.operands
[2].shifted
)
11188 if (inst
.size_req
== 4)
11195 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11196 inst
.instruction
|= Rd
;
11197 inst
.instruction
|= Rn
<< 3;
11202 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11203 inst
.instruction
|= Rd
;
11204 inst
.instruction
|= Rs
<< 3;
11209 /* If we get here, it can't be done in 16 bits. */
11210 constraint (inst
.operands
[2].shifted
11211 && inst
.operands
[2].immisreg
,
11212 _("shift must be constant"));
11213 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
11214 inst
.instruction
|= Rd
<< 8;
11215 inst
.instruction
|= Rs
<< 16;
11216 encode_thumb32_shifted_operand (2);
11221 /* On its face this is a lie - the instruction does set the
11222 flags. However, the only supported mnemonic in this mode
11223 says it doesn't. */
11224 constraint (THUMB_SETS_FLAGS (inst
.instruction
), BAD_THUMB32
);
11226 constraint (!inst
.operands
[2].isreg
|| inst
.operands
[2].shifted
,
11227 _("unshifted register required"));
11228 constraint (Rd
> 7 || Rs
> 7 || Rn
> 7, BAD_HIREG
);
11230 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11231 inst
.instruction
|= Rd
;
11234 inst
.instruction
|= Rn
<< 3;
11236 inst
.instruction
|= Rs
<< 3;
11238 constraint (1, _("dest must overlap one source register"));
11246 unsigned int msb
= inst
.operands
[1].imm
+ inst
.operands
[2].imm
;
11247 constraint (msb
> 32, _("bit-field extends past end of register"));
11248 /* The instruction encoding stores the LSB and MSB,
11249 not the LSB and width. */
11250 Rd
= inst
.operands
[0].reg
;
11251 reject_bad_reg (Rd
);
11252 inst
.instruction
|= Rd
<< 8;
11253 inst
.instruction
|= (inst
.operands
[1].imm
& 0x1c) << 10;
11254 inst
.instruction
|= (inst
.operands
[1].imm
& 0x03) << 6;
11255 inst
.instruction
|= msb
- 1;
11264 Rd
= inst
.operands
[0].reg
;
11265 reject_bad_reg (Rd
);
11267 /* #0 in second position is alternative syntax for bfc, which is
11268 the same instruction but with REG_PC in the Rm field. */
11269 if (!inst
.operands
[1].isreg
)
11273 Rn
= inst
.operands
[1].reg
;
11274 reject_bad_reg (Rn
);
11277 msb
= inst
.operands
[2].imm
+ inst
.operands
[3].imm
;
11278 constraint (msb
> 32, _("bit-field extends past end of register"));
11279 /* The instruction encoding stores the LSB and MSB,
11280 not the LSB and width. */
11281 inst
.instruction
|= Rd
<< 8;
11282 inst
.instruction
|= Rn
<< 16;
11283 inst
.instruction
|= (inst
.operands
[2].imm
& 0x1c) << 10;
11284 inst
.instruction
|= (inst
.operands
[2].imm
& 0x03) << 6;
11285 inst
.instruction
|= msb
- 1;
11293 Rd
= inst
.operands
[0].reg
;
11294 Rn
= inst
.operands
[1].reg
;
11296 reject_bad_reg (Rd
);
11297 reject_bad_reg (Rn
);
11299 constraint (inst
.operands
[2].imm
+ inst
.operands
[3].imm
> 32,
11300 _("bit-field extends past end of register"));
11301 inst
.instruction
|= Rd
<< 8;
11302 inst
.instruction
|= Rn
<< 16;
11303 inst
.instruction
|= (inst
.operands
[2].imm
& 0x1c) << 10;
11304 inst
.instruction
|= (inst
.operands
[2].imm
& 0x03) << 6;
11305 inst
.instruction
|= inst
.operands
[3].imm
- 1;
11308 /* ARM V5 Thumb BLX (argument parse)
11309 BLX <target_addr> which is BLX(1)
11310 BLX <Rm> which is BLX(2)
11311 Unfortunately, there are two different opcodes for this mnemonic.
11312 So, the insns[].value is not used, and the code here zaps values
11313 into inst.instruction.
11315 ??? How to take advantage of the additional two bits of displacement
11316 available in Thumb32 mode? Need new relocation? */
11321 set_it_insn_type_last ();
11323 if (inst
.operands
[0].isreg
)
11325 constraint (inst
.operands
[0].reg
== REG_PC
, BAD_PC
);
11326 /* We have a register, so this is BLX(2). */
11327 inst
.instruction
|= inst
.operands
[0].reg
<< 3;
11331 /* No register. This must be BLX(1). */
11332 inst
.instruction
= 0xf000e800;
11333 encode_branch (BFD_RELOC_THUMB_PCREL_BLX
);
11342 bfd_reloc_code_real_type reloc
;
11345 set_it_insn_type (IF_INSIDE_IT_LAST_INSN
);
11347 if (in_it_block ())
11349 /* Conditional branches inside IT blocks are encoded as unconditional
11351 cond
= COND_ALWAYS
;
11356 if (cond
!= COND_ALWAYS
)
11357 opcode
= T_MNEM_bcond
;
11359 opcode
= inst
.instruction
;
11362 && (inst
.size_req
== 4
11363 || (inst
.size_req
!= 2
11364 && (inst
.operands
[0].hasreloc
11365 || inst
.relocs
[0].exp
.X_op
== O_constant
))))
11367 inst
.instruction
= THUMB_OP32(opcode
);
11368 if (cond
== COND_ALWAYS
)
11369 reloc
= BFD_RELOC_THUMB_PCREL_BRANCH25
;
11372 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2
),
11373 _("selected architecture does not support "
11374 "wide conditional branch instruction"));
11376 gas_assert (cond
!= 0xF);
11377 inst
.instruction
|= cond
<< 22;
11378 reloc
= BFD_RELOC_THUMB_PCREL_BRANCH20
;
11383 inst
.instruction
= THUMB_OP16(opcode
);
11384 if (cond
== COND_ALWAYS
)
11385 reloc
= BFD_RELOC_THUMB_PCREL_BRANCH12
;
11388 inst
.instruction
|= cond
<< 8;
11389 reloc
= BFD_RELOC_THUMB_PCREL_BRANCH9
;
11391 /* Allow section relaxation. */
11392 if (unified_syntax
&& inst
.size_req
!= 2)
11393 inst
.relax
= opcode
;
11395 inst
.relocs
[0].type
= reloc
;
11396 inst
.relocs
[0].pc_rel
= 1;
11399 /* Actually do the work for Thumb state bkpt and hlt. The only difference
11400 between the two is the maximum immediate allowed - which is passed in
11403 do_t_bkpt_hlt1 (int range
)
11405 constraint (inst
.cond
!= COND_ALWAYS
,
11406 _("instruction is always unconditional"));
11407 if (inst
.operands
[0].present
)
11409 constraint (inst
.operands
[0].imm
> range
,
11410 _("immediate value out of range"));
11411 inst
.instruction
|= inst
.operands
[0].imm
;
11414 set_it_insn_type (NEUTRAL_IT_INSN
);
11420 do_t_bkpt_hlt1 (63);
11426 do_t_bkpt_hlt1 (255);
11430 do_t_branch23 (void)
11432 set_it_insn_type_last ();
11433 encode_branch (BFD_RELOC_THUMB_PCREL_BRANCH23
);
11435 /* md_apply_fix blows up with 'bl foo(PLT)' where foo is defined in
11436 this file. We used to simply ignore the PLT reloc type here --
11437 the branch encoding is now needed to deal with TLSCALL relocs.
11438 So if we see a PLT reloc now, put it back to how it used to be to
11439 keep the preexisting behaviour. */
11440 if (inst
.relocs
[0].type
== BFD_RELOC_ARM_PLT32
)
11441 inst
.relocs
[0].type
= BFD_RELOC_THUMB_PCREL_BRANCH23
;
11443 #if defined(OBJ_COFF)
11444 /* If the destination of the branch is a defined symbol which does not have
11445 the THUMB_FUNC attribute, then we must be calling a function which has
11446 the (interfacearm) attribute. We look for the Thumb entry point to that
11447 function and change the branch to refer to that function instead. */
11448 if ( inst
.relocs
[0].exp
.X_op
== O_symbol
11449 && inst
.relocs
[0].exp
.X_add_symbol
!= NULL
11450 && S_IS_DEFINED (inst
.relocs
[0].exp
.X_add_symbol
)
11451 && ! THUMB_IS_FUNC (inst
.relocs
[0].exp
.X_add_symbol
))
11452 inst
.relocs
[0].exp
.X_add_symbol
11453 = find_real_start (inst
.relocs
[0].exp
.X_add_symbol
);
11460 set_it_insn_type_last ();
11461 inst
.instruction
|= inst
.operands
[0].reg
<< 3;
11462 /* ??? FIXME: Should add a hacky reloc here if reg is REG_PC. The reloc
11463 should cause the alignment to be checked once it is known. This is
11464 because BX PC only works if the instruction is word aligned. */
11472 set_it_insn_type_last ();
11473 Rm
= inst
.operands
[0].reg
;
11474 reject_bad_reg (Rm
);
11475 inst
.instruction
|= Rm
<< 16;
11484 Rd
= inst
.operands
[0].reg
;
11485 Rm
= inst
.operands
[1].reg
;
11487 reject_bad_reg (Rd
);
11488 reject_bad_reg (Rm
);
11490 inst
.instruction
|= Rd
<< 8;
11491 inst
.instruction
|= Rm
<< 16;
11492 inst
.instruction
|= Rm
;
11498 set_it_insn_type (OUTSIDE_IT_INSN
);
11504 set_it_insn_type (OUTSIDE_IT_INSN
);
11505 inst
.instruction
|= inst
.operands
[0].imm
;
11511 set_it_insn_type (OUTSIDE_IT_INSN
);
11513 && (inst
.operands
[1].present
|| inst
.size_req
== 4)
11514 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6_notm
))
11516 unsigned int imod
= (inst
.instruction
& 0x0030) >> 4;
11517 inst
.instruction
= 0xf3af8000;
11518 inst
.instruction
|= imod
<< 9;
11519 inst
.instruction
|= inst
.operands
[0].imm
<< 5;
11520 if (inst
.operands
[1].present
)
11521 inst
.instruction
|= 0x100 | inst
.operands
[1].imm
;
11525 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
)
11526 && (inst
.operands
[0].imm
& 4),
11527 _("selected processor does not support 'A' form "
11528 "of this instruction"));
11529 constraint (inst
.operands
[1].present
|| inst
.size_req
== 4,
11530 _("Thumb does not support the 2-argument "
11531 "form of this instruction"));
11532 inst
.instruction
|= inst
.operands
[0].imm
;
11536 /* THUMB CPY instruction (argument parse). */
11541 if (inst
.size_req
== 4)
11543 inst
.instruction
= THUMB_OP32 (T_MNEM_mov
);
11544 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
11545 inst
.instruction
|= inst
.operands
[1].reg
;
11549 inst
.instruction
|= (inst
.operands
[0].reg
& 0x8) << 4;
11550 inst
.instruction
|= (inst
.operands
[0].reg
& 0x7);
11551 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
11558 set_it_insn_type (OUTSIDE_IT_INSN
);
11559 constraint (inst
.operands
[0].reg
> 7, BAD_HIREG
);
11560 inst
.instruction
|= inst
.operands
[0].reg
;
11561 inst
.relocs
[0].pc_rel
= 1;
11562 inst
.relocs
[0].type
= BFD_RELOC_THUMB_PCREL_BRANCH7
;
11568 inst
.instruction
|= inst
.operands
[0].imm
;
11574 unsigned Rd
, Rn
, Rm
;
11576 Rd
= inst
.operands
[0].reg
;
11577 Rn
= (inst
.operands
[1].present
11578 ? inst
.operands
[1].reg
: Rd
);
11579 Rm
= inst
.operands
[2].reg
;
11581 reject_bad_reg (Rd
);
11582 reject_bad_reg (Rn
);
11583 reject_bad_reg (Rm
);
11585 inst
.instruction
|= Rd
<< 8;
11586 inst
.instruction
|= Rn
<< 16;
11587 inst
.instruction
|= Rm
;
11593 if (unified_syntax
&& inst
.size_req
== 4)
11594 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
11596 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11602 unsigned int cond
= inst
.operands
[0].imm
;
11604 set_it_insn_type (IT_INSN
);
11605 now_it
.mask
= (inst
.instruction
& 0xf) | 0x10;
11607 now_it
.warn_deprecated
= FALSE
;
11609 /* If the condition is a negative condition, invert the mask. */
11610 if ((cond
& 0x1) == 0x0)
11612 unsigned int mask
= inst
.instruction
& 0x000f;
11614 if ((mask
& 0x7) == 0)
11616 /* No conversion needed. */
11617 now_it
.block_length
= 1;
11619 else if ((mask
& 0x3) == 0)
11622 now_it
.block_length
= 2;
11624 else if ((mask
& 0x1) == 0)
11627 now_it
.block_length
= 3;
11632 now_it
.block_length
= 4;
11635 inst
.instruction
&= 0xfff0;
11636 inst
.instruction
|= mask
;
11639 inst
.instruction
|= cond
<< 4;
11642 /* Helper function used for both push/pop and ldm/stm. */
11644 encode_thumb2_multi (bfd_boolean do_io
, int base
, unsigned mask
,
11645 bfd_boolean writeback
)
11647 bfd_boolean load
, store
;
11649 gas_assert (base
!= -1 || !do_io
);
11650 load
= do_io
&& ((inst
.instruction
& (1 << 20)) != 0);
11651 store
= do_io
&& !load
;
11653 if (mask
& (1 << 13))
11654 inst
.error
= _("SP not allowed in register list");
11656 if (do_io
&& (mask
& (1 << base
)) != 0
11658 inst
.error
= _("having the base register in the register list when "
11659 "using write back is UNPREDICTABLE");
11663 if (mask
& (1 << 15))
11665 if (mask
& (1 << 14))
11666 inst
.error
= _("LR and PC should not both be in register list");
11668 set_it_insn_type_last ();
11673 if (mask
& (1 << 15))
11674 inst
.error
= _("PC not allowed in register list");
11677 if (do_io
&& ((mask
& (mask
- 1)) == 0))
11679 /* Single register transfers implemented as str/ldr. */
11682 if (inst
.instruction
& (1 << 23))
11683 inst
.instruction
= 0x00000b04; /* ia! -> [base], #4 */
11685 inst
.instruction
= 0x00000d04; /* db! -> [base, #-4]! */
11689 if (inst
.instruction
& (1 << 23))
11690 inst
.instruction
= 0x00800000; /* ia -> [base] */
11692 inst
.instruction
= 0x00000c04; /* db -> [base, #-4] */
11695 inst
.instruction
|= 0xf8400000;
11697 inst
.instruction
|= 0x00100000;
11699 mask
= ffs (mask
) - 1;
11702 else if (writeback
)
11703 inst
.instruction
|= WRITE_BACK
;
11705 inst
.instruction
|= mask
;
11707 inst
.instruction
|= base
<< 16;
11713 /* This really doesn't seem worth it. */
11714 constraint (inst
.relocs
[0].type
!= BFD_RELOC_UNUSED
,
11715 _("expression too complex"));
11716 constraint (inst
.operands
[1].writeback
,
11717 _("Thumb load/store multiple does not support {reglist}^"));
11719 if (unified_syntax
)
11721 bfd_boolean narrow
;
11725 /* See if we can use a 16-bit instruction. */
11726 if (inst
.instruction
< 0xffff /* not ldmdb/stmdb */
11727 && inst
.size_req
!= 4
11728 && !(inst
.operands
[1].imm
& ~0xff))
11730 mask
= 1 << inst
.operands
[0].reg
;
11732 if (inst
.operands
[0].reg
<= 7)
11734 if (inst
.instruction
== T_MNEM_stmia
11735 ? inst
.operands
[0].writeback
11736 : (inst
.operands
[0].writeback
11737 == !(inst
.operands
[1].imm
& mask
)))
11739 if (inst
.instruction
== T_MNEM_stmia
11740 && (inst
.operands
[1].imm
& mask
)
11741 && (inst
.operands
[1].imm
& (mask
- 1)))
11742 as_warn (_("value stored for r%d is UNKNOWN"),
11743 inst
.operands
[0].reg
);
11745 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11746 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
11747 inst
.instruction
|= inst
.operands
[1].imm
;
11750 else if ((inst
.operands
[1].imm
& (inst
.operands
[1].imm
-1)) == 0)
11752 /* This means 1 register in reg list one of 3 situations:
11753 1. Instruction is stmia, but without writeback.
11754 2. lmdia without writeback, but with Rn not in
11756 3. ldmia with writeback, but with Rn in reglist.
11757 Case 3 is UNPREDICTABLE behaviour, so we handle
11758 case 1 and 2 which can be converted into a 16-bit
11759 str or ldr. The SP cases are handled below. */
11760 unsigned long opcode
;
11761 /* First, record an error for Case 3. */
11762 if (inst
.operands
[1].imm
& mask
11763 && inst
.operands
[0].writeback
)
11765 _("having the base register in the register list when "
11766 "using write back is UNPREDICTABLE");
11768 opcode
= (inst
.instruction
== T_MNEM_stmia
? T_MNEM_str
11770 inst
.instruction
= THUMB_OP16 (opcode
);
11771 inst
.instruction
|= inst
.operands
[0].reg
<< 3;
11772 inst
.instruction
|= (ffs (inst
.operands
[1].imm
)-1);
11776 else if (inst
.operands
[0] .reg
== REG_SP
)
11778 if (inst
.operands
[0].writeback
)
11781 THUMB_OP16 (inst
.instruction
== T_MNEM_stmia
11782 ? T_MNEM_push
: T_MNEM_pop
);
11783 inst
.instruction
|= inst
.operands
[1].imm
;
11786 else if ((inst
.operands
[1].imm
& (inst
.operands
[1].imm
-1)) == 0)
11789 THUMB_OP16 (inst
.instruction
== T_MNEM_stmia
11790 ? T_MNEM_str_sp
: T_MNEM_ldr_sp
);
11791 inst
.instruction
|= ((ffs (inst
.operands
[1].imm
)-1) << 8);
11799 if (inst
.instruction
< 0xffff)
11800 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
11802 encode_thumb2_multi (TRUE
/* do_io */, inst
.operands
[0].reg
,
11803 inst
.operands
[1].imm
,
11804 inst
.operands
[0].writeback
);
11809 constraint (inst
.operands
[0].reg
> 7
11810 || (inst
.operands
[1].imm
& ~0xff), BAD_HIREG
);
11811 constraint (inst
.instruction
!= T_MNEM_ldmia
11812 && inst
.instruction
!= T_MNEM_stmia
,
11813 _("Thumb-2 instruction only valid in unified syntax"));
11814 if (inst
.instruction
== T_MNEM_stmia
)
11816 if (!inst
.operands
[0].writeback
)
11817 as_warn (_("this instruction will write back the base register"));
11818 if ((inst
.operands
[1].imm
& (1 << inst
.operands
[0].reg
))
11819 && (inst
.operands
[1].imm
& ((1 << inst
.operands
[0].reg
) - 1)))
11820 as_warn (_("value stored for r%d is UNKNOWN"),
11821 inst
.operands
[0].reg
);
11825 if (!inst
.operands
[0].writeback
11826 && !(inst
.operands
[1].imm
& (1 << inst
.operands
[0].reg
)))
11827 as_warn (_("this instruction will write back the base register"));
11828 else if (inst
.operands
[0].writeback
11829 && (inst
.operands
[1].imm
& (1 << inst
.operands
[0].reg
)))
11830 as_warn (_("this instruction will not write back the base register"));
11833 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11834 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
11835 inst
.instruction
|= inst
.operands
[1].imm
;
11842 constraint (!inst
.operands
[1].isreg
|| !inst
.operands
[1].preind
11843 || inst
.operands
[1].postind
|| inst
.operands
[1].writeback
11844 || inst
.operands
[1].immisreg
|| inst
.operands
[1].shifted
11845 || inst
.operands
[1].negative
,
11848 constraint ((inst
.operands
[1].reg
== REG_PC
), BAD_PC
);
11850 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
11851 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
11852 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_OFFSET_U8
;
11858 if (!inst
.operands
[1].present
)
11860 constraint (inst
.operands
[0].reg
== REG_LR
,
11861 _("r14 not allowed as first register "
11862 "when second register is omitted"));
11863 inst
.operands
[1].reg
= inst
.operands
[0].reg
+ 1;
11865 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
,
11868 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
11869 inst
.instruction
|= inst
.operands
[1].reg
<< 8;
11870 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
11876 unsigned long opcode
;
11879 if (inst
.operands
[0].isreg
11880 && !inst
.operands
[0].preind
11881 && inst
.operands
[0].reg
== REG_PC
)
11882 set_it_insn_type_last ();
11884 opcode
= inst
.instruction
;
11885 if (unified_syntax
)
11887 if (!inst
.operands
[1].isreg
)
11889 if (opcode
<= 0xffff)
11890 inst
.instruction
= THUMB_OP32 (opcode
);
11891 if (move_or_literal_pool (0, CONST_THUMB
, /*mode_3=*/FALSE
))
11894 if (inst
.operands
[1].isreg
11895 && !inst
.operands
[1].writeback
11896 && !inst
.operands
[1].shifted
&& !inst
.operands
[1].postind
11897 && !inst
.operands
[1].negative
&& inst
.operands
[0].reg
<= 7
11898 && opcode
<= 0xffff
11899 && inst
.size_req
!= 4)
11901 /* Insn may have a 16-bit form. */
11902 Rn
= inst
.operands
[1].reg
;
11903 if (inst
.operands
[1].immisreg
)
11905 inst
.instruction
= THUMB_OP16 (opcode
);
11907 if (Rn
<= 7 && inst
.operands
[1].imm
<= 7)
11909 else if (opcode
!= T_MNEM_ldr
&& opcode
!= T_MNEM_str
)
11910 reject_bad_reg (inst
.operands
[1].imm
);
11912 else if ((Rn
<= 7 && opcode
!= T_MNEM_ldrsh
11913 && opcode
!= T_MNEM_ldrsb
)
11914 || ((Rn
== REG_PC
|| Rn
== REG_SP
) && opcode
== T_MNEM_ldr
)
11915 || (Rn
== REG_SP
&& opcode
== T_MNEM_str
))
11922 if (inst
.relocs
[0].pc_rel
)
11923 opcode
= T_MNEM_ldr_pc2
;
11925 opcode
= T_MNEM_ldr_pc
;
11929 if (opcode
== T_MNEM_ldr
)
11930 opcode
= T_MNEM_ldr_sp
;
11932 opcode
= T_MNEM_str_sp
;
11934 inst
.instruction
= inst
.operands
[0].reg
<< 8;
11938 inst
.instruction
= inst
.operands
[0].reg
;
11939 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
11941 inst
.instruction
|= THUMB_OP16 (opcode
);
11942 if (inst
.size_req
== 2)
11943 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_OFFSET
;
11945 inst
.relax
= opcode
;
11949 /* Definitely a 32-bit variant. */
11951 /* Warning for Erratum 752419. */
11952 if (opcode
== T_MNEM_ldr
11953 && inst
.operands
[0].reg
== REG_SP
11954 && inst
.operands
[1].writeback
== 1
11955 && !inst
.operands
[1].immisreg
)
11957 if (no_cpu_selected ()
11958 || (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v7
)
11959 && !ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v7a
)
11960 && !ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v7r
)))
11961 as_warn (_("This instruction may be unpredictable "
11962 "if executed on M-profile cores "
11963 "with interrupts enabled."));
11966 /* Do some validations regarding addressing modes. */
11967 if (inst
.operands
[1].immisreg
)
11968 reject_bad_reg (inst
.operands
[1].imm
);
11970 constraint (inst
.operands
[1].writeback
== 1
11971 && inst
.operands
[0].reg
== inst
.operands
[1].reg
,
11974 inst
.instruction
= THUMB_OP32 (opcode
);
11975 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
11976 encode_thumb32_addr_mode (1, /*is_t=*/FALSE
, /*is_d=*/FALSE
);
11977 check_ldr_r15_aligned ();
11981 constraint (inst
.operands
[0].reg
> 7, BAD_HIREG
);
11983 if (inst
.instruction
== T_MNEM_ldrsh
|| inst
.instruction
== T_MNEM_ldrsb
)
11985 /* Only [Rn,Rm] is acceptable. */
11986 constraint (inst
.operands
[1].reg
> 7 || inst
.operands
[1].imm
> 7, BAD_HIREG
);
11987 constraint (!inst
.operands
[1].isreg
|| !inst
.operands
[1].immisreg
11988 || inst
.operands
[1].postind
|| inst
.operands
[1].shifted
11989 || inst
.operands
[1].negative
,
11990 _("Thumb does not support this addressing mode"));
11991 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11995 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11996 if (!inst
.operands
[1].isreg
)
11997 if (move_or_literal_pool (0, CONST_THUMB
, /*mode_3=*/FALSE
))
12000 constraint (!inst
.operands
[1].preind
12001 || inst
.operands
[1].shifted
12002 || inst
.operands
[1].writeback
,
12003 _("Thumb does not support this addressing mode"));
12004 if (inst
.operands
[1].reg
== REG_PC
|| inst
.operands
[1].reg
== REG_SP
)
12006 constraint (inst
.instruction
& 0x0600,
12007 _("byte or halfword not valid for base register"));
12008 constraint (inst
.operands
[1].reg
== REG_PC
12009 && !(inst
.instruction
& THUMB_LOAD_BIT
),
12010 _("r15 based store not allowed"));
12011 constraint (inst
.operands
[1].immisreg
,
12012 _("invalid base register for register offset"));
12014 if (inst
.operands
[1].reg
== REG_PC
)
12015 inst
.instruction
= T_OPCODE_LDR_PC
;
12016 else if (inst
.instruction
& THUMB_LOAD_BIT
)
12017 inst
.instruction
= T_OPCODE_LDR_SP
;
12019 inst
.instruction
= T_OPCODE_STR_SP
;
12021 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
12022 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_OFFSET
;
12026 constraint (inst
.operands
[1].reg
> 7, BAD_HIREG
);
12027 if (!inst
.operands
[1].immisreg
)
12029 /* Immediate offset. */
12030 inst
.instruction
|= inst
.operands
[0].reg
;
12031 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
12032 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_OFFSET
;
12036 /* Register offset. */
12037 constraint (inst
.operands
[1].imm
> 7, BAD_HIREG
);
12038 constraint (inst
.operands
[1].negative
,
12039 _("Thumb does not support this addressing mode"));
12042 switch (inst
.instruction
)
12044 case T_OPCODE_STR_IW
: inst
.instruction
= T_OPCODE_STR_RW
; break;
12045 case T_OPCODE_STR_IH
: inst
.instruction
= T_OPCODE_STR_RH
; break;
12046 case T_OPCODE_STR_IB
: inst
.instruction
= T_OPCODE_STR_RB
; break;
12047 case T_OPCODE_LDR_IW
: inst
.instruction
= T_OPCODE_LDR_RW
; break;
12048 case T_OPCODE_LDR_IH
: inst
.instruction
= T_OPCODE_LDR_RH
; break;
12049 case T_OPCODE_LDR_IB
: inst
.instruction
= T_OPCODE_LDR_RB
; break;
12050 case 0x5600 /* ldrsb */:
12051 case 0x5e00 /* ldrsh */: break;
12055 inst
.instruction
|= inst
.operands
[0].reg
;
12056 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
12057 inst
.instruction
|= inst
.operands
[1].imm
<< 6;
12063 if (!inst
.operands
[1].present
)
12065 inst
.operands
[1].reg
= inst
.operands
[0].reg
+ 1;
12066 constraint (inst
.operands
[0].reg
== REG_LR
,
12067 _("r14 not allowed here"));
12068 constraint (inst
.operands
[0].reg
== REG_R12
,
12069 _("r12 not allowed here"));
12072 if (inst
.operands
[2].writeback
12073 && (inst
.operands
[0].reg
== inst
.operands
[2].reg
12074 || inst
.operands
[1].reg
== inst
.operands
[2].reg
))
12075 as_warn (_("base register written back, and overlaps "
12076 "one of transfer registers"));
12078 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
12079 inst
.instruction
|= inst
.operands
[1].reg
<< 8;
12080 encode_thumb32_addr_mode (2, /*is_t=*/FALSE
, /*is_d=*/TRUE
);
12086 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
12087 encode_thumb32_addr_mode (1, /*is_t=*/TRUE
, /*is_d=*/FALSE
);
12093 unsigned Rd
, Rn
, Rm
, Ra
;
12095 Rd
= inst
.operands
[0].reg
;
12096 Rn
= inst
.operands
[1].reg
;
12097 Rm
= inst
.operands
[2].reg
;
12098 Ra
= inst
.operands
[3].reg
;
12100 reject_bad_reg (Rd
);
12101 reject_bad_reg (Rn
);
12102 reject_bad_reg (Rm
);
12103 reject_bad_reg (Ra
);
12105 inst
.instruction
|= Rd
<< 8;
12106 inst
.instruction
|= Rn
<< 16;
12107 inst
.instruction
|= Rm
;
12108 inst
.instruction
|= Ra
<< 12;
12114 unsigned RdLo
, RdHi
, Rn
, Rm
;
12116 RdLo
= inst
.operands
[0].reg
;
12117 RdHi
= inst
.operands
[1].reg
;
12118 Rn
= inst
.operands
[2].reg
;
12119 Rm
= inst
.operands
[3].reg
;
12121 reject_bad_reg (RdLo
);
12122 reject_bad_reg (RdHi
);
12123 reject_bad_reg (Rn
);
12124 reject_bad_reg (Rm
);
12126 inst
.instruction
|= RdLo
<< 12;
12127 inst
.instruction
|= RdHi
<< 8;
12128 inst
.instruction
|= Rn
<< 16;
12129 inst
.instruction
|= Rm
;
12133 do_t_mov_cmp (void)
12137 Rn
= inst
.operands
[0].reg
;
12138 Rm
= inst
.operands
[1].reg
;
12141 set_it_insn_type_last ();
12143 if (unified_syntax
)
12145 int r0off
= (inst
.instruction
== T_MNEM_mov
12146 || inst
.instruction
== T_MNEM_movs
) ? 8 : 16;
12147 unsigned long opcode
;
12148 bfd_boolean narrow
;
12149 bfd_boolean low_regs
;
12151 low_regs
= (Rn
<= 7 && Rm
<= 7);
12152 opcode
= inst
.instruction
;
12153 if (in_it_block ())
12154 narrow
= opcode
!= T_MNEM_movs
;
12156 narrow
= opcode
!= T_MNEM_movs
|| low_regs
;
12157 if (inst
.size_req
== 4
12158 || inst
.operands
[1].shifted
)
12161 /* MOVS PC, LR is encoded as SUBS PC, LR, #0. */
12162 if (opcode
== T_MNEM_movs
&& inst
.operands
[1].isreg
12163 && !inst
.operands
[1].shifted
12167 inst
.instruction
= T2_SUBS_PC_LR
;
12171 if (opcode
== T_MNEM_cmp
)
12173 constraint (Rn
== REG_PC
, BAD_PC
);
12176 /* In the Thumb-2 ISA, use of R13 as Rm is deprecated,
12178 warn_deprecated_sp (Rm
);
12179 /* R15 was documented as a valid choice for Rm in ARMv6,
12180 but as UNPREDICTABLE in ARMv7. ARM's proprietary
12181 tools reject R15, so we do too. */
12182 constraint (Rm
== REG_PC
, BAD_PC
);
12185 reject_bad_reg (Rm
);
12187 else if (opcode
== T_MNEM_mov
12188 || opcode
== T_MNEM_movs
)
12190 if (inst
.operands
[1].isreg
)
12192 if (opcode
== T_MNEM_movs
)
12194 reject_bad_reg (Rn
);
12195 reject_bad_reg (Rm
);
12199 /* This is mov.n. */
12200 if ((Rn
== REG_SP
|| Rn
== REG_PC
)
12201 && (Rm
== REG_SP
|| Rm
== REG_PC
))
12203 as_tsktsk (_("Use of r%u as a source register is "
12204 "deprecated when r%u is the destination "
12205 "register."), Rm
, Rn
);
12210 /* This is mov.w. */
12211 constraint (Rn
== REG_PC
, BAD_PC
);
12212 constraint (Rm
== REG_PC
, BAD_PC
);
12213 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8
))
12214 constraint (Rn
== REG_SP
&& Rm
== REG_SP
, BAD_SP
);
12218 reject_bad_reg (Rn
);
12221 if (!inst
.operands
[1].isreg
)
12223 /* Immediate operand. */
12224 if (!in_it_block () && opcode
== T_MNEM_mov
)
12226 if (low_regs
&& narrow
)
12228 inst
.instruction
= THUMB_OP16 (opcode
);
12229 inst
.instruction
|= Rn
<< 8;
12230 if (inst
.relocs
[0].type
< BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
12231 || inst
.relocs
[0].type
> BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
)
12233 if (inst
.size_req
== 2)
12234 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_IMM
;
12236 inst
.relax
= opcode
;
12241 constraint ((inst
.relocs
[0].type
12242 >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
)
12243 && (inst
.relocs
[0].type
12244 <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
) ,
12245 THUMB1_RELOC_ONLY
);
12247 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12248 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
12249 inst
.instruction
|= Rn
<< r0off
;
12250 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
12253 else if (inst
.operands
[1].shifted
&& inst
.operands
[1].immisreg
12254 && (inst
.instruction
== T_MNEM_mov
12255 || inst
.instruction
== T_MNEM_movs
))
12257 /* Register shifts are encoded as separate shift instructions. */
12258 bfd_boolean flags
= (inst
.instruction
== T_MNEM_movs
);
12260 if (in_it_block ())
12265 if (inst
.size_req
== 4)
12268 if (!low_regs
|| inst
.operands
[1].imm
> 7)
12274 switch (inst
.operands
[1].shift_kind
)
12277 opcode
= narrow
? T_OPCODE_LSL_R
: THUMB_OP32 (T_MNEM_lsl
);
12280 opcode
= narrow
? T_OPCODE_ASR_R
: THUMB_OP32 (T_MNEM_asr
);
12283 opcode
= narrow
? T_OPCODE_LSR_R
: THUMB_OP32 (T_MNEM_lsr
);
12286 opcode
= narrow
? T_OPCODE_ROR_R
: THUMB_OP32 (T_MNEM_ror
);
12292 inst
.instruction
= opcode
;
12295 inst
.instruction
|= Rn
;
12296 inst
.instruction
|= inst
.operands
[1].imm
<< 3;
12301 inst
.instruction
|= CONDS_BIT
;
12303 inst
.instruction
|= Rn
<< 8;
12304 inst
.instruction
|= Rm
<< 16;
12305 inst
.instruction
|= inst
.operands
[1].imm
;
12310 /* Some mov with immediate shift have narrow variants.
12311 Register shifts are handled above. */
12312 if (low_regs
&& inst
.operands
[1].shifted
12313 && (inst
.instruction
== T_MNEM_mov
12314 || inst
.instruction
== T_MNEM_movs
))
12316 if (in_it_block ())
12317 narrow
= (inst
.instruction
== T_MNEM_mov
);
12319 narrow
= (inst
.instruction
== T_MNEM_movs
);
12324 switch (inst
.operands
[1].shift_kind
)
12326 case SHIFT_LSL
: inst
.instruction
= T_OPCODE_LSL_I
; break;
12327 case SHIFT_LSR
: inst
.instruction
= T_OPCODE_LSR_I
; break;
12328 case SHIFT_ASR
: inst
.instruction
= T_OPCODE_ASR_I
; break;
12329 default: narrow
= FALSE
; break;
12335 inst
.instruction
|= Rn
;
12336 inst
.instruction
|= Rm
<< 3;
12337 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_SHIFT
;
12341 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12342 inst
.instruction
|= Rn
<< r0off
;
12343 encode_thumb32_shifted_operand (1);
12347 switch (inst
.instruction
)
12350 /* In v4t or v5t a move of two lowregs produces unpredictable
12351 results. Don't allow this. */
12354 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6
),
12355 "MOV Rd, Rs with two low registers is not "
12356 "permitted on this architecture");
12357 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
12361 inst
.instruction
= T_OPCODE_MOV_HR
;
12362 inst
.instruction
|= (Rn
& 0x8) << 4;
12363 inst
.instruction
|= (Rn
& 0x7);
12364 inst
.instruction
|= Rm
<< 3;
12368 /* We know we have low registers at this point.
12369 Generate LSLS Rd, Rs, #0. */
12370 inst
.instruction
= T_OPCODE_LSL_I
;
12371 inst
.instruction
|= Rn
;
12372 inst
.instruction
|= Rm
<< 3;
12378 inst
.instruction
= T_OPCODE_CMP_LR
;
12379 inst
.instruction
|= Rn
;
12380 inst
.instruction
|= Rm
<< 3;
12384 inst
.instruction
= T_OPCODE_CMP_HR
;
12385 inst
.instruction
|= (Rn
& 0x8) << 4;
12386 inst
.instruction
|= (Rn
& 0x7);
12387 inst
.instruction
|= Rm
<< 3;
12394 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12396 /* PR 10443: Do not silently ignore shifted operands. */
12397 constraint (inst
.operands
[1].shifted
,
12398 _("shifts in CMP/MOV instructions are only supported in unified syntax"));
12400 if (inst
.operands
[1].isreg
)
12402 if (Rn
< 8 && Rm
< 8)
12404 /* A move of two lowregs is encoded as ADD Rd, Rs, #0
12405 since a MOV instruction produces unpredictable results. */
12406 if (inst
.instruction
== T_OPCODE_MOV_I8
)
12407 inst
.instruction
= T_OPCODE_ADD_I3
;
12409 inst
.instruction
= T_OPCODE_CMP_LR
;
12411 inst
.instruction
|= Rn
;
12412 inst
.instruction
|= Rm
<< 3;
12416 if (inst
.instruction
== T_OPCODE_MOV_I8
)
12417 inst
.instruction
= T_OPCODE_MOV_HR
;
12419 inst
.instruction
= T_OPCODE_CMP_HR
;
12425 constraint (Rn
> 7,
12426 _("only lo regs allowed with immediate"));
12427 inst
.instruction
|= Rn
<< 8;
12428 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_IMM
;
12439 top
= (inst
.instruction
& 0x00800000) != 0;
12440 if (inst
.relocs
[0].type
== BFD_RELOC_ARM_MOVW
)
12442 constraint (top
, _(":lower16: not allowed in this instruction"));
12443 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_MOVW
;
12445 else if (inst
.relocs
[0].type
== BFD_RELOC_ARM_MOVT
)
12447 constraint (!top
, _(":upper16: not allowed in this instruction"));
12448 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_MOVT
;
12451 Rd
= inst
.operands
[0].reg
;
12452 reject_bad_reg (Rd
);
12454 inst
.instruction
|= Rd
<< 8;
12455 if (inst
.relocs
[0].type
== BFD_RELOC_UNUSED
)
12457 imm
= inst
.relocs
[0].exp
.X_add_number
;
12458 inst
.instruction
|= (imm
& 0xf000) << 4;
12459 inst
.instruction
|= (imm
& 0x0800) << 15;
12460 inst
.instruction
|= (imm
& 0x0700) << 4;
12461 inst
.instruction
|= (imm
& 0x00ff);
12466 do_t_mvn_tst (void)
12470 Rn
= inst
.operands
[0].reg
;
12471 Rm
= inst
.operands
[1].reg
;
12473 if (inst
.instruction
== T_MNEM_cmp
12474 || inst
.instruction
== T_MNEM_cmn
)
12475 constraint (Rn
== REG_PC
, BAD_PC
);
12477 reject_bad_reg (Rn
);
12478 reject_bad_reg (Rm
);
12480 if (unified_syntax
)
12482 int r0off
= (inst
.instruction
== T_MNEM_mvn
12483 || inst
.instruction
== T_MNEM_mvns
) ? 8 : 16;
12484 bfd_boolean narrow
;
12486 if (inst
.size_req
== 4
12487 || inst
.instruction
> 0xffff
12488 || inst
.operands
[1].shifted
12489 || Rn
> 7 || Rm
> 7)
12491 else if (inst
.instruction
== T_MNEM_cmn
12492 || inst
.instruction
== T_MNEM_tst
)
12494 else if (THUMB_SETS_FLAGS (inst
.instruction
))
12495 narrow
= !in_it_block ();
12497 narrow
= in_it_block ();
12499 if (!inst
.operands
[1].isreg
)
12501 /* For an immediate, we always generate a 32-bit opcode;
12502 section relaxation will shrink it later if possible. */
12503 if (inst
.instruction
< 0xffff)
12504 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12505 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
12506 inst
.instruction
|= Rn
<< r0off
;
12507 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
12511 /* See if we can do this with a 16-bit instruction. */
12514 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12515 inst
.instruction
|= Rn
;
12516 inst
.instruction
|= Rm
<< 3;
12520 constraint (inst
.operands
[1].shifted
12521 && inst
.operands
[1].immisreg
,
12522 _("shift must be constant"));
12523 if (inst
.instruction
< 0xffff)
12524 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12525 inst
.instruction
|= Rn
<< r0off
;
12526 encode_thumb32_shifted_operand (1);
12532 constraint (inst
.instruction
> 0xffff
12533 || inst
.instruction
== T_MNEM_mvns
, BAD_THUMB32
);
12534 constraint (!inst
.operands
[1].isreg
|| inst
.operands
[1].shifted
,
12535 _("unshifted register required"));
12536 constraint (Rn
> 7 || Rm
> 7,
12539 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12540 inst
.instruction
|= Rn
;
12541 inst
.instruction
|= Rm
<< 3;
12550 if (do_vfp_nsyn_mrs () == SUCCESS
)
12553 Rd
= inst
.operands
[0].reg
;
12554 reject_bad_reg (Rd
);
12555 inst
.instruction
|= Rd
<< 8;
12557 if (inst
.operands
[1].isreg
)
12559 unsigned br
= inst
.operands
[1].reg
;
12560 if (((br
& 0x200) == 0) && ((br
& 0xf000) != 0xf000))
12561 as_bad (_("bad register for mrs"));
12563 inst
.instruction
|= br
& (0xf << 16);
12564 inst
.instruction
|= (br
& 0x300) >> 4;
12565 inst
.instruction
|= (br
& SPSR_BIT
) >> 2;
12569 int flags
= inst
.operands
[1].imm
& (PSR_c
|PSR_x
|PSR_s
|PSR_f
|SPSR_BIT
);
12571 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_m
))
12573 /* PR gas/12698: The constraint is only applied for m_profile.
12574 If the user has specified -march=all, we want to ignore it as
12575 we are building for any CPU type, including non-m variants. */
12576 bfd_boolean m_profile
=
12577 !ARM_FEATURE_CORE_EQUAL (selected_cpu
, arm_arch_any
);
12578 constraint ((flags
!= 0) && m_profile
, _("selected processor does "
12579 "not support requested special purpose register"));
12582 /* mrs only accepts APSR/CPSR/SPSR/CPSR_all/SPSR_all (for non-M profile
12584 constraint ((flags
& ~SPSR_BIT
) != (PSR_c
|PSR_f
),
12585 _("'APSR', 'CPSR' or 'SPSR' expected"));
12587 inst
.instruction
|= (flags
& SPSR_BIT
) >> 2;
12588 inst
.instruction
|= inst
.operands
[1].imm
& 0xff;
12589 inst
.instruction
|= 0xf0000;
12599 if (do_vfp_nsyn_msr () == SUCCESS
)
12602 constraint (!inst
.operands
[1].isreg
,
12603 _("Thumb encoding does not support an immediate here"));
12605 if (inst
.operands
[0].isreg
)
12606 flags
= (int)(inst
.operands
[0].reg
);
12608 flags
= inst
.operands
[0].imm
;
12610 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_m
))
12612 int bits
= inst
.operands
[0].imm
& (PSR_c
|PSR_x
|PSR_s
|PSR_f
|SPSR_BIT
);
12614 /* PR gas/12698: The constraint is only applied for m_profile.
12615 If the user has specified -march=all, we want to ignore it as
12616 we are building for any CPU type, including non-m variants. */
12617 bfd_boolean m_profile
=
12618 !ARM_FEATURE_CORE_EQUAL (selected_cpu
, arm_arch_any
);
12619 constraint (((ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6_dsp
)
12620 && (bits
& ~(PSR_s
| PSR_f
)) != 0)
12621 || (!ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6_dsp
)
12622 && bits
!= PSR_f
)) && m_profile
,
12623 _("selected processor does not support requested special "
12624 "purpose register"));
12627 constraint ((flags
& 0xff) != 0, _("selected processor does not support "
12628 "requested special purpose register"));
12630 Rn
= inst
.operands
[1].reg
;
12631 reject_bad_reg (Rn
);
12633 inst
.instruction
|= (flags
& SPSR_BIT
) >> 2;
12634 inst
.instruction
|= (flags
& 0xf0000) >> 8;
12635 inst
.instruction
|= (flags
& 0x300) >> 4;
12636 inst
.instruction
|= (flags
& 0xff);
12637 inst
.instruction
|= Rn
<< 16;
12643 bfd_boolean narrow
;
12644 unsigned Rd
, Rn
, Rm
;
12646 if (!inst
.operands
[2].present
)
12647 inst
.operands
[2].reg
= inst
.operands
[0].reg
;
12649 Rd
= inst
.operands
[0].reg
;
12650 Rn
= inst
.operands
[1].reg
;
12651 Rm
= inst
.operands
[2].reg
;
12653 if (unified_syntax
)
12655 if (inst
.size_req
== 4
12661 else if (inst
.instruction
== T_MNEM_muls
)
12662 narrow
= !in_it_block ();
12664 narrow
= in_it_block ();
12668 constraint (inst
.instruction
== T_MNEM_muls
, BAD_THUMB32
);
12669 constraint (Rn
> 7 || Rm
> 7,
12676 /* 16-bit MULS/Conditional MUL. */
12677 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12678 inst
.instruction
|= Rd
;
12681 inst
.instruction
|= Rm
<< 3;
12683 inst
.instruction
|= Rn
<< 3;
12685 constraint (1, _("dest must overlap one source register"));
12689 constraint (inst
.instruction
!= T_MNEM_mul
,
12690 _("Thumb-2 MUL must not set flags"));
12692 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12693 inst
.instruction
|= Rd
<< 8;
12694 inst
.instruction
|= Rn
<< 16;
12695 inst
.instruction
|= Rm
<< 0;
12697 reject_bad_reg (Rd
);
12698 reject_bad_reg (Rn
);
12699 reject_bad_reg (Rm
);
12706 unsigned RdLo
, RdHi
, Rn
, Rm
;
12708 RdLo
= inst
.operands
[0].reg
;
12709 RdHi
= inst
.operands
[1].reg
;
12710 Rn
= inst
.operands
[2].reg
;
12711 Rm
= inst
.operands
[3].reg
;
12713 reject_bad_reg (RdLo
);
12714 reject_bad_reg (RdHi
);
12715 reject_bad_reg (Rn
);
12716 reject_bad_reg (Rm
);
12718 inst
.instruction
|= RdLo
<< 12;
12719 inst
.instruction
|= RdHi
<< 8;
12720 inst
.instruction
|= Rn
<< 16;
12721 inst
.instruction
|= Rm
;
12724 as_tsktsk (_("rdhi and rdlo must be different"));
12730 set_it_insn_type (NEUTRAL_IT_INSN
);
12732 if (unified_syntax
)
12734 if (inst
.size_req
== 4 || inst
.operands
[0].imm
> 15)
12736 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12737 inst
.instruction
|= inst
.operands
[0].imm
;
12741 /* PR9722: Check for Thumb2 availability before
12742 generating a thumb2 nop instruction. */
12743 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6t2
))
12745 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12746 inst
.instruction
|= inst
.operands
[0].imm
<< 4;
12749 inst
.instruction
= 0x46c0;
12754 constraint (inst
.operands
[0].present
,
12755 _("Thumb does not support NOP with hints"));
12756 inst
.instruction
= 0x46c0;
12763 if (unified_syntax
)
12765 bfd_boolean narrow
;
12767 if (THUMB_SETS_FLAGS (inst
.instruction
))
12768 narrow
= !in_it_block ();
12770 narrow
= in_it_block ();
12771 if (inst
.operands
[0].reg
> 7 || inst
.operands
[1].reg
> 7)
12773 if (inst
.size_req
== 4)
12778 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12779 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
12780 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
12784 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12785 inst
.instruction
|= inst
.operands
[0].reg
;
12786 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
12791 constraint (inst
.operands
[0].reg
> 7 || inst
.operands
[1].reg
> 7,
12793 constraint (THUMB_SETS_FLAGS (inst
.instruction
), BAD_THUMB32
);
12795 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12796 inst
.instruction
|= inst
.operands
[0].reg
;
12797 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
12806 Rd
= inst
.operands
[0].reg
;
12807 Rn
= inst
.operands
[1].present
? inst
.operands
[1].reg
: Rd
;
12809 reject_bad_reg (Rd
);
12810 /* Rn == REG_SP is unpredictable; Rn == REG_PC is MVN. */
12811 reject_bad_reg (Rn
);
12813 inst
.instruction
|= Rd
<< 8;
12814 inst
.instruction
|= Rn
<< 16;
12816 if (!inst
.operands
[2].isreg
)
12818 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
12819 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
12825 Rm
= inst
.operands
[2].reg
;
12826 reject_bad_reg (Rm
);
12828 constraint (inst
.operands
[2].shifted
12829 && inst
.operands
[2].immisreg
,
12830 _("shift must be constant"));
12831 encode_thumb32_shifted_operand (2);
12838 unsigned Rd
, Rn
, Rm
;
12840 Rd
= inst
.operands
[0].reg
;
12841 Rn
= inst
.operands
[1].reg
;
12842 Rm
= inst
.operands
[2].reg
;
12844 reject_bad_reg (Rd
);
12845 reject_bad_reg (Rn
);
12846 reject_bad_reg (Rm
);
12848 inst
.instruction
|= Rd
<< 8;
12849 inst
.instruction
|= Rn
<< 16;
12850 inst
.instruction
|= Rm
;
12851 if (inst
.operands
[3].present
)
12853 unsigned int val
= inst
.relocs
[0].exp
.X_add_number
;
12854 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
,
12855 _("expression too complex"));
12856 inst
.instruction
|= (val
& 0x1c) << 10;
12857 inst
.instruction
|= (val
& 0x03) << 6;
12864 if (!inst
.operands
[3].present
)
12868 inst
.instruction
&= ~0x00000020;
12870 /* PR 10168. Swap the Rm and Rn registers. */
12871 Rtmp
= inst
.operands
[1].reg
;
12872 inst
.operands
[1].reg
= inst
.operands
[2].reg
;
12873 inst
.operands
[2].reg
= Rtmp
;
12881 if (inst
.operands
[0].immisreg
)
12882 reject_bad_reg (inst
.operands
[0].imm
);
12884 encode_thumb32_addr_mode (0, /*is_t=*/FALSE
, /*is_d=*/FALSE
);
12888 do_t_push_pop (void)
12892 constraint (inst
.operands
[0].writeback
,
12893 _("push/pop do not support {reglist}^"));
12894 constraint (inst
.relocs
[0].type
!= BFD_RELOC_UNUSED
,
12895 _("expression too complex"));
12897 mask
= inst
.operands
[0].imm
;
12898 if (inst
.size_req
!= 4 && (mask
& ~0xff) == 0)
12899 inst
.instruction
= THUMB_OP16 (inst
.instruction
) | mask
;
12900 else if (inst
.size_req
!= 4
12901 && (mask
& ~0xff) == (1U << (inst
.instruction
== T_MNEM_push
12902 ? REG_LR
: REG_PC
)))
12904 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12905 inst
.instruction
|= THUMB_PP_PC_LR
;
12906 inst
.instruction
|= mask
& 0xff;
12908 else if (unified_syntax
)
12910 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12911 encode_thumb2_multi (TRUE
/* do_io */, 13, mask
, TRUE
);
12915 inst
.error
= _("invalid register list to push/pop instruction");
12923 if (unified_syntax
)
12924 encode_thumb2_multi (FALSE
/* do_io */, -1, inst
.operands
[0].imm
, FALSE
);
12927 inst
.error
= _("invalid register list to push/pop instruction");
12933 do_t_vscclrm (void)
12935 if (inst
.operands
[0].issingle
)
12937 inst
.instruction
|= (inst
.operands
[0].reg
& 0x1) << 22;
12938 inst
.instruction
|= (inst
.operands
[0].reg
& 0x1e) << 11;
12939 inst
.instruction
|= inst
.operands
[0].imm
;
12943 inst
.instruction
|= (inst
.operands
[0].reg
& 0x10) << 18;
12944 inst
.instruction
|= (inst
.operands
[0].reg
& 0xf) << 12;
12945 inst
.instruction
|= 1 << 8;
12946 inst
.instruction
|= inst
.operands
[0].imm
<< 1;
12955 Rd
= inst
.operands
[0].reg
;
12956 Rm
= inst
.operands
[1].reg
;
12958 reject_bad_reg (Rd
);
12959 reject_bad_reg (Rm
);
12961 inst
.instruction
|= Rd
<< 8;
12962 inst
.instruction
|= Rm
<< 16;
12963 inst
.instruction
|= Rm
;
12971 Rd
= inst
.operands
[0].reg
;
12972 Rm
= inst
.operands
[1].reg
;
12974 reject_bad_reg (Rd
);
12975 reject_bad_reg (Rm
);
12977 if (Rd
<= 7 && Rm
<= 7
12978 && inst
.size_req
!= 4)
12980 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12981 inst
.instruction
|= Rd
;
12982 inst
.instruction
|= Rm
<< 3;
12984 else if (unified_syntax
)
12986 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12987 inst
.instruction
|= Rd
<< 8;
12988 inst
.instruction
|= Rm
<< 16;
12989 inst
.instruction
|= Rm
;
12992 inst
.error
= BAD_HIREG
;
13000 Rd
= inst
.operands
[0].reg
;
13001 Rm
= inst
.operands
[1].reg
;
13003 reject_bad_reg (Rd
);
13004 reject_bad_reg (Rm
);
13006 inst
.instruction
|= Rd
<< 8;
13007 inst
.instruction
|= Rm
;
13015 Rd
= inst
.operands
[0].reg
;
13016 Rs
= (inst
.operands
[1].present
13017 ? inst
.operands
[1].reg
/* Rd, Rs, foo */
13018 : inst
.operands
[0].reg
); /* Rd, foo -> Rd, Rd, foo */
13020 reject_bad_reg (Rd
);
13021 reject_bad_reg (Rs
);
13022 if (inst
.operands
[2].isreg
)
13023 reject_bad_reg (inst
.operands
[2].reg
);
13025 inst
.instruction
|= Rd
<< 8;
13026 inst
.instruction
|= Rs
<< 16;
13027 if (!inst
.operands
[2].isreg
)
13029 bfd_boolean narrow
;
13031 if ((inst
.instruction
& 0x00100000) != 0)
13032 narrow
= !in_it_block ();
13034 narrow
= in_it_block ();
13036 if (Rd
> 7 || Rs
> 7)
13039 if (inst
.size_req
== 4 || !unified_syntax
)
13042 if (inst
.relocs
[0].exp
.X_op
!= O_constant
13043 || inst
.relocs
[0].exp
.X_add_number
!= 0)
13046 /* Turn rsb #0 into 16-bit neg. We should probably do this via
13047 relaxation, but it doesn't seem worth the hassle. */
13050 inst
.relocs
[0].type
= BFD_RELOC_UNUSED
;
13051 inst
.instruction
= THUMB_OP16 (T_MNEM_negs
);
13052 inst
.instruction
|= Rs
<< 3;
13053 inst
.instruction
|= Rd
;
13057 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
13058 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
13062 encode_thumb32_shifted_operand (2);
13068 if (warn_on_deprecated
13069 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8
))
13070 as_tsktsk (_("setend use is deprecated for ARMv8"));
13072 set_it_insn_type (OUTSIDE_IT_INSN
);
13073 if (inst
.operands
[0].imm
)
13074 inst
.instruction
|= 0x8;
13080 if (!inst
.operands
[1].present
)
13081 inst
.operands
[1].reg
= inst
.operands
[0].reg
;
13083 if (unified_syntax
)
13085 bfd_boolean narrow
;
13088 switch (inst
.instruction
)
13091 case T_MNEM_asrs
: shift_kind
= SHIFT_ASR
; break;
13093 case T_MNEM_lsls
: shift_kind
= SHIFT_LSL
; break;
13095 case T_MNEM_lsrs
: shift_kind
= SHIFT_LSR
; break;
13097 case T_MNEM_rors
: shift_kind
= SHIFT_ROR
; break;
13101 if (THUMB_SETS_FLAGS (inst
.instruction
))
13102 narrow
= !in_it_block ();
13104 narrow
= in_it_block ();
13105 if (inst
.operands
[0].reg
> 7 || inst
.operands
[1].reg
> 7)
13107 if (!inst
.operands
[2].isreg
&& shift_kind
== SHIFT_ROR
)
13109 if (inst
.operands
[2].isreg
13110 && (inst
.operands
[1].reg
!= inst
.operands
[0].reg
13111 || inst
.operands
[2].reg
> 7))
13113 if (inst
.size_req
== 4)
13116 reject_bad_reg (inst
.operands
[0].reg
);
13117 reject_bad_reg (inst
.operands
[1].reg
);
13121 if (inst
.operands
[2].isreg
)
13123 reject_bad_reg (inst
.operands
[2].reg
);
13124 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
13125 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
13126 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
13127 inst
.instruction
|= inst
.operands
[2].reg
;
13129 /* PR 12854: Error on extraneous shifts. */
13130 constraint (inst
.operands
[2].shifted
,
13131 _("extraneous shift as part of operand to shift insn"));
13135 inst
.operands
[1].shifted
= 1;
13136 inst
.operands
[1].shift_kind
= shift_kind
;
13137 inst
.instruction
= THUMB_OP32 (THUMB_SETS_FLAGS (inst
.instruction
)
13138 ? T_MNEM_movs
: T_MNEM_mov
);
13139 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
13140 encode_thumb32_shifted_operand (1);
13141 /* Prevent the incorrect generation of an ARM_IMMEDIATE fixup. */
13142 inst
.relocs
[0].type
= BFD_RELOC_UNUSED
;
13147 if (inst
.operands
[2].isreg
)
13149 switch (shift_kind
)
13151 case SHIFT_ASR
: inst
.instruction
= T_OPCODE_ASR_R
; break;
13152 case SHIFT_LSL
: inst
.instruction
= T_OPCODE_LSL_R
; break;
13153 case SHIFT_LSR
: inst
.instruction
= T_OPCODE_LSR_R
; break;
13154 case SHIFT_ROR
: inst
.instruction
= T_OPCODE_ROR_R
; break;
13158 inst
.instruction
|= inst
.operands
[0].reg
;
13159 inst
.instruction
|= inst
.operands
[2].reg
<< 3;
13161 /* PR 12854: Error on extraneous shifts. */
13162 constraint (inst
.operands
[2].shifted
,
13163 _("extraneous shift as part of operand to shift insn"));
13167 switch (shift_kind
)
13169 case SHIFT_ASR
: inst
.instruction
= T_OPCODE_ASR_I
; break;
13170 case SHIFT_LSL
: inst
.instruction
= T_OPCODE_LSL_I
; break;
13171 case SHIFT_LSR
: inst
.instruction
= T_OPCODE_LSR_I
; break;
13174 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_SHIFT
;
13175 inst
.instruction
|= inst
.operands
[0].reg
;
13176 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
13182 constraint (inst
.operands
[0].reg
> 7
13183 || inst
.operands
[1].reg
> 7, BAD_HIREG
);
13184 constraint (THUMB_SETS_FLAGS (inst
.instruction
), BAD_THUMB32
);
13186 if (inst
.operands
[2].isreg
) /* Rd, {Rs,} Rn */
13188 constraint (inst
.operands
[2].reg
> 7, BAD_HIREG
);
13189 constraint (inst
.operands
[0].reg
!= inst
.operands
[1].reg
,
13190 _("source1 and dest must be same register"));
13192 switch (inst
.instruction
)
13194 case T_MNEM_asr
: inst
.instruction
= T_OPCODE_ASR_R
; break;
13195 case T_MNEM_lsl
: inst
.instruction
= T_OPCODE_LSL_R
; break;
13196 case T_MNEM_lsr
: inst
.instruction
= T_OPCODE_LSR_R
; break;
13197 case T_MNEM_ror
: inst
.instruction
= T_OPCODE_ROR_R
; break;
13201 inst
.instruction
|= inst
.operands
[0].reg
;
13202 inst
.instruction
|= inst
.operands
[2].reg
<< 3;
13204 /* PR 12854: Error on extraneous shifts. */
13205 constraint (inst
.operands
[2].shifted
,
13206 _("extraneous shift as part of operand to shift insn"));
13210 switch (inst
.instruction
)
13212 case T_MNEM_asr
: inst
.instruction
= T_OPCODE_ASR_I
; break;
13213 case T_MNEM_lsl
: inst
.instruction
= T_OPCODE_LSL_I
; break;
13214 case T_MNEM_lsr
: inst
.instruction
= T_OPCODE_LSR_I
; break;
13215 case T_MNEM_ror
: inst
.error
= _("ror #imm not supported"); return;
13218 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_SHIFT
;
13219 inst
.instruction
|= inst
.operands
[0].reg
;
13220 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
13228 unsigned Rd
, Rn
, Rm
;
13230 Rd
= inst
.operands
[0].reg
;
13231 Rn
= inst
.operands
[1].reg
;
13232 Rm
= inst
.operands
[2].reg
;
13234 reject_bad_reg (Rd
);
13235 reject_bad_reg (Rn
);
13236 reject_bad_reg (Rm
);
13238 inst
.instruction
|= Rd
<< 8;
13239 inst
.instruction
|= Rn
<< 16;
13240 inst
.instruction
|= Rm
;
13246 unsigned Rd
, Rn
, Rm
;
13248 Rd
= inst
.operands
[0].reg
;
13249 Rm
= inst
.operands
[1].reg
;
13250 Rn
= inst
.operands
[2].reg
;
13252 reject_bad_reg (Rd
);
13253 reject_bad_reg (Rn
);
13254 reject_bad_reg (Rm
);
13256 inst
.instruction
|= Rd
<< 8;
13257 inst
.instruction
|= Rn
<< 16;
13258 inst
.instruction
|= Rm
;
13264 unsigned int value
= inst
.relocs
[0].exp
.X_add_number
;
13265 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v7a
),
13266 _("SMC is not permitted on this architecture"));
13267 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
,
13268 _("expression too complex"));
13269 inst
.relocs
[0].type
= BFD_RELOC_UNUSED
;
13270 inst
.instruction
|= (value
& 0xf000) >> 12;
13271 inst
.instruction
|= (value
& 0x0ff0);
13272 inst
.instruction
|= (value
& 0x000f) << 16;
13273 /* PR gas/15623: SMC instructions must be last in an IT block. */
13274 set_it_insn_type_last ();
13280 unsigned int value
= inst
.relocs
[0].exp
.X_add_number
;
13282 inst
.relocs
[0].type
= BFD_RELOC_UNUSED
;
13283 inst
.instruction
|= (value
& 0x0fff);
13284 inst
.instruction
|= (value
& 0xf000) << 4;
13288 do_t_ssat_usat (int bias
)
13292 Rd
= inst
.operands
[0].reg
;
13293 Rn
= inst
.operands
[2].reg
;
13295 reject_bad_reg (Rd
);
13296 reject_bad_reg (Rn
);
13298 inst
.instruction
|= Rd
<< 8;
13299 inst
.instruction
|= inst
.operands
[1].imm
- bias
;
13300 inst
.instruction
|= Rn
<< 16;
13302 if (inst
.operands
[3].present
)
13304 offsetT shift_amount
= inst
.relocs
[0].exp
.X_add_number
;
13306 inst
.relocs
[0].type
= BFD_RELOC_UNUSED
;
13308 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
,
13309 _("expression too complex"));
13311 if (shift_amount
!= 0)
13313 constraint (shift_amount
> 31,
13314 _("shift expression is too large"));
13316 if (inst
.operands
[3].shift_kind
== SHIFT_ASR
)
13317 inst
.instruction
|= 0x00200000; /* sh bit. */
13319 inst
.instruction
|= (shift_amount
& 0x1c) << 10;
13320 inst
.instruction
|= (shift_amount
& 0x03) << 6;
13328 do_t_ssat_usat (1);
13336 Rd
= inst
.operands
[0].reg
;
13337 Rn
= inst
.operands
[2].reg
;
13339 reject_bad_reg (Rd
);
13340 reject_bad_reg (Rn
);
13342 inst
.instruction
|= Rd
<< 8;
13343 inst
.instruction
|= inst
.operands
[1].imm
- 1;
13344 inst
.instruction
|= Rn
<< 16;
13350 constraint (!inst
.operands
[2].isreg
|| !inst
.operands
[2].preind
13351 || inst
.operands
[2].postind
|| inst
.operands
[2].writeback
13352 || inst
.operands
[2].immisreg
|| inst
.operands
[2].shifted
13353 || inst
.operands
[2].negative
,
13356 constraint (inst
.operands
[2].reg
== REG_PC
, BAD_PC
);
13358 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
13359 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
13360 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
13361 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_OFFSET_U8
;
13367 if (!inst
.operands
[2].present
)
13368 inst
.operands
[2].reg
= inst
.operands
[1].reg
+ 1;
13370 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
13371 || inst
.operands
[0].reg
== inst
.operands
[2].reg
13372 || inst
.operands
[0].reg
== inst
.operands
[3].reg
,
13375 inst
.instruction
|= inst
.operands
[0].reg
;
13376 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
13377 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
13378 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
13384 unsigned Rd
, Rn
, Rm
;
13386 Rd
= inst
.operands
[0].reg
;
13387 Rn
= inst
.operands
[1].reg
;
13388 Rm
= inst
.operands
[2].reg
;
13390 reject_bad_reg (Rd
);
13391 reject_bad_reg (Rn
);
13392 reject_bad_reg (Rm
);
13394 inst
.instruction
|= Rd
<< 8;
13395 inst
.instruction
|= Rn
<< 16;
13396 inst
.instruction
|= Rm
;
13397 inst
.instruction
|= inst
.operands
[3].imm
<< 4;
13405 Rd
= inst
.operands
[0].reg
;
13406 Rm
= inst
.operands
[1].reg
;
13408 reject_bad_reg (Rd
);
13409 reject_bad_reg (Rm
);
13411 if (inst
.instruction
<= 0xffff
13412 && inst
.size_req
!= 4
13413 && Rd
<= 7 && Rm
<= 7
13414 && (!inst
.operands
[2].present
|| inst
.operands
[2].imm
== 0))
13416 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
13417 inst
.instruction
|= Rd
;
13418 inst
.instruction
|= Rm
<< 3;
13420 else if (unified_syntax
)
13422 if (inst
.instruction
<= 0xffff)
13423 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
13424 inst
.instruction
|= Rd
<< 8;
13425 inst
.instruction
|= Rm
;
13426 inst
.instruction
|= inst
.operands
[2].imm
<< 4;
13430 constraint (inst
.operands
[2].present
&& inst
.operands
[2].imm
!= 0,
13431 _("Thumb encoding does not support rotation"));
13432 constraint (1, BAD_HIREG
);
13439 inst
.relocs
[0].type
= BFD_RELOC_ARM_SWI
;
13448 half
= (inst
.instruction
& 0x10) != 0;
13449 set_it_insn_type_last ();
13450 constraint (inst
.operands
[0].immisreg
,
13451 _("instruction requires register index"));
13453 Rn
= inst
.operands
[0].reg
;
13454 Rm
= inst
.operands
[0].imm
;
13456 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8
))
13457 constraint (Rn
== REG_SP
, BAD_SP
);
13458 reject_bad_reg (Rm
);
13460 constraint (!half
&& inst
.operands
[0].shifted
,
13461 _("instruction does not allow shifted index"));
13462 inst
.instruction
|= (Rn
<< 16) | Rm
;
13468 if (!inst
.operands
[0].present
)
13469 inst
.operands
[0].imm
= 0;
13471 if ((unsigned int) inst
.operands
[0].imm
> 255 || inst
.size_req
== 4)
13473 constraint (inst
.size_req
== 2,
13474 _("immediate value out of range"));
13475 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
13476 inst
.instruction
|= (inst
.operands
[0].imm
& 0xf000u
) << 4;
13477 inst
.instruction
|= (inst
.operands
[0].imm
& 0x0fffu
) << 0;
13481 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
13482 inst
.instruction
|= inst
.operands
[0].imm
;
13485 set_it_insn_type (NEUTRAL_IT_INSN
);
13492 do_t_ssat_usat (0);
13500 Rd
= inst
.operands
[0].reg
;
13501 Rn
= inst
.operands
[2].reg
;
13503 reject_bad_reg (Rd
);
13504 reject_bad_reg (Rn
);
13506 inst
.instruction
|= Rd
<< 8;
13507 inst
.instruction
|= inst
.operands
[1].imm
;
13508 inst
.instruction
|= Rn
<< 16;
13511 /* Checking the range of the branch offset (VAL) with NBITS bits
13512 and IS_SIGNED signedness. Also checks the LSB to be 0. */
13514 v8_1_branch_value_check (int val
, int nbits
, int is_signed
)
13516 gas_assert (nbits
> 0 && nbits
<= 32);
13519 int cmp
= (1 << (nbits
- 1));
13520 if ((val
< -cmp
) || (val
>= cmp
) || (val
& 0x01))
13525 if ((val
<= 0) || (val
>= (1 << nbits
)) || (val
& 0x1))
13531 /* For branches in Armv8.1-M Mainline. */
13533 do_t_branch_future (void)
13535 unsigned long insn
= inst
.instruction
;
13537 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
13538 if (inst
.operands
[0].hasreloc
== 0)
13540 if (v8_1_branch_value_check (inst
.operands
[0].imm
, 5, FALSE
) == FAIL
)
13541 as_bad (BAD_BRANCH_OFF
);
13543 inst
.instruction
|= ((inst
.operands
[0].imm
& 0x1f) >> 1) << 23;
13547 inst
.relocs
[0].type
= BFD_RELOC_THUMB_PCREL_BRANCH5
;
13548 inst
.relocs
[0].pc_rel
= 1;
13554 if (inst
.operands
[1].hasreloc
== 0)
13556 int val
= inst
.operands
[1].imm
;
13557 if (v8_1_branch_value_check (inst
.operands
[1].imm
, 17, TRUE
) == FAIL
)
13558 as_bad (BAD_BRANCH_OFF
);
13560 int immA
= (val
& 0x0001f000) >> 12;
13561 int immB
= (val
& 0x00000ffc) >> 2;
13562 int immC
= (val
& 0x00000002) >> 1;
13563 inst
.instruction
|= (immA
<< 16) | (immB
<< 1) | (immC
<< 11);
13567 inst
.relocs
[1].type
= BFD_RELOC_ARM_THUMB_BF17
;
13568 inst
.relocs
[1].pc_rel
= 1;
13573 if (inst
.operands
[1].hasreloc
== 0)
13575 int val
= inst
.operands
[1].imm
;
13576 if (v8_1_branch_value_check (inst
.operands
[1].imm
, 19, TRUE
) == FAIL
)
13577 as_bad (BAD_BRANCH_OFF
);
13579 int immA
= (val
& 0x0007f000) >> 12;
13580 int immB
= (val
& 0x00000ffc) >> 2;
13581 int immC
= (val
& 0x00000002) >> 1;
13582 inst
.instruction
|= (immA
<< 16) | (immB
<< 1) | (immC
<< 11);
13586 inst
.relocs
[1].type
= BFD_RELOC_ARM_THUMB_BF19
;
13587 inst
.relocs
[1].pc_rel
= 1;
13591 case T_MNEM_bfcsel
:
13593 if (inst
.operands
[1].hasreloc
== 0)
13595 int val
= inst
.operands
[1].imm
;
13596 int immA
= (val
& 0x00001000) >> 12;
13597 int immB
= (val
& 0x00000ffc) >> 2;
13598 int immC
= (val
& 0x00000002) >> 1;
13599 inst
.instruction
|= (immA
<< 16) | (immB
<< 1) | (immC
<< 11);
13603 inst
.relocs
[1].type
= BFD_RELOC_ARM_THUMB_BF13
;
13604 inst
.relocs
[1].pc_rel
= 1;
13608 if (inst
.operands
[2].hasreloc
== 0)
13610 constraint ((inst
.operands
[0].hasreloc
!= 0), BAD_ARGS
);
13611 int val2
= inst
.operands
[2].imm
;
13612 int val0
= inst
.operands
[0].imm
& 0x1f;
13613 int diff
= val2
- val0
;
13615 inst
.instruction
|= 1 << 17; /* T bit. */
13616 else if (diff
!= 2)
13617 as_bad (_("out of range label-relative fixup value"));
13621 constraint ((inst
.operands
[0].hasreloc
== 0), BAD_ARGS
);
13622 inst
.relocs
[2].type
= BFD_RELOC_THUMB_PCREL_BFCSEL
;
13623 inst
.relocs
[2].pc_rel
= 1;
13627 constraint (inst
.cond
!= COND_ALWAYS
, BAD_COND
);
13628 inst
.instruction
|= (inst
.operands
[3].imm
& 0xf) << 18;
13633 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
13640 /* Helper function for do_t_loloop to handle relocations. */
13642 v8_1_loop_reloc (int is_le
)
13644 if (inst
.relocs
[0].exp
.X_op
== O_constant
)
13646 int value
= inst
.relocs
[0].exp
.X_add_number
;
13647 value
= (is_le
) ? -value
: value
;
13649 if (v8_1_branch_value_check (value
, 12, FALSE
) == FAIL
)
13650 as_bad (BAD_BRANCH_OFF
);
13654 immh
= (value
& 0x00000ffc) >> 2;
13655 imml
= (value
& 0x00000002) >> 1;
13657 inst
.instruction
|= (imml
<< 11) | (immh
<< 1);
13661 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_LOOP12
;
13662 inst
.relocs
[0].pc_rel
= 1;
13666 /* To handle the Scalar Low Overhead Loop instructions
13667 in Armv8.1-M Mainline. */
13671 unsigned long insn
= inst
.instruction
;
13673 set_it_insn_type (OUTSIDE_IT_INSN
);
13674 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
13680 if (!inst
.operands
[0].present
)
13681 inst
.instruction
|= 1 << 21;
13683 v8_1_loop_reloc (TRUE
);
13687 v8_1_loop_reloc (FALSE
);
13688 /* Fall through. */
13690 constraint (inst
.operands
[1].isreg
!= 1, BAD_ARGS
);
13691 inst
.instruction
|= (inst
.operands
[1].reg
<< 16);
13698 /* Neon instruction encoder helpers. */
13700 /* Encodings for the different types for various Neon opcodes. */
13702 /* An "invalid" code for the following tables. */
13705 struct neon_tab_entry
13708 unsigned float_or_poly
;
13709 unsigned scalar_or_imm
;
13712 /* Map overloaded Neon opcodes to their respective encodings. */
13713 #define NEON_ENC_TAB \
13714 X(vabd, 0x0000700, 0x1200d00, N_INV), \
13715 X(vmax, 0x0000600, 0x0000f00, N_INV), \
13716 X(vmin, 0x0000610, 0x0200f00, N_INV), \
13717 X(vpadd, 0x0000b10, 0x1000d00, N_INV), \
13718 X(vpmax, 0x0000a00, 0x1000f00, N_INV), \
13719 X(vpmin, 0x0000a10, 0x1200f00, N_INV), \
13720 X(vadd, 0x0000800, 0x0000d00, N_INV), \
13721 X(vsub, 0x1000800, 0x0200d00, N_INV), \
13722 X(vceq, 0x1000810, 0x0000e00, 0x1b10100), \
13723 X(vcge, 0x0000310, 0x1000e00, 0x1b10080), \
13724 X(vcgt, 0x0000300, 0x1200e00, 0x1b10000), \
13725 /* Register variants of the following two instructions are encoded as
13726 vcge / vcgt with the operands reversed. */ \
13727 X(vclt, 0x0000300, 0x1200e00, 0x1b10200), \
13728 X(vcle, 0x0000310, 0x1000e00, 0x1b10180), \
13729 X(vfma, N_INV, 0x0000c10, N_INV), \
13730 X(vfms, N_INV, 0x0200c10, N_INV), \
13731 X(vmla, 0x0000900, 0x0000d10, 0x0800040), \
13732 X(vmls, 0x1000900, 0x0200d10, 0x0800440), \
13733 X(vmul, 0x0000910, 0x1000d10, 0x0800840), \
13734 X(vmull, 0x0800c00, 0x0800e00, 0x0800a40), /* polynomial not float. */ \
13735 X(vmlal, 0x0800800, N_INV, 0x0800240), \
13736 X(vmlsl, 0x0800a00, N_INV, 0x0800640), \
13737 X(vqdmlal, 0x0800900, N_INV, 0x0800340), \
13738 X(vqdmlsl, 0x0800b00, N_INV, 0x0800740), \
13739 X(vqdmull, 0x0800d00, N_INV, 0x0800b40), \
13740 X(vqdmulh, 0x0000b00, N_INV, 0x0800c40), \
13741 X(vqrdmulh, 0x1000b00, N_INV, 0x0800d40), \
13742 X(vqrdmlah, 0x3000b10, N_INV, 0x0800e40), \
13743 X(vqrdmlsh, 0x3000c10, N_INV, 0x0800f40), \
13744 X(vshl, 0x0000400, N_INV, 0x0800510), \
13745 X(vqshl, 0x0000410, N_INV, 0x0800710), \
13746 X(vand, 0x0000110, N_INV, 0x0800030), \
13747 X(vbic, 0x0100110, N_INV, 0x0800030), \
13748 X(veor, 0x1000110, N_INV, N_INV), \
13749 X(vorn, 0x0300110, N_INV, 0x0800010), \
13750 X(vorr, 0x0200110, N_INV, 0x0800010), \
13751 X(vmvn, 0x1b00580, N_INV, 0x0800030), \
13752 X(vshll, 0x1b20300, N_INV, 0x0800a10), /* max shift, immediate. */ \
13753 X(vcvt, 0x1b30600, N_INV, 0x0800e10), /* integer, fixed-point. */ \
13754 X(vdup, 0xe800b10, N_INV, 0x1b00c00), /* arm, scalar. */ \
13755 X(vld1, 0x0200000, 0x0a00000, 0x0a00c00), /* interlv, lane, dup. */ \
13756 X(vst1, 0x0000000, 0x0800000, N_INV), \
13757 X(vld2, 0x0200100, 0x0a00100, 0x0a00d00), \
13758 X(vst2, 0x0000100, 0x0800100, N_INV), \
13759 X(vld3, 0x0200200, 0x0a00200, 0x0a00e00), \
13760 X(vst3, 0x0000200, 0x0800200, N_INV), \
13761 X(vld4, 0x0200300, 0x0a00300, 0x0a00f00), \
13762 X(vst4, 0x0000300, 0x0800300, N_INV), \
13763 X(vmovn, 0x1b20200, N_INV, N_INV), \
13764 X(vtrn, 0x1b20080, N_INV, N_INV), \
13765 X(vqmovn, 0x1b20200, N_INV, N_INV), \
13766 X(vqmovun, 0x1b20240, N_INV, N_INV), \
13767 X(vnmul, 0xe200a40, 0xe200b40, N_INV), \
13768 X(vnmla, 0xe100a40, 0xe100b40, N_INV), \
13769 X(vnmls, 0xe100a00, 0xe100b00, N_INV), \
13770 X(vfnma, 0xe900a40, 0xe900b40, N_INV), \
13771 X(vfnms, 0xe900a00, 0xe900b00, N_INV), \
13772 X(vcmp, 0xeb40a40, 0xeb40b40, N_INV), \
13773 X(vcmpz, 0xeb50a40, 0xeb50b40, N_INV), \
13774 X(vcmpe, 0xeb40ac0, 0xeb40bc0, N_INV), \
13775 X(vcmpez, 0xeb50ac0, 0xeb50bc0, N_INV), \
13776 X(vseleq, 0xe000a00, N_INV, N_INV), \
13777 X(vselvs, 0xe100a00, N_INV, N_INV), \
13778 X(vselge, 0xe200a00, N_INV, N_INV), \
13779 X(vselgt, 0xe300a00, N_INV, N_INV), \
13780 X(vmaxnm, 0xe800a00, 0x3000f10, N_INV), \
13781 X(vminnm, 0xe800a40, 0x3200f10, N_INV), \
13782 X(vcvta, 0xebc0a40, 0x3bb0000, N_INV), \
13783 X(vrintr, 0xeb60a40, 0x3ba0400, N_INV), \
13784 X(vrinta, 0xeb80a40, 0x3ba0400, N_INV), \
13785 X(aes, 0x3b00300, N_INV, N_INV), \
13786 X(sha3op, 0x2000c00, N_INV, N_INV), \
13787 X(sha1h, 0x3b902c0, N_INV, N_INV), \
13788 X(sha2op, 0x3ba0380, N_INV, N_INV)
13792 #define X(OPC,I,F,S) N_MNEM_##OPC
13797 static const struct neon_tab_entry neon_enc_tab
[] =
13799 #define X(OPC,I,F,S) { (I), (F), (S) }
/* Do not use these macros; instead, use NEON_ENCODE defined below.  */
#define NEON_ENC_INTEGER_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_ARMREG_(X)  (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_POLY_(X)    (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_FLOAT_(X)   (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_SCALAR_(X)  (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_IMMED_(X)   (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_INTERLV_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_LANE_(X)    (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_DUP_(X)     (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_SINGLE_(X) \
  ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf0000000))
#define NEON_ENC_DOUBLE_(X) \
  ((neon_enc_tab[(X) & 0x0fffffff].float_or_poly) | ((X) & 0xf0000000))
#define NEON_ENC_FPV8_(X) \
  ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf000000))

/* Replace the current instruction with the chosen encoding variant and
   mark it as a Neon instruction.  */
#define NEON_ENCODE(type, inst)					\
  do								\
    {								\
      inst.instruction = NEON_ENC_##type##_ (inst.instruction);	\
      inst.is_neon = 1;						\
    }								\
  while (0)

/* Reject a type suffix on a mnemonic that did not take the Neon path.  */
#define check_neon_suffixes						\
  do									\
    {									\
      if (!inst.error && inst.vectype.elems > 0 && !inst.is_neon)	\
	{								\
	  as_bad (_("invalid neon suffix for non neon instruction"));	\
	  return;							\
	}								\
    }									\
  while (0)
13840 /* Define shapes for instruction operands. The following mnemonic characters
13841 are used in this table:
13843 F - VFP S<n> register
13844 D - Neon D<n> register
13845 Q - Neon Q<n> register
13849 L - D<n> register list
13851 This table is used to generate various data:
13852 - enumerations of the form NS_DDR to be used as arguments to
13854 - a table classifying shapes into single, double, quad, mixed.
13855 - a table used to drive neon_select_shape. */
13857 #define NEON_SHAPE_DEF \
13858 X(3, (D, D, D), DOUBLE), \
13859 X(3, (Q, Q, Q), QUAD), \
13860 X(3, (D, D, I), DOUBLE), \
13861 X(3, (Q, Q, I), QUAD), \
13862 X(3, (D, D, S), DOUBLE), \
13863 X(3, (Q, Q, S), QUAD), \
13864 X(2, (D, D), DOUBLE), \
13865 X(2, (Q, Q), QUAD), \
13866 X(2, (D, S), DOUBLE), \
13867 X(2, (Q, S), QUAD), \
13868 X(2, (D, R), DOUBLE), \
13869 X(2, (Q, R), QUAD), \
13870 X(2, (D, I), DOUBLE), \
13871 X(2, (Q, I), QUAD), \
13872 X(3, (D, L, D), DOUBLE), \
13873 X(2, (D, Q), MIXED), \
13874 X(2, (Q, D), MIXED), \
13875 X(3, (D, Q, I), MIXED), \
13876 X(3, (Q, D, I), MIXED), \
13877 X(3, (Q, D, D), MIXED), \
13878 X(3, (D, Q, Q), MIXED), \
13879 X(3, (Q, Q, D), MIXED), \
13880 X(3, (Q, D, S), MIXED), \
13881 X(3, (D, Q, S), MIXED), \
13882 X(4, (D, D, D, I), DOUBLE), \
13883 X(4, (Q, Q, Q, I), QUAD), \
13884 X(4, (D, D, S, I), DOUBLE), \
13885 X(4, (Q, Q, S, I), QUAD), \
13886 X(2, (F, F), SINGLE), \
13887 X(3, (F, F, F), SINGLE), \
13888 X(2, (F, I), SINGLE), \
13889 X(2, (F, D), MIXED), \
13890 X(2, (D, F), MIXED), \
13891 X(3, (F, F, I), MIXED), \
13892 X(4, (R, R, F, F), SINGLE), \
13893 X(4, (F, F, R, R), SINGLE), \
13894 X(3, (D, R, R), DOUBLE), \
13895 X(3, (R, R, D), DOUBLE), \
13896 X(2, (S, R), SINGLE), \
13897 X(2, (R, S), SINGLE), \
13898 X(2, (F, R), SINGLE), \
13899 X(2, (R, F), SINGLE), \
13900 /* Half float shape supported so far. */\
13901 X (2, (H, D), MIXED), \
13902 X (2, (D, H), MIXED), \
13903 X (2, (H, F), MIXED), \
13904 X (2, (F, H), MIXED), \
13905 X (2, (H, H), HALF), \
13906 X (2, (H, R), HALF), \
13907 X (2, (R, H), HALF), \
13908 X (2, (H, I), HALF), \
13909 X (3, (H, H, H), HALF), \
13910 X (3, (H, F, I), MIXED), \
13911 X (3, (F, H, I), MIXED), \
13912 X (3, (D, H, H), MIXED), \
13913 X (3, (D, H, S), MIXED)
13915 #define S2(A,B) NS_##A##B
13916 #define S3(A,B,C) NS_##A##B##C
13917 #define S4(A,B,C,D) NS_##A##B##C##D
13919 #define X(N, L, C) S##N L
13932 enum neon_shape_class
13941 #define X(N, L, C) SC_##C
13943 static enum neon_shape_class neon_shape_class
[] =
13962 /* Register widths of above. */
13963 static unsigned neon_shape_el_size
[] =
13975 struct neon_shape_info
13978 enum neon_shape_el el
[NEON_MAX_TYPE_ELS
];
13981 #define S2(A,B) { SE_##A, SE_##B }
13982 #define S3(A,B,C) { SE_##A, SE_##B, SE_##C }
13983 #define S4(A,B,C,D) { SE_##A, SE_##B, SE_##C, SE_##D }
13985 #define X(N, L, C) { N, S##N L }
13987 static struct neon_shape_info neon_shape_tab
[] =
/* Bit masks used in type checking given instructions.
  'N_EQK' means the type must be the same as (or based on in some way) the key
   type, which itself is marked with the 'N_KEY' bit. If the 'N_EQK' bit is
   set, various other bits can be set as well in order to modify the meaning of
   the type constraint.  */

enum neon_type_mask
{
  N_S8   = 0x0000001,
  N_S16  = 0x0000002,
  N_S32  = 0x0000004,
  N_S64  = 0x0000008,
  N_U8   = 0x0000010,
  N_U16  = 0x0000020,
  N_U32  = 0x0000040,
  N_U64  = 0x0000080,
  N_I8   = 0x0000100,
  N_I16  = 0x0000200,
  N_I32  = 0x0000400,
  N_I64  = 0x0000800,
  N_8    = 0x0001000,
  N_16   = 0x0002000,
  N_32   = 0x0004000,
  N_64   = 0x0008000,
  N_P8   = 0x0010000,
  N_P16  = 0x0020000,
  N_F16  = 0x0040000,
  N_F32  = 0x0080000,
  N_F64  = 0x0100000,
  N_P64	 = 0x0200000,
  N_KEY  = 0x1000000, /* Key element (main type specifier).  */
  N_EQK  = 0x2000000, /* Given operand has the same type & size as the key.  */
  N_VFP  = 0x4000000, /* VFP mode: operand size must match register width.  */
  N_UNT  = 0x8000000, /* Must be explicitly untyped.  */
  N_DBL  = 0x0000001, /* If N_EQK, this operand is twice the size.  */
  N_HLF  = 0x0000002, /* If N_EQK, this operand is half the size.  */
  N_SGN  = 0x0000004, /* If N_EQK, this operand is forced to be signed.  */
  N_UNS  = 0x0000008, /* If N_EQK, this operand is forced to be unsigned.  */
  N_INT  = 0x0000010, /* If N_EQK, this operand is forced to be integer.  */
  N_FLT  = 0x0000020, /* If N_EQK, this operand is forced to be float.  */
  N_SIZ  = 0x0000040, /* If N_EQK, this operand is forced to be size-only.  */
  N_UTYP = 0,
  N_MAX_NONSPECIAL = N_P64
};

#define N_ALLMODS  (N_DBL | N_HLF | N_SGN | N_UNS | N_INT | N_FLT | N_SIZ)

#define N_SU_ALL   (N_S8 | N_S16 | N_S32 | N_S64 | N_U8 | N_U16 | N_U32 | N_U64)
#define N_SU_32    (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)
#define N_SU_16_64 (N_S16 | N_S32 | N_S64 | N_U16 | N_U32 | N_U64)
#define N_S_32     (N_S8 | N_S16 | N_S32)
#define N_F_16_32  (N_F16 | N_F32)
#define N_SUF_32   (N_SU_32 | N_F_16_32)
#define N_I_ALL    (N_I8 | N_I16 | N_I32 | N_I64)
#define N_IF_32    (N_I8 | N_I16 | N_I32 | N_F16 | N_F32)
#define N_F_ALL    (N_F16 | N_F32 | N_F64)

/* Pass this as the first type argument to neon_check_type to ignore types
   altogether.  */
#define N_IGNORE_TYPE (N_KEY | N_EQK)
14058 /* Select a "shape" for the current instruction (describing register types or
14059 sizes) from a list of alternatives. Return NS_NULL if the current instruction
14060 doesn't fit. For non-polymorphic shapes, checking is usually done as a
14061 function of operand parsing, so this function doesn't need to be called.
14062 Shapes should be listed in order of decreasing length. */
14064 static enum neon_shape
14065 neon_select_shape (enum neon_shape shape
, ...)
14068 enum neon_shape first_shape
= shape
;
14070 /* Fix missing optional operands. FIXME: we don't know at this point how
14071 many arguments we should have, so this makes the assumption that we have
14072 > 1. This is true of all current Neon opcodes, I think, but may not be
14073 true in the future. */
14074 if (!inst
.operands
[1].present
)
14075 inst
.operands
[1] = inst
.operands
[0];
14077 va_start (ap
, shape
);
14079 for (; shape
!= NS_NULL
; shape
= (enum neon_shape
) va_arg (ap
, int))
14084 for (j
= 0; j
< neon_shape_tab
[shape
].els
; j
++)
14086 if (!inst
.operands
[j
].present
)
14092 switch (neon_shape_tab
[shape
].el
[j
])
14094 /* If a .f16, .16, .u16, .s16 type specifier is given over
14095 a VFP single precision register operand, it's essentially
14096 means only half of the register is used.
14098 If the type specifier is given after the mnemonics, the
14099 information is stored in inst.vectype. If the type specifier
14100 is given after register operand, the information is stored
14101 in inst.operands[].vectype.
14103 When there is only one type specifier, and all the register
14104 operands are the same type of hardware register, the type
14105 specifier applies to all register operands.
14107 If no type specifier is given, the shape is inferred from
14108 operand information.
14111 vadd.f16 s0, s1, s2: NS_HHH
14112 vabs.f16 s0, s1: NS_HH
14113 vmov.f16 s0, r1: NS_HR
14114 vmov.f16 r0, s1: NS_RH
14115 vcvt.f16 r0, s1: NS_RH
14116 vcvt.f16.s32 s2, s2, #29: NS_HFI
14117 vcvt.f16.s32 s2, s2: NS_HF
14120 if (!(inst
.operands
[j
].isreg
14121 && inst
.operands
[j
].isvec
14122 && inst
.operands
[j
].issingle
14123 && !inst
.operands
[j
].isquad
14124 && ((inst
.vectype
.elems
== 1
14125 && inst
.vectype
.el
[0].size
== 16)
14126 || (inst
.vectype
.elems
> 1
14127 && inst
.vectype
.el
[j
].size
== 16)
14128 || (inst
.vectype
.elems
== 0
14129 && inst
.operands
[j
].vectype
.type
!= NT_invtype
14130 && inst
.operands
[j
].vectype
.size
== 16))))
14135 if (!(inst
.operands
[j
].isreg
14136 && inst
.operands
[j
].isvec
14137 && inst
.operands
[j
].issingle
14138 && !inst
.operands
[j
].isquad
14139 && ((inst
.vectype
.elems
== 1 && inst
.vectype
.el
[0].size
== 32)
14140 || (inst
.vectype
.elems
> 1 && inst
.vectype
.el
[j
].size
== 32)
14141 || (inst
.vectype
.elems
== 0
14142 && (inst
.operands
[j
].vectype
.size
== 32
14143 || inst
.operands
[j
].vectype
.type
== NT_invtype
)))))
14148 if (!(inst
.operands
[j
].isreg
14149 && inst
.operands
[j
].isvec
14150 && !inst
.operands
[j
].isquad
14151 && !inst
.operands
[j
].issingle
))
14156 if (!(inst
.operands
[j
].isreg
14157 && !inst
.operands
[j
].isvec
))
14162 if (!(inst
.operands
[j
].isreg
14163 && inst
.operands
[j
].isvec
14164 && inst
.operands
[j
].isquad
14165 && !inst
.operands
[j
].issingle
))
14170 if (!(!inst
.operands
[j
].isreg
14171 && !inst
.operands
[j
].isscalar
))
14176 if (!(!inst
.operands
[j
].isreg
14177 && inst
.operands
[j
].isscalar
))
14187 if (matches
&& (j
>= ARM_IT_MAX_OPERANDS
|| !inst
.operands
[j
].present
))
14188 /* We've matched all the entries in the shape table, and we don't
14189 have any left over operands which have not been matched. */
14195 if (shape
== NS_NULL
&& first_shape
!= NS_NULL
)
14196 first_error (_("invalid instruction shape"));
14201 /* True if SHAPE is predominantly a quadword operation (most of the time, this
14202 means the Q bit should be set). */
14205 neon_quad (enum neon_shape shape
)
14207 return neon_shape_class
[shape
] == SC_QUAD
;
14211 neon_modify_type_size (unsigned typebits
, enum neon_el_type
*g_type
,
14214 /* Allow modification to be made to types which are constrained to be
14215 based on the key element, based on bits set alongside N_EQK. */
14216 if ((typebits
& N_EQK
) != 0)
14218 if ((typebits
& N_HLF
) != 0)
14220 else if ((typebits
& N_DBL
) != 0)
14222 if ((typebits
& N_SGN
) != 0)
14223 *g_type
= NT_signed
;
14224 else if ((typebits
& N_UNS
) != 0)
14225 *g_type
= NT_unsigned
;
14226 else if ((typebits
& N_INT
) != 0)
14227 *g_type
= NT_integer
;
14228 else if ((typebits
& N_FLT
) != 0)
14229 *g_type
= NT_float
;
14230 else if ((typebits
& N_SIZ
) != 0)
14231 *g_type
= NT_untyped
;
14235 /* Return operand OPNO promoted by bits set in THISARG. KEY should be the "key"
14236 operand type, i.e. the single type specified in a Neon instruction when it
14237 is the only one given. */
14239 static struct neon_type_el
14240 neon_type_promote (struct neon_type_el
*key
, unsigned thisarg
)
14242 struct neon_type_el dest
= *key
;
14244 gas_assert ((thisarg
& N_EQK
) != 0);
14246 neon_modify_type_size (thisarg
, &dest
.type
, &dest
.size
);
14251 /* Convert Neon type and size into compact bitmask representation. */
14253 static enum neon_type_mask
14254 type_chk_of_el_type (enum neon_el_type type
, unsigned size
)
14261 case 8: return N_8
;
14262 case 16: return N_16
;
14263 case 32: return N_32
;
14264 case 64: return N_64
;
14272 case 8: return N_I8
;
14273 case 16: return N_I16
;
14274 case 32: return N_I32
;
14275 case 64: return N_I64
;
14283 case 16: return N_F16
;
14284 case 32: return N_F32
;
14285 case 64: return N_F64
;
14293 case 8: return N_P8
;
14294 case 16: return N_P16
;
14295 case 64: return N_P64
;
14303 case 8: return N_S8
;
14304 case 16: return N_S16
;
14305 case 32: return N_S32
;
14306 case 64: return N_S64
;
14314 case 8: return N_U8
;
14315 case 16: return N_U16
;
14316 case 32: return N_U32
;
14317 case 64: return N_U64
;
14328 /* Convert compact Neon bitmask type representation to a type and size. Only
14329 handles the case where a single bit is set in the mask. */
14332 el_type_of_type_chk (enum neon_el_type
*type
, unsigned *size
,
14333 enum neon_type_mask mask
)
14335 if ((mask
& N_EQK
) != 0)
14338 if ((mask
& (N_S8
| N_U8
| N_I8
| N_8
| N_P8
)) != 0)
14340 else if ((mask
& (N_S16
| N_U16
| N_I16
| N_16
| N_F16
| N_P16
)) != 0)
14342 else if ((mask
& (N_S32
| N_U32
| N_I32
| N_32
| N_F32
)) != 0)
14344 else if ((mask
& (N_S64
| N_U64
| N_I64
| N_64
| N_F64
| N_P64
)) != 0)
14349 if ((mask
& (N_S8
| N_S16
| N_S32
| N_S64
)) != 0)
14351 else if ((mask
& (N_U8
| N_U16
| N_U32
| N_U64
)) != 0)
14352 *type
= NT_unsigned
;
14353 else if ((mask
& (N_I8
| N_I16
| N_I32
| N_I64
)) != 0)
14354 *type
= NT_integer
;
14355 else if ((mask
& (N_8
| N_16
| N_32
| N_64
)) != 0)
14356 *type
= NT_untyped
;
14357 else if ((mask
& (N_P8
| N_P16
| N_P64
)) != 0)
14359 else if ((mask
& (N_F_ALL
)) != 0)
14367 /* Modify a bitmask of allowed types. This is only needed for type
14371 modify_types_allowed (unsigned allowed
, unsigned mods
)
14374 enum neon_el_type type
;
14380 for (i
= 1; i
<= N_MAX_NONSPECIAL
; i
<<= 1)
14382 if (el_type_of_type_chk (&type
, &size
,
14383 (enum neon_type_mask
) (allowed
& i
)) == SUCCESS
)
14385 neon_modify_type_size (mods
, &type
, &size
);
14386 destmask
|= type_chk_of_el_type (type
, size
);
14393 /* Check type and return type classification.
14394 The manual states (paraphrase): If one datatype is given, it indicates the
14396 - the second operand, if there is one
14397 - the operand, if there is no second operand
14398 - the result, if there are no operands.
14399 This isn't quite good enough though, so we use a concept of a "key" datatype
14400 which is set on a per-instruction basis, which is the one which matters when
14401 only one data type is written.
14402 Note: this function has side-effects (e.g. filling in missing operands). All
14403 Neon instructions should call it before performing bit encoding. */
14405 static struct neon_type_el
14406 neon_check_type (unsigned els
, enum neon_shape ns
, ...)
14409 unsigned i
, pass
, key_el
= 0;
14410 unsigned types
[NEON_MAX_TYPE_ELS
];
14411 enum neon_el_type k_type
= NT_invtype
;
14412 unsigned k_size
= -1u;
14413 struct neon_type_el badtype
= {NT_invtype
, -1};
14414 unsigned key_allowed
= 0;
14416 /* Optional registers in Neon instructions are always (not) in operand 1.
14417 Fill in the missing operand here, if it was omitted. */
14418 if (els
> 1 && !inst
.operands
[1].present
)
14419 inst
.operands
[1] = inst
.operands
[0];
14421 /* Suck up all the varargs. */
14423 for (i
= 0; i
< els
; i
++)
14425 unsigned thisarg
= va_arg (ap
, unsigned);
14426 if (thisarg
== N_IGNORE_TYPE
)
14431 types
[i
] = thisarg
;
14432 if ((thisarg
& N_KEY
) != 0)
14437 if (inst
.vectype
.elems
> 0)
14438 for (i
= 0; i
< els
; i
++)
14439 if (inst
.operands
[i
].vectype
.type
!= NT_invtype
)
14441 first_error (_("types specified in both the mnemonic and operands"));
14445 /* Duplicate inst.vectype elements here as necessary.
14446 FIXME: No idea if this is exactly the same as the ARM assembler,
14447 particularly when an insn takes one register and one non-register
14449 if (inst
.vectype
.elems
== 1 && els
> 1)
14452 inst
.vectype
.elems
= els
;
14453 inst
.vectype
.el
[key_el
] = inst
.vectype
.el
[0];
14454 for (j
= 0; j
< els
; j
++)
14456 inst
.vectype
.el
[j
] = neon_type_promote (&inst
.vectype
.el
[key_el
],
14459 else if (inst
.vectype
.elems
== 0 && els
> 0)
14462 /* No types were given after the mnemonic, so look for types specified
14463 after each operand. We allow some flexibility here; as long as the
14464 "key" operand has a type, we can infer the others. */
14465 for (j
= 0; j
< els
; j
++)
14466 if (inst
.operands
[j
].vectype
.type
!= NT_invtype
)
14467 inst
.vectype
.el
[j
] = inst
.operands
[j
].vectype
;
14469 if (inst
.operands
[key_el
].vectype
.type
!= NT_invtype
)
14471 for (j
= 0; j
< els
; j
++)
14472 if (inst
.operands
[j
].vectype
.type
== NT_invtype
)
14473 inst
.vectype
.el
[j
] = neon_type_promote (&inst
.vectype
.el
[key_el
],
14478 first_error (_("operand types can't be inferred"));
14482 else if (inst
.vectype
.elems
!= els
)
14484 first_error (_("type specifier has the wrong number of parts"));
14488 for (pass
= 0; pass
< 2; pass
++)
14490 for (i
= 0; i
< els
; i
++)
14492 unsigned thisarg
= types
[i
];
14493 unsigned types_allowed
= ((thisarg
& N_EQK
) != 0 && pass
!= 0)
14494 ? modify_types_allowed (key_allowed
, thisarg
) : thisarg
;
14495 enum neon_el_type g_type
= inst
.vectype
.el
[i
].type
;
14496 unsigned g_size
= inst
.vectype
.el
[i
].size
;
14498 /* Decay more-specific signed & unsigned types to sign-insensitive
14499 integer types if sign-specific variants are unavailable. */
14500 if ((g_type
== NT_signed
|| g_type
== NT_unsigned
)
14501 && (types_allowed
& N_SU_ALL
) == 0)
14502 g_type
= NT_integer
;
14504 /* If only untyped args are allowed, decay any more specific types to
14505 them. Some instructions only care about signs for some element
14506 sizes, so handle that properly. */
14507 if (((types_allowed
& N_UNT
) == 0)
14508 && ((g_size
== 8 && (types_allowed
& N_8
) != 0)
14509 || (g_size
== 16 && (types_allowed
& N_16
) != 0)
14510 || (g_size
== 32 && (types_allowed
& N_32
) != 0)
14511 || (g_size
== 64 && (types_allowed
& N_64
) != 0)))
14512 g_type
= NT_untyped
;
14516 if ((thisarg
& N_KEY
) != 0)
14520 key_allowed
= thisarg
& ~N_KEY
;
14522 /* Check architecture constraint on FP16 extension. */
14524 && k_type
== NT_float
14525 && ! ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_fp16
))
14527 inst
.error
= _(BAD_FP16
);
14534 if ((thisarg
& N_VFP
) != 0)
14536 enum neon_shape_el regshape
;
14537 unsigned regwidth
, match
;
14539 /* PR 11136: Catch the case where we are passed a shape of NS_NULL. */
14542 first_error (_("invalid instruction shape"));
14545 regshape
= neon_shape_tab
[ns
].el
[i
];
14546 regwidth
= neon_shape_el_size
[regshape
];
14548 /* In VFP mode, operands must match register widths. If we
14549 have a key operand, use its width, else use the width of
14550 the current operand. */
14556 /* FP16 will use a single precision register. */
14557 if (regwidth
== 32 && match
== 16)
14559 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_fp16
))
14563 inst
.error
= _(BAD_FP16
);
14568 if (regwidth
!= match
)
14570 first_error (_("operand size must match register width"));
14575 if ((thisarg
& N_EQK
) == 0)
14577 unsigned given_type
= type_chk_of_el_type (g_type
, g_size
);
14579 if ((given_type
& types_allowed
) == 0)
14581 first_error (_("bad type in Neon instruction"));
14587 enum neon_el_type mod_k_type
= k_type
;
14588 unsigned mod_k_size
= k_size
;
14589 neon_modify_type_size (thisarg
, &mod_k_type
, &mod_k_size
);
14590 if (g_type
!= mod_k_type
|| g_size
!= mod_k_size
)
14592 first_error (_("inconsistent types in Neon instruction"));
14600 return inst
.vectype
.el
[key_el
];
14603 /* Neon-style VFP instruction forwarding. */
14605 /* Thumb VFP instructions have 0xE in the condition field. */
14608 do_vfp_cond_or_thumb (void)
14613 inst
.instruction
|= 0xe0000000;
14615 inst
.instruction
|= inst
.cond
<< 28;
14618 /* Look up and encode a simple mnemonic, for use as a helper function for the
14619 Neon-style VFP syntax. This avoids duplication of bits of the insns table,
14620 etc. It is assumed that operand parsing has already been done, and that the
14621 operands are in the form expected by the given opcode (this isn't necessarily
14622 the same as the form in which they were parsed, hence some massaging must
14623 take place before this function is called).
14624 Checks current arch version against that in the looked-up opcode. */
14627 do_vfp_nsyn_opcode (const char *opname
)
14629 const struct asm_opcode
*opcode
;
14631 opcode
= (const struct asm_opcode
*) hash_find (arm_ops_hsh
, opname
);
14636 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
,
14637 thumb_mode
? *opcode
->tvariant
: *opcode
->avariant
),
14644 inst
.instruction
= opcode
->tvalue
;
14645 opcode
->tencode ();
14649 inst
.instruction
= (inst
.cond
<< 28) | opcode
->avalue
;
14650 opcode
->aencode ();
14655 do_vfp_nsyn_add_sub (enum neon_shape rs
)
14657 int is_add
= (inst
.instruction
& 0x0fffffff) == N_MNEM_vadd
;
14659 if (rs
== NS_FFF
|| rs
== NS_HHH
)
14662 do_vfp_nsyn_opcode ("fadds");
14664 do_vfp_nsyn_opcode ("fsubs");
14666 /* ARMv8.2 fp16 instruction. */
14668 do_scalar_fp16_v82_encode ();
14673 do_vfp_nsyn_opcode ("faddd");
14675 do_vfp_nsyn_opcode ("fsubd");
14679 /* Check operand types to see if this is a VFP instruction, and if so call
14683 try_vfp_nsyn (int args
, void (*pfn
) (enum neon_shape
))
14685 enum neon_shape rs
;
14686 struct neon_type_el et
;
14691 rs
= neon_select_shape (NS_HH
, NS_FF
, NS_DD
, NS_NULL
);
14692 et
= neon_check_type (2, rs
, N_EQK
| N_VFP
, N_F_ALL
| N_KEY
| N_VFP
);
14696 rs
= neon_select_shape (NS_HHH
, NS_FFF
, NS_DDD
, NS_NULL
);
14697 et
= neon_check_type (3, rs
, N_EQK
| N_VFP
, N_EQK
| N_VFP
,
14698 N_F_ALL
| N_KEY
| N_VFP
);
14705 if (et
.type
!= NT_invtype
)
14716 do_vfp_nsyn_mla_mls (enum neon_shape rs
)
14718 int is_mla
= (inst
.instruction
& 0x0fffffff) == N_MNEM_vmla
;
14720 if (rs
== NS_FFF
|| rs
== NS_HHH
)
14723 do_vfp_nsyn_opcode ("fmacs");
14725 do_vfp_nsyn_opcode ("fnmacs");
14727 /* ARMv8.2 fp16 instruction. */
14729 do_scalar_fp16_v82_encode ();
14734 do_vfp_nsyn_opcode ("fmacd");
14736 do_vfp_nsyn_opcode ("fnmacd");
14741 do_vfp_nsyn_fma_fms (enum neon_shape rs
)
14743 int is_fma
= (inst
.instruction
& 0x0fffffff) == N_MNEM_vfma
;
14745 if (rs
== NS_FFF
|| rs
== NS_HHH
)
14748 do_vfp_nsyn_opcode ("ffmas");
14750 do_vfp_nsyn_opcode ("ffnmas");
14752 /* ARMv8.2 fp16 instruction. */
14754 do_scalar_fp16_v82_encode ();
14759 do_vfp_nsyn_opcode ("ffmad");
14761 do_vfp_nsyn_opcode ("ffnmad");
14766 do_vfp_nsyn_mul (enum neon_shape rs
)
14768 if (rs
== NS_FFF
|| rs
== NS_HHH
)
14770 do_vfp_nsyn_opcode ("fmuls");
14772 /* ARMv8.2 fp16 instruction. */
14774 do_scalar_fp16_v82_encode ();
14777 do_vfp_nsyn_opcode ("fmuld");
14781 do_vfp_nsyn_abs_neg (enum neon_shape rs
)
14783 int is_neg
= (inst
.instruction
& 0x80) != 0;
14784 neon_check_type (2, rs
, N_EQK
| N_VFP
, N_F_ALL
| N_VFP
| N_KEY
);
14786 if (rs
== NS_FF
|| rs
== NS_HH
)
14789 do_vfp_nsyn_opcode ("fnegs");
14791 do_vfp_nsyn_opcode ("fabss");
14793 /* ARMv8.2 fp16 instruction. */
14795 do_scalar_fp16_v82_encode ();
14800 do_vfp_nsyn_opcode ("fnegd");
14802 do_vfp_nsyn_opcode ("fabsd");
14806 /* Encode single-precision (only!) VFP fldm/fstm instructions. Double precision
14807 insns belong to Neon, and are handled elsewhere. */
14810 do_vfp_nsyn_ldm_stm (int is_dbmode
)
14812 int is_ldm
= (inst
.instruction
& (1 << 20)) != 0;
14816 do_vfp_nsyn_opcode ("fldmdbs");
14818 do_vfp_nsyn_opcode ("fldmias");
14823 do_vfp_nsyn_opcode ("fstmdbs");
14825 do_vfp_nsyn_opcode ("fstmias");
14830 do_vfp_nsyn_sqrt (void)
14832 enum neon_shape rs
= neon_select_shape (NS_HH
, NS_FF
, NS_DD
, NS_NULL
);
14833 neon_check_type (2, rs
, N_EQK
| N_VFP
, N_F_ALL
| N_KEY
| N_VFP
);
14835 if (rs
== NS_FF
|| rs
== NS_HH
)
14837 do_vfp_nsyn_opcode ("fsqrts");
14839 /* ARMv8.2 fp16 instruction. */
14841 do_scalar_fp16_v82_encode ();
14844 do_vfp_nsyn_opcode ("fsqrtd");
14848 do_vfp_nsyn_div (void)
14850 enum neon_shape rs
= neon_select_shape (NS_HHH
, NS_FFF
, NS_DDD
, NS_NULL
);
14851 neon_check_type (3, rs
, N_EQK
| N_VFP
, N_EQK
| N_VFP
,
14852 N_F_ALL
| N_KEY
| N_VFP
);
14854 if (rs
== NS_FFF
|| rs
== NS_HHH
)
14856 do_vfp_nsyn_opcode ("fdivs");
14858 /* ARMv8.2 fp16 instruction. */
14860 do_scalar_fp16_v82_encode ();
14863 do_vfp_nsyn_opcode ("fdivd");
14867 do_vfp_nsyn_nmul (void)
14869 enum neon_shape rs
= neon_select_shape (NS_HHH
, NS_FFF
, NS_DDD
, NS_NULL
);
14870 neon_check_type (3, rs
, N_EQK
| N_VFP
, N_EQK
| N_VFP
,
14871 N_F_ALL
| N_KEY
| N_VFP
);
14873 if (rs
== NS_FFF
|| rs
== NS_HHH
)
14875 NEON_ENCODE (SINGLE
, inst
);
14876 do_vfp_sp_dyadic ();
14878 /* ARMv8.2 fp16 instruction. */
14880 do_scalar_fp16_v82_encode ();
14884 NEON_ENCODE (DOUBLE
, inst
);
14885 do_vfp_dp_rd_rn_rm ();
14887 do_vfp_cond_or_thumb ();
14892 do_vfp_nsyn_cmp (void)
14894 enum neon_shape rs
;
14895 if (inst
.operands
[1].isreg
)
14897 rs
= neon_select_shape (NS_HH
, NS_FF
, NS_DD
, NS_NULL
);
14898 neon_check_type (2, rs
, N_EQK
| N_VFP
, N_F_ALL
| N_KEY
| N_VFP
);
14900 if (rs
== NS_FF
|| rs
== NS_HH
)
14902 NEON_ENCODE (SINGLE
, inst
);
14903 do_vfp_sp_monadic ();
14907 NEON_ENCODE (DOUBLE
, inst
);
14908 do_vfp_dp_rd_rm ();
14913 rs
= neon_select_shape (NS_HI
, NS_FI
, NS_DI
, NS_NULL
);
14914 neon_check_type (2, rs
, N_F_ALL
| N_KEY
| N_VFP
, N_EQK
);
14916 switch (inst
.instruction
& 0x0fffffff)
14919 inst
.instruction
+= N_MNEM_vcmpz
- N_MNEM_vcmp
;
14922 inst
.instruction
+= N_MNEM_vcmpez
- N_MNEM_vcmpe
;
14928 if (rs
== NS_FI
|| rs
== NS_HI
)
14930 NEON_ENCODE (SINGLE
, inst
);
14931 do_vfp_sp_compare_z ();
14935 NEON_ENCODE (DOUBLE
, inst
);
14939 do_vfp_cond_or_thumb ();
14941 /* ARMv8.2 fp16 instruction. */
14942 if (rs
== NS_HI
|| rs
== NS_HH
)
14943 do_scalar_fp16_v82_encode ();
14947 nsyn_insert_sp (void)
14949 inst
.operands
[1] = inst
.operands
[0];
14950 memset (&inst
.operands
[0], '\0', sizeof (inst
.operands
[0]));
14951 inst
.operands
[0].reg
= REG_SP
;
14952 inst
.operands
[0].isreg
= 1;
14953 inst
.operands
[0].writeback
= 1;
14954 inst
.operands
[0].present
= 1;
14958 do_vfp_nsyn_push (void)
14962 constraint (inst
.operands
[1].imm
< 1 || inst
.operands
[1].imm
> 16,
14963 _("register list must contain at least 1 and at most 16 "
14966 if (inst
.operands
[1].issingle
)
14967 do_vfp_nsyn_opcode ("fstmdbs");
14969 do_vfp_nsyn_opcode ("fstmdbd");
14973 do_vfp_nsyn_pop (void)
14977 constraint (inst
.operands
[1].imm
< 1 || inst
.operands
[1].imm
> 16,
14978 _("register list must contain at least 1 and at most 16 "
14981 if (inst
.operands
[1].issingle
)
14982 do_vfp_nsyn_opcode ("fldmias");
14984 do_vfp_nsyn_opcode ("fldmiad");
14987 /* Fix up Neon data-processing instructions, ORing in the correct bits for
14988 ARM mode or Thumb mode and moving the encoded bit 24 to bit 28. */
14991 neon_dp_fixup (struct arm_it
* insn
)
14993 unsigned int i
= insn
->instruction
;
14998 /* The U bit is at bit 24 by default. Move to bit 28 in Thumb mode. */
15009 insn
->instruction
= i
;
/* Turn a size (8, 16, 32, 64) into the respective bit number minus 3
   (0, 1, 2, 3).  */

static unsigned
neon_logbits (unsigned x)
{
  return ffs (x) - 4;
}
/* Extract the low nibble (register number within a bank) and the high bit
   (the D/N/M extension bit) of a Neon register encoding.  */
#define LOW4(R) ((R) & 0xf)
#define HI1(R) (((R) >> 4) & 1)
15024 /* Encode insns with bit pattern:
15026 |28/24|23|22 |21 20|19 16|15 12|11 8|7|6|5|4|3 0|
15027 | U |x |D |size | Rn | Rd |x x x x|N|Q|M|x| Rm |
15029 SIZE is passed in bits. -1 means size field isn't changed, in case it has a
15030 different meaning for some instruction. */
15033 neon_three_same (int isquad
, int ubit
, int size
)
15035 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15036 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15037 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
15038 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
15039 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
15040 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
15041 inst
.instruction
|= (isquad
!= 0) << 6;
15042 inst
.instruction
|= (ubit
!= 0) << 24;
15044 inst
.instruction
|= neon_logbits (size
) << 20;
15046 neon_dp_fixup (&inst
);
15049 /* Encode instructions of the form:
15051 |28/24|23|22|21 20|19 18|17 16|15 12|11 7|6|5|4|3 0|
15052 | U |x |D |x x |size |x x | Rd |x x x x x|Q|M|x| Rm |
15054 Don't write size if SIZE == -1. */
15057 neon_two_same (int qbit
, int ubit
, int size
)
15059 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15060 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15061 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
15062 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
15063 inst
.instruction
|= (qbit
!= 0) << 6;
15064 inst
.instruction
|= (ubit
!= 0) << 24;
15067 inst
.instruction
|= neon_logbits (size
) << 18;
15069 neon_dp_fixup (&inst
);
15072 /* Neon instruction encoders, in approximate order of appearance. */
15075 do_neon_dyadic_i_su (void)
15077 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
15078 struct neon_type_el et
= neon_check_type (3, rs
,
15079 N_EQK
, N_EQK
, N_SU_32
| N_KEY
);
15080 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
15084 do_neon_dyadic_i64_su (void)
15086 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
15087 struct neon_type_el et
= neon_check_type (3, rs
,
15088 N_EQK
, N_EQK
, N_SU_ALL
| N_KEY
);
15089 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
15093 neon_imm_shift (int write_ubit
, int uval
, int isquad
, struct neon_type_el et
,
15096 unsigned size
= et
.size
>> 3;
15097 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15098 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15099 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
15100 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
15101 inst
.instruction
|= (isquad
!= 0) << 6;
15102 inst
.instruction
|= immbits
<< 16;
15103 inst
.instruction
|= (size
>> 3) << 7;
15104 inst
.instruction
|= (size
& 0x7) << 19;
15106 inst
.instruction
|= (uval
!= 0) << 24;
15108 neon_dp_fixup (&inst
);
15112 do_neon_shl_imm (void)
15114 if (!inst
.operands
[2].isreg
)
15116 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
15117 struct neon_type_el et
= neon_check_type (2, rs
, N_EQK
, N_KEY
| N_I_ALL
);
15118 int imm
= inst
.operands
[2].imm
;
15120 constraint (imm
< 0 || (unsigned)imm
>= et
.size
,
15121 _("immediate out of range for shift"));
15122 NEON_ENCODE (IMMED
, inst
);
15123 neon_imm_shift (FALSE
, 0, neon_quad (rs
), et
, imm
);
15127 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
15128 struct neon_type_el et
= neon_check_type (3, rs
,
15129 N_EQK
, N_SU_ALL
| N_KEY
, N_EQK
| N_SGN
);
15132 /* VSHL/VQSHL 3-register variants have syntax such as:
15134 whereas other 3-register operations encoded by neon_three_same have
15137 (i.e. with Dn & Dm reversed). Swap operands[1].reg and operands[2].reg
15139 tmp
= inst
.operands
[2].reg
;
15140 inst
.operands
[2].reg
= inst
.operands
[1].reg
;
15141 inst
.operands
[1].reg
= tmp
;
15142 NEON_ENCODE (INTEGER
, inst
);
15143 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
15148 do_neon_qshl_imm (void)
15150 if (!inst
.operands
[2].isreg
)
15152 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
15153 struct neon_type_el et
= neon_check_type (2, rs
, N_EQK
, N_SU_ALL
| N_KEY
);
15154 int imm
= inst
.operands
[2].imm
;
15156 constraint (imm
< 0 || (unsigned)imm
>= et
.size
,
15157 _("immediate out of range for shift"));
15158 NEON_ENCODE (IMMED
, inst
);
15159 neon_imm_shift (TRUE
, et
.type
== NT_unsigned
, neon_quad (rs
), et
, imm
);
15163 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
15164 struct neon_type_el et
= neon_check_type (3, rs
,
15165 N_EQK
, N_SU_ALL
| N_KEY
, N_EQK
| N_SGN
);
15168 /* See note in do_neon_shl_imm. */
15169 tmp
= inst
.operands
[2].reg
;
15170 inst
.operands
[2].reg
= inst
.operands
[1].reg
;
15171 inst
.operands
[1].reg
= tmp
;
15172 NEON_ENCODE (INTEGER
, inst
);
15173 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
15178 do_neon_rshl (void)
15180 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
15181 struct neon_type_el et
= neon_check_type (3, rs
,
15182 N_EQK
, N_EQK
, N_SU_ALL
| N_KEY
);
15185 tmp
= inst
.operands
[2].reg
;
15186 inst
.operands
[2].reg
= inst
.operands
[1].reg
;
15187 inst
.operands
[1].reg
= tmp
;
15188 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
15192 neon_cmode_for_logic_imm (unsigned immediate
, unsigned *immbits
, int size
)
15194 /* Handle .I8 pseudo-instructions. */
15197 /* Unfortunately, this will make everything apart from zero out-of-range.
15198 FIXME is this the intended semantics? There doesn't seem much point in
15199 accepting .I8 if so. */
15200 immediate
|= immediate
<< 8;
15206 if (immediate
== (immediate
& 0x000000ff))
15208 *immbits
= immediate
;
15211 else if (immediate
== (immediate
& 0x0000ff00))
15213 *immbits
= immediate
>> 8;
15216 else if (immediate
== (immediate
& 0x00ff0000))
15218 *immbits
= immediate
>> 16;
15221 else if (immediate
== (immediate
& 0xff000000))
15223 *immbits
= immediate
>> 24;
15226 if ((immediate
& 0xffff) != (immediate
>> 16))
15227 goto bad_immediate
;
15228 immediate
&= 0xffff;
15231 if (immediate
== (immediate
& 0x000000ff))
15233 *immbits
= immediate
;
15236 else if (immediate
== (immediate
& 0x0000ff00))
15238 *immbits
= immediate
>> 8;
15243 first_error (_("immediate value out of range"));
15248 do_neon_logic (void)
15250 if (inst
.operands
[2].present
&& inst
.operands
[2].isreg
)
15252 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
15253 neon_check_type (3, rs
, N_IGNORE_TYPE
);
15254 /* U bit and size field were set as part of the bitmask. */
15255 NEON_ENCODE (INTEGER
, inst
);
15256 neon_three_same (neon_quad (rs
), 0, -1);
15260 const int three_ops_form
= (inst
.operands
[2].present
15261 && !inst
.operands
[2].isreg
);
15262 const int immoperand
= (three_ops_form
? 2 : 1);
15263 enum neon_shape rs
= (three_ops_form
15264 ? neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
)
15265 : neon_select_shape (NS_DI
, NS_QI
, NS_NULL
));
15266 struct neon_type_el et
= neon_check_type (2, rs
,
15267 N_I8
| N_I16
| N_I32
| N_I64
| N_F32
| N_KEY
, N_EQK
);
15268 enum neon_opc opcode
= (enum neon_opc
) inst
.instruction
& 0x0fffffff;
15272 if (et
.type
== NT_invtype
)
15275 if (three_ops_form
)
15276 constraint (inst
.operands
[0].reg
!= inst
.operands
[1].reg
,
15277 _("first and second operands shall be the same register"));
15279 NEON_ENCODE (IMMED
, inst
);
15281 immbits
= inst
.operands
[immoperand
].imm
;
15284 /* .i64 is a pseudo-op, so the immediate must be a repeating
15286 if (immbits
!= (inst
.operands
[immoperand
].regisimm
?
15287 inst
.operands
[immoperand
].reg
: 0))
15289 /* Set immbits to an invalid constant. */
15290 immbits
= 0xdeadbeef;
15297 cmode
= neon_cmode_for_logic_imm (immbits
, &immbits
, et
.size
);
15301 cmode
= neon_cmode_for_logic_imm (immbits
, &immbits
, et
.size
);
15305 /* Pseudo-instruction for VBIC. */
15306 neon_invert_size (&immbits
, 0, et
.size
);
15307 cmode
= neon_cmode_for_logic_imm (immbits
, &immbits
, et
.size
);
15311 /* Pseudo-instruction for VORR. */
15312 neon_invert_size (&immbits
, 0, et
.size
);
15313 cmode
= neon_cmode_for_logic_imm (immbits
, &immbits
, et
.size
);
15323 inst
.instruction
|= neon_quad (rs
) << 6;
15324 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15325 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15326 inst
.instruction
|= cmode
<< 8;
15327 neon_write_immbits (immbits
);
15329 neon_dp_fixup (&inst
);
15334 do_neon_bitfield (void)
15336 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
15337 neon_check_type (3, rs
, N_IGNORE_TYPE
);
15338 neon_three_same (neon_quad (rs
), 0, -1);
15342 neon_dyadic_misc (enum neon_el_type ubit_meaning
, unsigned types
,
15345 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
15346 struct neon_type_el et
= neon_check_type (3, rs
, N_EQK
| destbits
, N_EQK
,
15348 if (et
.type
== NT_float
)
15350 NEON_ENCODE (FLOAT
, inst
);
15351 neon_three_same (neon_quad (rs
), 0, et
.size
== 16 ? (int) et
.size
: -1);
15355 NEON_ENCODE (INTEGER
, inst
);
15356 neon_three_same (neon_quad (rs
), et
.type
== ubit_meaning
, et
.size
);
15361 do_neon_dyadic_if_su (void)
15363 neon_dyadic_misc (NT_unsigned
, N_SUF_32
, 0);
15367 do_neon_dyadic_if_su_d (void)
15369 /* This version only allow D registers, but that constraint is enforced during
15370 operand parsing so we don't need to do anything extra here. */
15371 neon_dyadic_misc (NT_unsigned
, N_SUF_32
, 0);
15375 do_neon_dyadic_if_i_d (void)
15377 /* The "untyped" case can't happen. Do this to stop the "U" bit being
15378 affected if we specify unsigned args. */
15379 neon_dyadic_misc (NT_untyped
, N_IF_32
, 0);
/* Flags telling vfp_or_neon_is_neon which checks to perform.  */
enum vfp_or_neon_is_neon_bits
{
  NEON_CHECK_CC = 1,
  NEON_CHECK_ARCH = 2,
  NEON_CHECK_ARCH8 = 4
};
15389 /* Call this function if an instruction which may have belonged to the VFP or
15390 Neon instruction sets, but turned out to be a Neon instruction (due to the
15391 operand types involved, etc.). We have to check and/or fix-up a couple of
15394 - Make sure the user hasn't attempted to make a Neon instruction
15396 - Alter the value in the condition code field if necessary.
15397 - Make sure that the arch supports Neon instructions.
15399 Which of these operations take place depends on bits from enum
15400 vfp_or_neon_is_neon_bits.
15402 WARNING: This function has side effects! If NEON_CHECK_CC is used and the
15403 current instruction's condition is COND_ALWAYS, the condition field is
15404 changed to inst.uncond_value. This is necessary because instructions shared
15405 between VFP and Neon may be conditional for the VFP variants only, and the
15406 unconditional Neon version must have, e.g., 0xF in the condition field. */
15409 vfp_or_neon_is_neon (unsigned check
)
15411 /* Conditions are always legal in Thumb mode (IT blocks). */
15412 if (!thumb_mode
&& (check
& NEON_CHECK_CC
))
15414 if (inst
.cond
!= COND_ALWAYS
)
15416 first_error (_(BAD_COND
));
15419 if (inst
.uncond_value
!= -1)
15420 inst
.instruction
|= inst
.uncond_value
<< 28;
15423 if ((check
& NEON_CHECK_ARCH
)
15424 && !mark_feature_used (&fpu_neon_ext_v1
))
15426 first_error (_(BAD_FPU
));
15430 if ((check
& NEON_CHECK_ARCH8
)
15431 && !mark_feature_used (&fpu_neon_ext_armv8
))
15433 first_error (_(BAD_FPU
));
15441 do_neon_addsub_if_i (void)
15443 if (try_vfp_nsyn (3, do_vfp_nsyn_add_sub
) == SUCCESS
)
15446 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
15449 /* The "untyped" case can't happen. Do this to stop the "U" bit being
15450 affected if we specify unsigned args. */
15451 neon_dyadic_misc (NT_untyped
, N_IF_32
| N_I64
, 0);
15454 /* Swaps operands 1 and 2. If operand 1 (optional arg) was omitted, we want the
15456 V<op> A,B (A is operand 0, B is operand 2)
15461 so handle that case specially. */
15464 neon_exchange_operands (void)
15466 if (inst
.operands
[1].present
)
15468 void *scratch
= xmalloc (sizeof (inst
.operands
[0]));
15470 /* Swap operands[1] and operands[2]. */
15471 memcpy (scratch
, &inst
.operands
[1], sizeof (inst
.operands
[0]));
15472 inst
.operands
[1] = inst
.operands
[2];
15473 memcpy (&inst
.operands
[2], scratch
, sizeof (inst
.operands
[0]));
15478 inst
.operands
[1] = inst
.operands
[2];
15479 inst
.operands
[2] = inst
.operands
[0];
15484 neon_compare (unsigned regtypes
, unsigned immtypes
, int invert
)
15486 if (inst
.operands
[2].isreg
)
15489 neon_exchange_operands ();
15490 neon_dyadic_misc (NT_unsigned
, regtypes
, N_SIZ
);
15494 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
15495 struct neon_type_el et
= neon_check_type (2, rs
,
15496 N_EQK
| N_SIZ
, immtypes
| N_KEY
);
15498 NEON_ENCODE (IMMED
, inst
);
15499 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15500 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15501 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
15502 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
15503 inst
.instruction
|= neon_quad (rs
) << 6;
15504 inst
.instruction
|= (et
.type
== NT_float
) << 10;
15505 inst
.instruction
|= neon_logbits (et
.size
) << 18;
15507 neon_dp_fixup (&inst
);
15514 neon_compare (N_SUF_32
, N_S_32
| N_F_16_32
, FALSE
);
15518 do_neon_cmp_inv (void)
15520 neon_compare (N_SUF_32
, N_S_32
| N_F_16_32
, TRUE
);
15526 neon_compare (N_IF_32
, N_IF_32
, FALSE
);
/* For multiply instructions, we have the possibility of 16-bit or 32-bit
   scalars, which are encoded in 5 bits, M : Rm.
   For 16-bit scalars, the register is encoded in Rm[2:0] and the index in
   M:Rm[3], and for 32-bit scalars, the register is encoded in Rm[3:0] and
   the index in M.

   Dot Product instructions are similar to multiply instructions except
   elsize should always be 32.

   This function translates SCALAR, which is GAS's internal encoding of
   indexed scalar register, to raw encoding.  There is also register and
   index range check based on ELSIZE.  */

static unsigned
neon_scalar_for_mul (unsigned scalar, unsigned elsize)
{
  unsigned regno = NEON_SCALAR_REG (scalar);
  unsigned elno = NEON_SCALAR_INDEX (scalar);

  switch (elsize)
    {
    case 16:
      if (regno > 7 || elno > 3)
	goto bad_scalar;
      return regno | (elno << 3);

    case 32:
      if (regno > 15 || elno > 1)
	goto bad_scalar;
      return regno | (elno << 4);

    default:
    bad_scalar:
      first_error (_("scalar out of range for multiply instruction"));
    }

  return 0;
}
15568 /* Encode multiply / multiply-accumulate scalar instructions. */
15571 neon_mul_mac (struct neon_type_el et
, int ubit
)
15575 /* Give a more helpful error message if we have an invalid type. */
15576 if (et
.type
== NT_invtype
)
15579 scalar
= neon_scalar_for_mul (inst
.operands
[2].reg
, et
.size
);
15580 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15581 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15582 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
15583 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
15584 inst
.instruction
|= LOW4 (scalar
);
15585 inst
.instruction
|= HI1 (scalar
) << 5;
15586 inst
.instruction
|= (et
.type
== NT_float
) << 8;
15587 inst
.instruction
|= neon_logbits (et
.size
) << 20;
15588 inst
.instruction
|= (ubit
!= 0) << 24;
15590 neon_dp_fixup (&inst
);
15594 do_neon_mac_maybe_scalar (void)
15596 if (try_vfp_nsyn (3, do_vfp_nsyn_mla_mls
) == SUCCESS
)
15599 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
15602 if (inst
.operands
[2].isscalar
)
15604 enum neon_shape rs
= neon_select_shape (NS_DDS
, NS_QQS
, NS_NULL
);
15605 struct neon_type_el et
= neon_check_type (3, rs
,
15606 N_EQK
, N_EQK
, N_I16
| N_I32
| N_F_16_32
| N_KEY
);
15607 NEON_ENCODE (SCALAR
, inst
);
15608 neon_mul_mac (et
, neon_quad (rs
));
15612 /* The "untyped" case can't happen. Do this to stop the "U" bit being
15613 affected if we specify unsigned args. */
15614 neon_dyadic_misc (NT_untyped
, N_IF_32
, 0);
15619 do_neon_fmac (void)
15621 if (try_vfp_nsyn (3, do_vfp_nsyn_fma_fms
) == SUCCESS
)
15624 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
15627 neon_dyadic_misc (NT_untyped
, N_IF_32
, 0);
15633 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
15634 struct neon_type_el et
= neon_check_type (3, rs
,
15635 N_EQK
, N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
15636 neon_three_same (neon_quad (rs
), 0, et
.size
);
15639 /* VMUL with 3 registers allows the P8 type. The scalar version supports the
15640 same types as the MAC equivalents. The polynomial type for this instruction
15641 is encoded the same as the integer type. */
15646 if (try_vfp_nsyn (3, do_vfp_nsyn_mul
) == SUCCESS
)
15649 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
15652 if (inst
.operands
[2].isscalar
)
15653 do_neon_mac_maybe_scalar ();
15655 neon_dyadic_misc (NT_poly
, N_I8
| N_I16
| N_I32
| N_F16
| N_F32
| N_P8
, 0);
15659 do_neon_qdmulh (void)
15661 if (inst
.operands
[2].isscalar
)
15663 enum neon_shape rs
= neon_select_shape (NS_DDS
, NS_QQS
, NS_NULL
);
15664 struct neon_type_el et
= neon_check_type (3, rs
,
15665 N_EQK
, N_EQK
, N_S16
| N_S32
| N_KEY
);
15666 NEON_ENCODE (SCALAR
, inst
);
15667 neon_mul_mac (et
, neon_quad (rs
));
15671 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
15672 struct neon_type_el et
= neon_check_type (3, rs
,
15673 N_EQK
, N_EQK
, N_S16
| N_S32
| N_KEY
);
15674 NEON_ENCODE (INTEGER
, inst
);
15675 /* The U bit (rounding) comes from bit mask. */
15676 neon_three_same (neon_quad (rs
), 0, et
.size
);
15681 do_neon_qrdmlah (void)
15683 /* Check we're on the correct architecture. */
15684 if (!mark_feature_used (&fpu_neon_ext_armv8
))
15686 _("instruction form not available on this architecture.");
15687 else if (!mark_feature_used (&fpu_neon_ext_v8_1
))
15689 as_warn (_("this instruction implies use of ARMv8.1 AdvSIMD."));
15690 record_feature_use (&fpu_neon_ext_v8_1
);
15693 if (inst
.operands
[2].isscalar
)
15695 enum neon_shape rs
= neon_select_shape (NS_DDS
, NS_QQS
, NS_NULL
);
15696 struct neon_type_el et
= neon_check_type (3, rs
,
15697 N_EQK
, N_EQK
, N_S16
| N_S32
| N_KEY
);
15698 NEON_ENCODE (SCALAR
, inst
);
15699 neon_mul_mac (et
, neon_quad (rs
));
15703 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
15704 struct neon_type_el et
= neon_check_type (3, rs
,
15705 N_EQK
, N_EQK
, N_S16
| N_S32
| N_KEY
);
15706 NEON_ENCODE (INTEGER
, inst
);
15707 /* The U bit (rounding) comes from bit mask. */
15708 neon_three_same (neon_quad (rs
), 0, et
.size
);
15713 do_neon_fcmp_absolute (void)
15715 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
15716 struct neon_type_el et
= neon_check_type (3, rs
, N_EQK
, N_EQK
,
15717 N_F_16_32
| N_KEY
);
15718 /* Size field comes from bit mask. */
15719 neon_three_same (neon_quad (rs
), 1, et
.size
== 16 ? (int) et
.size
: -1);
/* Encode vacle/vaclt as the opposite absolute compare with swapped
   operands.  */

static void
do_neon_fcmp_absolute_inv (void)
{
  neon_exchange_operands ();
  do_neon_fcmp_absolute ();
}
15730 do_neon_step (void)
15732 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
15733 struct neon_type_el et
= neon_check_type (3, rs
, N_EQK
, N_EQK
,
15734 N_F_16_32
| N_KEY
);
15735 neon_three_same (neon_quad (rs
), 0, et
.size
== 16 ? (int) et
.size
: -1);
15739 do_neon_abs_neg (void)
15741 enum neon_shape rs
;
15742 struct neon_type_el et
;
15744 if (try_vfp_nsyn (2, do_vfp_nsyn_abs_neg
) == SUCCESS
)
15747 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
15750 rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
15751 et
= neon_check_type (2, rs
, N_EQK
, N_S_32
| N_F_16_32
| N_KEY
);
15753 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15754 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15755 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
15756 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
15757 inst
.instruction
|= neon_quad (rs
) << 6;
15758 inst
.instruction
|= (et
.type
== NT_float
) << 10;
15759 inst
.instruction
|= neon_logbits (et
.size
) << 18;
15761 neon_dp_fixup (&inst
);
15767 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
15768 struct neon_type_el et
= neon_check_type (2, rs
,
15769 N_EQK
, N_8
| N_16
| N_32
| N_64
| N_KEY
);
15770 int imm
= inst
.operands
[2].imm
;
15771 constraint (imm
< 0 || (unsigned)imm
>= et
.size
,
15772 _("immediate out of range for insert"));
15773 neon_imm_shift (FALSE
, 0, neon_quad (rs
), et
, imm
);
15779 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
15780 struct neon_type_el et
= neon_check_type (2, rs
,
15781 N_EQK
, N_8
| N_16
| N_32
| N_64
| N_KEY
);
15782 int imm
= inst
.operands
[2].imm
;
15783 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
15784 _("immediate out of range for insert"));
15785 neon_imm_shift (FALSE
, 0, neon_quad (rs
), et
, et
.size
- imm
);
15789 do_neon_qshlu_imm (void)
15791 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
15792 struct neon_type_el et
= neon_check_type (2, rs
,
15793 N_EQK
| N_UNS
, N_S8
| N_S16
| N_S32
| N_S64
| N_KEY
);
15794 int imm
= inst
.operands
[2].imm
;
15795 constraint (imm
< 0 || (unsigned)imm
>= et
.size
,
15796 _("immediate out of range for shift"));
15797 /* Only encodes the 'U present' variant of the instruction.
15798 In this case, signed types have OP (bit 8) set to 0.
15799 Unsigned types have OP set to 1. */
15800 inst
.instruction
|= (et
.type
== NT_unsigned
) << 8;
15801 /* The rest of the bits are the same as other immediate shifts. */
15802 neon_imm_shift (FALSE
, 0, neon_quad (rs
), et
, imm
);
15806 do_neon_qmovn (void)
15808 struct neon_type_el et
= neon_check_type (2, NS_DQ
,
15809 N_EQK
| N_HLF
, N_SU_16_64
| N_KEY
);
15810 /* Saturating move where operands can be signed or unsigned, and the
15811 destination has the same signedness. */
15812 NEON_ENCODE (INTEGER
, inst
);
15813 if (et
.type
== NT_unsigned
)
15814 inst
.instruction
|= 0xc0;
15816 inst
.instruction
|= 0x80;
15817 neon_two_same (0, 1, et
.size
/ 2);
15821 do_neon_qmovun (void)
15823 struct neon_type_el et
= neon_check_type (2, NS_DQ
,
15824 N_EQK
| N_HLF
| N_UNS
, N_S16
| N_S32
| N_S64
| N_KEY
);
15825 /* Saturating move with unsigned results. Operands must be signed. */
15826 NEON_ENCODE (INTEGER
, inst
);
15827 neon_two_same (0, 1, et
.size
/ 2);
15831 do_neon_rshift_sat_narrow (void)
15833 /* FIXME: Types for narrowing. If operands are signed, results can be signed
15834 or unsigned. If operands are unsigned, results must also be unsigned. */
15835 struct neon_type_el et
= neon_check_type (2, NS_DQI
,
15836 N_EQK
| N_HLF
, N_SU_16_64
| N_KEY
);
15837 int imm
= inst
.operands
[2].imm
;
15838 /* This gets the bounds check, size encoding and immediate bits calculation
15842 /* VQ{R}SHRN.I<size> <Dd>, <Qm>, #0 is a synonym for
15843 VQMOVN.I<size> <Dd>, <Qm>. */
15846 inst
.operands
[2].present
= 0;
15847 inst
.instruction
= N_MNEM_vqmovn
;
15852 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
15853 _("immediate out of range"));
15854 neon_imm_shift (TRUE
, et
.type
== NT_unsigned
, 0, et
, et
.size
- imm
);
15858 do_neon_rshift_sat_narrow_u (void)
15860 /* FIXME: Types for narrowing. If operands are signed, results can be signed
15861 or unsigned. If operands are unsigned, results must also be unsigned. */
15862 struct neon_type_el et
= neon_check_type (2, NS_DQI
,
15863 N_EQK
| N_HLF
| N_UNS
, N_S16
| N_S32
| N_S64
| N_KEY
);
15864 int imm
= inst
.operands
[2].imm
;
15865 /* This gets the bounds check, size encoding and immediate bits calculation
15869 /* VQSHRUN.I<size> <Dd>, <Qm>, #0 is a synonym for
15870 VQMOVUN.I<size> <Dd>, <Qm>. */
15873 inst
.operands
[2].present
= 0;
15874 inst
.instruction
= N_MNEM_vqmovun
;
15879 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
15880 _("immediate out of range"));
15881 /* FIXME: The manual is kind of unclear about what value U should have in
15882 VQ{R}SHRUN instructions, but U=0, op=0 definitely encodes VRSHR, so it
15884 neon_imm_shift (TRUE
, 1, 0, et
, et
.size
- imm
);
15888 do_neon_movn (void)
15890 struct neon_type_el et
= neon_check_type (2, NS_DQ
,
15891 N_EQK
| N_HLF
, N_I16
| N_I32
| N_I64
| N_KEY
);
15892 NEON_ENCODE (INTEGER
, inst
);
15893 neon_two_same (0, 1, et
.size
/ 2);
15897 do_neon_rshift_narrow (void)
15899 struct neon_type_el et
= neon_check_type (2, NS_DQI
,
15900 N_EQK
| N_HLF
, N_I16
| N_I32
| N_I64
| N_KEY
);
15901 int imm
= inst
.operands
[2].imm
;
15902 /* This gets the bounds check, size encoding and immediate bits calculation
15906 /* If immediate is zero then we are a pseudo-instruction for
15907 VMOVN.I<size> <Dd>, <Qm> */
15910 inst
.operands
[2].present
= 0;
15911 inst
.instruction
= N_MNEM_vmovn
;
15916 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
15917 _("immediate out of range for narrowing operation"));
15918 neon_imm_shift (FALSE
, 0, 0, et
, et
.size
- imm
);
15922 do_neon_shll (void)
15924 /* FIXME: Type checking when lengthening. */
15925 struct neon_type_el et
= neon_check_type (2, NS_QDI
,
15926 N_EQK
| N_DBL
, N_I8
| N_I16
| N_I32
| N_KEY
);
15927 unsigned imm
= inst
.operands
[2].imm
;
15929 if (imm
== et
.size
)
15931 /* Maximum shift variant. */
15932 NEON_ENCODE (INTEGER
, inst
);
15933 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15934 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15935 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
15936 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
15937 inst
.instruction
|= neon_logbits (et
.size
) << 18;
15939 neon_dp_fixup (&inst
);
15943 /* A more-specific type check for non-max versions. */
15944 et
= neon_check_type (2, NS_QDI
,
15945 N_EQK
| N_DBL
, N_SU_32
| N_KEY
);
15946 NEON_ENCODE (IMMED
, inst
);
15947 neon_imm_shift (TRUE
, et
.type
== NT_unsigned
, 0, et
, imm
);
/* Check the various types for the VCVT instruction, and return which version
   the current instruction is.  */

#define CVT_FLAVOUR_VAR							      \
  CVT_VAR (s32_f32, N_S32, N_F32, whole_reg, "ftosls", "ftosis", "ftosizs")   \
  CVT_VAR (u32_f32, N_U32, N_F32, whole_reg, "ftouls", "ftouis", "ftouizs")   \
  CVT_VAR (f32_s32, N_F32, N_S32, whole_reg, "fsltos", "fsitos", NULL)	      \
  CVT_VAR (f32_u32, N_F32, N_U32, whole_reg, "fultos", "fuitos", NULL)	      \
  /* Half-precision conversions.  */					      \
  CVT_VAR (s16_f16, N_S16, N_F16 | N_KEY, whole_reg, NULL, NULL, NULL)	      \
  CVT_VAR (u16_f16, N_U16, N_F16 | N_KEY, whole_reg, NULL, NULL, NULL)	      \
  CVT_VAR (f16_s16, N_F16 | N_KEY, N_S16, whole_reg, NULL, NULL, NULL)	      \
  CVT_VAR (f16_u16, N_F16 | N_KEY, N_U16, whole_reg, NULL, NULL, NULL)	      \
  CVT_VAR (f32_f16, N_F32, N_F16, whole_reg, NULL, NULL, NULL)		      \
  CVT_VAR (f16_f32, N_F16, N_F32, whole_reg, NULL, NULL, NULL)		      \
  /* New VCVT instructions introduced by ARMv8.2 fp16 extension.	      \
     Compared with single/double precision variants, only the co-processor    \
     field is different, so the encoding flow is reused here.  */	      \
  CVT_VAR (f16_s32, N_F16 | N_KEY, N_S32, N_VFP, "fsltos", "fsitos", NULL)    \
  CVT_VAR (f16_u32, N_F16 | N_KEY, N_U32, N_VFP, "fultos", "fuitos", NULL)    \
  CVT_VAR (u32_f16, N_U32, N_F16 | N_KEY, N_VFP, "ftouls", "ftouis", "ftouizs")\
  CVT_VAR (s32_f16, N_S32, N_F16 | N_KEY, N_VFP, "ftosls", "ftosis", "ftosizs")\
  /* VFP instructions.  */						      \
  CVT_VAR (f32_f64, N_F32, N_F64, N_VFP, NULL, "fcvtsd", NULL)		      \
  CVT_VAR (f64_f32, N_F64, N_F32, N_VFP, NULL, "fcvtds", NULL)		      \
  CVT_VAR (s32_f64, N_S32, N_F64 | key, N_VFP, "ftosld", "ftosid", "ftosizd") \
  CVT_VAR (u32_f64, N_U32, N_F64 | key, N_VFP, "ftould", "ftouid", "ftouizd") \
  CVT_VAR (f64_s32, N_F64 | key, N_S32, N_VFP, "fsltod", "fsitod", NULL)      \
  CVT_VAR (f64_u32, N_F64 | key, N_U32, N_VFP, "fultod", "fuitod", NULL)      \
  /* VFP instructions with bitshift.  */				      \
  CVT_VAR (f32_s16, N_F32 | key, N_S16, N_VFP, "fshtos", NULL, NULL)	      \
  CVT_VAR (f32_u16, N_F32 | key, N_U16, N_VFP, "fuhtos", NULL, NULL)	      \
  CVT_VAR (f64_s16, N_F64 | key, N_S16, N_VFP, "fshtod", NULL, NULL)	      \
  CVT_VAR (f64_u16, N_F64 | key, N_U16, N_VFP, "fuhtod", NULL, NULL)	      \
  CVT_VAR (s16_f32, N_S16, N_F32 | key, N_VFP, "ftoshs", NULL, NULL)	      \
  CVT_VAR (u16_f32, N_U16, N_F32 | key, N_VFP, "ftouhs", NULL, NULL)	      \
  CVT_VAR (s16_f64, N_S16, N_F64 | key, N_VFP, "ftoshd", NULL, NULL)	      \
  CVT_VAR (u16_f64, N_U16, N_F64 | key, N_VFP, "ftouhd", NULL, NULL)
15990 #define CVT_VAR(C, X, Y, R, BSN, CN, ZN) \
15991 neon_cvt_flavour_##C,
15993 /* The different types of conversions we can do. */
15994 enum neon_cvt_flavour
15997 neon_cvt_flavour_invalid
,
15998 neon_cvt_flavour_first_fp
= neon_cvt_flavour_f32_f64
16003 static enum neon_cvt_flavour
16004 get_neon_cvt_flavour (enum neon_shape rs
)
16006 #define CVT_VAR(C,X,Y,R,BSN,CN,ZN) \
16007 et = neon_check_type (2, rs, (R) | (X), (R) | (Y)); \
16008 if (et.type != NT_invtype) \
16010 inst.error = NULL; \
16011 return (neon_cvt_flavour_##C); \
16014 struct neon_type_el et
;
16015 unsigned whole_reg
= (rs
== NS_FFI
|| rs
== NS_FD
|| rs
== NS_DF
16016 || rs
== NS_FF
) ? N_VFP
: 0;
16017 /* The instruction versions which take an immediate take one register
16018 argument, which is extended to the width of the full register. Thus the
16019 "source" and "destination" registers must have the same width. Hack that
16020 here by making the size equal to the key (wider, in this case) operand. */
16021 unsigned key
= (rs
== NS_QQI
|| rs
== NS_DDI
|| rs
== NS_FFI
) ? N_KEY
: 0;
16025 return neon_cvt_flavour_invalid
;
16040 /* Neon-syntax VFP conversions. */
16043 do_vfp_nsyn_cvt (enum neon_shape rs
, enum neon_cvt_flavour flavour
)
16045 const char *opname
= 0;
16047 if (rs
== NS_DDI
|| rs
== NS_QQI
|| rs
== NS_FFI
16048 || rs
== NS_FHI
|| rs
== NS_HFI
)
16050 /* Conversions with immediate bitshift. */
16051 const char *enc
[] =
16053 #define CVT_VAR(C,A,B,R,BSN,CN,ZN) BSN,
16059 if (flavour
< (int) ARRAY_SIZE (enc
))
16061 opname
= enc
[flavour
];
16062 constraint (inst
.operands
[0].reg
!= inst
.operands
[1].reg
,
16063 _("operands 0 and 1 must be the same register"));
16064 inst
.operands
[1] = inst
.operands
[2];
16065 memset (&inst
.operands
[2], '\0', sizeof (inst
.operands
[2]));
16070 /* Conversions without bitshift. */
16071 const char *enc
[] =
16073 #define CVT_VAR(C,A,B,R,BSN,CN,ZN) CN,
16079 if (flavour
< (int) ARRAY_SIZE (enc
))
16080 opname
= enc
[flavour
];
16084 do_vfp_nsyn_opcode (opname
);
16086 /* ARMv8.2 fp16 VCVT instruction. */
16087 if (flavour
== neon_cvt_flavour_s32_f16
16088 || flavour
== neon_cvt_flavour_u32_f16
16089 || flavour
== neon_cvt_flavour_f16_u32
16090 || flavour
== neon_cvt_flavour_f16_s32
)
16091 do_scalar_fp16_v82_encode ();
16095 do_vfp_nsyn_cvtz (void)
16097 enum neon_shape rs
= neon_select_shape (NS_FH
, NS_FF
, NS_FD
, NS_NULL
);
16098 enum neon_cvt_flavour flavour
= get_neon_cvt_flavour (rs
);
16099 const char *enc
[] =
16101 #define CVT_VAR(C,A,B,R,BSN,CN,ZN) ZN,
16107 if (flavour
< (int) ARRAY_SIZE (enc
) && enc
[flavour
])
16108 do_vfp_nsyn_opcode (enc
[flavour
]);
16112 do_vfp_nsyn_cvt_fpv8 (enum neon_cvt_flavour flavour
,
16113 enum neon_cvt_mode mode
)
16118 /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
16119 D register operands. */
16120 if (flavour
== neon_cvt_flavour_s32_f64
16121 || flavour
== neon_cvt_flavour_u32_f64
)
16122 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
16125 if (flavour
== neon_cvt_flavour_s32_f16
16126 || flavour
== neon_cvt_flavour_u32_f16
)
16127 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_fp16
),
16130 set_it_insn_type (OUTSIDE_IT_INSN
);
16134 case neon_cvt_flavour_s32_f64
:
16138 case neon_cvt_flavour_s32_f32
:
16142 case neon_cvt_flavour_s32_f16
:
16146 case neon_cvt_flavour_u32_f64
:
16150 case neon_cvt_flavour_u32_f32
:
16154 case neon_cvt_flavour_u32_f16
:
16159 first_error (_("invalid instruction shape"));
16165 case neon_cvt_mode_a
: rm
= 0; break;
16166 case neon_cvt_mode_n
: rm
= 1; break;
16167 case neon_cvt_mode_p
: rm
= 2; break;
16168 case neon_cvt_mode_m
: rm
= 3; break;
16169 default: first_error (_("invalid rounding mode")); return;
16172 NEON_ENCODE (FPV8
, inst
);
16173 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
16174 encode_arm_vfp_reg (inst
.operands
[1].reg
, sz
== 1 ? VFP_REG_Dm
: VFP_REG_Sm
);
16175 inst
.instruction
|= sz
<< 8;
16177 /* ARMv8.2 fp16 VCVT instruction. */
16178 if (flavour
== neon_cvt_flavour_s32_f16
16179 ||flavour
== neon_cvt_flavour_u32_f16
)
16180 do_scalar_fp16_v82_encode ();
16181 inst
.instruction
|= op
<< 7;
16182 inst
.instruction
|= rm
<< 16;
16183 inst
.instruction
|= 0xf0000000;
16184 inst
.is_neon
= TRUE
;
16188 do_neon_cvt_1 (enum neon_cvt_mode mode
)
16190 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_FFI
, NS_DD
, NS_QQ
,
16191 NS_FD
, NS_DF
, NS_FF
, NS_QD
, NS_DQ
,
16192 NS_FH
, NS_HF
, NS_FHI
, NS_HFI
,
16194 enum neon_cvt_flavour flavour
= get_neon_cvt_flavour (rs
);
16196 if (flavour
== neon_cvt_flavour_invalid
)
16199 /* PR11109: Handle round-to-zero for VCVT conversions. */
16200 if (mode
== neon_cvt_mode_z
16201 && ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_arch_vfp_v2
)
16202 && (flavour
== neon_cvt_flavour_s16_f16
16203 || flavour
== neon_cvt_flavour_u16_f16
16204 || flavour
== neon_cvt_flavour_s32_f32
16205 || flavour
== neon_cvt_flavour_u32_f32
16206 || flavour
== neon_cvt_flavour_s32_f64
16207 || flavour
== neon_cvt_flavour_u32_f64
)
16208 && (rs
== NS_FD
|| rs
== NS_FF
))
16210 do_vfp_nsyn_cvtz ();
16214 /* ARMv8.2 fp16 VCVT conversions. */
16215 if (mode
== neon_cvt_mode_z
16216 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_fp16
)
16217 && (flavour
== neon_cvt_flavour_s32_f16
16218 || flavour
== neon_cvt_flavour_u32_f16
)
16221 do_vfp_nsyn_cvtz ();
16222 do_scalar_fp16_v82_encode ();
16226 /* VFP rather than Neon conversions. */
16227 if (flavour
>= neon_cvt_flavour_first_fp
)
16229 if (mode
== neon_cvt_mode_x
|| mode
== neon_cvt_mode_z
)
16230 do_vfp_nsyn_cvt (rs
, flavour
);
16232 do_vfp_nsyn_cvt_fpv8 (flavour
, mode
);
16243 unsigned enctab
[] = {0x0000100, 0x1000100, 0x0, 0x1000000,
16244 0x0000100, 0x1000100, 0x0, 0x1000000};
16246 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
16249 /* Fixed-point conversion with #0 immediate is encoded as an
16250 integer conversion. */
16251 if (inst
.operands
[2].present
&& inst
.operands
[2].imm
== 0)
16253 NEON_ENCODE (IMMED
, inst
);
16254 if (flavour
!= neon_cvt_flavour_invalid
)
16255 inst
.instruction
|= enctab
[flavour
];
16256 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16257 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16258 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
16259 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
16260 inst
.instruction
|= neon_quad (rs
) << 6;
16261 inst
.instruction
|= 1 << 21;
16262 if (flavour
< neon_cvt_flavour_s16_f16
)
16264 inst
.instruction
|= 1 << 21;
16265 immbits
= 32 - inst
.operands
[2].imm
;
16266 inst
.instruction
|= immbits
<< 16;
16270 inst
.instruction
|= 3 << 20;
16271 immbits
= 16 - inst
.operands
[2].imm
;
16272 inst
.instruction
|= immbits
<< 16;
16273 inst
.instruction
&= ~(1 << 9);
16276 neon_dp_fixup (&inst
);
16282 if (mode
!= neon_cvt_mode_x
&& mode
!= neon_cvt_mode_z
)
16284 NEON_ENCODE (FLOAT
, inst
);
16285 set_it_insn_type (OUTSIDE_IT_INSN
);
16287 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH8
) == FAIL
)
16290 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16291 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16292 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
16293 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
16294 inst
.instruction
|= neon_quad (rs
) << 6;
16295 inst
.instruction
|= (flavour
== neon_cvt_flavour_u16_f16
16296 || flavour
== neon_cvt_flavour_u32_f32
) << 7;
16297 inst
.instruction
|= mode
<< 8;
16298 if (flavour
== neon_cvt_flavour_u16_f16
16299 || flavour
== neon_cvt_flavour_s16_f16
)
16300 /* Mask off the original size bits and reencode them. */
16301 inst
.instruction
= ((inst
.instruction
& 0xfff3ffff) | (1 << 18));
16304 inst
.instruction
|= 0xfc000000;
16306 inst
.instruction
|= 0xf0000000;
16312 unsigned enctab
[] = { 0x100, 0x180, 0x0, 0x080,
16313 0x100, 0x180, 0x0, 0x080};
16315 NEON_ENCODE (INTEGER
, inst
);
16317 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
16320 if (flavour
!= neon_cvt_flavour_invalid
)
16321 inst
.instruction
|= enctab
[flavour
];
16323 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16324 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16325 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
16326 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
16327 inst
.instruction
|= neon_quad (rs
) << 6;
16328 if (flavour
>= neon_cvt_flavour_s16_f16
16329 && flavour
<= neon_cvt_flavour_f16_u16
)
16330 /* Half precision. */
16331 inst
.instruction
|= 1 << 18;
16333 inst
.instruction
|= 2 << 18;
16335 neon_dp_fixup (&inst
);
16340 /* Half-precision conversions for Advanced SIMD -- neon. */
16343 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
16347 && (inst
.vectype
.el
[0].size
!= 16 || inst
.vectype
.el
[1].size
!= 32))
16349 as_bad (_("operand size must match register width"));
16354 && ((inst
.vectype
.el
[0].size
!= 32 || inst
.vectype
.el
[1].size
!= 16)))
16356 as_bad (_("operand size must match register width"));
16361 inst
.instruction
= 0x3b60600;
16363 inst
.instruction
= 0x3b60700;
16365 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16366 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16367 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
16368 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
16369 neon_dp_fixup (&inst
);
16373 /* Some VFP conversions go here (s32 <-> f32, u32 <-> f32). */
16374 if (mode
== neon_cvt_mode_x
|| mode
== neon_cvt_mode_z
)
16375 do_vfp_nsyn_cvt (rs
, flavour
);
16377 do_vfp_nsyn_cvt_fpv8 (flavour
, mode
);
16382 do_neon_cvtr (void)
16384 do_neon_cvt_1 (neon_cvt_mode_x
);
16390 do_neon_cvt_1 (neon_cvt_mode_z
);
16394 do_neon_cvta (void)
16396 do_neon_cvt_1 (neon_cvt_mode_a
);
16400 do_neon_cvtn (void)
16402 do_neon_cvt_1 (neon_cvt_mode_n
);
16406 do_neon_cvtp (void)
16408 do_neon_cvt_1 (neon_cvt_mode_p
);
16412 do_neon_cvtm (void)
16414 do_neon_cvt_1 (neon_cvt_mode_m
);
16418 do_neon_cvttb_2 (bfd_boolean t
, bfd_boolean to
, bfd_boolean is_double
)
16421 mark_feature_used (&fpu_vfp_ext_armv8
);
16423 encode_arm_vfp_reg (inst
.operands
[0].reg
,
16424 (is_double
&& !to
) ? VFP_REG_Dd
: VFP_REG_Sd
);
16425 encode_arm_vfp_reg (inst
.operands
[1].reg
,
16426 (is_double
&& to
) ? VFP_REG_Dm
: VFP_REG_Sm
);
16427 inst
.instruction
|= to
? 0x10000 : 0;
16428 inst
.instruction
|= t
? 0x80 : 0;
16429 inst
.instruction
|= is_double
? 0x100 : 0;
16430 do_vfp_cond_or_thumb ();
16434 do_neon_cvttb_1 (bfd_boolean t
)
16436 enum neon_shape rs
= neon_select_shape (NS_HF
, NS_HD
, NS_FH
, NS_FF
, NS_FD
,
16437 NS_DF
, NS_DH
, NS_NULL
);
16441 else if (neon_check_type (2, rs
, N_F16
, N_F32
| N_VFP
).type
!= NT_invtype
)
16444 do_neon_cvttb_2 (t
, /*to=*/TRUE
, /*is_double=*/FALSE
);
16446 else if (neon_check_type (2, rs
, N_F32
| N_VFP
, N_F16
).type
!= NT_invtype
)
16449 do_neon_cvttb_2 (t
, /*to=*/FALSE
, /*is_double=*/FALSE
);
16451 else if (neon_check_type (2, rs
, N_F16
, N_F64
| N_VFP
).type
!= NT_invtype
)
16453 /* The VCVTB and VCVTT instructions with D-register operands
16454 don't work for SP only targets. */
16455 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
16459 do_neon_cvttb_2 (t
, /*to=*/TRUE
, /*is_double=*/TRUE
);
16461 else if (neon_check_type (2, rs
, N_F64
| N_VFP
, N_F16
).type
!= NT_invtype
)
16463 /* The VCVTB and VCVTT instructions with D-register operands
16464 don't work for SP only targets. */
16465 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
16469 do_neon_cvttb_2 (t
, /*to=*/FALSE
, /*is_double=*/TRUE
);
16476 do_neon_cvtb (void)
16478 do_neon_cvttb_1 (FALSE
);
16483 do_neon_cvtt (void)
16485 do_neon_cvttb_1 (TRUE
);
16489 neon_move_immediate (void)
16491 enum neon_shape rs
= neon_select_shape (NS_DI
, NS_QI
, NS_NULL
);
16492 struct neon_type_el et
= neon_check_type (2, rs
,
16493 N_I8
| N_I16
| N_I32
| N_I64
| N_F32
| N_KEY
, N_EQK
);
16494 unsigned immlo
, immhi
= 0, immbits
;
16495 int op
, cmode
, float_p
;
16497 constraint (et
.type
== NT_invtype
,
16498 _("operand size must be specified for immediate VMOV"));
16500 /* We start out as an MVN instruction if OP = 1, MOV otherwise. */
16501 op
= (inst
.instruction
& (1 << 5)) != 0;
16503 immlo
= inst
.operands
[1].imm
;
16504 if (inst
.operands
[1].regisimm
)
16505 immhi
= inst
.operands
[1].reg
;
16507 constraint (et
.size
< 32 && (immlo
& ~((1 << et
.size
) - 1)) != 0,
16508 _("immediate has bits set outside the operand size"));
16510 float_p
= inst
.operands
[1].immisfloat
;
16512 if ((cmode
= neon_cmode_for_move_imm (immlo
, immhi
, float_p
, &immbits
, &op
,
16513 et
.size
, et
.type
)) == FAIL
)
16515 /* Invert relevant bits only. */
16516 neon_invert_size (&immlo
, &immhi
, et
.size
);
16517 /* Flip from VMOV/VMVN to VMVN/VMOV. Some immediate types are unavailable
16518 with one or the other; those cases are caught by
16519 neon_cmode_for_move_imm. */
16521 if ((cmode
= neon_cmode_for_move_imm (immlo
, immhi
, float_p
, &immbits
,
16522 &op
, et
.size
, et
.type
)) == FAIL
)
16524 first_error (_("immediate out of range"));
16529 inst
.instruction
&= ~(1 << 5);
16530 inst
.instruction
|= op
<< 5;
16532 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16533 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16534 inst
.instruction
|= neon_quad (rs
) << 6;
16535 inst
.instruction
|= cmode
<< 8;
16537 neon_write_immbits (immbits
);
16543 if (inst
.operands
[1].isreg
)
16545 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
16547 NEON_ENCODE (INTEGER
, inst
);
16548 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16549 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16550 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
16551 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
16552 inst
.instruction
|= neon_quad (rs
) << 6;
16556 NEON_ENCODE (IMMED
, inst
);
16557 neon_move_immediate ();
16560 neon_dp_fixup (&inst
);
16563 /* Encode instructions of form:
16565 |28/24|23|22|21 20|19 16|15 12|11 8|7|6|5|4|3 0|
16566 | U |x |D |size | Rn | Rd |x x x x|N|x|M|x| Rm | */
16569 neon_mixed_length (struct neon_type_el et
, unsigned size
)
16571 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16572 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16573 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
16574 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
16575 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
16576 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
16577 inst
.instruction
|= (et
.type
== NT_unsigned
) << 24;
16578 inst
.instruction
|= neon_logbits (size
) << 20;
16580 neon_dp_fixup (&inst
);
16584 do_neon_dyadic_long (void)
16586 /* FIXME: Type checking for lengthening op. */
16587 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
16588 N_EQK
| N_DBL
, N_EQK
, N_SU_32
| N_KEY
);
16589 neon_mixed_length (et
, et
.size
);
16593 do_neon_abal (void)
16595 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
16596 N_EQK
| N_INT
| N_DBL
, N_EQK
, N_SU_32
| N_KEY
);
16597 neon_mixed_length (et
, et
.size
);
16601 neon_mac_reg_scalar_long (unsigned regtypes
, unsigned scalartypes
)
16603 if (inst
.operands
[2].isscalar
)
16605 struct neon_type_el et
= neon_check_type (3, NS_QDS
,
16606 N_EQK
| N_DBL
, N_EQK
, regtypes
| N_KEY
);
16607 NEON_ENCODE (SCALAR
, inst
);
16608 neon_mul_mac (et
, et
.type
== NT_unsigned
);
16612 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
16613 N_EQK
| N_DBL
, N_EQK
, scalartypes
| N_KEY
);
16614 NEON_ENCODE (INTEGER
, inst
);
16615 neon_mixed_length (et
, et
.size
);
16620 do_neon_mac_maybe_scalar_long (void)
16622 neon_mac_reg_scalar_long (N_S16
| N_S32
| N_U16
| N_U32
, N_SU_32
);
16625 /* Like neon_scalar_for_mul, this function generate Rm encoding from GAS's
16626 internal SCALAR. QUAD_P is 1 if it's for Q format, otherwise it's 0. */
16629 neon_scalar_for_fmac_fp16_long (unsigned scalar
, unsigned quad_p
)
16631 unsigned regno
= NEON_SCALAR_REG (scalar
);
16632 unsigned elno
= NEON_SCALAR_INDEX (scalar
);
16636 if (regno
> 7 || elno
> 3)
16639 return ((regno
& 0x7)
16640 | ((elno
& 0x1) << 3)
16641 | (((elno
>> 1) & 0x1) << 5));
16645 if (regno
> 15 || elno
> 1)
16648 return (((regno
& 0x1) << 5)
16649 | ((regno
>> 1) & 0x7)
16650 | ((elno
& 0x1) << 3));
16654 first_error (_("scalar out of range for multiply instruction"));
16659 do_neon_fmac_maybe_scalar_long (int subtype
)
16661 enum neon_shape rs
;
16663 /* NOTE: vfmal/vfmsl use slightly different NEON three-same encoding. 'size"
16664 field (bits[21:20]) has different meaning. For scalar index variant, it's
16665 used to differentiate add and subtract, otherwise it's with fixed value
16669 if (inst
.cond
!= COND_ALWAYS
)
16670 as_warn (_("vfmal/vfmsl with FP16 type cannot be conditional, the "
16671 "behaviour is UNPREDICTABLE"));
16673 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_fp16_fml
),
16676 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_armv8
),
16679 /* vfmal/vfmsl are in three-same D/Q register format or the third operand can
16680 be a scalar index register. */
16681 if (inst
.operands
[2].isscalar
)
16683 high8
= 0xfe000000;
16686 rs
= neon_select_shape (NS_DHS
, NS_QDS
, NS_NULL
);
16690 high8
= 0xfc000000;
16693 inst
.instruction
|= (0x1 << 23);
16694 rs
= neon_select_shape (NS_DHH
, NS_QDD
, NS_NULL
);
16697 neon_check_type (3, rs
, N_EQK
, N_EQK
, N_KEY
| N_F16
);
16699 /* "opcode" from template has included "ubit", so simply pass 0 here. Also,
16700 the "S" bit in size field has been reused to differentiate vfmal and vfmsl,
16701 so we simply pass -1 as size. */
16702 unsigned quad_p
= (rs
== NS_QDD
|| rs
== NS_QDS
);
16703 neon_three_same (quad_p
, 0, size
);
16705 /* Undo neon_dp_fixup. Redo the high eight bits. */
16706 inst
.instruction
&= 0x00ffffff;
16707 inst
.instruction
|= high8
;
16709 #define LOW1(R) ((R) & 0x1)
16710 #define HI4(R) (((R) >> 1) & 0xf)
16711 /* Unlike usually NEON three-same, encoding for Vn and Vm will depend on
16712 whether the instruction is in Q form and whether Vm is a scalar indexed
16714 if (inst
.operands
[2].isscalar
)
16717 = neon_scalar_for_fmac_fp16_long (inst
.operands
[2].reg
, quad_p
);
16718 inst
.instruction
&= 0xffffffd0;
16719 inst
.instruction
|= rm
;
16723 /* Redo Rn as well. */
16724 inst
.instruction
&= 0xfff0ff7f;
16725 inst
.instruction
|= HI4 (inst
.operands
[1].reg
) << 16;
16726 inst
.instruction
|= LOW1 (inst
.operands
[1].reg
) << 7;
16731 /* Redo Rn and Rm. */
16732 inst
.instruction
&= 0xfff0ff50;
16733 inst
.instruction
|= HI4 (inst
.operands
[1].reg
) << 16;
16734 inst
.instruction
|= LOW1 (inst
.operands
[1].reg
) << 7;
16735 inst
.instruction
|= HI4 (inst
.operands
[2].reg
);
16736 inst
.instruction
|= LOW1 (inst
.operands
[2].reg
) << 5;
16741 do_neon_vfmal (void)
16743 return do_neon_fmac_maybe_scalar_long (0);
16747 do_neon_vfmsl (void)
16749 return do_neon_fmac_maybe_scalar_long (1);
16753 do_neon_dyadic_wide (void)
16755 struct neon_type_el et
= neon_check_type (3, NS_QQD
,
16756 N_EQK
| N_DBL
, N_EQK
| N_DBL
, N_SU_32
| N_KEY
);
16757 neon_mixed_length (et
, et
.size
);
16761 do_neon_dyadic_narrow (void)
16763 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
16764 N_EQK
| N_DBL
, N_EQK
, N_I16
| N_I32
| N_I64
| N_KEY
);
16765 /* Operand sign is unimportant, and the U bit is part of the opcode,
16766 so force the operand type to integer. */
16767 et
.type
= NT_integer
;
16768 neon_mixed_length (et
, et
.size
/ 2);
16772 do_neon_mul_sat_scalar_long (void)
16774 neon_mac_reg_scalar_long (N_S16
| N_S32
, N_S16
| N_S32
);
16778 do_neon_vmull (void)
16780 if (inst
.operands
[2].isscalar
)
16781 do_neon_mac_maybe_scalar_long ();
16784 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
16785 N_EQK
| N_DBL
, N_EQK
, N_SU_32
| N_P8
| N_P64
| N_KEY
);
16787 if (et
.type
== NT_poly
)
16788 NEON_ENCODE (POLY
, inst
);
16790 NEON_ENCODE (INTEGER
, inst
);
16792 /* For polynomial encoding the U bit must be zero, and the size must
16793 be 8 (encoded as 0b00) or, on ARMv8 or later 64 (encoded, non
16794 obviously, as 0b10). */
16797 /* Check we're on the correct architecture. */
16798 if (!mark_feature_used (&fpu_crypto_ext_armv8
))
16800 _("Instruction form not available on this architecture.");
16805 neon_mixed_length (et
, et
.size
);
16812 enum neon_shape rs
= neon_select_shape (NS_DDDI
, NS_QQQI
, NS_NULL
);
16813 struct neon_type_el et
= neon_check_type (3, rs
,
16814 N_EQK
, N_EQK
, N_8
| N_16
| N_32
| N_64
| N_KEY
);
16815 unsigned imm
= (inst
.operands
[3].imm
* et
.size
) / 8;
16817 constraint (imm
>= (unsigned) (neon_quad (rs
) ? 16 : 8),
16818 _("shift out of range"));
16819 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16820 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16821 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
16822 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
16823 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
16824 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
16825 inst
.instruction
|= neon_quad (rs
) << 6;
16826 inst
.instruction
|= imm
<< 8;
16828 neon_dp_fixup (&inst
);
16834 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
16835 struct neon_type_el et
= neon_check_type (2, rs
,
16836 N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
16837 unsigned op
= (inst
.instruction
>> 7) & 3;
16838 /* N (width of reversed regions) is encoded as part of the bitmask. We
16839 extract it here to check the elements to be reversed are smaller.
16840 Otherwise we'd get a reserved instruction. */
16841 unsigned elsize
= (op
== 2) ? 16 : (op
== 1) ? 32 : (op
== 0) ? 64 : 0;
16842 gas_assert (elsize
!= 0);
16843 constraint (et
.size
>= elsize
,
16844 _("elements must be smaller than reversal region"));
16845 neon_two_same (neon_quad (rs
), 1, et
.size
);
16851 if (inst
.operands
[1].isscalar
)
16853 enum neon_shape rs
= neon_select_shape (NS_DS
, NS_QS
, NS_NULL
);
16854 struct neon_type_el et
= neon_check_type (2, rs
,
16855 N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
16856 unsigned sizebits
= et
.size
>> 3;
16857 unsigned dm
= NEON_SCALAR_REG (inst
.operands
[1].reg
);
16858 int logsize
= neon_logbits (et
.size
);
16859 unsigned x
= NEON_SCALAR_INDEX (inst
.operands
[1].reg
) << logsize
;
16861 if (vfp_or_neon_is_neon (NEON_CHECK_CC
) == FAIL
)
16864 NEON_ENCODE (SCALAR
, inst
);
16865 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16866 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16867 inst
.instruction
|= LOW4 (dm
);
16868 inst
.instruction
|= HI1 (dm
) << 5;
16869 inst
.instruction
|= neon_quad (rs
) << 6;
16870 inst
.instruction
|= x
<< 17;
16871 inst
.instruction
|= sizebits
<< 16;
16873 neon_dp_fixup (&inst
);
16877 enum neon_shape rs
= neon_select_shape (NS_DR
, NS_QR
, NS_NULL
);
16878 struct neon_type_el et
= neon_check_type (2, rs
,
16879 N_8
| N_16
| N_32
| N_KEY
, N_EQK
);
16880 /* Duplicate ARM register to lanes of vector. */
16881 NEON_ENCODE (ARMREG
, inst
);
16884 case 8: inst
.instruction
|= 0x400000; break;
16885 case 16: inst
.instruction
|= 0x000020; break;
16886 case 32: inst
.instruction
|= 0x000000; break;
16889 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 12;
16890 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 16;
16891 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 7;
16892 inst
.instruction
|= neon_quad (rs
) << 21;
16893 /* The encoding for this instruction is identical for the ARM and Thumb
16894 variants, except for the condition field. */
16895 do_vfp_cond_or_thumb ();
16899 /* VMOV has particularly many variations. It can be one of:
16900 0. VMOV<c><q> <Qd>, <Qm>
16901 1. VMOV<c><q> <Dd>, <Dm>
16902 (Register operations, which are VORR with Rm = Rn.)
16903 2. VMOV<c><q>.<dt> <Qd>, #<imm>
16904 3. VMOV<c><q>.<dt> <Dd>, #<imm>
16906 4. VMOV<c><q>.<size> <Dn[x]>, <Rd>
16907 (ARM register to scalar.)
16908 5. VMOV<c><q> <Dm>, <Rd>, <Rn>
16909 (Two ARM registers to vector.)
16910 6. VMOV<c><q>.<dt> <Rd>, <Dn[x]>
16911 (Scalar to ARM register.)
16912 7. VMOV<c><q> <Rd>, <Rn>, <Dm>
16913 (Vector to two ARM registers.)
16914 8. VMOV.F32 <Sd>, <Sm>
16915 9. VMOV.F64 <Dd>, <Dm>
16916 (VFP register moves.)
16917 10. VMOV.F32 <Sd>, #imm
16918 11. VMOV.F64 <Dd>, #imm
16919 (VFP float immediate load.)
16920 12. VMOV <Rd>, <Sm>
16921 (VFP single to ARM reg.)
16922 13. VMOV <Sd>, <Rm>
16923 (ARM reg to VFP single.)
16924 14. VMOV <Rd>, <Re>, <Sn>, <Sm>
16925 (Two ARM regs to two VFP singles.)
16926 15. VMOV <Sd>, <Se>, <Rn>, <Rm>
16927 (Two VFP singles to two ARM regs.)
16929 These cases can be disambiguated using neon_select_shape, except cases 1/9
16930 and 3/11 which depend on the operand type too.
16932 All the encoded bits are hardcoded by this function.
16934 Cases 4, 6 may be used with VFPv1 and above (only 32-bit transfers!).
16935 Cases 5, 7 may be used with VFPv2 and above.
16937 FIXME: Some of the checking may be a bit sloppy (in a couple of cases you
16938 can specify a type where it doesn't make sense to, and is ignored). */
16943 enum neon_shape rs
= neon_select_shape (NS_RRFF
, NS_FFRR
, NS_DRR
, NS_RRD
,
16944 NS_QQ
, NS_DD
, NS_QI
, NS_DI
, NS_SR
,
16945 NS_RS
, NS_FF
, NS_FI
, NS_RF
, NS_FR
,
16946 NS_HR
, NS_RH
, NS_HI
, NS_NULL
);
16947 struct neon_type_el et
;
16948 const char *ldconst
= 0;
16952 case NS_DD
: /* case 1/9. */
16953 et
= neon_check_type (2, rs
, N_EQK
, N_F64
| N_KEY
);
16954 /* It is not an error here if no type is given. */
16956 if (et
.type
== NT_float
&& et
.size
== 64)
16958 do_vfp_nsyn_opcode ("fcpyd");
16961 /* fall through. */
16963 case NS_QQ
: /* case 0/1. */
16965 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
16967 /* The architecture manual I have doesn't explicitly state which
16968 value the U bit should have for register->register moves, but
16969 the equivalent VORR instruction has U = 0, so do that. */
16970 inst
.instruction
= 0x0200110;
16971 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16972 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16973 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
16974 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
16975 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
16976 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
16977 inst
.instruction
|= neon_quad (rs
) << 6;
16979 neon_dp_fixup (&inst
);
16983 case NS_DI
: /* case 3/11. */
16984 et
= neon_check_type (2, rs
, N_EQK
, N_F64
| N_KEY
);
16986 if (et
.type
== NT_float
&& et
.size
== 64)
16988 /* case 11 (fconstd). */
16989 ldconst
= "fconstd";
16990 goto encode_fconstd
;
16992 /* fall through. */
16994 case NS_QI
: /* case 2/3. */
16995 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
16997 inst
.instruction
= 0x0800010;
16998 neon_move_immediate ();
16999 neon_dp_fixup (&inst
);
17002 case NS_SR
: /* case 4. */
17004 unsigned bcdebits
= 0;
17006 unsigned dn
= NEON_SCALAR_REG (inst
.operands
[0].reg
);
17007 unsigned x
= NEON_SCALAR_INDEX (inst
.operands
[0].reg
);
17009 /* .<size> is optional here, defaulting to .32. */
17010 if (inst
.vectype
.elems
== 0
17011 && inst
.operands
[0].vectype
.type
== NT_invtype
17012 && inst
.operands
[1].vectype
.type
== NT_invtype
)
17014 inst
.vectype
.el
[0].type
= NT_untyped
;
17015 inst
.vectype
.el
[0].size
= 32;
17016 inst
.vectype
.elems
= 1;
17019 et
= neon_check_type (2, NS_NULL
, N_8
| N_16
| N_32
| N_KEY
, N_EQK
);
17020 logsize
= neon_logbits (et
.size
);
17022 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v1
),
17024 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_v1
)
17025 && et
.size
!= 32, _(BAD_FPU
));
17026 constraint (et
.type
== NT_invtype
, _("bad type for scalar"));
17027 constraint (x
>= 64 / et
.size
, _("scalar index out of range"));
17031 case 8: bcdebits
= 0x8; break;
17032 case 16: bcdebits
= 0x1; break;
17033 case 32: bcdebits
= 0x0; break;
17037 bcdebits
|= x
<< logsize
;
17039 inst
.instruction
= 0xe000b10;
17040 do_vfp_cond_or_thumb ();
17041 inst
.instruction
|= LOW4 (dn
) << 16;
17042 inst
.instruction
|= HI1 (dn
) << 7;
17043 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
17044 inst
.instruction
|= (bcdebits
& 3) << 5;
17045 inst
.instruction
|= (bcdebits
>> 2) << 21;
17049 case NS_DRR
: /* case 5 (fmdrr). */
17050 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v2
),
17053 inst
.instruction
= 0xc400b10;
17054 do_vfp_cond_or_thumb ();
17055 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
);
17056 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 5;
17057 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
17058 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
17061 case NS_RS
: /* case 6. */
17064 unsigned dn
= NEON_SCALAR_REG (inst
.operands
[1].reg
);
17065 unsigned x
= NEON_SCALAR_INDEX (inst
.operands
[1].reg
);
17066 unsigned abcdebits
= 0;
17068 /* .<dt> is optional here, defaulting to .32. */
17069 if (inst
.vectype
.elems
== 0
17070 && inst
.operands
[0].vectype
.type
== NT_invtype
17071 && inst
.operands
[1].vectype
.type
== NT_invtype
)
17073 inst
.vectype
.el
[0].type
= NT_untyped
;
17074 inst
.vectype
.el
[0].size
= 32;
17075 inst
.vectype
.elems
= 1;
17078 et
= neon_check_type (2, NS_NULL
,
17079 N_EQK
, N_S8
| N_S16
| N_U8
| N_U16
| N_32
| N_KEY
);
17080 logsize
= neon_logbits (et
.size
);
17082 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v1
),
17084 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_v1
)
17085 && et
.size
!= 32, _(BAD_FPU
));
17086 constraint (et
.type
== NT_invtype
, _("bad type for scalar"));
17087 constraint (x
>= 64 / et
.size
, _("scalar index out of range"));
17091 case 8: abcdebits
= (et
.type
== NT_signed
) ? 0x08 : 0x18; break;
17092 case 16: abcdebits
= (et
.type
== NT_signed
) ? 0x01 : 0x11; break;
17093 case 32: abcdebits
= 0x00; break;
17097 abcdebits
|= x
<< logsize
;
17098 inst
.instruction
= 0xe100b10;
17099 do_vfp_cond_or_thumb ();
17100 inst
.instruction
|= LOW4 (dn
) << 16;
17101 inst
.instruction
|= HI1 (dn
) << 7;
17102 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
17103 inst
.instruction
|= (abcdebits
& 3) << 5;
17104 inst
.instruction
|= (abcdebits
>> 2) << 21;
17108 case NS_RRD
: /* case 7 (fmrrd). */
17109 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v2
),
17112 inst
.instruction
= 0xc500b10;
17113 do_vfp_cond_or_thumb ();
17114 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
17115 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
17116 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
17117 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
17120 case NS_FF
: /* case 8 (fcpys). */
17121 do_vfp_nsyn_opcode ("fcpys");
17125 case NS_FI
: /* case 10 (fconsts). */
17126 ldconst
= "fconsts";
17128 if (!inst
.operands
[1].immisfloat
)
17131 /* Immediate has to fit in 8 bits so float is enough. */
17132 float imm
= (float) inst
.operands
[1].imm
;
17133 memcpy (&new_imm
, &imm
, sizeof (float));
17134 /* But the assembly may have been written to provide an integer
17135 bit pattern that equates to a float, so check that the
17136 conversion has worked. */
17137 if (is_quarter_float (new_imm
))
17139 if (is_quarter_float (inst
.operands
[1].imm
))
17140 as_warn (_("immediate constant is valid both as a bit-pattern and a floating point value (using the fp value)"));
17142 inst
.operands
[1].imm
= new_imm
;
17143 inst
.operands
[1].immisfloat
= 1;
17147 if (is_quarter_float (inst
.operands
[1].imm
))
17149 inst
.operands
[1].imm
= neon_qfloat_bits (inst
.operands
[1].imm
);
17150 do_vfp_nsyn_opcode (ldconst
);
17152 /* ARMv8.2 fp16 vmov.f16 instruction. */
17154 do_scalar_fp16_v82_encode ();
17157 first_error (_("immediate out of range"));
17161 case NS_RF
: /* case 12 (fmrs). */
17162 do_vfp_nsyn_opcode ("fmrs");
17163 /* ARMv8.2 fp16 vmov.f16 instruction. */
17165 do_scalar_fp16_v82_encode ();
17169 case NS_FR
: /* case 13 (fmsr). */
17170 do_vfp_nsyn_opcode ("fmsr");
17171 /* ARMv8.2 fp16 vmov.f16 instruction. */
17173 do_scalar_fp16_v82_encode ();
17176 /* The encoders for the fmrrs and fmsrr instructions expect three operands
17177 (one of which is a list), but we have parsed four. Do some fiddling to
17178 make the operands what do_vfp_reg2_from_sp2 and do_vfp_sp2_from_reg2
17180 case NS_RRFF
: /* case 14 (fmrrs). */
17181 constraint (inst
.operands
[3].reg
!= inst
.operands
[2].reg
+ 1,
17182 _("VFP registers must be adjacent"));
17183 inst
.operands
[2].imm
= 2;
17184 memset (&inst
.operands
[3], '\0', sizeof (inst
.operands
[3]));
17185 do_vfp_nsyn_opcode ("fmrrs");
17188 case NS_FFRR
: /* case 15 (fmsrr). */
17189 constraint (inst
.operands
[1].reg
!= inst
.operands
[0].reg
+ 1,
17190 _("VFP registers must be adjacent"));
17191 inst
.operands
[1] = inst
.operands
[2];
17192 inst
.operands
[2] = inst
.operands
[3];
17193 inst
.operands
[0].imm
= 2;
17194 memset (&inst
.operands
[3], '\0', sizeof (inst
.operands
[3]));
17195 do_vfp_nsyn_opcode ("fmsrr");
17199 /* neon_select_shape has determined that the instruction
17200 shape is wrong and has already set the error message. */
17209 do_neon_rshift_round_imm (void)
17211 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
17212 struct neon_type_el et
= neon_check_type (2, rs
, N_EQK
, N_SU_ALL
| N_KEY
);
17213 int imm
= inst
.operands
[2].imm
;
17215 /* imm == 0 case is encoded as VMOV for V{R}SHR. */
17218 inst
.operands
[2].present
= 0;
17223 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
17224 _("immediate out of range for shift"));
17225 neon_imm_shift (TRUE
, et
.type
== NT_unsigned
, neon_quad (rs
), et
,
17230 do_neon_movhf (void)
17232 enum neon_shape rs
= neon_select_shape (NS_HH
, NS_NULL
);
17233 constraint (rs
!= NS_HH
, _("invalid suffix"));
17235 if (inst
.cond
!= COND_ALWAYS
)
17239 as_warn (_("ARMv8.2 scalar fp16 instruction cannot be conditional,"
17240 " the behaviour is UNPREDICTABLE"));
17244 inst
.error
= BAD_COND
;
17249 do_vfp_sp_monadic ();
17252 inst
.instruction
|= 0xf0000000;
17256 do_neon_movl (void)
17258 struct neon_type_el et
= neon_check_type (2, NS_QD
,
17259 N_EQK
| N_DBL
, N_SU_32
| N_KEY
);
17260 unsigned sizebits
= et
.size
>> 3;
17261 inst
.instruction
|= sizebits
<< 19;
17262 neon_two_same (0, et
.type
== NT_unsigned
, -1);
/* Encode VTRN (vector transpose) for NS_DD or NS_QQ shapes with
   8/16/32-bit elements.  NOTE(review): the "static void do_neon_trn (void)"
   header was dropped by the extraction; reconstructed from upstream
   binutils -- confirm against the original file.  */

static void
do_neon_trn (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
					    N_EQK, N_8 | N_16 | N_32 | N_KEY);
  NEON_ENCODE (INTEGER, inst);
  neon_two_same (neon_quad (rs), 1, et.size);
}
/* Encode VZIP/VUZP.  On D registers with 32-bit elements the operation is
   architecturally identical to VTRN.32, so that case is re-dispatched.  */

static void
do_neon_zip_uzp (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
					    N_EQK, N_8 | N_16 | N_32 | N_KEY);
  if (rs == NS_DD && et.size == 32)
    {
      /* Special case: encode as VTRN.32 <Dd>, <Dm>.  */
      inst.instruction = N_MNEM_vtrn;
      /* NOTE(review): the dispatch call and early return were dropped by
	 the extraction; reconstructed from upstream binutils.  */
      do_neon_trn ();
      return;
    }
  neon_two_same (neon_quad (rs), 1, et.size);
}
17292 do_neon_sat_abs_neg (void)
17294 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
17295 struct neon_type_el et
= neon_check_type (2, rs
,
17296 N_EQK
, N_S8
| N_S16
| N_S32
| N_KEY
);
17297 neon_two_same (neon_quad (rs
), 1, et
.size
);
17301 do_neon_pair_long (void)
17303 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
17304 struct neon_type_el et
= neon_check_type (2, rs
, N_EQK
, N_SU_32
| N_KEY
);
17305 /* Unsigned is encoded in OP field (bit 7) for these instruction. */
17306 inst
.instruction
|= (et
.type
== NT_unsigned
) << 7;
17307 neon_two_same (neon_quad (rs
), 1, et
.size
);
17311 do_neon_recip_est (void)
17313 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
17314 struct neon_type_el et
= neon_check_type (2, rs
,
17315 N_EQK
| N_FLT
, N_F_16_32
| N_U32
| N_KEY
);
17316 inst
.instruction
|= (et
.type
== NT_float
) << 8;
17317 neon_two_same (neon_quad (rs
), 1, et
.size
);
/* Encode VCLS (count leading sign bits); signed element types only.
   NOTE(review): the function header was dropped by the extraction;
   "do_neon_cls" reconstructed from upstream binutils.  */

static void
do_neon_cls (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
					    N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
  neon_two_same (neon_quad (rs), 1, et.size);
}
/* Encode VCLZ (count leading zeros); any 8/16/32-bit integer type.
   NOTE(review): the function header was dropped by the extraction;
   "do_neon_clz" reconstructed from upstream binutils.  */

static void
do_neon_clz (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
					    N_EQK, N_I8 | N_I16 | N_I32 | N_KEY);
  neon_two_same (neon_quad (rs), 1, et.size);
}
/* Encode VCNT (population count); only 8-bit elements are valid.
   NOTE(review): the function header was dropped by the extraction;
   "do_neon_cnt" reconstructed from upstream binutils.  */

static void
do_neon_cnt (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
					    N_EQK | N_INT, N_8 | N_KEY);
  neon_two_same (neon_quad (rs), 1, et.size);
}
/* Encode VSWP (swap two vector registers); untyped, so -1 is passed as
   the element size.  NOTE(review): the function header was dropped by the
   extraction; "do_neon_swp" reconstructed from upstream binutils.  */

static void
do_neon_swp (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  neon_two_same (neon_quad (rs), 1, -1);
}
/* Encode VTBL/VTBX (table lookup): Dd, {list of 1-4 D registers}, Dm.  */

static void
do_neon_tbl_tbx (void)
{
  unsigned listlenbits;
  neon_check_type (3, NS_DLD, N_EQK, N_EQK, N_8 | N_KEY);

  if (inst.operands[1].imm < 1 || inst.operands[1].imm > 4)
    {
      first_error (_("bad list length for table lookup"));
      /* NOTE(review): early return reconstructed -- the line was dropped
	 by the extraction.  */
      return;
    }

  /* Bits [9:8] hold the list length minus one.  */
  listlenbits = inst.operands[1].imm - 1;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  inst.instruction |= listlenbits << 8;

  neon_dp_fixup (&inst);
}
/* Encode VLDM/VSTM (vector load/store multiple), including the
   single-precision forms, which are delegated to the VFP encoder.  */

static void
do_neon_ldm_stm (void)
{
  /* P, U and L bits are part of bitmask.  */
  int is_dbmode = (inst.instruction & (1 << 24)) != 0;
  unsigned offsetbits = inst.operands[1].imm * 2;

  if (inst.operands[1].issingle)
    {
      do_vfp_nsyn_ldm_stm (is_dbmode);
      /* NOTE(review): early return reconstructed -- the line was dropped
	 by the extraction.  */
      return;
    }

  constraint (is_dbmode && !inst.operands[0].writeback,
	      _("writeback (!) must be used for VLDMDB and VSTMDB"));

  /* NOTE(review): the "registers" continuation of this message was
     dropped by the extraction; reconstructed from upstream binutils.  */
  constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
	      _("register list must contain at least 1 and at most 16 "
		"registers"));

  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[0].writeback << 21;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
  inst.instruction |= HI1 (inst.operands[1].reg) << 22;

  inst.instruction |= offsetbits;

  do_vfp_cond_or_thumb ();
}
17409 do_neon_ldr_str (void)
17411 int is_ldr
= (inst
.instruction
& (1 << 20)) != 0;
17413 /* Use of PC in vstr in ARM mode is deprecated in ARMv7.
17414 And is UNPREDICTABLE in thumb mode. */
17416 && inst
.operands
[1].reg
== REG_PC
17417 && (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v7
) || thumb_mode
))
17420 inst
.error
= _("Use of PC here is UNPREDICTABLE");
17421 else if (warn_on_deprecated
)
17422 as_tsktsk (_("Use of PC here is deprecated"));
17425 if (inst
.operands
[0].issingle
)
17428 do_vfp_nsyn_opcode ("flds");
17430 do_vfp_nsyn_opcode ("fsts");
17432 /* ARMv8.2 vldr.16/vstr.16 instruction. */
17433 if (inst
.vectype
.el
[0].size
== 16)
17434 do_scalar_fp16_v82_encode ();
17439 do_vfp_nsyn_opcode ("fldd");
17441 do_vfp_nsyn_opcode ("fstd");
/* Encode the T32 VLDR/VSTR (system register) form: validate the
   addressing mode, then build the 0xec000f80-based encoding.  */

static void
do_t_vldr_vstr_sysreg (void)
{
  int fp_vldr_bitno = 20, sysreg_vldr_bitno = 20;
  bfd_boolean is_vldr = ((inst.instruction & (1 << fp_vldr_bitno)) != 0);

  /* Use of PC is UNPREDICTABLE.  */
  if (inst.operands[1].reg == REG_PC)
    inst.error = _("Use of PC here is UNPREDICTABLE");

  if (inst.operands[1].immisreg)
    inst.error = _("instruction does not accept register index");

  if (!inst.operands[1].isreg)
    inst.error = _("instruction does not accept PC-relative addressing");

  /* The offset must fit in a signed 7-bit field.  */
  if (abs (inst.operands[1].imm) >= (1 << 7))
    inst.error = _("immediate value out of range");

  inst.instruction = 0xec000f80;
  /* NOTE(review): the `if (is_vldr)` guard was dropped by the extraction;
     reconstructed from upstream binutils.  */
  if (is_vldr)
    inst.instruction |= 1 << sysreg_vldr_bitno;
  encode_arm_cp_address (1, TRUE, FALSE, BFD_RELOC_ARM_T32_VLDR_VSTR_OFF_IMM);
  inst.instruction |= (inst.operands[0].imm & 0x7) << 13;
  inst.instruction |= (inst.operands[0].imm & 0x8) << 19;
}
/* Dispatch VLDR/VSTR: the system-register form is selected when the first
   operand is not a VFP register; otherwise the classic FP load/store
   encoder is used.  NOTE(review): the if/else scaffolding was dropped by
   the extraction; reconstructed from upstream binutils (the surviving
   comments mark each branch).  */

static void
do_vldr_vstr (void)
{
  bfd_boolean sysreg_op = !inst.operands[0].isreg;

  /* VLDR/VSTR (System Register).  */
  if (sysreg_op)
    {
      if (!mark_feature_used (&arm_ext_v8_1m_main))
	as_bad (_("Instruction not permitted on this architecture"));
      do_t_vldr_vstr_sysreg ();
    }
  /* VLDR/VSTR.  */
  else
    {
      if (!mark_feature_used (&fpu_vfp_ext_v1xd))
	as_bad (_("Instruction not permitted on this architecture"));
      do_neon_ldr_str ();
    }
}
17494 /* "interleave" version also handles non-interleaving register VLD1/VST1
17498 do_neon_ld_st_interleave (void)
17500 struct neon_type_el et
= neon_check_type (1, NS_NULL
,
17501 N_8
| N_16
| N_32
| N_64
);
17502 unsigned alignbits
= 0;
17504 /* The bits in this table go:
17505 0: register stride of one (0) or two (1)
17506 1,2: register list length, minus one (1, 2, 3, 4).
17507 3,4: <n> in instruction type, minus one (VLD<n> / VST<n>).
17508 We use -1 for invalid entries. */
17509 const int typetable
[] =
17511 0x7, -1, 0xa, -1, 0x6, -1, 0x2, -1, /* VLD1 / VST1. */
17512 -1, -1, 0x8, 0x9, -1, -1, 0x3, -1, /* VLD2 / VST2. */
17513 -1, -1, -1, -1, 0x4, 0x5, -1, -1, /* VLD3 / VST3. */
17514 -1, -1, -1, -1, -1, -1, 0x0, 0x1 /* VLD4 / VST4. */
17518 if (et
.type
== NT_invtype
)
17521 if (inst
.operands
[1].immisalign
)
17522 switch (inst
.operands
[1].imm
>> 8)
17524 case 64: alignbits
= 1; break;
17526 if (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 2
17527 && NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 4)
17528 goto bad_alignment
;
17532 if (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 4)
17533 goto bad_alignment
;
17538 first_error (_("bad alignment"));
17542 inst
.instruction
|= alignbits
<< 4;
17543 inst
.instruction
|= neon_logbits (et
.size
) << 6;
17545 /* Bits [4:6] of the immediate in a list specifier encode register stride
17546 (minus 1) in bit 4, and list length in bits [5:6]. We put the <n> of
17547 VLD<n>/VST<n> in bits [9:8] of the initial bitmask. Suck it out here, look
17548 up the right value for "type" in a table based on this value and the given
17549 list style, then stick it back. */
17550 idx
= ((inst
.operands
[0].imm
>> 4) & 7)
17551 | (((inst
.instruction
>> 8) & 3) << 3);
17553 typebits
= typetable
[idx
];
17555 constraint (typebits
== -1, _("bad list type for instruction"));
17556 constraint (((inst
.instruction
>> 8) & 3) && et
.size
== 64,
17557 _("bad element type for instruction"));
17559 inst
.instruction
&= ~0xf00;
17560 inst
.instruction
|= typebits
<< 8;
17563 /* Check alignment is valid for do_neon_ld_st_lane and do_neon_ld_dup.
17564 *DO_ALIGN is set to 1 if the relevant alignment bit should be set, 0
17565 otherwise. The variable arguments are a list of pairs of legal (size, align)
17566 values, terminated with -1. */
17569 neon_alignment_bit (int size
, int align
, int *do_alignment
, ...)
17572 int result
= FAIL
, thissize
, thisalign
;
17574 if (!inst
.operands
[1].immisalign
)
17580 va_start (ap
, do_alignment
);
17584 thissize
= va_arg (ap
, int);
17585 if (thissize
== -1)
17587 thisalign
= va_arg (ap
, int);
17589 if (size
== thissize
&& align
== thisalign
)
17592 while (result
!= SUCCESS
);
17596 if (result
== SUCCESS
)
17599 first_error (_("unsupported alignment for instruction"));
17605 do_neon_ld_st_lane (void)
17607 struct neon_type_el et
= neon_check_type (1, NS_NULL
, N_8
| N_16
| N_32
);
17608 int align_good
, do_alignment
= 0;
17609 int logsize
= neon_logbits (et
.size
);
17610 int align
= inst
.operands
[1].imm
>> 8;
17611 int n
= (inst
.instruction
>> 8) & 3;
17612 int max_el
= 64 / et
.size
;
17614 if (et
.type
== NT_invtype
)
17617 constraint (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != n
+ 1,
17618 _("bad list length"));
17619 constraint (NEON_LANE (inst
.operands
[0].imm
) >= max_el
,
17620 _("scalar index out of range"));
17621 constraint (n
!= 0 && NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2
17623 _("stride of 2 unavailable when element size is 8"));
17627 case 0: /* VLD1 / VST1. */
17628 align_good
= neon_alignment_bit (et
.size
, align
, &do_alignment
, 16, 16,
17630 if (align_good
== FAIL
)
17634 unsigned alignbits
= 0;
17637 case 16: alignbits
= 0x1; break;
17638 case 32: alignbits
= 0x3; break;
17641 inst
.instruction
|= alignbits
<< 4;
17645 case 1: /* VLD2 / VST2. */
17646 align_good
= neon_alignment_bit (et
.size
, align
, &do_alignment
, 8, 16,
17647 16, 32, 32, 64, -1);
17648 if (align_good
== FAIL
)
17651 inst
.instruction
|= 1 << 4;
17654 case 2: /* VLD3 / VST3. */
17655 constraint (inst
.operands
[1].immisalign
,
17656 _("can't use alignment with this instruction"));
17659 case 3: /* VLD4 / VST4. */
17660 align_good
= neon_alignment_bit (et
.size
, align
, &do_alignment
, 8, 32,
17661 16, 64, 32, 64, 32, 128, -1);
17662 if (align_good
== FAIL
)
17666 unsigned alignbits
= 0;
17669 case 8: alignbits
= 0x1; break;
17670 case 16: alignbits
= 0x1; break;
17671 case 32: alignbits
= (align
== 64) ? 0x1 : 0x2; break;
17674 inst
.instruction
|= alignbits
<< 4;
17681 /* Reg stride of 2 is encoded in bit 5 when size==16, bit 6 when size==32. */
17682 if (n
!= 0 && NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2)
17683 inst
.instruction
|= 1 << (4 + logsize
);
17685 inst
.instruction
|= NEON_LANE (inst
.operands
[0].imm
) << (logsize
+ 5);
17686 inst
.instruction
|= logsize
<< 10;
17689 /* Encode single n-element structure to all lanes VLD<n> instructions. */
17692 do_neon_ld_dup (void)
17694 struct neon_type_el et
= neon_check_type (1, NS_NULL
, N_8
| N_16
| N_32
);
17695 int align_good
, do_alignment
= 0;
17697 if (et
.type
== NT_invtype
)
17700 switch ((inst
.instruction
>> 8) & 3)
17702 case 0: /* VLD1. */
17703 gas_assert (NEON_REG_STRIDE (inst
.operands
[0].imm
) != 2);
17704 align_good
= neon_alignment_bit (et
.size
, inst
.operands
[1].imm
>> 8,
17705 &do_alignment
, 16, 16, 32, 32, -1);
17706 if (align_good
== FAIL
)
17708 switch (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
))
17711 case 2: inst
.instruction
|= 1 << 5; break;
17712 default: first_error (_("bad list length")); return;
17714 inst
.instruction
|= neon_logbits (et
.size
) << 6;
17717 case 1: /* VLD2. */
17718 align_good
= neon_alignment_bit (et
.size
, inst
.operands
[1].imm
>> 8,
17719 &do_alignment
, 8, 16, 16, 32, 32, 64,
17721 if (align_good
== FAIL
)
17723 constraint (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 2,
17724 _("bad list length"));
17725 if (NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2)
17726 inst
.instruction
|= 1 << 5;
17727 inst
.instruction
|= neon_logbits (et
.size
) << 6;
17730 case 2: /* VLD3. */
17731 constraint (inst
.operands
[1].immisalign
,
17732 _("can't use alignment with this instruction"));
17733 constraint (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 3,
17734 _("bad list length"));
17735 if (NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2)
17736 inst
.instruction
|= 1 << 5;
17737 inst
.instruction
|= neon_logbits (et
.size
) << 6;
17740 case 3: /* VLD4. */
17742 int align
= inst
.operands
[1].imm
>> 8;
17743 align_good
= neon_alignment_bit (et
.size
, align
, &do_alignment
, 8, 32,
17744 16, 64, 32, 64, 32, 128, -1);
17745 if (align_good
== FAIL
)
17747 constraint (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 4,
17748 _("bad list length"));
17749 if (NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2)
17750 inst
.instruction
|= 1 << 5;
17751 if (et
.size
== 32 && align
== 128)
17752 inst
.instruction
|= 0x3 << 6;
17754 inst
.instruction
|= neon_logbits (et
.size
) << 6;
17761 inst
.instruction
|= do_alignment
<< 4;
17764 /* Disambiguate VLD<n> and VST<n> instructions, and fill in common bits (those
17765 apart from bits [11:4]. */
17768 do_neon_ldx_stx (void)
17770 if (inst
.operands
[1].isreg
)
17771 constraint (inst
.operands
[1].reg
== REG_PC
, BAD_PC
);
17773 switch (NEON_LANE (inst
.operands
[0].imm
))
17775 case NEON_INTERLEAVE_LANES
:
17776 NEON_ENCODE (INTERLV
, inst
);
17777 do_neon_ld_st_interleave ();
17780 case NEON_ALL_LANES
:
17781 NEON_ENCODE (DUP
, inst
);
17782 if (inst
.instruction
== N_INV
)
17784 first_error ("only loads support such operands");
17791 NEON_ENCODE (LANE
, inst
);
17792 do_neon_ld_st_lane ();
17795 /* L bit comes from bit mask. */
17796 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
17797 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
17798 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
17800 if (inst
.operands
[1].postind
)
17802 int postreg
= inst
.operands
[1].imm
& 0xf;
17803 constraint (!inst
.operands
[1].immisreg
,
17804 _("post-index must be a register"));
17805 constraint (postreg
== 0xd || postreg
== 0xf,
17806 _("bad register for post-index"));
17807 inst
.instruction
|= postreg
;
17811 constraint (inst
.operands
[1].immisreg
, BAD_ADDR_MODE
);
17812 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
17813 || inst
.relocs
[0].exp
.X_add_number
!= 0,
17816 if (inst
.operands
[1].writeback
)
17818 inst
.instruction
|= 0xd;
17821 inst
.instruction
|= 0xf;
17825 inst
.instruction
|= 0xf9000000;
17827 inst
.instruction
|= 0xf4000000;
17832 do_vfp_nsyn_fpv8 (enum neon_shape rs
)
17834 /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
17835 D register operands. */
17836 if (neon_shape_class
[rs
] == SC_DOUBLE
)
17837 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
17840 NEON_ENCODE (FPV8
, inst
);
17842 if (rs
== NS_FFF
|| rs
== NS_HHH
)
17844 do_vfp_sp_dyadic ();
17846 /* ARMv8.2 fp16 instruction. */
17848 do_scalar_fp16_v82_encode ();
17851 do_vfp_dp_rd_rn_rm ();
17854 inst
.instruction
|= 0x100;
17856 inst
.instruction
|= 0xf0000000;
/* Encode VSEL (conditional select, FP v8); must be outside an IT block.
   NOTE(review): the function header was dropped by the extraction;
   "do_vsel" reconstructed from upstream binutils.  */

static void
do_vsel (void)
{
  set_it_insn_type (OUTSIDE_IT_INSN);

  if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8) != SUCCESS)
    first_error (_("invalid instruction shape"));
}
/* Encode VMAXNM/VMINNM: try the VFP FP v8 form first, then fall back to
   the Neon three-same form.  NOTE(review): the function header and the
   early `return` statements were dropped by the extraction; reconstructed
   from upstream binutils.  */

static void
do_vmaxnm (void)
{
  set_it_insn_type (OUTSIDE_IT_INSN);

  if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH8) == FAIL)
    return;

  neon_dyadic_misc (NT_untyped, N_F_16_32, 0);
}
17883 do_vrint_1 (enum neon_cvt_mode mode
)
17885 enum neon_shape rs
= neon_select_shape (NS_HH
, NS_FF
, NS_DD
, NS_QQ
, NS_NULL
);
17886 struct neon_type_el et
;
17891 /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
17892 D register operands. */
17893 if (neon_shape_class
[rs
] == SC_DOUBLE
)
17894 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
17897 et
= neon_check_type (2, rs
, N_EQK
| N_VFP
, N_F_ALL
| N_KEY
17899 if (et
.type
!= NT_invtype
)
17901 /* VFP encodings. */
17902 if (mode
== neon_cvt_mode_a
|| mode
== neon_cvt_mode_n
17903 || mode
== neon_cvt_mode_p
|| mode
== neon_cvt_mode_m
)
17904 set_it_insn_type (OUTSIDE_IT_INSN
);
17906 NEON_ENCODE (FPV8
, inst
);
17907 if (rs
== NS_FF
|| rs
== NS_HH
)
17908 do_vfp_sp_monadic ();
17910 do_vfp_dp_rd_rm ();
17914 case neon_cvt_mode_r
: inst
.instruction
|= 0x00000000; break;
17915 case neon_cvt_mode_z
: inst
.instruction
|= 0x00000080; break;
17916 case neon_cvt_mode_x
: inst
.instruction
|= 0x00010000; break;
17917 case neon_cvt_mode_a
: inst
.instruction
|= 0xf0000000; break;
17918 case neon_cvt_mode_n
: inst
.instruction
|= 0xf0010000; break;
17919 case neon_cvt_mode_p
: inst
.instruction
|= 0xf0020000; break;
17920 case neon_cvt_mode_m
: inst
.instruction
|= 0xf0030000; break;
17924 inst
.instruction
|= (rs
== NS_DD
) << 8;
17925 do_vfp_cond_or_thumb ();
17927 /* ARMv8.2 fp16 vrint instruction. */
17929 do_scalar_fp16_v82_encode ();
17933 /* Neon encodings (or something broken...). */
17935 et
= neon_check_type (2, rs
, N_EQK
, N_F_16_32
| N_KEY
);
17937 if (et
.type
== NT_invtype
)
17940 set_it_insn_type (OUTSIDE_IT_INSN
);
17941 NEON_ENCODE (FLOAT
, inst
);
17943 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH8
) == FAIL
)
17946 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
17947 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
17948 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
17949 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
17950 inst
.instruction
|= neon_quad (rs
) << 6;
17951 /* Mask off the original size bits and reencode them. */
17952 inst
.instruction
= ((inst
.instruction
& 0xfff3ffff)
17953 | neon_logbits (et
.size
) << 18);
17957 case neon_cvt_mode_z
: inst
.instruction
|= 3 << 7; break;
17958 case neon_cvt_mode_x
: inst
.instruction
|= 1 << 7; break;
17959 case neon_cvt_mode_a
: inst
.instruction
|= 2 << 7; break;
17960 case neon_cvt_mode_n
: inst
.instruction
|= 0 << 7; break;
17961 case neon_cvt_mode_p
: inst
.instruction
|= 7 << 7; break;
17962 case neon_cvt_mode_m
: inst
.instruction
|= 5 << 7; break;
17963 case neon_cvt_mode_r
: inst
.error
= _("invalid rounding mode"); break;
17968 inst
.instruction
|= 0xfc000000;
17970 inst
.instruction
|= 0xf0000000;
/* VRINT{X,Z,R,A,N,P,M}: thin wrappers that pick the rounding mode and
   delegate to do_vrint_1.  NOTE(review): the "static void do_vrint?"
   headers were dropped by the extraction; names reconstructed from
   upstream binutils -- confirm against the original file.  */

static void
do_vrintx (void)
{
  do_vrint_1 (neon_cvt_mode_x);
}

static void
do_vrintz (void)
{
  do_vrint_1 (neon_cvt_mode_z);
}

static void
do_vrintr (void)
{
  do_vrint_1 (neon_cvt_mode_r);
}

static void
do_vrinta (void)
{
  do_vrint_1 (neon_cvt_mode_a);
}

static void
do_vrintn (void)
{
  do_vrint_1 (neon_cvt_mode_n);
}

static void
do_vrintp (void)
{
  do_vrint_1 (neon_cvt_mode_p);
}

static void
do_vrintm (void)
{
  do_vrint_1 (neon_cvt_mode_m);
}
/* Validate and pack a scalar operand for VCMLA: for 16-bit elements the
   index (0-1) is folded into bit 4 of the register number; for 32-bit
   elements only index 0 is legal.  Reports "scalar out of range"
   otherwise.  NOTE(review): the return-type line and both `return`
   statements in the success/failure paths were dropped by the extraction;
   reconstructed from upstream binutils.  */

static unsigned
neon_scalar_for_vcmla (unsigned opnd, unsigned elsize)
{
  unsigned regno = NEON_SCALAR_REG (opnd);
  unsigned elno = NEON_SCALAR_INDEX (opnd);

  if (elsize == 16 && elno < 2 && regno < 16)
    return regno | (elno << 4);
  else if (elsize == 32 && elno == 0)
    return regno;

  first_error (_("scalar out of range"));
  return 0;
}
18034 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_armv8
),
18036 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
,
18037 _("expression too complex"));
18038 unsigned rot
= inst
.relocs
[0].exp
.X_add_number
;
18039 constraint (rot
!= 0 && rot
!= 90 && rot
!= 180 && rot
!= 270,
18040 _("immediate out of range"));
18042 if (inst
.operands
[2].isscalar
)
18044 enum neon_shape rs
= neon_select_shape (NS_DDSI
, NS_QQSI
, NS_NULL
);
18045 unsigned size
= neon_check_type (3, rs
, N_EQK
, N_EQK
,
18046 N_KEY
| N_F16
| N_F32
).size
;
18047 unsigned m
= neon_scalar_for_vcmla (inst
.operands
[2].reg
, size
);
18049 inst
.instruction
= 0xfe000800;
18050 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
18051 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
18052 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
18053 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
18054 inst
.instruction
|= LOW4 (m
);
18055 inst
.instruction
|= HI1 (m
) << 5;
18056 inst
.instruction
|= neon_quad (rs
) << 6;
18057 inst
.instruction
|= rot
<< 20;
18058 inst
.instruction
|= (size
== 32) << 23;
18062 enum neon_shape rs
= neon_select_shape (NS_DDDI
, NS_QQQI
, NS_NULL
);
18063 unsigned size
= neon_check_type (3, rs
, N_EQK
, N_EQK
,
18064 N_KEY
| N_F16
| N_F32
).size
;
18065 neon_three_same (neon_quad (rs
), 0, -1);
18066 inst
.instruction
&= 0x00ffffff; /* Undo neon_dp_fixup. */
18067 inst
.instruction
|= 0xfc200800;
18068 inst
.instruction
|= rot
<< 23;
18069 inst
.instruction
|= (size
== 32) << 20;
18076 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_armv8
),
18078 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
,
18079 _("expression too complex"));
18080 unsigned rot
= inst
.relocs
[0].exp
.X_add_number
;
18081 constraint (rot
!= 90 && rot
!= 270, _("immediate out of range"));
18082 enum neon_shape rs
= neon_select_shape (NS_DDDI
, NS_QQQI
, NS_NULL
);
18083 unsigned size
= neon_check_type (3, rs
, N_EQK
, N_EQK
,
18084 N_KEY
| N_F16
| N_F32
).size
;
18085 neon_three_same (neon_quad (rs
), 0, -1);
18086 inst
.instruction
&= 0x00ffffff; /* Undo neon_dp_fixup. */
18087 inst
.instruction
|= 0xfc800800;
18088 inst
.instruction
|= (rot
== 270) << 24;
18089 inst
.instruction
|= (size
== 32) << 20;
18092 /* Dot Product instructions encoding support. */
18095 do_neon_dotproduct (int unsigned_p
)
18097 enum neon_shape rs
;
18098 unsigned scalar_oprd2
= 0;
18101 if (inst
.cond
!= COND_ALWAYS
)
18102 as_warn (_("Dot Product instructions cannot be conditional, the behaviour "
18103 "is UNPREDICTABLE"));
18105 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_armv8
),
18108 /* Dot Product instructions are in three-same D/Q register format or the third
18109 operand can be a scalar index register. */
18110 if (inst
.operands
[2].isscalar
)
18112 scalar_oprd2
= neon_scalar_for_mul (inst
.operands
[2].reg
, 32);
18113 high8
= 0xfe000000;
18114 rs
= neon_select_shape (NS_DDS
, NS_QQS
, NS_NULL
);
18118 high8
= 0xfc000000;
18119 rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
18123 neon_check_type (3, rs
, N_EQK
, N_EQK
, N_KEY
| N_U8
);
18125 neon_check_type (3, rs
, N_EQK
, N_EQK
, N_KEY
| N_S8
);
18127 /* The "U" bit in traditional Three Same encoding is fixed to 0 for Dot
18128 Product instruction, so we pass 0 as the "ubit" parameter. And the
18129 "Size" field are fixed to 0x2, so we pass 32 as the "size" parameter. */
18130 neon_three_same (neon_quad (rs
), 0, 32);
18132 /* Undo neon_dp_fixup. Dot Product instructions are using a slightly
18133 different NEON three-same encoding. */
18134 inst
.instruction
&= 0x00ffffff;
18135 inst
.instruction
|= high8
;
18136 /* Encode 'U' bit which indicates signedness. */
18137 inst
.instruction
|= (unsigned_p
? 1 : 0) << 4;
18138 /* Re-encode operand2 if it's indexed scalar operand. What has been encoded
18139 from inst.operand[2].reg in neon_three_same is GAS's internal encoding, not
18140 the instruction encoding. */
18141 if (inst
.operands
[2].isscalar
)
18143 inst
.instruction
&= 0xffffffd0;
18144 inst
.instruction
|= LOW4 (scalar_oprd2
);
18145 inst
.instruction
|= HI1 (scalar_oprd2
) << 5;
18149 /* Dot Product instructions for signed integer. */
/* Dot Product instructions for signed integer.

   Fix: the original wrote `return do_neon_dotproduct (0);`.  Returning an
   expression from a function returning void is an ISO C constraint
   violation (C11 6.8.6.4p1); GCC only tolerates it without -pedantic.
   Calling the helper plainly is behaviorally identical and portable.  */

static void
do_neon_dotproduct_s (void)
{
  do_neon_dotproduct (0);
}
18157 /* Dot Product instructions for unsigned integer. */
/* Dot Product instructions for unsigned integer.

   Fix: the original wrote `return do_neon_dotproduct (1);`.  Returning an
   expression from a function returning void is an ISO C constraint
   violation (C11 6.8.6.4p1); GCC only tolerates it without -pedantic.
   Calling the helper plainly is behaviorally identical and portable.  */

static void
do_neon_dotproduct_u (void)
{
  do_neon_dotproduct (1);
}
18165 /* Crypto v1 instructions. */
/* Shared encoder for the two-operand crypto instructions (AES*, SHA1H,
   SHA1SU1, SHA256SU0).  ELTTYPE is the required element type; OP selects
   the variant in bits [7:6], or -1 when the opcode carries no OP field.
   NOTE(review): the NT_invtype early-return, `inst.error = NULL;`, the
   `if (op != -1)` guard and the `if (thumb_mode)`/`else` lines were
   dropped by the extraction; reconstructed from upstream binutils --
   confirm against the original file.  */

static void
do_crypto_2op_1 (unsigned elttype, int op)
{
  set_it_insn_type (OUTSIDE_IT_INSN);

  if (neon_check_type (2, NS_QQ, N_EQK | N_UNT, elttype | N_UNT | N_KEY).type
      == NT_invtype)
    return;

  inst.error = NULL;

  NEON_ENCODE (INTEGER, inst);
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  if (op != -1)
    inst.instruction |= op << 6;

  if (thumb_mode)
    inst.instruction |= 0xfc000000;
  else
    inst.instruction |= 0xf0000000;
}
/* Shared encoder for the three-operand crypto instructions (SHA1C/P/M,
   SHA1SU0, SHA256H/H2/SU1).  U and OP select the variant via the
   three-same encoding's size field (8 << op).  NOTE(review): the
   NT_invtype early-return and `inst.error = NULL;` lines were dropped by
   the extraction; reconstructed from upstream binutils.  */

static void
do_crypto_3op_1 (int u, int op)
{
  set_it_insn_type (OUTSIDE_IT_INSN);

  if (neon_check_type (3, NS_QQQ, N_EQK | N_UNT, N_EQK | N_UNT,
		       N_32 | N_UNT | N_KEY).type == NT_invtype)
    return;

  inst.error = NULL;

  NEON_ENCODE (INTEGER, inst);
  neon_three_same (1, u, 8 << op);
}
/* Per-mnemonic crypto wrappers: each selects the variant arguments for
   do_crypto_2op_1 / do_crypto_3op_1.  NOTE(review): most of the
   "static void do_XXX (void)" headers were dropped by the extraction
   (only do_sha256su1 and do_sha256su0 survive); the names were
   reconstructed from upstream binutils -- confirm against the original
   file.  */

static void
do_aese (void)
{
  do_crypto_2op_1 (N_8, 0);
}

static void
do_aesd (void)
{
  do_crypto_2op_1 (N_8, 1);
}

static void
do_aesmc (void)
{
  do_crypto_2op_1 (N_8, 2);
}

static void
do_aesimc (void)
{
  do_crypto_2op_1 (N_8, 3);
}

static void
do_sha1c (void)
{
  do_crypto_3op_1 (0, 0);
}

static void
do_sha1p (void)
{
  do_crypto_3op_1 (0, 1);
}

static void
do_sha1m (void)
{
  do_crypto_3op_1 (0, 2);
}

static void
do_sha1su0 (void)
{
  do_crypto_3op_1 (0, 3);
}

static void
do_sha256h (void)
{
  do_crypto_3op_1 (1, 0);
}

static void
do_sha256h2 (void)
{
  do_crypto_3op_1 (1, 1);
}

static void
do_sha256su1 (void)
{
  do_crypto_3op_1 (1, 2);
}

static void
do_sha1h (void)
{
  do_crypto_2op_1 (N_32, -1);
}

static void
do_sha1su1 (void)
{
  do_crypto_2op_1 (N_32, 0);
}

static void
do_sha256su0 (void)
{
  do_crypto_2op_1 (N_32, 1);
}
/* Shared encoder for the CRC32/CRC32C family.  POLY and SZ are placed in
   the encoding's P and sz fields, whose bit positions differ between the
   Thumb and ARM encodings.  Using r15 for any operand is UNPREDICTABLE
   and draws a warning.  */

static void
do_crc32_1 (unsigned int poly, unsigned int sz)
{
  unsigned int Rd = inst.operands[0].reg;
  unsigned int Rn = inst.operands[1].reg;
  unsigned int Rm = inst.operands[2].reg;

  set_it_insn_type (OUTSIDE_IT_INSN);
  /* Field placement depends on the instruction set in use.  */
  inst.instruction |= LOW4 (Rd) << (thumb_mode ? 8 : 12);
  inst.instruction |= LOW4 (Rn) << 16;
  inst.instruction |= LOW4 (Rm);
  inst.instruction |= sz << (thumb_mode ? 4 : 21);	/* sz.  */
  inst.instruction |= poly << (thumb_mode ? 20 : 9);	/* P.  */

  if (Rd == REG_PC || Rn == REG_PC || Rm == REG_PC)
    as_warn (UNPRED_REG ("r15"));
}
/* Encode VJCVT (JavaScript convert, F64 -> S32), an FP v8 instruction.
   NOTE(review): the function header and the _(BAD_FPU) constraint message
   were dropped by the extraction; reconstructed from upstream binutils --
   confirm against the original file.  */

static void
do_vjcvt (void)
{
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
	      _(BAD_FPU));
  neon_check_type (2, NS_FD, N_S32, N_F64);
  do_vfp_sp_dp_cvt ();
  do_vfp_cond_or_thumb ();
}
18355 /* Overall per-instruction processing. */
18357 /* We need to be able to fix up arbitrary expressions in some statements.
18358 This is so that we can handle symbols that are an arbitrary distance from
18359 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
18360 which returns part of an address in a form which will be valid for
18361 a data instruction. We do this by pushing the expression into a symbol
18362 in the expr_section, and creating a fix for that. */
18365 fix_new_arm (fragS
* frag
,
18379 /* Create an absolute valued symbol, so we have something to
18380 refer to in the object file. Unfortunately for us, gas's
18381 generic expression parsing will already have folded out
18382 any use of .set foo/.type foo %function that may have
18383 been used to set type information of the target location,
18384 that's being specified symbolically. We have to presume
18385 the user knows what they are doing. */
18389 sprintf (name
, "*ABS*0x%lx", (unsigned long)exp
->X_add_number
);
18391 symbol
= symbol_find_or_make (name
);
18392 S_SET_SEGMENT (symbol
, absolute_section
);
18393 symbol_set_frag (symbol
, &zero_address_frag
);
18394 S_SET_VALUE (symbol
, exp
->X_add_number
);
18395 exp
->X_op
= O_symbol
;
18396 exp
->X_add_symbol
= symbol
;
18397 exp
->X_add_number
= 0;
18403 new_fix
= fix_new_exp (frag
, where
, size
, exp
, pc_rel
,
18404 (enum bfd_reloc_code_real
) reloc
);
18408 new_fix
= (fixS
*) fix_new (frag
, where
, size
, make_expr_symbol (exp
), 0,
18409 pc_rel
, (enum bfd_reloc_code_real
) reloc
);
18413 /* Mark whether the fix is to a THUMB instruction, or an ARM
18415 new_fix
->tc_fix_data
= thumb_mode
;
18418 /* Create a frg for an instruction requiring relaxation. */
18420 output_relax_insn (void)
18426 /* The size of the instruction is unknown, so tie the debug info to the
18427 start of the instruction. */
18428 dwarf2_emit_insn (0);
18430 switch (inst
.relocs
[0].exp
.X_op
)
18433 sym
= inst
.relocs
[0].exp
.X_add_symbol
;
18434 offset
= inst
.relocs
[0].exp
.X_add_number
;
18438 offset
= inst
.relocs
[0].exp
.X_add_number
;
18441 sym
= make_expr_symbol (&inst
.relocs
[0].exp
);
18445 to
= frag_var (rs_machine_dependent
, INSN_SIZE
, THUMB_SIZE
,
18446 inst
.relax
, sym
, offset
, NULL
/*offset, opcode*/);
18447 md_number_to_chars (to
, inst
.instruction
, THUMB_SIZE
);
18450 /* Write a 32-bit thumb instruction to buf. */
/* Write a 32-bit thumb instruction to buf: the high halfword is emitted
   first, then the low halfword, each via md_number_to_chars so target
   endianness is respected within each halfword.  */

static void
put_thumb32_insn (char * buf, unsigned long insn)
{
  md_number_to_chars (buf, insn >> 16, THUMB_SIZE);
  md_number_to_chars (buf + THUMB_SIZE, insn, THUMB_SIZE);
}
18459 output_inst (const char * str
)
18465 as_bad ("%s -- `%s'", inst
.error
, str
);
18470 output_relax_insn ();
18473 if (inst
.size
== 0)
18476 to
= frag_more (inst
.size
);
18477 /* PR 9814: Record the thumb mode into the current frag so that we know
18478 what type of NOP padding to use, if necessary. We override any previous
18479 setting so that if the mode has changed then the NOPS that we use will
18480 match the encoding of the last instruction in the frag. */
18481 frag_now
->tc_frag_data
.thumb_mode
= thumb_mode
| MODE_RECORDED
;
18483 if (thumb_mode
&& (inst
.size
> THUMB_SIZE
))
18485 gas_assert (inst
.size
== (2 * THUMB_SIZE
));
18486 put_thumb32_insn (to
, inst
.instruction
);
18488 else if (inst
.size
> INSN_SIZE
)
18490 gas_assert (inst
.size
== (2 * INSN_SIZE
));
18491 md_number_to_chars (to
, inst
.instruction
, INSN_SIZE
);
18492 md_number_to_chars (to
+ INSN_SIZE
, inst
.instruction
, INSN_SIZE
);
18495 md_number_to_chars (to
, inst
.instruction
, inst
.size
);
18498 for (r
= 0; r
< ARM_IT_MAX_RELOCS
; r
++)
18500 if (inst
.relocs
[r
].type
!= BFD_RELOC_UNUSED
)
18501 fix_new_arm (frag_now
, to
- frag_now
->fr_literal
,
18502 inst
.size
, & inst
.relocs
[r
].exp
, inst
.relocs
[r
].pc_rel
,
18503 inst
.relocs
[r
].type
);
18506 dwarf2_emit_insn (inst
.size
);
18510 output_it_inst (int cond
, int mask
, char * to
)
18512 unsigned long instruction
= 0xbf00;
18515 instruction
|= mask
;
18516 instruction
|= cond
<< 4;
18520 to
= frag_more (2);
18522 dwarf2_emit_insn (2);
18526 md_number_to_chars (to
, instruction
, 2);
/* Tag values used in struct asm_opcode's tag field.  They describe how
   (and whether) a mnemonic accepts a conditional affix, which drives the
   lookup algorithm in opcode_lookup.  */
enum opcode_tag
{
  OT_unconditional,	/* Instruction cannot be conditionalized.
			   The ARM condition field is still 0xE.  */
  OT_unconditionalF,	/* Instruction cannot be conditionalized
			   and carries 0xF in its ARM condition field.  */
  OT_csuffix,		/* Instruction takes a conditional suffix.  */
  OT_csuffixF,		/* Some forms of the instruction take a conditional
			   suffix, others place 0xF where the condition field
			   would be.  */
  OT_cinfix3,		/* Instruction takes a conditional infix,
			   beginning at character index 3.  (In
			   unified mode, it becomes a suffix.)  */
  OT_cinfix3_deprecated, /* The same as OT_cinfix3.  This is used for
			    tsts, cmps, cmns, and teqs.  */
  OT_cinfix3_legacy,	/* Legacy instruction takes a conditional infix at
			   character index 3, even in unified mode.  Used for
			   legacy instructions where suffix and infix forms
			   may be ambiguous.  */
  OT_csuf_or_in3,	/* Instruction takes either a conditional
			   suffix or an infix at character index 3.  */
  OT_odd_infix_unc,	/* This is the unconditional variant of an
			   instruction that takes a conditional infix
			   at an unusual position.  In unified mode,
			   this variant will accept a suffix.  */
  OT_odd_infix_0	/* Values greater than or equal to OT_odd_infix_0
			   are the conditional variants of instructions that
			   take conditional infixes in unusual positions.
			   The infix appears at character index
			   (tag - OT_odd_infix_0).  These are not accepted
			   in unified mode.  */
};
18565 /* Subroutine of md_assemble, responsible for looking up the primary
18566 opcode from the mnemonic the user wrote. STR points to the
18567 beginning of the mnemonic.
18569 This is not simply a hash table lookup, because of conditional
18570 variants. Most instructions have conditional variants, which are
18571 expressed with a _conditional affix_ to the mnemonic. If we were
18572 to encode each conditional variant as a literal string in the opcode
18573 table, it would have approximately 20,000 entries.
18575 Most mnemonics take this affix as a suffix, and in unified syntax,
18576 'most' is upgraded to 'all'. However, in the divided syntax, some
18577 instructions take the affix as an infix, notably the s-variants of
18578 the arithmetic instructions. Of those instructions, all but six
18579 have the infix appear after the third character of the mnemonic.
18581 Accordingly, the algorithm for looking up primary opcodes given
18584 1. Look up the identifier in the opcode table.
18585 If we find a match, go to step U.
18587 2. Look up the last two characters of the identifier in the
18588 conditions table. If we find a match, look up the first N-2
18589 characters of the identifier in the opcode table. If we
18590 find a match, go to step CE.
18592 3. Look up the fourth and fifth characters of the identifier in
18593 the conditions table. If we find a match, extract those
18594 characters from the identifier, and look up the remaining
18595 characters in the opcode table. If we find a match, go
18600 U. Examine the tag field of the opcode structure, in case this is
18601 one of the six instructions with its conditional infix in an
18602 unusual place. If it is, the tag tells us where to find the
18603 infix; look it up in the conditions table and set inst.cond
18604 accordingly. Otherwise, this is an unconditional instruction.
18605 Again set inst.cond accordingly. Return the opcode structure.
18607 CE. Examine the tag field to make sure this is an instruction that
18608 should receive a conditional suffix. If it is not, fail.
18609 Otherwise, set inst.cond from the suffix we already looked up,
18610 and return the opcode structure.
18612 CM. Examine the tag field to make sure this is an instruction that
18613 should receive a conditional infix after the third character.
18614 If it is not, fail. Otherwise, undo the edits to the current
18615 line of input and proceed as for case CE. */
18617 static const struct asm_opcode
*
18618 opcode_lookup (char **str
)
18622 const struct asm_opcode
*opcode
;
18623 const struct asm_cond
*cond
;
18626 /* Scan up to the end of the mnemonic, which must end in white space,
18627 '.' (in unified mode, or for Neon/VFP instructions), or end of string. */
18628 for (base
= end
= *str
; *end
!= '\0'; end
++)
18629 if (*end
== ' ' || *end
== '.')
18635 /* Handle a possible width suffix and/or Neon type suffix. */
18640 /* The .w and .n suffixes are only valid if the unified syntax is in
18642 if (unified_syntax
&& end
[1] == 'w')
18644 else if (unified_syntax
&& end
[1] == 'n')
18649 inst
.vectype
.elems
= 0;
18651 *str
= end
+ offset
;
18653 if (end
[offset
] == '.')
18655 /* See if we have a Neon type suffix (possible in either unified or
18656 non-unified ARM syntax mode). */
18657 if (parse_neon_type (&inst
.vectype
, str
) == FAIL
)
18660 else if (end
[offset
] != '\0' && end
[offset
] != ' ')
18666 /* Look for unaffixed or special-case affixed mnemonic. */
18667 opcode
= (const struct asm_opcode
*) hash_find_n (arm_ops_hsh
, base
,
18672 if (opcode
->tag
< OT_odd_infix_0
)
18674 inst
.cond
= COND_ALWAYS
;
18678 if (warn_on_deprecated
&& unified_syntax
)
18679 as_tsktsk (_("conditional infixes are deprecated in unified syntax"));
18680 affix
= base
+ (opcode
->tag
- OT_odd_infix_0
);
18681 cond
= (const struct asm_cond
*) hash_find_n (arm_cond_hsh
, affix
, 2);
18684 inst
.cond
= cond
->value
;
18688 /* Cannot have a conditional suffix on a mnemonic of less than two
18690 if (end
- base
< 3)
18693 /* Look for suffixed mnemonic. */
18695 cond
= (const struct asm_cond
*) hash_find_n (arm_cond_hsh
, affix
, 2);
18696 opcode
= (const struct asm_opcode
*) hash_find_n (arm_ops_hsh
, base
,
18698 if (opcode
&& cond
)
18701 switch (opcode
->tag
)
18703 case OT_cinfix3_legacy
:
18704 /* Ignore conditional suffixes matched on infix only mnemonics. */
18708 case OT_cinfix3_deprecated
:
18709 case OT_odd_infix_unc
:
18710 if (!unified_syntax
)
18712 /* Fall through. */
18716 case OT_csuf_or_in3
:
18717 inst
.cond
= cond
->value
;
18720 case OT_unconditional
:
18721 case OT_unconditionalF
:
18723 inst
.cond
= cond
->value
;
18726 /* Delayed diagnostic. */
18727 inst
.error
= BAD_COND
;
18728 inst
.cond
= COND_ALWAYS
;
18737 /* Cannot have a usual-position infix on a mnemonic of less than
18738 six characters (five would be a suffix). */
18739 if (end
- base
< 6)
18742 /* Look for infixed mnemonic in the usual position. */
18744 cond
= (const struct asm_cond
*) hash_find_n (arm_cond_hsh
, affix
, 2);
18748 memcpy (save
, affix
, 2);
18749 memmove (affix
, affix
+ 2, (end
- affix
) - 2);
18750 opcode
= (const struct asm_opcode
*) hash_find_n (arm_ops_hsh
, base
,
18752 memmove (affix
+ 2, affix
, (end
- affix
) - 2);
18753 memcpy (affix
, save
, 2);
18756 && (opcode
->tag
== OT_cinfix3
18757 || opcode
->tag
== OT_cinfix3_deprecated
18758 || opcode
->tag
== OT_csuf_or_in3
18759 || opcode
->tag
== OT_cinfix3_legacy
))
18762 if (warn_on_deprecated
&& unified_syntax
18763 && (opcode
->tag
== OT_cinfix3
18764 || opcode
->tag
== OT_cinfix3_deprecated
))
18765 as_tsktsk (_("conditional infixes are deprecated in unified syntax"));
18767 inst
.cond
= cond
->value
;
18774 /* This function generates an initial IT instruction, leaving its block
18775 virtually open for the new instructions. Eventually,
18776 the mask will be updated by now_it_add_mask () each time
18777 a new instruction needs to be included in the IT block.
18778 Finally, the block is closed with close_automatic_it_block ().
18779 The block closure can be requested either from md_assemble (),
18780 a tencode (), or due to a label hook. */
18783 new_automatic_it_block (int cond
)
18785 now_it
.state
= AUTOMATIC_IT_BLOCK
;
18786 now_it
.mask
= 0x18;
18788 now_it
.block_length
= 1;
18789 mapping_state (MAP_THUMB
);
18790 now_it
.insn
= output_it_inst (cond
, now_it
.mask
, NULL
);
18791 now_it
.warn_deprecated
= FALSE
;
18792 now_it
.insn_cond
= TRUE
;
18795 /* Close an automatic IT block.
18796 See comments in new_automatic_it_block (). */
18799 close_automatic_it_block (void)
18801 now_it
.mask
= 0x10;
18802 now_it
.block_length
= 0;
18805 /* Update the mask of the current automatically-generated IT
18806 instruction. See comments in new_automatic_it_block (). */
18809 now_it_add_mask (int cond
)
18811 #define CLEAR_BIT(value, nbit) ((value) & ~(1 << (nbit)))
18812 #define SET_BIT_VALUE(value, bitvalue, nbit) (CLEAR_BIT (value, nbit) \
18813 | ((bitvalue) << (nbit)))
18814 const int resulting_bit
= (cond
& 1);
18816 now_it
.mask
&= 0xf;
18817 now_it
.mask
= SET_BIT_VALUE (now_it
.mask
,
18819 (5 - now_it
.block_length
));
18820 now_it
.mask
= SET_BIT_VALUE (now_it
.mask
,
18822 ((5 - now_it
.block_length
) - 1) );
18823 output_it_inst (now_it
.cc
, now_it
.mask
, now_it
.insn
);
18826 #undef SET_BIT_VALUE
18829 /* The IT blocks handling machinery is accessed through the these functions:
18830 it_fsm_pre_encode () from md_assemble ()
18831 set_it_insn_type () optional, from the tencode functions
18832 set_it_insn_type_last () ditto
18833 in_it_block () ditto
18834 it_fsm_post_encode () from md_assemble ()
18835 force_automatic_it_block_close () from label handling functions
18838 1) md_assemble () calls it_fsm_pre_encode () before calling tencode (),
18839 initializing the IT insn type with a generic initial value depending
18840 on the inst.condition.
18841 2) During the tencode function, two things may happen:
18842 a) The tencode function overrides the IT insn type by
18843 calling either set_it_insn_type (type) or set_it_insn_type_last ().
18844 b) The tencode function queries the IT block state by
18845 calling in_it_block () (i.e. to determine narrow/not narrow mode).
18847 Both set_it_insn_type and in_it_block run the internal FSM state
18848 handling function (handle_it_state), because: a) setting the IT insn
18849 type may incur in an invalid state (exiting the function),
18850 and b) querying the state requires the FSM to be updated.
18851 Specifically we want to avoid creating an IT block for conditional
18852 branches, so it_fsm_pre_encode is actually a guess and we can't
18853 determine whether an IT block is required until the tencode () routine
18854 has decided what type of instruction this actually it.
18855 Because of this, if set_it_insn_type and in_it_block have to be used,
18856 set_it_insn_type has to be called first.
18858 set_it_insn_type_last () is a wrapper of set_it_insn_type (type), that
18859 determines the insn IT type depending on the inst.cond code.
18860 When a tencode () routine encodes an instruction that can be
18861 either outside an IT block, or, in the case of being inside, has to be
18862 the last one, set_it_insn_type_last () will determine the proper
18863 IT instruction type based on the inst.cond code. Otherwise,
18864 set_it_insn_type can be called for overriding that logic or
18865 for covering other cases.
18867 Calling handle_it_state () may not transition the IT block state to
18868 OUTSIDE_IT_BLOCK immediately, since the (current) state could be
18869 still queried. Instead, if the FSM determines that the state should
18870 be transitioned to OUTSIDE_IT_BLOCK, a flag is marked to be closed
18871 after the tencode () function: that's what it_fsm_post_encode () does.
18873 Since in_it_block () calls the state handling function to get an
18874 updated state, an error may occur (due to invalid insns combination).
18875 In that case, inst.error is set.
18876 Therefore, inst.error has to be checked after the execution of
18877 the tencode () routine.
18879 3) Back in md_assemble(), it_fsm_post_encode () is called to commit
18880 any pending state change (if any) that didn't take place in
18881 handle_it_state () as explained above. */
18884 it_fsm_pre_encode (void)
18886 if (inst
.cond
!= COND_ALWAYS
)
18887 inst
.it_insn_type
= INSIDE_IT_INSN
;
18889 inst
.it_insn_type
= OUTSIDE_IT_INSN
;
18891 now_it
.state_handled
= 0;
18894 /* IT state FSM handling function. */
18897 handle_it_state (void)
18899 now_it
.state_handled
= 1;
18900 now_it
.insn_cond
= FALSE
;
18902 switch (now_it
.state
)
18904 case OUTSIDE_IT_BLOCK
:
18905 switch (inst
.it_insn_type
)
18907 case OUTSIDE_IT_INSN
:
18910 case INSIDE_IT_INSN
:
18911 case INSIDE_IT_LAST_INSN
:
18912 if (thumb_mode
== 0)
18915 && !(implicit_it_mode
& IMPLICIT_IT_MODE_ARM
))
18916 as_tsktsk (_("Warning: conditional outside an IT block"\
18921 if ((implicit_it_mode
& IMPLICIT_IT_MODE_THUMB
)
18922 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2
))
18924 /* Automatically generate the IT instruction. */
18925 new_automatic_it_block (inst
.cond
);
18926 if (inst
.it_insn_type
== INSIDE_IT_LAST_INSN
)
18927 close_automatic_it_block ();
18931 inst
.error
= BAD_OUT_IT
;
18937 case IF_INSIDE_IT_LAST_INSN
:
18938 case NEUTRAL_IT_INSN
:
18942 now_it
.state
= MANUAL_IT_BLOCK
;
18943 now_it
.block_length
= 0;
18948 case AUTOMATIC_IT_BLOCK
:
18949 /* Three things may happen now:
18950 a) We should increment current it block size;
18951 b) We should close current it block (closing insn or 4 insns);
18952 c) We should close current it block and start a new one (due
18953 to incompatible conditions or
18954 4 insns-length block reached). */
18956 switch (inst
.it_insn_type
)
18958 case OUTSIDE_IT_INSN
:
18959 /* The closure of the block shall happen immediately,
18960 so any in_it_block () call reports the block as closed. */
18961 force_automatic_it_block_close ();
18964 case INSIDE_IT_INSN
:
18965 case INSIDE_IT_LAST_INSN
:
18966 case IF_INSIDE_IT_LAST_INSN
:
18967 now_it
.block_length
++;
18969 if (now_it
.block_length
> 4
18970 || !now_it_compatible (inst
.cond
))
18972 force_automatic_it_block_close ();
18973 if (inst
.it_insn_type
!= IF_INSIDE_IT_LAST_INSN
)
18974 new_automatic_it_block (inst
.cond
);
18978 now_it
.insn_cond
= TRUE
;
18979 now_it_add_mask (inst
.cond
);
18982 if (now_it
.state
== AUTOMATIC_IT_BLOCK
18983 && (inst
.it_insn_type
== INSIDE_IT_LAST_INSN
18984 || inst
.it_insn_type
== IF_INSIDE_IT_LAST_INSN
))
18985 close_automatic_it_block ();
18988 case NEUTRAL_IT_INSN
:
18989 now_it
.block_length
++;
18990 now_it
.insn_cond
= TRUE
;
18992 if (now_it
.block_length
> 4)
18993 force_automatic_it_block_close ();
18995 now_it_add_mask (now_it
.cc
& 1);
18999 close_automatic_it_block ();
19000 now_it
.state
= MANUAL_IT_BLOCK
;
19005 case MANUAL_IT_BLOCK
:
19007 /* Check conditional suffixes. */
19008 const int cond
= now_it
.cc
^ ((now_it
.mask
>> 4) & 1) ^ 1;
19011 now_it
.mask
&= 0x1f;
19012 is_last
= (now_it
.mask
== 0x10);
19013 now_it
.insn_cond
= TRUE
;
19015 switch (inst
.it_insn_type
)
19017 case OUTSIDE_IT_INSN
:
19018 inst
.error
= BAD_NOT_IT
;
19021 case INSIDE_IT_INSN
:
19022 if (cond
!= inst
.cond
)
19024 inst
.error
= BAD_IT_COND
;
19029 case INSIDE_IT_LAST_INSN
:
19030 case IF_INSIDE_IT_LAST_INSN
:
19031 if (cond
!= inst
.cond
)
19033 inst
.error
= BAD_IT_COND
;
19038 inst
.error
= BAD_BRANCH
;
19043 case NEUTRAL_IT_INSN
:
19044 /* The BKPT instruction is unconditional even in an IT block. */
19048 inst
.error
= BAD_IT_IT
;
19058 struct depr_insn_mask
19060 unsigned long pattern
;
19061 unsigned long mask
;
19062 const char* description
;
19065 /* List of 16-bit instruction patterns deprecated in an IT block in
19067 static const struct depr_insn_mask depr_it_insns
[] = {
19068 { 0xc000, 0xc000, N_("Short branches, Undefined, SVC, LDM/STM") },
19069 { 0xb000, 0xb000, N_("Miscellaneous 16-bit instructions") },
19070 { 0xa000, 0xb800, N_("ADR") },
19071 { 0x4800, 0xf800, N_("Literal loads") },
19072 { 0x4478, 0xf478, N_("Hi-register ADD, MOV, CMP, BX, BLX using pc") },
19073 { 0x4487, 0xfc87, N_("Hi-register ADD, MOV, CMP using pc") },
19074 /* NOTE: 0x00dd is not the real encoding, instead, it is the 'tvalue'
19075 field in asm_opcode. 'tvalue' is used at the stage this check happen. */
19076 { 0x00dd, 0x7fff, N_("ADD/SUB sp, sp #imm") },
19081 it_fsm_post_encode (void)
19085 if (!now_it
.state_handled
)
19086 handle_it_state ();
19088 if (now_it
.insn_cond
19089 && !now_it
.warn_deprecated
19090 && warn_on_deprecated
19091 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8
)
19092 && !ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_m
))
19094 if (inst
.instruction
>= 0x10000)
19096 as_tsktsk (_("IT blocks containing 32-bit Thumb instructions are "
19097 "performance deprecated in ARMv8-A and ARMv8-R"));
19098 now_it
.warn_deprecated
= TRUE
;
19102 const struct depr_insn_mask
*p
= depr_it_insns
;
19104 while (p
->mask
!= 0)
19106 if ((inst
.instruction
& p
->mask
) == p
->pattern
)
19108 as_tsktsk (_("IT blocks containing 16-bit Thumb "
19109 "instructions of the following class are "
19110 "performance deprecated in ARMv8-A and "
19111 "ARMv8-R: %s"), p
->description
);
19112 now_it
.warn_deprecated
= TRUE
;
19120 if (now_it
.block_length
> 1)
19122 as_tsktsk (_("IT blocks containing more than one conditional "
19123 "instruction are performance deprecated in ARMv8-A and "
19125 now_it
.warn_deprecated
= TRUE
;
19129 is_last
= (now_it
.mask
== 0x10);
19132 now_it
.state
= OUTSIDE_IT_BLOCK
;
19138 force_automatic_it_block_close (void)
19140 if (now_it
.state
== AUTOMATIC_IT_BLOCK
)
19142 close_automatic_it_block ();
19143 now_it
.state
= OUTSIDE_IT_BLOCK
;
19151 if (!now_it
.state_handled
)
19152 handle_it_state ();
19154 return now_it
.state
!= OUTSIDE_IT_BLOCK
;
19157 /* Whether OPCODE only has T32 encoding. Since this function is only used by
19158 t32_insn_ok, OPCODE enabled by v6t2 extension bit do not need to be listed
19159 here, hence the "known" in the function name. */
19162 known_t32_only_insn (const struct asm_opcode
*opcode
)
19164 /* Original Thumb-1 wide instruction. */
19165 if (opcode
->tencode
== do_t_blx
19166 || opcode
->tencode
== do_t_branch23
19167 || ARM_CPU_HAS_FEATURE (*opcode
->tvariant
, arm_ext_msr
)
19168 || ARM_CPU_HAS_FEATURE (*opcode
->tvariant
, arm_ext_barrier
))
19171 /* Wide-only instruction added to ARMv8-M Baseline. */
19172 if (ARM_CPU_HAS_FEATURE (*opcode
->tvariant
, arm_ext_v8m_m_only
)
19173 || ARM_CPU_HAS_FEATURE (*opcode
->tvariant
, arm_ext_atomics
)
19174 || ARM_CPU_HAS_FEATURE (*opcode
->tvariant
, arm_ext_v6t2_v8m
)
19175 || ARM_CPU_HAS_FEATURE (*opcode
->tvariant
, arm_ext_div
))
19181 /* Whether wide instruction variant can be used if available for a valid OPCODE
19185 t32_insn_ok (arm_feature_set arch
, const struct asm_opcode
*opcode
)
19187 if (known_t32_only_insn (opcode
))
19190 /* Instruction with narrow and wide encoding added to ARMv8-M. Availability
19191 of variant T3 of B.W is checked in do_t_branch. */
19192 if (ARM_CPU_HAS_FEATURE (arch
, arm_ext_v8m
)
19193 && opcode
->tencode
== do_t_branch
)
19196 /* MOV accepts T1/T3 encodings under Baseline, T3 encoding is 32bit. */
19197 if (ARM_CPU_HAS_FEATURE (arch
, arm_ext_v8m
)
19198 && opcode
->tencode
== do_t_mov_cmp
19199 /* Make sure CMP instruction is not affected. */
19200 && opcode
->aencode
== do_mov
)
19203 /* Wide instruction variants of all instructions with narrow *and* wide
19204 variants become available with ARMv6t2. Other opcodes are either
19205 narrow-only or wide-only and are thus available if OPCODE is valid. */
19206 if (ARM_CPU_HAS_FEATURE (arch
, arm_ext_v6t2
))
19209 /* OPCODE with narrow only instruction variant or wide variant not
19215 md_assemble (char *str
)
19218 const struct asm_opcode
* opcode
;
19220 /* Align the previous label if needed. */
19221 if (last_label_seen
!= NULL
)
19223 symbol_set_frag (last_label_seen
, frag_now
);
19224 S_SET_VALUE (last_label_seen
, (valueT
) frag_now_fix ());
19225 S_SET_SEGMENT (last_label_seen
, now_seg
);
19228 memset (&inst
, '\0', sizeof (inst
));
19230 for (r
= 0; r
< ARM_IT_MAX_RELOCS
; r
++)
19231 inst
.relocs
[r
].type
= BFD_RELOC_UNUSED
;
19233 opcode
= opcode_lookup (&p
);
19236 /* It wasn't an instruction, but it might be a register alias of
19237 the form alias .req reg, or a Neon .dn/.qn directive. */
19238 if (! create_register_alias (str
, p
)
19239 && ! create_neon_reg_alias (str
, p
))
19240 as_bad (_("bad instruction `%s'"), str
);
19245 if (warn_on_deprecated
&& opcode
->tag
== OT_cinfix3_deprecated
)
19246 as_tsktsk (_("s suffix on comparison instruction is deprecated"));
19248 /* The value which unconditional instructions should have in place of the
19249 condition field. */
19250 inst
.uncond_value
= (opcode
->tag
== OT_csuffixF
) ? 0xf : -1;
19254 arm_feature_set variant
;
19256 variant
= cpu_variant
;
19257 /* Only allow coprocessor instructions on Thumb-2 capable devices. */
19258 if (!ARM_CPU_HAS_FEATURE (variant
, arm_arch_t2
))
19259 ARM_CLEAR_FEATURE (variant
, variant
, fpu_any_hard
);
19260 /* Check that this instruction is supported for this CPU. */
19261 if (!opcode
->tvariant
19262 || (thumb_mode
== 1
19263 && !ARM_CPU_HAS_FEATURE (variant
, *opcode
->tvariant
)))
19265 if (opcode
->tencode
== do_t_swi
)
19266 as_bad (_("SVC is not permitted on this architecture"));
19268 as_bad (_("selected processor does not support `%s' in Thumb mode"), str
);
19271 if (inst
.cond
!= COND_ALWAYS
&& !unified_syntax
19272 && opcode
->tencode
!= do_t_branch
)
19274 as_bad (_("Thumb does not support conditional execution"));
19278 /* Two things are addressed here:
19279 1) Implicit require narrow instructions on Thumb-1.
19280 This avoids relaxation accidentally introducing Thumb-2
19282 2) Reject wide instructions in non Thumb-2 cores.
19284 Only instructions with narrow and wide variants need to be handled
19285 but selecting all non wide-only instructions is easier. */
19286 if (!ARM_CPU_HAS_FEATURE (variant
, arm_ext_v6t2
)
19287 && !t32_insn_ok (variant
, opcode
))
19289 if (inst
.size_req
== 0)
19291 else if (inst
.size_req
== 4)
19293 if (ARM_CPU_HAS_FEATURE (variant
, arm_ext_v8m
))
19294 as_bad (_("selected processor does not support 32bit wide "
19295 "variant of instruction `%s'"), str
);
19297 as_bad (_("selected processor does not support `%s' in "
19298 "Thumb-2 mode"), str
);
19303 inst
.instruction
= opcode
->tvalue
;
19305 if (!parse_operands (p
, opcode
->operands
, /*thumb=*/TRUE
))
19307 /* Prepare the it_insn_type for those encodings that don't set
19309 it_fsm_pre_encode ();
19311 opcode
->tencode ();
19313 it_fsm_post_encode ();
19316 if (!(inst
.error
|| inst
.relax
))
19318 gas_assert (inst
.instruction
< 0xe800 || inst
.instruction
> 0xffff);
19319 inst
.size
= (inst
.instruction
> 0xffff ? 4 : 2);
19320 if (inst
.size_req
&& inst
.size_req
!= inst
.size
)
19322 as_bad (_("cannot honor width suffix -- `%s'"), str
);
19327 /* Something has gone badly wrong if we try to relax a fixed size
19329 gas_assert (inst
.size_req
== 0 || !inst
.relax
);
19331 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
19332 *opcode
->tvariant
);
19333 /* Many Thumb-2 instructions also have Thumb-1 variants, so explicitly
19334 set those bits when Thumb-2 32-bit instructions are seen. The impact
19335 of relaxable instructions will be considered later after we finish all
19337 if (ARM_FEATURE_CORE_EQUAL (cpu_variant
, arm_arch_any
))
19338 variant
= arm_arch_none
;
19340 variant
= cpu_variant
;
19341 if (inst
.size
== 4 && !t32_insn_ok (variant
, opcode
))
19342 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
19345 check_neon_suffixes
;
19349 mapping_state (MAP_THUMB
);
19352 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
))
19356 /* bx is allowed on v5 cores, and sometimes on v4 cores. */
19357 is_bx
= (opcode
->aencode
== do_bx
);
19359 /* Check that this instruction is supported for this CPU. */
19360 if (!(is_bx
&& fix_v4bx
)
19361 && !(opcode
->avariant
&&
19362 ARM_CPU_HAS_FEATURE (cpu_variant
, *opcode
->avariant
)))
19364 as_bad (_("selected processor does not support `%s' in ARM mode"), str
);
19369 as_bad (_("width suffixes are invalid in ARM mode -- `%s'"), str
);
19373 inst
.instruction
= opcode
->avalue
;
19374 if (opcode
->tag
== OT_unconditionalF
)
19375 inst
.instruction
|= 0xFU
<< 28;
19377 inst
.instruction
|= inst
.cond
<< 28;
19378 inst
.size
= INSN_SIZE
;
19379 if (!parse_operands (p
, opcode
->operands
, /*thumb=*/FALSE
))
19381 it_fsm_pre_encode ();
19382 opcode
->aencode ();
19383 it_fsm_post_encode ();
19385 /* Arm mode bx is marked as both v4T and v5 because it's still required
19386 on a hypothetical non-thumb v5 core. */
19388 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
, arm_ext_v4t
);
19390 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
,
19391 *opcode
->avariant
);
19393 check_neon_suffixes
;
19397 mapping_state (MAP_ARM
);
19402 as_bad (_("attempt to use an ARM instruction on a Thumb-only processor "
19410 check_it_blocks_finished (void)
19415 for (sect
= stdoutput
->sections
; sect
!= NULL
; sect
= sect
->next
)
19416 if (seg_info (sect
)->tc_segment_info_data
.current_it
.state
19417 == MANUAL_IT_BLOCK
)
19419 as_warn (_("section '%s' finished with an open IT block."),
19423 if (now_it
.state
== MANUAL_IT_BLOCK
)
19424 as_warn (_("file finished with an open IT block."));
19428 /* Various frobbings of labels and their addresses. */
19431 arm_start_line_hook (void)
19433 last_label_seen
= NULL
;
19437 arm_frob_label (symbolS
* sym
)
19439 last_label_seen
= sym
;
19441 ARM_SET_THUMB (sym
, thumb_mode
);
19443 #if defined OBJ_COFF || defined OBJ_ELF
19444 ARM_SET_INTERWORK (sym
, support_interwork
);
19447 force_automatic_it_block_close ();
19449 /* Note - do not allow local symbols (.Lxxx) to be labelled
19450 as Thumb functions. This is because these labels, whilst
19451 they exist inside Thumb code, are not the entry points for
19452 possible ARM->Thumb calls. Also, these labels can be used
19453 as part of a computed goto or switch statement. eg gcc
19454 can generate code that looks like this:
19456 ldr r2, [pc, .Laaa]
19466 The first instruction loads the address of the jump table.
19467 The second instruction converts a table index into a byte offset.
19468 The third instruction gets the jump address out of the table.
19469 The fourth instruction performs the jump.
19471 If the address stored at .Laaa is that of a symbol which has the
19472 Thumb_Func bit set, then the linker will arrange for this address
19473 to have the bottom bit set, which in turn would mean that the
19474 address computation performed by the third instruction would end
19475 up with the bottom bit set. Since the ARM is capable of unaligned
19476 word loads, the instruction would then load the incorrect address
19477 out of the jump table, and chaos would ensue. */
19478 if (label_is_thumb_function_name
19479 && (S_GET_NAME (sym
)[0] != '.' || S_GET_NAME (sym
)[1] != 'L')
19480 && (bfd_get_section_flags (stdoutput
, now_seg
) & SEC_CODE
) != 0)
19482 /* When the address of a Thumb function is taken the bottom
19483 bit of that address should be set. This will allow
19484 interworking between Arm and Thumb functions to work
19487 THUMB_SET_FUNC (sym
, 1);
19489 label_is_thumb_function_name
= FALSE
;
19492 dwarf2_emit_label (sym
);
19496 arm_data_in_code (void)
19498 if (thumb_mode
&& ! strncmp (input_line_pointer
+ 1, "data:", 5))
19500 *input_line_pointer
= '/';
19501 input_line_pointer
+= 5;
19502 *input_line_pointer
= 0;
19510 arm_canonicalize_symbol_name (char * name
)
19514 if (thumb_mode
&& (len
= strlen (name
)) > 5
19515 && streq (name
+ len
- 5, "/data"))
19516 *(name
+ len
- 5) = 0;
/* Table of all register names defined by default.  The user can
   define additional names with .req.  Note that all register names
   should appear in both upper and lowercase variants.  Some registers
   also have mixed-case names.  */

/* One reg_entry initializer: name string, encoded number, type.  */
#define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE, 0 }
/* Name formed from prefix + number (e.g. r0..r15).  */
#define REGNUM(p,n,t) REGDEF(p##n, n, t)
/* As REGNUM but the encoded value is doubled (Neon Q registers map onto
   even D register numbers).  */
#define REGNUM2(p,n,t) REGDEF(p##n, 2 * n, t)
#define REGSET(p,t) \
  REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
  REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
  REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
  REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
#define REGSETH(p,t) \
  REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
  REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
  REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
  REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t), REGNUM(p,31,t)
#define REGSET2(p,t) \
  REGNUM2(p, 0,t), REGNUM2(p, 1,t), REGNUM2(p, 2,t), REGNUM2(p, 3,t), \
  REGNUM2(p, 4,t), REGNUM2(p, 5,t), REGNUM2(p, 6,t), REGNUM2(p, 7,t), \
  REGNUM2(p, 8,t), REGNUM2(p, 9,t), REGNUM2(p,10,t), REGNUM2(p,11,t), \
  REGNUM2(p,12,t), REGNUM2(p,13,t), REGNUM2(p,14,t), REGNUM2(p,15,t)
/* Banked SP/LR/SPSR triple (upper and lower case) for one CPU mode.  */
#define SPLRBANK(base,bank,t) \
  REGDEF(lr_##bank, 768|((base+0)<<16), t), \
  REGDEF(sp_##bank, 768|((base+1)<<16), t), \
  REGDEF(spsr_##bank, 768|(base<<16)|SPSR_BIT, t), \
  REGDEF(LR_##bank, 768|((base+0)<<16), t), \
  REGDEF(SP_##bank, 768|((base+1)<<16), t), \
  REGDEF(SPSR_##bank, 768|(base<<16)|SPSR_BIT, t)
19552 static const struct reg_entry reg_names
[] =
19554 /* ARM integer registers. */
19555 REGSET(r
, RN
), REGSET(R
, RN
),
19557 /* ATPCS synonyms. */
19558 REGDEF(a1
,0,RN
), REGDEF(a2
,1,RN
), REGDEF(a3
, 2,RN
), REGDEF(a4
, 3,RN
),
19559 REGDEF(v1
,4,RN
), REGDEF(v2
,5,RN
), REGDEF(v3
, 6,RN
), REGDEF(v4
, 7,RN
),
19560 REGDEF(v5
,8,RN
), REGDEF(v6
,9,RN
), REGDEF(v7
,10,RN
), REGDEF(v8
,11,RN
),
19562 REGDEF(A1
,0,RN
), REGDEF(A2
,1,RN
), REGDEF(A3
, 2,RN
), REGDEF(A4
, 3,RN
),
19563 REGDEF(V1
,4,RN
), REGDEF(V2
,5,RN
), REGDEF(V3
, 6,RN
), REGDEF(V4
, 7,RN
),
19564 REGDEF(V5
,8,RN
), REGDEF(V6
,9,RN
), REGDEF(V7
,10,RN
), REGDEF(V8
,11,RN
),
19566 /* Well-known aliases. */
19567 REGDEF(wr
, 7,RN
), REGDEF(sb
, 9,RN
), REGDEF(sl
,10,RN
), REGDEF(fp
,11,RN
),
19568 REGDEF(ip
,12,RN
), REGDEF(sp
,13,RN
), REGDEF(lr
,14,RN
), REGDEF(pc
,15,RN
),
19570 REGDEF(WR
, 7,RN
), REGDEF(SB
, 9,RN
), REGDEF(SL
,10,RN
), REGDEF(FP
,11,RN
),
19571 REGDEF(IP
,12,RN
), REGDEF(SP
,13,RN
), REGDEF(LR
,14,RN
), REGDEF(PC
,15,RN
),
19573 /* Coprocessor numbers. */
19574 REGSET(p
, CP
), REGSET(P
, CP
),
19576 /* Coprocessor register numbers. The "cr" variants are for backward
19578 REGSET(c
, CN
), REGSET(C
, CN
),
19579 REGSET(cr
, CN
), REGSET(CR
, CN
),
19581 /* ARM banked registers. */
19582 REGDEF(R8_usr
,512|(0<<16),RNB
), REGDEF(r8_usr
,512|(0<<16),RNB
),
19583 REGDEF(R9_usr
,512|(1<<16),RNB
), REGDEF(r9_usr
,512|(1<<16),RNB
),
19584 REGDEF(R10_usr
,512|(2<<16),RNB
), REGDEF(r10_usr
,512|(2<<16),RNB
),
19585 REGDEF(R11_usr
,512|(3<<16),RNB
), REGDEF(r11_usr
,512|(3<<16),RNB
),
19586 REGDEF(R12_usr
,512|(4<<16),RNB
), REGDEF(r12_usr
,512|(4<<16),RNB
),
19587 REGDEF(SP_usr
,512|(5<<16),RNB
), REGDEF(sp_usr
,512|(5<<16),RNB
),
19588 REGDEF(LR_usr
,512|(6<<16),RNB
), REGDEF(lr_usr
,512|(6<<16),RNB
),
19590 REGDEF(R8_fiq
,512|(8<<16),RNB
), REGDEF(r8_fiq
,512|(8<<16),RNB
),
19591 REGDEF(R9_fiq
,512|(9<<16),RNB
), REGDEF(r9_fiq
,512|(9<<16),RNB
),
19592 REGDEF(R10_fiq
,512|(10<<16),RNB
), REGDEF(r10_fiq
,512|(10<<16),RNB
),
19593 REGDEF(R11_fiq
,512|(11<<16),RNB
), REGDEF(r11_fiq
,512|(11<<16),RNB
),
19594 REGDEF(R12_fiq
,512|(12<<16),RNB
), REGDEF(r12_fiq
,512|(12<<16),RNB
),
19595 REGDEF(SP_fiq
,512|(13<<16),RNB
), REGDEF(sp_fiq
,512|(13<<16),RNB
),
19596 REGDEF(LR_fiq
,512|(14<<16),RNB
), REGDEF(lr_fiq
,512|(14<<16),RNB
),
19597 REGDEF(SPSR_fiq
,512|(14<<16)|SPSR_BIT
,RNB
), REGDEF(spsr_fiq
,512|(14<<16)|SPSR_BIT
,RNB
),
19599 SPLRBANK(0,IRQ
,RNB
), SPLRBANK(0,irq
,RNB
),
19600 SPLRBANK(2,SVC
,RNB
), SPLRBANK(2,svc
,RNB
),
19601 SPLRBANK(4,ABT
,RNB
), SPLRBANK(4,abt
,RNB
),
19602 SPLRBANK(6,UND
,RNB
), SPLRBANK(6,und
,RNB
),
19603 SPLRBANK(12,MON
,RNB
), SPLRBANK(12,mon
,RNB
),
19604 REGDEF(elr_hyp
,768|(14<<16),RNB
), REGDEF(ELR_hyp
,768|(14<<16),RNB
),
19605 REGDEF(sp_hyp
,768|(15<<16),RNB
), REGDEF(SP_hyp
,768|(15<<16),RNB
),
19606 REGDEF(spsr_hyp
,768|(14<<16)|SPSR_BIT
,RNB
),
19607 REGDEF(SPSR_hyp
,768|(14<<16)|SPSR_BIT
,RNB
),
19609 /* FPA registers. */
19610 REGNUM(f
,0,FN
), REGNUM(f
,1,FN
), REGNUM(f
,2,FN
), REGNUM(f
,3,FN
),
19611 REGNUM(f
,4,FN
), REGNUM(f
,5,FN
), REGNUM(f
,6,FN
), REGNUM(f
,7, FN
),
19613 REGNUM(F
,0,FN
), REGNUM(F
,1,FN
), REGNUM(F
,2,FN
), REGNUM(F
,3,FN
),
19614 REGNUM(F
,4,FN
), REGNUM(F
,5,FN
), REGNUM(F
,6,FN
), REGNUM(F
,7, FN
),
19616 /* VFP SP registers. */
19617 REGSET(s
,VFS
), REGSET(S
,VFS
),
19618 REGSETH(s
,VFS
), REGSETH(S
,VFS
),
19620 /* VFP DP Registers. */
19621 REGSET(d
,VFD
), REGSET(D
,VFD
),
19622 /* Extra Neon DP registers. */
19623 REGSETH(d
,VFD
), REGSETH(D
,VFD
),
19625 /* Neon QP registers. */
19626 REGSET2(q
,NQ
), REGSET2(Q
,NQ
),
19628 /* VFP control registers. */
19629 REGDEF(fpsid
,0,VFC
), REGDEF(fpscr
,1,VFC
), REGDEF(fpexc
,8,VFC
),
19630 REGDEF(FPSID
,0,VFC
), REGDEF(FPSCR
,1,VFC
), REGDEF(FPEXC
,8,VFC
),
19631 REGDEF(fpinst
,9,VFC
), REGDEF(fpinst2
,10,VFC
),
19632 REGDEF(FPINST
,9,VFC
), REGDEF(FPINST2
,10,VFC
),
19633 REGDEF(mvfr0
,7,VFC
), REGDEF(mvfr1
,6,VFC
),
19634 REGDEF(MVFR0
,7,VFC
), REGDEF(MVFR1
,6,VFC
),
19635 REGDEF(mvfr2
,5,VFC
), REGDEF(MVFR2
,5,VFC
),
19637 /* Maverick DSP coprocessor registers. */
19638 REGSET(mvf
,MVF
), REGSET(mvd
,MVD
), REGSET(mvfx
,MVFX
), REGSET(mvdx
,MVDX
),
19639 REGSET(MVF
,MVF
), REGSET(MVD
,MVD
), REGSET(MVFX
,MVFX
), REGSET(MVDX
,MVDX
),
19641 REGNUM(mvax
,0,MVAX
), REGNUM(mvax
,1,MVAX
),
19642 REGNUM(mvax
,2,MVAX
), REGNUM(mvax
,3,MVAX
),
19643 REGDEF(dspsc
,0,DSPSC
),
19645 REGNUM(MVAX
,0,MVAX
), REGNUM(MVAX
,1,MVAX
),
19646 REGNUM(MVAX
,2,MVAX
), REGNUM(MVAX
,3,MVAX
),
19647 REGDEF(DSPSC
,0,DSPSC
),
19649 /* iWMMXt data registers - p0, c0-15. */
19650 REGSET(wr
,MMXWR
), REGSET(wR
,MMXWR
), REGSET(WR
, MMXWR
),
19652 /* iWMMXt control registers - p1, c0-3. */
19653 REGDEF(wcid
, 0,MMXWC
), REGDEF(wCID
, 0,MMXWC
), REGDEF(WCID
, 0,MMXWC
),
19654 REGDEF(wcon
, 1,MMXWC
), REGDEF(wCon
, 1,MMXWC
), REGDEF(WCON
, 1,MMXWC
),
19655 REGDEF(wcssf
, 2,MMXWC
), REGDEF(wCSSF
, 2,MMXWC
), REGDEF(WCSSF
, 2,MMXWC
),
19656 REGDEF(wcasf
, 3,MMXWC
), REGDEF(wCASF
, 3,MMXWC
), REGDEF(WCASF
, 3,MMXWC
),
19658 /* iWMMXt scalar (constant/offset) registers - p1, c8-11. */
19659 REGDEF(wcgr0
, 8,MMXWCG
), REGDEF(wCGR0
, 8,MMXWCG
), REGDEF(WCGR0
, 8,MMXWCG
),
19660 REGDEF(wcgr1
, 9,MMXWCG
), REGDEF(wCGR1
, 9,MMXWCG
), REGDEF(WCGR1
, 9,MMXWCG
),
19661 REGDEF(wcgr2
,10,MMXWCG
), REGDEF(wCGR2
,10,MMXWCG
), REGDEF(WCGR2
,10,MMXWCG
),
19662 REGDEF(wcgr3
,11,MMXWCG
), REGDEF(wCGR3
,11,MMXWCG
), REGDEF(WCGR3
,11,MMXWCG
),
19664 /* XScale accumulator registers. */
19665 REGNUM(acc
,0,XSCALE
), REGNUM(ACC
,0,XSCALE
),
19671 /* Table of all PSR suffixes. Bare "CPSR" and "SPSR" are handled
19672 within psr_required_here. */
19673 static const struct asm_psr psrs
[] =
19675 /* Backward compatibility notation. Note that "all" is no longer
19676 truly all possible PSR bits. */
19677 {"all", PSR_c
| PSR_f
},
19681 /* Individual flags. */
19687 /* Combinations of flags. */
19688 {"fs", PSR_f
| PSR_s
},
19689 {"fx", PSR_f
| PSR_x
},
19690 {"fc", PSR_f
| PSR_c
},
19691 {"sf", PSR_s
| PSR_f
},
19692 {"sx", PSR_s
| PSR_x
},
19693 {"sc", PSR_s
| PSR_c
},
19694 {"xf", PSR_x
| PSR_f
},
19695 {"xs", PSR_x
| PSR_s
},
19696 {"xc", PSR_x
| PSR_c
},
19697 {"cf", PSR_c
| PSR_f
},
19698 {"cs", PSR_c
| PSR_s
},
19699 {"cx", PSR_c
| PSR_x
},
19700 {"fsx", PSR_f
| PSR_s
| PSR_x
},
19701 {"fsc", PSR_f
| PSR_s
| PSR_c
},
19702 {"fxs", PSR_f
| PSR_x
| PSR_s
},
19703 {"fxc", PSR_f
| PSR_x
| PSR_c
},
19704 {"fcs", PSR_f
| PSR_c
| PSR_s
},
19705 {"fcx", PSR_f
| PSR_c
| PSR_x
},
19706 {"sfx", PSR_s
| PSR_f
| PSR_x
},
19707 {"sfc", PSR_s
| PSR_f
| PSR_c
},
19708 {"sxf", PSR_s
| PSR_x
| PSR_f
},
19709 {"sxc", PSR_s
| PSR_x
| PSR_c
},
19710 {"scf", PSR_s
| PSR_c
| PSR_f
},
19711 {"scx", PSR_s
| PSR_c
| PSR_x
},
19712 {"xfs", PSR_x
| PSR_f
| PSR_s
},
19713 {"xfc", PSR_x
| PSR_f
| PSR_c
},
19714 {"xsf", PSR_x
| PSR_s
| PSR_f
},
19715 {"xsc", PSR_x
| PSR_s
| PSR_c
},
19716 {"xcf", PSR_x
| PSR_c
| PSR_f
},
19717 {"xcs", PSR_x
| PSR_c
| PSR_s
},
19718 {"cfs", PSR_c
| PSR_f
| PSR_s
},
19719 {"cfx", PSR_c
| PSR_f
| PSR_x
},
19720 {"csf", PSR_c
| PSR_s
| PSR_f
},
19721 {"csx", PSR_c
| PSR_s
| PSR_x
},
19722 {"cxf", PSR_c
| PSR_x
| PSR_f
},
19723 {"cxs", PSR_c
| PSR_x
| PSR_s
},
19724 {"fsxc", PSR_f
| PSR_s
| PSR_x
| PSR_c
},
19725 {"fscx", PSR_f
| PSR_s
| PSR_c
| PSR_x
},
19726 {"fxsc", PSR_f
| PSR_x
| PSR_s
| PSR_c
},
19727 {"fxcs", PSR_f
| PSR_x
| PSR_c
| PSR_s
},
19728 {"fcsx", PSR_f
| PSR_c
| PSR_s
| PSR_x
},
19729 {"fcxs", PSR_f
| PSR_c
| PSR_x
| PSR_s
},
19730 {"sfxc", PSR_s
| PSR_f
| PSR_x
| PSR_c
},
19731 {"sfcx", PSR_s
| PSR_f
| PSR_c
| PSR_x
},
19732 {"sxfc", PSR_s
| PSR_x
| PSR_f
| PSR_c
},
19733 {"sxcf", PSR_s
| PSR_x
| PSR_c
| PSR_f
},
19734 {"scfx", PSR_s
| PSR_c
| PSR_f
| PSR_x
},
19735 {"scxf", PSR_s
| PSR_c
| PSR_x
| PSR_f
},
19736 {"xfsc", PSR_x
| PSR_f
| PSR_s
| PSR_c
},
19737 {"xfcs", PSR_x
| PSR_f
| PSR_c
| PSR_s
},
19738 {"xsfc", PSR_x
| PSR_s
| PSR_f
| PSR_c
},
19739 {"xscf", PSR_x
| PSR_s
| PSR_c
| PSR_f
},
19740 {"xcfs", PSR_x
| PSR_c
| PSR_f
| PSR_s
},
19741 {"xcsf", PSR_x
| PSR_c
| PSR_s
| PSR_f
},
19742 {"cfsx", PSR_c
| PSR_f
| PSR_s
| PSR_x
},
19743 {"cfxs", PSR_c
| PSR_f
| PSR_x
| PSR_s
},
19744 {"csfx", PSR_c
| PSR_s
| PSR_f
| PSR_x
},
19745 {"csxf", PSR_c
| PSR_s
| PSR_x
| PSR_f
},
19746 {"cxfs", PSR_c
| PSR_x
| PSR_f
| PSR_s
},
19747 {"cxsf", PSR_c
| PSR_x
| PSR_s
| PSR_f
},
19750 /* Table of V7M psr names. */
19751 static const struct asm_psr v7m_psrs
[] =
19753 {"apsr", 0x0 }, {"APSR", 0x0 },
19754 {"iapsr", 0x1 }, {"IAPSR", 0x1 },
19755 {"eapsr", 0x2 }, {"EAPSR", 0x2 },
19756 {"psr", 0x3 }, {"PSR", 0x3 },
19757 {"xpsr", 0x3 }, {"XPSR", 0x3 }, {"xPSR", 3 },
19758 {"ipsr", 0x5 }, {"IPSR", 0x5 },
19759 {"epsr", 0x6 }, {"EPSR", 0x6 },
19760 {"iepsr", 0x7 }, {"IEPSR", 0x7 },
19761 {"msp", 0x8 }, {"MSP", 0x8 },
19762 {"psp", 0x9 }, {"PSP", 0x9 },
19763 {"msplim", 0xa }, {"MSPLIM", 0xa },
19764 {"psplim", 0xb }, {"PSPLIM", 0xb },
19765 {"primask", 0x10}, {"PRIMASK", 0x10},
19766 {"basepri", 0x11}, {"BASEPRI", 0x11},
19767 {"basepri_max", 0x12}, {"BASEPRI_MAX", 0x12},
19768 {"faultmask", 0x13}, {"FAULTMASK", 0x13},
19769 {"control", 0x14}, {"CONTROL", 0x14},
19770 {"msp_ns", 0x88}, {"MSP_NS", 0x88},
19771 {"psp_ns", 0x89}, {"PSP_NS", 0x89},
19772 {"msplim_ns", 0x8a}, {"MSPLIM_NS", 0x8a},
19773 {"psplim_ns", 0x8b}, {"PSPLIM_NS", 0x8b},
19774 {"primask_ns", 0x90}, {"PRIMASK_NS", 0x90},
19775 {"basepri_ns", 0x91}, {"BASEPRI_NS", 0x91},
19776 {"faultmask_ns", 0x93}, {"FAULTMASK_NS", 0x93},
19777 {"control_ns", 0x94}, {"CONTROL_NS", 0x94},
19778 {"sp_ns", 0x98}, {"SP_NS", 0x98 }
19781 /* Table of all shift-in-operand names. */
19782 static const struct asm_shift_name shift_names
[] =
19784 { "asl", SHIFT_LSL
}, { "ASL", SHIFT_LSL
},
19785 { "lsl", SHIFT_LSL
}, { "LSL", SHIFT_LSL
},
19786 { "lsr", SHIFT_LSR
}, { "LSR", SHIFT_LSR
},
19787 { "asr", SHIFT_ASR
}, { "ASR", SHIFT_ASR
},
19788 { "ror", SHIFT_ROR
}, { "ROR", SHIFT_ROR
},
19789 { "rrx", SHIFT_RRX
}, { "RRX", SHIFT_RRX
}
19792 /* Table of all explicit relocation names. */
19794 static struct reloc_entry reloc_names
[] =
19796 { "got", BFD_RELOC_ARM_GOT32
}, { "GOT", BFD_RELOC_ARM_GOT32
},
19797 { "gotoff", BFD_RELOC_ARM_GOTOFF
}, { "GOTOFF", BFD_RELOC_ARM_GOTOFF
},
19798 { "plt", BFD_RELOC_ARM_PLT32
}, { "PLT", BFD_RELOC_ARM_PLT32
},
19799 { "target1", BFD_RELOC_ARM_TARGET1
}, { "TARGET1", BFD_RELOC_ARM_TARGET1
},
19800 { "target2", BFD_RELOC_ARM_TARGET2
}, { "TARGET2", BFD_RELOC_ARM_TARGET2
},
19801 { "sbrel", BFD_RELOC_ARM_SBREL32
}, { "SBREL", BFD_RELOC_ARM_SBREL32
},
19802 { "tlsgd", BFD_RELOC_ARM_TLS_GD32
}, { "TLSGD", BFD_RELOC_ARM_TLS_GD32
},
19803 { "tlsldm", BFD_RELOC_ARM_TLS_LDM32
}, { "TLSLDM", BFD_RELOC_ARM_TLS_LDM32
},
19804 { "tlsldo", BFD_RELOC_ARM_TLS_LDO32
}, { "TLSLDO", BFD_RELOC_ARM_TLS_LDO32
},
19805 { "gottpoff",BFD_RELOC_ARM_TLS_IE32
}, { "GOTTPOFF",BFD_RELOC_ARM_TLS_IE32
},
19806 { "tpoff", BFD_RELOC_ARM_TLS_LE32
}, { "TPOFF", BFD_RELOC_ARM_TLS_LE32
},
19807 { "got_prel", BFD_RELOC_ARM_GOT_PREL
}, { "GOT_PREL", BFD_RELOC_ARM_GOT_PREL
},
19808 { "tlsdesc", BFD_RELOC_ARM_TLS_GOTDESC
},
19809 { "TLSDESC", BFD_RELOC_ARM_TLS_GOTDESC
},
19810 { "tlscall", BFD_RELOC_ARM_TLS_CALL
},
19811 { "TLSCALL", BFD_RELOC_ARM_TLS_CALL
},
19812 { "tlsdescseq", BFD_RELOC_ARM_TLS_DESCSEQ
},
19813 { "TLSDESCSEQ", BFD_RELOC_ARM_TLS_DESCSEQ
},
19814 { "gotfuncdesc", BFD_RELOC_ARM_GOTFUNCDESC
},
19815 { "GOTFUNCDESC", BFD_RELOC_ARM_GOTFUNCDESC
},
19816 { "gotofffuncdesc", BFD_RELOC_ARM_GOTOFFFUNCDESC
},
19817 { "GOTOFFFUNCDESC", BFD_RELOC_ARM_GOTOFFFUNCDESC
},
19818 { "funcdesc", BFD_RELOC_ARM_FUNCDESC
},
19819 { "FUNCDESC", BFD_RELOC_ARM_FUNCDESC
},
19820 { "tlsgd_fdpic", BFD_RELOC_ARM_TLS_GD32_FDPIC
}, { "TLSGD_FDPIC", BFD_RELOC_ARM_TLS_GD32_FDPIC
},
19821 { "tlsldm_fdpic", BFD_RELOC_ARM_TLS_LDM32_FDPIC
}, { "TLSLDM_FDPIC", BFD_RELOC_ARM_TLS_LDM32_FDPIC
},
19822 { "gottpoff_fdpic", BFD_RELOC_ARM_TLS_IE32_FDPIC
}, { "GOTTPOFF_FDIC", BFD_RELOC_ARM_TLS_IE32_FDPIC
},
19826 /* Table of all conditional affixes. 0xF is not defined as a condition code. */
19827 static const struct asm_cond conds
[] =
19831 {"cs", 0x2}, {"hs", 0x2},
19832 {"cc", 0x3}, {"ul", 0x3}, {"lo", 0x3},
19846 #define UL_BARRIER(L,U,CODE,FEAT) \
19847 { L, CODE, ARM_FEATURE_CORE_LOW (FEAT) }, \
19848 { U, CODE, ARM_FEATURE_CORE_LOW (FEAT) }
19850 static struct asm_barrier_opt barrier_opt_names
[] =
19852 UL_BARRIER ("sy", "SY", 0xf, ARM_EXT_BARRIER
),
19853 UL_BARRIER ("st", "ST", 0xe, ARM_EXT_BARRIER
),
19854 UL_BARRIER ("ld", "LD", 0xd, ARM_EXT_V8
),
19855 UL_BARRIER ("ish", "ISH", 0xb, ARM_EXT_BARRIER
),
19856 UL_BARRIER ("sh", "SH", 0xb, ARM_EXT_BARRIER
),
19857 UL_BARRIER ("ishst", "ISHST", 0xa, ARM_EXT_BARRIER
),
19858 UL_BARRIER ("shst", "SHST", 0xa, ARM_EXT_BARRIER
),
19859 UL_BARRIER ("ishld", "ISHLD", 0x9, ARM_EXT_V8
),
19860 UL_BARRIER ("un", "UN", 0x7, ARM_EXT_BARRIER
),
19861 UL_BARRIER ("nsh", "NSH", 0x7, ARM_EXT_BARRIER
),
19862 UL_BARRIER ("unst", "UNST", 0x6, ARM_EXT_BARRIER
),
19863 UL_BARRIER ("nshst", "NSHST", 0x6, ARM_EXT_BARRIER
),
19864 UL_BARRIER ("nshld", "NSHLD", 0x5, ARM_EXT_V8
),
19865 UL_BARRIER ("osh", "OSH", 0x3, ARM_EXT_BARRIER
),
19866 UL_BARRIER ("oshst", "OSHST", 0x2, ARM_EXT_BARRIER
),
19867 UL_BARRIER ("oshld", "OSHLD", 0x1, ARM_EXT_V8
)
/* Table of ARM-format instructions.	*/

/* Macros for gluing together operand strings.  N.B. In all cases
   other than OPS0, the trailing OP_stop comes from default
   zero-initialization of the unspecified elements of the array.  */
#define OPS0()		  { OP_stop, }
#define OPS1(a)		  { OP_##a, }
#define OPS2(a,b)	  { OP_##a,OP_##b, }
#define OPS3(a,b,c)	  { OP_##a,OP_##b,OP_##c, }
#define OPS4(a,b,c,d)	  { OP_##a,OP_##b,OP_##c,OP_##d, }
#define OPS5(a,b,c,d,e)	  { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e, }
#define OPS6(a,b,c,d,e,f) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e,OP_##f, }

/* These macros are similar to the OPSn, but do not prepend the OP_ prefix.
   This is useful when mixing operands for ARM and THUMB, i.e. using the
   MIX_ARM_THUMB_OPERANDS macro.
   In order to use these macros, prefix the number of operands with _
   e.g. _3.  */
#define OPS_1(a)	   { a, }
#define OPS_2(a,b)	   { a,b, }
#define OPS_3(a,b,c)	   { a,b,c, }
#define OPS_4(a,b,c,d)	   { a,b,c,d, }
#define OPS_5(a,b,c,d,e)   { a,b,c,d,e, }
#define OPS_6(a,b,c,d,e,f) { a,b,c,d,e,f, }

/* These macros abstract out the exact format of the mnemonic table and
   save some repeated characters.  */

/* The normal sort of mnemonic; has a Thumb variant; takes a conditional suffix.  */
#define TxCE(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }

/* Two variants of the above - TCE for a numeric Thumb opcode, tCE for
   a T_MNEM_xyz enumerator.  */
#define TCE(mnem, aop, top, nops, ops, ae, te) \
  TxCE (mnem, aop, 0x##top, nops, ops, ae, te)
#define tCE(mnem, aop, top, nops, ops, ae, te) \
  TxCE (mnem, aop, T_MNEM##top, nops, ops, ae, te)

/* Second most common sort of mnemonic: has a Thumb variant, takes a conditional
   infix after the third character.  */
#define TxC3(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_cinfix3, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }
#define TxC3w(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_cinfix3_deprecated, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }
#define TC3(mnem, aop, top, nops, ops, ae, te) \
  TxC3 (mnem, aop, 0x##top, nops, ops, ae, te)
#define TC3w(mnem, aop, top, nops, ops, ae, te) \
  TxC3w (mnem, aop, 0x##top, nops, ops, ae, te)
#define tC3(mnem, aop, top, nops, ops, ae, te) \
  TxC3 (mnem, aop, T_MNEM##top, nops, ops, ae, te)
#define tC3w(mnem, aop, top, nops, ops, ae, te) \
  TxC3w (mnem, aop, T_MNEM##top, nops, ops, ae, te)

/* Mnemonic that cannot be conditionalized.  The ARM condition-code
   field is still 0xE.  Many of the Thumb variants can be executed
   conditionally, so this is checked separately.  */
#define TUE(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }

/* Same as TUE but the encoding function for ARM and Thumb modes is the same.
   Used by mnemonics that have very minimal differences in the encoding for
   ARM and Thumb variants and can be handled in a common function.  */
#define TUEc(mnem, op, top, nops, ops, en) \
  { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##en, do_##en }

/* Mnemonic that cannot be conditionalized, and bears 0xF in its ARM
   condition code field.  */
#define TUF(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }

/* ARM-only variants of all the above.  */
#define CE(mnem, op, nops, ops, ae) \
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

#define C3(mnem, op, nops, ops, ae) \
  { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

/* Thumb-only variants of TCE and TUE.  */
/* NOTE(review): the trailing "do_##te }" continuation lines of the next
   four macros were missing from this copy of the file; restored per the
   pattern of the TCE/TUE initializers above.  */
#define ToC(mnem, top, nops, ops, te) \
  { mnem, OPS##nops ops, OT_csuffix, 0x0, 0x##top, 0, THUMB_VARIANT, NULL, \
    do_##te }

#define ToU(mnem, top, nops, ops, te) \
  { mnem, OPS##nops ops, OT_unconditional, 0x0, 0x##top, 0, THUMB_VARIANT, \
    NULL, do_##te }

/* T_MNEM_xyz enumerator variants of ToC.  */
#define toC(mnem, top, nops, ops, te) \
  { mnem, OPS##nops ops, OT_csuffix, 0x0, T_MNEM##top, 0, THUMB_VARIANT, NULL, \
    do_##te }

/* T_MNEM_xyz enumerator variants of ToU.  */
#define toU(mnem, top, nops, ops, te) \
  { mnem, OPS##nops ops, OT_unconditional, 0x0, T_MNEM##top, 0, THUMB_VARIANT, \
    NULL, do_##te }

/* Legacy mnemonics that always have conditional infix after the third
   character.  */
#define CL(mnem, op, nops, ops, ae) \
  { mnem, OPS##nops ops, OT_cinfix3_legacy, \
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

/* Coprocessor instructions.  Isomorphic between Arm and Thumb-2.  */
#define cCE(mnem, op, nops, ops, ae) \
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

/* Legacy coprocessor instructions where conditional infix and conditional
   suffix are ambiguous.  For consistency this includes all FPA instructions,
   not just the potentially ambiguous ones.  */
#define cCL(mnem, op, nops, ops, ae) \
  { mnem, OPS##nops ops, OT_cinfix3_legacy, \
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

/* Coprocessor, takes either a suffix or a position-3 infix
   (for an FPA corner case).  */
#define C3E(mnem, op, nops, ops, ae) \
  { mnem, OPS##nops ops, OT_csuf_or_in3, \
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

/* Helper for CM: glue mnemonic fragments m1 + condition m2 + m3 into one
   table row; the OT_ tag records where the condition infix sits.  */
#define xCM_(m1, m2, m3, op, nops, ops, ae) \
  { m1 #m2 m3, OPS##nops ops, \
    sizeof (#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof (m1) - 1, \
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

/* Expand one mnemonic into rows for every condition affix.  */
#define CM(m1, m2, op, nops, ops, ae) \
  xCM_ (m1,   , m2, op, nops, ops, ae), \
  xCM_ (m1, eq, m2, op, nops, ops, ae), \
  xCM_ (m1, ne, m2, op, nops, ops, ae), \
  xCM_ (m1, cs, m2, op, nops, ops, ae), \
  xCM_ (m1, hs, m2, op, nops, ops, ae), \
  xCM_ (m1, cc, m2, op, nops, ops, ae), \
  xCM_ (m1, ul, m2, op, nops, ops, ae), \
  xCM_ (m1, lo, m2, op, nops, ops, ae), \
  xCM_ (m1, mi, m2, op, nops, ops, ae), \
  xCM_ (m1, pl, m2, op, nops, ops, ae), \
  xCM_ (m1, vs, m2, op, nops, ops, ae), \
  xCM_ (m1, vc, m2, op, nops, ops, ae), \
  xCM_ (m1, hi, m2, op, nops, ops, ae), \
  xCM_ (m1, ls, m2, op, nops, ops, ae), \
  xCM_ (m1, ge, m2, op, nops, ops, ae), \
  xCM_ (m1, lt, m2, op, nops, ops, ae), \
  xCM_ (m1, gt, m2, op, nops, ops, ae), \
  xCM_ (m1, le, m2, op, nops, ops, ae), \
  xCM_ (m1, al, m2, op, nops, ops, ae)

#define UE(mnem, op, nops, ops, ae) \
  { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }

#define UF(mnem, op, nops, ops, ae) \
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }

/* Neon data-processing.  ARM versions are unconditional with cond=0xf.
   The Thumb and ARM variants are mostly the same (bits 0-23 and 24/28), so we
   use the same encoding function for each.  */
#define NUF(mnem, op, nops, ops, enc) \
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op, \
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

/* Neon data processing, version which indirects through neon_enc_tab for
   the various overloaded versions of opcodes.  */
#define nUF(mnem, op, nops, ops, enc) \
  { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM##op, N_MNEM##op, \
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

/* Neon insn with conditional suffix for the ARM version, non-overloaded
   types.  */
#define NCE_tag(mnem, op, nops, ops, enc, tag) \
  { #mnem, OPS##nops ops, tag, 0x##op, 0x##op, ARM_VARIANT, \
    THUMB_VARIANT, do_##enc, do_##enc }

#define NCE(mnem, op, nops, ops, enc) \
   NCE_tag (mnem, op, nops, ops, enc, OT_csuffix)

#define NCEF(mnem, op, nops, ops, enc) \
    NCE_tag (mnem, op, nops, ops, enc, OT_csuffixF)

/* Neon insn with conditional suffix for the ARM version, overloaded types.  */
#define nCE_tag(mnem, op, nops, ops, enc, tag) \
  { #mnem, OPS##nops ops, tag, N_MNEM##op, N_MNEM##op, \
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

#define nCE(mnem, op, nops, ops, enc) \
   nCE_tag (mnem, op, nops, ops, enc, OT_csuffix)

#define nCEF(mnem, op, nops, ops, enc) \
    nCE_tag (mnem, op, nops, ops, enc, OT_csuffixF)
20068 static const struct asm_opcode insns
[] =
20070 #define ARM_VARIANT & arm_ext_v1 /* Core ARM Instructions. */
20071 #define THUMB_VARIANT & arm_ext_v4t
20072 tCE("and", 0000000, _and
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
20073 tC3("ands", 0100000, _ands
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
20074 tCE("eor", 0200000, _eor
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
20075 tC3("eors", 0300000, _eors
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
20076 tCE("sub", 0400000, _sub
, 3, (RR
, oRR
, SH
), arit
, t_add_sub
),
20077 tC3("subs", 0500000, _subs
, 3, (RR
, oRR
, SH
), arit
, t_add_sub
),
20078 tCE("add", 0800000, _add
, 3, (RR
, oRR
, SHG
), arit
, t_add_sub
),
20079 tC3("adds", 0900000, _adds
, 3, (RR
, oRR
, SHG
), arit
, t_add_sub
),
20080 tCE("adc", 0a00000
, _adc
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
20081 tC3("adcs", 0b00000, _adcs
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
20082 tCE("sbc", 0c00000
, _sbc
, 3, (RR
, oRR
, SH
), arit
, t_arit3
),
20083 tC3("sbcs", 0d00000
, _sbcs
, 3, (RR
, oRR
, SH
), arit
, t_arit3
),
20084 tCE("orr", 1800000, _orr
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
20085 tC3("orrs", 1900000, _orrs
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
20086 tCE("bic", 1c00000
, _bic
, 3, (RR
, oRR
, SH
), arit
, t_arit3
),
20087 tC3("bics", 1d00000
, _bics
, 3, (RR
, oRR
, SH
), arit
, t_arit3
),
20089 /* The p-variants of tst/cmp/cmn/teq (below) are the pre-V6 mechanism
20090 for setting PSR flag bits. They are obsolete in V6 and do not
20091 have Thumb equivalents. */
20092 tCE("tst", 1100000, _tst
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
20093 tC3w("tsts", 1100000, _tst
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
20094 CL("tstp", 110f000
, 2, (RR
, SH
), cmp
),
20095 tCE("cmp", 1500000, _cmp
, 2, (RR
, SH
), cmp
, t_mov_cmp
),
20096 tC3w("cmps", 1500000, _cmp
, 2, (RR
, SH
), cmp
, t_mov_cmp
),
20097 CL("cmpp", 150f000
, 2, (RR
, SH
), cmp
),
20098 tCE("cmn", 1700000, _cmn
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
20099 tC3w("cmns", 1700000, _cmn
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
20100 CL("cmnp", 170f000
, 2, (RR
, SH
), cmp
),
20102 tCE("mov", 1a00000
, _mov
, 2, (RR
, SH
), mov
, t_mov_cmp
),
20103 tC3("movs", 1b00000
, _movs
, 2, (RR
, SHG
), mov
, t_mov_cmp
),
20104 tCE("mvn", 1e00000
, _mvn
, 2, (RR
, SH
), mov
, t_mvn_tst
),
20105 tC3("mvns", 1f00000
, _mvns
, 2, (RR
, SH
), mov
, t_mvn_tst
),
20107 tCE("ldr", 4100000, _ldr
, 2, (RR
, ADDRGLDR
),ldst
, t_ldst
),
20108 tC3("ldrb", 4500000, _ldrb
, 2, (RRnpc_npcsp
, ADDRGLDR
),ldst
, t_ldst
),
20109 tCE("str", 4000000, _str
, _2
, (MIX_ARM_THUMB_OPERANDS (OP_RR
,
20111 OP_ADDRGLDR
),ldst
, t_ldst
),
20112 tC3("strb", 4400000, _strb
, 2, (RRnpc_npcsp
, ADDRGLDR
),ldst
, t_ldst
),
20114 tCE("stm", 8800000, _stmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
20115 tC3("stmia", 8800000, _stmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
20116 tC3("stmea", 8800000, _stmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
20117 tCE("ldm", 8900000, _ldmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
20118 tC3("ldmia", 8900000, _ldmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
20119 tC3("ldmfd", 8900000, _ldmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
20121 tCE("b", a000000
, _b
, 1, (EXPr
), branch
, t_branch
),
20122 TCE("bl", b000000
, f000f800
, 1, (EXPr
), bl
, t_branch23
),
20125 tCE("adr", 28f0000
, _adr
, 2, (RR
, EXP
), adr
, t_adr
),
20126 C3(adrl
, 28f0000
, 2, (RR
, EXP
), adrl
),
20127 tCE("nop", 1a00000
, _nop
, 1, (oI255c
), nop
, t_nop
),
20128 tCE("udf", 7f000f0
, _udf
, 1, (oIffffb
), bkpt
, t_udf
),
20130 /* Thumb-compatibility pseudo ops. */
20131 tCE("lsl", 1a00000
, _lsl
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
20132 tC3("lsls", 1b00000
, _lsls
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
20133 tCE("lsr", 1a00020
, _lsr
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
20134 tC3("lsrs", 1b00020
, _lsrs
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
20135 tCE("asr", 1a00040
, _asr
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
20136 tC3("asrs", 1b00040
, _asrs
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
20137 tCE("ror", 1a00060
, _ror
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
20138 tC3("rors", 1b00060
, _rors
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
20139 tCE("neg", 2600000, _neg
, 2, (RR
, RR
), rd_rn
, t_neg
),
20140 tC3("negs", 2700000, _negs
, 2, (RR
, RR
), rd_rn
, t_neg
),
20141 tCE("push", 92d0000
, _push
, 1, (REGLST
), push_pop
, t_push_pop
),
20142 tCE("pop", 8bd0000
, _pop
, 1, (REGLST
), push_pop
, t_push_pop
),
20144 /* These may simplify to neg. */
20145 TCE("rsb", 0600000, ebc00000
, 3, (RR
, oRR
, SH
), arit
, t_rsb
),
20146 TC3("rsbs", 0700000, ebd00000
, 3, (RR
, oRR
, SH
), arit
, t_rsb
),
20148 #undef THUMB_VARIANT
20149 #define THUMB_VARIANT & arm_ext_os
20151 TCE("swi", f000000
, df00
, 1, (EXPi
), swi
, t_swi
),
20152 TCE("svc", f000000
, df00
, 1, (EXPi
), swi
, t_swi
),
20154 #undef THUMB_VARIANT
20155 #define THUMB_VARIANT & arm_ext_v6
20157 TCE("cpy", 1a00000
, 4600, 2, (RR
, RR
), rd_rm
, t_cpy
),
20159 /* V1 instructions with no Thumb analogue prior to V6T2. */
20160 #undef THUMB_VARIANT
20161 #define THUMB_VARIANT & arm_ext_v6t2
20163 TCE("teq", 1300000, ea900f00
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
20164 TC3w("teqs", 1300000, ea900f00
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
20165 CL("teqp", 130f000
, 2, (RR
, SH
), cmp
),
20167 TC3("ldrt", 4300000, f8500e00
, 2, (RRnpc_npcsp
, ADDR
),ldstt
, t_ldstt
),
20168 TC3("ldrbt", 4700000, f8100e00
, 2, (RRnpc_npcsp
, ADDR
),ldstt
, t_ldstt
),
20169 TC3("strt", 4200000, f8400e00
, 2, (RR_npcsp
, ADDR
), ldstt
, t_ldstt
),
20170 TC3("strbt", 4600000, f8000e00
, 2, (RRnpc_npcsp
, ADDR
),ldstt
, t_ldstt
),
20172 TC3("stmdb", 9000000, e9000000
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
20173 TC3("stmfd", 9000000, e9000000
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
20175 TC3("ldmdb", 9100000, e9100000
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
20176 TC3("ldmea", 9100000, e9100000
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
20178 /* V1 instructions with no Thumb analogue at all. */
20179 CE("rsc", 0e00000
, 3, (RR
, oRR
, SH
), arit
),
20180 C3(rscs
, 0f00000
, 3, (RR
, oRR
, SH
), arit
),
20182 C3(stmib
, 9800000, 2, (RRw
, REGLST
), ldmstm
),
20183 C3(stmfa
, 9800000, 2, (RRw
, REGLST
), ldmstm
),
20184 C3(stmda
, 8000000, 2, (RRw
, REGLST
), ldmstm
),
20185 C3(stmed
, 8000000, 2, (RRw
, REGLST
), ldmstm
),
20186 C3(ldmib
, 9900000, 2, (RRw
, REGLST
), ldmstm
),
20187 C3(ldmed
, 9900000, 2, (RRw
, REGLST
), ldmstm
),
20188 C3(ldmda
, 8100000, 2, (RRw
, REGLST
), ldmstm
),
20189 C3(ldmfa
, 8100000, 2, (RRw
, REGLST
), ldmstm
),
20192 #define ARM_VARIANT & arm_ext_v2 /* ARM 2 - multiplies. */
20193 #undef THUMB_VARIANT
20194 #define THUMB_VARIANT & arm_ext_v4t
20196 tCE("mul", 0000090, _mul
, 3, (RRnpc
, RRnpc
, oRR
), mul
, t_mul
),
20197 tC3("muls", 0100090, _muls
, 3, (RRnpc
, RRnpc
, oRR
), mul
, t_mul
),
20199 #undef THUMB_VARIANT
20200 #define THUMB_VARIANT & arm_ext_v6t2
20202 TCE("mla", 0200090, fb000000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mlas
, t_mla
),
20203 C3(mlas
, 0300090, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mlas
),
20205 /* Generic coprocessor instructions. */
20206 TCE("cdp", e000000
, ee000000
, 6, (RCP
, I15b
, RCN
, RCN
, RCN
, oI7b
), cdp
, cdp
),
20207 TCE("ldc", c100000
, ec100000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
20208 TC3("ldcl", c500000
, ec500000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
20209 TCE("stc", c000000
, ec000000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
20210 TC3("stcl", c400000
, ec400000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
20211 TCE("mcr", e000010
, ee000010
, 6, (RCP
, I7b
, RR
, RCN
, RCN
, oI7b
), co_reg
, co_reg
),
20212 TCE("mrc", e100010
, ee100010
, 6, (RCP
, I7b
, APSR_RR
, RCN
, RCN
, oI7b
), co_reg
, co_reg
),
20215 #define ARM_VARIANT & arm_ext_v2s /* ARM 3 - swp instructions. */
20217 CE("swp", 1000090, 3, (RRnpc
, RRnpc
, RRnpcb
), rd_rm_rn
),
20218 C3(swpb
, 1400090, 3, (RRnpc
, RRnpc
, RRnpcb
), rd_rm_rn
),
20221 #define ARM_VARIANT & arm_ext_v3 /* ARM 6 Status register instructions. */
20222 #undef THUMB_VARIANT
20223 #define THUMB_VARIANT & arm_ext_msr
20225 TCE("mrs", 1000000, f3e08000
, 2, (RRnpc
, rPSR
), mrs
, t_mrs
),
20226 TCE("msr", 120f000
, f3808000
, 2, (wPSR
, RR_EXi
), msr
, t_msr
),
20229 #define ARM_VARIANT & arm_ext_v3m /* ARM 7M long multiplies. */
20230 #undef THUMB_VARIANT
20231 #define THUMB_VARIANT & arm_ext_v6t2
20233 TCE("smull", 0c00090
, fb800000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
, t_mull
),
20234 CM("smull","s", 0d00090
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
),
20235 TCE("umull", 0800090, fba00000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
, t_mull
),
20236 CM("umull","s", 0900090, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
),
20237 TCE("smlal", 0e00090
, fbc00000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
, t_mull
),
20238 CM("smlal","s", 0f00090
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
),
20239 TCE("umlal", 0a00090
, fbe00000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
, t_mull
),
20240 CM("umlal","s", 0b00090, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
),
20243 #define ARM_VARIANT & arm_ext_v4 /* ARM Architecture 4. */
20244 #undef THUMB_VARIANT
20245 #define THUMB_VARIANT & arm_ext_v4t
20247 tC3("ldrh", 01000b0
, _ldrh
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
20248 tC3("strh", 00000b0
, _strh
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
20249 tC3("ldrsh", 01000f0
, _ldrsh
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
20250 tC3("ldrsb", 01000d0
, _ldrsb
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
20251 tC3("ldsh", 01000f0
, _ldrsh
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
20252 tC3("ldsb", 01000d0
, _ldrsb
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
20255 #define ARM_VARIANT & arm_ext_v4t_5
20257 /* ARM Architecture 4T. */
20258 /* Note: bx (and blx) are required on V5, even if the processor does
20259 not support Thumb. */
20260 TCE("bx", 12fff10
, 4700, 1, (RR
), bx
, t_bx
),
20263 #define ARM_VARIANT & arm_ext_v5 /* ARM Architecture 5T. */
20264 #undef THUMB_VARIANT
20265 #define THUMB_VARIANT & arm_ext_v5t
20267 /* Note: blx has 2 variants; the .value coded here is for
20268 BLX(2). Only this variant has conditional execution. */
20269 TCE("blx", 12fff30
, 4780, 1, (RR_EXr
), blx
, t_blx
),
20270 TUE("bkpt", 1200070, be00
, 1, (oIffffb
), bkpt
, t_bkpt
),
20272 #undef THUMB_VARIANT
20273 #define THUMB_VARIANT & arm_ext_v6t2
20275 TCE("clz", 16f0f10
, fab0f080
, 2, (RRnpc
, RRnpc
), rd_rm
, t_clz
),
20276 TUF("ldc2", c100000
, fc100000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
20277 TUF("ldc2l", c500000
, fc500000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
20278 TUF("stc2", c000000
, fc000000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
20279 TUF("stc2l", c400000
, fc400000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
20280 TUF("cdp2", e000000
, fe000000
, 6, (RCP
, I15b
, RCN
, RCN
, RCN
, oI7b
), cdp
, cdp
),
20281 TUF("mcr2", e000010
, fe000010
, 6, (RCP
, I7b
, RR
, RCN
, RCN
, oI7b
), co_reg
, co_reg
),
20282 TUF("mrc2", e100010
, fe100010
, 6, (RCP
, I7b
, RR
, RCN
, RCN
, oI7b
), co_reg
, co_reg
),
20285 #define ARM_VARIANT & arm_ext_v5exp /* ARM Architecture 5TExP. */
20286 #undef THUMB_VARIANT
20287 #define THUMB_VARIANT & arm_ext_v5exp
20289 TCE("smlabb", 1000080, fb100000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
20290 TCE("smlatb", 10000a0
, fb100020
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
20291 TCE("smlabt", 10000c0
, fb100010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
20292 TCE("smlatt", 10000e0
, fb100030
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
20294 TCE("smlawb", 1200080, fb300000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
20295 TCE("smlawt", 12000c0
, fb300010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
20297 TCE("smlalbb", 1400080, fbc00080
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smlal
, t_mlal
),
20298 TCE("smlaltb", 14000a0
, fbc000a0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smlal
, t_mlal
),
20299 TCE("smlalbt", 14000c0
, fbc00090
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smlal
, t_mlal
),
20300 TCE("smlaltt", 14000e0
, fbc000b0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smlal
, t_mlal
),
20302 TCE("smulbb", 1600080, fb10f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
20303 TCE("smultb", 16000a0
, fb10f020
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
20304 TCE("smulbt", 16000c0
, fb10f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
20305 TCE("smultt", 16000e0
, fb10f030
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
20307 TCE("smulwb", 12000a0
, fb30f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
20308 TCE("smulwt", 12000e0
, fb30f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
20310 TCE("qadd", 1000050, fa80f080
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rm_rn
, t_simd2
),
20311 TCE("qdadd", 1400050, fa80f090
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rm_rn
, t_simd2
),
20312 TCE("qsub", 1200050, fa80f0a0
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rm_rn
, t_simd2
),
20313 TCE("qdsub", 1600050, fa80f0b0
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rm_rn
, t_simd2
),
20316 #define ARM_VARIANT & arm_ext_v5e /* ARM Architecture 5TE. */
20317 #undef THUMB_VARIANT
20318 #define THUMB_VARIANT & arm_ext_v6t2
20320 TUF("pld", 450f000
, f810f000
, 1, (ADDR
), pld
, t_pld
),
20321 TC3("ldrd", 00000d0
, e8500000
, 3, (RRnpc_npcsp
, oRRnpc_npcsp
, ADDRGLDRS
),
20323 TC3("strd", 00000f0
, e8400000
, 3, (RRnpc_npcsp
, oRRnpc_npcsp
,
20324 ADDRGLDRS
), ldrd
, t_ldstd
),
20326 TCE("mcrr", c400000
, ec400000
, 5, (RCP
, I15b
, RRnpc
, RRnpc
, RCN
), co_reg2c
, co_reg2c
),
20327 TCE("mrrc", c500000
, ec500000
, 5, (RCP
, I15b
, RRnpc
, RRnpc
, RCN
), co_reg2c
, co_reg2c
),
20330 #define ARM_VARIANT & arm_ext_v5j /* ARM Architecture 5TEJ. */
20332 TCE("bxj", 12fff20
, f3c08f00
, 1, (RR
), bxj
, t_bxj
),
20335 #define ARM_VARIANT & arm_ext_v6 /* ARM V6. */
20336 #undef THUMB_VARIANT
20337 #define THUMB_VARIANT & arm_ext_v6
20339 TUF("cpsie", 1080000, b660
, 2, (CPSF
, oI31b
), cpsi
, t_cpsi
),
20340 TUF("cpsid", 10c0000
, b670
, 2, (CPSF
, oI31b
), cpsi
, t_cpsi
),
20341 tCE("rev", 6bf0f30
, _rev
, 2, (RRnpc
, RRnpc
), rd_rm
, t_rev
),
20342 tCE("rev16", 6bf0fb0
, _rev16
, 2, (RRnpc
, RRnpc
), rd_rm
, t_rev
),
20343 tCE("revsh", 6ff0fb0
, _revsh
, 2, (RRnpc
, RRnpc
), rd_rm
, t_rev
),
20344 tCE("sxth", 6bf0070
, _sxth
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
20345 tCE("uxth", 6ff0070
, _uxth
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
20346 tCE("sxtb", 6af0070
, _sxtb
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
20347 tCE("uxtb", 6ef0070
, _uxtb
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
20348 TUF("setend", 1010000, b650
, 1, (ENDI
), setend
, t_setend
),
20350 #undef THUMB_VARIANT
20351 #define THUMB_VARIANT & arm_ext_v6t2_v8m
20353 TCE("ldrex", 1900f9f
, e8500f00
, 2, (RRnpc_npcsp
, ADDR
), ldrex
, t_ldrex
),
20354 TCE("strex", 1800f90
, e8400000
, 3, (RRnpc_npcsp
, RRnpc_npcsp
, ADDR
),
20356 #undef THUMB_VARIANT
20357 #define THUMB_VARIANT & arm_ext_v6t2
20359 TUF("mcrr2", c400000
, fc400000
, 5, (RCP
, I15b
, RRnpc
, RRnpc
, RCN
), co_reg2c
, co_reg2c
),
20360 TUF("mrrc2", c500000
, fc500000
, 5, (RCP
, I15b
, RRnpc
, RRnpc
, RCN
), co_reg2c
, co_reg2c
),
20362 TCE("ssat", 6a00010
, f3000000
, 4, (RRnpc
, I32
, RRnpc
, oSHllar
),ssat
, t_ssat
),
20363 TCE("usat", 6e00010
, f3800000
, 4, (RRnpc
, I31
, RRnpc
, oSHllar
),usat
, t_usat
),
20365 /* ARM V6 not included in V7M. */
20366 #undef THUMB_VARIANT
20367 #define THUMB_VARIANT & arm_ext_v6_notm
20368 TUF("rfeia", 8900a00
, e990c000
, 1, (RRw
), rfe
, rfe
),
20369 TUF("rfe", 8900a00
, e990c000
, 1, (RRw
), rfe
, rfe
),
20370 UF(rfeib
, 9900a00
, 1, (RRw
), rfe
),
20371 UF(rfeda
, 8100a00
, 1, (RRw
), rfe
),
20372 TUF("rfedb", 9100a00
, e810c000
, 1, (RRw
), rfe
, rfe
),
20373 TUF("rfefd", 8900a00
, e990c000
, 1, (RRw
), rfe
, rfe
),
20374 UF(rfefa
, 8100a00
, 1, (RRw
), rfe
),
20375 TUF("rfeea", 9100a00
, e810c000
, 1, (RRw
), rfe
, rfe
),
20376 UF(rfeed
, 9900a00
, 1, (RRw
), rfe
),
20377 TUF("srsia", 8c00500
, e980c000
, 2, (oRRw
, I31w
), srs
, srs
),
20378 TUF("srs", 8c00500
, e980c000
, 2, (oRRw
, I31w
), srs
, srs
),
20379 TUF("srsea", 8c00500
, e980c000
, 2, (oRRw
, I31w
), srs
, srs
),
20380 UF(srsib
, 9c00500
, 2, (oRRw
, I31w
), srs
),
20381 UF(srsfa
, 9c00500
, 2, (oRRw
, I31w
), srs
),
20382 UF(srsda
, 8400500, 2, (oRRw
, I31w
), srs
),
20383 UF(srsed
, 8400500, 2, (oRRw
, I31w
), srs
),
20384 TUF("srsdb", 9400500, e800c000
, 2, (oRRw
, I31w
), srs
, srs
),
20385 TUF("srsfd", 9400500, e800c000
, 2, (oRRw
, I31w
), srs
, srs
),
20386 TUF("cps", 1020000, f3af8100
, 1, (I31b
), imm0
, t_cps
),
20388 /* ARM V6 not included in V7M (eg. integer SIMD). */
20389 #undef THUMB_VARIANT
20390 #define THUMB_VARIANT & arm_ext_v6_dsp
20391 TCE("pkhbt", 6800010, eac00000
, 4, (RRnpc
, RRnpc
, RRnpc
, oSHll
), pkhbt
, t_pkhbt
),
20392 TCE("pkhtb", 6800050, eac00020
, 4, (RRnpc
, RRnpc
, RRnpc
, oSHar
), pkhtb
, t_pkhtb
),
20393 TCE("qadd16", 6200f10
, fa90f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20394 TCE("qadd8", 6200f90
, fa80f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20395 TCE("qasx", 6200f30
, faa0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20396 /* Old name for QASX. */
20397 TCE("qaddsubx",6200f30
, faa0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20398 TCE("qsax", 6200f50
, fae0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20399 /* Old name for QSAX. */
20400 TCE("qsubaddx",6200f50
, fae0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20401 TCE("qsub16", 6200f70
, fad0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20402 TCE("qsub8", 6200ff0
, fac0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20403 TCE("sadd16", 6100f10
, fa90f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20404 TCE("sadd8", 6100f90
, fa80f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20405 TCE("sasx", 6100f30
, faa0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20406 /* Old name for SASX. */
20407 TCE("saddsubx",6100f30
, faa0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20408 TCE("shadd16", 6300f10
, fa90f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20409 TCE("shadd8", 6300f90
, fa80f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20410 TCE("shasx", 6300f30
, faa0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20411 /* Old name for SHASX. */
20412 TCE("shaddsubx", 6300f30
, faa0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20413 TCE("shsax", 6300f50
, fae0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20414 /* Old name for SHSAX. */
20415 TCE("shsubaddx", 6300f50
, fae0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20416 TCE("shsub16", 6300f70
, fad0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20417 TCE("shsub8", 6300ff0
, fac0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20418 TCE("ssax", 6100f50
, fae0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20419 /* Old name for SSAX. */
20420 TCE("ssubaddx",6100f50
, fae0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20421 TCE("ssub16", 6100f70
, fad0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20422 TCE("ssub8", 6100ff0
, fac0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20423 TCE("uadd16", 6500f10
, fa90f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20424 TCE("uadd8", 6500f90
, fa80f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20425 TCE("uasx", 6500f30
, faa0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20426 /* Old name for UASX. */
20427 TCE("uaddsubx",6500f30
, faa0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20428 TCE("uhadd16", 6700f10
, fa90f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20429 TCE("uhadd8", 6700f90
, fa80f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20430 TCE("uhasx", 6700f30
, faa0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20431 /* Old name for UHASX. */
20432 TCE("uhaddsubx", 6700f30
, faa0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20433 TCE("uhsax", 6700f50
, fae0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20434 /* Old name for UHSAX. */
20435 TCE("uhsubaddx", 6700f50
, fae0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20436 TCE("uhsub16", 6700f70
, fad0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20437 TCE("uhsub8", 6700ff0
, fac0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20438 TCE("uqadd16", 6600f10
, fa90f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20439 TCE("uqadd8", 6600f90
, fa80f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20440 TCE("uqasx", 6600f30
, faa0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20441 /* Old name for UQASX. */
20442 TCE("uqaddsubx", 6600f30
, faa0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20443 TCE("uqsax", 6600f50
, fae0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20444 /* Old name for UQSAX. */
20445 TCE("uqsubaddx", 6600f50
, fae0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20446 TCE("uqsub16", 6600f70
, fad0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20447 TCE("uqsub8", 6600ff0
, fac0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20448 TCE("usub16", 6500f70
, fad0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20449 TCE("usax", 6500f50
, fae0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20450 /* Old name for USAX. */
20451 TCE("usubaddx",6500f50
, fae0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20452 TCE("usub8", 6500ff0
, fac0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20453 TCE("sxtah", 6b00070
, fa00f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
20454 TCE("sxtab16", 6800070, fa20f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
20455 TCE("sxtab", 6a00070
, fa40f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
20456 TCE("sxtb16", 68f0070
, fa2ff080
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
20457 TCE("uxtah", 6f00070
, fa10f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
20458 TCE("uxtab16", 6c00070
, fa30f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
20459 TCE("uxtab", 6e00070
, fa50f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
20460 TCE("uxtb16", 6cf0070
, fa3ff080
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
20461 TCE("sel", 6800fb0
, faa0f080
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20462 TCE("smlad", 7000010, fb200000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
20463 TCE("smladx", 7000030, fb200010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
20464 TCE("smlald", 7400010, fbc000c0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
,t_mlal
),
20465 TCE("smlaldx", 7400030, fbc000d0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
,t_mlal
),
20466 TCE("smlsd", 7000050, fb400000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
20467 TCE("smlsdx", 7000070, fb400010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
20468 TCE("smlsld", 7400050, fbd000c0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
,t_mlal
),
20469 TCE("smlsldx", 7400070, fbd000d0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
,t_mlal
),
20470 TCE("smmla", 7500010, fb500000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
20471 TCE("smmlar", 7500030, fb500010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
20472 TCE("smmls", 75000d0
, fb600000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
20473 TCE("smmlsr", 75000f0
, fb600010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
20474 TCE("smmul", 750f010
, fb50f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
20475 TCE("smmulr", 750f030
, fb50f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
20476 TCE("smuad", 700f010
, fb20f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
20477 TCE("smuadx", 700f030
, fb20f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
20478 TCE("smusd", 700f050
, fb40f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
20479 TCE("smusdx", 700f070
, fb40f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
20480 TCE("ssat16", 6a00f30
, f3200000
, 3, (RRnpc
, I16
, RRnpc
), ssat16
, t_ssat16
),
20481 TCE("umaal", 0400090, fbe00060
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
, t_mlal
),
20482 TCE("usad8", 780f010
, fb70f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
20483 TCE("usada8", 7800010, fb700000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
20484 TCE("usat16", 6e00f30
, f3a00000
, 3, (RRnpc
, I15
, RRnpc
), usat16
, t_usat16
),
20487 #define ARM_VARIANT & arm_ext_v6k_v6t2
20488 #undef THUMB_VARIANT
20489 #define THUMB_VARIANT & arm_ext_v6k_v6t2
20491 tCE("yield", 320f001
, _yield
, 0, (), noargs
, t_hint
),
20492 tCE("wfe", 320f002
, _wfe
, 0, (), noargs
, t_hint
),
20493 tCE("wfi", 320f003
, _wfi
, 0, (), noargs
, t_hint
),
20494 tCE("sev", 320f004
, _sev
, 0, (), noargs
, t_hint
),
20496 #undef THUMB_VARIANT
20497 #define THUMB_VARIANT & arm_ext_v6_notm
20498 TCE("ldrexd", 1b00f9f
, e8d0007f
, 3, (RRnpc_npcsp
, oRRnpc_npcsp
, RRnpcb
),
20500 TCE("strexd", 1a00f90
, e8c00070
, 4, (RRnpc_npcsp
, RRnpc_npcsp
, oRRnpc_npcsp
,
20501 RRnpcb
), strexd
, t_strexd
),
20503 #undef THUMB_VARIANT
20504 #define THUMB_VARIANT & arm_ext_v6t2_v8m
20505 TCE("ldrexb", 1d00f9f
, e8d00f4f
, 2, (RRnpc_npcsp
,RRnpcb
),
20507 TCE("ldrexh", 1f00f9f
, e8d00f5f
, 2, (RRnpc_npcsp
, RRnpcb
),
20509 TCE("strexb", 1c00f90
, e8c00f40
, 3, (RRnpc_npcsp
, RRnpc_npcsp
, ADDR
),
20511 TCE("strexh", 1e00f90
, e8c00f50
, 3, (RRnpc_npcsp
, RRnpc_npcsp
, ADDR
),
20513 TUF("clrex", 57ff01f
, f3bf8f2f
, 0, (), noargs
, noargs
),
20516 #define ARM_VARIANT & arm_ext_sec
20517 #undef THUMB_VARIANT
20518 #define THUMB_VARIANT & arm_ext_sec
20520 TCE("smc", 1600070, f7f08000
, 1, (EXPi
), smc
, t_smc
),
20523 #define ARM_VARIANT & arm_ext_virt
20524 #undef THUMB_VARIANT
20525 #define THUMB_VARIANT & arm_ext_virt
20527 TCE("hvc", 1400070, f7e08000
, 1, (EXPi
), hvc
, t_hvc
),
20528 TCE("eret", 160006e
, f3de8f00
, 0, (), noargs
, noargs
),
20531 #define ARM_VARIANT & arm_ext_pan
20532 #undef THUMB_VARIANT
20533 #define THUMB_VARIANT & arm_ext_pan
20535 TUF("setpan", 1100000, b610
, 1, (I7
), setpan
, t_setpan
),
20538 #define ARM_VARIANT & arm_ext_v6t2
20539 #undef THUMB_VARIANT
20540 #define THUMB_VARIANT & arm_ext_v6t2
20542 TCE("bfc", 7c0001f
, f36f0000
, 3, (RRnpc
, I31
, I32
), bfc
, t_bfc
),
20543 TCE("bfi", 7c00010
, f3600000
, 4, (RRnpc
, RRnpc_I0
, I31
, I32
), bfi
, t_bfi
),
20544 TCE("sbfx", 7a00050
, f3400000
, 4, (RR
, RR
, I31
, I32
), bfx
, t_bfx
),
20545 TCE("ubfx", 7e00050
, f3c00000
, 4, (RR
, RR
, I31
, I32
), bfx
, t_bfx
),
20547 TCE("mls", 0600090, fb000010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mlas
, t_mla
),
20548 TCE("rbit", 6ff0f30
, fa90f0a0
, 2, (RR
, RR
), rd_rm
, t_rbit
),
20550 TC3("ldrht", 03000b0
, f8300e00
, 2, (RRnpc_npcsp
, ADDR
), ldsttv4
, t_ldstt
),
20551 TC3("ldrsht", 03000f0
, f9300e00
, 2, (RRnpc_npcsp
, ADDR
), ldsttv4
, t_ldstt
),
20552 TC3("ldrsbt", 03000d0
, f9100e00
, 2, (RRnpc_npcsp
, ADDR
), ldsttv4
, t_ldstt
),
20553 TC3("strht", 02000b0
, f8200e00
, 2, (RRnpc_npcsp
, ADDR
), ldsttv4
, t_ldstt
),
20556 #define ARM_VARIANT & arm_ext_v3
20557 #undef THUMB_VARIANT
20558 #define THUMB_VARIANT & arm_ext_v6t2
20560 TUE("csdb", 320f014
, f3af8014
, 0, (), noargs
, t_csdb
),
20561 TUF("ssbb", 57ff040
, f3bf8f40
, 0, (), noargs
, t_csdb
),
20562 TUF("pssbb", 57ff044
, f3bf8f44
, 0, (), noargs
, t_csdb
),
20565 #define ARM_VARIANT & arm_ext_v6t2
20566 #undef THUMB_VARIANT
20567 #define THUMB_VARIANT & arm_ext_v6t2_v8m
20568 TCE("movw", 3000000, f2400000
, 2, (RRnpc
, HALF
), mov16
, t_mov16
),
20569 TCE("movt", 3400000, f2c00000
, 2, (RRnpc
, HALF
), mov16
, t_mov16
),
20571 /* Thumb-only instructions. */
20573 #define ARM_VARIANT NULL
20574 TUE("cbnz", 0, b900
, 2, (RR
, EXP
), 0, t_cbz
),
20575 TUE("cbz", 0, b100
, 2, (RR
, EXP
), 0, t_cbz
),
20577 /* ARM does not really have an IT instruction, so always allow it.
20578 The opcode is copied from Thumb in order to allow warnings in
20579 -mimplicit-it=[never | arm] modes. */
20581 #define ARM_VARIANT & arm_ext_v1
20582 #undef THUMB_VARIANT
20583 #define THUMB_VARIANT & arm_ext_v6t2
20585 TUE("it", bf08
, bf08
, 1, (COND
), it
, t_it
),
20586 TUE("itt", bf0c
, bf0c
, 1, (COND
), it
, t_it
),
20587 TUE("ite", bf04
, bf04
, 1, (COND
), it
, t_it
),
20588 TUE("ittt", bf0e
, bf0e
, 1, (COND
), it
, t_it
),
20589 TUE("itet", bf06
, bf06
, 1, (COND
), it
, t_it
),
20590 TUE("itte", bf0a
, bf0a
, 1, (COND
), it
, t_it
),
20591 TUE("itee", bf02
, bf02
, 1, (COND
), it
, t_it
),
20592 TUE("itttt", bf0f
, bf0f
, 1, (COND
), it
, t_it
),
20593 TUE("itett", bf07
, bf07
, 1, (COND
), it
, t_it
),
20594 TUE("ittet", bf0b
, bf0b
, 1, (COND
), it
, t_it
),
20595 TUE("iteet", bf03
, bf03
, 1, (COND
), it
, t_it
),
20596 TUE("ittte", bf0d
, bf0d
, 1, (COND
), it
, t_it
),
20597 TUE("itete", bf05
, bf05
, 1, (COND
), it
, t_it
),
20598 TUE("ittee", bf09
, bf09
, 1, (COND
), it
, t_it
),
20599 TUE("iteee", bf01
, bf01
, 1, (COND
), it
, t_it
),
20600 /* ARM/Thumb-2 instructions with no Thumb-1 equivalent. */
20601 TC3("rrx", 01a00060
, ea4f0030
, 2, (RR
, RR
), rd_rm
, t_rrx
),
20602 TC3("rrxs", 01b00060
, ea5f0030
, 2, (RR
, RR
), rd_rm
, t_rrx
),
20604 /* Thumb2 only instructions. */
20606 #define ARM_VARIANT NULL
20608 TCE("addw", 0, f2000000
, 3, (RR
, RR
, EXPi
), 0, t_add_sub_w
),
20609 TCE("subw", 0, f2a00000
, 3, (RR
, RR
, EXPi
), 0, t_add_sub_w
),
20610 TCE("orn", 0, ea600000
, 3, (RR
, oRR
, SH
), 0, t_orn
),
20611 TCE("orns", 0, ea700000
, 3, (RR
, oRR
, SH
), 0, t_orn
),
20612 TCE("tbb", 0, e8d0f000
, 1, (TB
), 0, t_tb
),
20613 TCE("tbh", 0, e8d0f010
, 1, (TB
), 0, t_tb
),
20615 /* Hardware division instructions. */
20617 #define ARM_VARIANT & arm_ext_adiv
20618 #undef THUMB_VARIANT
20619 #define THUMB_VARIANT & arm_ext_div
20621 TCE("sdiv", 710f010
, fb90f0f0
, 3, (RR
, oRR
, RR
), div
, t_div
),
20622 TCE("udiv", 730f010
, fbb0f0f0
, 3, (RR
, oRR
, RR
), div
, t_div
),
20624 /* ARM V6M/V7 instructions. */
20626 #define ARM_VARIANT & arm_ext_barrier
20627 #undef THUMB_VARIANT
20628 #define THUMB_VARIANT & arm_ext_barrier
20630 TUF("dmb", 57ff050
, f3bf8f50
, 1, (oBARRIER_I15
), barrier
, barrier
),
20631 TUF("dsb", 57ff040
, f3bf8f40
, 1, (oBARRIER_I15
), barrier
, barrier
),
20632 TUF("isb", 57ff060
, f3bf8f60
, 1, (oBARRIER_I15
), barrier
, barrier
),
20634 /* ARM V7 instructions. */
20636 #define ARM_VARIANT & arm_ext_v7
20637 #undef THUMB_VARIANT
20638 #define THUMB_VARIANT & arm_ext_v7
20640 TUF("pli", 450f000
, f910f000
, 1, (ADDR
), pli
, t_pld
),
20641 TCE("dbg", 320f0f0
, f3af80f0
, 1, (I15
), dbg
, t_dbg
),
20644 #define ARM_VARIANT & arm_ext_mp
20645 #undef THUMB_VARIANT
20646 #define THUMB_VARIANT & arm_ext_mp
20648 TUF("pldw", 410f000
, f830f000
, 1, (ADDR
), pld
, t_pld
),
20650 /* AArchv8 instructions. */
20652 #define ARM_VARIANT & arm_ext_v8
20654 /* Instructions shared between armv8-a and armv8-m. */
20655 #undef THUMB_VARIANT
20656 #define THUMB_VARIANT & arm_ext_atomics
20658 TCE("lda", 1900c9f
, e8d00faf
, 2, (RRnpc
, RRnpcb
), rd_rn
, rd_rn
),
20659 TCE("ldab", 1d00c9f
, e8d00f8f
, 2, (RRnpc
, RRnpcb
), rd_rn
, rd_rn
),
20660 TCE("ldah", 1f00c9f
, e8d00f9f
, 2, (RRnpc
, RRnpcb
), rd_rn
, rd_rn
),
20661 TCE("stl", 180fc90
, e8c00faf
, 2, (RRnpc
, RRnpcb
), rm_rn
, rd_rn
),
20662 TCE("stlb", 1c0fc90
, e8c00f8f
, 2, (RRnpc
, RRnpcb
), rm_rn
, rd_rn
),
20663 TCE("stlh", 1e0fc90
, e8c00f9f
, 2, (RRnpc
, RRnpcb
), rm_rn
, rd_rn
),
20664 TCE("ldaex", 1900e9f
, e8d00fef
, 2, (RRnpc
, RRnpcb
), rd_rn
, rd_rn
),
20665 TCE("ldaexb", 1d00e9f
, e8d00fcf
, 2, (RRnpc
,RRnpcb
), rd_rn
, rd_rn
),
20666 TCE("ldaexh", 1f00e9f
, e8d00fdf
, 2, (RRnpc
, RRnpcb
), rd_rn
, rd_rn
),
20667 TCE("stlex", 1800e90
, e8c00fe0
, 3, (RRnpc
, RRnpc
, RRnpcb
),
20669 TCE("stlexb", 1c00e90
, e8c00fc0
, 3, (RRnpc
, RRnpc
, RRnpcb
),
20671 TCE("stlexh", 1e00e90
, e8c00fd0
, 3, (RRnpc
, RRnpc
, RRnpcb
),
20673 #undef THUMB_VARIANT
20674 #define THUMB_VARIANT & arm_ext_v8
20676 tCE("sevl", 320f005
, _sevl
, 0, (), noargs
, t_hint
),
20677 TCE("ldaexd", 1b00e9f
, e8d000ff
, 3, (RRnpc
, oRRnpc
, RRnpcb
),
20679 TCE("stlexd", 1a00e90
, e8c000f0
, 4, (RRnpc
, RRnpc
, oRRnpc
, RRnpcb
),
20682 /* Defined in V8 but is in undefined encoding space for earlier
20683 architectures. However earlier architectures are required to treat
20684 this instuction as a semihosting trap as well. Hence while not explicitly
20685 defined as such, it is in fact correct to define the instruction for all
20687 #undef THUMB_VARIANT
20688 #define THUMB_VARIANT & arm_ext_v1
20690 #define ARM_VARIANT & arm_ext_v1
20691 TUE("hlt", 1000070, ba80
, 1, (oIffffb
), bkpt
, t_hlt
),
20693 /* ARMv8 T32 only. */
20695 #define ARM_VARIANT NULL
20696 TUF("dcps1", 0, f78f8001
, 0, (), noargs
, noargs
),
20697 TUF("dcps2", 0, f78f8002
, 0, (), noargs
, noargs
),
20698 TUF("dcps3", 0, f78f8003
, 0, (), noargs
, noargs
),
20700 /* FP for ARMv8. */
20702 #define ARM_VARIANT & fpu_vfp_ext_armv8xd
20703 #undef THUMB_VARIANT
20704 #define THUMB_VARIANT & fpu_vfp_ext_armv8xd
20706 nUF(vseleq
, _vseleq
, 3, (RVSD
, RVSD
, RVSD
), vsel
),
20707 nUF(vselvs
, _vselvs
, 3, (RVSD
, RVSD
, RVSD
), vsel
),
20708 nUF(vselge
, _vselge
, 3, (RVSD
, RVSD
, RVSD
), vsel
),
20709 nUF(vselgt
, _vselgt
, 3, (RVSD
, RVSD
, RVSD
), vsel
),
20710 nUF(vmaxnm
, _vmaxnm
, 3, (RNSDQ
, oRNSDQ
, RNSDQ
), vmaxnm
),
20711 nUF(vminnm
, _vminnm
, 3, (RNSDQ
, oRNSDQ
, RNSDQ
), vmaxnm
),
20712 nUF(vcvta
, _vcvta
, 2, (RNSDQ
, oRNSDQ
), neon_cvta
),
20713 nUF(vcvtn
, _vcvta
, 2, (RNSDQ
, oRNSDQ
), neon_cvtn
),
20714 nUF(vcvtp
, _vcvta
, 2, (RNSDQ
, oRNSDQ
), neon_cvtp
),
20715 nUF(vcvtm
, _vcvta
, 2, (RNSDQ
, oRNSDQ
), neon_cvtm
),
20716 nCE(vrintr
, _vrintr
, 2, (RNSDQ
, oRNSDQ
), vrintr
),
20717 nCE(vrintz
, _vrintr
, 2, (RNSDQ
, oRNSDQ
), vrintz
),
20718 nCE(vrintx
, _vrintr
, 2, (RNSDQ
, oRNSDQ
), vrintx
),
20719 nUF(vrinta
, _vrinta
, 2, (RNSDQ
, oRNSDQ
), vrinta
),
20720 nUF(vrintn
, _vrinta
, 2, (RNSDQ
, oRNSDQ
), vrintn
),
20721 nUF(vrintp
, _vrinta
, 2, (RNSDQ
, oRNSDQ
), vrintp
),
20722 nUF(vrintm
, _vrinta
, 2, (RNSDQ
, oRNSDQ
), vrintm
),
20724 /* Crypto v1 extensions. */
20726 #define ARM_VARIANT & fpu_crypto_ext_armv8
20727 #undef THUMB_VARIANT
20728 #define THUMB_VARIANT & fpu_crypto_ext_armv8
20730 nUF(aese
, _aes
, 2, (RNQ
, RNQ
), aese
),
20731 nUF(aesd
, _aes
, 2, (RNQ
, RNQ
), aesd
),
20732 nUF(aesmc
, _aes
, 2, (RNQ
, RNQ
), aesmc
),
20733 nUF(aesimc
, _aes
, 2, (RNQ
, RNQ
), aesimc
),
20734 nUF(sha1c
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha1c
),
20735 nUF(sha1p
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha1p
),
20736 nUF(sha1m
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha1m
),
20737 nUF(sha1su0
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha1su0
),
20738 nUF(sha256h
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha256h
),
20739 nUF(sha256h2
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha256h2
),
20740 nUF(sha256su1
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha256su1
),
20741 nUF(sha1h
, _sha1h
, 2, (RNQ
, RNQ
), sha1h
),
20742 nUF(sha1su1
, _sha2op
, 2, (RNQ
, RNQ
), sha1su1
),
20743 nUF(sha256su0
, _sha2op
, 2, (RNQ
, RNQ
), sha256su0
),
20746 #define ARM_VARIANT & crc_ext_armv8
20747 #undef THUMB_VARIANT
20748 #define THUMB_VARIANT & crc_ext_armv8
20749 TUEc("crc32b", 1000040, fac0f080
, 3, (RR
, oRR
, RR
), crc32b
),
20750 TUEc("crc32h", 1200040, fac0f090
, 3, (RR
, oRR
, RR
), crc32h
),
20751 TUEc("crc32w", 1400040, fac0f0a0
, 3, (RR
, oRR
, RR
), crc32w
),
20752 TUEc("crc32cb",1000240, fad0f080
, 3, (RR
, oRR
, RR
), crc32cb
),
20753 TUEc("crc32ch",1200240, fad0f090
, 3, (RR
, oRR
, RR
), crc32ch
),
20754 TUEc("crc32cw",1400240, fad0f0a0
, 3, (RR
, oRR
, RR
), crc32cw
),
20756 /* ARMv8.2 RAS extension. */
20758 #define ARM_VARIANT & arm_ext_ras
20759 #undef THUMB_VARIANT
20760 #define THUMB_VARIANT & arm_ext_ras
20761 TUE ("esb", 320f010
, f3af8010
, 0, (), noargs
, noargs
),
20764 #define ARM_VARIANT & arm_ext_v8_3
20765 #undef THUMB_VARIANT
20766 #define THUMB_VARIANT & arm_ext_v8_3
20767 NCE (vjcvt
, eb90bc0
, 2, (RVS
, RVD
), vjcvt
),
20768 NUF (vcmla
, 0, 4, (RNDQ
, RNDQ
, RNDQ_RNSC
, EXPi
), vcmla
),
20769 NUF (vcadd
, 0, 4, (RNDQ
, RNDQ
, RNDQ
, EXPi
), vcadd
),
20772 #define ARM_VARIANT & fpu_neon_ext_dotprod
20773 #undef THUMB_VARIANT
20774 #define THUMB_VARIANT & fpu_neon_ext_dotprod
20775 NUF (vsdot
, d00
, 3, (RNDQ
, RNDQ
, RNDQ_RNSC
), neon_dotproduct_s
),
20776 NUF (vudot
, d00
, 3, (RNDQ
, RNDQ
, RNDQ_RNSC
), neon_dotproduct_u
),
20779 #define ARM_VARIANT & fpu_fpa_ext_v1 /* Core FPA instruction set (V1). */
20780 #undef THUMB_VARIANT
20781 #define THUMB_VARIANT NULL
20783 cCE("wfs", e200110
, 1, (RR
), rd
),
20784 cCE("rfs", e300110
, 1, (RR
), rd
),
20785 cCE("wfc", e400110
, 1, (RR
), rd
),
20786 cCE("rfc", e500110
, 1, (RR
), rd
),
20788 cCL("ldfs", c100100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
20789 cCL("ldfd", c108100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
20790 cCL("ldfe", c500100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
20791 cCL("ldfp", c508100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
20793 cCL("stfs", c000100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
20794 cCL("stfd", c008100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
20795 cCL("stfe", c400100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
20796 cCL("stfp", c408100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
20798 cCL("mvfs", e008100
, 2, (RF
, RF_IF
), rd_rm
),
20799 cCL("mvfsp", e008120
, 2, (RF
, RF_IF
), rd_rm
),
20800 cCL("mvfsm", e008140
, 2, (RF
, RF_IF
), rd_rm
),
20801 cCL("mvfsz", e008160
, 2, (RF
, RF_IF
), rd_rm
),
20802 cCL("mvfd", e008180
, 2, (RF
, RF_IF
), rd_rm
),
20803 cCL("mvfdp", e0081a0
, 2, (RF
, RF_IF
), rd_rm
),
20804 cCL("mvfdm", e0081c0
, 2, (RF
, RF_IF
), rd_rm
),
20805 cCL("mvfdz", e0081e0
, 2, (RF
, RF_IF
), rd_rm
),
20806 cCL("mvfe", e088100
, 2, (RF
, RF_IF
), rd_rm
),
20807 cCL("mvfep", e088120
, 2, (RF
, RF_IF
), rd_rm
),
20808 cCL("mvfem", e088140
, 2, (RF
, RF_IF
), rd_rm
),
20809 cCL("mvfez", e088160
, 2, (RF
, RF_IF
), rd_rm
),
20811 cCL("mnfs", e108100
, 2, (RF
, RF_IF
), rd_rm
),
20812 cCL("mnfsp", e108120
, 2, (RF
, RF_IF
), rd_rm
),
20813 cCL("mnfsm", e108140
, 2, (RF
, RF_IF
), rd_rm
),
20814 cCL("mnfsz", e108160
, 2, (RF
, RF_IF
), rd_rm
),
20815 cCL("mnfd", e108180
, 2, (RF
, RF_IF
), rd_rm
),
20816 cCL("mnfdp", e1081a0
, 2, (RF
, RF_IF
), rd_rm
),
20817 cCL("mnfdm", e1081c0
, 2, (RF
, RF_IF
), rd_rm
),
20818 cCL("mnfdz", e1081e0
, 2, (RF
, RF_IF
), rd_rm
),
20819 cCL("mnfe", e188100
, 2, (RF
, RF_IF
), rd_rm
),
20820 cCL("mnfep", e188120
, 2, (RF
, RF_IF
), rd_rm
),
20821 cCL("mnfem", e188140
, 2, (RF
, RF_IF
), rd_rm
),
20822 cCL("mnfez", e188160
, 2, (RF
, RF_IF
), rd_rm
),
20824 cCL("abss", e208100
, 2, (RF
, RF_IF
), rd_rm
),
20825 cCL("abssp", e208120
, 2, (RF
, RF_IF
), rd_rm
),
20826 cCL("abssm", e208140
, 2, (RF
, RF_IF
), rd_rm
),
20827 cCL("abssz", e208160
, 2, (RF
, RF_IF
), rd_rm
),
20828 cCL("absd", e208180
, 2, (RF
, RF_IF
), rd_rm
),
20829 cCL("absdp", e2081a0
, 2, (RF
, RF_IF
), rd_rm
),
20830 cCL("absdm", e2081c0
, 2, (RF
, RF_IF
), rd_rm
),
20831 cCL("absdz", e2081e0
, 2, (RF
, RF_IF
), rd_rm
),
20832 cCL("abse", e288100
, 2, (RF
, RF_IF
), rd_rm
),
20833 cCL("absep", e288120
, 2, (RF
, RF_IF
), rd_rm
),
20834 cCL("absem", e288140
, 2, (RF
, RF_IF
), rd_rm
),
20835 cCL("absez", e288160
, 2, (RF
, RF_IF
), rd_rm
),
20837 cCL("rnds", e308100
, 2, (RF
, RF_IF
), rd_rm
),
20838 cCL("rndsp", e308120
, 2, (RF
, RF_IF
), rd_rm
),
20839 cCL("rndsm", e308140
, 2, (RF
, RF_IF
), rd_rm
),
20840 cCL("rndsz", e308160
, 2, (RF
, RF_IF
), rd_rm
),
20841 cCL("rndd", e308180
, 2, (RF
, RF_IF
), rd_rm
),
20842 cCL("rnddp", e3081a0
, 2, (RF
, RF_IF
), rd_rm
),
20843 cCL("rnddm", e3081c0
, 2, (RF
, RF_IF
), rd_rm
),
20844 cCL("rnddz", e3081e0
, 2, (RF
, RF_IF
), rd_rm
),
20845 cCL("rnde", e388100
, 2, (RF
, RF_IF
), rd_rm
),
20846 cCL("rndep", e388120
, 2, (RF
, RF_IF
), rd_rm
),
20847 cCL("rndem", e388140
, 2, (RF
, RF_IF
), rd_rm
),
20848 cCL("rndez", e388160
, 2, (RF
, RF_IF
), rd_rm
),
20850 cCL("sqts", e408100
, 2, (RF
, RF_IF
), rd_rm
),
20851 cCL("sqtsp", e408120
, 2, (RF
, RF_IF
), rd_rm
),
20852 cCL("sqtsm", e408140
, 2, (RF
, RF_IF
), rd_rm
),
20853 cCL("sqtsz", e408160
, 2, (RF
, RF_IF
), rd_rm
),
20854 cCL("sqtd", e408180
, 2, (RF
, RF_IF
), rd_rm
),
20855 cCL("sqtdp", e4081a0
, 2, (RF
, RF_IF
), rd_rm
),
20856 cCL("sqtdm", e4081c0
, 2, (RF
, RF_IF
), rd_rm
),
20857 cCL("sqtdz", e4081e0
, 2, (RF
, RF_IF
), rd_rm
),
20858 cCL("sqte", e488100
, 2, (RF
, RF_IF
), rd_rm
),
20859 cCL("sqtep", e488120
, 2, (RF
, RF_IF
), rd_rm
),
20860 cCL("sqtem", e488140
, 2, (RF
, RF_IF
), rd_rm
),
20861 cCL("sqtez", e488160
, 2, (RF
, RF_IF
), rd_rm
),
20863 cCL("logs", e508100
, 2, (RF
, RF_IF
), rd_rm
),
20864 cCL("logsp", e508120
, 2, (RF
, RF_IF
), rd_rm
),
20865 cCL("logsm", e508140
, 2, (RF
, RF_IF
), rd_rm
),
20866 cCL("logsz", e508160
, 2, (RF
, RF_IF
), rd_rm
),
20867 cCL("logd", e508180
, 2, (RF
, RF_IF
), rd_rm
),
20868 cCL("logdp", e5081a0
, 2, (RF
, RF_IF
), rd_rm
),
20869 cCL("logdm", e5081c0
, 2, (RF
, RF_IF
), rd_rm
),
20870 cCL("logdz", e5081e0
, 2, (RF
, RF_IF
), rd_rm
),
20871 cCL("loge", e588100
, 2, (RF
, RF_IF
), rd_rm
),
20872 cCL("logep", e588120
, 2, (RF
, RF_IF
), rd_rm
),
20873 cCL("logem", e588140
, 2, (RF
, RF_IF
), rd_rm
),
20874 cCL("logez", e588160
, 2, (RF
, RF_IF
), rd_rm
),
20876 cCL("lgns", e608100
, 2, (RF
, RF_IF
), rd_rm
),
20877 cCL("lgnsp", e608120
, 2, (RF
, RF_IF
), rd_rm
),
20878 cCL("lgnsm", e608140
, 2, (RF
, RF_IF
), rd_rm
),
20879 cCL("lgnsz", e608160
, 2, (RF
, RF_IF
), rd_rm
),
20880 cCL("lgnd", e608180
, 2, (RF
, RF_IF
), rd_rm
),
20881 cCL("lgndp", e6081a0
, 2, (RF
, RF_IF
), rd_rm
),
20882 cCL("lgndm", e6081c0
, 2, (RF
, RF_IF
), rd_rm
),
20883 cCL("lgndz", e6081e0
, 2, (RF
, RF_IF
), rd_rm
),
20884 cCL("lgne", e688100
, 2, (RF
, RF_IF
), rd_rm
),
20885 cCL("lgnep", e688120
, 2, (RF
, RF_IF
), rd_rm
),
20886 cCL("lgnem", e688140
, 2, (RF
, RF_IF
), rd_rm
),
20887 cCL("lgnez", e688160
, 2, (RF
, RF_IF
), rd_rm
),
20889 cCL("exps", e708100
, 2, (RF
, RF_IF
), rd_rm
),
20890 cCL("expsp", e708120
, 2, (RF
, RF_IF
), rd_rm
),
20891 cCL("expsm", e708140
, 2, (RF
, RF_IF
), rd_rm
),
20892 cCL("expsz", e708160
, 2, (RF
, RF_IF
), rd_rm
),
20893 cCL("expd", e708180
, 2, (RF
, RF_IF
), rd_rm
),
20894 cCL("expdp", e7081a0
, 2, (RF
, RF_IF
), rd_rm
),
20895 cCL("expdm", e7081c0
, 2, (RF
, RF_IF
), rd_rm
),
20896 cCL("expdz", e7081e0
, 2, (RF
, RF_IF
), rd_rm
),
20897 cCL("expe", e788100
, 2, (RF
, RF_IF
), rd_rm
),
20898 cCL("expep", e788120
, 2, (RF
, RF_IF
), rd_rm
),
20899 cCL("expem", e788140
, 2, (RF
, RF_IF
), rd_rm
),
20900 cCL("expdz", e788160
, 2, (RF
, RF_IF
), rd_rm
),
20902 cCL("sins", e808100
, 2, (RF
, RF_IF
), rd_rm
),
20903 cCL("sinsp", e808120
, 2, (RF
, RF_IF
), rd_rm
),
20904 cCL("sinsm", e808140
, 2, (RF
, RF_IF
), rd_rm
),
20905 cCL("sinsz", e808160
, 2, (RF
, RF_IF
), rd_rm
),
20906 cCL("sind", e808180
, 2, (RF
, RF_IF
), rd_rm
),
20907 cCL("sindp", e8081a0
, 2, (RF
, RF_IF
), rd_rm
),
20908 cCL("sindm", e8081c0
, 2, (RF
, RF_IF
), rd_rm
),
20909 cCL("sindz", e8081e0
, 2, (RF
, RF_IF
), rd_rm
),
20910 cCL("sine", e888100
, 2, (RF
, RF_IF
), rd_rm
),
20911 cCL("sinep", e888120
, 2, (RF
, RF_IF
), rd_rm
),
20912 cCL("sinem", e888140
, 2, (RF
, RF_IF
), rd_rm
),
20913 cCL("sinez", e888160
, 2, (RF
, RF_IF
), rd_rm
),
20915 cCL("coss", e908100
, 2, (RF
, RF_IF
), rd_rm
),
20916 cCL("cossp", e908120
, 2, (RF
, RF_IF
), rd_rm
),
20917 cCL("cossm", e908140
, 2, (RF
, RF_IF
), rd_rm
),
20918 cCL("cossz", e908160
, 2, (RF
, RF_IF
), rd_rm
),
20919 cCL("cosd", e908180
, 2, (RF
, RF_IF
), rd_rm
),
20920 cCL("cosdp", e9081a0
, 2, (RF
, RF_IF
), rd_rm
),
20921 cCL("cosdm", e9081c0
, 2, (RF
, RF_IF
), rd_rm
),
20922 cCL("cosdz", e9081e0
, 2, (RF
, RF_IF
), rd_rm
),
20923 cCL("cose", e988100
, 2, (RF
, RF_IF
), rd_rm
),
20924 cCL("cosep", e988120
, 2, (RF
, RF_IF
), rd_rm
),
20925 cCL("cosem", e988140
, 2, (RF
, RF_IF
), rd_rm
),
20926 cCL("cosez", e988160
, 2, (RF
, RF_IF
), rd_rm
),
20928 cCL("tans", ea08100
, 2, (RF
, RF_IF
), rd_rm
),
20929 cCL("tansp", ea08120
, 2, (RF
, RF_IF
), rd_rm
),
20930 cCL("tansm", ea08140
, 2, (RF
, RF_IF
), rd_rm
),
20931 cCL("tansz", ea08160
, 2, (RF
, RF_IF
), rd_rm
),
20932 cCL("tand", ea08180
, 2, (RF
, RF_IF
), rd_rm
),
20933 cCL("tandp", ea081a0
, 2, (RF
, RF_IF
), rd_rm
),
20934 cCL("tandm", ea081c0
, 2, (RF
, RF_IF
), rd_rm
),
20935 cCL("tandz", ea081e0
, 2, (RF
, RF_IF
), rd_rm
),
20936 cCL("tane", ea88100
, 2, (RF
, RF_IF
), rd_rm
),
20937 cCL("tanep", ea88120
, 2, (RF
, RF_IF
), rd_rm
),
20938 cCL("tanem", ea88140
, 2, (RF
, RF_IF
), rd_rm
),
20939 cCL("tanez", ea88160
, 2, (RF
, RF_IF
), rd_rm
),
20941 cCL("asns", eb08100
, 2, (RF
, RF_IF
), rd_rm
),
20942 cCL("asnsp", eb08120
, 2, (RF
, RF_IF
), rd_rm
),
20943 cCL("asnsm", eb08140
, 2, (RF
, RF_IF
), rd_rm
),
20944 cCL("asnsz", eb08160
, 2, (RF
, RF_IF
), rd_rm
),
20945 cCL("asnd", eb08180
, 2, (RF
, RF_IF
), rd_rm
),
20946 cCL("asndp", eb081a0
, 2, (RF
, RF_IF
), rd_rm
),
20947 cCL("asndm", eb081c0
, 2, (RF
, RF_IF
), rd_rm
),
20948 cCL("asndz", eb081e0
, 2, (RF
, RF_IF
), rd_rm
),
20949 cCL("asne", eb88100
, 2, (RF
, RF_IF
), rd_rm
),
20950 cCL("asnep", eb88120
, 2, (RF
, RF_IF
), rd_rm
),
20951 cCL("asnem", eb88140
, 2, (RF
, RF_IF
), rd_rm
),
20952 cCL("asnez", eb88160
, 2, (RF
, RF_IF
), rd_rm
),
20954 cCL("acss", ec08100
, 2, (RF
, RF_IF
), rd_rm
),
20955 cCL("acssp", ec08120
, 2, (RF
, RF_IF
), rd_rm
),
20956 cCL("acssm", ec08140
, 2, (RF
, RF_IF
), rd_rm
),
20957 cCL("acssz", ec08160
, 2, (RF
, RF_IF
), rd_rm
),
20958 cCL("acsd", ec08180
, 2, (RF
, RF_IF
), rd_rm
),
20959 cCL("acsdp", ec081a0
, 2, (RF
, RF_IF
), rd_rm
),
20960 cCL("acsdm", ec081c0
, 2, (RF
, RF_IF
), rd_rm
),
20961 cCL("acsdz", ec081e0
, 2, (RF
, RF_IF
), rd_rm
),
20962 cCL("acse", ec88100
, 2, (RF
, RF_IF
), rd_rm
),
20963 cCL("acsep", ec88120
, 2, (RF
, RF_IF
), rd_rm
),
20964 cCL("acsem", ec88140
, 2, (RF
, RF_IF
), rd_rm
),
20965 cCL("acsez", ec88160
, 2, (RF
, RF_IF
), rd_rm
),
20967 cCL("atns", ed08100
, 2, (RF
, RF_IF
), rd_rm
),
20968 cCL("atnsp", ed08120
, 2, (RF
, RF_IF
), rd_rm
),
20969 cCL("atnsm", ed08140
, 2, (RF
, RF_IF
), rd_rm
),
20970 cCL("atnsz", ed08160
, 2, (RF
, RF_IF
), rd_rm
),
20971 cCL("atnd", ed08180
, 2, (RF
, RF_IF
), rd_rm
),
20972 cCL("atndp", ed081a0
, 2, (RF
, RF_IF
), rd_rm
),
20973 cCL("atndm", ed081c0
, 2, (RF
, RF_IF
), rd_rm
),
20974 cCL("atndz", ed081e0
, 2, (RF
, RF_IF
), rd_rm
),
20975 cCL("atne", ed88100
, 2, (RF
, RF_IF
), rd_rm
),
20976 cCL("atnep", ed88120
, 2, (RF
, RF_IF
), rd_rm
),
20977 cCL("atnem", ed88140
, 2, (RF
, RF_IF
), rd_rm
),
20978 cCL("atnez", ed88160
, 2, (RF
, RF_IF
), rd_rm
),
20980 cCL("urds", ee08100
, 2, (RF
, RF_IF
), rd_rm
),
20981 cCL("urdsp", ee08120
, 2, (RF
, RF_IF
), rd_rm
),
20982 cCL("urdsm", ee08140
, 2, (RF
, RF_IF
), rd_rm
),
20983 cCL("urdsz", ee08160
, 2, (RF
, RF_IF
), rd_rm
),
20984 cCL("urdd", ee08180
, 2, (RF
, RF_IF
), rd_rm
),
20985 cCL("urddp", ee081a0
, 2, (RF
, RF_IF
), rd_rm
),
20986 cCL("urddm", ee081c0
, 2, (RF
, RF_IF
), rd_rm
),
20987 cCL("urddz", ee081e0
, 2, (RF
, RF_IF
), rd_rm
),
20988 cCL("urde", ee88100
, 2, (RF
, RF_IF
), rd_rm
),
20989 cCL("urdep", ee88120
, 2, (RF
, RF_IF
), rd_rm
),
20990 cCL("urdem", ee88140
, 2, (RF
, RF_IF
), rd_rm
),
20991 cCL("urdez", ee88160
, 2, (RF
, RF_IF
), rd_rm
),
20993 cCL("nrms", ef08100
, 2, (RF
, RF_IF
), rd_rm
),
20994 cCL("nrmsp", ef08120
, 2, (RF
, RF_IF
), rd_rm
),
20995 cCL("nrmsm", ef08140
, 2, (RF
, RF_IF
), rd_rm
),
20996 cCL("nrmsz", ef08160
, 2, (RF
, RF_IF
), rd_rm
),
20997 cCL("nrmd", ef08180
, 2, (RF
, RF_IF
), rd_rm
),
20998 cCL("nrmdp", ef081a0
, 2, (RF
, RF_IF
), rd_rm
),
20999 cCL("nrmdm", ef081c0
, 2, (RF
, RF_IF
), rd_rm
),
21000 cCL("nrmdz", ef081e0
, 2, (RF
, RF_IF
), rd_rm
),
21001 cCL("nrme", ef88100
, 2, (RF
, RF_IF
), rd_rm
),
21002 cCL("nrmep", ef88120
, 2, (RF
, RF_IF
), rd_rm
),
21003 cCL("nrmem", ef88140
, 2, (RF
, RF_IF
), rd_rm
),
21004 cCL("nrmez", ef88160
, 2, (RF
, RF_IF
), rd_rm
),
21006 cCL("adfs", e000100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21007 cCL("adfsp", e000120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21008 cCL("adfsm", e000140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21009 cCL("adfsz", e000160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21010 cCL("adfd", e000180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21011 cCL("adfdp", e0001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21012 cCL("adfdm", e0001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21013 cCL("adfdz", e0001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21014 cCL("adfe", e080100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21015 cCL("adfep", e080120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21016 cCL("adfem", e080140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21017 cCL("adfez", e080160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21019 cCL("sufs", e200100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21020 cCL("sufsp", e200120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21021 cCL("sufsm", e200140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21022 cCL("sufsz", e200160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21023 cCL("sufd", e200180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21024 cCL("sufdp", e2001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21025 cCL("sufdm", e2001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21026 cCL("sufdz", e2001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21027 cCL("sufe", e280100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21028 cCL("sufep", e280120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21029 cCL("sufem", e280140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21030 cCL("sufez", e280160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21032 cCL("rsfs", e300100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21033 cCL("rsfsp", e300120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21034 cCL("rsfsm", e300140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21035 cCL("rsfsz", e300160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21036 cCL("rsfd", e300180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21037 cCL("rsfdp", e3001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21038 cCL("rsfdm", e3001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21039 cCL("rsfdz", e3001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21040 cCL("rsfe", e380100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21041 cCL("rsfep", e380120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21042 cCL("rsfem", e380140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21043 cCL("rsfez", e380160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21045 cCL("mufs", e100100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21046 cCL("mufsp", e100120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21047 cCL("mufsm", e100140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21048 cCL("mufsz", e100160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21049 cCL("mufd", e100180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21050 cCL("mufdp", e1001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21051 cCL("mufdm", e1001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21052 cCL("mufdz", e1001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21053 cCL("mufe", e180100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21054 cCL("mufep", e180120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21055 cCL("mufem", e180140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21056 cCL("mufez", e180160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21058 cCL("dvfs", e400100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21059 cCL("dvfsp", e400120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21060 cCL("dvfsm", e400140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21061 cCL("dvfsz", e400160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21062 cCL("dvfd", e400180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21063 cCL("dvfdp", e4001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21064 cCL("dvfdm", e4001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21065 cCL("dvfdz", e4001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21066 cCL("dvfe", e480100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21067 cCL("dvfep", e480120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21068 cCL("dvfem", e480140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21069 cCL("dvfez", e480160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21071 cCL("rdfs", e500100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21072 cCL("rdfsp", e500120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21073 cCL("rdfsm", e500140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21074 cCL("rdfsz", e500160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21075 cCL("rdfd", e500180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21076 cCL("rdfdp", e5001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21077 cCL("rdfdm", e5001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21078 cCL("rdfdz", e5001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21079 cCL("rdfe", e580100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21080 cCL("rdfep", e580120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21081 cCL("rdfem", e580140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21082 cCL("rdfez", e580160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21084 cCL("pows", e600100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21085 cCL("powsp", e600120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21086 cCL("powsm", e600140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21087 cCL("powsz", e600160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21088 cCL("powd", e600180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21089 cCL("powdp", e6001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21090 cCL("powdm", e6001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21091 cCL("powdz", e6001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21092 cCL("powe", e680100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21093 cCL("powep", e680120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21094 cCL("powem", e680140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21095 cCL("powez", e680160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21097 cCL("rpws", e700100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21098 cCL("rpwsp", e700120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21099 cCL("rpwsm", e700140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21100 cCL("rpwsz", e700160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21101 cCL("rpwd", e700180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21102 cCL("rpwdp", e7001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21103 cCL("rpwdm", e7001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21104 cCL("rpwdz", e7001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21105 cCL("rpwe", e780100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21106 cCL("rpwep", e780120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21107 cCL("rpwem", e780140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21108 cCL("rpwez", e780160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21110 cCL("rmfs", e800100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21111 cCL("rmfsp", e800120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21112 cCL("rmfsm", e800140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21113 cCL("rmfsz", e800160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21114 cCL("rmfd", e800180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21115 cCL("rmfdp", e8001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21116 cCL("rmfdm", e8001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21117 cCL("rmfdz", e8001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21118 cCL("rmfe", e880100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21119 cCL("rmfep", e880120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21120 cCL("rmfem", e880140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21121 cCL("rmfez", e880160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21123 cCL("fmls", e900100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21124 cCL("fmlsp", e900120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21125 cCL("fmlsm", e900140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21126 cCL("fmlsz", e900160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21127 cCL("fmld", e900180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21128 cCL("fmldp", e9001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21129 cCL("fmldm", e9001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21130 cCL("fmldz", e9001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21131 cCL("fmle", e980100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21132 cCL("fmlep", e980120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21133 cCL("fmlem", e980140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21134 cCL("fmlez", e980160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21136 cCL("fdvs", ea00100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21137 cCL("fdvsp", ea00120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21138 cCL("fdvsm", ea00140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21139 cCL("fdvsz", ea00160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21140 cCL("fdvd", ea00180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21141 cCL("fdvdp", ea001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21142 cCL("fdvdm", ea001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21143 cCL("fdvdz", ea001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21144 cCL("fdve", ea80100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21145 cCL("fdvep", ea80120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21146 cCL("fdvem", ea80140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21147 cCL("fdvez", ea80160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21149 cCL("frds", eb00100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21150 cCL("frdsp", eb00120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21151 cCL("frdsm", eb00140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21152 cCL("frdsz", eb00160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21153 cCL("frdd", eb00180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21154 cCL("frddp", eb001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21155 cCL("frddm", eb001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21156 cCL("frddz", eb001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21157 cCL("frde", eb80100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21158 cCL("frdep", eb80120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21159 cCL("frdem", eb80140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21160 cCL("frdez", eb80160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21162 cCL("pols", ec00100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21163 cCL("polsp", ec00120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21164 cCL("polsm", ec00140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21165 cCL("polsz", ec00160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21166 cCL("pold", ec00180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21167 cCL("poldp", ec001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21168 cCL("poldm", ec001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21169 cCL("poldz", ec001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21170 cCL("pole", ec80100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21171 cCL("polep", ec80120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21172 cCL("polem", ec80140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21173 cCL("polez", ec80160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21175 cCE("cmf", e90f110
, 2, (RF
, RF_IF
), fpa_cmp
),
21176 C3E("cmfe", ed0f110
, 2, (RF
, RF_IF
), fpa_cmp
),
21177 cCE("cnf", eb0f110
, 2, (RF
, RF_IF
), fpa_cmp
),
21178 C3E("cnfe", ef0f110
, 2, (RF
, RF_IF
), fpa_cmp
),
21180 cCL("flts", e000110
, 2, (RF
, RR
), rn_rd
),
21181 cCL("fltsp", e000130
, 2, (RF
, RR
), rn_rd
),
21182 cCL("fltsm", e000150
, 2, (RF
, RR
), rn_rd
),
21183 cCL("fltsz", e000170
, 2, (RF
, RR
), rn_rd
),
21184 cCL("fltd", e000190
, 2, (RF
, RR
), rn_rd
),
21185 cCL("fltdp", e0001b0
, 2, (RF
, RR
), rn_rd
),
21186 cCL("fltdm", e0001d0
, 2, (RF
, RR
), rn_rd
),
21187 cCL("fltdz", e0001f0
, 2, (RF
, RR
), rn_rd
),
21188 cCL("flte", e080110
, 2, (RF
, RR
), rn_rd
),
21189 cCL("fltep", e080130
, 2, (RF
, RR
), rn_rd
),
21190 cCL("fltem", e080150
, 2, (RF
, RR
), rn_rd
),
21191 cCL("fltez", e080170
, 2, (RF
, RR
), rn_rd
),
21193 /* The implementation of the FIX instruction is broken on some
21194 assemblers, in that it accepts a precision specifier as well as a
21195 rounding specifier, despite the fact that this is meaningless.
21196 To be more compatible, we accept it as well, though of course it
21197 does not set any bits. */
21198 cCE("fix", e100110
, 2, (RR
, RF
), rd_rm
),
21199 cCL("fixp", e100130
, 2, (RR
, RF
), rd_rm
),
21200 cCL("fixm", e100150
, 2, (RR
, RF
), rd_rm
),
21201 cCL("fixz", e100170
, 2, (RR
, RF
), rd_rm
),
21202 cCL("fixsp", e100130
, 2, (RR
, RF
), rd_rm
),
21203 cCL("fixsm", e100150
, 2, (RR
, RF
), rd_rm
),
21204 cCL("fixsz", e100170
, 2, (RR
, RF
), rd_rm
),
21205 cCL("fixdp", e100130
, 2, (RR
, RF
), rd_rm
),
21206 cCL("fixdm", e100150
, 2, (RR
, RF
), rd_rm
),
21207 cCL("fixdz", e100170
, 2, (RR
, RF
), rd_rm
),
21208 cCL("fixep", e100130
, 2, (RR
, RF
), rd_rm
),
21209 cCL("fixem", e100150
, 2, (RR
, RF
), rd_rm
),
21210 cCL("fixez", e100170
, 2, (RR
, RF
), rd_rm
),
21212 /* Instructions that were new with the real FPA, call them V2. */
21214 #define ARM_VARIANT & fpu_fpa_ext_v2
21216 cCE("lfm", c100200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
21217 cCL("lfmfd", c900200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
21218 cCL("lfmea", d100200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
21219 cCE("sfm", c000200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
21220 cCL("sfmfd", d000200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
21221 cCL("sfmea", c800200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
21224 #define ARM_VARIANT & fpu_vfp_ext_v1xd /* VFP V1xD (single precision). */
21226 /* Moves and type conversions. */
21227 cCE("fcpys", eb00a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
21228 cCE("fmrs", e100a10
, 2, (RR
, RVS
), vfp_reg_from_sp
),
21229 cCE("fmsr", e000a10
, 2, (RVS
, RR
), vfp_sp_from_reg
),
21230 cCE("fmstat", ef1fa10
, 0, (), noargs
),
21231 cCE("vmrs", ef00a10
, 2, (APSR_RR
, RVC
), vmrs
),
21232 cCE("vmsr", ee00a10
, 2, (RVC
, RR
), vmsr
),
21233 cCE("fsitos", eb80ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
21234 cCE("fuitos", eb80a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
21235 cCE("ftosis", ebd0a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
21236 cCE("ftosizs", ebd0ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
21237 cCE("ftouis", ebc0a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
21238 cCE("ftouizs", ebc0ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
21239 cCE("fmrx", ef00a10
, 2, (RR
, RVC
), rd_rn
),
21240 cCE("fmxr", ee00a10
, 2, (RVC
, RR
), rn_rd
),
21242 /* Memory operations. */
21243 cCE("flds", d100a00
, 2, (RVS
, ADDRGLDC
), vfp_sp_ldst
),
21244 cCE("fsts", d000a00
, 2, (RVS
, ADDRGLDC
), vfp_sp_ldst
),
21245 cCE("fldmias", c900a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmia
),
21246 cCE("fldmfds", c900a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmia
),
21247 cCE("fldmdbs", d300a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmdb
),
21248 cCE("fldmeas", d300a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmdb
),
21249 cCE("fldmiax", c900b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmia
),
21250 cCE("fldmfdx", c900b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmia
),
21251 cCE("fldmdbx", d300b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmdb
),
21252 cCE("fldmeax", d300b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmdb
),
21253 cCE("fstmias", c800a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmia
),
21254 cCE("fstmeas", c800a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmia
),
21255 cCE("fstmdbs", d200a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmdb
),
21256 cCE("fstmfds", d200a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmdb
),
21257 cCE("fstmiax", c800b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmia
),
21258 cCE("fstmeax", c800b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmia
),
21259 cCE("fstmdbx", d200b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmdb
),
21260 cCE("fstmfdx", d200b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmdb
),
21262 /* Monadic operations. */
21263 cCE("fabss", eb00ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
21264 cCE("fnegs", eb10a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
21265 cCE("fsqrts", eb10ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
21267 /* Dyadic operations. */
21268 cCE("fadds", e300a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
21269 cCE("fsubs", e300a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
21270 cCE("fmuls", e200a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
21271 cCE("fdivs", e800a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
21272 cCE("fmacs", e000a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
21273 cCE("fmscs", e100a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
21274 cCE("fnmuls", e200a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
21275 cCE("fnmacs", e000a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
21276 cCE("fnmscs", e100a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
21279 cCE("fcmps", eb40a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
21280 cCE("fcmpzs", eb50a40
, 1, (RVS
), vfp_sp_compare_z
),
21281 cCE("fcmpes", eb40ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
21282 cCE("fcmpezs", eb50ac0
, 1, (RVS
), vfp_sp_compare_z
),
21284 /* Double precision load/store are still present on single precision
21285 implementations. */
21286 cCE("fldd", d100b00
, 2, (RVD
, ADDRGLDC
), vfp_dp_ldst
),
21287 cCE("fstd", d000b00
, 2, (RVD
, ADDRGLDC
), vfp_dp_ldst
),
21288 cCE("fldmiad", c900b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmia
),
21289 cCE("fldmfdd", c900b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmia
),
21290 cCE("fldmdbd", d300b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmdb
),
21291 cCE("fldmead", d300b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmdb
),
21292 cCE("fstmiad", c800b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmia
),
21293 cCE("fstmead", c800b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmia
),
21294 cCE("fstmdbd", d200b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmdb
),
21295 cCE("fstmfdd", d200b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmdb
),
21298 #define ARM_VARIANT & fpu_vfp_ext_v1 /* VFP V1 (Double precision). */
21300 /* Moves and type conversions. */
21301 cCE("fcpyd", eb00b40
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
21302 cCE("fcvtds", eb70ac0
, 2, (RVD
, RVS
), vfp_dp_sp_cvt
),
21303 cCE("fcvtsd", eb70bc0
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
21304 cCE("fmdhr", e200b10
, 2, (RVD
, RR
), vfp_dp_rn_rd
),
21305 cCE("fmdlr", e000b10
, 2, (RVD
, RR
), vfp_dp_rn_rd
),
21306 cCE("fmrdh", e300b10
, 2, (RR
, RVD
), vfp_dp_rd_rn
),
21307 cCE("fmrdl", e100b10
, 2, (RR
, RVD
), vfp_dp_rd_rn
),
21308 cCE("fsitod", eb80bc0
, 2, (RVD
, RVS
), vfp_dp_sp_cvt
),
21309 cCE("fuitod", eb80b40
, 2, (RVD
, RVS
), vfp_dp_sp_cvt
),
21310 cCE("ftosid", ebd0b40
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
21311 cCE("ftosizd", ebd0bc0
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
21312 cCE("ftouid", ebc0b40
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
21313 cCE("ftouizd", ebc0bc0
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
21315 /* Monadic operations. */
21316 cCE("fabsd", eb00bc0
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
21317 cCE("fnegd", eb10b40
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
21318 cCE("fsqrtd", eb10bc0
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
21320 /* Dyadic operations. */
21321 cCE("faddd", e300b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
21322 cCE("fsubd", e300b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
21323 cCE("fmuld", e200b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
21324 cCE("fdivd", e800b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
21325 cCE("fmacd", e000b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
21326 cCE("fmscd", e100b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
21327 cCE("fnmuld", e200b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
21328 cCE("fnmacd", e000b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
21329 cCE("fnmscd", e100b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
21332 cCE("fcmpd", eb40b40
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
21333 cCE("fcmpzd", eb50b40
, 1, (RVD
), vfp_dp_rd
),
21334 cCE("fcmped", eb40bc0
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
21335 cCE("fcmpezd", eb50bc0
, 1, (RVD
), vfp_dp_rd
),
21338 #define ARM_VARIANT & fpu_vfp_ext_v2
21340 cCE("fmsrr", c400a10
, 3, (VRSLST
, RR
, RR
), vfp_sp2_from_reg2
),
21341 cCE("fmrrs", c500a10
, 3, (RR
, RR
, VRSLST
), vfp_reg2_from_sp2
),
21342 cCE("fmdrr", c400b10
, 3, (RVD
, RR
, RR
), vfp_dp_rm_rd_rn
),
21343 cCE("fmrrd", c500b10
, 3, (RR
, RR
, RVD
), vfp_dp_rd_rn_rm
),
21345 /* Instructions which may belong to either the Neon or VFP instruction sets.
21346 Individual encoder functions perform additional architecture checks. */
21348 #define ARM_VARIANT & fpu_vfp_ext_v1xd
21349 #undef THUMB_VARIANT
21350 #define THUMB_VARIANT & fpu_vfp_ext_v1xd
21352 /* These mnemonics are unique to VFP. */
21353 NCE(vsqrt
, 0, 2, (RVSD
, RVSD
), vfp_nsyn_sqrt
),
21354 NCE(vdiv
, 0, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_div
),
21355 nCE(vnmul
, _vnmul
, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_nmul
),
21356 nCE(vnmla
, _vnmla
, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_nmul
),
21357 nCE(vnmls
, _vnmls
, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_nmul
),
21358 nCE(vcmp
, _vcmp
, 2, (RVSD
, RSVD_FI0
), vfp_nsyn_cmp
),
21359 nCE(vcmpe
, _vcmpe
, 2, (RVSD
, RSVD_FI0
), vfp_nsyn_cmp
),
21360 NCE(vpush
, 0, 1, (VRSDLST
), vfp_nsyn_push
),
21361 NCE(vpop
, 0, 1, (VRSDLST
), vfp_nsyn_pop
),
21362 NCE(vcvtz
, 0, 2, (RVSD
, RVSD
), vfp_nsyn_cvtz
),
21364 /* Mnemonics shared by Neon and VFP. */
21365 nCEF(vmul
, _vmul
, 3, (RNSDQ
, oRNSDQ
, RNSDQ_RNSC
), neon_mul
),
21366 nCEF(vmla
, _vmla
, 3, (RNSDQ
, oRNSDQ
, RNSDQ_RNSC
), neon_mac_maybe_scalar
),
21367 nCEF(vmls
, _vmls
, 3, (RNSDQ
, oRNSDQ
, RNSDQ_RNSC
), neon_mac_maybe_scalar
),
21369 nCEF(vadd
, _vadd
, 3, (RNSDQ
, oRNSDQ
, RNSDQ
), neon_addsub_if_i
),
21370 nCEF(vsub
, _vsub
, 3, (RNSDQ
, oRNSDQ
, RNSDQ
), neon_addsub_if_i
),
21372 NCEF(vabs
, 1b10300
, 2, (RNSDQ
, RNSDQ
), neon_abs_neg
),
21373 NCEF(vneg
, 1b10380
, 2, (RNSDQ
, RNSDQ
), neon_abs_neg
),
21375 NCE(vldm
, c900b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
21376 NCE(vldmia
, c900b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
21377 NCE(vldmdb
, d100b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
21378 NCE(vstm
, c800b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
21379 NCE(vstmia
, c800b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
21380 NCE(vstmdb
, d000b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
21382 nCEF(vcvt
, _vcvt
, 3, (RNSDQ
, RNSDQ
, oI32z
), neon_cvt
),
21383 nCEF(vcvtr
, _vcvt
, 2, (RNSDQ
, RNSDQ
), neon_cvtr
),
21384 NCEF(vcvtb
, eb20a40
, 2, (RVSD
, RVSD
), neon_cvtb
),
21385 NCEF(vcvtt
, eb20a40
, 2, (RVSD
, RVSD
), neon_cvtt
),
21388 /* NOTE: All VMOV encoding is special-cased! */
21389 NCE(vmov
, 0, 1, (VMOV
), neon_mov
),
21390 NCE(vmovq
, 0, 1, (VMOV
), neon_mov
),
21392 #undef THUMB_VARIANT
21393 /* Could be either VLDR/VSTR or VLDR/VSTR (system register) which are guarded
21394 by different feature bits. Since we are setting the Thumb guard, we can
21395 require Thumb-1 which makes it a nop guard and set the right feature bit in
21396 do_vldr_vstr (). */
21397 #define THUMB_VARIANT & arm_ext_v4t
21398 NCE(vldr
, d100b00
, 2, (VLDR
, ADDRGLDC
), vldr_vstr
),
21399 NCE(vstr
, d000b00
, 2, (VLDR
, ADDRGLDC
), vldr_vstr
),
21402 #define ARM_VARIANT & arm_ext_fp16
21403 #undef THUMB_VARIANT
21404 #define THUMB_VARIANT & arm_ext_fp16
21405 /* New instructions added from v8.2, allowing the extraction and insertion of
21406 the upper 16 bits of a 32-bit vector register. */
21407 NCE (vmovx
, eb00a40
, 2, (RVS
, RVS
), neon_movhf
),
21408 NCE (vins
, eb00ac0
, 2, (RVS
, RVS
), neon_movhf
),
21410 /* New backported fma/fms instructions optional in v8.2. */
21411 NCE (vfmal
, 810, 3, (RNDQ
, RNSD
, RNSD_RNSC
), neon_vfmal
),
21412 NCE (vfmsl
, 810, 3, (RNDQ
, RNSD
, RNSD_RNSC
), neon_vfmsl
),
21414 #undef THUMB_VARIANT
21415 #define THUMB_VARIANT & fpu_neon_ext_v1
21417 #define ARM_VARIANT & fpu_neon_ext_v1
21419 /* Data processing with three registers of the same length. */
21420 /* integer ops, valid types S8 S16 S32 U8 U16 U32. */
21421 NUF(vaba
, 0000710, 3, (RNDQ
, RNDQ
, RNDQ
), neon_dyadic_i_su
),
21422 NUF(vabaq
, 0000710, 3, (RNQ
, RNQ
, RNQ
), neon_dyadic_i_su
),
21423 NUF(vhadd
, 0000000, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i_su
),
21424 NUF(vhaddq
, 0000000, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i_su
),
21425 NUF(vrhadd
, 0000100, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i_su
),
21426 NUF(vrhaddq
, 0000100, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i_su
),
21427 NUF(vhsub
, 0000200, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i_su
),
21428 NUF(vhsubq
, 0000200, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i_su
),
21429 /* integer ops, valid types S8 S16 S32 S64 U8 U16 U32 U64. */
21430 NUF(vqadd
, 0000010, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i64_su
),
21431 NUF(vqaddq
, 0000010, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i64_su
),
21432 NUF(vqsub
, 0000210, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i64_su
),
21433 NUF(vqsubq
, 0000210, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i64_su
),
21434 NUF(vrshl
, 0000500, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_rshl
),
21435 NUF(vrshlq
, 0000500, 3, (RNQ
, oRNQ
, RNQ
), neon_rshl
),
21436 NUF(vqrshl
, 0000510, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_rshl
),
21437 NUF(vqrshlq
, 0000510, 3, (RNQ
, oRNQ
, RNQ
), neon_rshl
),
21438 /* If not immediate, fall back to neon_dyadic_i64_su.
21439 shl_imm should accept I8 I16 I32 I64,
21440 qshl_imm should accept S8 S16 S32 S64 U8 U16 U32 U64. */
21441 nUF(vshl
, _vshl
, 3, (RNDQ
, oRNDQ
, RNDQ_I63b
), neon_shl_imm
),
21442 nUF(vshlq
, _vshl
, 3, (RNQ
, oRNQ
, RNDQ_I63b
), neon_shl_imm
),
21443 nUF(vqshl
, _vqshl
, 3, (RNDQ
, oRNDQ
, RNDQ_I63b
), neon_qshl_imm
),
21444 nUF(vqshlq
, _vqshl
, 3, (RNQ
, oRNQ
, RNDQ_I63b
), neon_qshl_imm
),
21445 /* Logic ops, types optional & ignored. */
21446 nUF(vand
, _vand
, 3, (RNDQ
, oRNDQ
, RNDQ_Ibig
), neon_logic
),
21447 nUF(vandq
, _vand
, 3, (RNQ
, oRNQ
, RNDQ_Ibig
), neon_logic
),
21448 nUF(vbic
, _vbic
, 3, (RNDQ
, oRNDQ
, RNDQ_Ibig
), neon_logic
),
21449 nUF(vbicq
, _vbic
, 3, (RNQ
, oRNQ
, RNDQ_Ibig
), neon_logic
),
21450 nUF(vorr
, _vorr
, 3, (RNDQ
, oRNDQ
, RNDQ_Ibig
), neon_logic
),
21451 nUF(vorrq
, _vorr
, 3, (RNQ
, oRNQ
, RNDQ_Ibig
), neon_logic
),
21452 nUF(vorn
, _vorn
, 3, (RNDQ
, oRNDQ
, RNDQ_Ibig
), neon_logic
),
21453 nUF(vornq
, _vorn
, 3, (RNQ
, oRNQ
, RNDQ_Ibig
), neon_logic
),
21454 nUF(veor
, _veor
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_logic
),
21455 nUF(veorq
, _veor
, 3, (RNQ
, oRNQ
, RNQ
), neon_logic
),
21456 /* Bitfield ops, untyped. */
21457 NUF(vbsl
, 1100110, 3, (RNDQ
, RNDQ
, RNDQ
), neon_bitfield
),
21458 NUF(vbslq
, 1100110, 3, (RNQ
, RNQ
, RNQ
), neon_bitfield
),
21459 NUF(vbit
, 1200110, 3, (RNDQ
, RNDQ
, RNDQ
), neon_bitfield
),
21460 NUF(vbitq
, 1200110, 3, (RNQ
, RNQ
, RNQ
), neon_bitfield
),
21461 NUF(vbif
, 1300110, 3, (RNDQ
, RNDQ
, RNDQ
), neon_bitfield
),
21462 NUF(vbifq
, 1300110, 3, (RNQ
, RNQ
, RNQ
), neon_bitfield
),
21463 /* Int and float variants, types S8 S16 S32 U8 U16 U32 F16 F32. */
21464 nUF(vabd
, _vabd
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_if_su
),
21465 nUF(vabdq
, _vabd
, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_if_su
),
21466 nUF(vmax
, _vmax
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_if_su
),
21467 nUF(vmaxq
, _vmax
, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_if_su
),
21468 nUF(vmin
, _vmin
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_if_su
),
21469 nUF(vminq
, _vmin
, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_if_su
),
21470 /* Comparisons. Types S8 S16 S32 U8 U16 U32 F32. Non-immediate versions fall
21471 back to neon_dyadic_if_su. */
21472 nUF(vcge
, _vcge
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_cmp
),
21473 nUF(vcgeq
, _vcge
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_cmp
),
21474 nUF(vcgt
, _vcgt
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_cmp
),
21475 nUF(vcgtq
, _vcgt
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_cmp
),
21476 nUF(vclt
, _vclt
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_cmp_inv
),
21477 nUF(vcltq
, _vclt
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_cmp_inv
),
21478 nUF(vcle
, _vcle
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_cmp_inv
),
21479 nUF(vcleq
, _vcle
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_cmp_inv
),
21480 /* Comparison. Type I8 I16 I32 F32. */
21481 nUF(vceq
, _vceq
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_ceq
),
21482 nUF(vceqq
, _vceq
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_ceq
),
21483 /* As above, D registers only. */
21484 nUF(vpmax
, _vpmax
, 3, (RND
, oRND
, RND
), neon_dyadic_if_su_d
),
21485 nUF(vpmin
, _vpmin
, 3, (RND
, oRND
, RND
), neon_dyadic_if_su_d
),
21486 /* Int and float variants, signedness unimportant. */
21487 nUF(vmlaq
, _vmla
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_mac_maybe_scalar
),
21488 nUF(vmlsq
, _vmls
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_mac_maybe_scalar
),
21489 nUF(vpadd
, _vpadd
, 3, (RND
, oRND
, RND
), neon_dyadic_if_i_d
),
21490 /* Add/sub take types I8 I16 I32 I64 F32. */
21491 nUF(vaddq
, _vadd
, 3, (RNQ
, oRNQ
, RNQ
), neon_addsub_if_i
),
21492 nUF(vsubq
, _vsub
, 3, (RNQ
, oRNQ
, RNQ
), neon_addsub_if_i
),
21493 /* vtst takes sizes 8, 16, 32. */
21494 NUF(vtst
, 0000810, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_tst
),
21495 NUF(vtstq
, 0000810, 3, (RNQ
, oRNQ
, RNQ
), neon_tst
),
21496 /* VMUL takes I8 I16 I32 F32 P8. */
21497 nUF(vmulq
, _vmul
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_mul
),
21498 /* VQD{R}MULH takes S16 S32. */
21499 nUF(vqdmulh
, _vqdmulh
, 3, (RNDQ
, oRNDQ
, RNDQ_RNSC
), neon_qdmulh
),
21500 nUF(vqdmulhq
, _vqdmulh
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_qdmulh
),
21501 nUF(vqrdmulh
, _vqrdmulh
, 3, (RNDQ
, oRNDQ
, RNDQ_RNSC
), neon_qdmulh
),
21502 nUF(vqrdmulhq
, _vqrdmulh
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_qdmulh
),
21503 NUF(vacge
, 0000e10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_fcmp_absolute
),
21504 NUF(vacgeq
, 0000e10
, 3, (RNQ
, oRNQ
, RNQ
), neon_fcmp_absolute
),
21505 NUF(vacgt
, 0200e10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_fcmp_absolute
),
21506 NUF(vacgtq
, 0200e10
, 3, (RNQ
, oRNQ
, RNQ
), neon_fcmp_absolute
),
21507 NUF(vaclt
, 0200e10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_fcmp_absolute_inv
),
21508 NUF(vacltq
, 0200e10
, 3, (RNQ
, oRNQ
, RNQ
), neon_fcmp_absolute_inv
),
21509 NUF(vacle
, 0000e10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_fcmp_absolute_inv
),
21510 NUF(vacleq
, 0000e10
, 3, (RNQ
, oRNQ
, RNQ
), neon_fcmp_absolute_inv
),
21511 NUF(vrecps
, 0000f10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_step
),
21512 NUF(vrecpsq
, 0000f10
, 3, (RNQ
, oRNQ
, RNQ
), neon_step
),
21513 NUF(vrsqrts
, 0200f10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_step
),
21514 NUF(vrsqrtsq
, 0200f10
, 3, (RNQ
, oRNQ
, RNQ
), neon_step
),
21515 /* ARM v8.1 extension. */
21516 nUF (vqrdmlah
, _vqrdmlah
, 3, (RNDQ
, oRNDQ
, RNDQ_RNSC
), neon_qrdmlah
),
21517 nUF (vqrdmlahq
, _vqrdmlah
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_qrdmlah
),
21518 nUF (vqrdmlsh
, _vqrdmlsh
, 3, (RNDQ
, oRNDQ
, RNDQ_RNSC
), neon_qrdmlah
),
21519 nUF (vqrdmlshq
, _vqrdmlsh
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_qrdmlah
),
21521 /* Two address, int/float. Types S8 S16 S32 F32. */
21522 NUF(vabsq
, 1b10300
, 2, (RNQ
, RNQ
), neon_abs_neg
),
21523 NUF(vnegq
, 1b10380
, 2, (RNQ
, RNQ
), neon_abs_neg
),
21525 /* Data processing with two registers and a shift amount. */
21526 /* Right shifts, and variants with rounding.
21527 Types accepted S8 S16 S32 S64 U8 U16 U32 U64. */
21528 NUF(vshr
, 0800010, 3, (RNDQ
, oRNDQ
, I64z
), neon_rshift_round_imm
),
21529 NUF(vshrq
, 0800010, 3, (RNQ
, oRNQ
, I64z
), neon_rshift_round_imm
),
21530 NUF(vrshr
, 0800210, 3, (RNDQ
, oRNDQ
, I64z
), neon_rshift_round_imm
),
21531 NUF(vrshrq
, 0800210, 3, (RNQ
, oRNQ
, I64z
), neon_rshift_round_imm
),
21532 NUF(vsra
, 0800110, 3, (RNDQ
, oRNDQ
, I64
), neon_rshift_round_imm
),
21533 NUF(vsraq
, 0800110, 3, (RNQ
, oRNQ
, I64
), neon_rshift_round_imm
),
21534 NUF(vrsra
, 0800310, 3, (RNDQ
, oRNDQ
, I64
), neon_rshift_round_imm
),
21535 NUF(vrsraq
, 0800310, 3, (RNQ
, oRNQ
, I64
), neon_rshift_round_imm
),
21536 /* Shift and insert. Sizes accepted 8 16 32 64. */
21537 NUF(vsli
, 1800510, 3, (RNDQ
, oRNDQ
, I63
), neon_sli
),
21538 NUF(vsliq
, 1800510, 3, (RNQ
, oRNQ
, I63
), neon_sli
),
21539 NUF(vsri
, 1800410, 3, (RNDQ
, oRNDQ
, I64
), neon_sri
),
21540 NUF(vsriq
, 1800410, 3, (RNQ
, oRNQ
, I64
), neon_sri
),
21541 /* QSHL{U} immediate accepts S8 S16 S32 S64 U8 U16 U32 U64. */
21542 NUF(vqshlu
, 1800610, 3, (RNDQ
, oRNDQ
, I63
), neon_qshlu_imm
),
21543 NUF(vqshluq
, 1800610, 3, (RNQ
, oRNQ
, I63
), neon_qshlu_imm
),
21544 /* Right shift immediate, saturating & narrowing, with rounding variants.
21545 Types accepted S16 S32 S64 U16 U32 U64. */
21546 NUF(vqshrn
, 0800910, 3, (RND
, RNQ
, I32z
), neon_rshift_sat_narrow
),
21547 NUF(vqrshrn
, 0800950, 3, (RND
, RNQ
, I32z
), neon_rshift_sat_narrow
),
21548 /* As above, unsigned. Types accepted S16 S32 S64. */
21549 NUF(vqshrun
, 0800810, 3, (RND
, RNQ
, I32z
), neon_rshift_sat_narrow_u
),
21550 NUF(vqrshrun
, 0800850, 3, (RND
, RNQ
, I32z
), neon_rshift_sat_narrow_u
),
21551 /* Right shift narrowing. Types accepted I16 I32 I64. */
21552 NUF(vshrn
, 0800810, 3, (RND
, RNQ
, I32z
), neon_rshift_narrow
),
21553 NUF(vrshrn
, 0800850, 3, (RND
, RNQ
, I32z
), neon_rshift_narrow
),
21554 /* Special case. Types S8 S16 S32 U8 U16 U32. Handles max shift variant. */
21555 nUF(vshll
, _vshll
, 3, (RNQ
, RND
, I32
), neon_shll
),
21556 /* CVT with optional immediate for fixed-point variant. */
21557 nUF(vcvtq
, _vcvt
, 3, (RNQ
, RNQ
, oI32b
), neon_cvt
),
21559 nUF(vmvn
, _vmvn
, 2, (RNDQ
, RNDQ_Ibig
), neon_mvn
),
21560 nUF(vmvnq
, _vmvn
, 2, (RNQ
, RNDQ_Ibig
), neon_mvn
),
21562 /* Data processing, three registers of different lengths. */
21563 /* Dyadic, long insns. Types S8 S16 S32 U8 U16 U32. */
21564 NUF(vabal
, 0800500, 3, (RNQ
, RND
, RND
), neon_abal
),
21565 NUF(vabdl
, 0800700, 3, (RNQ
, RND
, RND
), neon_dyadic_long
),
21566 NUF(vaddl
, 0800000, 3, (RNQ
, RND
, RND
), neon_dyadic_long
),
21567 NUF(vsubl
, 0800200, 3, (RNQ
, RND
, RND
), neon_dyadic_long
),
21568 /* If not scalar, fall back to neon_dyadic_long.
21569 Vector types as above, scalar types S16 S32 U16 U32. */
21570 nUF(vmlal
, _vmlal
, 3, (RNQ
, RND
, RND_RNSC
), neon_mac_maybe_scalar_long
),
21571 nUF(vmlsl
, _vmlsl
, 3, (RNQ
, RND
, RND_RNSC
), neon_mac_maybe_scalar_long
),
21572 /* Dyadic, widening insns. Types S8 S16 S32 U8 U16 U32. */
21573 NUF(vaddw
, 0800100, 3, (RNQ
, oRNQ
, RND
), neon_dyadic_wide
),
21574 NUF(vsubw
, 0800300, 3, (RNQ
, oRNQ
, RND
), neon_dyadic_wide
),
21575 /* Dyadic, narrowing insns. Types I16 I32 I64. */
21576 NUF(vaddhn
, 0800400, 3, (RND
, RNQ
, RNQ
), neon_dyadic_narrow
),
21577 NUF(vraddhn
, 1800400, 3, (RND
, RNQ
, RNQ
), neon_dyadic_narrow
),
21578 NUF(vsubhn
, 0800600, 3, (RND
, RNQ
, RNQ
), neon_dyadic_narrow
),
21579 NUF(vrsubhn
, 1800600, 3, (RND
, RNQ
, RNQ
), neon_dyadic_narrow
),
21580 /* Saturating doubling multiplies. Types S16 S32. */
21581 nUF(vqdmlal
, _vqdmlal
, 3, (RNQ
, RND
, RND_RNSC
), neon_mul_sat_scalar_long
),
21582 nUF(vqdmlsl
, _vqdmlsl
, 3, (RNQ
, RND
, RND_RNSC
), neon_mul_sat_scalar_long
),
21583 nUF(vqdmull
, _vqdmull
, 3, (RNQ
, RND
, RND_RNSC
), neon_mul_sat_scalar_long
),
21584 /* VMULL. Vector types S8 S16 S32 U8 U16 U32 P8, scalar types
21585 S16 S32 U16 U32. */
21586 nUF(vmull
, _vmull
, 3, (RNQ
, RND
, RND_RNSC
), neon_vmull
),
21588 /* Extract. Size 8. */
21589 NUF(vext
, 0b00000, 4, (RNDQ
, oRNDQ
, RNDQ
, I15
), neon_ext
),
21590 NUF(vextq
, 0b00000, 4, (RNQ
, oRNQ
, RNQ
, I15
), neon_ext
),
21592 /* Two registers, miscellaneous. */
21593 /* Reverse. Sizes 8 16 32 (must be < size in opcode). */
21594 NUF(vrev64
, 1b00000
, 2, (RNDQ
, RNDQ
), neon_rev
),
21595 NUF(vrev64q
, 1b00000
, 2, (RNQ
, RNQ
), neon_rev
),
21596 NUF(vrev32
, 1b00080
, 2, (RNDQ
, RNDQ
), neon_rev
),
21597 NUF(vrev32q
, 1b00080
, 2, (RNQ
, RNQ
), neon_rev
),
21598 NUF(vrev16
, 1b00100
, 2, (RNDQ
, RNDQ
), neon_rev
),
21599 NUF(vrev16q
, 1b00100
, 2, (RNQ
, RNQ
), neon_rev
),
21600 /* Vector replicate. Sizes 8 16 32. */
21601 nCE(vdup
, _vdup
, 2, (RNDQ
, RR_RNSC
), neon_dup
),
21602 nCE(vdupq
, _vdup
, 2, (RNQ
, RR_RNSC
), neon_dup
),
21603 /* VMOVL. Types S8 S16 S32 U8 U16 U32. */
21604 NUF(vmovl
, 0800a10
, 2, (RNQ
, RND
), neon_movl
),
21605 /* VMOVN. Types I16 I32 I64. */
21606 nUF(vmovn
, _vmovn
, 2, (RND
, RNQ
), neon_movn
),
21607 /* VQMOVN. Types S16 S32 S64 U16 U32 U64. */
21608 nUF(vqmovn
, _vqmovn
, 2, (RND
, RNQ
), neon_qmovn
),
21609 /* VQMOVUN. Types S16 S32 S64. */
21610 nUF(vqmovun
, _vqmovun
, 2, (RND
, RNQ
), neon_qmovun
),
21611 /* VZIP / VUZP. Sizes 8 16 32. */
21612 NUF(vzip
, 1b20180
, 2, (RNDQ
, RNDQ
), neon_zip_uzp
),
21613 NUF(vzipq
, 1b20180
, 2, (RNQ
, RNQ
), neon_zip_uzp
),
21614 NUF(vuzp
, 1b20100
, 2, (RNDQ
, RNDQ
), neon_zip_uzp
),
21615 NUF(vuzpq
, 1b20100
, 2, (RNQ
, RNQ
), neon_zip_uzp
),
21616 /* VQABS / VQNEG. Types S8 S16 S32. */
21617 NUF(vqabs
, 1b00700
, 2, (RNDQ
, RNDQ
), neon_sat_abs_neg
),
21618 NUF(vqabsq
, 1b00700
, 2, (RNQ
, RNQ
), neon_sat_abs_neg
),
21619 NUF(vqneg
, 1b00780
, 2, (RNDQ
, RNDQ
), neon_sat_abs_neg
),
21620 NUF(vqnegq
, 1b00780
, 2, (RNQ
, RNQ
), neon_sat_abs_neg
),
21621 /* Pairwise, lengthening. Types S8 S16 S32 U8 U16 U32. */
21622 NUF(vpadal
, 1b00600
, 2, (RNDQ
, RNDQ
), neon_pair_long
),
21623 NUF(vpadalq
, 1b00600
, 2, (RNQ
, RNQ
), neon_pair_long
),
21624 NUF(vpaddl
, 1b00200
, 2, (RNDQ
, RNDQ
), neon_pair_long
),
21625 NUF(vpaddlq
, 1b00200
, 2, (RNQ
, RNQ
), neon_pair_long
),
21626 /* Reciprocal estimates. Types U32 F16 F32. */
21627 NUF(vrecpe
, 1b30400
, 2, (RNDQ
, RNDQ
), neon_recip_est
),
21628 NUF(vrecpeq
, 1b30400
, 2, (RNQ
, RNQ
), neon_recip_est
),
21629 NUF(vrsqrte
, 1b30480
, 2, (RNDQ
, RNDQ
), neon_recip_est
),
21630 NUF(vrsqrteq
, 1b30480
, 2, (RNQ
, RNQ
), neon_recip_est
),
21631 /* VCLS. Types S8 S16 S32. */
21632 NUF(vcls
, 1b00400
, 2, (RNDQ
, RNDQ
), neon_cls
),
21633 NUF(vclsq
, 1b00400
, 2, (RNQ
, RNQ
), neon_cls
),
21634 /* VCLZ. Types I8 I16 I32. */
21635 NUF(vclz
, 1b00480
, 2, (RNDQ
, RNDQ
), neon_clz
),
21636 NUF(vclzq
, 1b00480
, 2, (RNQ
, RNQ
), neon_clz
),
21637 /* VCNT. Size 8. */
21638 NUF(vcnt
, 1b00500
, 2, (RNDQ
, RNDQ
), neon_cnt
),
21639 NUF(vcntq
, 1b00500
, 2, (RNQ
, RNQ
), neon_cnt
),
21640 /* Two address, untyped. */
21641 NUF(vswp
, 1b20000
, 2, (RNDQ
, RNDQ
), neon_swp
),
21642 NUF(vswpq
, 1b20000
, 2, (RNQ
, RNQ
), neon_swp
),
21643 /* VTRN. Sizes 8 16 32. */
21644 nUF(vtrn
, _vtrn
, 2, (RNDQ
, RNDQ
), neon_trn
),
21645 nUF(vtrnq
, _vtrn
, 2, (RNQ
, RNQ
), neon_trn
),
21647 /* Table lookup. Size 8. */
21648 NUF(vtbl
, 1b00800
, 3, (RND
, NRDLST
, RND
), neon_tbl_tbx
),
21649 NUF(vtbx
, 1b00840
, 3, (RND
, NRDLST
, RND
), neon_tbl_tbx
),
21651 #undef THUMB_VARIANT
21652 #define THUMB_VARIANT & fpu_vfp_v3_or_neon_ext
21654 #define ARM_VARIANT & fpu_vfp_v3_or_neon_ext
21656 /* Neon element/structure load/store. */
21657 nUF(vld1
, _vld1
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
21658 nUF(vst1
, _vst1
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
21659 nUF(vld2
, _vld2
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
21660 nUF(vst2
, _vst2
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
21661 nUF(vld3
, _vld3
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
21662 nUF(vst3
, _vst3
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
21663 nUF(vld4
, _vld4
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
21664 nUF(vst4
, _vst4
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
21666 #undef THUMB_VARIANT
21667 #define THUMB_VARIANT & fpu_vfp_ext_v3xd
21669 #define ARM_VARIANT & fpu_vfp_ext_v3xd
21670 cCE("fconsts", eb00a00
, 2, (RVS
, I255
), vfp_sp_const
),
21671 cCE("fshtos", eba0a40
, 2, (RVS
, I16z
), vfp_sp_conv_16
),
21672 cCE("fsltos", eba0ac0
, 2, (RVS
, I32
), vfp_sp_conv_32
),
21673 cCE("fuhtos", ebb0a40
, 2, (RVS
, I16z
), vfp_sp_conv_16
),
21674 cCE("fultos", ebb0ac0
, 2, (RVS
, I32
), vfp_sp_conv_32
),
21675 cCE("ftoshs", ebe0a40
, 2, (RVS
, I16z
), vfp_sp_conv_16
),
21676 cCE("ftosls", ebe0ac0
, 2, (RVS
, I32
), vfp_sp_conv_32
),
21677 cCE("ftouhs", ebf0a40
, 2, (RVS
, I16z
), vfp_sp_conv_16
),
21678 cCE("ftouls", ebf0ac0
, 2, (RVS
, I32
), vfp_sp_conv_32
),
21680 #undef THUMB_VARIANT
21681 #define THUMB_VARIANT & fpu_vfp_ext_v3
21683 #define ARM_VARIANT & fpu_vfp_ext_v3
21685 cCE("fconstd", eb00b00
, 2, (RVD
, I255
), vfp_dp_const
),
21686 cCE("fshtod", eba0b40
, 2, (RVD
, I16z
), vfp_dp_conv_16
),
21687 cCE("fsltod", eba0bc0
, 2, (RVD
, I32
), vfp_dp_conv_32
),
21688 cCE("fuhtod", ebb0b40
, 2, (RVD
, I16z
), vfp_dp_conv_16
),
21689 cCE("fultod", ebb0bc0
, 2, (RVD
, I32
), vfp_dp_conv_32
),
21690 cCE("ftoshd", ebe0b40
, 2, (RVD
, I16z
), vfp_dp_conv_16
),
21691 cCE("ftosld", ebe0bc0
, 2, (RVD
, I32
), vfp_dp_conv_32
),
21692 cCE("ftouhd", ebf0b40
, 2, (RVD
, I16z
), vfp_dp_conv_16
),
21693 cCE("ftould", ebf0bc0
, 2, (RVD
, I32
), vfp_dp_conv_32
),
21696 #define ARM_VARIANT & fpu_vfp_ext_fma
21697 #undef THUMB_VARIANT
21698 #define THUMB_VARIANT & fpu_vfp_ext_fma
21699 /* Mnemonics shared by Neon and VFP. These are included in the
21700 VFP FMA variant; NEON and VFP FMA always includes the NEON
21701 FMA instructions. */
21702 nCEF(vfma
, _vfma
, 3, (RNSDQ
, oRNSDQ
, RNSDQ
), neon_fmac
),
21703 nCEF(vfms
, _vfms
, 3, (RNSDQ
, oRNSDQ
, RNSDQ
), neon_fmac
),
21704 /* ffmas/ffmad/ffmss/ffmsd are dummy mnemonics to satisfy gas;
21705 the v form should always be used. */
21706 cCE("ffmas", ea00a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
21707 cCE("ffnmas", ea00a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
21708 cCE("ffmad", ea00b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
21709 cCE("ffnmad", ea00b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
21710 nCE(vfnma
, _vfnma
, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_nmul
),
21711 nCE(vfnms
, _vfnms
, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_nmul
),
21713 #undef THUMB_VARIANT
21715 #define ARM_VARIANT & arm_cext_xscale /* Intel XScale extensions. */
21717 cCE("mia", e200010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
21718 cCE("miaph", e280010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
21719 cCE("miabb", e2c0010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
21720 cCE("miabt", e2d0010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
21721 cCE("miatb", e2e0010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
21722 cCE("miatt", e2f0010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
21723 cCE("mar", c400000
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mar
),
21724 cCE("mra", c500000
, 3, (RRnpc
, RRnpc
, RXA
), xsc_mra
),
21727 #define ARM_VARIANT & arm_cext_iwmmxt /* Intel Wireless MMX technology. */
21729 cCE("tandcb", e13f130
, 1, (RR
), iwmmxt_tandorc
),
21730 cCE("tandch", e53f130
, 1, (RR
), iwmmxt_tandorc
),
21731 cCE("tandcw", e93f130
, 1, (RR
), iwmmxt_tandorc
),
21732 cCE("tbcstb", e400010
, 2, (RIWR
, RR
), rn_rd
),
21733 cCE("tbcsth", e400050
, 2, (RIWR
, RR
), rn_rd
),
21734 cCE("tbcstw", e400090
, 2, (RIWR
, RR
), rn_rd
),
21735 cCE("textrcb", e130170
, 2, (RR
, I7
), iwmmxt_textrc
),
21736 cCE("textrch", e530170
, 2, (RR
, I7
), iwmmxt_textrc
),
21737 cCE("textrcw", e930170
, 2, (RR
, I7
), iwmmxt_textrc
),
21738 cCE("textrmub",e100070
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
21739 cCE("textrmuh",e500070
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
21740 cCE("textrmuw",e900070
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
21741 cCE("textrmsb",e100078
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
21742 cCE("textrmsh",e500078
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
21743 cCE("textrmsw",e900078
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
21744 cCE("tinsrb", e600010
, 3, (RIWR
, RR
, I7
), iwmmxt_tinsr
),
21745 cCE("tinsrh", e600050
, 3, (RIWR
, RR
, I7
), iwmmxt_tinsr
),
21746 cCE("tinsrw", e600090
, 3, (RIWR
, RR
, I7
), iwmmxt_tinsr
),
21747 cCE("tmcr", e000110
, 2, (RIWC_RIWG
, RR
), rn_rd
),
21748 cCE("tmcrr", c400000
, 3, (RIWR
, RR
, RR
), rm_rd_rn
),
21749 cCE("tmia", e200010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
21750 cCE("tmiaph", e280010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
21751 cCE("tmiabb", e2c0010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
21752 cCE("tmiabt", e2d0010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
21753 cCE("tmiatb", e2e0010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
21754 cCE("tmiatt", e2f0010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
21755 cCE("tmovmskb",e100030
, 2, (RR
, RIWR
), rd_rn
),
21756 cCE("tmovmskh",e500030
, 2, (RR
, RIWR
), rd_rn
),
21757 cCE("tmovmskw",e900030
, 2, (RR
, RIWR
), rd_rn
),
21758 cCE("tmrc", e100110
, 2, (RR
, RIWC_RIWG
), rd_rn
),
21759 cCE("tmrrc", c500000
, 3, (RR
, RR
, RIWR
), rd_rn_rm
),
21760 cCE("torcb", e13f150
, 1, (RR
), iwmmxt_tandorc
),
21761 cCE("torch", e53f150
, 1, (RR
), iwmmxt_tandorc
),
21762 cCE("torcw", e93f150
, 1, (RR
), iwmmxt_tandorc
),
21763 cCE("waccb", e0001c0
, 2, (RIWR
, RIWR
), rd_rn
),
21764 cCE("wacch", e4001c0
, 2, (RIWR
, RIWR
), rd_rn
),
21765 cCE("waccw", e8001c0
, 2, (RIWR
, RIWR
), rd_rn
),
21766 cCE("waddbss", e300180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21767 cCE("waddb", e000180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21768 cCE("waddbus", e100180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21769 cCE("waddhss", e700180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21770 cCE("waddh", e400180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21771 cCE("waddhus", e500180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21772 cCE("waddwss", eb00180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21773 cCE("waddw", e800180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21774 cCE("waddwus", e900180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21775 cCE("waligni", e000020
, 4, (RIWR
, RIWR
, RIWR
, I7
), iwmmxt_waligni
),
21776 cCE("walignr0",e800020
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21777 cCE("walignr1",e900020
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21778 cCE("walignr2",ea00020
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21779 cCE("walignr3",eb00020
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21780 cCE("wand", e200000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21781 cCE("wandn", e300000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21782 cCE("wavg2b", e800000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21783 cCE("wavg2br", e900000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21784 cCE("wavg2h", ec00000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21785 cCE("wavg2hr", ed00000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21786 cCE("wcmpeqb", e000060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21787 cCE("wcmpeqh", e400060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21788 cCE("wcmpeqw", e800060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21789 cCE("wcmpgtub",e100060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21790 cCE("wcmpgtuh",e500060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21791 cCE("wcmpgtuw",e900060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21792 cCE("wcmpgtsb",e300060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21793 cCE("wcmpgtsh",e700060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21794 cCE("wcmpgtsw",eb00060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21795 cCE("wldrb", c100000
, 2, (RIWR
, ADDR
), iwmmxt_wldstbh
),
21796 cCE("wldrh", c500000
, 2, (RIWR
, ADDR
), iwmmxt_wldstbh
),
21797 cCE("wldrw", c100100
, 2, (RIWR_RIWC
, ADDR
), iwmmxt_wldstw
),
21798 cCE("wldrd", c500100
, 2, (RIWR
, ADDR
), iwmmxt_wldstd
),
21799 cCE("wmacs", e600100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21800 cCE("wmacsz", e700100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21801 cCE("wmacu", e400100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21802 cCE("wmacuz", e500100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21803 cCE("wmadds", ea00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21804 cCE("wmaddu", e800100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21805 cCE("wmaxsb", e200160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21806 cCE("wmaxsh", e600160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21807 cCE("wmaxsw", ea00160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21808 cCE("wmaxub", e000160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21809 cCE("wmaxuh", e400160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21810 cCE("wmaxuw", e800160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21811 cCE("wminsb", e300160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21812 cCE("wminsh", e700160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21813 cCE("wminsw", eb00160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21814 cCE("wminub", e100160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21815 cCE("wminuh", e500160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21816 cCE("wminuw", e900160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21817 cCE("wmov", e000000
, 2, (RIWR
, RIWR
), iwmmxt_wmov
),
21818 cCE("wmulsm", e300100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21819 cCE("wmulsl", e200100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21820 cCE("wmulum", e100100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21821 cCE("wmulul", e000100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21822 cCE("wor", e000000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21823 cCE("wpackhss",e700080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21824 cCE("wpackhus",e500080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21825 cCE("wpackwss",eb00080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21826 cCE("wpackwus",e900080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21827 cCE("wpackdss",ef00080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21828 cCE("wpackdus",ed00080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21829 cCE("wrorh", e700040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
21830 cCE("wrorhg", e700148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
21831 cCE("wrorw", eb00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
21832 cCE("wrorwg", eb00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
21833 cCE("wrord", ef00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
21834 cCE("wrordg", ef00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
21835 cCE("wsadb", e000120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21836 cCE("wsadbz", e100120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21837 cCE("wsadh", e400120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21838 cCE("wsadhz", e500120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21839 cCE("wshufh", e0001e0
, 3, (RIWR
, RIWR
, I255
), iwmmxt_wshufh
),
21840 cCE("wsllh", e500040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
21841 cCE("wsllhg", e500148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
21842 cCE("wsllw", e900040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
21843 cCE("wsllwg", e900148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
21844 cCE("wslld", ed00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
21845 cCE("wslldg", ed00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
21846 cCE("wsrah", e400040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
21847 cCE("wsrahg", e400148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
21848 cCE("wsraw", e800040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
21849 cCE("wsrawg", e800148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
21850 cCE("wsrad", ec00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
21851 cCE("wsradg", ec00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
21852 cCE("wsrlh", e600040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
21853 cCE("wsrlhg", e600148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
21854 cCE("wsrlw", ea00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
21855 cCE("wsrlwg", ea00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
21856 cCE("wsrld", ee00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
21857 cCE("wsrldg", ee00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
21858 cCE("wstrb", c000000
, 2, (RIWR
, ADDR
), iwmmxt_wldstbh
),
21859 cCE("wstrh", c400000
, 2, (RIWR
, ADDR
), iwmmxt_wldstbh
),
21860 cCE("wstrw", c000100
, 2, (RIWR_RIWC
, ADDR
), iwmmxt_wldstw
),
21861 cCE("wstrd", c400100
, 2, (RIWR
, ADDR
), iwmmxt_wldstd
),
21862 cCE("wsubbss", e3001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21863 cCE("wsubb", e0001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21864 cCE("wsubbus", e1001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21865 cCE("wsubhss", e7001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21866 cCE("wsubh", e4001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21867 cCE("wsubhus", e5001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21868 cCE("wsubwss", eb001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21869 cCE("wsubw", e8001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21870 cCE("wsubwus", e9001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21871 cCE("wunpckehub",e0000c0
, 2, (RIWR
, RIWR
), rd_rn
),
21872 cCE("wunpckehuh",e4000c0
, 2, (RIWR
, RIWR
), rd_rn
),
21873 cCE("wunpckehuw",e8000c0
, 2, (RIWR
, RIWR
), rd_rn
),
21874 cCE("wunpckehsb",e2000c0
, 2, (RIWR
, RIWR
), rd_rn
),
21875 cCE("wunpckehsh",e6000c0
, 2, (RIWR
, RIWR
), rd_rn
),
21876 cCE("wunpckehsw",ea000c0
, 2, (RIWR
, RIWR
), rd_rn
),
21877 cCE("wunpckihb", e1000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21878 cCE("wunpckihh", e5000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21879 cCE("wunpckihw", e9000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21880 cCE("wunpckelub",e0000e0
, 2, (RIWR
, RIWR
), rd_rn
),
21881 cCE("wunpckeluh",e4000e0
, 2, (RIWR
, RIWR
), rd_rn
),
21882 cCE("wunpckeluw",e8000e0
, 2, (RIWR
, RIWR
), rd_rn
),
21883 cCE("wunpckelsb",e2000e0
, 2, (RIWR
, RIWR
), rd_rn
),
21884 cCE("wunpckelsh",e6000e0
, 2, (RIWR
, RIWR
), rd_rn
),
21885 cCE("wunpckelsw",ea000e0
, 2, (RIWR
, RIWR
), rd_rn
),
21886 cCE("wunpckilb", e1000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21887 cCE("wunpckilh", e5000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21888 cCE("wunpckilw", e9000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21889 cCE("wxor", e100000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21890 cCE("wzero", e300000
, 1, (RIWR
), iwmmxt_wzero
),
21893 #define ARM_VARIANT & arm_cext_iwmmxt2 /* Intel Wireless MMX technology, version 2. */
21895 cCE("torvscb", e12f190
, 1, (RR
), iwmmxt_tandorc
),
21896 cCE("torvsch", e52f190
, 1, (RR
), iwmmxt_tandorc
),
21897 cCE("torvscw", e92f190
, 1, (RR
), iwmmxt_tandorc
),
21898 cCE("wabsb", e2001c0
, 2, (RIWR
, RIWR
), rd_rn
),
21899 cCE("wabsh", e6001c0
, 2, (RIWR
, RIWR
), rd_rn
),
21900 cCE("wabsw", ea001c0
, 2, (RIWR
, RIWR
), rd_rn
),
21901 cCE("wabsdiffb", e1001c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21902 cCE("wabsdiffh", e5001c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21903 cCE("wabsdiffw", e9001c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21904 cCE("waddbhusl", e2001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21905 cCE("waddbhusm", e6001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21906 cCE("waddhc", e600180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21907 cCE("waddwc", ea00180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21908 cCE("waddsubhx", ea001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21909 cCE("wavg4", e400000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21910 cCE("wavg4r", e500000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21911 cCE("wmaddsn", ee00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21912 cCE("wmaddsx", eb00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21913 cCE("wmaddun", ec00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21914 cCE("wmaddux", e900100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21915 cCE("wmerge", e000080
, 4, (RIWR
, RIWR
, RIWR
, I7
), iwmmxt_wmerge
),
21916 cCE("wmiabb", e0000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21917 cCE("wmiabt", e1000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21918 cCE("wmiatb", e2000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21919 cCE("wmiatt", e3000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21920 cCE("wmiabbn", e4000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21921 cCE("wmiabtn", e5000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21922 cCE("wmiatbn", e6000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21923 cCE("wmiattn", e7000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21924 cCE("wmiawbb", e800120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21925 cCE("wmiawbt", e900120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21926 cCE("wmiawtb", ea00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21927 cCE("wmiawtt", eb00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21928 cCE("wmiawbbn", ec00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21929 cCE("wmiawbtn", ed00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21930 cCE("wmiawtbn", ee00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21931 cCE("wmiawttn", ef00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21932 cCE("wmulsmr", ef00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21933 cCE("wmulumr", ed00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21934 cCE("wmulwumr", ec000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21935 cCE("wmulwsmr", ee000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21936 cCE("wmulwum", ed000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21937 cCE("wmulwsm", ef000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21938 cCE("wmulwl", eb000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21939 cCE("wqmiabb", e8000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21940 cCE("wqmiabt", e9000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21941 cCE("wqmiatb", ea000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21942 cCE("wqmiatt", eb000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21943 cCE("wqmiabbn", ec000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21944 cCE("wqmiabtn", ed000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21945 cCE("wqmiatbn", ee000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21946 cCE("wqmiattn", ef000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21947 cCE("wqmulm", e100080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21948 cCE("wqmulmr", e300080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21949 cCE("wqmulwm", ec000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21950 cCE("wqmulwmr", ee000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21951 cCE("wsubaddhx", ed001c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21954 #define ARM_VARIANT & arm_cext_maverick /* Cirrus Maverick instructions. */
21956 cCE("cfldrs", c100400
, 2, (RMF
, ADDRGLDC
), rd_cpaddr
),
21957 cCE("cfldrd", c500400
, 2, (RMD
, ADDRGLDC
), rd_cpaddr
),
21958 cCE("cfldr32", c100500
, 2, (RMFX
, ADDRGLDC
), rd_cpaddr
),
21959 cCE("cfldr64", c500500
, 2, (RMDX
, ADDRGLDC
), rd_cpaddr
),
21960 cCE("cfstrs", c000400
, 2, (RMF
, ADDRGLDC
), rd_cpaddr
),
21961 cCE("cfstrd", c400400
, 2, (RMD
, ADDRGLDC
), rd_cpaddr
),
21962 cCE("cfstr32", c000500
, 2, (RMFX
, ADDRGLDC
), rd_cpaddr
),
21963 cCE("cfstr64", c400500
, 2, (RMDX
, ADDRGLDC
), rd_cpaddr
),
21964 cCE("cfmvsr", e000450
, 2, (RMF
, RR
), rn_rd
),
21965 cCE("cfmvrs", e100450
, 2, (RR
, RMF
), rd_rn
),
21966 cCE("cfmvdlr", e000410
, 2, (RMD
, RR
), rn_rd
),
21967 cCE("cfmvrdl", e100410
, 2, (RR
, RMD
), rd_rn
),
21968 cCE("cfmvdhr", e000430
, 2, (RMD
, RR
), rn_rd
),
21969 cCE("cfmvrdh", e100430
, 2, (RR
, RMD
), rd_rn
),
21970 cCE("cfmv64lr",e000510
, 2, (RMDX
, RR
), rn_rd
),
21971 cCE("cfmvr64l",e100510
, 2, (RR
, RMDX
), rd_rn
),
21972 cCE("cfmv64hr",e000530
, 2, (RMDX
, RR
), rn_rd
),
21973 cCE("cfmvr64h",e100530
, 2, (RR
, RMDX
), rd_rn
),
21974 cCE("cfmval32",e200440
, 2, (RMAX
, RMFX
), rd_rn
),
21975 cCE("cfmv32al",e100440
, 2, (RMFX
, RMAX
), rd_rn
),
21976 cCE("cfmvam32",e200460
, 2, (RMAX
, RMFX
), rd_rn
),
21977 cCE("cfmv32am",e100460
, 2, (RMFX
, RMAX
), rd_rn
),
21978 cCE("cfmvah32",e200480
, 2, (RMAX
, RMFX
), rd_rn
),
21979 cCE("cfmv32ah",e100480
, 2, (RMFX
, RMAX
), rd_rn
),
21980 cCE("cfmva32", e2004a0
, 2, (RMAX
, RMFX
), rd_rn
),
21981 cCE("cfmv32a", e1004a0
, 2, (RMFX
, RMAX
), rd_rn
),
21982 cCE("cfmva64", e2004c0
, 2, (RMAX
, RMDX
), rd_rn
),
21983 cCE("cfmv64a", e1004c0
, 2, (RMDX
, RMAX
), rd_rn
),
21984 cCE("cfmvsc32",e2004e0
, 2, (RMDS
, RMDX
), mav_dspsc
),
21985 cCE("cfmv32sc",e1004e0
, 2, (RMDX
, RMDS
), rd
),
21986 cCE("cfcpys", e000400
, 2, (RMF
, RMF
), rd_rn
),
21987 cCE("cfcpyd", e000420
, 2, (RMD
, RMD
), rd_rn
),
21988 cCE("cfcvtsd", e000460
, 2, (RMD
, RMF
), rd_rn
),
21989 cCE("cfcvtds", e000440
, 2, (RMF
, RMD
), rd_rn
),
21990 cCE("cfcvt32s",e000480
, 2, (RMF
, RMFX
), rd_rn
),
21991 cCE("cfcvt32d",e0004a0
, 2, (RMD
, RMFX
), rd_rn
),
21992 cCE("cfcvt64s",e0004c0
, 2, (RMF
, RMDX
), rd_rn
),
21993 cCE("cfcvt64d",e0004e0
, 2, (RMD
, RMDX
), rd_rn
),
21994 cCE("cfcvts32",e100580
, 2, (RMFX
, RMF
), rd_rn
),
21995 cCE("cfcvtd32",e1005a0
, 2, (RMFX
, RMD
), rd_rn
),
21996 cCE("cftruncs32",e1005c0
, 2, (RMFX
, RMF
), rd_rn
),
21997 cCE("cftruncd32",e1005e0
, 2, (RMFX
, RMD
), rd_rn
),
21998 cCE("cfrshl32",e000550
, 3, (RMFX
, RMFX
, RR
), mav_triple
),
21999 cCE("cfrshl64",e000570
, 3, (RMDX
, RMDX
, RR
), mav_triple
),
22000 cCE("cfsh32", e000500
, 3, (RMFX
, RMFX
, I63s
), mav_shift
),
22001 cCE("cfsh64", e200500
, 3, (RMDX
, RMDX
, I63s
), mav_shift
),
22002 cCE("cfcmps", e100490
, 3, (RR
, RMF
, RMF
), rd_rn_rm
),
22003 cCE("cfcmpd", e1004b0
, 3, (RR
, RMD
, RMD
), rd_rn_rm
),
22004 cCE("cfcmp32", e100590
, 3, (RR
, RMFX
, RMFX
), rd_rn_rm
),
22005 cCE("cfcmp64", e1005b0
, 3, (RR
, RMDX
, RMDX
), rd_rn_rm
),
22006 cCE("cfabss", e300400
, 2, (RMF
, RMF
), rd_rn
),
22007 cCE("cfabsd", e300420
, 2, (RMD
, RMD
), rd_rn
),
22008 cCE("cfnegs", e300440
, 2, (RMF
, RMF
), rd_rn
),
22009 cCE("cfnegd", e300460
, 2, (RMD
, RMD
), rd_rn
),
22010 cCE("cfadds", e300480
, 3, (RMF
, RMF
, RMF
), rd_rn_rm
),
22011 cCE("cfaddd", e3004a0
, 3, (RMD
, RMD
, RMD
), rd_rn_rm
),
22012 cCE("cfsubs", e3004c0
, 3, (RMF
, RMF
, RMF
), rd_rn_rm
),
22013 cCE("cfsubd", e3004e0
, 3, (RMD
, RMD
, RMD
), rd_rn_rm
),
22014 cCE("cfmuls", e100400
, 3, (RMF
, RMF
, RMF
), rd_rn_rm
),
22015 cCE("cfmuld", e100420
, 3, (RMD
, RMD
, RMD
), rd_rn_rm
),
22016 cCE("cfabs32", e300500
, 2, (RMFX
, RMFX
), rd_rn
),
22017 cCE("cfabs64", e300520
, 2, (RMDX
, RMDX
), rd_rn
),
22018 cCE("cfneg32", e300540
, 2, (RMFX
, RMFX
), rd_rn
),
22019 cCE("cfneg64", e300560
, 2, (RMDX
, RMDX
), rd_rn
),
22020 cCE("cfadd32", e300580
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
22021 cCE("cfadd64", e3005a0
, 3, (RMDX
, RMDX
, RMDX
), rd_rn_rm
),
22022 cCE("cfsub32", e3005c0
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
22023 cCE("cfsub64", e3005e0
, 3, (RMDX
, RMDX
, RMDX
), rd_rn_rm
),
22024 cCE("cfmul32", e100500
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
22025 cCE("cfmul64", e100520
, 3, (RMDX
, RMDX
, RMDX
), rd_rn_rm
),
22026 cCE("cfmac32", e100540
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
22027 cCE("cfmsc32", e100560
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
22028 cCE("cfmadd32",e000600
, 4, (RMAX
, RMFX
, RMFX
, RMFX
), mav_quad
),
22029 cCE("cfmsub32",e100600
, 4, (RMAX
, RMFX
, RMFX
, RMFX
), mav_quad
),
22030 cCE("cfmadda32", e200600
, 4, (RMAX
, RMAX
, RMFX
, RMFX
), mav_quad
),
22031 cCE("cfmsuba32", e300600
, 4, (RMAX
, RMAX
, RMFX
, RMFX
), mav_quad
),
22033 /* ARMv8.5-A instructions. */
22035 #define ARM_VARIANT & arm_ext_sb
22036 #undef THUMB_VARIANT
22037 #define THUMB_VARIANT & arm_ext_sb
22038 TUF("sb", 57ff070
, f3bf8f70
, 0, (), noargs
, noargs
),
22041 #define ARM_VARIANT & arm_ext_predres
22042 #undef THUMB_VARIANT
22043 #define THUMB_VARIANT & arm_ext_predres
22044 CE("cfprctx", e070f93
, 1, (RRnpc
), rd
),
22045 CE("dvprctx", e070fb3
, 1, (RRnpc
), rd
),
22046 CE("cpprctx", e070ff3
, 1, (RRnpc
), rd
),
22048 /* ARMv8-M instructions. */
22050 #define ARM_VARIANT NULL
22051 #undef THUMB_VARIANT
22052 #define THUMB_VARIANT & arm_ext_v8m
22053 ToU("sg", e97fe97f
, 0, (), noargs
),
22054 ToC("blxns", 4784, 1, (RRnpc
), t_blx
),
22055 ToC("bxns", 4704, 1, (RRnpc
), t_bx
),
22056 ToC("tt", e840f000
, 2, (RRnpc
, RRnpc
), tt
),
22057 ToC("ttt", e840f040
, 2, (RRnpc
, RRnpc
), tt
),
22058 ToC("tta", e840f080
, 2, (RRnpc
, RRnpc
), tt
),
22059 ToC("ttat", e840f0c0
, 2, (RRnpc
, RRnpc
), tt
),
22061 /* FP for ARMv8-M Mainline. Enabled for ARMv8-M Mainline because the
22062 instructions behave as nop if no VFP is present. */
22063 #undef THUMB_VARIANT
22064 #define THUMB_VARIANT & arm_ext_v8m_main
22065 ToC("vlldm", ec300a00
, 1, (RRnpc
), rn
),
22066 ToC("vlstm", ec200a00
, 1, (RRnpc
), rn
),
22068 /* Armv8.1-M Mainline instructions. */
22069 #undef THUMB_VARIANT
22070 #define THUMB_VARIANT & arm_ext_v8_1m_main
22071 toC("bf", _bf
, 2, (EXPs
, EXPs
), t_branch_future
),
22072 toU("bfcsel", _bfcsel
, 4, (EXPs
, EXPs
, EXPs
, COND
), t_branch_future
),
22073 toC("bfx", _bfx
, 2, (EXPs
, RRnpcsp
), t_branch_future
),
22074 toC("bfl", _bfl
, 2, (EXPs
, EXPs
), t_branch_future
),
22075 toC("bflx", _bflx
, 2, (EXPs
, RRnpcsp
), t_branch_future
),
22077 toU("dls", _dls
, 2, (LR
, RRnpcsp
), t_loloop
),
22078 toU("wls", _wls
, 3, (LR
, RRnpcsp
, EXP
), t_loloop
),
22079 toU("le", _le
, 2, (oLR
, EXP
), t_loloop
),
22081 ToC("clrm", e89f0000
, 1, (CLRMLST
), t_clrm
),
22082 ToC("vscclrm", ec9f0a00
, 1, (VRSDVLST
), t_vscclrm
)
22085 #undef THUMB_VARIANT
22117 /* MD interface: bits in the object file. */
22119 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
22120 for use in the a.out file, and stores them in the array pointed to by buf.
22121 This knows about the endian-ness of the target machine and does
22122 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
22123 2 (short) and 4 (long) Floating numbers are put out as a series of
22124 LITTLENUMS (shorts, here at least). */
22127 md_number_to_chars (char * buf
, valueT val
, int n
)
22129 if (target_big_endian
)
22130 number_to_chars_bigendian (buf
, val
, n
);
22132 number_to_chars_littleendian (buf
, val
, n
);
22136 md_chars_to_number (char * buf
, int n
)
22139 unsigned char * where
= (unsigned char *) buf
;
22141 if (target_big_endian
)
22146 result
|= (*where
++ & 255);
22154 result
|= (where
[n
] & 255);
22161 /* MD interface: Sections. */
22163 /* Calculate the maximum variable size (i.e., excluding fr_fix)
22164 that an rs_machine_dependent frag may reach. */
22167 arm_frag_max_var (fragS
*fragp
)
22169 /* We only use rs_machine_dependent for variable-size Thumb instructions,
22170 which are either THUMB_SIZE (2) or INSN_SIZE (4).
22172 Note that we generate relaxable instructions even for cases that don't
22173 really need it, like an immediate that's a trivial constant. So we're
22174 overestimating the instruction size for some of those cases. Rather
22175 than putting more intelligence here, it would probably be better to
22176 avoid generating a relaxation frag in the first place when it can be
22177 determined up front that a short instruction will suffice. */
22179 gas_assert (fragp
->fr_type
== rs_machine_dependent
);
22183 /* Estimate the size of a frag before relaxing. Assume everything fits in
22187 md_estimate_size_before_relax (fragS
* fragp
,
22188 segT segtype ATTRIBUTE_UNUSED
)
22194 /* Convert a machine dependent frag. */
22197 md_convert_frag (bfd
*abfd
, segT asec ATTRIBUTE_UNUSED
, fragS
*fragp
)
22199 unsigned long insn
;
22200 unsigned long old_op
;
22208 buf
= fragp
->fr_literal
+ fragp
->fr_fix
;
22210 old_op
= bfd_get_16(abfd
, buf
);
22211 if (fragp
->fr_symbol
)
22213 exp
.X_op
= O_symbol
;
22214 exp
.X_add_symbol
= fragp
->fr_symbol
;
22218 exp
.X_op
= O_constant
;
22220 exp
.X_add_number
= fragp
->fr_offset
;
22221 opcode
= fragp
->fr_subtype
;
22224 case T_MNEM_ldr_pc
:
22225 case T_MNEM_ldr_pc2
:
22226 case T_MNEM_ldr_sp
:
22227 case T_MNEM_str_sp
:
22234 if (fragp
->fr_var
== 4)
22236 insn
= THUMB_OP32 (opcode
);
22237 if ((old_op
>> 12) == 4 || (old_op
>> 12) == 9)
22239 insn
|= (old_op
& 0x700) << 4;
22243 insn
|= (old_op
& 7) << 12;
22244 insn
|= (old_op
& 0x38) << 13;
22246 insn
|= 0x00000c00;
22247 put_thumb32_insn (buf
, insn
);
22248 reloc_type
= BFD_RELOC_ARM_T32_OFFSET_IMM
;
22252 reloc_type
= BFD_RELOC_ARM_THUMB_OFFSET
;
22254 pc_rel
= (opcode
== T_MNEM_ldr_pc2
);
22257 if (fragp
->fr_var
== 4)
22259 insn
= THUMB_OP32 (opcode
);
22260 insn
|= (old_op
& 0xf0) << 4;
22261 put_thumb32_insn (buf
, insn
);
22262 reloc_type
= BFD_RELOC_ARM_T32_ADD_PC12
;
22266 reloc_type
= BFD_RELOC_ARM_THUMB_ADD
;
22267 exp
.X_add_number
-= 4;
22275 if (fragp
->fr_var
== 4)
22277 int r0off
= (opcode
== T_MNEM_mov
22278 || opcode
== T_MNEM_movs
) ? 0 : 8;
22279 insn
= THUMB_OP32 (opcode
);
22280 insn
= (insn
& 0xe1ffffff) | 0x10000000;
22281 insn
|= (old_op
& 0x700) << r0off
;
22282 put_thumb32_insn (buf
, insn
);
22283 reloc_type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
22287 reloc_type
= BFD_RELOC_ARM_THUMB_IMM
;
22292 if (fragp
->fr_var
== 4)
22294 insn
= THUMB_OP32(opcode
);
22295 put_thumb32_insn (buf
, insn
);
22296 reloc_type
= BFD_RELOC_THUMB_PCREL_BRANCH25
;
22299 reloc_type
= BFD_RELOC_THUMB_PCREL_BRANCH12
;
22303 if (fragp
->fr_var
== 4)
22305 insn
= THUMB_OP32(opcode
);
22306 insn
|= (old_op
& 0xf00) << 14;
22307 put_thumb32_insn (buf
, insn
);
22308 reloc_type
= BFD_RELOC_THUMB_PCREL_BRANCH20
;
22311 reloc_type
= BFD_RELOC_THUMB_PCREL_BRANCH9
;
22314 case T_MNEM_add_sp
:
22315 case T_MNEM_add_pc
:
22316 case T_MNEM_inc_sp
:
22317 case T_MNEM_dec_sp
:
22318 if (fragp
->fr_var
== 4)
22320 /* ??? Choose between add and addw. */
22321 insn
= THUMB_OP32 (opcode
);
22322 insn
|= (old_op
& 0xf0) << 4;
22323 put_thumb32_insn (buf
, insn
);
22324 if (opcode
== T_MNEM_add_pc
)
22325 reloc_type
= BFD_RELOC_ARM_T32_IMM12
;
22327 reloc_type
= BFD_RELOC_ARM_T32_ADD_IMM
;
22330 reloc_type
= BFD_RELOC_ARM_THUMB_ADD
;
22338 if (fragp
->fr_var
== 4)
22340 insn
= THUMB_OP32 (opcode
);
22341 insn
|= (old_op
& 0xf0) << 4;
22342 insn
|= (old_op
& 0xf) << 16;
22343 put_thumb32_insn (buf
, insn
);
22344 if (insn
& (1 << 20))
22345 reloc_type
= BFD_RELOC_ARM_T32_ADD_IMM
;
22347 reloc_type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
22350 reloc_type
= BFD_RELOC_ARM_THUMB_ADD
;
22356 fixp
= fix_new_exp (fragp
, fragp
->fr_fix
, fragp
->fr_var
, &exp
, pc_rel
,
22357 (enum bfd_reloc_code_real
) reloc_type
);
22358 fixp
->fx_file
= fragp
->fr_file
;
22359 fixp
->fx_line
= fragp
->fr_line
;
22360 fragp
->fr_fix
+= fragp
->fr_var
;
22362 /* Set whether we use thumb-2 ISA based on final relaxation results. */
22363 if (thumb_mode
&& fragp
->fr_var
== 4 && no_cpu_selected ()
22364 && !ARM_CPU_HAS_FEATURE (thumb_arch_used
, arm_arch_t2
))
22365 ARM_MERGE_FEATURE_SETS (arm_arch_used
, thumb_arch_used
, arm_ext_v6t2
);
22368 /* Return the size of a relaxable immediate operand instruction.
22369 SHIFT and SIZE specify the form of the allowable immediate. */
22371 relax_immediate (fragS
*fragp
, int size
, int shift
)
22377 /* ??? Should be able to do better than this. */
22378 if (fragp
->fr_symbol
)
22381 low
= (1 << shift
) - 1;
22382 mask
= (1 << (shift
+ size
)) - (1 << shift
);
22383 offset
= fragp
->fr_offset
;
22384 /* Force misaligned offsets to 32-bit variant. */
22387 if (offset
& ~mask
)
22392 /* Get the address of a symbol during relaxation. */
22394 relaxed_symbol_addr (fragS
*fragp
, long stretch
)
22400 sym
= fragp
->fr_symbol
;
22401 sym_frag
= symbol_get_frag (sym
);
22402 know (S_GET_SEGMENT (sym
) != absolute_section
22403 || sym_frag
== &zero_address_frag
);
22404 addr
= S_GET_VALUE (sym
) + fragp
->fr_offset
;
22406 /* If frag has yet to be reached on this pass, assume it will
22407 move by STRETCH just as we did. If this is not so, it will
22408 be because some frag between grows, and that will force
22412 && sym_frag
->relax_marker
!= fragp
->relax_marker
)
22416 /* Adjust stretch for any alignment frag. Note that if have
22417 been expanding the earlier code, the symbol may be
22418 defined in what appears to be an earlier frag. FIXME:
22419 This doesn't handle the fr_subtype field, which specifies
22420 a maximum number of bytes to skip when doing an
22422 for (f
= fragp
; f
!= NULL
&& f
!= sym_frag
; f
= f
->fr_next
)
22424 if (f
->fr_type
== rs_align
|| f
->fr_type
== rs_align_code
)
22427 stretch
= - ((- stretch
)
22428 & ~ ((1 << (int) f
->fr_offset
) - 1));
22430 stretch
&= ~ ((1 << (int) f
->fr_offset
) - 1);
22442 /* Return the size of a relaxable adr pseudo-instruction or PC-relative
22445 relax_adr (fragS
*fragp
, asection
*sec
, long stretch
)
22450 /* Assume worst case for symbols not known to be in the same section. */
22451 if (fragp
->fr_symbol
== NULL
22452 || !S_IS_DEFINED (fragp
->fr_symbol
)
22453 || sec
!= S_GET_SEGMENT (fragp
->fr_symbol
)
22454 || S_IS_WEAK (fragp
->fr_symbol
))
22457 val
= relaxed_symbol_addr (fragp
, stretch
);
22458 addr
= fragp
->fr_address
+ fragp
->fr_fix
;
22459 addr
= (addr
+ 4) & ~3;
22460 /* Force misaligned targets to 32-bit variant. */
22464 if (val
< 0 || val
> 1020)
22469 /* Return the size of a relaxable add/sub immediate instruction. */
22471 relax_addsub (fragS
*fragp
, asection
*sec
)
22476 buf
= fragp
->fr_literal
+ fragp
->fr_fix
;
22477 op
= bfd_get_16(sec
->owner
, buf
);
22478 if ((op
& 0xf) == ((op
>> 4) & 0xf))
22479 return relax_immediate (fragp
, 8, 0);
22481 return relax_immediate (fragp
, 3, 0);
22484 /* Return TRUE iff the definition of symbol S could be pre-empted
22485 (overridden) at link or load time. */
22487 symbol_preemptible (symbolS
*s
)
22489 /* Weak symbols can always be pre-empted. */
22493 /* Non-global symbols cannot be pre-empted. */
22494 if (! S_IS_EXTERNAL (s
))
22498 /* In ELF, a global symbol can be marked protected, or private. In that
22499 case it can't be pre-empted (other definitions in the same link unit
22500 would violate the ODR). */
22501 if (ELF_ST_VISIBILITY (S_GET_OTHER (s
)) > STV_DEFAULT
)
22505 /* Other global symbols might be pre-empted. */
22509 /* Return the size of a relaxable branch instruction. BITS is the
22510 size of the offset field in the narrow instruction. */
22513 relax_branch (fragS
*fragp
, asection
*sec
, int bits
, long stretch
)
22519 /* Assume worst case for symbols not known to be in the same section. */
22520 if (!S_IS_DEFINED (fragp
->fr_symbol
)
22521 || sec
!= S_GET_SEGMENT (fragp
->fr_symbol
)
22522 || S_IS_WEAK (fragp
->fr_symbol
))
22526 /* A branch to a function in ARM state will require interworking. */
22527 if (S_IS_DEFINED (fragp
->fr_symbol
)
22528 && ARM_IS_FUNC (fragp
->fr_symbol
))
22532 if (symbol_preemptible (fragp
->fr_symbol
))
22535 val
= relaxed_symbol_addr (fragp
, stretch
);
22536 addr
= fragp
->fr_address
+ fragp
->fr_fix
+ 4;
22539 /* Offset is a signed value *2 */
22541 if (val
>= limit
|| val
< -limit
)
22547 /* Relax a machine dependent frag. This returns the amount by which
22548 the current size of the frag should change. */
22551 arm_relax_frag (asection
*sec
, fragS
*fragp
, long stretch
)
22556 oldsize
= fragp
->fr_var
;
22557 switch (fragp
->fr_subtype
)
22559 case T_MNEM_ldr_pc2
:
22560 newsize
= relax_adr (fragp
, sec
, stretch
);
22562 case T_MNEM_ldr_pc
:
22563 case T_MNEM_ldr_sp
:
22564 case T_MNEM_str_sp
:
22565 newsize
= relax_immediate (fragp
, 8, 2);
22569 newsize
= relax_immediate (fragp
, 5, 2);
22573 newsize
= relax_immediate (fragp
, 5, 1);
22577 newsize
= relax_immediate (fragp
, 5, 0);
22580 newsize
= relax_adr (fragp
, sec
, stretch
);
22586 newsize
= relax_immediate (fragp
, 8, 0);
22589 newsize
= relax_branch (fragp
, sec
, 11, stretch
);
22592 newsize
= relax_branch (fragp
, sec
, 8, stretch
);
22594 case T_MNEM_add_sp
:
22595 case T_MNEM_add_pc
:
22596 newsize
= relax_immediate (fragp
, 8, 2);
22598 case T_MNEM_inc_sp
:
22599 case T_MNEM_dec_sp
:
22600 newsize
= relax_immediate (fragp
, 7, 2);
22606 newsize
= relax_addsub (fragp
, sec
);
22612 fragp
->fr_var
= newsize
;
22613 /* Freeze wide instructions that are at or before the same location as
22614 in the previous pass. This avoids infinite loops.
22615 Don't freeze them unconditionally because targets may be artificially
22616 misaligned by the expansion of preceding frags. */
22617 if (stretch
<= 0 && newsize
> 2)
22619 md_convert_frag (sec
->owner
, sec
, fragp
);
22623 return newsize
- oldsize
;
22626 /* Round up a section size to the appropriate boundary. */
22629 md_section_align (segT segment ATTRIBUTE_UNUSED
,
22635 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
22636 of an rs_align_code fragment. */
22639 arm_handle_align (fragS
* fragP
)
22641 static unsigned char const arm_noop
[2][2][4] =
22644 {0x00, 0x00, 0xa0, 0xe1}, /* LE */
22645 {0xe1, 0xa0, 0x00, 0x00}, /* BE */
22648 {0x00, 0xf0, 0x20, 0xe3}, /* LE */
22649 {0xe3, 0x20, 0xf0, 0x00}, /* BE */
22652 static unsigned char const thumb_noop
[2][2][2] =
22655 {0xc0, 0x46}, /* LE */
22656 {0x46, 0xc0}, /* BE */
22659 {0x00, 0xbf}, /* LE */
22660 {0xbf, 0x00} /* BE */
22663 static unsigned char const wide_thumb_noop
[2][4] =
22664 { /* Wide Thumb-2 */
22665 {0xaf, 0xf3, 0x00, 0x80}, /* LE */
22666 {0xf3, 0xaf, 0x80, 0x00}, /* BE */
22669 unsigned bytes
, fix
, noop_size
;
22671 const unsigned char * noop
;
22672 const unsigned char *narrow_noop
= NULL
;
22677 if (fragP
->fr_type
!= rs_align_code
)
22680 bytes
= fragP
->fr_next
->fr_address
- fragP
->fr_address
- fragP
->fr_fix
;
22681 p
= fragP
->fr_literal
+ fragP
->fr_fix
;
22684 if (bytes
> MAX_MEM_FOR_RS_ALIGN_CODE
)
22685 bytes
&= MAX_MEM_FOR_RS_ALIGN_CODE
;
22687 gas_assert ((fragP
->tc_frag_data
.thumb_mode
& MODE_RECORDED
) != 0);
22689 if (fragP
->tc_frag_data
.thumb_mode
& (~ MODE_RECORDED
))
22691 if (ARM_CPU_HAS_FEATURE (selected_cpu_name
[0]
22692 ? selected_cpu
: arm_arch_none
, arm_ext_v6t2
))
22694 narrow_noop
= thumb_noop
[1][target_big_endian
];
22695 noop
= wide_thumb_noop
[target_big_endian
];
22698 noop
= thumb_noop
[0][target_big_endian
];
22706 noop
= arm_noop
[ARM_CPU_HAS_FEATURE (selected_cpu_name
[0]
22707 ? selected_cpu
: arm_arch_none
,
22709 [target_big_endian
];
22716 fragP
->fr_var
= noop_size
;
22718 if (bytes
& (noop_size
- 1))
22720 fix
= bytes
& (noop_size
- 1);
22722 insert_data_mapping_symbol (state
, fragP
->fr_fix
, fragP
, fix
);
22724 memset (p
, 0, fix
);
22731 if (bytes
& noop_size
)
22733 /* Insert a narrow noop. */
22734 memcpy (p
, narrow_noop
, noop_size
);
22736 bytes
-= noop_size
;
22740 /* Use wide noops for the remainder */
22744 while (bytes
>= noop_size
)
22746 memcpy (p
, noop
, noop_size
);
22748 bytes
-= noop_size
;
22752 fragP
->fr_fix
+= fix
;
22755 /* Called from md_do_align. Used to create an alignment
22756 frag in a code section. */
22759 arm_frag_align_code (int n
, int max
)
22763 /* We assume that there will never be a requirement
22764 to support alignments greater than MAX_MEM_FOR_RS_ALIGN_CODE bytes. */
22765 if (max
> MAX_MEM_FOR_RS_ALIGN_CODE
)
22770 _("alignments greater than %d bytes not supported in .text sections."),
22771 MAX_MEM_FOR_RS_ALIGN_CODE
+ 1);
22772 as_fatal ("%s", err_msg
);
22775 p
= frag_var (rs_align_code
,
22776 MAX_MEM_FOR_RS_ALIGN_CODE
,
22778 (relax_substateT
) max
,
22785 /* Perform target specific initialisation of a frag.
22786 Note - despite the name this initialisation is not done when the frag
22787 is created, but only when its type is assigned. A frag can be created
22788 and used a long time before its type is set, so beware of assuming that
22789 this initialisation is performed first. */
22793 arm_init_frag (fragS
* fragP
, int max_chars ATTRIBUTE_UNUSED
)
22795 /* Record whether this frag is in an ARM or a THUMB area. */
22796 fragP
->tc_frag_data
.thumb_mode
= thumb_mode
| MODE_RECORDED
;
22799 #else /* OBJ_ELF is defined. */
22801 arm_init_frag (fragS
* fragP
, int max_chars
)
22803 bfd_boolean frag_thumb_mode
;
22805 /* If the current ARM vs THUMB mode has not already
22806 been recorded into this frag then do so now. */
22807 if ((fragP
->tc_frag_data
.thumb_mode
& MODE_RECORDED
) == 0)
22808 fragP
->tc_frag_data
.thumb_mode
= thumb_mode
| MODE_RECORDED
;
22810 /* PR 21809: Do not set a mapping state for debug sections
22811 - it just confuses other tools. */
22812 if (bfd_get_section_flags (NULL
, now_seg
) & SEC_DEBUGGING
)
22815 frag_thumb_mode
= fragP
->tc_frag_data
.thumb_mode
^ MODE_RECORDED
;
22817 /* Record a mapping symbol for alignment frags. We will delete this
22818 later if the alignment ends up empty. */
22819 switch (fragP
->fr_type
)
22822 case rs_align_test
:
22824 mapping_state_2 (MAP_DATA
, max_chars
);
22826 case rs_align_code
:
22827 mapping_state_2 (frag_thumb_mode
? MAP_THUMB
: MAP_ARM
, max_chars
);
22834 /* When we change sections we need to issue a new mapping symbol. */
22837 arm_elf_change_section (void)
22839 /* Link an unlinked unwind index table section to the .text section. */
22840 if (elf_section_type (now_seg
) == SHT_ARM_EXIDX
22841 && elf_linked_to_section (now_seg
) == NULL
)
22842 elf_linked_to_section (now_seg
) = text_section
;
22846 arm_elf_section_type (const char * str
, size_t len
)
22848 if (len
== 5 && strncmp (str
, "exidx", 5) == 0)
22849 return SHT_ARM_EXIDX
;
22854 /* Code to deal with unwinding tables. */
22856 static void add_unwind_adjustsp (offsetT
);
22858 /* Generate any deferred unwind frame offset. */
22861 flush_pending_unwind (void)
22865 offset
= unwind
.pending_offset
;
22866 unwind
.pending_offset
= 0;
22868 add_unwind_adjustsp (offset
);
22871 /* Add an opcode to this list for this function. Two-byte opcodes should
22872 be passed as op[0] << 8 | op[1]. The list of opcodes is built in reverse
22876 add_unwind_opcode (valueT op
, int length
)
22878 /* Add any deferred stack adjustment. */
22879 if (unwind
.pending_offset
)
22880 flush_pending_unwind ();
22882 unwind
.sp_restored
= 0;
22884 if (unwind
.opcode_count
+ length
> unwind
.opcode_alloc
)
22886 unwind
.opcode_alloc
+= ARM_OPCODE_CHUNK_SIZE
;
22887 if (unwind
.opcodes
)
22888 unwind
.opcodes
= XRESIZEVEC (unsigned char, unwind
.opcodes
,
22889 unwind
.opcode_alloc
);
22891 unwind
.opcodes
= XNEWVEC (unsigned char, unwind
.opcode_alloc
);
22896 unwind
.opcodes
[unwind
.opcode_count
] = op
& 0xff;
22898 unwind
.opcode_count
++;
22902 /* Add unwind opcodes to adjust the stack pointer. */
22905 add_unwind_adjustsp (offsetT offset
)
22909 if (offset
> 0x200)
22911 /* We need at most 5 bytes to hold a 32-bit value in a uleb128. */
22916 /* Long form: 0xb2, uleb128. */
22917 /* This might not fit in a word so add the individual bytes,
22918 remembering the list is built in reverse order. */
22919 o
= (valueT
) ((offset
- 0x204) >> 2);
22921 add_unwind_opcode (0, 1);
22923 /* Calculate the uleb128 encoding of the offset. */
22927 bytes
[n
] = o
& 0x7f;
22933 /* Add the insn. */
22935 add_unwind_opcode (bytes
[n
- 1], 1);
22936 add_unwind_opcode (0xb2, 1);
22938 else if (offset
> 0x100)
22940 /* Two short opcodes. */
22941 add_unwind_opcode (0x3f, 1);
22942 op
= (offset
- 0x104) >> 2;
22943 add_unwind_opcode (op
, 1);
22945 else if (offset
> 0)
22947 /* Short opcode. */
22948 op
= (offset
- 4) >> 2;
22949 add_unwind_opcode (op
, 1);
22951 else if (offset
< 0)
22954 while (offset
> 0x100)
22956 add_unwind_opcode (0x7f, 1);
22959 op
= ((offset
- 4) >> 2) | 0x40;
22960 add_unwind_opcode (op
, 1);
22964 /* Finish the list of unwind opcodes for this function. */
22967 finish_unwind_opcodes (void)
22971 if (unwind
.fp_used
)
22973 /* Adjust sp as necessary. */
22974 unwind
.pending_offset
+= unwind
.fp_offset
- unwind
.frame_size
;
22975 flush_pending_unwind ();
22977 /* After restoring sp from the frame pointer. */
22978 op
= 0x90 | unwind
.fp_reg
;
22979 add_unwind_opcode (op
, 1);
22982 flush_pending_unwind ();
22986 /* Start an exception table entry. If idx is nonzero this is an index table
22990 start_unwind_section (const segT text_seg
, int idx
)
22992 const char * text_name
;
22993 const char * prefix
;
22994 const char * prefix_once
;
22995 const char * group_name
;
23003 prefix
= ELF_STRING_ARM_unwind
;
23004 prefix_once
= ELF_STRING_ARM_unwind_once
;
23005 type
= SHT_ARM_EXIDX
;
23009 prefix
= ELF_STRING_ARM_unwind_info
;
23010 prefix_once
= ELF_STRING_ARM_unwind_info_once
;
23011 type
= SHT_PROGBITS
;
23014 text_name
= segment_name (text_seg
);
23015 if (streq (text_name
, ".text"))
23018 if (strncmp (text_name
, ".gnu.linkonce.t.",
23019 strlen (".gnu.linkonce.t.")) == 0)
23021 prefix
= prefix_once
;
23022 text_name
+= strlen (".gnu.linkonce.t.");
23025 sec_name
= concat (prefix
, text_name
, (char *) NULL
);
23031 /* Handle COMDAT group. */
23032 if (prefix
!= prefix_once
&& (text_seg
->flags
& SEC_LINK_ONCE
) != 0)
23034 group_name
= elf_group_name (text_seg
);
23035 if (group_name
== NULL
)
23037 as_bad (_("Group section `%s' has no group signature"),
23038 segment_name (text_seg
));
23039 ignore_rest_of_line ();
23042 flags
|= SHF_GROUP
;
23046 obj_elf_change_section (sec_name
, type
, 0, flags
, 0, group_name
,
23049 /* Set the section link for index tables. */
23051 elf_linked_to_section (now_seg
) = text_seg
;
23055 /* Start an unwind table entry. HAVE_DATA is nonzero if we have additional
23056 personality routine data. Returns zero, or the index table value for
23057 an inline entry. */
23060 create_unwind_entry (int have_data
)
23065 /* The current word of data. */
23067 /* The number of bytes left in this word. */
23070 finish_unwind_opcodes ();
23072 /* Remember the current text section. */
23073 unwind
.saved_seg
= now_seg
;
23074 unwind
.saved_subseg
= now_subseg
;
23076 start_unwind_section (now_seg
, 0);
23078 if (unwind
.personality_routine
== NULL
)
23080 if (unwind
.personality_index
== -2)
23083 as_bad (_("handlerdata in cantunwind frame"));
23084 return 1; /* EXIDX_CANTUNWIND. */
23087 /* Use a default personality routine if none is specified. */
23088 if (unwind
.personality_index
== -1)
23090 if (unwind
.opcode_count
> 3)
23091 unwind
.personality_index
= 1;
23093 unwind
.personality_index
= 0;
23096 /* Space for the personality routine entry. */
23097 if (unwind
.personality_index
== 0)
23099 if (unwind
.opcode_count
> 3)
23100 as_bad (_("too many unwind opcodes for personality routine 0"));
23104 /* All the data is inline in the index table. */
23107 while (unwind
.opcode_count
> 0)
23109 unwind
.opcode_count
--;
23110 data
= (data
<< 8) | unwind
.opcodes
[unwind
.opcode_count
];
23114 /* Pad with "finish" opcodes. */
23116 data
= (data
<< 8) | 0xb0;
23123 /* We get two opcodes "free" in the first word. */
23124 size
= unwind
.opcode_count
- 2;
23128 /* PR 16765: Missing or misplaced unwind directives can trigger this. */
23129 if (unwind
.personality_index
!= -1)
23131 as_bad (_("attempt to recreate an unwind entry"));
23135 /* An extra byte is required for the opcode count. */
23136 size
= unwind
.opcode_count
+ 1;
23139 size
= (size
+ 3) >> 2;
23141 as_bad (_("too many unwind opcodes"));
23143 frag_align (2, 0, 0);
23144 record_alignment (now_seg
, 2);
23145 unwind
.table_entry
= expr_build_dot ();
23147 /* Allocate the table entry. */
23148 ptr
= frag_more ((size
<< 2) + 4);
23149 /* PR 13449: Zero the table entries in case some of them are not used. */
23150 memset (ptr
, 0, (size
<< 2) + 4);
23151 where
= frag_now_fix () - ((size
<< 2) + 4);
23153 switch (unwind
.personality_index
)
23156 /* ??? Should this be a PLT generating relocation? */
23157 /* Custom personality routine. */
23158 fix_new (frag_now
, where
, 4, unwind
.personality_routine
, 0, 1,
23159 BFD_RELOC_ARM_PREL31
);
23164 /* Set the first byte to the number of additional words. */
23165 data
= size
> 0 ? size
- 1 : 0;
23169 /* ABI defined personality routines. */
23171 /* Three opcodes bytes are packed into the first word. */
23178 /* The size and first two opcode bytes go in the first word. */
23179 data
= ((0x80 + unwind
.personality_index
) << 8) | size
;
23184 /* Should never happen. */
23188 /* Pack the opcodes into words (MSB first), reversing the list at the same
23190 while (unwind
.opcode_count
> 0)
23194 md_number_to_chars (ptr
, data
, 4);
23199 unwind
.opcode_count
--;
23201 data
= (data
<< 8) | unwind
.opcodes
[unwind
.opcode_count
];
23204 /* Finish off the last word. */
23207 /* Pad with "finish" opcodes. */
23209 data
= (data
<< 8) | 0xb0;
23211 md_number_to_chars (ptr
, data
, 4);
23216 /* Add an empty descriptor if there is no user-specified data. */
23217 ptr
= frag_more (4);
23218 md_number_to_chars (ptr
, 0, 4);
23225 /* Initialize the DWARF-2 unwind information for this procedure. */
23228 tc_arm_frame_initial_instructions (void)
23230 cfi_add_CFA_def_cfa (REG_SP
, 0);
23232 #endif /* OBJ_ELF */
23234 /* Convert REGNAME to a DWARF-2 register number. */
23237 tc_arm_regname_to_dw2regnum (char *regname
)
23239 int reg
= arm_reg_parse (®name
, REG_TYPE_RN
);
23243 /* PR 16694: Allow VFP registers as well. */
23244 reg
= arm_reg_parse (®name
, REG_TYPE_VFS
);
23248 reg
= arm_reg_parse (®name
, REG_TYPE_VFD
);
23257 tc_pe_dwarf2_emit_offset (symbolS
*symbol
, unsigned int size
)
23261 exp
.X_op
= O_secrel
;
23262 exp
.X_add_symbol
= symbol
;
23263 exp
.X_add_number
= 0;
23264 emit_expr (&exp
, size
);
23268 /* MD interface: Symbol and relocation handling. */
23270 /* Return the address within the segment that a PC-relative fixup is
23271 relative to. For ARM, PC-relative fixups applied to instructions
23272 are generally relative to the location of the fixup plus 8 bytes.
23273 Thumb branches are offset by 4, and Thumb loads relative to PC
23274 require special handling. */
23277 md_pcrel_from_section (fixS
* fixP
, segT seg
)
23279 offsetT base
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
23281 /* If this is pc-relative and we are going to emit a relocation
23282 then we just want to put out any pipeline compensation that the linker
23283 will need. Otherwise we want to use the calculated base.
23284 For WinCE we skip the bias for externals as well, since this
23285 is how the MS ARM-CE assembler behaves and we want to be compatible. */
23287 && ((fixP
->fx_addsy
&& S_GET_SEGMENT (fixP
->fx_addsy
) != seg
)
23288 || (arm_force_relocation (fixP
)
23290 && !S_IS_EXTERNAL (fixP
->fx_addsy
)
23296 switch (fixP
->fx_r_type
)
23298 /* PC relative addressing on the Thumb is slightly odd as the
23299 bottom two bits of the PC are forced to zero for the
23300 calculation. This happens *after* application of the
23301 pipeline offset. However, Thumb adrl already adjusts for
23302 this, so we need not do it again. */
23303 case BFD_RELOC_ARM_THUMB_ADD
:
23306 case BFD_RELOC_ARM_THUMB_OFFSET
:
23307 case BFD_RELOC_ARM_T32_OFFSET_IMM
:
23308 case BFD_RELOC_ARM_T32_ADD_PC12
:
23309 case BFD_RELOC_ARM_T32_CP_OFF_IMM
:
23310 return (base
+ 4) & ~3;
23312 /* Thumb branches are simply offset by +4. */
23313 case BFD_RELOC_THUMB_PCREL_BRANCH5
:
23314 case BFD_RELOC_THUMB_PCREL_BRANCH7
:
23315 case BFD_RELOC_THUMB_PCREL_BRANCH9
:
23316 case BFD_RELOC_THUMB_PCREL_BRANCH12
:
23317 case BFD_RELOC_THUMB_PCREL_BRANCH20
:
23318 case BFD_RELOC_THUMB_PCREL_BRANCH25
:
23319 case BFD_RELOC_THUMB_PCREL_BFCSEL
:
23320 case BFD_RELOC_ARM_THUMB_BF17
:
23321 case BFD_RELOC_ARM_THUMB_BF19
:
23322 case BFD_RELOC_ARM_THUMB_BF13
:
23323 case BFD_RELOC_ARM_THUMB_LOOP12
:
23326 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
23328 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
23329 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
23330 && ARM_IS_FUNC (fixP
->fx_addsy
)
23331 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
23332 base
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
23335 /* BLX is like branches above, but forces the low two bits of PC to
23337 case BFD_RELOC_THUMB_PCREL_BLX
:
23339 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
23340 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
23341 && THUMB_IS_FUNC (fixP
->fx_addsy
)
23342 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
23343 base
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
23344 return (base
+ 4) & ~3;
23346 /* ARM mode branches are offset by +8. However, the Windows CE
23347 loader expects the relocation not to take this into account. */
23348 case BFD_RELOC_ARM_PCREL_BLX
:
23350 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
23351 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
23352 && ARM_IS_FUNC (fixP
->fx_addsy
)
23353 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
23354 base
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
23357 case BFD_RELOC_ARM_PCREL_CALL
:
23359 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
23360 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
23361 && THUMB_IS_FUNC (fixP
->fx_addsy
)
23362 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
23363 base
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
23366 case BFD_RELOC_ARM_PCREL_BRANCH
:
23367 case BFD_RELOC_ARM_PCREL_JUMP
:
23368 case BFD_RELOC_ARM_PLT32
:
23370 /* When handling fixups immediately, because we have already
23371 discovered the value of a symbol, or the address of the frag involved
23372 we must account for the offset by +8, as the OS loader will never see the reloc.
23373 see fixup_segment() in write.c
23374 The S_IS_EXTERNAL test handles the case of global symbols.
23375 Those need the calculated base, not just the pipe compensation the linker will need. */
23377 && fixP
->fx_addsy
!= NULL
23378 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
23379 && (S_IS_EXTERNAL (fixP
->fx_addsy
) || !arm_force_relocation (fixP
)))
23387 /* ARM mode loads relative to PC are also offset by +8. Unlike
23388 branches, the Windows CE loader *does* expect the relocation
23389 to take this into account. */
23390 case BFD_RELOC_ARM_OFFSET_IMM
:
23391 case BFD_RELOC_ARM_OFFSET_IMM8
:
23392 case BFD_RELOC_ARM_HWLITERAL
:
23393 case BFD_RELOC_ARM_LITERAL
:
23394 case BFD_RELOC_ARM_CP_OFF_IMM
:
23398 /* Other PC-relative relocations are un-offset. */
23404 static bfd_boolean flag_warn_syms
= TRUE
;
23407 arm_tc_equal_in_insn (int c ATTRIBUTE_UNUSED
, char * name
)
23409 /* PR 18347 - Warn if the user attempts to create a symbol with the same
23410 name as an ARM instruction. Whilst strictly speaking it is allowed, it
23411 does mean that the resulting code might be very confusing to the reader.
23412 Also this warning can be triggered if the user omits an operand before
23413 an immediate address, eg:
23417 GAS treats this as an assignment of the value of the symbol foo to a
23418 symbol LDR, and so (without this code) it will not issue any kind of
23419 warning or error message.
23421 Note - ARM instructions are case-insensitive but the strings in the hash
23422 table are all stored in lower case, so we must first ensure that name is
23424 if (flag_warn_syms
&& arm_ops_hsh
)
23426 char * nbuf
= strdup (name
);
23429 for (p
= nbuf
; *p
; p
++)
23431 if (hash_find (arm_ops_hsh
, nbuf
) != NULL
)
23433 static struct hash_control
* already_warned
= NULL
;
23435 if (already_warned
== NULL
)
23436 already_warned
= hash_new ();
23437 /* Only warn about the symbol once. To keep the code
23438 simple we let hash_insert do the lookup for us. */
23439 if (hash_insert (already_warned
, name
, NULL
) == NULL
)
23440 as_warn (_("[-mwarn-syms]: Assignment makes a symbol match an ARM instruction: %s"), name
);
23449 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
23450 Otherwise we have no need to default values of symbols. */
23453 md_undefined_symbol (char * name ATTRIBUTE_UNUSED
)
23456 if (name
[0] == '_' && name
[1] == 'G'
23457 && streq (name
, GLOBAL_OFFSET_TABLE_NAME
))
23461 if (symbol_find (name
))
23462 as_bad (_("GOT already in the symbol table"));
23464 GOT_symbol
= symbol_new (name
, undefined_section
,
23465 (valueT
) 0, & zero_address_frag
);
23475 /* Subroutine of md_apply_fix. Check to see if an immediate can be
23476 computed as two separate immediate values, added together. We
23477 already know that this value cannot be computed by just one ARM
23480 static unsigned int
23481 validate_immediate_twopart (unsigned int val
,
23482 unsigned int * highpart
)
23487 for (i
= 0; i
< 32; i
+= 2)
23488 if (((a
= rotate_left (val
, i
)) & 0xff) != 0)
23494 * highpart
= (a
>> 8) | ((i
+ 24) << 7);
23496 else if (a
& 0xff0000)
23498 if (a
& 0xff000000)
23500 * highpart
= (a
>> 16) | ((i
+ 16) << 7);
23504 gas_assert (a
& 0xff000000);
23505 * highpart
= (a
>> 24) | ((i
+ 8) << 7);
23508 return (a
& 0xff) | (i
<< 7);
23515 validate_offset_imm (unsigned int val
, int hwse
)
23517 if ((hwse
&& val
> 255) || val
> 4095)
/* Subroutine of md_apply_fix.  Do those data_ops which can take a
   negative immediate constant by altering the instruction.  A bit of
   a hack really.
	MOV <-> MVN
	AND <-> BIC
	ADC <-> SBC
   by inverting the second operand, and
	ADD <-> SUB
	CMP <-> CMN
   by negating the second operand.

   On success, rewrites *INSTRUCTION with the replacement opcode and
   returns the transformed encodable immediate; returns FAIL when no
   substitution works.  */

static unsigned long
negate_data_op (unsigned long * instruction,
		unsigned long value)
{
  int op, new_inst;
  unsigned long negated, inverted;

  /* Try both candidate constants up front; each is FAIL if the
     transformed value is not a valid ARM modified immediate.  */
  negated = encode_arm_immediate (-value);
  inverted = encode_arm_immediate (~value);

  op = (*instruction >> DATA_OP_SHIFT) & 0xf;
  switch (op)
    {
      /* First negates.  */
    case OPCODE_SUB:		 /* ADD <-> SUB  */
      new_inst = OPCODE_ADD;
      value = negated;
      break;

    case OPCODE_ADD:
      new_inst = OPCODE_SUB;
      value = negated;
      break;

    case OPCODE_CMP:		 /* CMP <-> CMN  */
      new_inst = OPCODE_CMN;
      value = negated;
      break;

    case OPCODE_CMN:
      new_inst = OPCODE_CMP;
      value = negated;
      break;

      /* Now Inverted ops.  */
    case OPCODE_MOV:		 /* MOV <-> MVN  */
      new_inst = OPCODE_MVN;
      value = inverted;
      break;

    case OPCODE_MVN:
      new_inst = OPCODE_MOV;
      value = inverted;
      break;

    case OPCODE_AND:		 /* AND <-> BIC  */
      new_inst = OPCODE_BIC;
      value = inverted;
      break;

    case OPCODE_BIC:
      new_inst = OPCODE_AND;
      value = inverted;
      break;

    case OPCODE_ADC:		 /* ADC <-> SBC  */
      new_inst = OPCODE_SBC;
      value = inverted;
      break;

    case OPCODE_SBC:
      new_inst = OPCODE_ADC;
      value = inverted;
      break;

      /* We cannot do anything.  */
    default:
      return FAIL;
    }

  if (value == (unsigned) FAIL)
    return FAIL;

  /* Splice the replacement opcode into the instruction word.  */
  *instruction &= OPCODE_MASK;
  *instruction |= new_inst << DATA_OP_SHIFT;
  return value;
}
/* Like negate_data_op, but for Thumb-2.  Rewrites *INSTRUCTION with
   the paired opcode and returns the transformed Thumb-2 modified
   immediate, or FAIL when no substitution is possible.  */

static unsigned int
thumb32_negate_data_op (offsetT *instruction, unsigned int value)
{
  int op, new_inst;
  int rd;
  unsigned int negated, inverted;

  /* Candidate replacement constants; FAIL when not encodable.  */
  negated = encode_thumb32_immediate (-value);
  inverted = encode_thumb32_immediate (~value);

  rd = (*instruction >> 8) & 0xf;
  op = (*instruction >> T2_DATA_OP_SHIFT) & 0xf;
  switch (op)
    {
      /* ADD <-> SUB.  Includes CMP <-> CMN.  */
    case T2_OPCODE_SUB:
      new_inst = T2_OPCODE_ADD;
      value = negated;
      break;

    case T2_OPCODE_ADD:
      new_inst = T2_OPCODE_SUB;
      value = negated;
      break;

      /* ORR <-> ORN.  Includes MOV <-> MVN.  */
    case T2_OPCODE_ORR:
      new_inst = T2_OPCODE_ORN;
      value = inverted;
      break;

    case T2_OPCODE_ORN:
      new_inst = T2_OPCODE_ORR;
      value = inverted;
      break;

      /* AND <-> BIC.  TST has no inverted equivalent.  */
    case T2_OPCODE_AND:
      new_inst = T2_OPCODE_BIC;
      /* Rd == 15 means this is TST (AND with the result discarded),
	 which cannot be flipped to BIC.  */
      if (rd == 15)
	value = FAIL;
      else
	value = inverted;
      break;

    case T2_OPCODE_BIC:
      new_inst = T2_OPCODE_AND;
      value = inverted;
      break;

      /* ADC <-> SBC.  */
    case T2_OPCODE_ADC:
      new_inst = T2_OPCODE_SBC;
      value = inverted;
      break;

    case T2_OPCODE_SBC:
      new_inst = T2_OPCODE_ADC;
      value = inverted;
      break;

      /* We cannot do anything.  */
    default:
      return FAIL;
    }

  if (value == (unsigned int)FAIL)
    return FAIL;

  /* Splice the replacement opcode into the instruction word.  */
  *instruction &= T2_OPCODE_MASK;
  *instruction |= new_inst << T2_DATA_OP_SHIFT;
  return value;
}
23687 /* Read a 32-bit thumb instruction from buf. */
23689 static unsigned long
23690 get_thumb32_insn (char * buf
)
23692 unsigned long insn
;
23693 insn
= md_chars_to_number (buf
, THUMB_SIZE
) << 16;
23694 insn
|= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
23699 /* We usually want to set the low bit on the address of thumb function
23700 symbols. In particular .word foo - . should have the low bit set.
23701 Generic code tries to fold the difference of two symbols to
23702 a constant. Prevent this and force a relocation when the first symbols
23703 is a thumb function. */
23706 arm_optimize_expr (expressionS
*l
, operatorT op
, expressionS
*r
)
23708 if (op
== O_subtract
23709 && l
->X_op
== O_symbol
23710 && r
->X_op
== O_symbol
23711 && THUMB_IS_FUNC (l
->X_add_symbol
))
23713 l
->X_op
= O_subtract
;
23714 l
->X_op_symbol
= r
->X_add_symbol
;
23715 l
->X_add_number
-= r
->X_add_number
;
23719 /* Process as normal. */
/* Encode Thumb2 unconditional branches and calls.  The encoding
   for the 2 are identical for the immediate values.

   Splits the byte offset VALUE into the S/I1/I2/imm10/imm11 fields of
   the 32-bit B.W/BL encoding (T4) and merges them into the two
   halfwords already stored at BUF.  */

static void
encode_thumb2_b_bl_offset (char * buf, offsetT value)
{
#define T2I1I2MASK  ((1 << 13) | (1 << 11))

  offsetT newval;
  offsetT newval2;
  addressT S, I1, I2, lo, hi;

  S = (value >> 24) & 0x01;
  I1 = (value >> 23) & 0x01;
  I2 = (value >> 22) & 0x01;
  hi = (value >> 12) & 0x3ff;	/* imm10 field.  */
  lo = (value >> 1) & 0x7ff;	/* imm11 field; bit 0 is dropped.  */
  newval = md_chars_to_number (buf, THUMB_SIZE);
  newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
  newval |= (S << 10) | hi;
  newval2 &= ~T2I1I2MASK;
  /* The architecture stores J1 = NOT(I1 XOR S) and J2 = NOT(I2 XOR S);
     the final XOR with T2I1I2MASK supplies the complement.  */
  newval2 |= (((I1 ^ S) << 13) | ((I2 ^ S) << 11) | lo) ^ T2I1I2MASK;
  md_number_to_chars (buf, newval, THUMB_SIZE);
  md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
}
23749 md_apply_fix (fixS
* fixP
,
23753 offsetT value
= * valP
;
23755 unsigned int newimm
;
23756 unsigned long temp
;
23758 char * buf
= fixP
->fx_where
+ fixP
->fx_frag
->fr_literal
;
23760 gas_assert (fixP
->fx_r_type
<= BFD_RELOC_UNUSED
);
23762 /* Note whether this will delete the relocation. */
23764 if (fixP
->fx_addsy
== 0 && !fixP
->fx_pcrel
)
23767 /* On a 64-bit host, silently truncate 'value' to 32 bits for
23768 consistency with the behaviour on 32-bit hosts. Remember value
23770 value
&= 0xffffffff;
23771 value
^= 0x80000000;
23772 value
-= 0x80000000;
23775 fixP
->fx_addnumber
= value
;
23777 /* Same treatment for fixP->fx_offset. */
23778 fixP
->fx_offset
&= 0xffffffff;
23779 fixP
->fx_offset
^= 0x80000000;
23780 fixP
->fx_offset
-= 0x80000000;
23782 switch (fixP
->fx_r_type
)
23784 case BFD_RELOC_NONE
:
23785 /* This will need to go in the object file. */
23789 case BFD_RELOC_ARM_IMMEDIATE
:
23790 /* We claim that this fixup has been processed here,
23791 even if in fact we generate an error because we do
23792 not have a reloc for it, so tc_gen_reloc will reject it. */
23795 if (fixP
->fx_addsy
)
23797 const char *msg
= 0;
23799 if (! S_IS_DEFINED (fixP
->fx_addsy
))
23800 msg
= _("undefined symbol %s used as an immediate value");
23801 else if (S_GET_SEGMENT (fixP
->fx_addsy
) != seg
)
23802 msg
= _("symbol %s is in a different section");
23803 else if (S_IS_WEAK (fixP
->fx_addsy
))
23804 msg
= _("symbol %s is weak and may be overridden later");
23808 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23809 msg
, S_GET_NAME (fixP
->fx_addsy
));
23814 temp
= md_chars_to_number (buf
, INSN_SIZE
);
23816 /* If the offset is negative, we should use encoding A2 for ADR. */
23817 if ((temp
& 0xfff0000) == 0x28f0000 && value
< 0)
23818 newimm
= negate_data_op (&temp
, value
);
23821 newimm
= encode_arm_immediate (value
);
23823 /* If the instruction will fail, see if we can fix things up by
23824 changing the opcode. */
23825 if (newimm
== (unsigned int) FAIL
)
23826 newimm
= negate_data_op (&temp
, value
);
23827 /* MOV accepts both ARM modified immediate (A1 encoding) and
23828 UINT16 (A2 encoding) when possible, MOVW only accepts UINT16.
23829 When disassembling, MOV is preferred when there is no encoding
23831 if (newimm
== (unsigned int) FAIL
23832 && ((temp
>> DATA_OP_SHIFT
) & 0xf) == OPCODE_MOV
23833 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2
)
23834 && !((temp
>> SBIT_SHIFT
) & 0x1)
23835 && value
>= 0 && value
<= 0xffff)
23837 /* Clear bits[23:20] to change encoding from A1 to A2. */
23838 temp
&= 0xff0fffff;
23839 /* Encoding high 4bits imm. Code below will encode the remaining
23841 temp
|= (value
& 0x0000f000) << 4;
23842 newimm
= value
& 0x00000fff;
23846 if (newimm
== (unsigned int) FAIL
)
23848 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23849 _("invalid constant (%lx) after fixup"),
23850 (unsigned long) value
);
23854 newimm
|= (temp
& 0xfffff000);
23855 md_number_to_chars (buf
, (valueT
) newimm
, INSN_SIZE
);
23858 case BFD_RELOC_ARM_ADRL_IMMEDIATE
:
23860 unsigned int highpart
= 0;
23861 unsigned int newinsn
= 0xe1a00000; /* nop. */
23863 if (fixP
->fx_addsy
)
23865 const char *msg
= 0;
23867 if (! S_IS_DEFINED (fixP
->fx_addsy
))
23868 msg
= _("undefined symbol %s used as an immediate value");
23869 else if (S_GET_SEGMENT (fixP
->fx_addsy
) != seg
)
23870 msg
= _("symbol %s is in a different section");
23871 else if (S_IS_WEAK (fixP
->fx_addsy
))
23872 msg
= _("symbol %s is weak and may be overridden later");
23876 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23877 msg
, S_GET_NAME (fixP
->fx_addsy
));
23882 newimm
= encode_arm_immediate (value
);
23883 temp
= md_chars_to_number (buf
, INSN_SIZE
);
23885 /* If the instruction will fail, see if we can fix things up by
23886 changing the opcode. */
23887 if (newimm
== (unsigned int) FAIL
23888 && (newimm
= negate_data_op (& temp
, value
)) == (unsigned int) FAIL
)
23890 /* No ? OK - try using two ADD instructions to generate
23892 newimm
= validate_immediate_twopart (value
, & highpart
);
23894 /* Yes - then make sure that the second instruction is
23896 if (newimm
!= (unsigned int) FAIL
)
23898 /* Still No ? Try using a negated value. */
23899 else if ((newimm
= validate_immediate_twopart (- value
, & highpart
)) != (unsigned int) FAIL
)
23900 temp
= newinsn
= (temp
& OPCODE_MASK
) | OPCODE_SUB
<< DATA_OP_SHIFT
;
23901 /* Otherwise - give up. */
23904 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23905 _("unable to compute ADRL instructions for PC offset of 0x%lx"),
23910 /* Replace the first operand in the 2nd instruction (which
23911 is the PC) with the destination register. We have
23912 already added in the PC in the first instruction and we
23913 do not want to do it again. */
23914 newinsn
&= ~ 0xf0000;
23915 newinsn
|= ((newinsn
& 0x0f000) << 4);
23918 newimm
|= (temp
& 0xfffff000);
23919 md_number_to_chars (buf
, (valueT
) newimm
, INSN_SIZE
);
23921 highpart
|= (newinsn
& 0xfffff000);
23922 md_number_to_chars (buf
+ INSN_SIZE
, (valueT
) highpart
, INSN_SIZE
);
23926 case BFD_RELOC_ARM_OFFSET_IMM
:
23927 if (!fixP
->fx_done
&& seg
->use_rela_p
)
23929 /* Fall through. */
23931 case BFD_RELOC_ARM_LITERAL
:
23937 if (validate_offset_imm (value
, 0) == FAIL
)
23939 if (fixP
->fx_r_type
== BFD_RELOC_ARM_LITERAL
)
23940 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23941 _("invalid literal constant: pool needs to be closer"));
23943 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23944 _("bad immediate value for offset (%ld)"),
23949 newval
= md_chars_to_number (buf
, INSN_SIZE
);
23951 newval
&= 0xfffff000;
23954 newval
&= 0xff7ff000;
23955 newval
|= value
| (sign
? INDEX_UP
: 0);
23957 md_number_to_chars (buf
, newval
, INSN_SIZE
);
23960 case BFD_RELOC_ARM_OFFSET_IMM8
:
23961 case BFD_RELOC_ARM_HWLITERAL
:
23967 if (validate_offset_imm (value
, 1) == FAIL
)
23969 if (fixP
->fx_r_type
== BFD_RELOC_ARM_HWLITERAL
)
23970 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23971 _("invalid literal constant: pool needs to be closer"));
23973 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23974 _("bad immediate value for 8-bit offset (%ld)"),
23979 newval
= md_chars_to_number (buf
, INSN_SIZE
);
23981 newval
&= 0xfffff0f0;
23984 newval
&= 0xff7ff0f0;
23985 newval
|= ((value
>> 4) << 8) | (value
& 0xf) | (sign
? INDEX_UP
: 0);
23987 md_number_to_chars (buf
, newval
, INSN_SIZE
);
23990 case BFD_RELOC_ARM_T32_OFFSET_U8
:
23991 if (value
< 0 || value
> 1020 || value
% 4 != 0)
23992 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23993 _("bad immediate value for offset (%ld)"), (long) value
);
23996 newval
= md_chars_to_number (buf
+2, THUMB_SIZE
);
23998 md_number_to_chars (buf
+2, newval
, THUMB_SIZE
);
24001 case BFD_RELOC_ARM_T32_OFFSET_IMM
:
24002 /* This is a complicated relocation used for all varieties of Thumb32
24003 load/store instruction with immediate offset:
24005 1110 100P u1WL NNNN XXXX YYYY iiii iiii - +/-(U) pre/post(P) 8-bit,
24006 *4, optional writeback(W)
24007 (doubleword load/store)
24009 1111 100S uTTL 1111 XXXX iiii iiii iiii - +/-(U) 12-bit PC-rel
24010 1111 100S 0TTL NNNN XXXX 1Pu1 iiii iiii - +/-(U) pre/post(P) 8-bit
24011 1111 100S 0TTL NNNN XXXX 1110 iiii iiii - positive 8-bit (T instruction)
24012 1111 100S 1TTL NNNN XXXX iiii iiii iiii - positive 12-bit
24013 1111 100S 0TTL NNNN XXXX 1100 iiii iiii - negative 8-bit
24015 Uppercase letters indicate bits that are already encoded at
24016 this point. Lowercase letters are our problem. For the
24017 second block of instructions, the secondary opcode nybble
24018 (bits 8..11) is present, and bit 23 is zero, even if this is
24019 a PC-relative operation. */
24020 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
24022 newval
|= md_chars_to_number (buf
+THUMB_SIZE
, THUMB_SIZE
);
24024 if ((newval
& 0xf0000000) == 0xe0000000)
24026 /* Doubleword load/store: 8-bit offset, scaled by 4. */
24028 newval
|= (1 << 23);
24031 if (value
% 4 != 0)
24033 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24034 _("offset not a multiple of 4"));
24040 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24041 _("offset out of range"));
24046 else if ((newval
& 0x000f0000) == 0x000f0000)
24048 /* PC-relative, 12-bit offset. */
24050 newval
|= (1 << 23);
24055 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24056 _("offset out of range"));
24061 else if ((newval
& 0x00000100) == 0x00000100)
24063 /* Writeback: 8-bit, +/- offset. */
24065 newval
|= (1 << 9);
24070 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24071 _("offset out of range"));
24076 else if ((newval
& 0x00000f00) == 0x00000e00)
24078 /* T-instruction: positive 8-bit offset. */
24079 if (value
< 0 || value
> 0xff)
24081 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24082 _("offset out of range"));
24090 /* Positive 12-bit or negative 8-bit offset. */
24094 newval
|= (1 << 23);
24104 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24105 _("offset out of range"));
24112 md_number_to_chars (buf
, (newval
>> 16) & 0xffff, THUMB_SIZE
);
24113 md_number_to_chars (buf
+ THUMB_SIZE
, newval
& 0xffff, THUMB_SIZE
);
24116 case BFD_RELOC_ARM_SHIFT_IMM
:
24117 newval
= md_chars_to_number (buf
, INSN_SIZE
);
24118 if (((unsigned long) value
) > 32
24120 && (((newval
& 0x60) == 0) || (newval
& 0x60) == 0x60)))
24122 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24123 _("shift expression is too large"));
24128 /* Shifts of zero must be done as lsl. */
24130 else if (value
== 32)
24132 newval
&= 0xfffff07f;
24133 newval
|= (value
& 0x1f) << 7;
24134 md_number_to_chars (buf
, newval
, INSN_SIZE
);
24137 case BFD_RELOC_ARM_T32_IMMEDIATE
:
24138 case BFD_RELOC_ARM_T32_ADD_IMM
:
24139 case BFD_RELOC_ARM_T32_IMM12
:
24140 case BFD_RELOC_ARM_T32_ADD_PC12
:
24141 /* We claim that this fixup has been processed here,
24142 even if in fact we generate an error because we do
24143 not have a reloc for it, so tc_gen_reloc will reject it. */
24147 && ! S_IS_DEFINED (fixP
->fx_addsy
))
24149 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24150 _("undefined symbol %s used as an immediate value"),
24151 S_GET_NAME (fixP
->fx_addsy
));
24155 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
24157 newval
|= md_chars_to_number (buf
+2, THUMB_SIZE
);
24160 if ((fixP
->fx_r_type
== BFD_RELOC_ARM_T32_IMMEDIATE
24161 /* ARMv8-M Baseline MOV will reach here, but it doesn't support
24162 Thumb2 modified immediate encoding (T2). */
24163 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2
))
24164 || fixP
->fx_r_type
== BFD_RELOC_ARM_T32_ADD_IMM
)
24166 newimm
= encode_thumb32_immediate (value
);
24167 if (newimm
== (unsigned int) FAIL
)
24168 newimm
= thumb32_negate_data_op (&newval
, value
);
24170 if (newimm
== (unsigned int) FAIL
)
24172 if (fixP
->fx_r_type
!= BFD_RELOC_ARM_T32_IMMEDIATE
)
24174 /* Turn add/sum into addw/subw. */
24175 if (fixP
->fx_r_type
== BFD_RELOC_ARM_T32_ADD_IMM
)
24176 newval
= (newval
& 0xfeffffff) | 0x02000000;
24177 /* No flat 12-bit imm encoding for addsw/subsw. */
24178 if ((newval
& 0x00100000) == 0)
24180 /* 12 bit immediate for addw/subw. */
24184 newval
^= 0x00a00000;
24187 newimm
= (unsigned int) FAIL
;
24194 /* MOV accepts both Thumb2 modified immediate (T2 encoding) and
24195 UINT16 (T3 encoding), MOVW only accepts UINT16. When
24196 disassembling, MOV is preferred when there is no encoding
24198 if (((newval
>> T2_DATA_OP_SHIFT
) & 0xf) == T2_OPCODE_ORR
24199 /* NOTE: MOV uses the ORR opcode in Thumb 2 mode
24200 but with the Rn field [19:16] set to 1111. */
24201 && (((newval
>> 16) & 0xf) == 0xf)
24202 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2_v8m
)
24203 && !((newval
>> T2_SBIT_SHIFT
) & 0x1)
24204 && value
>= 0 && value
<= 0xffff)
24206 /* Toggle bit[25] to change encoding from T2 to T3. */
24208 /* Clear bits[19:16]. */
24209 newval
&= 0xfff0ffff;
24210 /* Encoding high 4bits imm. Code below will encode the
24211 remaining low 12bits. */
24212 newval
|= (value
& 0x0000f000) << 4;
24213 newimm
= value
& 0x00000fff;
24218 if (newimm
== (unsigned int)FAIL
)
24220 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24221 _("invalid constant (%lx) after fixup"),
24222 (unsigned long) value
);
24226 newval
|= (newimm
& 0x800) << 15;
24227 newval
|= (newimm
& 0x700) << 4;
24228 newval
|= (newimm
& 0x0ff);
24230 md_number_to_chars (buf
, (valueT
) ((newval
>> 16) & 0xffff), THUMB_SIZE
);
24231 md_number_to_chars (buf
+2, (valueT
) (newval
& 0xffff), THUMB_SIZE
);
24234 case BFD_RELOC_ARM_SMC
:
24235 if (((unsigned long) value
) > 0xffff)
24236 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24237 _("invalid smc expression"));
24238 newval
= md_chars_to_number (buf
, INSN_SIZE
);
24239 newval
|= (value
& 0xf) | ((value
& 0xfff0) << 4);
24240 md_number_to_chars (buf
, newval
, INSN_SIZE
);
24243 case BFD_RELOC_ARM_HVC
:
24244 if (((unsigned long) value
) > 0xffff)
24245 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24246 _("invalid hvc expression"));
24247 newval
= md_chars_to_number (buf
, INSN_SIZE
);
24248 newval
|= (value
& 0xf) | ((value
& 0xfff0) << 4);
24249 md_number_to_chars (buf
, newval
, INSN_SIZE
);
24252 case BFD_RELOC_ARM_SWI
:
24253 if (fixP
->tc_fix_data
!= 0)
24255 if (((unsigned long) value
) > 0xff)
24256 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24257 _("invalid swi expression"));
24258 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
24260 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
24264 if (((unsigned long) value
) > 0x00ffffff)
24265 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24266 _("invalid swi expression"));
24267 newval
= md_chars_to_number (buf
, INSN_SIZE
);
24269 md_number_to_chars (buf
, newval
, INSN_SIZE
);
24273 case BFD_RELOC_ARM_MULTI
:
24274 if (((unsigned long) value
) > 0xffff)
24275 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24276 _("invalid expression in load/store multiple"));
24277 newval
= value
| md_chars_to_number (buf
, INSN_SIZE
);
24278 md_number_to_chars (buf
, newval
, INSN_SIZE
);
24282 case BFD_RELOC_ARM_PCREL_CALL
:
24284 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
)
24286 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
24287 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
24288 && THUMB_IS_FUNC (fixP
->fx_addsy
))
24289 /* Flip the bl to blx. This is a simple flip
24290 bit here because we generate PCREL_CALL for
24291 unconditional bls. */
24293 newval
= md_chars_to_number (buf
, INSN_SIZE
);
24294 newval
= newval
| 0x10000000;
24295 md_number_to_chars (buf
, newval
, INSN_SIZE
);
24301 goto arm_branch_common
;
24303 case BFD_RELOC_ARM_PCREL_JUMP
:
24304 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
)
24306 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
24307 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
24308 && THUMB_IS_FUNC (fixP
->fx_addsy
))
24310 /* This would map to a bl<cond>, b<cond>,
24311 b<always> to a Thumb function. We
24312 need to force a relocation for this particular
24314 newval
= md_chars_to_number (buf
, INSN_SIZE
);
24317 /* Fall through. */
24319 case BFD_RELOC_ARM_PLT32
:
24321 case BFD_RELOC_ARM_PCREL_BRANCH
:
24323 goto arm_branch_common
;
24325 case BFD_RELOC_ARM_PCREL_BLX
:
24328 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
)
24330 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
24331 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
24332 && ARM_IS_FUNC (fixP
->fx_addsy
))
24334 /* Flip the blx to a bl and warn. */
24335 const char *name
= S_GET_NAME (fixP
->fx_addsy
);
24336 newval
= 0xeb000000;
24337 as_warn_where (fixP
->fx_file
, fixP
->fx_line
,
24338 _("blx to '%s' an ARM ISA state function changed to bl"),
24340 md_number_to_chars (buf
, newval
, INSN_SIZE
);
24346 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
)
24347 fixP
->fx_r_type
= BFD_RELOC_ARM_PCREL_CALL
;
24351 /* We are going to store value (shifted right by two) in the
24352 instruction, in a 24 bit, signed field. Bits 26 through 32 either
24353 all clear or all set and bit 0 must be clear. For B/BL bit 1 must
24356 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24357 _("misaligned branch destination"));
24358 if ((value
& (offsetT
)0xfe000000) != (offsetT
)0
24359 && (value
& (offsetT
)0xfe000000) != (offsetT
)0xfe000000)
24360 as_bad_where (fixP
->fx_file
, fixP
->fx_line
, BAD_RANGE
);
24362 if (fixP
->fx_done
|| !seg
->use_rela_p
)
24364 newval
= md_chars_to_number (buf
, INSN_SIZE
);
24365 newval
|= (value
>> 2) & 0x00ffffff;
24366 /* Set the H bit on BLX instructions. */
24370 newval
|= 0x01000000;
24372 newval
&= ~0x01000000;
24374 md_number_to_chars (buf
, newval
, INSN_SIZE
);
24378 case BFD_RELOC_THUMB_PCREL_BRANCH7
: /* CBZ */
24379 /* CBZ can only branch forward. */
24381 /* Attempts to use CBZ to branch to the next instruction
24382 (which, strictly speaking, are prohibited) will be turned into
24385 FIXME: It may be better to remove the instruction completely and
24386 perform relaxation. */
24389 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
24390 newval
= 0xbf00; /* NOP encoding T1 */
24391 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
24396 as_bad_where (fixP
->fx_file
, fixP
->fx_line
, BAD_RANGE
);
24398 if (fixP
->fx_done
|| !seg
->use_rela_p
)
24400 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
24401 newval
|= ((value
& 0x3e) << 2) | ((value
& 0x40) << 3);
24402 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
24407 case BFD_RELOC_THUMB_PCREL_BRANCH9
: /* Conditional branch. */
24408 if ((value
& ~0xff) && ((value
& ~0xff) != ~0xff))
24409 as_bad_where (fixP
->fx_file
, fixP
->fx_line
, BAD_RANGE
);
24411 if (fixP
->fx_done
|| !seg
->use_rela_p
)
24413 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
24414 newval
|= (value
& 0x1ff) >> 1;
24415 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
24419 case BFD_RELOC_THUMB_PCREL_BRANCH12
: /* Unconditional branch. */
24420 if ((value
& ~0x7ff) && ((value
& ~0x7ff) != ~0x7ff))
24421 as_bad_where (fixP
->fx_file
, fixP
->fx_line
, BAD_RANGE
);
24423 if (fixP
->fx_done
|| !seg
->use_rela_p
)
24425 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
24426 newval
|= (value
& 0xfff) >> 1;
24427 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
24431 case BFD_RELOC_THUMB_PCREL_BRANCH20
:
24433 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
24434 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
24435 && ARM_IS_FUNC (fixP
->fx_addsy
)
24436 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
24438 /* Force a relocation for a branch 20 bits wide. */
24441 if ((value
& ~0x1fffff) && ((value
& ~0x0fffff) != ~0x0fffff))
24442 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24443 _("conditional branch out of range"));
24445 if (fixP
->fx_done
|| !seg
->use_rela_p
)
24448 addressT S
, J1
, J2
, lo
, hi
;
24450 S
= (value
& 0x00100000) >> 20;
24451 J2
= (value
& 0x00080000) >> 19;
24452 J1
= (value
& 0x00040000) >> 18;
24453 hi
= (value
& 0x0003f000) >> 12;
24454 lo
= (value
& 0x00000ffe) >> 1;
24456 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
24457 newval2
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
24458 newval
|= (S
<< 10) | hi
;
24459 newval2
|= (J1
<< 13) | (J2
<< 11) | lo
;
24460 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
24461 md_number_to_chars (buf
+ THUMB_SIZE
, newval2
, THUMB_SIZE
);
24465 case BFD_RELOC_THUMB_PCREL_BLX
:
24466 /* If there is a blx from a thumb state function to
24467 another thumb function flip this to a bl and warn
24471 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
24472 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
24473 && THUMB_IS_FUNC (fixP
->fx_addsy
))
24475 const char *name
= S_GET_NAME (fixP
->fx_addsy
);
24476 as_warn_where (fixP
->fx_file
, fixP
->fx_line
,
24477 _("blx to Thumb func '%s' from Thumb ISA state changed to bl"),
24479 newval
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
24480 newval
= newval
| 0x1000;
24481 md_number_to_chars (buf
+THUMB_SIZE
, newval
, THUMB_SIZE
);
24482 fixP
->fx_r_type
= BFD_RELOC_THUMB_PCREL_BRANCH23
;
24487 goto thumb_bl_common
;
24489 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
24490 /* A bl from Thumb state ISA to an internal ARM state function
24491 is converted to a blx. */
24493 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
24494 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
24495 && ARM_IS_FUNC (fixP
->fx_addsy
)
24496 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
24498 newval
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
24499 newval
= newval
& ~0x1000;
24500 md_number_to_chars (buf
+THUMB_SIZE
, newval
, THUMB_SIZE
);
24501 fixP
->fx_r_type
= BFD_RELOC_THUMB_PCREL_BLX
;
24507 if (fixP
->fx_r_type
== BFD_RELOC_THUMB_PCREL_BLX
)
24508 /* For a BLX instruction, make sure that the relocation is rounded up
24509 to a word boundary. This follows the semantics of the instruction
24510 which specifies that bit 1 of the target address will come from bit
24511 1 of the base address. */
24512 value
= (value
+ 3) & ~ 3;
24515 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
24516 && fixP
->fx_r_type
== BFD_RELOC_THUMB_PCREL_BLX
)
24517 fixP
->fx_r_type
= BFD_RELOC_THUMB_PCREL_BRANCH23
;
24520 if ((value
& ~0x3fffff) && ((value
& ~0x3fffff) != ~0x3fffff))
24522 if (!(ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2
)))
24523 as_bad_where (fixP
->fx_file
, fixP
->fx_line
, BAD_RANGE
);
24524 else if ((value
& ~0x1ffffff)
24525 && ((value
& ~0x1ffffff) != ~0x1ffffff))
24526 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24527 _("Thumb2 branch out of range"));
24530 if (fixP
->fx_done
|| !seg
->use_rela_p
)
24531 encode_thumb2_b_bl_offset (buf
, value
);
24535 case BFD_RELOC_THUMB_PCREL_BRANCH25
:
24536 if ((value
& ~0x0ffffff) && ((value
& ~0x0ffffff) != ~0x0ffffff))
24537 as_bad_where (fixP
->fx_file
, fixP
->fx_line
, BAD_RANGE
);
24539 if (fixP
->fx_done
|| !seg
->use_rela_p
)
24540 encode_thumb2_b_bl_offset (buf
, value
);
24545 if (fixP
->fx_done
|| !seg
->use_rela_p
)
24550 if (fixP
->fx_done
|| !seg
->use_rela_p
)
24551 md_number_to_chars (buf
, value
, 2);
24555 case BFD_RELOC_ARM_TLS_CALL
:
24556 case BFD_RELOC_ARM_THM_TLS_CALL
:
24557 case BFD_RELOC_ARM_TLS_DESCSEQ
:
24558 case BFD_RELOC_ARM_THM_TLS_DESCSEQ
:
24559 case BFD_RELOC_ARM_TLS_GOTDESC
:
24560 case BFD_RELOC_ARM_TLS_GD32
:
24561 case BFD_RELOC_ARM_TLS_LE32
:
24562 case BFD_RELOC_ARM_TLS_IE32
:
24563 case BFD_RELOC_ARM_TLS_LDM32
:
24564 case BFD_RELOC_ARM_TLS_LDO32
:
24565 S_SET_THREAD_LOCAL (fixP
->fx_addsy
);
24568 /* Same handling as above, but with the arm_fdpic guard. */
24569 case BFD_RELOC_ARM_TLS_GD32_FDPIC
:
24570 case BFD_RELOC_ARM_TLS_IE32_FDPIC
:
24571 case BFD_RELOC_ARM_TLS_LDM32_FDPIC
:
24574 S_SET_THREAD_LOCAL (fixP
->fx_addsy
);
24578 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24579 _("Relocation supported only in FDPIC mode"));
24583 case BFD_RELOC_ARM_GOT32
:
24584 case BFD_RELOC_ARM_GOTOFF
:
24587 case BFD_RELOC_ARM_GOT_PREL
:
24588 if (fixP
->fx_done
|| !seg
->use_rela_p
)
24589 md_number_to_chars (buf
, value
, 4);
24592 case BFD_RELOC_ARM_TARGET2
:
24593 /* TARGET2 is not partial-inplace, so we need to write the
24594 addend here for REL targets, because it won't be written out
24595 during reloc processing later. */
24596 if (fixP
->fx_done
|| !seg
->use_rela_p
)
24597 md_number_to_chars (buf
, fixP
->fx_offset
, 4);
24600 /* Relocations for FDPIC. */
24601 case BFD_RELOC_ARM_GOTFUNCDESC
:
24602 case BFD_RELOC_ARM_GOTOFFFUNCDESC
:
24603 case BFD_RELOC_ARM_FUNCDESC
:
24606 if (fixP
->fx_done
|| !seg
->use_rela_p
)
24607 md_number_to_chars (buf
, 0, 4);
24611 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24612 _("Relocation supported only in FDPIC mode"));
24617 case BFD_RELOC_RVA
:
24619 case BFD_RELOC_ARM_TARGET1
:
24620 case BFD_RELOC_ARM_ROSEGREL32
:
24621 case BFD_RELOC_ARM_SBREL32
:
24622 case BFD_RELOC_32_PCREL
:
24624 case BFD_RELOC_32_SECREL
:
24626 if (fixP
->fx_done
|| !seg
->use_rela_p
)
24628 /* For WinCE we only do this for pcrel fixups. */
24629 if (fixP
->fx_done
|| fixP
->fx_pcrel
)
24631 md_number_to_chars (buf
, value
, 4);
24635 case BFD_RELOC_ARM_PREL31
:
24636 if (fixP
->fx_done
|| !seg
->use_rela_p
)
24638 newval
= md_chars_to_number (buf
, 4) & 0x80000000;
24639 if ((value
^ (value
>> 1)) & 0x40000000)
24641 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24642 _("rel31 relocation overflow"));
24644 newval
|= value
& 0x7fffffff;
24645 md_number_to_chars (buf
, newval
, 4);
24650 case BFD_RELOC_ARM_CP_OFF_IMM
:
24651 case BFD_RELOC_ARM_T32_CP_OFF_IMM
:
24652 case BFD_RELOC_ARM_T32_VLDR_VSTR_OFF_IMM
:
24653 if (fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM
)
24654 newval
= md_chars_to_number (buf
, INSN_SIZE
);
24656 newval
= get_thumb32_insn (buf
);
24657 if ((newval
& 0x0f200f00) == 0x0d000900)
24659 /* This is a fp16 vstr/vldr. The immediate offset in the mnemonic
24660 has permitted values that are multiples of 2, in the range 0
24662 if (value
< -510 || value
> 510 || (value
& 1))
24663 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24664 _("co-processor offset out of range"));
24666 else if ((newval
& 0xfe001f80) == 0xec000f80)
24668 if (value
< -511 || value
> 512 || (value
& 3))
24669 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24670 _("co-processor offset out of range"));
24672 else if (value
< -1023 || value
> 1023 || (value
& 3))
24673 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24674 _("co-processor offset out of range"));
24679 if (fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM
24680 || fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM_S2
)
24681 newval
= md_chars_to_number (buf
, INSN_SIZE
);
24683 newval
= get_thumb32_insn (buf
);
24686 if (fixP
->fx_r_type
== BFD_RELOC_ARM_T32_VLDR_VSTR_OFF_IMM
)
24687 newval
&= 0xffffff80;
24689 newval
&= 0xffffff00;
24693 if (fixP
->fx_r_type
== BFD_RELOC_ARM_T32_VLDR_VSTR_OFF_IMM
)
24694 newval
&= 0xff7fff80;
24696 newval
&= 0xff7fff00;
24697 if ((newval
& 0x0f200f00) == 0x0d000900)
24699 /* This is a fp16 vstr/vldr.
24701 It requires the immediate offset in the instruction is shifted
24702 left by 1 to be a half-word offset.
24704 Here, left shift by 1 first, and later right shift by 2
24705 should get the right offset. */
24708 newval
|= (value
>> 2) | (sign
? INDEX_UP
: 0);
24710 if (fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM
24711 || fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM_S2
)
24712 md_number_to_chars (buf
, newval
, INSN_SIZE
);
24714 put_thumb32_insn (buf
, newval
);
24717 case BFD_RELOC_ARM_CP_OFF_IMM_S2
:
24718 case BFD_RELOC_ARM_T32_CP_OFF_IMM_S2
:
24719 if (value
< -255 || value
> 255)
24720 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24721 _("co-processor offset out of range"));
24723 goto cp_off_common
;
24725 case BFD_RELOC_ARM_THUMB_OFFSET
:
24726 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
24727 /* Exactly what ranges, and where the offset is inserted depends
24728 on the type of instruction, we can establish this from the
24730 switch (newval
>> 12)
24732 case 4: /* PC load. */
24733 /* Thumb PC loads are somewhat odd, bit 1 of the PC is
24734 forced to zero for these loads; md_pcrel_from has already
24735 compensated for this. */
24737 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24738 _("invalid offset, target not word aligned (0x%08lX)"),
24739 (((unsigned long) fixP
->fx_frag
->fr_address
24740 + (unsigned long) fixP
->fx_where
) & ~3)
24741 + (unsigned long) value
);
24743 if (value
& ~0x3fc)
24744 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24745 _("invalid offset, value too big (0x%08lX)"),
24748 newval
|= value
>> 2;
24751 case 9: /* SP load/store. */
24752 if (value
& ~0x3fc)
24753 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24754 _("invalid offset, value too big (0x%08lX)"),
24756 newval
|= value
>> 2;
24759 case 6: /* Word load/store. */
24761 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24762 _("invalid offset, value too big (0x%08lX)"),
24764 newval
|= value
<< 4; /* 6 - 2. */
24767 case 7: /* Byte load/store. */
24769 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24770 _("invalid offset, value too big (0x%08lX)"),
24772 newval
|= value
<< 6;
24775 case 8: /* Halfword load/store. */
24777 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24778 _("invalid offset, value too big (0x%08lX)"),
24780 newval
|= value
<< 5; /* 6 - 1. */
24784 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24785 "Unable to process relocation for thumb opcode: %lx",
24786 (unsigned long) newval
);
24789 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
24792 case BFD_RELOC_ARM_THUMB_ADD
:
24793 /* This is a complicated relocation, since we use it for all of
24794 the following immediate relocations:
24798 9bit ADD/SUB SP word-aligned
24799 10bit ADD PC/SP word-aligned
24801 The type of instruction being processed is encoded in the
24808 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
24810 int rd
= (newval
>> 4) & 0xf;
24811 int rs
= newval
& 0xf;
24812 int subtract
= !!(newval
& 0x8000);
24814 /* Check for HI regs, only very restricted cases allowed:
24815 Adjusting SP, and using PC or SP to get an address. */
24816 if ((rd
> 7 && (rd
!= REG_SP
|| rs
!= REG_SP
))
24817 || (rs
> 7 && rs
!= REG_SP
&& rs
!= REG_PC
))
24818 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24819 _("invalid Hi register with immediate"));
24821 /* If value is negative, choose the opposite instruction. */
24825 subtract
= !subtract
;
24827 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24828 _("immediate value out of range"));
24833 if (value
& ~0x1fc)
24834 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24835 _("invalid immediate for stack address calculation"));
24836 newval
= subtract
? T_OPCODE_SUB_ST
: T_OPCODE_ADD_ST
;
24837 newval
|= value
>> 2;
24839 else if (rs
== REG_PC
|| rs
== REG_SP
)
24841 /* PR gas/18541. If the addition is for a defined symbol
24842 within range of an ADR instruction then accept it. */
24845 && fixP
->fx_addsy
!= NULL
)
24849 if (! S_IS_DEFINED (fixP
->fx_addsy
)
24850 || S_GET_SEGMENT (fixP
->fx_addsy
) != seg
24851 || S_IS_WEAK (fixP
->fx_addsy
))
24853 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24854 _("address calculation needs a strongly defined nearby symbol"));
24858 offsetT v
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
24860 /* Round up to the next 4-byte boundary. */
24865 v
= S_GET_VALUE (fixP
->fx_addsy
) - v
;
24869 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24870 _("symbol too far away"));
24880 if (subtract
|| value
& ~0x3fc)
24881 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24882 _("invalid immediate for address calculation (value = 0x%08lX)"),
24883 (unsigned long) (subtract
? - value
: value
));
24884 newval
= (rs
== REG_PC
? T_OPCODE_ADD_PC
: T_OPCODE_ADD_SP
);
24886 newval
|= value
>> 2;
24891 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24892 _("immediate value out of range"));
24893 newval
= subtract
? T_OPCODE_SUB_I8
: T_OPCODE_ADD_I8
;
24894 newval
|= (rd
<< 8) | value
;
24899 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24900 _("immediate value out of range"));
24901 newval
= subtract
? T_OPCODE_SUB_I3
: T_OPCODE_ADD_I3
;
24902 newval
|= rd
| (rs
<< 3) | (value
<< 6);
24905 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
24908 case BFD_RELOC_ARM_THUMB_IMM
:
24909 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
24910 if (value
< 0 || value
> 255)
24911 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24912 _("invalid immediate: %ld is out of range"),
24915 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
24918 case BFD_RELOC_ARM_THUMB_SHIFT
:
24919 /* 5bit shift value (0..32). LSL cannot take 32. */
24920 newval
= md_chars_to_number (buf
, THUMB_SIZE
) & 0xf83f;
24921 temp
= newval
& 0xf800;
24922 if (value
< 0 || value
> 32 || (value
== 32 && temp
== T_OPCODE_LSL_I
))
24923 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24924 _("invalid shift value: %ld"), (long) value
);
24925 /* Shifts of zero must be encoded as LSL. */
24927 newval
= (newval
& 0x003f) | T_OPCODE_LSL_I
;
24928 /* Shifts of 32 are encoded as zero. */
24929 else if (value
== 32)
24931 newval
|= value
<< 6;
24932 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
24935 case BFD_RELOC_VTABLE_INHERIT
:
24936 case BFD_RELOC_VTABLE_ENTRY
:
24940 case BFD_RELOC_ARM_MOVW
:
24941 case BFD_RELOC_ARM_MOVT
:
24942 case BFD_RELOC_ARM_THUMB_MOVW
:
24943 case BFD_RELOC_ARM_THUMB_MOVT
:
24944 if (fixP
->fx_done
|| !seg
->use_rela_p
)
24946 /* REL format relocations are limited to a 16-bit addend. */
24947 if (!fixP
->fx_done
)
24949 if (value
< -0x8000 || value
> 0x7fff)
24950 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24951 _("offset out of range"));
24953 else if (fixP
->fx_r_type
== BFD_RELOC_ARM_MOVT
24954 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVT
)
24959 if (fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVW
24960 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVT
)
24962 newval
= get_thumb32_insn (buf
);
24963 newval
&= 0xfbf08f00;
24964 newval
|= (value
& 0xf000) << 4;
24965 newval
|= (value
& 0x0800) << 15;
24966 newval
|= (value
& 0x0700) << 4;
24967 newval
|= (value
& 0x00ff);
24968 put_thumb32_insn (buf
, newval
);
24972 newval
= md_chars_to_number (buf
, 4);
24973 newval
&= 0xfff0f000;
24974 newval
|= value
& 0x0fff;
24975 newval
|= (value
& 0xf000) << 4;
24976 md_number_to_chars (buf
, newval
, 4);
24981 case BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
:
24982 case BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC
:
24983 case BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC
:
24984 case BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
:
24985 gas_assert (!fixP
->fx_done
);
24988 bfd_boolean is_mov
;
24989 bfd_vma encoded_addend
= value
;
24991 /* Check that addend can be encoded in instruction. */
24992 if (!seg
->use_rela_p
&& (value
< 0 || value
> 255))
24993 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24994 _("the offset 0x%08lX is not representable"),
24995 (unsigned long) encoded_addend
);
24997 /* Extract the instruction. */
24998 insn
= md_chars_to_number (buf
, THUMB_SIZE
);
24999 is_mov
= (insn
& 0xf800) == 0x2000;
25004 if (!seg
->use_rela_p
)
25005 insn
|= encoded_addend
;
25011 /* Extract the instruction. */
25012 /* Encoding is the following
25017 /* The following conditions must be true :
25022 rd
= (insn
>> 4) & 0xf;
25024 if ((insn
& 0x8000) || (rd
!= rs
) || rd
> 7)
25025 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25026 _("Unable to process relocation for thumb opcode: %lx"),
25027 (unsigned long) insn
);
25029 /* Encode as ADD immediate8 thumb 1 code. */
25030 insn
= 0x3000 | (rd
<< 8);
25032 /* Place the encoded addend into the first 8 bits of the
25034 if (!seg
->use_rela_p
)
25035 insn
|= encoded_addend
;
25038 /* Update the instruction. */
25039 md_number_to_chars (buf
, insn
, THUMB_SIZE
);
25043 case BFD_RELOC_ARM_ALU_PC_G0_NC
:
25044 case BFD_RELOC_ARM_ALU_PC_G0
:
25045 case BFD_RELOC_ARM_ALU_PC_G1_NC
:
25046 case BFD_RELOC_ARM_ALU_PC_G1
:
25047 case BFD_RELOC_ARM_ALU_PC_G2
:
25048 case BFD_RELOC_ARM_ALU_SB_G0_NC
:
25049 case BFD_RELOC_ARM_ALU_SB_G0
:
25050 case BFD_RELOC_ARM_ALU_SB_G1_NC
:
25051 case BFD_RELOC_ARM_ALU_SB_G1
:
25052 case BFD_RELOC_ARM_ALU_SB_G2
:
25053 gas_assert (!fixP
->fx_done
);
25054 if (!seg
->use_rela_p
)
25057 bfd_vma encoded_addend
;
25058 bfd_vma addend_abs
= llabs (value
);
25060 /* Check that the absolute value of the addend can be
25061 expressed as an 8-bit constant plus a rotation. */
25062 encoded_addend
= encode_arm_immediate (addend_abs
);
25063 if (encoded_addend
== (unsigned int) FAIL
)
25064 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25065 _("the offset 0x%08lX is not representable"),
25066 (unsigned long) addend_abs
);
25068 /* Extract the instruction. */
25069 insn
= md_chars_to_number (buf
, INSN_SIZE
);
25071 /* If the addend is positive, use an ADD instruction.
25072 Otherwise use a SUB. Take care not to destroy the S bit. */
25073 insn
&= 0xff1fffff;
25079 /* Place the encoded addend into the first 12 bits of the
25081 insn
&= 0xfffff000;
25082 insn
|= encoded_addend
;
25084 /* Update the instruction. */
25085 md_number_to_chars (buf
, insn
, INSN_SIZE
);
25089 case BFD_RELOC_ARM_LDR_PC_G0
:
25090 case BFD_RELOC_ARM_LDR_PC_G1
:
25091 case BFD_RELOC_ARM_LDR_PC_G2
:
25092 case BFD_RELOC_ARM_LDR_SB_G0
:
25093 case BFD_RELOC_ARM_LDR_SB_G1
:
25094 case BFD_RELOC_ARM_LDR_SB_G2
:
25095 gas_assert (!fixP
->fx_done
);
25096 if (!seg
->use_rela_p
)
25099 bfd_vma addend_abs
= llabs (value
);
25101 /* Check that the absolute value of the addend can be
25102 encoded in 12 bits. */
25103 if (addend_abs
>= 0x1000)
25104 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25105 _("bad offset 0x%08lX (only 12 bits available for the magnitude)"),
25106 (unsigned long) addend_abs
);
25108 /* Extract the instruction. */
25109 insn
= md_chars_to_number (buf
, INSN_SIZE
);
25111 /* If the addend is negative, clear bit 23 of the instruction.
25112 Otherwise set it. */
25114 insn
&= ~(1 << 23);
25118 /* Place the absolute value of the addend into the first 12 bits
25119 of the instruction. */
25120 insn
&= 0xfffff000;
25121 insn
|= addend_abs
;
25123 /* Update the instruction. */
25124 md_number_to_chars (buf
, insn
, INSN_SIZE
);
25128 case BFD_RELOC_ARM_LDRS_PC_G0
:
25129 case BFD_RELOC_ARM_LDRS_PC_G1
:
25130 case BFD_RELOC_ARM_LDRS_PC_G2
:
25131 case BFD_RELOC_ARM_LDRS_SB_G0
:
25132 case BFD_RELOC_ARM_LDRS_SB_G1
:
25133 case BFD_RELOC_ARM_LDRS_SB_G2
:
25134 gas_assert (!fixP
->fx_done
);
25135 if (!seg
->use_rela_p
)
25138 bfd_vma addend_abs
= llabs (value
);
25140 /* Check that the absolute value of the addend can be
25141 encoded in 8 bits. */
25142 if (addend_abs
>= 0x100)
25143 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25144 _("bad offset 0x%08lX (only 8 bits available for the magnitude)"),
25145 (unsigned long) addend_abs
);
25147 /* Extract the instruction. */
25148 insn
= md_chars_to_number (buf
, INSN_SIZE
);
25150 /* If the addend is negative, clear bit 23 of the instruction.
25151 Otherwise set it. */
25153 insn
&= ~(1 << 23);
25157 /* Place the first four bits of the absolute value of the addend
25158 into the first 4 bits of the instruction, and the remaining
25159 four into bits 8 .. 11. */
25160 insn
&= 0xfffff0f0;
25161 insn
|= (addend_abs
& 0xf) | ((addend_abs
& 0xf0) << 4);
25163 /* Update the instruction. */
25164 md_number_to_chars (buf
, insn
, INSN_SIZE
);
25168 case BFD_RELOC_ARM_LDC_PC_G0
:
25169 case BFD_RELOC_ARM_LDC_PC_G1
:
25170 case BFD_RELOC_ARM_LDC_PC_G2
:
25171 case BFD_RELOC_ARM_LDC_SB_G0
:
25172 case BFD_RELOC_ARM_LDC_SB_G1
:
25173 case BFD_RELOC_ARM_LDC_SB_G2
:
25174 gas_assert (!fixP
->fx_done
);
25175 if (!seg
->use_rela_p
)
25178 bfd_vma addend_abs
= llabs (value
);
25180 /* Check that the absolute value of the addend is a multiple of
25181 four and, when divided by four, fits in 8 bits. */
25182 if (addend_abs
& 0x3)
25183 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25184 _("bad offset 0x%08lX (must be word-aligned)"),
25185 (unsigned long) addend_abs
);
25187 if ((addend_abs
>> 2) > 0xff)
25188 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25189 _("bad offset 0x%08lX (must be an 8-bit number of words)"),
25190 (unsigned long) addend_abs
);
25192 /* Extract the instruction. */
25193 insn
= md_chars_to_number (buf
, INSN_SIZE
);
25195 /* If the addend is negative, clear bit 23 of the instruction.
25196 Otherwise set it. */
25198 insn
&= ~(1 << 23);
25202 /* Place the addend (divided by four) into the first eight
25203 bits of the instruction. */
25204 insn
&= 0xfffffff0;
25205 insn
|= addend_abs
>> 2;
25207 /* Update the instruction. */
25208 md_number_to_chars (buf
, insn
, INSN_SIZE
);
25212 case BFD_RELOC_THUMB_PCREL_BRANCH5
:
25214 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
25215 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
25216 && ARM_IS_FUNC (fixP
->fx_addsy
)
25217 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v8_1m_main
))
25219 /* Force a relocation for a branch 5 bits wide. */
25222 if (v8_1_branch_value_check (value
, 5, FALSE
) == FAIL
)
25223 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25226 if (fixP
->fx_done
|| !seg
->use_rela_p
)
25228 addressT boff
= value
>> 1;
25230 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
25231 newval
|= (boff
<< 7);
25232 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
25236 case BFD_RELOC_THUMB_PCREL_BFCSEL
:
25238 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
25239 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
25240 && ARM_IS_FUNC (fixP
->fx_addsy
)
25241 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v8_1m_main
))
25245 if ((value
& ~0x7f) && ((value
& ~0x3f) != ~0x3f))
25246 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25247 _("branch out of range"));
25249 if (fixP
->fx_done
|| !seg
->use_rela_p
)
25251 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
25253 addressT boff
= ((newval
& 0x0780) >> 7) << 1;
25254 addressT diff
= value
- boff
;
25258 newval
|= 1 << 1; /* T bit. */
25260 else if (diff
!= 2)
25262 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25263 _("out of range label-relative fixup value"));
25265 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
25269 case BFD_RELOC_ARM_THUMB_BF17
:
25271 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
25272 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
25273 && ARM_IS_FUNC (fixP
->fx_addsy
)
25274 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v8_1m_main
))
25276 /* Force a relocation for a branch 17 bits wide. */
25280 if (v8_1_branch_value_check (value
, 17, TRUE
) == FAIL
)
25281 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25284 if (fixP
->fx_done
|| !seg
->use_rela_p
)
25287 addressT immA
, immB
, immC
;
25289 immA
= (value
& 0x0001f000) >> 12;
25290 immB
= (value
& 0x00000ffc) >> 2;
25291 immC
= (value
& 0x00000002) >> 1;
25293 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
25294 newval2
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
25296 newval2
|= (immC
<< 11) | (immB
<< 1);
25297 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
25298 md_number_to_chars (buf
+ THUMB_SIZE
, newval2
, THUMB_SIZE
);
25302 case BFD_RELOC_ARM_THUMB_BF19
:
25304 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
25305 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
25306 && ARM_IS_FUNC (fixP
->fx_addsy
)
25307 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v8_1m_main
))
25309 /* Force a relocation for a branch 19 bits wide. */
25313 if (v8_1_branch_value_check (value
, 19, TRUE
) == FAIL
)
25314 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25317 if (fixP
->fx_done
|| !seg
->use_rela_p
)
25320 addressT immA
, immB
, immC
;
25322 immA
= (value
& 0x0007f000) >> 12;
25323 immB
= (value
& 0x00000ffc) >> 2;
25324 immC
= (value
& 0x00000002) >> 1;
25326 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
25327 newval2
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
25329 newval2
|= (immC
<< 11) | (immB
<< 1);
25330 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
25331 md_number_to_chars (buf
+ THUMB_SIZE
, newval2
, THUMB_SIZE
);
25335 case BFD_RELOC_ARM_THUMB_BF13
:
25337 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
25338 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
25339 && ARM_IS_FUNC (fixP
->fx_addsy
)
25340 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v8_1m_main
))
25342 /* Force a relocation for a branch 13 bits wide. */
25346 if (v8_1_branch_value_check (value
, 13, TRUE
) == FAIL
)
25347 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25350 if (fixP
->fx_done
|| !seg
->use_rela_p
)
25353 addressT immA
, immB
, immC
;
25355 immA
= (value
& 0x00001000) >> 12;
25356 immB
= (value
& 0x00000ffc) >> 2;
25357 immC
= (value
& 0x00000002) >> 1;
25359 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
25360 newval2
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
25362 newval2
|= (immC
<< 11) | (immB
<< 1);
25363 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
25364 md_number_to_chars (buf
+ THUMB_SIZE
, newval2
, THUMB_SIZE
);
25368 case BFD_RELOC_ARM_THUMB_LOOP12
:
25370 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
25371 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
25372 && ARM_IS_FUNC (fixP
->fx_addsy
)
25373 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v8_1m_main
))
25375 /* Force a relocation for a branch 12 bits wide. */
25379 bfd_vma insn
= get_thumb32_insn (buf
);
25380 /* le lr, <label> or le <label> */
25381 if (((insn
& 0xffffffff) == 0xf00fc001)
25382 || ((insn
& 0xffffffff) == 0xf02fc001))
25385 if (v8_1_branch_value_check (value
, 12, FALSE
) == FAIL
)
25386 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25388 if (fixP
->fx_done
|| !seg
->use_rela_p
)
25390 addressT imml
, immh
;
25392 immh
= (value
& 0x00000ffc) >> 2;
25393 imml
= (value
& 0x00000002) >> 1;
25395 newval
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
25396 newval
|= (imml
<< 11) | (immh
<< 1);
25397 md_number_to_chars (buf
+ THUMB_SIZE
, newval
, THUMB_SIZE
);
25401 case BFD_RELOC_ARM_V4BX
:
25402 /* This will need to go in the object file. */
25406 case BFD_RELOC_UNUSED
:
25408 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25409 _("bad relocation fixup type (%d)"), fixP
->fx_r_type
);
25413 /* Translate internal representation of relocation info to BFD target
25417 tc_gen_reloc (asection
*section
, fixS
*fixp
)
25420 bfd_reloc_code_real_type code
;
25422 reloc
= XNEW (arelent
);
25424 reloc
->sym_ptr_ptr
= XNEW (asymbol
*);
25425 *reloc
->sym_ptr_ptr
= symbol_get_bfdsym (fixp
->fx_addsy
);
25426 reloc
->address
= fixp
->fx_frag
->fr_address
+ fixp
->fx_where
;
25428 if (fixp
->fx_pcrel
)
25430 if (section
->use_rela_p
)
25431 fixp
->fx_offset
-= md_pcrel_from_section (fixp
, section
);
25433 fixp
->fx_offset
= reloc
->address
;
25435 reloc
->addend
= fixp
->fx_offset
;
25437 switch (fixp
->fx_r_type
)
25440 if (fixp
->fx_pcrel
)
25442 code
= BFD_RELOC_8_PCREL
;
25445 /* Fall through. */
25448 if (fixp
->fx_pcrel
)
25450 code
= BFD_RELOC_16_PCREL
;
25453 /* Fall through. */
25456 if (fixp
->fx_pcrel
)
25458 code
= BFD_RELOC_32_PCREL
;
25461 /* Fall through. */
25463 case BFD_RELOC_ARM_MOVW
:
25464 if (fixp
->fx_pcrel
)
25466 code
= BFD_RELOC_ARM_MOVW_PCREL
;
25469 /* Fall through. */
25471 case BFD_RELOC_ARM_MOVT
:
25472 if (fixp
->fx_pcrel
)
25474 code
= BFD_RELOC_ARM_MOVT_PCREL
;
25477 /* Fall through. */
25479 case BFD_RELOC_ARM_THUMB_MOVW
:
25480 if (fixp
->fx_pcrel
)
25482 code
= BFD_RELOC_ARM_THUMB_MOVW_PCREL
;
25485 /* Fall through. */
25487 case BFD_RELOC_ARM_THUMB_MOVT
:
25488 if (fixp
->fx_pcrel
)
25490 code
= BFD_RELOC_ARM_THUMB_MOVT_PCREL
;
25493 /* Fall through. */
25495 case BFD_RELOC_NONE
:
25496 case BFD_RELOC_ARM_PCREL_BRANCH
:
25497 case BFD_RELOC_ARM_PCREL_BLX
:
25498 case BFD_RELOC_RVA
:
25499 case BFD_RELOC_THUMB_PCREL_BRANCH7
:
25500 case BFD_RELOC_THUMB_PCREL_BRANCH9
:
25501 case BFD_RELOC_THUMB_PCREL_BRANCH12
:
25502 case BFD_RELOC_THUMB_PCREL_BRANCH20
:
25503 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
25504 case BFD_RELOC_THUMB_PCREL_BRANCH25
:
25505 case BFD_RELOC_VTABLE_ENTRY
:
25506 case BFD_RELOC_VTABLE_INHERIT
:
25508 case BFD_RELOC_32_SECREL
:
25510 code
= fixp
->fx_r_type
;
25513 case BFD_RELOC_THUMB_PCREL_BLX
:
25515 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
)
25516 code
= BFD_RELOC_THUMB_PCREL_BRANCH23
;
25519 code
= BFD_RELOC_THUMB_PCREL_BLX
;
25522 case BFD_RELOC_ARM_LITERAL
:
25523 case BFD_RELOC_ARM_HWLITERAL
:
25524 /* If this is called then the a literal has
25525 been referenced across a section boundary. */
25526 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
25527 _("literal referenced across section boundary"));
25531 case BFD_RELOC_ARM_TLS_CALL
:
25532 case BFD_RELOC_ARM_THM_TLS_CALL
:
25533 case BFD_RELOC_ARM_TLS_DESCSEQ
:
25534 case BFD_RELOC_ARM_THM_TLS_DESCSEQ
:
25535 case BFD_RELOC_ARM_GOT32
:
25536 case BFD_RELOC_ARM_GOTOFF
:
25537 case BFD_RELOC_ARM_GOT_PREL
:
25538 case BFD_RELOC_ARM_PLT32
:
25539 case BFD_RELOC_ARM_TARGET1
:
25540 case BFD_RELOC_ARM_ROSEGREL32
:
25541 case BFD_RELOC_ARM_SBREL32
:
25542 case BFD_RELOC_ARM_PREL31
:
25543 case BFD_RELOC_ARM_TARGET2
:
25544 case BFD_RELOC_ARM_TLS_LDO32
:
25545 case BFD_RELOC_ARM_PCREL_CALL
:
25546 case BFD_RELOC_ARM_PCREL_JUMP
:
25547 case BFD_RELOC_ARM_ALU_PC_G0_NC
:
25548 case BFD_RELOC_ARM_ALU_PC_G0
:
25549 case BFD_RELOC_ARM_ALU_PC_G1_NC
:
25550 case BFD_RELOC_ARM_ALU_PC_G1
:
25551 case BFD_RELOC_ARM_ALU_PC_G2
:
25552 case BFD_RELOC_ARM_LDR_PC_G0
:
25553 case BFD_RELOC_ARM_LDR_PC_G1
:
25554 case BFD_RELOC_ARM_LDR_PC_G2
:
25555 case BFD_RELOC_ARM_LDRS_PC_G0
:
25556 case BFD_RELOC_ARM_LDRS_PC_G1
:
25557 case BFD_RELOC_ARM_LDRS_PC_G2
:
25558 case BFD_RELOC_ARM_LDC_PC_G0
:
25559 case BFD_RELOC_ARM_LDC_PC_G1
:
25560 case BFD_RELOC_ARM_LDC_PC_G2
:
25561 case BFD_RELOC_ARM_ALU_SB_G0_NC
:
25562 case BFD_RELOC_ARM_ALU_SB_G0
:
25563 case BFD_RELOC_ARM_ALU_SB_G1_NC
:
25564 case BFD_RELOC_ARM_ALU_SB_G1
:
25565 case BFD_RELOC_ARM_ALU_SB_G2
:
25566 case BFD_RELOC_ARM_LDR_SB_G0
:
25567 case BFD_RELOC_ARM_LDR_SB_G1
:
25568 case BFD_RELOC_ARM_LDR_SB_G2
:
25569 case BFD_RELOC_ARM_LDRS_SB_G0
:
25570 case BFD_RELOC_ARM_LDRS_SB_G1
:
25571 case BFD_RELOC_ARM_LDRS_SB_G2
:
25572 case BFD_RELOC_ARM_LDC_SB_G0
:
25573 case BFD_RELOC_ARM_LDC_SB_G1
:
25574 case BFD_RELOC_ARM_LDC_SB_G2
:
25575 case BFD_RELOC_ARM_V4BX
:
25576 case BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
:
25577 case BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC
:
25578 case BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC
:
25579 case BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
:
25580 case BFD_RELOC_ARM_GOTFUNCDESC
:
25581 case BFD_RELOC_ARM_GOTOFFFUNCDESC
:
25582 case BFD_RELOC_ARM_FUNCDESC
:
25583 case BFD_RELOC_ARM_THUMB_BF17
:
25584 case BFD_RELOC_ARM_THUMB_BF19
:
25585 case BFD_RELOC_ARM_THUMB_BF13
:
25586 code
= fixp
->fx_r_type
;
25589 case BFD_RELOC_ARM_TLS_GOTDESC
:
25590 case BFD_RELOC_ARM_TLS_GD32
:
25591 case BFD_RELOC_ARM_TLS_GD32_FDPIC
:
25592 case BFD_RELOC_ARM_TLS_LE32
:
25593 case BFD_RELOC_ARM_TLS_IE32
:
25594 case BFD_RELOC_ARM_TLS_IE32_FDPIC
:
25595 case BFD_RELOC_ARM_TLS_LDM32
:
25596 case BFD_RELOC_ARM_TLS_LDM32_FDPIC
:
25597 /* BFD will include the symbol's address in the addend.
25598 But we don't want that, so subtract it out again here. */
25599 if (!S_IS_COMMON (fixp
->fx_addsy
))
25600 reloc
->addend
-= (*reloc
->sym_ptr_ptr
)->value
;
25601 code
= fixp
->fx_r_type
;
25605 case BFD_RELOC_ARM_IMMEDIATE
:
25606 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
25607 _("internal relocation (type: IMMEDIATE) not fixed up"));
25610 case BFD_RELOC_ARM_ADRL_IMMEDIATE
:
25611 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
25612 _("ADRL used for a symbol not defined in the same file"));
25615 case BFD_RELOC_THUMB_PCREL_BRANCH5
:
25616 case BFD_RELOC_THUMB_PCREL_BFCSEL
:
25617 case BFD_RELOC_ARM_THUMB_LOOP12
:
25618 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
25619 _("%s used for a symbol not defined in the same file"),
25620 bfd_get_reloc_code_name (fixp
->fx_r_type
));
25623 case BFD_RELOC_ARM_OFFSET_IMM
:
25624 if (section
->use_rela_p
)
25626 code
= fixp
->fx_r_type
;
25630 if (fixp
->fx_addsy
!= NULL
25631 && !S_IS_DEFINED (fixp
->fx_addsy
)
25632 && S_IS_LOCAL (fixp
->fx_addsy
))
25634 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
25635 _("undefined local label `%s'"),
25636 S_GET_NAME (fixp
->fx_addsy
));
25640 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
25641 _("internal_relocation (type: OFFSET_IMM) not fixed up"));
25648 switch (fixp
->fx_r_type
)
25650 case BFD_RELOC_NONE
: type
= "NONE"; break;
25651 case BFD_RELOC_ARM_OFFSET_IMM8
: type
= "OFFSET_IMM8"; break;
25652 case BFD_RELOC_ARM_SHIFT_IMM
: type
= "SHIFT_IMM"; break;
25653 case BFD_RELOC_ARM_SMC
: type
= "SMC"; break;
25654 case BFD_RELOC_ARM_SWI
: type
= "SWI"; break;
25655 case BFD_RELOC_ARM_MULTI
: type
= "MULTI"; break;
25656 case BFD_RELOC_ARM_CP_OFF_IMM
: type
= "CP_OFF_IMM"; break;
25657 case BFD_RELOC_ARM_T32_OFFSET_IMM
: type
= "T32_OFFSET_IMM"; break;
25658 case BFD_RELOC_ARM_T32_CP_OFF_IMM
: type
= "T32_CP_OFF_IMM"; break;
25659 case BFD_RELOC_ARM_THUMB_ADD
: type
= "THUMB_ADD"; break;
25660 case BFD_RELOC_ARM_THUMB_SHIFT
: type
= "THUMB_SHIFT"; break;
25661 case BFD_RELOC_ARM_THUMB_IMM
: type
= "THUMB_IMM"; break;
25662 case BFD_RELOC_ARM_THUMB_OFFSET
: type
= "THUMB_OFFSET"; break;
25663 default: type
= _("<unknown>"); break;
25665 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
25666 _("cannot represent %s relocation in this object file format"),
25673 if ((code
== BFD_RELOC_32_PCREL
|| code
== BFD_RELOC_32
)
25675 && fixp
->fx_addsy
== GOT_symbol
)
25677 code
= BFD_RELOC_ARM_GOTPC
;
25678 reloc
->addend
= fixp
->fx_offset
= reloc
->address
;
25682 reloc
->howto
= bfd_reloc_type_lookup (stdoutput
, code
);
25684 if (reloc
->howto
== NULL
)
25686 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
25687 _("cannot represent %s relocation in this object file format"),
25688 bfd_get_reloc_code_name (code
));
25692 /* HACK: Since arm ELF uses Rel instead of Rela, encode the
25693 vtable entry to be used in the relocation's section offset. */
25694 if (fixp
->fx_r_type
== BFD_RELOC_VTABLE_ENTRY
)
25695 reloc
->address
= fixp
->fx_offset
;
25700 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
25703 cons_fix_new_arm (fragS
* frag
,
25707 bfd_reloc_code_real_type reloc
)
25712 FIXME: @@ Should look at CPU word size. */
25716 reloc
= BFD_RELOC_8
;
25719 reloc
= BFD_RELOC_16
;
25723 reloc
= BFD_RELOC_32
;
25726 reloc
= BFD_RELOC_64
;
25731 if (exp
->X_op
== O_secrel
)
25733 exp
->X_op
= O_symbol
;
25734 reloc
= BFD_RELOC_32_SECREL
;
25738 fix_new_exp (frag
, where
, size
, exp
, pcrel
, reloc
);
25741 #if defined (OBJ_COFF)
25743 arm_validate_fix (fixS
* fixP
)
25745 /* If the destination of the branch is a defined symbol which does not have
25746 the THUMB_FUNC attribute, then we must be calling a function which has
25747 the (interfacearm) attribute. We look for the Thumb entry point to that
25748 function and change the branch to refer to that function instead. */
25749 if (fixP
->fx_r_type
== BFD_RELOC_THUMB_PCREL_BRANCH23
25750 && fixP
->fx_addsy
!= NULL
25751 && S_IS_DEFINED (fixP
->fx_addsy
)
25752 && ! THUMB_IS_FUNC (fixP
->fx_addsy
))
25754 fixP
->fx_addsy
= find_real_start (fixP
->fx_addsy
);
25761 arm_force_relocation (struct fix
* fixp
)
25763 #if defined (OBJ_COFF) && defined (TE_PE)
25764 if (fixp
->fx_r_type
== BFD_RELOC_RVA
)
25768 /* In case we have a call or a branch to a function in ARM ISA mode from
25769 a thumb function or vice-versa force the relocation. These relocations
25770 are cleared off for some cores that might have blx and simple transformations
25774 switch (fixp
->fx_r_type
)
25776 case BFD_RELOC_ARM_PCREL_JUMP
:
25777 case BFD_RELOC_ARM_PCREL_CALL
:
25778 case BFD_RELOC_THUMB_PCREL_BLX
:
25779 if (THUMB_IS_FUNC (fixp
->fx_addsy
))
25783 case BFD_RELOC_ARM_PCREL_BLX
:
25784 case BFD_RELOC_THUMB_PCREL_BRANCH25
:
25785 case BFD_RELOC_THUMB_PCREL_BRANCH20
:
25786 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
25787 if (ARM_IS_FUNC (fixp
->fx_addsy
))
25796 /* Resolve these relocations even if the symbol is extern or weak.
25797 Technically this is probably wrong due to symbol preemption.
25798 In practice these relocations do not have enough range to be useful
25799 at dynamic link time, and some code (e.g. in the Linux kernel)
25800 expects these references to be resolved. */
25801 if (fixp
->fx_r_type
== BFD_RELOC_ARM_IMMEDIATE
25802 || fixp
->fx_r_type
== BFD_RELOC_ARM_OFFSET_IMM
25803 || fixp
->fx_r_type
== BFD_RELOC_ARM_OFFSET_IMM8
25804 || fixp
->fx_r_type
== BFD_RELOC_ARM_ADRL_IMMEDIATE
25805 || fixp
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM
25806 || fixp
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM_S2
25807 || fixp
->fx_r_type
== BFD_RELOC_ARM_THUMB_OFFSET
25808 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_ADD_IMM
25809 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_IMMEDIATE
25810 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_IMM12
25811 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_OFFSET_IMM
25812 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_ADD_PC12
25813 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_CP_OFF_IMM
25814 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_CP_OFF_IMM_S2
)
25817 /* Always leave these relocations for the linker. */
25818 if ((fixp
->fx_r_type
>= BFD_RELOC_ARM_ALU_PC_G0_NC
25819 && fixp
->fx_r_type
<= BFD_RELOC_ARM_LDC_SB_G2
)
25820 || fixp
->fx_r_type
== BFD_RELOC_ARM_LDR_PC_G0
)
25823 /* Always generate relocations against function symbols. */
25824 if (fixp
->fx_r_type
== BFD_RELOC_32
25826 && (symbol_get_bfdsym (fixp
->fx_addsy
)->flags
& BSF_FUNCTION
))
25829 return generic_force_reloc (fixp
);
25832 #if defined (OBJ_ELF) || defined (OBJ_COFF)
25833 /* Relocations against function names must be left unadjusted,
25834 so that the linker can use this information to generate interworking
25835 stubs. The MIPS version of this function
25836 also prevents relocations that are mips-16 specific, but I do not
25837 know why it does this.
25840 There is one other problem that ought to be addressed here, but
25841 which currently is not: Taking the address of a label (rather
25842 than a function) and then later jumping to that address. Such
25843 addresses also ought to have their bottom bit set (assuming that
25844 they reside in Thumb code), but at the moment they will not. */
25847 arm_fix_adjustable (fixS
* fixP
)
25849 if (fixP
->fx_addsy
== NULL
)
25852 /* Preserve relocations against symbols with function type. */
25853 if (symbol_get_bfdsym (fixP
->fx_addsy
)->flags
& BSF_FUNCTION
)
25856 if (THUMB_IS_FUNC (fixP
->fx_addsy
)
25857 && fixP
->fx_subsy
== NULL
)
25860 /* We need the symbol name for the VTABLE entries. */
25861 if ( fixP
->fx_r_type
== BFD_RELOC_VTABLE_INHERIT
25862 || fixP
->fx_r_type
== BFD_RELOC_VTABLE_ENTRY
)
25865 /* Don't allow symbols to be discarded on GOT related relocs. */
25866 if (fixP
->fx_r_type
== BFD_RELOC_ARM_PLT32
25867 || fixP
->fx_r_type
== BFD_RELOC_ARM_GOT32
25868 || fixP
->fx_r_type
== BFD_RELOC_ARM_GOTOFF
25869 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_GD32
25870 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_GD32_FDPIC
25871 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_LE32
25872 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_IE32
25873 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_IE32_FDPIC
25874 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_LDM32
25875 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_LDM32_FDPIC
25876 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_LDO32
25877 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_GOTDESC
25878 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_CALL
25879 || fixP
->fx_r_type
== BFD_RELOC_ARM_THM_TLS_CALL
25880 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_DESCSEQ
25881 || fixP
->fx_r_type
== BFD_RELOC_ARM_THM_TLS_DESCSEQ
25882 || fixP
->fx_r_type
== BFD_RELOC_ARM_TARGET2
)
25885 /* Similarly for group relocations. */
25886 if ((fixP
->fx_r_type
>= BFD_RELOC_ARM_ALU_PC_G0_NC
25887 && fixP
->fx_r_type
<= BFD_RELOC_ARM_LDC_SB_G2
)
25888 || fixP
->fx_r_type
== BFD_RELOC_ARM_LDR_PC_G0
)
25891 /* MOVW/MOVT REL relocations have limited offsets, so keep the symbols. */
25892 if (fixP
->fx_r_type
== BFD_RELOC_ARM_MOVW
25893 || fixP
->fx_r_type
== BFD_RELOC_ARM_MOVT
25894 || fixP
->fx_r_type
== BFD_RELOC_ARM_MOVW_PCREL
25895 || fixP
->fx_r_type
== BFD_RELOC_ARM_MOVT_PCREL
25896 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVW
25897 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVT
25898 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVW_PCREL
25899 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVT_PCREL
)
25902 /* BFD_RELOC_ARM_THUMB_ALU_ABS_Gx_NC relocations have VERY limited
25903 offsets, so keep these symbols. */
25904 if (fixP
->fx_r_type
>= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
25905 && fixP
->fx_r_type
<= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
)
25910 #endif /* defined (OBJ_ELF) || defined (OBJ_COFF) */
25914 elf32_arm_target_format (void)
25917 return (target_big_endian
25918 ? "elf32-bigarm-symbian"
25919 : "elf32-littlearm-symbian");
25920 #elif defined (TE_VXWORKS)
25921 return (target_big_endian
25922 ? "elf32-bigarm-vxworks"
25923 : "elf32-littlearm-vxworks");
25924 #elif defined (TE_NACL)
25925 return (target_big_endian
25926 ? "elf32-bigarm-nacl"
25927 : "elf32-littlearm-nacl");
25931 if (target_big_endian
)
25932 return "elf32-bigarm-fdpic";
25934 return "elf32-littlearm-fdpic";
25938 if (target_big_endian
)
25939 return "elf32-bigarm";
25941 return "elf32-littlearm";
25947 armelf_frob_symbol (symbolS
* symp
,
25950 elf_frob_symbol (symp
, puntp
);
25954 /* MD interface: Finalization. */
25959 literal_pool
* pool
;
25961 /* Ensure that all the IT blocks are properly closed. */
25962 check_it_blocks_finished ();
25964 for (pool
= list_of_pools
; pool
; pool
= pool
->next
)
25966 /* Put it at the end of the relevant section. */
25967 subseg_set (pool
->section
, pool
->sub_section
);
25969 arm_elf_change_section ();
/* Remove any excess mapping symbols generated for alignment frags in
   SEC.  We may have created a mapping symbol before a zero byte
   alignment; remove it if there's a mapping symbol after the
   alignment.  */

static void
check_mapping_symbols (bfd *abfd ATTRIBUTE_UNUSED, asection *sec,
		       void *dummy ATTRIBUTE_UNUSED)
{
  segment_info_type *seginfo = seg_info (sec);
  fragS *fragp;

  /* Sections with no frag chain have nothing to clean up.  */
  if (seginfo == NULL || seginfo->frchainP == NULL)
    return;

  for (fragp = seginfo->frchainP->frch_root;
       fragp != NULL;
       fragp = fragp->fr_next)
    {
      /* Last mapping symbol recorded in this frag, if any.  */
      symbolS *sym = fragp->tc_frag_data.last_map;
      fragS *next = fragp->fr_next;

      /* Variable-sized frags have been converted to fixed size by
	 this point.  But if this was variable-sized to start with,
	 there will be a fixed-size frag after it.  So don't handle
	 next == NULL.  */
      if (sym == NULL || next == NULL)
	continue;

      if (S_GET_VALUE (sym) < next->fr_address)
	/* Not at the end of this frag.  */
	continue;
      know (S_GET_VALUE (sym) == next->fr_address);

      /* SYM sits exactly at the start of NEXT; walk forward over empty
	 frags to decide whether it is redundant.  */
      do
	{
	  if (next->tc_frag_data.first_map != NULL)
	    {
	      /* Next frag starts with a mapping symbol.  Discard this
		 one.  */
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  if (next->fr_next == NULL)
	    {
	      /* This mapping symbol is at the end of the section.  Discard
		 it.  */
	      know (next->fr_fix == 0 && next->fr_var == 0);
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  /* As long as we have empty frags without any mapping symbols,
	     keep looking.  */
	  /* If the next frag is non-empty and does not start with a
	     mapping symbol, then this mapping symbol is required.  */
	  if (next->fr_address != next->fr_next->fr_address)
	    break;

	  next = next->fr_next;
	}
      while (next != NULL);
    }
}
/* Adjust the symbol table.  This marks Thumb symbols as distinct from
   ARM ones.  */

void
arm_adjust_symtab (void)
{
#ifdef OBJ_COFF
  symbolS * sym;

  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
    {
      if (ARM_IS_THUMB (sym))
	{
	  if (THUMB_IS_FUNC (sym))
	    {
	      /* Mark the symbol as a Thumb function.  */
	      if (   S_GET_STORAGE_CLASS (sym) == C_STAT
		  || S_GET_STORAGE_CLASS (sym) == C_LABEL)  /* This can happen!	 */
		S_SET_STORAGE_CLASS (sym, C_THUMBSTATFUNC);

	      else if (S_GET_STORAGE_CLASS (sym) == C_EXT)
		S_SET_STORAGE_CLASS (sym, C_THUMBEXTFUNC);
	      else
		as_bad (_("%s: unexpected function type: %d"),
			S_GET_NAME (sym), S_GET_STORAGE_CLASS (sym));
	    }
	  else switch (S_GET_STORAGE_CLASS (sym))
	    {
	    case C_EXT:
	      S_SET_STORAGE_CLASS (sym, C_THUMBEXT);
	      break;
	    case C_STAT:
	      S_SET_STORAGE_CLASS (sym, C_THUMBSTAT);
	      break;
	    case C_LABEL:
	      S_SET_STORAGE_CLASS (sym, C_THUMBLABEL);
	      break;
	    default:
	      /* Do nothing.  */
	      break;
	    }
	}

      /* Interworking symbols get a distinctive COFF flag byte.  */
      if (ARM_IS_INTERWORK (sym))
	coffsymbol (symbol_get_bfdsym (sym))->native->u.syment.n_flags = 0xFF;
    }
#endif
#ifdef OBJ_ELF
  symbolS * sym;
  char	    bind;

  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
    {
      if (ARM_IS_THUMB (sym))
	{
	  elf_symbol_type * elf_sym;

	  elf_sym = elf_symbol (symbol_get_bfdsym (sym));
	  bind = ELF_ST_BIND (elf_sym->internal_elf_sym.st_info);

	  /* Mapping symbols ($a/$t/$d etc.) are left untouched.  */
	  if (! bfd_is_arm_special_symbol_name (elf_sym->symbol.name,
						BFD_ARM_SPECIAL_SYM_TYPE_ANY))
	    {
	      /* If it's a .thumb_func, declare it as so,
		 otherwise tag label as .code 16.  */
	      if (THUMB_IS_FUNC (sym))
		ARM_SET_SYM_BRANCH_TYPE (elf_sym->internal_elf_sym.st_target_internal,
					 ST_BRANCH_TO_THUMB);
	      else if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
		elf_sym->internal_elf_sym.st_info =
		  ELF_ST_INFO (bind, STT_ARM_16BIT);
	    }
	}
    }

  /* Remove any overlapping mapping symbols generated by alignment frags.  */
  bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
  /* Now do generic ELF adjustments.  */
  elf_adjust_symtab ();
#endif
}
/* MD interface: Initialization.  */

/* Pre-parse the textual floating-point constants in fp_const[] into
   fp_values[] (extended-precision form, 'x') so operand matching can
   compare against them cheaply.  A parse failure here would be an
   internal error, hence abort.  */
static void
set_constant_flonums (void)
{
  int i;

  for (i = 0; i < NUM_FLOAT_VALS; i++)
    if (atof_ieee ((char *) fp_const[i], 'x', fp_values[i]) == NULL)
      abort ();
}
26136 /* Auto-select Thumb mode if it's the only available instruction set for the
26137 given architecture. */
26140 autoselect_thumb_from_cpu_variant (void)
26142 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
))
26143 opcode_select (16);
/* MD interface: one-time assembler initialization.  Builds the opcode
   and operand hash tables, resolves the command-line CPU/FPU selection
   into cpu_variant, sets the object-file private flags, and records the
   BFD machine type.  */
void
md_begin (void)
{
  unsigned mach;
  unsigned int i;

  /* Create the lookup tables used while parsing instructions.  */
  if (	 (arm_ops_hsh = hash_new ()) == NULL
      || (arm_cond_hsh = hash_new ()) == NULL
      || (arm_shift_hsh = hash_new ()) == NULL
      || (arm_psr_hsh = hash_new ()) == NULL
      || (arm_v7m_psr_hsh = hash_new ()) == NULL
      || (arm_reg_hsh = hash_new ()) == NULL
      || (arm_reloc_hsh = hash_new ()) == NULL
      || (arm_barrier_opt_hsh = hash_new ()) == NULL)
    as_fatal (_("virtual memory exhausted"));

  /* Populate each table from its static description array.  */
  for (i = 0; i < sizeof (insns) / sizeof (struct asm_opcode); i++)
    hash_insert (arm_ops_hsh, insns[i].template_name, (void *) (insns + i));
  for (i = 0; i < sizeof (conds) / sizeof (struct asm_cond); i++)
    hash_insert (arm_cond_hsh, conds[i].template_name, (void *) (conds + i));
  for (i = 0; i < sizeof (shift_names) / sizeof (struct asm_shift_name); i++)
    hash_insert (arm_shift_hsh, shift_names[i].name, (void *) (shift_names + i));
  for (i = 0; i < sizeof (psrs) / sizeof (struct asm_psr); i++)
    hash_insert (arm_psr_hsh, psrs[i].template_name, (void *) (psrs + i));
  for (i = 0; i < sizeof (v7m_psrs) / sizeof (struct asm_psr); i++)
    hash_insert (arm_v7m_psr_hsh, v7m_psrs[i].template_name,
		 (void *) (v7m_psrs + i));
  for (i = 0; i < sizeof (reg_names) / sizeof (struct reg_entry); i++)
    hash_insert (arm_reg_hsh, reg_names[i].name, (void *) (reg_names + i));
  for (i = 0;
       i < sizeof (barrier_opt_names) / sizeof (struct asm_barrier_opt);
       i++)
    hash_insert (arm_barrier_opt_hsh, barrier_opt_names[i].template_name,
		 (void *) (barrier_opt_names + i));

#ifdef OBJ_ELF
  for (i = 0; i < ARRAY_SIZE (reloc_names); i++)
    {
      struct reloc_entry * entry = reloc_names + i;

      if (arm_is_eabi () && entry->reloc == BFD_RELOC_ARM_PLT32)
	/* This makes encode_branch() use the EABI versions of this relocation.  */
	entry->reloc = BFD_RELOC_UNUSED;

      hash_insert (arm_reloc_hsh, entry->name, (void *) entry);
    }
#endif

  set_constant_flonums ();

  /* Set the cpu variant based on the command-line options.  We prefer
     -mcpu= over -march= if both are set (as for GCC); and we prefer
     -mfpu= over any other way of setting the floating point unit.
     Use of legacy options with new options are faulted.  */
  if (legacy_cpu)
    {
      if (mcpu_cpu_opt || march_cpu_opt)
	as_bad (_("use of old and new-style options to set CPU type"));

      selected_arch = *legacy_cpu;
    }
  else if (mcpu_cpu_opt)
    {
      selected_arch = *mcpu_cpu_opt;
      selected_ext = *mcpu_ext_opt;
    }
  else if (march_cpu_opt)
    {
      selected_arch = *march_cpu_opt;
      selected_ext = *march_ext_opt;
    }
  ARM_MERGE_FEATURE_SETS (selected_cpu, selected_arch, selected_ext);

  if (legacy_fpu)
    {
      if (mfpu_opt)
	as_bad (_("use of old and new-style options to set FPU type"));

      selected_fpu = *legacy_fpu;
    }
  else if (mfpu_opt)
    selected_fpu = *mfpu_opt;
  else
    {
#if !(defined (EABI_DEFAULT) || defined (TE_LINUX) \
	|| defined (TE_NetBSD) || defined (TE_VXWORKS))
      /* Some environments specify a default FPU.  If they don't, infer it
	 from the processor.  */
      if (mcpu_fpu_opt)
	selected_fpu = *mcpu_fpu_opt;
      else if (march_fpu_opt)
	selected_fpu = *march_fpu_opt;
#else
      selected_fpu = fpu_default;
#endif
    }

  /* Still no FPU: fall back to a default that depends on whether any
     CPU was chosen at all.  */
  if (ARM_FEATURE_ZERO (selected_fpu))
    {
      if (!no_cpu_selected ())
	selected_fpu = fpu_default;
      else
	selected_fpu = fpu_arch_fpa;
    }

#ifdef CPU_DEFAULT
  if (ARM_FEATURE_ZERO (selected_arch))
    {
      selected_arch = cpu_default;
      selected_cpu = selected_arch;
    }
  ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, selected_fpu);
#else
  /* Autodetection of feature mode: allow all features in cpu_variant but
     leave selected_cpu unset.  It will be set in
     aeabi_set_public_attributes () after all instruction have been
     processed and we can decide what CPU should be selected.  */
  if (ARM_FEATURE_ZERO (selected_arch))
    ARM_MERGE_FEATURE_SETS (cpu_variant, arm_arch_any, selected_fpu);
  else
    ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, selected_fpu);
#endif

  autoselect_thumb_from_cpu_variant ();

  arm_arch_used = thumb_arch_used = arm_arch_none;

#if defined OBJ_COFF || defined OBJ_ELF
  {
    unsigned int flags = 0;

#if defined OBJ_ELF
    flags = meabi_flags;

    switch (meabi_flags)
      {
      case EF_ARM_EABI_UNKNOWN:
#endif
	/* Set the flags in the private structure.  */
	if (uses_apcs_26)      flags |= F_APCS26;
	if (support_interwork) flags |= F_INTERWORK;
	if (uses_apcs_float)   flags |= F_APCS_FLOAT;
	if (pic_code)	       flags |= F_PIC;
	if (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_any_hard))
	  flags |= F_SOFT_FLOAT;

	switch (mfloat_abi_opt)
	  {
	  case ARM_FLOAT_ABI_SOFT:
	  case ARM_FLOAT_ABI_SOFTFP:
	    flags |= F_SOFT_FLOAT;
	    break;

	  case ARM_FLOAT_ABI_HARD:
	    if (flags & F_SOFT_FLOAT)
	      as_bad (_("hard-float conflicts with specified fpu"));
	    break;
	  }

	/* Using pure-endian doubles (even if soft-float).  */
	if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
	  flags |= F_VFP_FLOAT;

#if defined OBJ_ELF
	if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_maverick))
	  flags |= EF_ARM_MAVERICK_FLOAT;
	break;

      case EF_ARM_EABI_VER4:
      case EF_ARM_EABI_VER5:
	/* No additional flags to set.	*/
	break;

      default:
	abort ();
      }
#endif
    bfd_set_private_flags (stdoutput, flags);

    /* We have run out flags in the COFF header to encode the
       status of ATPCS support, so instead we create a dummy,
       empty, debug section called .arm.atpcs.	*/
    if (atpcs)
      {
	asection * sec;

	sec = bfd_make_section (stdoutput, ".arm.atpcs");

	if (sec != NULL)
	  {
	    bfd_set_section_flags
	      (stdoutput, sec, SEC_READONLY | SEC_DEBUGGING /* | SEC_HAS_CONTENTS */);
	    bfd_set_section_size (stdoutput, sec, 0);
	    bfd_set_section_contents (stdoutput, sec, NULL, 0, 0);
	  }
      }
  }
#endif

  /* Record the CPU type as well.  Ordered from most to least specific
     feature: first the coprocessor extensions, then architecture
     versions from newest to oldest.  */
  if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2))
    mach = bfd_mach_arm_iWMMXt2;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt))
    mach = bfd_mach_arm_iWMMXt;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_xscale))
    mach = bfd_mach_arm_XScale;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_maverick))
    mach = bfd_mach_arm_ep9312;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5e))
    mach = bfd_mach_arm_5TE;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5))
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
	mach = bfd_mach_arm_5T;
      else
	mach = bfd_mach_arm_5;
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4))
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
	mach = bfd_mach_arm_4T;
      else
	mach = bfd_mach_arm_4;
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3m))
    mach = bfd_mach_arm_3M;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3))
    mach = bfd_mach_arm_3;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2s))
    mach = bfd_mach_arm_2a;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2))
    mach = bfd_mach_arm_2;
  else
    mach = bfd_mach_arm_unknown;

  bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
}
26383 /* Command line processing. */
26386 Invocation line includes a switch not recognized by the base assembler.
26387 See if it's a processor-specific option.
26389 This routine is somewhat complicated by the need for backwards
26390 compatibility (since older releases of gcc can't be changed).
26391 The new options try to make the interface as compatible as
26394 New options (supported) are:
26396 -mcpu=<cpu name> Assemble for selected processor
26397 -march=<architecture name> Assemble for selected architecture
26398 -mfpu=<fpu architecture> Assemble for selected FPU.
26399 -EB/-mbig-endian Big-endian
26400 -EL/-mlittle-endian Little-endian
26401 -k Generate PIC code
26402 -mthumb Start in Thumb mode
26403 -mthumb-interwork Code supports ARM/Thumb interworking
26405 -m[no-]warn-deprecated Warn about deprecated features
26406 -m[no-]warn-syms Warn when symbols match instructions
26408 For now we will also provide support for:
26410 -mapcs-32 32-bit Program counter
26411 -mapcs-26 26-bit Program counter
26412 -mapcs-float Floats passed in FP registers
26413 -mapcs-reentrant Reentrant code
26415 (sometime these will probably be replaced with -mapcs=<list of options>
26416 and -matpcs=<list of options>)
26418 The remaining options are only supported for backwards compatibility.
26419 Cpu variants, the arm part is optional:
26420 -m[arm]1 Currently not supported.
26421 -m[arm]2, -m[arm]250 Arm 2 and Arm 250 processor
26422 -m[arm]3 Arm 3 processor
26423 -m[arm]6[xx], Arm 6 processors
26424 -m[arm]7[xx][t][[d]m] Arm 7 processors
26425 -m[arm]8[10] Arm 8 processors
26426 -m[arm]9[20][tdmi] Arm 9 processors
26427 -mstrongarm[110[0]] StrongARM processors
26428 -mxscale XScale processors
26429 -m[arm]v[2345[t[e]]] Arm architectures
26430 -mall All (except the ARM1)
26432 -mfpa10, -mfpa11 FPA10 and 11 co-processor instructions
26433 -mfpe-old (No float load/store multiples)
26434 -mvfpxd VFP Single precision
26436 -mno-fpu Disable all floating point instructions
26438 The following CPU names are recognized:
26439 arm1, arm2, arm250, arm3, arm6, arm600, arm610, arm620,
26440 arm7, arm7m, arm7d, arm7dm, arm7di, arm7dmi, arm70, arm700,
26441 arm700i, arm710 arm710t, arm720, arm720t, arm740t, arm710c,
26442 arm7100, arm7500, arm7500fe, arm7tdmi, arm8, arm810, arm9,
26443 arm920, arm920t, arm940t, arm946, arm966, arm9tdmi, arm9e,
26444 arm10t arm10e, arm1020t, arm1020e, arm10200e,
26445 strongarm, strongarm110, strongarm1100, strongarm1110, xscale.
/* Short options accepted by this target: -m<arg> (CPU/feature
   selection) and -k (PIC).  */
const char * md_shortopts = "m:k";

/* Long-option values.  The endianness switches only exist when the
   configuration supports the corresponding byte order.  */
#ifdef ARM_BI_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#define OPTION_EL (OPTION_MD_BASE + 1)
#else
#if TARGET_BYTES_BIG_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#else
#define OPTION_EL (OPTION_MD_BASE + 1)
#endif
#endif
#define OPTION_FIX_V4BX (OPTION_MD_BASE + 2)
#define OPTION_FDPIC (OPTION_MD_BASE + 3)
/* Target-specific long options recognized by md_parse_option.  */
struct option md_longopts[] =
{
#ifdef OPTION_EB
  {"EB", no_argument, NULL, OPTION_EB},
#endif
#ifdef OPTION_EL
  {"EL", no_argument, NULL, OPTION_EL},
#endif
  {"fix-v4bx", no_argument, NULL, OPTION_FIX_V4BX},
  {"fdpic", no_argument, NULL, OPTION_FDPIC},
  {NULL, no_argument, NULL, 0}
};

size_t md_longopts_size = sizeof (md_longopts);
/* Describes a simple boolean command-line option: matching OPTION sets
   *VAR to VALUE.  */
struct arm_option_table
{
  const char * option;		/* Option name to match.  */
  const char * help;		/* Help information.  */
  int *        var;		/* Variable to change.	*/
  int	       value;		/* What to change it to.  */
  const char * deprecated;	/* If non-null, print this message.  */
};
26490 struct arm_option_table arm_opts
[] =
26492 {"k", N_("generate PIC code"), &pic_code
, 1, NULL
},
26493 {"mthumb", N_("assemble Thumb code"), &thumb_mode
, 1, NULL
},
26494 {"mthumb-interwork", N_("support ARM/Thumb interworking"),
26495 &support_interwork
, 1, NULL
},
26496 {"mapcs-32", N_("code uses 32-bit program counter"), &uses_apcs_26
, 0, NULL
},
26497 {"mapcs-26", N_("code uses 26-bit program counter"), &uses_apcs_26
, 1, NULL
},
26498 {"mapcs-float", N_("floating point args are in fp regs"), &uses_apcs_float
,
26500 {"mapcs-reentrant", N_("re-entrant code"), &pic_code
, 1, NULL
},
26501 {"matpcs", N_("code is ATPCS conformant"), &atpcs
, 1, NULL
},
26502 {"mbig-endian", N_("assemble for big-endian"), &target_big_endian
, 1, NULL
},
26503 {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian
, 0,
26506 /* These are recognized by the assembler, but have no affect on code. */
26507 {"mapcs-frame", N_("use frame pointer"), NULL
, 0, NULL
},
26508 {"mapcs-stack-check", N_("use stack size checking"), NULL
, 0, NULL
},
26510 {"mwarn-deprecated", NULL
, &warn_on_deprecated
, 1, NULL
},
26511 {"mno-warn-deprecated", N_("do not warn on use of deprecated feature"),
26512 &warn_on_deprecated
, 0, NULL
},
26513 {"mwarn-syms", N_("warn about symbols that match instruction names [default]"), (int *) (& flag_warn_syms
), TRUE
, NULL
},
26514 {"mno-warn-syms", N_("disable warnings about symobls that match instructions"), (int *) (& flag_warn_syms
), FALSE
, NULL
},
26515 {NULL
, NULL
, NULL
, 0, NULL
}
/* Describes a legacy (deprecated) CPU/FPU option: matching OPTION
   points *VAR at VALUE and prints DEPRECATED as a warning.  */
struct arm_legacy_option_table
{
  const char *              option;	/* Option name to match.  */
  const arm_feature_set   ** var;	/* Variable to change.	*/
  const arm_feature_set     value;	/* What to change it to.  */
  const char *              deprecated;	/* If non-null, print this message.  */
};
/* Legacy -m<cpu>/-m<arch>/-m<fpu> spellings, each mapped to a feature
   set plus a deprecation message suggesting the modern option.  */
const struct arm_legacy_option_table arm_legacy_opts[] =
{
  /* DON'T add any new processors to this list -- we want the whole list
     to go away...  Add them to the processors table instead.  */
  {"marm1",	 &legacy_cpu, ARM_ARCH_V1,  N_("use -mcpu=arm1")},
  {"m1",	 &legacy_cpu, ARM_ARCH_V1,  N_("use -mcpu=arm1")},
  {"marm2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -mcpu=arm2")},
  {"m2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -mcpu=arm2")},
  {"marm250",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
  {"m250",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
  {"marm3",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
  {"m3",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
  {"marm6",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm6")},
  {"m6",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm6")},
  {"marm600",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm600")},
  {"m600",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm600")},
  {"marm610",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm610")},
  {"m610",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm610")},
  {"marm620",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm620")},
  {"m620",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm620")},
  {"marm7",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7")},
  {"m7",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7")},
  {"marm70",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm70")},
  {"m70",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm70")},
  {"marm700",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700")},
  {"m700",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700")},
  {"marm700i",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700i")},
  {"m700i",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700i")},
  {"marm710",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710")},
  {"m710",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710")},
  {"marm710c",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710c")},
  {"m710c",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710c")},
  {"marm720",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm720")},
  {"m720",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm720")},
  {"marm7d",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7d")},
  {"m7d",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7d")},
  {"marm7di",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7di")},
  {"m7di",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7di")},
  {"marm7m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
  {"m7m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
  {"marm7dm",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
  {"m7dm",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
  {"marm7dmi",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
  {"m7dmi",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
  {"marm7100",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7100")},
  {"m7100",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7100")},
  {"marm7500",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500")},
  {"m7500",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500")},
  {"marm7500fe", &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500fe")},
  {"m7500fe",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500fe")},
  {"marm7t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"m7t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"marm7tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"m7tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"marm710t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
  {"m710t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
  {"marm720t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
  {"m720t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
  {"marm740t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
  {"m740t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
  {"marm8",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm8")},
  {"m8",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm8")},
  {"marm810",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm810")},
  {"m810",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm810")},
  {"marm9",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
  {"m9",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
  {"marm9tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
  {"m9tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
  {"marm920",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
  {"m920",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
  {"marm940",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
  {"m940",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
  {"mstrongarm", &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=strongarm")},
  {"mstrongarm110", &legacy_cpu, ARM_ARCH_V4,
   N_("use -mcpu=strongarm110")},
  {"mstrongarm1100", &legacy_cpu, ARM_ARCH_V4,
   N_("use -mcpu=strongarm1100")},
  {"mstrongarm1110", &legacy_cpu, ARM_ARCH_V4,
   N_("use -mcpu=strongarm1110")},
  {"mxscale",	 &legacy_cpu, ARM_ARCH_XSCALE, N_("use -mcpu=xscale")},
  {"miwmmxt",	 &legacy_cpu, ARM_ARCH_IWMMXT, N_("use -mcpu=iwmmxt")},
  {"mall",	 &legacy_cpu, ARM_ANY,	       N_("use -mcpu=all")},

  /* Architecture variants -- don't add any more to this list either.  */
  {"mv2",	 &legacy_cpu, ARM_ARCH_V2,   N_("use -march=armv2")},
  {"marmv2",	 &legacy_cpu, ARM_ARCH_V2,   N_("use -march=armv2")},
  {"mv2a",	 &legacy_cpu, ARM_ARCH_V2S,  N_("use -march=armv2a")},
  {"marmv2a",	 &legacy_cpu, ARM_ARCH_V2S,  N_("use -march=armv2a")},
  {"mv3",	 &legacy_cpu, ARM_ARCH_V3,   N_("use -march=armv3")},
  {"marmv3",	 &legacy_cpu, ARM_ARCH_V3,   N_("use -march=armv3")},
  {"mv3m",	 &legacy_cpu, ARM_ARCH_V3M,  N_("use -march=armv3m")},
  {"marmv3m",	 &legacy_cpu, ARM_ARCH_V3M,  N_("use -march=armv3m")},
  {"mv4",	 &legacy_cpu, ARM_ARCH_V4,   N_("use -march=armv4")},
  {"marmv4",	 &legacy_cpu, ARM_ARCH_V4,   N_("use -march=armv4")},
  {"mv4t",	 &legacy_cpu, ARM_ARCH_V4T,  N_("use -march=armv4t")},
  {"marmv4t",	 &legacy_cpu, ARM_ARCH_V4T,  N_("use -march=armv4t")},
  {"mv5",	 &legacy_cpu, ARM_ARCH_V5,   N_("use -march=armv5")},
  {"marmv5",	 &legacy_cpu, ARM_ARCH_V5,   N_("use -march=armv5")},
  {"mv5t",	 &legacy_cpu, ARM_ARCH_V5T,  N_("use -march=armv5t")},
  {"marmv5t",	 &legacy_cpu, ARM_ARCH_V5T,  N_("use -march=armv5t")},
  {"mv5e",	 &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},
  {"marmv5e",	 &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},

  /* Floating point variants -- don't add any more to this list either.	 */
  {"mfpe-old",	 &legacy_fpu, FPU_ARCH_FPE,  N_("use -mfpu=fpe")},
  {"mfpa10",	 &legacy_fpu, FPU_ARCH_FPA,  N_("use -mfpu=fpa10")},
  {"mfpa11",	 &legacy_fpu, FPU_ARCH_FPA,  N_("use -mfpu=fpa11")},
  {"mno-fpu",	 &legacy_fpu, ARM_ARCH_NONE,
   N_("use either -mfpu=softfpa or -mfpu=softvfp")},

  {NULL, NULL, ARM_ARCH_NONE, NULL}
};
/* Describes one -mcpu= candidate.  Field layout matches the
   ARM_CPU_OPT initializer macro: name, precomputed name length,
   architecture features, extension features, default FPU, display
   name.  */
struct arm_cpu_option_table
{
  const char *		 name;
  size_t		 name_len;
  const arm_feature_set	 value;
  const arm_feature_set	 ext;
  /* For some CPUs we assume an FPU unless the user explicitly sets
     -mfpu=...	*/
  const arm_feature_set	 default_fpu;
  /* The canonical name of the CPU, or NULL to use NAME converted to upper
     case.  */
  const char *		 canonical_name;
};
/* This list should, at a minimum, contain all the cpu names
   recognized by GCC.  */
/* NOTE(review): the extraction this was recovered from dropped the
   extension/default-FPU field of many entries; those fields below were
   reconstructed and should be confirmed against the upstream source.  */
#define ARM_CPU_OPT(N, CN, V, E, DF) { N, sizeof (N) - 1, V, E, DF, CN }

static const struct arm_cpu_option_table arm_cpus[] =
{
  /* Pre-v5 cores: no extensions, FPA is the assumed FPU.  */
  ARM_CPU_OPT ("all",		NULL,	ARM_ANY,	ARM_ARCH_NONE,	FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm1",		NULL,	ARM_ARCH_V1,	ARM_ARCH_NONE,	FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm2",		NULL,	ARM_ARCH_V2,	ARM_ARCH_NONE,	FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm250",	NULL,	ARM_ARCH_V2S,	ARM_ARCH_NONE,	FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm3",		NULL,	ARM_ARCH_V2S,	ARM_ARCH_NONE,	FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm6",		NULL,	ARM_ARCH_V3,	ARM_ARCH_NONE,	FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm60",		NULL,	ARM_ARCH_V3,	ARM_ARCH_NONE,	FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm600",	NULL,	ARM_ARCH_V3,	ARM_ARCH_NONE,	FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm610",	NULL,	ARM_ARCH_V3,	ARM_ARCH_NONE,	FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm620",	NULL,	ARM_ARCH_V3,	ARM_ARCH_NONE,	FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7",		NULL,	ARM_ARCH_V3,	ARM_ARCH_NONE,	FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7m",		NULL,	ARM_ARCH_V3M,	ARM_ARCH_NONE,	FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7d",		NULL,	ARM_ARCH_V3,	ARM_ARCH_NONE,	FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7dm",	NULL,	ARM_ARCH_V3M,	ARM_ARCH_NONE,	FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7di",	NULL,	ARM_ARCH_V3,	ARM_ARCH_NONE,	FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7dmi",	NULL,	ARM_ARCH_V3M,	ARM_ARCH_NONE,	FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm70",		NULL,	ARM_ARCH_V3,	ARM_ARCH_NONE,	FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm700",	NULL,	ARM_ARCH_V3,	ARM_ARCH_NONE,	FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm700i",	NULL,	ARM_ARCH_V3,	ARM_ARCH_NONE,	FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm710",	NULL,	ARM_ARCH_V3,	ARM_ARCH_NONE,	FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm710t",	NULL,	ARM_ARCH_V4T,	ARM_ARCH_NONE,	FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm720",	NULL,	ARM_ARCH_V3,	ARM_ARCH_NONE,	FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm720t",	NULL,	ARM_ARCH_V4T,	ARM_ARCH_NONE,	FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm740t",	NULL,	ARM_ARCH_V4T,	ARM_ARCH_NONE,	FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm710c",	NULL,	ARM_ARCH_V3,	ARM_ARCH_NONE,	FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7100",	NULL,	ARM_ARCH_V3,	ARM_ARCH_NONE,	FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7500",	NULL,	ARM_ARCH_V3,	ARM_ARCH_NONE,	FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7500fe",	NULL,	ARM_ARCH_V3,	ARM_ARCH_NONE,	FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7t",		NULL,	ARM_ARCH_V4T,	ARM_ARCH_NONE,	FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7tdmi",	NULL,	ARM_ARCH_V4T,	ARM_ARCH_NONE,	FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7tdmi-s",	NULL,	ARM_ARCH_V4T,	ARM_ARCH_NONE,	FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm8",		NULL,	ARM_ARCH_V4,	ARM_ARCH_NONE,	FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm810",	NULL,	ARM_ARCH_V4,	ARM_ARCH_NONE,	FPU_ARCH_FPA),
  ARM_CPU_OPT ("strongarm",	NULL,	ARM_ARCH_V4,	ARM_ARCH_NONE,	FPU_ARCH_FPA),
  ARM_CPU_OPT ("strongarm1",	NULL,	ARM_ARCH_V4,	ARM_ARCH_NONE,	FPU_ARCH_FPA),
  ARM_CPU_OPT ("strongarm110",	NULL,	ARM_ARCH_V4,	ARM_ARCH_NONE,	FPU_ARCH_FPA),
  ARM_CPU_OPT ("strongarm1100",	NULL,	ARM_ARCH_V4,	ARM_ARCH_NONE,	FPU_ARCH_FPA),
  ARM_CPU_OPT ("strongarm1110",	NULL,	ARM_ARCH_V4,	ARM_ARCH_NONE,	FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm9",		NULL,	ARM_ARCH_V4T,	ARM_ARCH_NONE,	FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm920",	"ARM920T", ARM_ARCH_V4T, ARM_ARCH_NONE,	FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm920t",	NULL,	ARM_ARCH_V4T,	ARM_ARCH_NONE,	FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm922t",	NULL,	ARM_ARCH_V4T,	ARM_ARCH_NONE,	FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm940t",	NULL,	ARM_ARCH_V4T,	ARM_ARCH_NONE,	FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm9tdmi",	NULL,	ARM_ARCH_V4T,	ARM_ARCH_NONE,	FPU_ARCH_FPA),
  ARM_CPU_OPT ("fa526",		NULL,	ARM_ARCH_V4,	ARM_ARCH_NONE,	FPU_ARCH_FPA),
  ARM_CPU_OPT ("fa626",		NULL,	ARM_ARCH_V4,	ARM_ARCH_NONE,	FPU_ARCH_FPA),

  /* For V5 or later processors we default to using VFP; but the user
     should really set the FPU type explicitly.	 */
  ARM_CPU_OPT ("arm9e-r0",	NULL,	ARM_ARCH_V5TExP, ARM_ARCH_NONE,	FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm9e",		NULL,	ARM_ARCH_V5TE,	ARM_ARCH_NONE,	FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm926ej",	"ARM926EJ-S", ARM_ARCH_V5TEJ, ARM_ARCH_NONE, FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm926ejs",	"ARM926EJ-S", ARM_ARCH_V5TEJ, ARM_ARCH_NONE, FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm926ej-s",	NULL,	ARM_ARCH_V5TEJ,	ARM_ARCH_NONE,	FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm946e-r0",	NULL,	ARM_ARCH_V5TExP, ARM_ARCH_NONE,	FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm946e",	"ARM946E-S", ARM_ARCH_V5TE, ARM_ARCH_NONE, FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm946e-s",	NULL,	ARM_ARCH_V5TE,	ARM_ARCH_NONE,	FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm966e-r0",	NULL,	ARM_ARCH_V5TExP, ARM_ARCH_NONE,	FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm966e",	"ARM966E-S", ARM_ARCH_V5TE, ARM_ARCH_NONE, FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm966e-s",	NULL,	ARM_ARCH_V5TE,	ARM_ARCH_NONE,	FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm968e-s",	NULL,	ARM_ARCH_V5TE,	ARM_ARCH_NONE,	FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm10t",	NULL,	ARM_ARCH_V5T,	ARM_ARCH_NONE,	FPU_ARCH_VFP_V1),
  ARM_CPU_OPT ("arm10tdmi",	NULL,	ARM_ARCH_V5T,	ARM_ARCH_NONE,	FPU_ARCH_VFP_V1),
  ARM_CPU_OPT ("arm10e",	NULL,	ARM_ARCH_V5TE,	ARM_ARCH_NONE,	FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm1020",	"ARM1020E", ARM_ARCH_V5TE, ARM_ARCH_NONE, FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm1020t",	NULL,	ARM_ARCH_V5T,	ARM_ARCH_NONE,	FPU_ARCH_VFP_V1),
  ARM_CPU_OPT ("arm1020e",	NULL,	ARM_ARCH_V5TE,	ARM_ARCH_NONE,	FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm1022e",	NULL,	ARM_ARCH_V5TE,	ARM_ARCH_NONE,	FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm1026ejs",	"ARM1026EJ-S", ARM_ARCH_V5TEJ, ARM_ARCH_NONE, FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm1026ej-s",	NULL,	ARM_ARCH_V5TEJ,	ARM_ARCH_NONE,	FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("fa606te",	NULL,	ARM_ARCH_V5TE,	ARM_ARCH_NONE,	FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("fa616te",	NULL,	ARM_ARCH_V5TE,	ARM_ARCH_NONE,	FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("fa626te",	NULL,	ARM_ARCH_V5TE,	ARM_ARCH_NONE,	FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("fmp626",	NULL,	ARM_ARCH_V5TE,	ARM_ARCH_NONE,	FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("fa726te",	NULL,	ARM_ARCH_V5TE,	ARM_ARCH_NONE,	FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm1136js",	"ARM1136J-S", ARM_ARCH_V6, ARM_ARCH_NONE, FPU_NONE),
  ARM_CPU_OPT ("arm1136j-s",	NULL,	ARM_ARCH_V6,	ARM_ARCH_NONE,	FPU_NONE),
  ARM_CPU_OPT ("arm1136jfs",	"ARM1136JF-S", ARM_ARCH_V6, ARM_ARCH_NONE, FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm1136jf-s",	NULL,	ARM_ARCH_V6,	ARM_ARCH_NONE,	FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("mpcore",	"MPCore", ARM_ARCH_V6K,	ARM_ARCH_NONE,	FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("mpcorenovfp",	"MPCore", ARM_ARCH_V6K,	ARM_ARCH_NONE,	FPU_NONE),
  ARM_CPU_OPT ("arm1156t2-s",	NULL,	ARM_ARCH_V6T2,	ARM_ARCH_NONE,	FPU_NONE),
  ARM_CPU_OPT ("arm1156t2f-s",	NULL,	ARM_ARCH_V6T2,	ARM_ARCH_NONE,	FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm1176jz-s",	NULL,	ARM_ARCH_V6KZ,	ARM_ARCH_NONE,	FPU_NONE),
  ARM_CPU_OPT ("arm1176jzf-s",	NULL,	ARM_ARCH_V6KZ,	ARM_ARCH_NONE,	FPU_ARCH_VFP_V2),

  /* Cortex-A application profile.  */
  ARM_CPU_OPT ("cortex-a5",	"Cortex-A5", ARM_ARCH_V7A,
	       ARM_FEATURE_CORE_LOW (ARM_EXT_MP | ARM_EXT_SEC),
	       FPU_NONE),
  ARM_CPU_OPT ("cortex-a7",	"Cortex-A7", ARM_ARCH_V7VE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_NEON_VFP_V4),
  ARM_CPU_OPT ("cortex-a8",	"Cortex-A8", ARM_ARCH_V7A,
	       ARM_FEATURE_CORE_LOW (ARM_EXT_SEC),
	       ARM_FEATURE_COPROC (FPU_VFP_V3 | FPU_NEON_EXT_V1)),
  ARM_CPU_OPT ("cortex-a9",	"Cortex-A9", ARM_ARCH_V7A,
	       ARM_FEATURE_CORE_LOW (ARM_EXT_MP | ARM_EXT_SEC),
	       ARM_FEATURE_COPROC (FPU_VFP_V3 | FPU_NEON_EXT_V1)),
  ARM_CPU_OPT ("cortex-a12",	"Cortex-A12", ARM_ARCH_V7VE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_NEON_VFP_V4),
  ARM_CPU_OPT ("cortex-a15",	"Cortex-A15", ARM_ARCH_V7VE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_NEON_VFP_V4),
  ARM_CPU_OPT ("cortex-a17",	"Cortex-A17", ARM_ARCH_V7VE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_NEON_VFP_V4),
  ARM_CPU_OPT ("cortex-a32",	"Cortex-A32", ARM_ARCH_V8A,
	       ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
  ARM_CPU_OPT ("cortex-a35",	"Cortex-A35", ARM_ARCH_V8A,
	       ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
  ARM_CPU_OPT ("cortex-a53",	"Cortex-A53", ARM_ARCH_V8A,
	       ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
  ARM_CPU_OPT ("cortex-a55",	"Cortex-A55", ARM_ARCH_V8_2A,
	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD),
  ARM_CPU_OPT ("cortex-a57",	"Cortex-A57", ARM_ARCH_V8A,
	       ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
  ARM_CPU_OPT ("cortex-a72",	"Cortex-A72", ARM_ARCH_V8A,
	       ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
  ARM_CPU_OPT ("cortex-a73",	"Cortex-A73", ARM_ARCH_V8A,
	       ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
  ARM_CPU_OPT ("cortex-a75",	"Cortex-A75", ARM_ARCH_V8_2A,
	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD),
  ARM_CPU_OPT ("cortex-a76",	"Cortex-A76", ARM_ARCH_V8_2A,
	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD),
  ARM_CPU_OPT ("ares",		"Ares", ARM_ARCH_V8_2A,
	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD),

  /* Cortex-R real-time profile.  */
  ARM_CPU_OPT ("cortex-r4",	"Cortex-R4", ARM_ARCH_V7R,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("cortex-r4f",	"Cortex-R4F", ARM_ARCH_V7R,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V3D16),
  ARM_CPU_OPT ("cortex-r5",	"Cortex-R5", ARM_ARCH_V7R,
	       ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV),
	       FPU_NONE),
  ARM_CPU_OPT ("cortex-r7",	"Cortex-R7", ARM_ARCH_V7R,
	       ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV),
	       FPU_ARCH_VFP_V3D16),
  ARM_CPU_OPT ("cortex-r8",	"Cortex-R8", ARM_ARCH_V7R,
	       ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV),
	       FPU_ARCH_VFP_V3D16),
  ARM_CPU_OPT ("cortex-r52",	"Cortex-R52", ARM_ARCH_V8R,
	       ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
	       FPU_ARCH_NEON_VFP_ARMV8),

  /* Cortex-M microcontroller profile.  */
  ARM_CPU_OPT ("cortex-m33",	"Cortex-M33", ARM_ARCH_V8M_MAIN,
	       ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP),
	       FPU_NONE),
  ARM_CPU_OPT ("cortex-m23",	"Cortex-M23", ARM_ARCH_V8M_BASE,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("cortex-m7",	"Cortex-M7", ARM_ARCH_V7EM,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("cortex-m4",	"Cortex-M4", ARM_ARCH_V7EM,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("cortex-m3",	"Cortex-M3", ARM_ARCH_V7M,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("cortex-m1",	"Cortex-M1", ARM_ARCH_V6SM,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("cortex-m0",	"Cortex-M0", ARM_ARCH_V6SM,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("cortex-m0plus",	"Cortex-M0+", ARM_ARCH_V6SM,
	       ARM_ARCH_NONE,
	       FPU_NONE),

  ARM_CPU_OPT ("exynos-m1",	"Samsung Exynos M1", ARM_ARCH_V8A,
	       ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
  ARM_CPU_OPT ("neoverse-n1",	"Neoverse N1", ARM_ARCH_V8_2A,
	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD),

  /* ??? XSCALE is really an architecture.  */
  ARM_CPU_OPT ("xscale",	NULL,	ARM_ARCH_XSCALE, ARM_ARCH_NONE,	FPU_ARCH_VFP_V2),

  /* ??? iwmmxt is not a processor.  */
  ARM_CPU_OPT ("iwmmxt",	NULL,	ARM_ARCH_IWMMXT, ARM_ARCH_NONE,	FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("iwmmxt2",	NULL,	ARM_ARCH_IWMMXT2, ARM_ARCH_NONE, FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("i80200",	NULL,	ARM_ARCH_XSCALE, ARM_ARCH_NONE,	FPU_ARCH_VFP_V2),

  /* Maverick.  */
  ARM_CPU_OPT ("ep9312",	"ARM920T",
	       ARM_FEATURE_LOW (ARM_AEXT_V4T, ARM_CEXT_MAVERICK),
	       ARM_ARCH_NONE, FPU_ARCH_MAVERICK),

  /* Marvell processors.  */
  ARM_CPU_OPT ("marvell-pj4",	NULL,	ARM_ARCH_V7A,
	       ARM_FEATURE_CORE_LOW (ARM_EXT_MP | ARM_EXT_SEC),
	       FPU_ARCH_VFP_V3D16),
  ARM_CPU_OPT ("marvell-whitney", NULL,	ARM_ARCH_V7A,
	       ARM_FEATURE_CORE_LOW (ARM_EXT_MP | ARM_EXT_SEC),
	       FPU_ARCH_NEON_VFP_V4),

  /* APM X-Gene family.  */
  ARM_CPU_OPT ("xgene1",	"APM X-Gene 1", ARM_ARCH_V8A,
	       ARM_ARCH_NONE,
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
  ARM_CPU_OPT ("xgene2",	"APM X-Gene 2", ARM_ARCH_V8A,
	       ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),

  /* Terminator.  */
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE, ARM_ARCH_NONE, NULL }
};
#undef ARM_CPU_OPT
27048 struct arm_ext_table
27052 const arm_feature_set merge
;
27053 const arm_feature_set clear
;
27056 struct arm_arch_option_table
27060 const arm_feature_set value
;
27061 const arm_feature_set default_fpu
;
27062 const struct arm_ext_table
* ext_table
;
/* Helpers for building entries in an arm_ext_table: each entry names an
   architectural extension and gives the feature bits to merge when the
   user writes +E and the bits to clear when the user writes +noE.  */

/* Used to add support for +E and +noE extension.  */
#define ARM_EXT(E, M, C) { E, sizeof (E) - 1, M, C }
/* Used to add support for a +E extension.  */
#define ARM_ADD(E, M) { E, sizeof (E) - 1, M, ARM_ARCH_NONE }
/* Used to add support for a +noE extension.  */
#define ARM_REMOVE(E, C) { E, sizeof (E) - 1, ARM_ARCH_NONE, C }

/* Every floating-point feature bit: used by +nofp entries to strip all FP
   support.  FPU_ENDIAN_PURE is excluded because it describes data layout,
   not an FP capability.  */
#define ALL_FP ARM_FEATURE (0, ARM_EXT2_FP16_INST | ARM_EXT2_FP16_FML, \
			    ~0 & ~FPU_ENDIAN_PURE)
27075 static const struct arm_ext_table armv5te_ext_table
[] =
27077 ARM_EXT ("fp", FPU_ARCH_VFP_V2
, ALL_FP
),
27078 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
27081 static const struct arm_ext_table armv7_ext_table
[] =
27083 ARM_EXT ("fp", FPU_ARCH_VFP_V3D16
, ALL_FP
),
27084 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
27087 static const struct arm_ext_table armv7ve_ext_table
[] =
27089 ARM_EXT ("fp", FPU_ARCH_VFP_V4D16
, ALL_FP
),
27090 ARM_ADD ("vfpv3-d16", FPU_ARCH_VFP_V3D16
),
27091 ARM_ADD ("vfpv3", FPU_ARCH_VFP_V3
),
27092 ARM_ADD ("vfpv3-d16-fp16", FPU_ARCH_VFP_V3D16_FP16
),
27093 ARM_ADD ("vfpv3-fp16", FPU_ARCH_VFP_V3_FP16
),
27094 ARM_ADD ("vfpv4-d16", FPU_ARCH_VFP_V4D16
), /* Alias for +fp. */
27095 ARM_ADD ("vfpv4", FPU_ARCH_VFP_V4
),
27097 ARM_EXT ("simd", FPU_ARCH_NEON_VFP_V4
,
27098 ARM_FEATURE_COPROC (FPU_NEON_EXT_V1
| FPU_NEON_EXT_FMA
)),
27100 /* Aliases for +simd. */
27101 ARM_ADD ("neon-vfpv4", FPU_ARCH_NEON_VFP_V4
),
27103 ARM_ADD ("neon", FPU_ARCH_VFP_V3_PLUS_NEON_V1
),
27104 ARM_ADD ("neon-vfpv3", FPU_ARCH_VFP_V3_PLUS_NEON_V1
),
27105 ARM_ADD ("neon-fp16", FPU_ARCH_NEON_FP16
),
27107 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
27110 static const struct arm_ext_table armv7a_ext_table
[] =
27112 ARM_EXT ("fp", FPU_ARCH_VFP_V3D16
, ALL_FP
),
27113 ARM_ADD ("vfpv3-d16", FPU_ARCH_VFP_V3D16
), /* Alias for +fp. */
27114 ARM_ADD ("vfpv3", FPU_ARCH_VFP_V3
),
27115 ARM_ADD ("vfpv3-d16-fp16", FPU_ARCH_VFP_V3D16_FP16
),
27116 ARM_ADD ("vfpv3-fp16", FPU_ARCH_VFP_V3_FP16
),
27117 ARM_ADD ("vfpv4-d16", FPU_ARCH_VFP_V4D16
),
27118 ARM_ADD ("vfpv4", FPU_ARCH_VFP_V4
),
27120 ARM_EXT ("simd", FPU_ARCH_VFP_V3_PLUS_NEON_V1
,
27121 ARM_FEATURE_COPROC (FPU_NEON_EXT_V1
| FPU_NEON_EXT_FMA
)),
27123 /* Aliases for +simd. */
27124 ARM_ADD ("neon", FPU_ARCH_VFP_V3_PLUS_NEON_V1
),
27125 ARM_ADD ("neon-vfpv3", FPU_ARCH_VFP_V3_PLUS_NEON_V1
),
27127 ARM_ADD ("neon-fp16", FPU_ARCH_NEON_FP16
),
27128 ARM_ADD ("neon-vfpv4", FPU_ARCH_NEON_VFP_V4
),
27130 ARM_ADD ("mp", ARM_FEATURE_CORE_LOW (ARM_EXT_MP
)),
27131 ARM_ADD ("sec", ARM_FEATURE_CORE_LOW (ARM_EXT_SEC
)),
27132 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
27135 static const struct arm_ext_table armv7r_ext_table
[] =
27137 ARM_ADD ("fp.sp", FPU_ARCH_VFP_V3xD
),
27138 ARM_ADD ("vfpv3xd", FPU_ARCH_VFP_V3xD
), /* Alias for +fp.sp. */
27139 ARM_EXT ("fp", FPU_ARCH_VFP_V3D16
, ALL_FP
),
27140 ARM_ADD ("vfpv3-d16", FPU_ARCH_VFP_V3D16
), /* Alias for +fp. */
27141 ARM_ADD ("vfpv3xd-fp16", FPU_ARCH_VFP_V3xD_FP16
),
27142 ARM_ADD ("vfpv3-d16-fp16", FPU_ARCH_VFP_V3D16_FP16
),
27143 ARM_EXT ("idiv", ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV
| ARM_EXT_DIV
),
27144 ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV
| ARM_EXT_DIV
)),
27145 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
27148 static const struct arm_ext_table armv7em_ext_table
[] =
27150 ARM_EXT ("fp", FPU_ARCH_VFP_V4_SP_D16
, ALL_FP
),
27151 /* Alias for +fp, used to be known as fpv4-sp-d16. */
27152 ARM_ADD ("vfpv4-sp-d16", FPU_ARCH_VFP_V4_SP_D16
),
27153 ARM_ADD ("fpv5", FPU_ARCH_VFP_V5_SP_D16
),
27154 ARM_ADD ("fp.dp", FPU_ARCH_VFP_V5D16
),
27155 ARM_ADD ("fpv5-d16", FPU_ARCH_VFP_V5D16
),
27156 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
27159 static const struct arm_ext_table armv8a_ext_table
[] =
27161 ARM_ADD ("crc", ARCH_CRC_ARMV8
),
27162 ARM_ADD ("simd", FPU_ARCH_NEON_VFP_ARMV8
),
27163 ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
,
27164 ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8
)),
27166 /* Armv8-a does not allow an FP implementation without SIMD, so the user
27167 should use the +simd option to turn on FP. */
27168 ARM_REMOVE ("fp", ALL_FP
),
27169 ARM_ADD ("sb", ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB
)),
27170 ARM_ADD ("predres", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES
)),
27171 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
27175 static const struct arm_ext_table armv81a_ext_table
[] =
27177 ARM_ADD ("simd", FPU_ARCH_NEON_VFP_ARMV8_1
),
27178 ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_1
,
27179 ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8
)),
27181 /* Armv8-a does not allow an FP implementation without SIMD, so the user
27182 should use the +simd option to turn on FP. */
27183 ARM_REMOVE ("fp", ALL_FP
),
27184 ARM_ADD ("sb", ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB
)),
27185 ARM_ADD ("predres", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES
)),
27186 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
27189 static const struct arm_ext_table armv82a_ext_table
[] =
27191 ARM_ADD ("simd", FPU_ARCH_NEON_VFP_ARMV8_1
),
27192 ARM_ADD ("fp16", FPU_ARCH_NEON_VFP_ARMV8_2_FP16
),
27193 ARM_ADD ("fp16fml", FPU_ARCH_NEON_VFP_ARMV8_2_FP16FML
),
27194 ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_1
,
27195 ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8
)),
27196 ARM_ADD ("dotprod", FPU_ARCH_DOTPROD_NEON_VFP_ARMV8
),
27198 /* Armv8-a does not allow an FP implementation without SIMD, so the user
27199 should use the +simd option to turn on FP. */
27200 ARM_REMOVE ("fp", ALL_FP
),
27201 ARM_ADD ("sb", ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB
)),
27202 ARM_ADD ("predres", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES
)),
27203 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
27206 static const struct arm_ext_table armv84a_ext_table
[] =
27208 ARM_ADD ("simd", FPU_ARCH_DOTPROD_NEON_VFP_ARMV8
),
27209 ARM_ADD ("fp16", FPU_ARCH_NEON_VFP_ARMV8_4_FP16FML
),
27210 ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_4
,
27211 ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8
)),
27213 /* Armv8-a does not allow an FP implementation without SIMD, so the user
27214 should use the +simd option to turn on FP. */
27215 ARM_REMOVE ("fp", ALL_FP
),
27216 ARM_ADD ("sb", ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB
)),
27217 ARM_ADD ("predres", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES
)),
27218 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
27221 static const struct arm_ext_table armv85a_ext_table
[] =
27223 ARM_ADD ("simd", FPU_ARCH_DOTPROD_NEON_VFP_ARMV8
),
27224 ARM_ADD ("fp16", FPU_ARCH_NEON_VFP_ARMV8_4_FP16FML
),
27225 ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_4
,
27226 ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8
)),
27228 /* Armv8-a does not allow an FP implementation without SIMD, so the user
27229 should use the +simd option to turn on FP. */
27230 ARM_REMOVE ("fp", ALL_FP
),
27231 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
27234 static const struct arm_ext_table armv8m_main_ext_table
[] =
27236 ARM_EXT ("dsp", ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP
| ARM_EXT_V6_DSP
),
27237 ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP
| ARM_EXT_V6_DSP
)),
27238 ARM_EXT ("fp", FPU_ARCH_VFP_V5_SP_D16
, ALL_FP
),
27239 ARM_ADD ("fp.dp", FPU_ARCH_VFP_V5D16
),
27240 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
27243 static const struct arm_ext_table armv8_1m_main_ext_table
[] =
27245 ARM_EXT ("dsp", ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP
| ARM_EXT_V6_DSP
),
27246 ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP
| ARM_EXT_V6_DSP
)),
27248 ARM_FEATURE (0, ARM_EXT2_FP16_INST
,
27249 FPU_VFP_V5_SP_D16
| FPU_VFP_EXT_FP16
| FPU_VFP_EXT_FMA
),
27252 ARM_FEATURE (0, ARM_EXT2_FP16_INST
,
27253 FPU_VFP_V5D16
| FPU_VFP_EXT_FP16
| FPU_VFP_EXT_FMA
)),
27254 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
27257 static const struct arm_ext_table armv8r_ext_table
[] =
27259 ARM_ADD ("crc", ARCH_CRC_ARMV8
),
27260 ARM_ADD ("simd", FPU_ARCH_NEON_VFP_ARMV8
),
27261 ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
,
27262 ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8
)),
27263 ARM_REMOVE ("fp", ALL_FP
),
27264 ARM_ADD ("fp.sp", FPU_ARCH_VFP_V5_SP_D16
),
27265 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
27268 /* This list should, at a minimum, contain all the architecture names
27269 recognized by GCC. */
/* Build an arm_archs entry with no context-sensitive extension table
   (the +ext suffixes fall back to the legacy arm_extensions table).  */
#define ARM_ARCH_OPT(N, V, DF) { N, sizeof (N) - 1, V, DF, NULL }
/* Build an arm_archs entry whose +ext suffixes are resolved against the
   table named <ext>_ext_table (token-pasted from the EXT argument).  */
#define ARM_ARCH_OPT2(N, V, DF, ext) \
  { N, sizeof (N) - 1, V, DF, ext##_ext_table }
27274 static const struct arm_arch_option_table arm_archs
[] =
27276 ARM_ARCH_OPT ("all", ARM_ANY
, FPU_ARCH_FPA
),
27277 ARM_ARCH_OPT ("armv1", ARM_ARCH_V1
, FPU_ARCH_FPA
),
27278 ARM_ARCH_OPT ("armv2", ARM_ARCH_V2
, FPU_ARCH_FPA
),
27279 ARM_ARCH_OPT ("armv2a", ARM_ARCH_V2S
, FPU_ARCH_FPA
),
27280 ARM_ARCH_OPT ("armv2s", ARM_ARCH_V2S
, FPU_ARCH_FPA
),
27281 ARM_ARCH_OPT ("armv3", ARM_ARCH_V3
, FPU_ARCH_FPA
),
27282 ARM_ARCH_OPT ("armv3m", ARM_ARCH_V3M
, FPU_ARCH_FPA
),
27283 ARM_ARCH_OPT ("armv4", ARM_ARCH_V4
, FPU_ARCH_FPA
),
27284 ARM_ARCH_OPT ("armv4xm", ARM_ARCH_V4xM
, FPU_ARCH_FPA
),
27285 ARM_ARCH_OPT ("armv4t", ARM_ARCH_V4T
, FPU_ARCH_FPA
),
27286 ARM_ARCH_OPT ("armv4txm", ARM_ARCH_V4TxM
, FPU_ARCH_FPA
),
27287 ARM_ARCH_OPT ("armv5", ARM_ARCH_V5
, FPU_ARCH_VFP
),
27288 ARM_ARCH_OPT ("armv5t", ARM_ARCH_V5T
, FPU_ARCH_VFP
),
27289 ARM_ARCH_OPT ("armv5txm", ARM_ARCH_V5TxM
, FPU_ARCH_VFP
),
27290 ARM_ARCH_OPT2 ("armv5te", ARM_ARCH_V5TE
, FPU_ARCH_VFP
, armv5te
),
27291 ARM_ARCH_OPT2 ("armv5texp", ARM_ARCH_V5TExP
, FPU_ARCH_VFP
, armv5te
),
27292 ARM_ARCH_OPT2 ("armv5tej", ARM_ARCH_V5TEJ
, FPU_ARCH_VFP
, armv5te
),
27293 ARM_ARCH_OPT2 ("armv6", ARM_ARCH_V6
, FPU_ARCH_VFP
, armv5te
),
27294 ARM_ARCH_OPT2 ("armv6j", ARM_ARCH_V6
, FPU_ARCH_VFP
, armv5te
),
27295 ARM_ARCH_OPT2 ("armv6k", ARM_ARCH_V6K
, FPU_ARCH_VFP
, armv5te
),
27296 ARM_ARCH_OPT2 ("armv6z", ARM_ARCH_V6Z
, FPU_ARCH_VFP
, armv5te
),
27297 /* The official spelling of this variant is ARMv6KZ, the name "armv6zk" is
27298 kept to preserve existing behaviour. */
27299 ARM_ARCH_OPT2 ("armv6kz", ARM_ARCH_V6KZ
, FPU_ARCH_VFP
, armv5te
),
27300 ARM_ARCH_OPT2 ("armv6zk", ARM_ARCH_V6KZ
, FPU_ARCH_VFP
, armv5te
),
27301 ARM_ARCH_OPT2 ("armv6t2", ARM_ARCH_V6T2
, FPU_ARCH_VFP
, armv5te
),
27302 ARM_ARCH_OPT2 ("armv6kt2", ARM_ARCH_V6KT2
, FPU_ARCH_VFP
, armv5te
),
27303 ARM_ARCH_OPT2 ("armv6zt2", ARM_ARCH_V6ZT2
, FPU_ARCH_VFP
, armv5te
),
27304 /* The official spelling of this variant is ARMv6KZ, the name "armv6zkt2" is
27305 kept to preserve existing behaviour. */
27306 ARM_ARCH_OPT2 ("armv6kzt2", ARM_ARCH_V6KZT2
, FPU_ARCH_VFP
, armv5te
),
27307 ARM_ARCH_OPT2 ("armv6zkt2", ARM_ARCH_V6KZT2
, FPU_ARCH_VFP
, armv5te
),
27308 ARM_ARCH_OPT ("armv6-m", ARM_ARCH_V6M
, FPU_ARCH_VFP
),
27309 ARM_ARCH_OPT ("armv6s-m", ARM_ARCH_V6SM
, FPU_ARCH_VFP
),
27310 ARM_ARCH_OPT2 ("armv7", ARM_ARCH_V7
, FPU_ARCH_VFP
, armv7
),
27311 /* The official spelling of the ARMv7 profile variants is the dashed form.
27312 Accept the non-dashed form for compatibility with old toolchains. */
27313 ARM_ARCH_OPT2 ("armv7a", ARM_ARCH_V7A
, FPU_ARCH_VFP
, armv7a
),
27314 ARM_ARCH_OPT2 ("armv7ve", ARM_ARCH_V7VE
, FPU_ARCH_VFP
, armv7ve
),
27315 ARM_ARCH_OPT2 ("armv7r", ARM_ARCH_V7R
, FPU_ARCH_VFP
, armv7r
),
27316 ARM_ARCH_OPT ("armv7m", ARM_ARCH_V7M
, FPU_ARCH_VFP
),
27317 ARM_ARCH_OPT2 ("armv7-a", ARM_ARCH_V7A
, FPU_ARCH_VFP
, armv7a
),
27318 ARM_ARCH_OPT2 ("armv7-r", ARM_ARCH_V7R
, FPU_ARCH_VFP
, armv7r
),
27319 ARM_ARCH_OPT ("armv7-m", ARM_ARCH_V7M
, FPU_ARCH_VFP
),
27320 ARM_ARCH_OPT2 ("armv7e-m", ARM_ARCH_V7EM
, FPU_ARCH_VFP
, armv7em
),
27321 ARM_ARCH_OPT ("armv8-m.base", ARM_ARCH_V8M_BASE
, FPU_ARCH_VFP
),
27322 ARM_ARCH_OPT2 ("armv8-m.main", ARM_ARCH_V8M_MAIN
, FPU_ARCH_VFP
,
27324 ARM_ARCH_OPT2 ("armv8.1-m.main", ARM_ARCH_V8_1M_MAIN
, FPU_ARCH_VFP
,
27326 ARM_ARCH_OPT2 ("armv8-a", ARM_ARCH_V8A
, FPU_ARCH_VFP
, armv8a
),
27327 ARM_ARCH_OPT2 ("armv8.1-a", ARM_ARCH_V8_1A
, FPU_ARCH_VFP
, armv81a
),
27328 ARM_ARCH_OPT2 ("armv8.2-a", ARM_ARCH_V8_2A
, FPU_ARCH_VFP
, armv82a
),
27329 ARM_ARCH_OPT2 ("armv8.3-a", ARM_ARCH_V8_3A
, FPU_ARCH_VFP
, armv82a
),
27330 ARM_ARCH_OPT2 ("armv8-r", ARM_ARCH_V8R
, FPU_ARCH_VFP
, armv8r
),
27331 ARM_ARCH_OPT2 ("armv8.4-a", ARM_ARCH_V8_4A
, FPU_ARCH_VFP
, armv84a
),
27332 ARM_ARCH_OPT2 ("armv8.5-a", ARM_ARCH_V8_5A
, FPU_ARCH_VFP
, armv85a
),
27333 ARM_ARCH_OPT ("xscale", ARM_ARCH_XSCALE
, FPU_ARCH_VFP
),
27334 ARM_ARCH_OPT ("iwmmxt", ARM_ARCH_IWMMXT
, FPU_ARCH_VFP
),
27335 ARM_ARCH_OPT ("iwmmxt2", ARM_ARCH_IWMMXT2
, FPU_ARCH_VFP
),
27336 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
, NULL
}
27338 #undef ARM_ARCH_OPT
27340 /* ISA extensions in the co-processor and main instruction set space. */
27342 struct arm_option_extension_value_table
27346 const arm_feature_set merge_value
;
27347 const arm_feature_set clear_value
;
27348 /* List of architectures for which an extension is available. ARM_ARCH_NONE
27349 indicates that an extension is available for all architectures while
27350 ARM_ANY marks an empty entry. */
27351 const arm_feature_set allowed_archs
[2];
27354 /* The following table must be in alphabetical order with a NULL last entry. */
/* Build an arm_extensions entry valid for a single architecture; the
   second allowed_archs slot is filled with ARM_ANY to mark it empty.  */
#define ARM_EXT_OPT(N, M, C, AA) { N, sizeof (N) - 1, M, C, { AA, ARM_ANY } }
/* Build an arm_extensions entry valid for two distinct architectures.  */
#define ARM_EXT_OPT2(N, M, C, AA1, AA2) { N, sizeof (N) - 1, M, C, { AA1, AA2 } }
27359 /* DEPRECATED: Refrain from using this table to add any new extensions, instead
27360 use the context sensitive approach using arm_ext_table's. */
27361 static const struct arm_option_extension_value_table arm_extensions
[] =
27363 ARM_EXT_OPT ("crc", ARCH_CRC_ARMV8
, ARM_FEATURE_COPROC (CRC_EXT_ARMV8
),
27364 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
)),
27365 ARM_EXT_OPT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
,
27366 ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8
),
27367 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
)),
27368 ARM_EXT_OPT ("dotprod", FPU_ARCH_DOTPROD_NEON_VFP_ARMV8
,
27369 ARM_FEATURE_COPROC (FPU_NEON_EXT_DOTPROD
),
27371 ARM_EXT_OPT ("dsp", ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP
| ARM_EXT_V6_DSP
),
27372 ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP
| ARM_EXT_V6_DSP
),
27373 ARM_FEATURE_CORE (ARM_EXT_V7M
, ARM_EXT2_V8M
)),
27374 ARM_EXT_OPT ("fp", FPU_ARCH_VFP_ARMV8
, ARM_FEATURE_COPROC (FPU_VFP_ARMV8
),
27375 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
)),
27376 ARM_EXT_OPT ("fp16", ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
),
27377 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
),
27379 ARM_EXT_OPT ("fp16fml", ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
27380 | ARM_EXT2_FP16_FML
),
27381 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
27382 | ARM_EXT2_FP16_FML
),
27384 ARM_EXT_OPT2 ("idiv", ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV
| ARM_EXT_DIV
),
27385 ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV
| ARM_EXT_DIV
),
27386 ARM_FEATURE_CORE_LOW (ARM_EXT_V7A
),
27387 ARM_FEATURE_CORE_LOW (ARM_EXT_V7R
)),
27388 /* Duplicate entry for the purpose of allowing ARMv7 to match in presence of
27389 Thumb divide instruction. Due to this having the same name as the
27390 previous entry, this will be ignored when doing command-line parsing and
27391 only considered by build attribute selection code. */
27392 ARM_EXT_OPT ("idiv", ARM_FEATURE_CORE_LOW (ARM_EXT_DIV
),
27393 ARM_FEATURE_CORE_LOW (ARM_EXT_DIV
),
27394 ARM_FEATURE_CORE_LOW (ARM_EXT_V7
)),
27395 ARM_EXT_OPT ("iwmmxt",ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT
),
27396 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT
), ARM_ARCH_NONE
),
27397 ARM_EXT_OPT ("iwmmxt2", ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2
),
27398 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2
), ARM_ARCH_NONE
),
27399 ARM_EXT_OPT ("maverick", ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK
),
27400 ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK
), ARM_ARCH_NONE
),
27401 ARM_EXT_OPT2 ("mp", ARM_FEATURE_CORE_LOW (ARM_EXT_MP
),
27402 ARM_FEATURE_CORE_LOW (ARM_EXT_MP
),
27403 ARM_FEATURE_CORE_LOW (ARM_EXT_V7A
),
27404 ARM_FEATURE_CORE_LOW (ARM_EXT_V7R
)),
27405 ARM_EXT_OPT ("os", ARM_FEATURE_CORE_LOW (ARM_EXT_OS
),
27406 ARM_FEATURE_CORE_LOW (ARM_EXT_OS
),
27407 ARM_FEATURE_CORE_LOW (ARM_EXT_V6M
)),
27408 ARM_EXT_OPT ("pan", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PAN
),
27409 ARM_FEATURE (ARM_EXT_V8
, ARM_EXT2_PAN
, 0),
27410 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8A
)),
27411 ARM_EXT_OPT ("predres", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES
),
27412 ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES
),
27414 ARM_EXT_OPT ("ras", ARM_FEATURE_CORE_HIGH (ARM_EXT2_RAS
),
27415 ARM_FEATURE (ARM_EXT_V8
, ARM_EXT2_RAS
, 0),
27416 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8A
)),
27417 ARM_EXT_OPT ("rdma", FPU_ARCH_NEON_VFP_ARMV8_1
,
27418 ARM_FEATURE_COPROC (FPU_NEON_ARMV8
| FPU_NEON_EXT_RDMA
),
27419 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8A
)),
27420 ARM_EXT_OPT ("sb", ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB
),
27421 ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB
),
27423 ARM_EXT_OPT2 ("sec", ARM_FEATURE_CORE_LOW (ARM_EXT_SEC
),
27424 ARM_FEATURE_CORE_LOW (ARM_EXT_SEC
),
27425 ARM_FEATURE_CORE_LOW (ARM_EXT_V6K
),
27426 ARM_FEATURE_CORE_LOW (ARM_EXT_V7A
)),
27427 ARM_EXT_OPT ("simd", FPU_ARCH_NEON_VFP_ARMV8
,
27428 ARM_FEATURE_COPROC (FPU_NEON_ARMV8
),
27429 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
)),
27430 ARM_EXT_OPT ("virt", ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT
| ARM_EXT_ADIV
27432 ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT
),
27433 ARM_FEATURE_CORE_LOW (ARM_EXT_V7A
)),
27434 ARM_EXT_OPT ("xscale",ARM_FEATURE_COPROC (ARM_CEXT_XSCALE
),
27435 ARM_FEATURE_COPROC (ARM_CEXT_XSCALE
), ARM_ARCH_NONE
),
27436 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
, { ARM_ARCH_NONE
, ARM_ARCH_NONE
} }
27440 /* ISA floating-point and Advanced SIMD extensions. */
27441 struct arm_option_fpu_value_table
27444 const arm_feature_set value
;
27447 /* This list should, at a minimum, contain all the fpu names
27448 recognized by GCC. */
27449 static const struct arm_option_fpu_value_table arm_fpus
[] =
27451 {"softfpa", FPU_NONE
},
27452 {"fpe", FPU_ARCH_FPE
},
27453 {"fpe2", FPU_ARCH_FPE
},
27454 {"fpe3", FPU_ARCH_FPA
}, /* Third release supports LFM/SFM. */
27455 {"fpa", FPU_ARCH_FPA
},
27456 {"fpa10", FPU_ARCH_FPA
},
27457 {"fpa11", FPU_ARCH_FPA
},
27458 {"arm7500fe", FPU_ARCH_FPA
},
27459 {"softvfp", FPU_ARCH_VFP
},
27460 {"softvfp+vfp", FPU_ARCH_VFP_V2
},
27461 {"vfp", FPU_ARCH_VFP_V2
},
27462 {"vfp9", FPU_ARCH_VFP_V2
},
27463 {"vfp3", FPU_ARCH_VFP_V3
}, /* Undocumented, use vfpv3. */
27464 {"vfp10", FPU_ARCH_VFP_V2
},
27465 {"vfp10-r0", FPU_ARCH_VFP_V1
},
27466 {"vfpxd", FPU_ARCH_VFP_V1xD
},
27467 {"vfpv2", FPU_ARCH_VFP_V2
},
27468 {"vfpv3", FPU_ARCH_VFP_V3
},
27469 {"vfpv3-fp16", FPU_ARCH_VFP_V3_FP16
},
27470 {"vfpv3-d16", FPU_ARCH_VFP_V3D16
},
27471 {"vfpv3-d16-fp16", FPU_ARCH_VFP_V3D16_FP16
},
27472 {"vfpv3xd", FPU_ARCH_VFP_V3xD
},
27473 {"vfpv3xd-fp16", FPU_ARCH_VFP_V3xD_FP16
},
27474 {"arm1020t", FPU_ARCH_VFP_V1
},
27475 {"arm1020e", FPU_ARCH_VFP_V2
},
27476 {"arm1136jfs", FPU_ARCH_VFP_V2
}, /* Undocumented, use arm1136jf-s. */
27477 {"arm1136jf-s", FPU_ARCH_VFP_V2
},
27478 {"maverick", FPU_ARCH_MAVERICK
},
27479 {"neon", FPU_ARCH_VFP_V3_PLUS_NEON_V1
},
27480 {"neon-vfpv3", FPU_ARCH_VFP_V3_PLUS_NEON_V1
},
27481 {"neon-fp16", FPU_ARCH_NEON_FP16
},
27482 {"vfpv4", FPU_ARCH_VFP_V4
},
27483 {"vfpv4-d16", FPU_ARCH_VFP_V4D16
},
27484 {"fpv4-sp-d16", FPU_ARCH_VFP_V4_SP_D16
},
27485 {"fpv5-d16", FPU_ARCH_VFP_V5D16
},
27486 {"fpv5-sp-d16", FPU_ARCH_VFP_V5_SP_D16
},
27487 {"neon-vfpv4", FPU_ARCH_NEON_VFP_V4
},
27488 {"fp-armv8", FPU_ARCH_VFP_ARMV8
},
27489 {"neon-fp-armv8", FPU_ARCH_NEON_VFP_ARMV8
},
27490 {"crypto-neon-fp-armv8",
27491 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
},
27492 {"neon-fp-armv8.1", FPU_ARCH_NEON_VFP_ARMV8_1
},
27493 {"crypto-neon-fp-armv8.1",
27494 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_1
},
27495 {NULL
, ARM_ARCH_NONE
}
27498 struct arm_option_value_table
27504 static const struct arm_option_value_table arm_float_abis
[] =
27506 {"hard", ARM_FLOAT_ABI_HARD
},
27507 {"softfp", ARM_FLOAT_ABI_SOFTFP
},
27508 {"soft", ARM_FLOAT_ABI_SOFT
},
27513 /* We only know how to output GNU and ver 4/5 (AAELF) formats. */
27514 static const struct arm_option_value_table arm_eabis
[] =
27516 {"gnu", EF_ARM_EABI_UNKNOWN
},
27517 {"4", EF_ARM_EABI_VER4
},
27518 {"5", EF_ARM_EABI_VER5
},
27523 struct arm_long_option_table
27525 const char * option
; /* Substring to match. */
27526 const char * help
; /* Help information. */
27527 int (* func
) (const char * subopt
); /* Function to decode sub-option. */
27528 const char * deprecated
; /* If non-null, print this message. */
27532 arm_parse_extension (const char *str
, const arm_feature_set
*opt_set
,
27533 arm_feature_set
*ext_set
,
27534 const struct arm_ext_table
*ext_table
)
27536 /* We insist on extensions being specified in alphabetical order, and with
27537 extensions being added before being removed. We achieve this by having
27538 the global ARM_EXTENSIONS table in alphabetical order, and using the
27539 ADDING_VALUE variable to indicate whether we are adding an extension (1)
27540 or removing it (0) and only allowing it to change in the order
27542 const struct arm_option_extension_value_table
* opt
= NULL
;
27543 const arm_feature_set arm_any
= ARM_ANY
;
27544 int adding_value
= -1;
27546 while (str
!= NULL
&& *str
!= 0)
27553 as_bad (_("invalid architectural extension"));
27558 ext
= strchr (str
, '+');
27563 len
= strlen (str
);
27565 if (len
>= 2 && strncmp (str
, "no", 2) == 0)
27567 if (adding_value
!= 0)
27570 opt
= arm_extensions
;
27578 if (adding_value
== -1)
27581 opt
= arm_extensions
;
27583 else if (adding_value
!= 1)
27585 as_bad (_("must specify extensions to add before specifying "
27586 "those to remove"));
27593 as_bad (_("missing architectural extension"));
27597 gas_assert (adding_value
!= -1);
27598 gas_assert (opt
!= NULL
);
27600 if (ext_table
!= NULL
)
27602 const struct arm_ext_table
* ext_opt
= ext_table
;
27603 bfd_boolean found
= FALSE
;
27604 for (; ext_opt
->name
!= NULL
; ext_opt
++)
27605 if (ext_opt
->name_len
== len
27606 && strncmp (ext_opt
->name
, str
, len
) == 0)
27610 if (ARM_FEATURE_ZERO (ext_opt
->merge
))
27611 /* TODO: Option not supported. When we remove the
27612 legacy table this case should error out. */
27615 ARM_MERGE_FEATURE_SETS (*ext_set
, *ext_set
, ext_opt
->merge
);
27619 if (ARM_FEATURE_ZERO (ext_opt
->clear
))
27620 /* TODO: Option not supported. When we remove the
27621 legacy table this case should error out. */
27623 ARM_CLEAR_FEATURE (*ext_set
, *ext_set
, ext_opt
->clear
);
27635 /* Scan over the options table trying to find an exact match. */
27636 for (; opt
->name
!= NULL
; opt
++)
27637 if (opt
->name_len
== len
&& strncmp (opt
->name
, str
, len
) == 0)
27639 int i
, nb_allowed_archs
=
27640 sizeof (opt
->allowed_archs
) / sizeof (opt
->allowed_archs
[0]);
27641 /* Check we can apply the extension to this architecture. */
27642 for (i
= 0; i
< nb_allowed_archs
; i
++)
27645 if (ARM_FEATURE_EQUAL (opt
->allowed_archs
[i
], arm_any
))
27647 if (ARM_FSET_CPU_SUBSET (opt
->allowed_archs
[i
], *opt_set
))
27650 if (i
== nb_allowed_archs
)
27652 as_bad (_("extension does not apply to the base architecture"));
27656 /* Add or remove the extension. */
27658 ARM_MERGE_FEATURE_SETS (*ext_set
, *ext_set
, opt
->merge_value
);
27660 ARM_CLEAR_FEATURE (*ext_set
, *ext_set
, opt
->clear_value
);
27662 /* Allowing Thumb division instructions for ARMv7 in autodetection
27663 rely on this break so that duplicate extensions (extensions
27664 with the same name as a previous extension in the list) are not
27665 considered for command-line parsing. */
27669 if (opt
->name
== NULL
)
27671 /* Did we fail to find an extension because it wasn't specified in
27672 alphabetical order, or because it does not exist? */
27674 for (opt
= arm_extensions
; opt
->name
!= NULL
; opt
++)
27675 if (opt
->name_len
== len
&& strncmp (opt
->name
, str
, len
) == 0)
27678 if (opt
->name
== NULL
)
27679 as_bad (_("unknown architectural extension `%s'"), str
);
27681 as_bad (_("architectural extensions must be specified in "
27682 "alphabetical order"));
27688 /* We should skip the extension we've just matched the next time
27700 arm_parse_cpu (const char *str
)
27702 const struct arm_cpu_option_table
*opt
;
27703 const char *ext
= strchr (str
, '+');
27709 len
= strlen (str
);
27713 as_bad (_("missing cpu name `%s'"), str
);
27717 for (opt
= arm_cpus
; opt
->name
!= NULL
; opt
++)
27718 if (opt
->name_len
== len
&& strncmp (opt
->name
, str
, len
) == 0)
27720 mcpu_cpu_opt
= &opt
->value
;
27721 if (mcpu_ext_opt
== NULL
)
27722 mcpu_ext_opt
= XNEW (arm_feature_set
);
27723 *mcpu_ext_opt
= opt
->ext
;
27724 mcpu_fpu_opt
= &opt
->default_fpu
;
27725 if (opt
->canonical_name
)
27727 gas_assert (sizeof selected_cpu_name
> strlen (opt
->canonical_name
));
27728 strcpy (selected_cpu_name
, opt
->canonical_name
);
27734 if (len
>= sizeof selected_cpu_name
)
27735 len
= (sizeof selected_cpu_name
) - 1;
27737 for (i
= 0; i
< len
; i
++)
27738 selected_cpu_name
[i
] = TOUPPER (opt
->name
[i
]);
27739 selected_cpu_name
[i
] = 0;
27743 return arm_parse_extension (ext
, mcpu_cpu_opt
, mcpu_ext_opt
, NULL
);
27748 as_bad (_("unknown cpu `%s'"), str
);
27753 arm_parse_arch (const char *str
)
27755 const struct arm_arch_option_table
*opt
;
27756 const char *ext
= strchr (str
, '+');
27762 len
= strlen (str
);
27766 as_bad (_("missing architecture name `%s'"), str
);
27770 for (opt
= arm_archs
; opt
->name
!= NULL
; opt
++)
27771 if (opt
->name_len
== len
&& strncmp (opt
->name
, str
, len
) == 0)
27773 march_cpu_opt
= &opt
->value
;
27774 if (march_ext_opt
== NULL
)
27775 march_ext_opt
= XNEW (arm_feature_set
);
27776 *march_ext_opt
= arm_arch_none
;
27777 march_fpu_opt
= &opt
->default_fpu
;
27778 strcpy (selected_cpu_name
, opt
->name
);
27781 return arm_parse_extension (ext
, march_cpu_opt
, march_ext_opt
,
27787 as_bad (_("unknown architecture `%s'\n"), str
);
27792 arm_parse_fpu (const char * str
)
27794 const struct arm_option_fpu_value_table
* opt
;
27796 for (opt
= arm_fpus
; opt
->name
!= NULL
; opt
++)
27797 if (streq (opt
->name
, str
))
27799 mfpu_opt
= &opt
->value
;
27803 as_bad (_("unknown floating point format `%s'\n"), str
);
27808 arm_parse_float_abi (const char * str
)
27810 const struct arm_option_value_table
* opt
;
27812 for (opt
= arm_float_abis
; opt
->name
!= NULL
; opt
++)
27813 if (streq (opt
->name
, str
))
27815 mfloat_abi_opt
= opt
->value
;
27819 as_bad (_("unknown floating point abi `%s'\n"), str
);
27825 arm_parse_eabi (const char * str
)
27827 const struct arm_option_value_table
*opt
;
27829 for (opt
= arm_eabis
; opt
->name
!= NULL
; opt
++)
27830 if (streq (opt
->name
, str
))
27832 meabi_flags
= opt
->value
;
27835 as_bad (_("unknown EABI `%s'\n"), str
);
27841 arm_parse_it_mode (const char * str
)
27843 bfd_boolean ret
= TRUE
;
27845 if (streq ("arm", str
))
27846 implicit_it_mode
= IMPLICIT_IT_MODE_ARM
;
27847 else if (streq ("thumb", str
))
27848 implicit_it_mode
= IMPLICIT_IT_MODE_THUMB
;
27849 else if (streq ("always", str
))
27850 implicit_it_mode
= IMPLICIT_IT_MODE_ALWAYS
;
27851 else if (streq ("never", str
))
27852 implicit_it_mode
= IMPLICIT_IT_MODE_NEVER
;
27855 as_bad (_("unknown implicit IT mode `%s', should be "\
27856 "arm, thumb, always, or never."), str
);
27864 arm_ccs_mode (const char * unused ATTRIBUTE_UNUSED
)
27866 codecomposer_syntax
= TRUE
;
27867 arm_comment_chars
[0] = ';';
27868 arm_line_separator_chars
[0] = 0;
27872 struct arm_long_option_table arm_long_opts
[] =
27874 {"mcpu=", N_("<cpu name>\t assemble for CPU <cpu name>"),
27875 arm_parse_cpu
, NULL
},
27876 {"march=", N_("<arch name>\t assemble for architecture <arch name>"),
27877 arm_parse_arch
, NULL
},
27878 {"mfpu=", N_("<fpu name>\t assemble for FPU architecture <fpu name>"),
27879 arm_parse_fpu
, NULL
},
27880 {"mfloat-abi=", N_("<abi>\t assemble for floating point ABI <abi>"),
27881 arm_parse_float_abi
, NULL
},
27883 {"meabi=", N_("<ver>\t\t assemble for eabi version <ver>"),
27884 arm_parse_eabi
, NULL
},
27886 {"mimplicit-it=", N_("<mode>\t controls implicit insertion of IT instructions"),
27887 arm_parse_it_mode
, NULL
},
27888 {"mccs", N_("\t\t\t TI CodeComposer Studio syntax compatibility mode"),
27889 arm_ccs_mode
, NULL
},
27890 {NULL
, NULL
, 0, NULL
}
27894 md_parse_option (int c
, const char * arg
)
27896 struct arm_option_table
*opt
;
27897 const struct arm_legacy_option_table
*fopt
;
27898 struct arm_long_option_table
*lopt
;
27904 target_big_endian
= 1;
27910 target_big_endian
= 0;
27914 case OPTION_FIX_V4BX
:
27922 #endif /* OBJ_ELF */
27925 /* Listing option. Just ignore these, we don't support additional
27930 for (opt
= arm_opts
; opt
->option
!= NULL
; opt
++)
27932 if (c
== opt
->option
[0]
27933 && ((arg
== NULL
&& opt
->option
[1] == 0)
27934 || streq (arg
, opt
->option
+ 1)))
27936 /* If the option is deprecated, tell the user. */
27937 if (warn_on_deprecated
&& opt
->deprecated
!= NULL
)
27938 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c
,
27939 arg
? arg
: "", _(opt
->deprecated
));
27941 if (opt
->var
!= NULL
)
27942 *opt
->var
= opt
->value
;
27948 for (fopt
= arm_legacy_opts
; fopt
->option
!= NULL
; fopt
++)
27950 if (c
== fopt
->option
[0]
27951 && ((arg
== NULL
&& fopt
->option
[1] == 0)
27952 || streq (arg
, fopt
->option
+ 1)))
27954 /* If the option is deprecated, tell the user. */
27955 if (warn_on_deprecated
&& fopt
->deprecated
!= NULL
)
27956 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c
,
27957 arg
? arg
: "", _(fopt
->deprecated
));
27959 if (fopt
->var
!= NULL
)
27960 *fopt
->var
= &fopt
->value
;
27966 for (lopt
= arm_long_opts
; lopt
->option
!= NULL
; lopt
++)
27968 /* These options are expected to have an argument. */
27969 if (c
== lopt
->option
[0]
27971 && strncmp (arg
, lopt
->option
+ 1,
27972 strlen (lopt
->option
+ 1)) == 0)
27974 /* If the option is deprecated, tell the user. */
27975 if (warn_on_deprecated
&& lopt
->deprecated
!= NULL
)
27976 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c
, arg
,
27977 _(lopt
->deprecated
));
27979 /* Call the sub-option parser. */
27980 return lopt
->func (arg
+ strlen (lopt
->option
) - 1);
27991 md_show_usage (FILE * fp
)
27993 struct arm_option_table
*opt
;
27994 struct arm_long_option_table
*lopt
;
27996 fprintf (fp
, _(" ARM-specific assembler options:\n"));
27998 for (opt
= arm_opts
; opt
->option
!= NULL
; opt
++)
27999 if (opt
->help
!= NULL
)
28000 fprintf (fp
, " -%-23s%s\n", opt
->option
, _(opt
->help
));
28002 for (lopt
= arm_long_opts
; lopt
->option
!= NULL
; lopt
++)
28003 if (lopt
->help
!= NULL
)
28004 fprintf (fp
, " -%s%s\n", lopt
->option
, _(lopt
->help
));
28008 -EB assemble code for a big-endian cpu\n"));
28013 -EL assemble code for a little-endian cpu\n"));
28017 --fix-v4bx Allow BX in ARMv4 code\n"));
28021 --fdpic generate an FDPIC object file\n"));
28022 #endif /* OBJ_ELF */
/* Pairing of a Tag_CPU_arch build attribute value with the feature set of
   the architecture it describes.  A VAL of -1 terminates the table.  */
typedef struct
{
  int val;
  arm_feature_set flags;
} cpu_arch_ver_table;

/* Mapping from CPU features to EABI CPU arch values.  Table must be sorted
   chronologically for architectures, with an exception for ARMv6-M and
   ARMv6S-M due to legacy reasons.  No new architecture should have a
   special case.  This allows for build attribute selection results to be
   stable when new architectures are added.  */
static const cpu_arch_ver_table cpu_arch_ver[] =
{
    {TAG_CPU_ARCH_PRE_V4,     ARM_ARCH_V1},
    {TAG_CPU_ARCH_PRE_V4,     ARM_ARCH_V2},
    {TAG_CPU_ARCH_PRE_V4,     ARM_ARCH_V2S},
    {TAG_CPU_ARCH_PRE_V4,     ARM_ARCH_V3},
    {TAG_CPU_ARCH_PRE_V4,     ARM_ARCH_V3M},
    {TAG_CPU_ARCH_V4,         ARM_ARCH_V4xM},
    {TAG_CPU_ARCH_V4,         ARM_ARCH_V4},
    {TAG_CPU_ARCH_V4T,        ARM_ARCH_V4TxM},
    {TAG_CPU_ARCH_V4T,        ARM_ARCH_V4T},
    {TAG_CPU_ARCH_V5T,        ARM_ARCH_V5xM},
    {TAG_CPU_ARCH_V5T,        ARM_ARCH_V5},
    {TAG_CPU_ARCH_V5T,        ARM_ARCH_V5TxM},
    {TAG_CPU_ARCH_V5T,        ARM_ARCH_V5T},
    {TAG_CPU_ARCH_V5TE,       ARM_ARCH_V5TExP},
    {TAG_CPU_ARCH_V5TE,       ARM_ARCH_V5TE},
    {TAG_CPU_ARCH_V5TEJ,      ARM_ARCH_V5TEJ},
    {TAG_CPU_ARCH_V6,         ARM_ARCH_V6},
    {TAG_CPU_ARCH_V6KZ,       ARM_ARCH_V6Z},
    {TAG_CPU_ARCH_V6KZ,       ARM_ARCH_V6KZ},
    {TAG_CPU_ARCH_V6K,        ARM_ARCH_V6K},
    {TAG_CPU_ARCH_V6T2,       ARM_ARCH_V6T2},
    {TAG_CPU_ARCH_V6T2,       ARM_ARCH_V6KT2},
    {TAG_CPU_ARCH_V6T2,       ARM_ARCH_V6ZT2},
    {TAG_CPU_ARCH_V6T2,       ARM_ARCH_V6KZT2},

    /* When assembling a file with only ARMv6-M or ARMv6S-M instruction, GNU as
       always selected build attributes to match those of ARMv6-M
       (resp. ARMv6S-M).  However, due to these architectures being a strict
       subset of ARMv7-M in terms of instructions available, ARMv7-M attributes
       would be selected when fully respecting chronology of architectures.
       It is thus necessary to make a special case of ARMv6-M and ARMv6S-M and
       move them before ARMv7 architectures.  */
    {TAG_CPU_ARCH_V6_M,       ARM_ARCH_V6M},
    {TAG_CPU_ARCH_V6S_M,      ARM_ARCH_V6SM},

    {TAG_CPU_ARCH_V7,         ARM_ARCH_V7},
    {TAG_CPU_ARCH_V7,         ARM_ARCH_V7A},
    {TAG_CPU_ARCH_V7,         ARM_ARCH_V7R},
    {TAG_CPU_ARCH_V7,         ARM_ARCH_V7M},
    {TAG_CPU_ARCH_V7,         ARM_ARCH_V7VE},
    {TAG_CPU_ARCH_V7E_M,      ARM_ARCH_V7EM},
    {TAG_CPU_ARCH_V8,         ARM_ARCH_V8A},
    {TAG_CPU_ARCH_V8,         ARM_ARCH_V8_1A},
    {TAG_CPU_ARCH_V8,         ARM_ARCH_V8_2A},
    {TAG_CPU_ARCH_V8,         ARM_ARCH_V8_3A},
    {TAG_CPU_ARCH_V8M_BASE,   ARM_ARCH_V8M_BASE},
    {TAG_CPU_ARCH_V8M_MAIN,   ARM_ARCH_V8M_MAIN},
    {TAG_CPU_ARCH_V8R,        ARM_ARCH_V8R},
    /* NOTE(review): v8.4-A and v8.5-A appear after V8-M/V8-R entries in the
       original ordering; kept as-is since later code may rely on first-match
       semantics over this table.  */
    {TAG_CPU_ARCH_V8,         ARM_ARCH_V8_4A},
    {TAG_CPU_ARCH_V8,         ARM_ARCH_V8_5A},
    {TAG_CPU_ARCH_V8_1M_MAIN, ARM_ARCH_V8_1M_MAIN},
    {-1,                      ARM_ARCH_NONE}
};
/* Set an attribute if it has not already been set by the user.  */

static void
aeabi_set_attribute_int (int tag, int value)
{
  /* Tags outside the known range are always emitted; a known TAG is only
     emitted here when the user has not set it explicitly (tracked in
     attributes_set_explicitly[]).  */
  if (tag < 1
      || tag >= NUM_KNOWN_OBJ_ATTRIBUTES
      || !attributes_set_explicitly[tag])
    bfd_elf_add_proc_attr_int (stdoutput, tag, value);
}
/* Set a string attribute if it has not already been set by the user.
   Same gating as aeabi_set_attribute_int: user-set known tags win.  */

static void
aeabi_set_attribute_string (int tag, const char *value)
{
  if (tag < 1
      || tag >= NUM_KNOWN_OBJ_ATTRIBUTES
      || !attributes_set_explicitly[tag])
    bfd_elf_add_proc_attr_string (stdoutput, tag, value);
}
/* Return whether features in the *NEEDED feature set are available via
   extensions for the architecture whose feature set is *ARCH_FSET.  */

static int
have_ext_for_needed_feat_p (const arm_feature_set *arch_fset,
			    const arm_feature_set *needed)
{
  int i, nb_allowed_archs;
  arm_feature_set ext_fset;
  const struct arm_option_extension_value_table *opt;

  /* Accumulate, into EXT_FSET, every feature that some extension valid for
     *ARCH_FSET could provide.  */
  ext_fset = arm_arch_none;
  for (opt = arm_extensions; opt->name != NULL; opt++)
    {
      /* Extension does not provide any feature we need.  */
      if (!ARM_CPU_HAS_FEATURE (*needed, opt->merge_value))
	continue;

      nb_allowed_archs =
	sizeof (opt->allowed_archs) / sizeof (opt->allowed_archs[0]);
      for (i = 0; i < nb_allowed_archs; i++)
	{
	  /* Empty entry terminates the list of allowed architectures.  */
	  if (ARM_FEATURE_EQUAL (opt->allowed_archs[i], arm_arch_any))
	    break;

	  /* Extension is available, add it.  */
	  if (ARM_FSET_CPU_SUBSET (opt->allowed_archs[i], *arch_fset))
	    ARM_MERGE_FEATURE_SETS (ext_fset, ext_fset, opt->merge_value);
	}
    }

  /* Can we enable all features in *needed?  */
  return ARM_FSET_CPU_SUBSET (*needed, ext_fset);
}
28150 /* Select value for Tag_CPU_arch and Tag_CPU_arch_profile build attributes for
28151 a given architecture feature set *ARCH_EXT_FSET including extension feature
28152 set *EXT_FSET. Selection logic used depend on EXACT_MATCH:
28153 - if true, check for an exact match of the architecture modulo extensions;
28154 - otherwise, select build attribute value of the first superset
28155 architecture released so that results remains stable when new architectures
28157 For -march/-mcpu=all the build attribute value of the most featureful
28158 architecture is returned. Tag_CPU_arch_profile result is returned in
28162 get_aeabi_cpu_arch_from_fset (const arm_feature_set
*arch_ext_fset
,
28163 const arm_feature_set
*ext_fset
,
28164 char *profile
, int exact_match
)
28166 arm_feature_set arch_fset
;
28167 const cpu_arch_ver_table
*p_ver
, *p_ver_ret
= NULL
;
28169 /* Select most featureful architecture with all its extensions if building
28170 for -march=all as the feature sets used to set build attributes. */
28171 if (ARM_FEATURE_EQUAL (*arch_ext_fset
, arm_arch_any
))
28173 /* Force revisiting of decision for each new architecture. */
28174 gas_assert (MAX_TAG_CPU_ARCH
<= TAG_CPU_ARCH_V8_1M_MAIN
);
28176 return TAG_CPU_ARCH_V8
;
28179 ARM_CLEAR_FEATURE (arch_fset
, *arch_ext_fset
, *ext_fset
);
28181 for (p_ver
= cpu_arch_ver
; p_ver
->val
!= -1; p_ver
++)
28183 arm_feature_set known_arch_fset
;
28185 ARM_CLEAR_FEATURE (known_arch_fset
, p_ver
->flags
, fpu_any
);
28188 /* Base architecture match user-specified architecture and
28189 extensions, eg. ARMv6S-M matching -march=armv6-m+os. */
28190 if (ARM_FEATURE_EQUAL (*arch_ext_fset
, known_arch_fset
))
28195 /* Base architecture match user-specified architecture only
28196 (eg. ARMv6-M in the same case as above). Record it in case we
28197 find a match with above condition. */
28198 else if (p_ver_ret
== NULL
28199 && ARM_FEATURE_EQUAL (arch_fset
, known_arch_fset
))
28205 /* Architecture has all features wanted. */
28206 if (ARM_FSET_CPU_SUBSET (arch_fset
, known_arch_fset
))
28208 arm_feature_set added_fset
;
28210 /* Compute features added by this architecture over the one
28211 recorded in p_ver_ret. */
28212 if (p_ver_ret
!= NULL
)
28213 ARM_CLEAR_FEATURE (added_fset
, known_arch_fset
,
28215 /* First architecture that match incl. with extensions, or the
28216 only difference in features over the recorded match is
28217 features that were optional and are now mandatory. */
28218 if (p_ver_ret
== NULL
28219 || ARM_FSET_CPU_SUBSET (added_fset
, arch_fset
))
28225 else if (p_ver_ret
== NULL
)
28227 arm_feature_set needed_ext_fset
;
28229 ARM_CLEAR_FEATURE (needed_ext_fset
, arch_fset
, known_arch_fset
);
28231 /* Architecture has all features needed when using some
28232 extensions. Record it and continue searching in case there
28233 exist an architecture providing all needed features without
28234 the need for extensions (eg. ARMv6S-M Vs ARMv6-M with
28236 if (have_ext_for_needed_feat_p (&known_arch_fset
,
28243 if (p_ver_ret
== NULL
)
28247 /* Tag_CPU_arch_profile. */
28248 if (ARM_CPU_HAS_FEATURE (p_ver_ret
->flags
, arm_ext_v7a
)
28249 || ARM_CPU_HAS_FEATURE (p_ver_ret
->flags
, arm_ext_v8
)
28250 || (ARM_CPU_HAS_FEATURE (p_ver_ret
->flags
, arm_ext_atomics
)
28251 && !ARM_CPU_HAS_FEATURE (p_ver_ret
->flags
, arm_ext_v8m_m_only
)))
28253 else if (ARM_CPU_HAS_FEATURE (p_ver_ret
->flags
, arm_ext_v7r
))
28255 else if (ARM_CPU_HAS_FEATURE (p_ver_ret
->flags
, arm_ext_m
))
28259 return p_ver_ret
->val
;
28262 /* Set the public EABI object attributes. */
28265 aeabi_set_public_attributes (void)
28267 char profile
= '\0';
28270 int fp16_optional
= 0;
28271 int skip_exact_match
= 0;
28272 arm_feature_set flags
, flags_arch
, flags_ext
;
28274 /* Autodetection mode, choose the architecture based the instructions
28276 if (no_cpu_selected ())
28278 ARM_MERGE_FEATURE_SETS (flags
, arm_arch_used
, thumb_arch_used
);
28280 if (ARM_CPU_HAS_FEATURE (arm_arch_used
, arm_arch_any
))
28281 ARM_MERGE_FEATURE_SETS (flags
, flags
, arm_ext_v1
);
28283 if (ARM_CPU_HAS_FEATURE (thumb_arch_used
, arm_arch_any
))
28284 ARM_MERGE_FEATURE_SETS (flags
, flags
, arm_ext_v4t
);
28286 /* Code run during relaxation relies on selected_cpu being set. */
28287 ARM_CLEAR_FEATURE (flags_arch
, flags
, fpu_any
);
28288 flags_ext
= arm_arch_none
;
28289 ARM_CLEAR_FEATURE (selected_arch
, flags_arch
, flags_ext
);
28290 selected_ext
= flags_ext
;
28291 selected_cpu
= flags
;
28293 /* Otherwise, choose the architecture based on the capabilities of the
28297 ARM_MERGE_FEATURE_SETS (flags_arch
, selected_arch
, selected_ext
);
28298 ARM_CLEAR_FEATURE (flags_arch
, flags_arch
, fpu_any
);
28299 flags_ext
= selected_ext
;
28300 flags
= selected_cpu
;
28302 ARM_MERGE_FEATURE_SETS (flags
, flags
, selected_fpu
);
28304 /* Allow the user to override the reported architecture. */
28305 if (!ARM_FEATURE_ZERO (selected_object_arch
))
28307 ARM_CLEAR_FEATURE (flags_arch
, selected_object_arch
, fpu_any
);
28308 flags_ext
= arm_arch_none
;
28311 skip_exact_match
= ARM_FEATURE_EQUAL (selected_cpu
, arm_arch_any
);
28313 /* When this function is run again after relaxation has happened there is no
28314 way to determine whether an architecture or CPU was specified by the user:
28315 - selected_cpu is set above for relaxation to work;
28316 - march_cpu_opt is not set if only -mcpu or .cpu is used;
28317 - mcpu_cpu_opt is set to arm_arch_any for autodetection.
28318 Therefore, if not in -march=all case we first try an exact match and fall
28319 back to autodetection. */
28320 if (!skip_exact_match
)
28321 arch
= get_aeabi_cpu_arch_from_fset (&flags_arch
, &flags_ext
, &profile
, 1);
28323 arch
= get_aeabi_cpu_arch_from_fset (&flags_arch
, &flags_ext
, &profile
, 0);
28325 as_bad (_("no architecture contains all the instructions used\n"));
28327 /* Tag_CPU_name. */
28328 if (selected_cpu_name
[0])
28332 q
= selected_cpu_name
;
28333 if (strncmp (q
, "armv", 4) == 0)
28338 for (i
= 0; q
[i
]; i
++)
28339 q
[i
] = TOUPPER (q
[i
]);
28341 aeabi_set_attribute_string (Tag_CPU_name
, q
);
28344 /* Tag_CPU_arch. */
28345 aeabi_set_attribute_int (Tag_CPU_arch
, arch
);
28347 /* Tag_CPU_arch_profile. */
28348 if (profile
!= '\0')
28349 aeabi_set_attribute_int (Tag_CPU_arch_profile
, profile
);
28351 /* Tag_DSP_extension. */
28352 if (ARM_CPU_HAS_FEATURE (selected_ext
, arm_ext_dsp
))
28353 aeabi_set_attribute_int (Tag_DSP_extension
, 1);
28355 ARM_CLEAR_FEATURE (flags_arch
, flags
, fpu_any
);
28356 /* Tag_ARM_ISA_use. */
28357 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_v1
)
28358 || ARM_FEATURE_ZERO (flags_arch
))
28359 aeabi_set_attribute_int (Tag_ARM_ISA_use
, 1);
28361 /* Tag_THUMB_ISA_use. */
28362 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_v4t
)
28363 || ARM_FEATURE_ZERO (flags_arch
))
28367 if (!ARM_CPU_HAS_FEATURE (flags
, arm_ext_v8
)
28368 && ARM_CPU_HAS_FEATURE (flags
, arm_ext_v8m_m_only
))
28370 else if (ARM_CPU_HAS_FEATURE (flags
, arm_arch_t2
))
28374 aeabi_set_attribute_int (Tag_THUMB_ISA_use
, thumb_isa_use
);
28377 /* Tag_VFP_arch. */
28378 if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_armv8xd
))
28379 aeabi_set_attribute_int (Tag_VFP_arch
,
28380 ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_d32
)
28382 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_fma
))
28383 aeabi_set_attribute_int (Tag_VFP_arch
,
28384 ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_d32
)
28386 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_d32
))
28389 aeabi_set_attribute_int (Tag_VFP_arch
, 3);
28391 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v3xd
))
28393 aeabi_set_attribute_int (Tag_VFP_arch
, 4);
28396 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v2
))
28397 aeabi_set_attribute_int (Tag_VFP_arch
, 2);
28398 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v1
)
28399 || ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v1xd
))
28400 aeabi_set_attribute_int (Tag_VFP_arch
, 1);
28402 /* Tag_ABI_HardFP_use. */
28403 if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v1xd
)
28404 && !ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v1
))
28405 aeabi_set_attribute_int (Tag_ABI_HardFP_use
, 1);
28407 /* Tag_WMMX_arch. */
28408 if (ARM_CPU_HAS_FEATURE (flags
, arm_cext_iwmmxt2
))
28409 aeabi_set_attribute_int (Tag_WMMX_arch
, 2);
28410 else if (ARM_CPU_HAS_FEATURE (flags
, arm_cext_iwmmxt
))
28411 aeabi_set_attribute_int (Tag_WMMX_arch
, 1);
28413 /* Tag_Advanced_SIMD_arch (formerly Tag_NEON_arch). */
28414 if (ARM_CPU_HAS_FEATURE (flags
, fpu_neon_ext_v8_1
))
28415 aeabi_set_attribute_int (Tag_Advanced_SIMD_arch
, 4);
28416 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_neon_ext_armv8
))
28417 aeabi_set_attribute_int (Tag_Advanced_SIMD_arch
, 3);
28418 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_neon_ext_v1
))
28420 if (ARM_CPU_HAS_FEATURE (flags
, fpu_neon_ext_fma
))
28422 aeabi_set_attribute_int (Tag_Advanced_SIMD_arch
, 2);
28426 aeabi_set_attribute_int (Tag_Advanced_SIMD_arch
, 1);
28431 /* Tag_VFP_HP_extension (formerly Tag_NEON_FP16_arch). */
28432 if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_fp16
) && fp16_optional
)
28433 aeabi_set_attribute_int (Tag_VFP_HP_extension
, 1);
28437 We set Tag_DIV_use to two when integer divide instructions have been used
28438 in ARM state, or when Thumb integer divide instructions have been used,
28439 but we have no architecture profile set, nor have we any ARM instructions.
28441 For ARMv8-A and ARMv8-M we set the tag to 0 as integer divide is implied
28442 by the base architecture.
28444 For new architectures we will have to check these tests. */
28445 gas_assert (arch
<= TAG_CPU_ARCH_V8_1M_MAIN
);
28446 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_v8
)
28447 || ARM_CPU_HAS_FEATURE (flags
, arm_ext_v8m
))
28448 aeabi_set_attribute_int (Tag_DIV_use
, 0);
28449 else if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_adiv
)
28450 || (profile
== '\0'
28451 && ARM_CPU_HAS_FEATURE (flags
, arm_ext_div
)
28452 && !ARM_CPU_HAS_FEATURE (arm_arch_used
, arm_arch_any
)))
28453 aeabi_set_attribute_int (Tag_DIV_use
, 2);
28455 /* Tag_MP_extension_use. */
28456 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_mp
))
28457 aeabi_set_attribute_int (Tag_MPextension_use
, 1);
28459 /* Tag Virtualization_use. */
28460 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_sec
))
28462 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_virt
))
28465 aeabi_set_attribute_int (Tag_Virtualization_use
, virt_sec
);
/* Post relaxation hook.  Recompute ARM attributes now that relaxation is
   finished and free extension feature bits which will not be used anymore.  */

void
arm_md_post_relax (void)
{
  /* Relaxation may have changed which instructions were used, so the
     public build attributes must be recomputed.  */
  aeabi_set_public_attributes ();

  /* The command-line extension records are no longer needed; release them
     and clear the pointers to guard against use-after-free.  */
  XDELETE (mcpu_ext_opt);
  mcpu_ext_opt = NULL;
  XDELETE (march_ext_opt);
  march_ext_opt = NULL;
}
28481 /* Add the default contents for the .ARM.attributes section. */
28486 if (EF_ARM_EABI_VERSION (meabi_flags
) < EF_ARM_EABI_VER4
)
28489 aeabi_set_public_attributes ();
28491 #endif /* OBJ_ELF */
/* Parse a .cpu directive.  Looks the argument up in arm_cpus and, on a
   match, selects that CPU's architecture and extensions and records the
   CPU name for the Tag_CPU_name build attribute.  Errors out on an
   unknown CPU name.  */

static void
s_arm_cpu (int ignored ATTRIBUTE_UNUSED)
{
  const struct arm_cpu_option_table *opt;
  char saved_char;
  char *name;

  /* The CPU name extends to the next whitespace; temporarily NUL-terminate
     it in place so it can be compared with streq.  */
  name = input_line_pointer;
  while (*input_line_pointer && !ISSPACE (*input_line_pointer))
    input_line_pointer++;
  saved_char = *input_line_pointer;
  *input_line_pointer = 0;

  /* Skip the first "all" entry.  */
  for (opt = arm_cpus + 1; opt->name != NULL; opt++)
    if (streq (opt->name, name))
      {
	selected_arch = opt->value;
	selected_ext = opt->ext;
	ARM_MERGE_FEATURE_SETS (selected_cpu, selected_arch, selected_ext);
	if (opt->canonical_name)
	  strcpy (selected_cpu_name, opt->canonical_name);
	else
	  {
	    /* No canonical name: use the table name, upper-cased.  */
	    int i;

	    for (i = 0; opt->name[i]; i++)
	      selected_cpu_name[i] = TOUPPER (opt->name[i]);
	    selected_cpu_name[i] = 0;
	  }
	ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, selected_fpu);

	/* Restore the character we overwrote with the NUL above.  */
	*input_line_pointer = saved_char;
	demand_empty_rest_of_line ();
	return;
      }

  as_bad (_("unknown cpu `%s'"), name);
  *input_line_pointer = saved_char;
  ignore_rest_of_line ();
}
/* Parse a .arch directive.  Looks the argument up in arm_archs and, on a
   match, selects that architecture with no extensions and records its
   name for the Tag_CPU_name build attribute.  Errors out on an unknown
   architecture name.  */

static void
s_arm_arch (int ignored ATTRIBUTE_UNUSED)
{
  const struct arm_arch_option_table *opt;
  char saved_char;
  char *name;

  /* The architecture name extends to the next whitespace; temporarily
     NUL-terminate it in place so it can be compared with streq.  */
  name = input_line_pointer;
  while (*input_line_pointer && !ISSPACE (*input_line_pointer))
    input_line_pointer++;
  saved_char = *input_line_pointer;
  *input_line_pointer = 0;

  /* Skip the first "all" entry.  */
  for (opt = arm_archs + 1; opt->name != NULL; opt++)
    if (streq (opt->name, name))
      {
	selected_arch = opt->value;
	/* Unlike .cpu, a plain .arch carries no extensions.  */
	selected_ext = arm_arch_none;
	selected_cpu = selected_arch;
	strcpy (selected_cpu_name, opt->name);
	ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, selected_fpu);

	/* Restore the character we overwrote with the NUL above.  */
	*input_line_pointer = saved_char;
	demand_empty_rest_of_line ();
	return;
      }

  as_bad (_("unknown architecture `%s'\n"), name);
  *input_line_pointer = saved_char;
  ignore_rest_of_line ();
}
/* Parse a .object_arch directive.  Overrides the architecture reported in
   the object's build attributes (selected_object_arch) without changing
   which instructions the assembler accepts.  */

static void
s_arm_object_arch (int ignored ATTRIBUTE_UNUSED)
{
  const struct arm_arch_option_table *opt;
  char saved_char;
  char *name;

  /* The architecture name extends to the next whitespace; temporarily
     NUL-terminate it in place so it can be compared with streq.  */
  name = input_line_pointer;
  while (*input_line_pointer && !ISSPACE (*input_line_pointer))
    input_line_pointer++;
  saved_char = *input_line_pointer;
  *input_line_pointer = 0;

  /* Skip the first "all" entry.  */
  for (opt = arm_archs + 1; opt->name != NULL; opt++)
    if (streq (opt->name, name))
      {
	selected_object_arch = opt->value;

	/* Restore the character we overwrote with the NUL above.  */
	*input_line_pointer = saved_char;
	demand_empty_rest_of_line ();
	return;
      }

  as_bad (_("unknown architecture `%s'\n"), name);
  *input_line_pointer = saved_char;
  ignore_rest_of_line ();
}
28600 /* Parse a .arch_extension directive. */
28603 s_arm_arch_extension (int ignored ATTRIBUTE_UNUSED
)
28605 const struct arm_option_extension_value_table
*opt
;
28608 int adding_value
= 1;
28610 name
= input_line_pointer
;
28611 while (*input_line_pointer
&& !ISSPACE (*input_line_pointer
))
28612 input_line_pointer
++;
28613 saved_char
= *input_line_pointer
;
28614 *input_line_pointer
= 0;
28616 if (strlen (name
) >= 2
28617 && strncmp (name
, "no", 2) == 0)
28623 for (opt
= arm_extensions
; opt
->name
!= NULL
; opt
++)
28624 if (streq (opt
->name
, name
))
28626 int i
, nb_allowed_archs
=
28627 sizeof (opt
->allowed_archs
) / sizeof (opt
->allowed_archs
[i
]);
28628 for (i
= 0; i
< nb_allowed_archs
; i
++)
28631 if (ARM_CPU_IS_ANY (opt
->allowed_archs
[i
]))
28633 if (ARM_FSET_CPU_SUBSET (opt
->allowed_archs
[i
], selected_arch
))
28637 if (i
== nb_allowed_archs
)
28639 as_bad (_("architectural extension `%s' is not allowed for the "
28640 "current base architecture"), name
);
28645 ARM_MERGE_FEATURE_SETS (selected_ext
, selected_ext
,
28648 ARM_CLEAR_FEATURE (selected_ext
, selected_ext
, opt
->clear_value
);
28650 ARM_MERGE_FEATURE_SETS (selected_cpu
, selected_arch
, selected_ext
);
28651 ARM_MERGE_FEATURE_SETS (cpu_variant
, selected_cpu
, selected_fpu
);
28652 *input_line_pointer
= saved_char
;
28653 demand_empty_rest_of_line ();
28654 /* Allowing Thumb division instructions for ARMv7 in autodetection rely
28655 on this return so that duplicate extensions (extensions with the
28656 same name as a previous extension in the list) are not considered
28657 for command-line parsing. */
28661 if (opt
->name
== NULL
)
28662 as_bad (_("unknown architecture extension `%s'\n"), name
);
28664 *input_line_pointer
= saved_char
;
28665 ignore_rest_of_line ();
/* Parse a .fpu directive.  Looks the argument up in arm_fpus and, on a
   match, selects that FPU and recomputes cpu_variant.  Errors out on an
   unknown FPU name.  */

static void
s_arm_fpu (int ignored ATTRIBUTE_UNUSED)
{
  const struct arm_option_fpu_value_table *opt;
  char saved_char;
  char *name;

  /* The FPU name extends to the next whitespace; temporarily
     NUL-terminate it in place so it can be compared with streq.  */
  name = input_line_pointer;
  while (*input_line_pointer && !ISSPACE (*input_line_pointer))
    input_line_pointer++;
  saved_char = *input_line_pointer;
  *input_line_pointer = 0;

  for (opt = arm_fpus; opt->name != NULL; opt++)
    if (streq (opt->name, name))
      {
	selected_fpu = opt->value;
#ifndef CPU_DEFAULT
	/* With no default CPU and none selected, accept any architecture
	   so FPU instructions are not rejected.  */
	if (no_cpu_selected ())
	  ARM_MERGE_FEATURE_SETS (cpu_variant, arm_arch_any, selected_fpu);
	else
#endif
	  ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, selected_fpu);

	/* Restore the character we overwrote with the NUL above.  */
	*input_line_pointer = saved_char;
	demand_empty_rest_of_line ();
	return;
      }

  as_bad (_("unknown floating point format `%s'\n"), name);
  *input_line_pointer = saved_char;
  ignore_rest_of_line ();
}
/* Copy symbol information.  Propagates the ARM target-private flag bits
   (as accessed via ARM_GET_FLAG) from SRC to DEST.  */

void
arm_copy_symbol_attributes (symbolS *dest, symbolS *src)
{
  ARM_GET_FLAG (dest) = ARM_GET_FLAG (src);
}
28712 /* Given a symbolic attribute NAME, return the proper integer value.
28713 Returns -1 if the attribute is not known. */
28716 arm_convert_symbolic_attribute (const char *name
)
28718 static const struct
28723 attribute_table
[] =
28725 /* When you modify this table you should
28726 also modify the list in doc/c-arm.texi. */
28727 #define T(tag) {#tag, tag}
28728 T (Tag_CPU_raw_name
),
28731 T (Tag_CPU_arch_profile
),
28732 T (Tag_ARM_ISA_use
),
28733 T (Tag_THUMB_ISA_use
),
28737 T (Tag_Advanced_SIMD_arch
),
28738 T (Tag_PCS_config
),
28739 T (Tag_ABI_PCS_R9_use
),
28740 T (Tag_ABI_PCS_RW_data
),
28741 T (Tag_ABI_PCS_RO_data
),
28742 T (Tag_ABI_PCS_GOT_use
),
28743 T (Tag_ABI_PCS_wchar_t
),
28744 T (Tag_ABI_FP_rounding
),
28745 T (Tag_ABI_FP_denormal
),
28746 T (Tag_ABI_FP_exceptions
),
28747 T (Tag_ABI_FP_user_exceptions
),
28748 T (Tag_ABI_FP_number_model
),
28749 T (Tag_ABI_align_needed
),
28750 T (Tag_ABI_align8_needed
),
28751 T (Tag_ABI_align_preserved
),
28752 T (Tag_ABI_align8_preserved
),
28753 T (Tag_ABI_enum_size
),
28754 T (Tag_ABI_HardFP_use
),
28755 T (Tag_ABI_VFP_args
),
28756 T (Tag_ABI_WMMX_args
),
28757 T (Tag_ABI_optimization_goals
),
28758 T (Tag_ABI_FP_optimization_goals
),
28759 T (Tag_compatibility
),
28760 T (Tag_CPU_unaligned_access
),
28761 T (Tag_FP_HP_extension
),
28762 T (Tag_VFP_HP_extension
),
28763 T (Tag_ABI_FP_16bit_format
),
28764 T (Tag_MPextension_use
),
28766 T (Tag_nodefaults
),
28767 T (Tag_also_compatible_with
),
28768 T (Tag_conformance
),
28770 T (Tag_Virtualization_use
),
28771 T (Tag_DSP_extension
),
28772 /* We deliberately do not include Tag_MPextension_use_legacy. */
28780 for (i
= 0; i
< ARRAY_SIZE (attribute_table
); i
++)
28781 if (streq (name
, attribute_table
[i
].name
))
28782 return attribute_table
[i
].tag
;
28787 /* Apply sym value for relocations only in the case that they are for
28788 local symbols in the same segment as the fixup and you have the
28789 respective architectural feature for blx and simple switches. */
28792 arm_apply_sym_value (struct fix
* fixP
, segT this_seg
)
28795 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
)
28796 /* PR 17444: If the local symbol is in a different section then a reloc
28797 will always be generated for it, so applying the symbol value now
28798 will result in a double offset being stored in the relocation. */
28799 && (S_GET_SEGMENT (fixP
->fx_addsy
) == this_seg
)
28800 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
))
28802 switch (fixP
->fx_r_type
)
28804 case BFD_RELOC_ARM_PCREL_BLX
:
28805 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
28806 if (ARM_IS_FUNC (fixP
->fx_addsy
))
28810 case BFD_RELOC_ARM_PCREL_CALL
:
28811 case BFD_RELOC_THUMB_PCREL_BLX
:
28812 if (THUMB_IS_FUNC (fixP
->fx_addsy
))
28823 #endif /* OBJ_ELF */